CombinedText stringlengths 4 3.42M |
|---|
package main
import (
"encoding/xml"
"flag"
"fmt"
"io/ioutil"
"math"
"net"
"net/http"
_ "net/http/pprof"
"strconv"
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
const (
namespace = "bind"
resolver = "resolver"
)
// Prometheus metric descriptors for everything this exporter emits.
// Fixes: "incomming" -> "incoming", "reponse" -> "response", and the
// incoming_requests_total help text wrongly said "queries".
// NOTE: the misspelled identifiers serverReponses / resolverDNSSECSucess are
// kept as-is because other declarations reference them.
var (
	up = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "up"),
		"Was the Bind instance query successful?",
		nil, nil,
	)
	incomingQueries = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "incoming_queries_total"),
		"Number of incoming DNS queries.",
		[]string{"type"}, nil,
	)
	incomingRequests = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "incoming_requests_total"),
		"Number of incoming DNS requests.",
		[]string{"name"}, nil,
	)
	resolverCache = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, resolver, "cache_rrsets"),
		"Number of RRSets in Cache database.",
		[]string{"view", "type"}, nil,
	)
	resolverQueries = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, resolver, "queries_total"),
		"Number of outgoing DNS queries.",
		[]string{"view", "name"}, nil,
	)
	resolverQueryDuration = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, resolver, "query_duration_seconds"),
		"Resolver query round-trip time in seconds.",
		[]string{"view"}, nil,
	)
	resolverQueryErrors = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, resolver, "query_errors_total"),
		"Number of resolver queries failed.",
		[]string{"view", "error"}, nil,
	)
	resolverResponseErrors = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, resolver, "response_errors_total"),
		"Number of resolver response errors received.",
		[]string{"view", "error"}, nil,
	)
	resolverDNSSECSucess = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, resolver, "dnssec_validation_success_total"),
		"Number of DNSSEC validation attempts succeeded.",
		[]string{"view", "result"}, nil,
	)
	// Stats whose Bind counter name selects a dedicated metric with only
	// the "view" label.
	resolverMetricStats = map[string]*prometheus.Desc{
		"Lame": prometheus.NewDesc(
			prometheus.BuildFQName(namespace, resolver, "response_lame_total"),
			"Number of lame delegation responses received.",
			[]string{"view"}, nil,
		),
		"EDNS0Fail": prometheus.NewDesc(
			prometheus.BuildFQName(namespace, resolver, "query_edns0_errors_total"),
			"Number of EDNS(0) query errors.",
			[]string{"view"}, nil,
		),
		"Mismatch": prometheus.NewDesc(
			prometheus.BuildFQName(namespace, resolver, "response_mismatch_total"),
			"Number of mismatch responses received.",
			[]string{"view"}, nil,
		),
		"Retry": prometheus.NewDesc(
			prometheus.BuildFQName(namespace, resolver, "query_retries_total"),
			"Number of resolver query retries.",
			[]string{"view"}, nil,
		),
		"Truncated": prometheus.NewDesc(
			prometheus.BuildFQName(namespace, resolver, "response_truncated_total"),
			"Number of truncated responses received.",
			[]string{"view"}, nil,
		),
		"ValFail": prometheus.NewDesc(
			prometheus.BuildFQName(namespace, resolver, "dnssec_validation_errors_total"),
			"Number of DNSSEC validation attempt errors.",
			[]string{"view"}, nil,
		),
	}
	// Stats whose Bind counter name becomes a label value on a shared metric.
	resolverLabelStats = map[string]*prometheus.Desc{
		"QueryAbort":    resolverQueryErrors,
		"QuerySockFail": resolverQueryErrors,
		"QueryTimeout":  resolverQueryErrors,
		"NXDOMAIN":      resolverResponseErrors,
		"SERVFAIL":      resolverResponseErrors,
		"FORMERR":       resolverResponseErrors,
		"OtherError":    resolverResponseErrors,
		"ValOk":         resolverDNSSECSucess,
		"ValNegOk":      resolverDNSSECSucess,
	}
	serverReponses = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "responses_total"),
		"Number of responses sent.",
		[]string{"result"}, nil,
	)
	// Server-level counters; the "Qry" prefix is stripped for the label.
	serverLabelStats = map[string]*prometheus.Desc{
		"QrySuccess":  serverReponses,
		"QryReferral": serverReponses,
		"QryNxrrset":  serverReponses,
		"QrySERVFAIL": serverReponses,
		"QryFORMERR":  serverReponses,
		"QryNXDOMAIN": serverReponses,
	}
	tasksRunning = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "tasks_running"),
		"Number of running tasks.",
		nil, nil,
	)
	workerThreads = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "worker_threads"),
		"Total number of available worker threads.",
		nil, nil,
	)
)
// Exporter collects Bind stats from the given server and exports
// them using the prometheus metrics package.
type Exporter struct {
	URI    string       // Bind statistics-channel URL, e.g. http://localhost:8053/
	client *http.Client // HTTP client with per-connection deadlines; see NewExporter
}
// NewExporter returns an Exporter that scrapes the Bind statistics channel
// at uri. Every connection is dialed with the given timeout, and a read/write
// deadline of the same duration is set once at dial time.
func NewExporter(uri string, timeout time.Duration) *Exporter {
	dial := func(network, address string) (net.Conn, error) {
		conn, err := net.DialTimeout(network, address, timeout)
		if err != nil {
			return nil, err
		}
		if derr := conn.SetDeadline(time.Now().Add(timeout)); derr != nil {
			return nil, derr
		}
		return conn, nil
	}
	transport := &http.Transport{Dial: dial}
	return &Exporter{
		URI:    uri,
		client: &http.Client{Transport: transport},
	}
}
// Describe describes all the metrics ever exported by the bind
// exporter. It implements prometheus.Collector.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
	ch <- up
	ch <- incomingQueries
	ch <- incomingRequests
	// resolverCache was collected but never described; add it so tools
	// relying on Describe (e.g. lint/registry checks) see every metric.
	ch <- resolverCache
	ch <- resolverDNSSECSucess
	ch <- resolverQueries
	ch <- resolverQueryDuration
	ch <- resolverQueryErrors
	ch <- resolverResponseErrors
	for _, desc := range resolverMetricStats {
		ch <- desc
	}
	ch <- serverReponses
	ch <- tasksRunning
	ch <- workerThreads
}
// Collect fetches the stats from the configured bind location and
// delivers them as Prometheus metrics. It implements prometheus.Collector.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
	var status float64
	// Always emit the "up" gauge, even on early returns.
	defer func() {
		ch <- prometheus.MustNewConstMetric(up, prometheus.GaugeValue, status)
	}()
	resp, err := e.client.Get(e.URI)
	if err != nil {
		log.Error("Error while querying Bind: ", err)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Error("Failed to read XML response body: ", err)
		return
	}
	root := Isc{}
	// body is already []byte; the previous []byte(body) conversion was a no-op.
	if err := xml.Unmarshal(body, &root); err != nil {
		log.Error("Failed to unmarshal XML response: ", err)
		return
	}
	// Only report success once the payload actually parsed; previously
	// "up" was 1 even when Bind returned unusable XML.
	status = 1
	stats := root.Bind.Statistics
	// Incoming query counts by record type (A, AAAA, ...).
	for _, s := range stats.Server.QueriesIn.Rdtype {
		ch <- prometheus.MustNewConstMetric(
			incomingQueries, prometheus.CounterValue, float64(s.Counter), s.Name,
		)
	}
	// Incoming request counts by opcode (QUERY, UPDATE, ...).
	for _, s := range stats.Server.Requests.Opcode {
		ch <- prometheus.MustNewConstMetric(
			incomingRequests, prometheus.CounterValue, float64(s.Counter), s.Name,
		)
	}
	// Server-level counters mapped to labeled metrics; the "Qry" prefix
	// is stripped to form the label value.
	for _, s := range stats.Server.NsStats {
		if desc, ok := serverLabelStats[s.Name]; ok {
			r := strings.TrimPrefix(s.Name, "Qry")
			ch <- prometheus.MustNewConstMetric(
				desc, prometheus.CounterValue, float64(s.Counter), r,
			)
		}
	}
	// Per-view resolver statistics.
	for _, v := range stats.Views {
		for _, s := range v.Cache {
			ch <- prometheus.MustNewConstMetric(
				resolverCache, prometheus.GaugeValue, float64(s.Counter), v.Name, s.Name,
			)
		}
		for _, s := range v.Rdtype {
			ch <- prometheus.MustNewConstMetric(
				resolverQueries, prometheus.CounterValue, float64(s.Counter), v.Name, s.Name,
			)
		}
		for _, s := range v.Resstat {
			if desc, ok := resolverMetricStats[s.Name]; ok {
				ch <- prometheus.MustNewConstMetric(
					desc, prometheus.CounterValue, float64(s.Counter), v.Name,
				)
			}
			if desc, ok := resolverLabelStats[s.Name]; ok {
				ch <- prometheus.MustNewConstMetric(
					desc, prometheus.CounterValue, float64(s.Counter), v.Name, s.Name,
				)
			}
		}
		// Query RTT buckets become one histogram per view. The sum is
		// unknown from Bind's data, hence NaN.
		if buckets, count, err := histogram(v.Resstat); err == nil {
			ch <- prometheus.MustNewConstHistogram(
				resolverQueryDuration, count, math.NaN(), buckets, v.Name,
			)
		} else {
			log.Warn("Error parsing RTT:", err)
		}
	}
	threadModel := stats.Taskmgr.ThreadModel
	ch <- prometheus.MustNewConstMetric(
		tasksRunning, prometheus.GaugeValue, float64(threadModel.TasksRunning),
	)
	ch <- prometheus.MustNewConstMetric(
		workerThreads, prometheus.GaugeValue, float64(threadModel.WorkerThreads),
	)
}
// histogram converts Bind's per-bucket QryRTT counters into the cumulative
// bucket map expected by prometheus.MustNewConstHistogram, returning the
// buckets, the total observation count, and an error if a bound fails to
// parse. Bind reports counters named <qryRTT><upper-bound-ms>, plus a final
// "...+" counter for everything slower, which maps to the +Inf bucket.
// Bounds are divided by 1000 to convert milliseconds to seconds.
// NOTE(review): correctness of the cumulative sums assumes the RTT counters
// arrive in ascending bound order — confirm against Bind's XML output.
func histogram(stats []Stat) (map[float64]uint64, uint64, error) {
	buckets := map[float64]uint64{}
	var count uint64
	for _, s := range stats {
		// qryRTT is a prefix constant declared elsewhere in this package.
		if strings.HasPrefix(s.Name, qryRTT) {
			// The "+" suffix marks the open-ended (slowest) bucket.
			b := math.Inf(0)
			if !strings.HasSuffix(s.Name, "+") {
				var err error
				rrt := strings.TrimPrefix(s.Name, qryRTT)
				b, err = strconv.ParseFloat(rrt, 32)
				if err != nil {
					return buckets, 0, fmt.Errorf("could not parse RTT: %s", rrt)
				}
			}
			// Cumulative bucket value: all observations at or below this bound.
			buckets[b/1000] = count + uint64(s.Counter)
			count += uint64(s.Counter)
		}
	}
	return buckets, count, nil
}
// main parses flags, registers the Bind collector (and optionally a process
// collector fed by Bind's pid file), and serves the metrics endpoint.
func main() {
	var (
		listenAddress = flag.String("web.listen-address", ":9109", "Address to listen on for web interface and telemetry.")
		metricsPath   = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
		bindURI       = flag.String("bind.statsuri", "http://localhost:8053/", "HTTP XML API address of an Bind server.")
		bindTimeout   = flag.Duration("bind.timeout", 10*time.Second, "Timeout for trying to get stats from Bind.")
		bindPidFile   = flag.String("bind.pid-file", "", "Path to Bind's pid file to export process information.")
	)
	flag.Parse()
	prometheus.MustRegister(NewExporter(*bindURI, *bindTimeout))
	if *bindPidFile != "" {
		// Export process metrics (cpu, memory, fds) for the Bind process
		// itself; the pid is re-read from the pid file on every scrape.
		procExporter := prometheus.NewProcessCollectorPIDFn(
			func() (int, error) {
				content, err := ioutil.ReadFile(*bindPidFile)
				if err != nil {
					return 0, fmt.Errorf("Can't read pid file: %s", err)
				}
				value, err := strconv.Atoi(strings.TrimSpace(string(content)))
				if err != nil {
					return 0, fmt.Errorf("Can't parse pid file: %s", err)
				}
				return value, nil
			}, namespace)
		prometheus.MustRegister(procExporter)
	}
	log.Info("Starting Server: ", *listenAddress)
	http.Handle(*metricsPath, prometheus.Handler())
	// Minimal landing page linking to the metrics endpoint.
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
<head><title>Bind Exporter</title></head>
<body>
<h1>Bind Exporter</h1>
<p><a href='` + *metricsPath + `'>Metrics</a></p>
</body>
</html>`))
	})
	log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
Add server query errors metric
package main
import (
"encoding/xml"
"flag"
"fmt"
"io/ioutil"
"math"
"net"
"net/http"
_ "net/http/pprof"
"strconv"
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
const (
namespace = "bind"
resolver = "resolver"
)
// Prometheus metric descriptors for everything this exporter emits.
// Fixes: "incomming" -> "incoming", "reponse" -> "response", and the
// incoming_requests_total help text wrongly said "queries".
// NOTE: the misspelled identifiers serverReponses / resolverDNSSECSucess are
// kept as-is because other declarations reference them.
var (
	up = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "up"),
		"Was the Bind instance query successful?",
		nil, nil,
	)
	incomingQueries = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "incoming_queries_total"),
		"Number of incoming DNS queries.",
		[]string{"type"}, nil,
	)
	incomingRequests = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "incoming_requests_total"),
		"Number of incoming DNS requests.",
		[]string{"name"}, nil,
	)
	resolverCache = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, resolver, "cache_rrsets"),
		"Number of RRSets in Cache database.",
		[]string{"view", "type"}, nil,
	)
	resolverQueries = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, resolver, "queries_total"),
		"Number of outgoing DNS queries.",
		[]string{"view", "name"}, nil,
	)
	resolverQueryDuration = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, resolver, "query_duration_seconds"),
		"Resolver query round-trip time in seconds.",
		[]string{"view"}, nil,
	)
	resolverQueryErrors = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, resolver, "query_errors_total"),
		"Number of resolver queries failed.",
		[]string{"view", "error"}, nil,
	)
	resolverResponseErrors = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, resolver, "response_errors_total"),
		"Number of resolver response errors received.",
		[]string{"view", "error"}, nil,
	)
	resolverDNSSECSucess = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, resolver, "dnssec_validation_success_total"),
		"Number of DNSSEC validation attempts succeeded.",
		[]string{"view", "result"}, nil,
	)
	// Stats whose Bind counter name selects a dedicated metric with only
	// the "view" label.
	resolverMetricStats = map[string]*prometheus.Desc{
		"Lame": prometheus.NewDesc(
			prometheus.BuildFQName(namespace, resolver, "response_lame_total"),
			"Number of lame delegation responses received.",
			[]string{"view"}, nil,
		),
		"EDNS0Fail": prometheus.NewDesc(
			prometheus.BuildFQName(namespace, resolver, "query_edns0_errors_total"),
			"Number of EDNS(0) query errors.",
			[]string{"view"}, nil,
		),
		"Mismatch": prometheus.NewDesc(
			prometheus.BuildFQName(namespace, resolver, "response_mismatch_total"),
			"Number of mismatch responses received.",
			[]string{"view"}, nil,
		),
		"Retry": prometheus.NewDesc(
			prometheus.BuildFQName(namespace, resolver, "query_retries_total"),
			"Number of resolver query retries.",
			[]string{"view"}, nil,
		),
		"Truncated": prometheus.NewDesc(
			prometheus.BuildFQName(namespace, resolver, "response_truncated_total"),
			"Number of truncated responses received.",
			[]string{"view"}, nil,
		),
		"ValFail": prometheus.NewDesc(
			prometheus.BuildFQName(namespace, resolver, "dnssec_validation_errors_total"),
			"Number of DNSSEC validation attempt errors.",
			[]string{"view"}, nil,
		),
	}
	// Stats whose Bind counter name becomes a label value on a shared metric.
	resolverLabelStats = map[string]*prometheus.Desc{
		"QueryAbort":    resolverQueryErrors,
		"QuerySockFail": resolverQueryErrors,
		"QueryTimeout":  resolverQueryErrors,
		"NXDOMAIN":      resolverResponseErrors,
		"SERVFAIL":      resolverResponseErrors,
		"FORMERR":       resolverResponseErrors,
		"OtherError":    resolverResponseErrors,
		"ValOk":         resolverDNSSECSucess,
		"ValNegOk":      resolverDNSSECSucess,
	}
	serverQueryErrors = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "query_errors_total"),
		"Number of query failures.",
		[]string{"error"}, nil,
	)
	serverReponses = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "responses_total"),
		"Number of responses sent.",
		[]string{"result"}, nil,
	)
	// Server-level counters; the "Qry" prefix is stripped for the label.
	serverLabelStats = map[string]*prometheus.Desc{
		"QryDuplicate": serverQueryErrors,
		"QryDropped":   serverQueryErrors,
		"QryFailure":   serverQueryErrors,
		"QrySuccess":   serverReponses,
		"QryReferral":  serverReponses,
		"QryNxrrset":   serverReponses,
		"QrySERVFAIL":  serverReponses,
		"QryFORMERR":   serverReponses,
		"QryNXDOMAIN":  serverReponses,
	}
	tasksRunning = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "tasks_running"),
		"Number of running tasks.",
		nil, nil,
	)
	workerThreads = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "worker_threads"),
		"Total number of available worker threads.",
		nil, nil,
	)
)
// Exporter collects Bind stats from the given server and exports
// them using the prometheus metrics package.
type Exporter struct {
	URI    string       // Bind statistics-channel URL, e.g. http://localhost:8053/
	client *http.Client // HTTP client with per-connection deadlines; see NewExporter
}
// NewExporter returns an Exporter that scrapes the Bind statistics channel
// at uri. Every connection is dialed with the given timeout, and a read/write
// deadline of the same duration is set once at dial time.
func NewExporter(uri string, timeout time.Duration) *Exporter {
	dial := func(network, address string) (net.Conn, error) {
		conn, err := net.DialTimeout(network, address, timeout)
		if err != nil {
			return nil, err
		}
		if derr := conn.SetDeadline(time.Now().Add(timeout)); derr != nil {
			return nil, derr
		}
		return conn, nil
	}
	transport := &http.Transport{Dial: dial}
	return &Exporter{
		URI:    uri,
		client: &http.Client{Transport: transport},
	}
}
// Describe describes all the metrics ever exported by the bind
// exporter. It implements prometheus.Collector.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
	ch <- up
	ch <- incomingQueries
	ch <- incomingRequests
	// resolverCache and serverQueryErrors are collected but were never
	// described; add them so Describe covers every emitted metric.
	ch <- resolverCache
	ch <- resolverDNSSECSucess
	ch <- resolverQueries
	ch <- resolverQueryDuration
	ch <- resolverQueryErrors
	ch <- resolverResponseErrors
	for _, desc := range resolverMetricStats {
		ch <- desc
	}
	ch <- serverQueryErrors
	ch <- serverReponses
	ch <- tasksRunning
	ch <- workerThreads
}
// Collect fetches the stats from the configured bind location and
// delivers them as Prometheus metrics. It implements prometheus.Collector.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
	var status float64
	// Always emit the "up" gauge, even on early returns.
	defer func() {
		ch <- prometheus.MustNewConstMetric(up, prometheus.GaugeValue, status)
	}()
	resp, err := e.client.Get(e.URI)
	if err != nil {
		log.Error("Error while querying Bind: ", err)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Error("Failed to read XML response body: ", err)
		return
	}
	root := Isc{}
	// body is already []byte; the previous []byte(body) conversion was a no-op.
	if err := xml.Unmarshal(body, &root); err != nil {
		log.Error("Failed to unmarshal XML response: ", err)
		return
	}
	// Only report success once the payload actually parsed; previously
	// "up" was 1 even when Bind returned unusable XML.
	status = 1
	stats := root.Bind.Statistics
	// Incoming query counts by record type (A, AAAA, ...).
	for _, s := range stats.Server.QueriesIn.Rdtype {
		ch <- prometheus.MustNewConstMetric(
			incomingQueries, prometheus.CounterValue, float64(s.Counter), s.Name,
		)
	}
	// Incoming request counts by opcode (QUERY, UPDATE, ...).
	for _, s := range stats.Server.Requests.Opcode {
		ch <- prometheus.MustNewConstMetric(
			incomingRequests, prometheus.CounterValue, float64(s.Counter), s.Name,
		)
	}
	// Server-level counters mapped to labeled metrics; the "Qry" prefix
	// is stripped to form the label value.
	for _, s := range stats.Server.NsStats {
		if desc, ok := serverLabelStats[s.Name]; ok {
			r := strings.TrimPrefix(s.Name, "Qry")
			ch <- prometheus.MustNewConstMetric(
				desc, prometheus.CounterValue, float64(s.Counter), r,
			)
		}
	}
	// Per-view resolver statistics.
	for _, v := range stats.Views {
		for _, s := range v.Cache {
			ch <- prometheus.MustNewConstMetric(
				resolverCache, prometheus.GaugeValue, float64(s.Counter), v.Name, s.Name,
			)
		}
		for _, s := range v.Rdtype {
			ch <- prometheus.MustNewConstMetric(
				resolverQueries, prometheus.CounterValue, float64(s.Counter), v.Name, s.Name,
			)
		}
		for _, s := range v.Resstat {
			if desc, ok := resolverMetricStats[s.Name]; ok {
				ch <- prometheus.MustNewConstMetric(
					desc, prometheus.CounterValue, float64(s.Counter), v.Name,
				)
			}
			if desc, ok := resolverLabelStats[s.Name]; ok {
				ch <- prometheus.MustNewConstMetric(
					desc, prometheus.CounterValue, float64(s.Counter), v.Name, s.Name,
				)
			}
		}
		// Query RTT buckets become one histogram per view. The sum is
		// unknown from Bind's data, hence NaN.
		if buckets, count, err := histogram(v.Resstat); err == nil {
			ch <- prometheus.MustNewConstHistogram(
				resolverQueryDuration, count, math.NaN(), buckets, v.Name,
			)
		} else {
			log.Warn("Error parsing RTT:", err)
		}
	}
	threadModel := stats.Taskmgr.ThreadModel
	ch <- prometheus.MustNewConstMetric(
		tasksRunning, prometheus.GaugeValue, float64(threadModel.TasksRunning),
	)
	ch <- prometheus.MustNewConstMetric(
		workerThreads, prometheus.GaugeValue, float64(threadModel.WorkerThreads),
	)
}
// histogram converts Bind's per-bucket QryRTT counters into the cumulative
// bucket map expected by prometheus.MustNewConstHistogram, returning the
// buckets, the total observation count, and an error if a bound fails to
// parse. Bind reports counters named <qryRTT><upper-bound-ms>, plus a final
// "...+" counter for everything slower, which maps to the +Inf bucket.
// Bounds are divided by 1000 to convert milliseconds to seconds.
// NOTE(review): correctness of the cumulative sums assumes the RTT counters
// arrive in ascending bound order — confirm against Bind's XML output.
func histogram(stats []Stat) (map[float64]uint64, uint64, error) {
	buckets := map[float64]uint64{}
	var count uint64
	for _, s := range stats {
		// qryRTT is a prefix constant declared elsewhere in this package.
		if strings.HasPrefix(s.Name, qryRTT) {
			// The "+" suffix marks the open-ended (slowest) bucket.
			b := math.Inf(0)
			if !strings.HasSuffix(s.Name, "+") {
				var err error
				rrt := strings.TrimPrefix(s.Name, qryRTT)
				b, err = strconv.ParseFloat(rrt, 32)
				if err != nil {
					return buckets, 0, fmt.Errorf("could not parse RTT: %s", rrt)
				}
			}
			// Cumulative bucket value: all observations at or below this bound.
			buckets[b/1000] = count + uint64(s.Counter)
			count += uint64(s.Counter)
		}
	}
	return buckets, count, nil
}
// main parses flags, registers the Bind collector (and optionally a process
// collector fed by Bind's pid file), and serves the metrics endpoint.
func main() {
	var (
		listenAddress = flag.String("web.listen-address", ":9109", "Address to listen on for web interface and telemetry.")
		metricsPath   = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
		bindURI       = flag.String("bind.statsuri", "http://localhost:8053/", "HTTP XML API address of an Bind server.")
		bindTimeout   = flag.Duration("bind.timeout", 10*time.Second, "Timeout for trying to get stats from Bind.")
		bindPidFile   = flag.String("bind.pid-file", "", "Path to Bind's pid file to export process information.")
	)
	flag.Parse()
	prometheus.MustRegister(NewExporter(*bindURI, *bindTimeout))
	if *bindPidFile != "" {
		// Export process metrics (cpu, memory, fds) for the Bind process
		// itself; the pid is re-read from the pid file on every scrape.
		procExporter := prometheus.NewProcessCollectorPIDFn(
			func() (int, error) {
				content, err := ioutil.ReadFile(*bindPidFile)
				if err != nil {
					return 0, fmt.Errorf("Can't read pid file: %s", err)
				}
				value, err := strconv.Atoi(strings.TrimSpace(string(content)))
				if err != nil {
					return 0, fmt.Errorf("Can't parse pid file: %s", err)
				}
				return value, nil
			}, namespace)
		prometheus.MustRegister(procExporter)
	}
	log.Info("Starting Server: ", *listenAddress)
	http.Handle(*metricsPath, prometheus.Handler())
	// Minimal landing page linking to the metrics endpoint.
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
<head><title>Bind Exporter</title></head>
<body>
<h1>Bind Exporter</h1>
<p><a href='` + *metricsPath + `'>Metrics</a></p>
</body>
</html>`))
	})
	log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
|
// Copyright © 2016 Jip J. Dekker <jip@dekker.li>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"os"
"os/exec"
"path/filepath"
"sort"
log "github.com/Sirupsen/logrus"
"github.com/jjdekker/ponder/helpers"
"github.com/jjdekker/ponder/settings"
)
// MakeBook will combine all scores into a single songbook
// generated using LaTeX. It compiles every score under path, renders the
// book template to a .tex file, runs latexmk to produce the PDF, and
// cleans up the intermediate files.
func MakeBook(path string, opts *settings.Settings) {
	// Everything needs to be compiled
	CompileDir(path, opts)
	// Sort scores
	sort.Sort(settings.ScoresByName{scores})
	templ, err := parseBookTemplate(opts)
	// BUG FIX: err was previously ignored; a failed template parse would
	// crash (nil template) or silently misbehave below.
	helpers.Check(err, "could not parse songbook template")
	texPath := filepath.Join(opts.OutputDir, "songbook.tex")
	log.WithFields(log.Fields{
		"path": texPath,
	}).Info("compiling songbook template")
	f, err := os.Create(texPath)
	helpers.Check(err, "could not create songbook texfile")
	err = templ.Execute(f, &struct {
		Scores     *[]settings.Score
		Settings   *settings.Settings
		Categories []string
	}{
		Scores:     &scores,
		Settings:   opts,
		Categories: scoreCategories(&scores),
	})
	helpers.Check(err, "error executing book template")
	f.Close()
	// TODO: Better error messages when there is an error compiling latex
	cmd := exec.Command("latexmk", "-silent", "-pdf", "-cd", texPath)
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.WithFields(log.Fields{
			"message": string(out),
			"error":   err,
		}).Fatal("songbook failed to compile")
	}
	cmd = exec.Command("latexmk", "-c", "-cd", texPath)
	out, err = cmd.CombinedOutput()
	if err != nil {
		log.WithFields(log.Fields{
			"message": string(out),
			"error":   err,
		}).Error("failed to clean songbook latex files")
	}
	// TODO: Make optional by flag
	err = os.Remove(texPath)
	helpers.Check(err, "could not remove songbook latex template")
}
// scoreCategories returns a sorted slice of every distinct category that
// appears in the given slice of scores.
func scoreCategories(scores *[]settings.Score) []string {
	seen := make(map[string]struct{})
	for _, score := range *scores {
		for _, category := range score.Categories {
			seen[category] = struct{}{}
		}
	}
	result := make([]string, 0, len(seen))
	for category := range seen {
		result = append(result, category)
	}
	sort.Strings(result)
	return result
}
// unknownCategories reports whether any score in the slice has no
// categories assigned at all.
func unknownCategories(scores *[]settings.Score) bool {
	for _, score := range *scores {
		if len(score.Categories) == 0 {
			return true
		}
	}
	return false
}
Use Project name for the songbook
// Copyright © 2016 Jip J. Dekker <jip@dekker.li>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"os"
"os/exec"
"path/filepath"
"sort"
log "github.com/Sirupsen/logrus"
"github.com/jjdekker/ponder/helpers"
"github.com/jjdekker/ponder/settings"
)
// MakeBook will combine all scores into a single songbook
// generated using LaTeX. It compiles every score under path, renders the
// book template to a .tex file named after the project, runs latexmk to
// produce the PDF, and cleans up the intermediate files.
func MakeBook(path string, opts *settings.Settings) {
	// Everything needs to be compiled
	CompileDir(path, opts)
	// Sort scores
	sort.Sort(settings.ScoresByName{scores})
	templ, err := parseBookTemplate(opts)
	// BUG FIX: err was previously ignored; a failed template parse would
	// crash (nil template) or silently misbehave below.
	helpers.Check(err, "could not parse songbook template")
	texPath := filepath.Join(opts.OutputDir, opts.Name+".tex")
	log.WithFields(log.Fields{
		"path": texPath,
	}).Info("compiling songbook template")
	f, err := os.Create(texPath)
	helpers.Check(err, "could not create songbook texfile")
	err = templ.Execute(f, &struct {
		Scores     *[]settings.Score
		Settings   *settings.Settings
		Categories []string
	}{
		Scores:     &scores,
		Settings:   opts,
		Categories: scoreCategories(&scores),
	})
	helpers.Check(err, "error executing book template")
	f.Close()
	// TODO: Better error messages when there is an error compiling latex
	cmd := exec.Command("latexmk", "-silent", "-pdf", "-cd", texPath)
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.WithFields(log.Fields{
			"message": string(out),
			"error":   err,
		}).Fatal("songbook failed to compile")
	}
	cmd = exec.Command("latexmk", "-c", "-cd", texPath)
	out, err = cmd.CombinedOutput()
	if err != nil {
		log.WithFields(log.Fields{
			"message": string(out),
			"error":   err,
		}).Error("failed to clean songbook latex files")
	}
	// TODO: Make optional by flag
	err = os.Remove(texPath)
	helpers.Check(err, "could not remove songbook latex template")
}
// scoreCategories returns a sorted slice of every distinct category that
// appears in the given slice of scores.
func scoreCategories(scores *[]settings.Score) []string {
	seen := make(map[string]struct{})
	for _, score := range *scores {
		for _, category := range score.Categories {
			seen[category] = struct{}{}
		}
	}
	result := make([]string, 0, len(seen))
	for category := range seen {
		result = append(result, category)
	}
	sort.Strings(result)
	return result
}
// unknownCategories reports whether any score in the slice has no
// categories assigned at all.
func unknownCategories(scores *[]settings.Score) bool {
	for _, score := range *scores {
		if len(score.Categories) == 0 {
			return true
		}
	}
	return false
}
|
package main
import (
"exp/draw/x11"
"exp/draw"
"image"
"fmt"
"math"
"rand"
"time"
"rog-go.googlecode.com/hg/canvas"
)
// to add:
// Rectangle.Eq()
// modifications for mac os X11
// should it crash if Draw is passed a non-canonical rectangle?
// it's a pity that image.RGBAColor isn't a draw.Color
// RectFlusherContext is a draw.Context that can additionally flush a single
// sub-rectangle of the window, avoiding a full-image refresh.
type RectFlusherContext interface {
	draw.Context
	FlushImageRect(r draw.Rectangle)
}

// line pairs a canvas line object with its endpoints in window coordinates.
type line struct {
	obj    *canvas.Line
	p0, p1 draw.Point
}

// realPoint is a 2-D point/vector with float64 coordinates, used for the
// physics math where integer pixels are too coarse.
type realPoint struct {
	x, y float64
}

// ball is a moving particle: position, velocity (unit direction here;
// speed is chosen per-goroutine in animateBall) and colour.
type ball struct {
	p   realPoint
	v   realPoint
	col draw.Color
}

// lineList is a singly-linked list of all lines in the scene.
type lineList struct {
	line line
	next *lineList
}
// flushFunc returns the cheapest flush callback the context supports:
// a per-rectangle flush when available, otherwise a whole-image flush
// that ignores the rectangle argument.
func flushFunc(ctxt draw.Context) func(r draw.Rectangle) {
	if fctxt, ok := ctxt.(RectFlusherContext); ok {
		return func(r draw.Rectangle) {
			fctxt.FlushImageRect(r)
		}
	}
	return func(_ draw.Rectangle) {
		ctxt.FlushImage()
	}
}
// NOTE(review): currtime appears unused in this chunk — confirm elsewhere.
var currtime int64

// updateTime is a duration in nanoseconds (10ms). NOTE(review): not
// referenced in this chunk — confirm usage.
const updateTime = 0.01e9

// window is the shared drawing canvas; written once in main.
var window *canvas.Canvas

// lines is the head of the list of all lines (window edges + user-drawn).
var lines *lineList

// lineVersion is bumped whenever lines changes; ball goroutines watch it
// to know when to recompute their next collision.
var lineVersion int

// ballSize is the ball diameter in pixels.
const ballSize = 8
// main opens an X11 window, frames it with four edge lines, spawns the
// line-drawing and ball-scheduling goroutines, and runs the event loop.
func main() {
	rand.Seed(0) // deterministic run
	ctxt, err := x11.NewWindow()
	if ctxt == nil {
		fmt.Printf("no window: %v\n", err)
		return
	}
	screen := ctxt.Screen()
	window = canvas.NewCanvas(screen.(*image.RGBA), draw.White, flushFunc(ctxt))
	nballs := 1
	ctxt.FlushImage()
	csz := draw.Pt(window.Width(), window.Height())
	// add edges of window (one pixel outside the visible area) so balls
	// always have something to bounce off
	addLine(draw.Pt(-1, -1), draw.Pt(csz.X, -1))
	addLine(draw.Pt(csz.X, -1), draw.Pt(csz.X, csz.Y))
	addLine(draw.Pt(csz.X, csz.Y), draw.Pt(-1, csz.Y))
	addLine(draw.Pt(-1, csz.Y), draw.Pt(-1, -1))
	lineMouse := make(chan draw.Mouse)
	go lineMaker(lineMouse)
	mkball := make(chan ball)
	delball := make(chan bool)
	go monitor(mkball, delball)
	for i := 0; i < nballs; i++ {
		mkball <- randBall()
	}
	prevButtons := 0
	mc := ctxt.MouseChan()
	qc := ctxt.QuitChan()
	for {
		select {
		case <-qc:
			fmt.Printf("quitting\n")
			return
		case m := <-mc:
			switch {
			case m.Buttons&4 != 0: // button 3 quits
				return
			case m.Buttons&2 == 0 && prevButtons&2 != 0:
				// button 2 release - make a new ball
				mkball <- randBall()
				fallthrough
			default:
				// everything else (incl. button 1 drags) feeds the line editor
				lineMouse <- m
			}
			prevButtons = m.Buttons
		}
	}
}
// randBall returns a ball at a random position inside the window, moving
// in a random unit direction, with a random colour.
func randBall() ball {
	csz := draw.Point{window.Width(), window.Height()}
	return ball{randPoint(csz), makeUnit(randPoint(csz)), randColour()}
}
// randPoint returns a uniformly random point within [0, size.X-1) x [0, size.Y-1).
func randPoint(size draw.Point) realPoint {
	return realPoint{
		rand.Float64() * float64(size.X-1),
		rand.Float64() * float64(size.Y-1),
	}
}
// randColour returns a random opaque colour; OR-ing 0x808080ff keeps each
// channel bright enough to be visible and forces full alpha.
func randColour() (c draw.Color) {
	return draw.Color(uint32(rand.Int63()<<8) | 0x808080ff)
}
// addLine draws a 3px black line from p0 to p1, prepends it to the global
// line list, and bumps lineVersion so active balls re-plan their bounce.
func addLine(p0, p1 draw.Point) *line {
	obj := window.Line(image.Black, p0, p1, 3)
	ln := line{obj, p0, p1}
	lines = &lineList{ln, lines}
	lineVersion++
	return &lines.line
}
// point converts a float-coordinate point to the nearest integer pixel point.
func (p realPoint) point() draw.Point {
	return draw.Point{round(p.x), round(p.y)}
}
// lineMaker consumes mouse events and lets the user draw lines: a button-1
// press anchors a new line at the press point, and the free endpoint tracks
// the pointer until the button is released.
func lineMaker(mc <-chan draw.Mouse) {
	for {
		m := <-mc
		// wait for a button-1 press
		if m.Buttons&1 == 0 {
			continue
		}
		p0 := m.Point
		ln := addLine(p0, p0)
		// rubber-band the second endpoint while button 1 is held
		for m.Buttons&1 != 0 {
			m = <-mc
			ln.obj.Move(p0, m.Point)
			ln.p1 = m.Point
			lineVersion++
			window.Flush()
		}
	}
}
// nullproc is a do-nothing participant in the token-passing scheduler:
// it immediately returns every token it receives. It exists so monitor
// never deadlocks when there are no balls. It exits if sent false.
func nullproc(c chan bool) {
	for <-c {
		c <- true
	}
}
// monitor is a cooperative round-robin scheduler for ball goroutines.
// A single boolean "token" circulates: each ball animates one step while
// holding the token, then passes it back; monitor forwards it to the next
// process in the list. New balls register via mkball.
// NOTE(review): the delball parameter is unused here — deletion appears to
// be signalled by sending false on a ball's own channel; confirm elsewhere.
func monitor(mkball <-chan ball, delball chan bool) {
	type procList struct {
		c    chan bool
		next *procList
	}
	procl := &procList{make(chan bool), nil}
	proc := procl
	go nullproc(procl.c) // always there to avoid deadlock when no balls.
	procl.c <- true      // hand token to dummy proc
	for {
		select {
		case b := <-mkball:
			// prepend a new ball process to the list
			procl = &procList{make(chan bool), procl}
			go animateBall(procl.c, b)
		case <-proc.c:
			// token came back: advance round-robin, wrapping to the head
			if proc = proc.next; proc == nil {
				proc = procl
			}
			proc.c <- true
		}
	}
}
// Ball wraps a canvas image so it can be moved by center point rather
// than by top-left corner.
type Ball struct {
	*canvas.Image
}
// makeBall creates the on-screen image for b, positioned so that b.p is
// the ball's center (hence the half-size offset).
func makeBall(b ball) Ball {
	img := canvas.Box(ballSize, ballSize, b.col, 1, image.Black)
	p := b.p.point().Sub(draw.Pt(ballSize/2, ballSize/2))
	return Ball{window.Image(img, p)}
}
// Move repositions the ball image so its center sits at p.
func (obj *Ball) Move(p realPoint) {
	bp := draw.Point{round(p.x), round(p.y)}.Sub(draw.Pt(ballSize/2, ballSize/2))
	obj.Image.Move(bp)
}
// large acts as an "infinite" distance sentinel in the collision search.
const large = 1000000

// animateBall runs one ball: it repeatedly finds the nearest line the ball
// will hit, animates the ball there in timed steps, then bounces. It
// participates in monitor's token protocol on c: it moves one step per
// token received and returns the token; receiving false means "delete
// yourself". If lineVersion changes mid-flight the collision is recomputed.
func animateBall(c chan bool, b ball) {
	speed := 0.1e-6 + rand.Float64()*0.4e-6 // pixels per nanosecond
	obj := makeBall(b)
	var hitline line
	smallcount := 0
	version := lineVersion
loop:
	for {
		// find the nearest intersecting line, ignoring the line we just
		// bounced off; the smallcount guard avoids getting stuck in a
		// corner by rejecting a run of near-zero hops.
		var hitp realPoint
		dist := float64(large)
		oldline := hitline
		for l := lines; l != nil; l = l.next {
			ln := l.line
			ok, hp, hdist := intersect(b.p, b.v, ln)
			if ok && hdist < dist && ln.obj != oldline.obj && (smallcount < 10 || hdist > 1.5) {
				hitp, hitline, dist = hp, ln, hdist
			}
		}
		if dist == large {
			// nothing to bounce off: the ball is lost; absorb tokens forever
			fmt.Printf("no intersection!\n")
			obj.Delete()
			for <-c {
				c <- true
			}
		}
		if dist < 1e-4 {
			smallcount++
		} else {
			smallcount = 0
		}
		bouncev := boing(b.v, hitline)
		t0 := time.Nanoseconds()
		dt := int64(dist / speed)
		t := int64(0)
		// fly towards the hit point in wall-clock-timed steps
		for {
			s := float64(t) * speed
			currp := realPoint{b.p.x + s*b.v.x, b.p.y + s*b.v.y}
			obj.Move(currp)
			window.Flush()
			if lineVersion > version {
				// the scene changed: re-plan from the current position
				b.p, hitline, version = currp, oldline, lineVersion
				continue loop
			}
			// pass the token back to the monitor
			if !<-c {
				obj.Delete()
				window.Flush()
				return
			}
			c <- true
			t = time.Nanoseconds() - t0
			if t >= dt {
				break
			}
		}
		b.p = hitp
		b.v = bouncev
	}
}
// makeUnit makes a vector of unit-length parallel to v.
// NOTE(review): a zero vector yields NaN components (division by zero).
func makeUnit(v realPoint) realPoint {
	mag := math.Sqrt(v.x*v.x + v.y*v.y)
	return realPoint{v.x / mag, v.y / mag}
}
// bounce ball travelling in direction av off line b.
// return the new unit vector.
// The reflection is done in angle space: the outgoing angle is the line's
// angle mirrored about the incoming direction (d = 2*theta_line - theta_av).
func boing(av realPoint, ln line) realPoint {
	f := ln.p1.Sub(ln.p0)
	d := math.Atan2(float64(f.Y), float64(f.X))*2 - math.Atan2(av.y, av.x)
	p := realPoint{math.Cos(d), math.Sin(d)}
	return p
}
// compute the intersection of lines a and b.
// b is assumed to be fixed, and a is indefinitely long
// but doesn't extend backwards from its starting point.
// a is defined by the starting point p and the unit vector v.
// Returns ok=false for (near-)parallel lines or intersections behind p;
// otherwise pt is the intersection point and dist the distance from p
// along v (in units of |v|, i.e. pixels when v is a unit vector).
func intersect(p, v realPoint, b line) (ok bool, pt realPoint, dist float64) {
	const zero = 1e-6
	w := realPoint{float64(b.p1.X - b.p0.X), float64(b.p1.Y - b.p0.Y)}
	// 2-D cross product of the two direction vectors; near zero => parallel.
	det := w.x*v.y - v.x*w.y
	if det > -zero && det < zero {
		return
	}
	y21 := float64(b.p0.Y) - p.y
	x21 := float64(b.p0.X) - p.x
	dist = (w.x*y21 - w.y*x21) / det
	// reject hits behind the ray's starting point
	if dist < 0.0 {
		return
	}
	pt = realPoint{p.x + v.x*dist, p.y + v.y*dist}
	// normalise b's bounding box so the containment test below works
	// regardless of endpoint order
	if b.p0.X > b.p1.X {
		b.p0.X, b.p1.X = b.p1.X, b.p0.X
	}
	if b.p0.Y > b.p1.Y {
		b.p0.Y, b.p1.Y = b.p1.Y, b.p0.Y
	}
	// the hit counts only if the (rounded) point lies within b's extent
	ok = round(pt.x) >= b.p0.X &&
		round(pt.x) <= b.p1.X &&
		round(pt.y) >= b.p0.Y &&
		round(pt.y) <= b.p1.Y
	return
}
// round rounds x to the nearest integer, with halves rounded away from
// zero (symmetric for negative values).
func round(x float64) int {
	if x >= 0 {
		return int(x + 0.5)
	}
	return int(x - 0.5)
}
revamp mouse handling. now depends on timestamp being delivered in mouse events.
button 2 starts balls at indicated position with given velocity.
package main
import (
"exp/draw/x11"
"exp/draw"
"image"
"fmt"
"math"
"rand"
"time"
"rog-go.googlecode.com/hg/canvas"
)
// to add:
// Rectangle.Eq()
// modifications for mac os X11
// should it crash if Draw is passed a non-canonical rectangle?
// it's a pity that image.RGBAColor isn't a draw.Color
type RectFlusherContext interface {
draw.Context
FlushImageRect(r draw.Rectangle)
}
type line struct {
obj *canvas.Line
p0, p1 draw.Point
}
type realPoint struct {
x, y float64
}
type ball struct {
p realPoint
v realPoint
col draw.Color
}
type lineList struct {
line line
next *lineList
}
func flushFunc(ctxt draw.Context) func(r draw.Rectangle) {
if fctxt, ok := ctxt.(RectFlusherContext); ok {
return func(r draw.Rectangle) {
fctxt.FlushImageRect(r)
}
}
return func(_ draw.Rectangle) {
ctxt.FlushImage()
}
}
var currtime int64 // NOTE(review): appears unused in this view of the file.
const updateTime = 0.01e9 // nanoseconds; NOTE(review): appears unused here.
var window *canvas.Canvas // the shared drawing canvas.
var lines *lineList // all bounce lines, including the window edges.
var lineVersion int // bumped whenever lines changes; animators replan on it.
const ballSize = 8 // ball diameter in pixels.
// main opens an X11 window, fences the canvas with four edge lines,
// starts the ball-scheduling monitor, and then dispatches mouse events:
// button 1 draws lines, button 2 launches balls, button 3 quits.
func main() {
rand.Seed(0)
ctxt, err := x11.NewWindow()
if ctxt == nil {
fmt.Printf("no window: %v\n", err)
return
}
screen := ctxt.Screen()
window = canvas.NewCanvas(screen.(*image.RGBA), draw.White, flushFunc(ctxt))
nballs := 1
ctxt.FlushImage()
csz := draw.Pt(window.Width(), window.Height())
// add edges of window
addLine(draw.Pt(-1, -1), draw.Pt(csz.X, -1))
addLine(draw.Pt(csz.X, -1), draw.Pt(csz.X, csz.Y))
addLine(draw.Pt(csz.X, csz.Y), draw.Pt(-1, csz.Y))
addLine(draw.Pt(-1, csz.Y), draw.Pt(-1, -1))
mkball := make(chan ball)
delball := make(chan bool)
go monitor(mkball, delball)
for i := 0; i < nballs; i++ {
mkball <- randBall()
}
mc := ctxt.MouseChan()
mcc := make(chan (<-chan draw.Mouse))
qc := ctxt.QuitChan()
for {
select {
case <-qc:
fmt.Printf("quitting\n")
return
case m := <-mc:
switch {
case m.Buttons&4 != 0:
// Button 3 quits.
return
case m.Buttons&1 != 0:
// Button 1: hand the mouse channel to a modal line-drawing loop.
go handleMouse(m, mc, mcc, lineMaker)
mc = nil // nil channel disables this case until the handler returns it via mcc
case m.Buttons&2 != 0:
// Button 2: hand the mouse channel to a modal ball-launching loop.
go handleMouse(m, mc, mcc, func(m draw.Mouse, mc <-chan draw.Mouse){
ballMaker(m, mc, mkball)
})
mc = nil // see above: ownership transferred to the modal handler
}
case mc = <-mcc:
// The modal handler finished; resume reading mouse events.
break
}
}
}
// Start a modal loop to handle mouse events, running f.
// f is passed the mouse event that caused the modal loop
// to be started, and the mouse channel.
// When f finishes, the mouse channel is handed back
// on mcc.
func handleMouse(m draw.Mouse,
mc <-chan draw.Mouse,
mcc chan (<-chan draw.Mouse),
f func(first draw.Mouse, mc <-chan draw.Mouse)) {
// The deferred send runs even if f panics, so the caller always
// gets its mouse channel back.
defer func() {
mcc <- mc
}()
f(m, mc)
}
// randBall returns a ball at a random position with a random direction,
// random speed (in pixels per nanosecond) and random colour.
func randBall() ball {
	csz := draw.Point{window.Width(), window.Height()}
	var b ball
	b.p = randPoint(csz)
	// BUG FIX: rand.Float64()-1 yields values in [-1, 0), so both velocity
	// components were always negative and every ball moved up-left.
	// Scale to [-1, 1) so all directions are possible.
	b.v.x = rand.Float64()*2 - 1
	b.v.y = rand.Float64()*2 - 1
	if b.v.x == 0 && b.v.y == 0 {
		panic("did that really happen?!")
	}
	b.v, _ = makeUnit(b.v)
	speed := 0.1e-6 + rand.Float64()*0.4e-6
	b.v.x *= speed
	b.v.y *= speed
	b.col = randColour()
	return b
}
// randPoint returns a uniformly random point within the given size.
func randPoint(size draw.Point) realPoint {
	x := rand.Float64() * float64(size.X-1)
	y := rand.Float64() * float64(size.Y-1)
	return realPoint{x, y}
}
// randColour returns a random colour; 0x808080ff is OR'd in so every
// channel keeps its high bit set and the low byte is always 0xff.
func randColour() (c draw.Color) {
	bits := uint32(rand.Int63() << 8)
	return draw.Color(bits | 0x808080ff)
}
// addLine draws a 3-pixel-wide black line from p0 to p1, prepends it to
// the global line list, and bumps lineVersion so animators replan.
func addLine(p0, p1 draw.Point) *line {
obj := window.Line(image.Black, p0, p1, 3)
ln := line{obj, p0, p1}
lines = &lineList{ln, lines}
lineVersion++
return &lines.line
}
// point converts p to integer pixel coordinates by rounding each component.
func (p realPoint) point() draw.Point {
	return draw.Point{X: round(p.x), Y: round(p.y)}
}
// lineMaker tracks a button-1 drag, rubber-banding a new line from the
// press point to the current mouse position until the button is released.
func lineMaker(m draw.Mouse, mc <-chan draw.Mouse) {
p0 := m.Point
ln := addLine(p0, p0)
for m.Buttons&1 != 0 {
m = <-mc
// Stretch the on-screen line and its recorded geometry, then
// bump lineVersion so animating balls re-check for collisions.
ln.obj.Move(p0, m.Point)
ln.p1 = m.Point
lineVersion++
window.Flush()
}
}
// abs returns the absolute value of x.
func abs(x float64) float64 {
	if x >= 0 {
		return x
	}
	return -x
}
// ballMaker tracks a button-2 drag and launches a ball when it ends.
// While the button is held, drag velocity is sampled into a small ring
// buffer roughly every sampleTime/len(vecs) nanoseconds; on release the
// samples are averaged to obtain the launch speed. A quick click (speed
// below the threshold) launches a randomly-directed ball from the click
// point instead.
func ballMaker(m draw.Mouse, mc <-chan draw.Mouse, mkball chan<-ball){
const sampleTime = 0.25e9
var vecs [8]realPoint // approx sampleTime's worth of velocities
i := 0 // next ring-buffer slot to fill
n := 0 // number of slots filled so far
m0 := m // mouse state at drag start
m1 := m // most recent mouse state
for {
m1 = <-mc
dt := m1.Nsec - m.Nsec
// Record a sample when enough time has passed, or on button release.
if dt >= sampleTime/int64(len(vecs)) || m.Buttons&2 == 0{
delta := draw2realPoint(m1.Sub(m.Point))
vecs[i].x = delta.x / float64(dt)
vecs[i].y = delta.y / float64(dt)
i = (i + 1) % len(vecs)
if n < len(vecs) {
n++
}
m = m1
}
if m.Buttons&2 == 0 {
break
}
}
// Average the recorded samples. The sum ranges over all len(vecs)
// slots, but unfilled slots are zero-valued, so dividing by n is correct.
var avg realPoint
for _, v := range vecs {
avg.x += v.x
avg.y += v.y
}
avg.x /= float64(n)
avg.y /= float64(n)
var b ball
speed := math.Sqrt(avg.x*avg.x + avg.y*avg.y) // in pixels/ns
if speed < 3e-9 {
// a click with no drag starts a ball with random velocity.
b = randBall()
b.p = draw2realPoint(m0.Point)
}else{
// Launch along the overall drag direction at the averaged speed.
v, _ := makeUnit(draw2realPoint(m1.Sub(m0.Point)))
v.x *= speed
v.y *= speed
b = ball{
realPoint{float64(m0.X), float64(m0.Y)},
v,
randColour(),
}
}
mkball <- b
}
// draw2realPoint converts an integer point to floating-point coordinates.
func draw2realPoint(p draw.Point) realPoint {
	x, y := float64(p.X), float64(p.Y)
	return realPoint{x, y}
}
// nullproc participates in the token-passing ring: each time it receives
// a true token it hands it straight back, and it exits on false.
func nullproc(c chan bool) {
	for keep := <-c; keep; keep = <-c {
		c <- true
	}
}
// monitor owns the scheduling token that serializes ball animation.
// Each new ball gets a goroutine and a channel; the token is passed
// round-robin over all registered channels, giving each animator one
// step per turn. A permanent nullproc keeps the ring non-empty so the
// select never deadlocks when there are no balls.
// NOTE(review): the delball channel is accepted but never read here —
// confirm whether ball removal was intended to go through it.
func monitor(mkball <-chan ball, delball chan bool) {
type procList struct {
c chan bool
next *procList
}
procl := &procList{make(chan bool), nil}
proc := procl
go nullproc(procl.c) // always there to avoid deadlock when no balls.
procl.c <- true // hand token to dummy proc
for {
select {
case b := <-mkball:
// Register a new animator at the head of the list.
procl = &procList{make(chan bool), procl}
go animateBall(procl.c, b)
case <-proc.c:
// Token came back; advance to the next proc, wrapping to the head.
if proc = proc.next; proc == nil {
proc = procl
}
proc.c <- true
}
}
}
// Ball wraps the canvas image that represents a ball on screen.
type Ball struct {
*canvas.Image
}
// makeBall creates the on-screen box for b, centred on b's position.
func makeBall(b ball) Ball {
img := canvas.Box(ballSize, ballSize, b.col, 1, image.Black)
p := b.p.point().Sub(draw.Pt(ballSize/2, ballSize/2))
return Ball{window.Image(img, p)}
}
// Move repositions the ball image so that its centre is at p.
func (obj *Ball) Move(p realPoint) {
bp := draw.Point{round(p.x), round(p.y)}.Sub(draw.Pt(ballSize/2, ballSize/2))
obj.Image.Move(bp)
}
// large is a sentinel distance meaning "no intersection found".
const large = 1000000
// animateBall moves ball b until it is told to stop.
// Protocol on c: the animator blocks until it holds the scheduling token;
// true means "do one step and hand the token back", false means "delete
// the ball and exit". Each outer iteration plans one bounce: find the
// nearest line the ball's path hits, then interpolate the position over
// real time until the computed impact time elapses.
func animateBall(c chan bool, b ball) {
var speed float64
b.v, speed = makeUnit(b.v)
obj := makeBall(b)
var hitline line
smallcount := 0
version := lineVersion
loop:
for {
var hitp realPoint
dist := float64(large)
oldline := hitline
// Find the nearest intersecting line, skipping the line we just
// bounced off; after 10 consecutive tiny hops, also skip near hits
// to avoid getting wedged in a corner.
for l := lines; l != nil; l = l.next {
ln := l.line
ok, hp, hdist := intersect(b.p, b.v, ln)
if ok && hdist < dist && ln.obj != oldline.obj && (smallcount < 10 || hdist > 1.5) {
hitp, hitline, dist = hp, ln, hdist
}
}
if dist == large {
// Nowhere to go: remove the ball but keep relaying the token.
// NOTE(review): when a false token arrives this inner loop exits
// and the outer loop continues using the deleted obj — this looks
// like a missing return; confirm intended behavior.
fmt.Printf("no intersection!\n")
obj.Delete()
for <-c {
c <- true
}
}
if dist < 1e-4 {
smallcount++
} else {
smallcount = 0
}
bouncev := boing(b.v, hitline)
t0 := time.Nanoseconds()
dt := int64(dist / speed)
t := int64(0)
for {
s := float64(t) * speed
currp := realPoint{b.p.x + s*b.v.x, b.p.y + s*b.v.y}
obj.Move(currp)
window.Flush()
// If a line was added since this segment was planned, replan from
// the current position.
if lineVersion > version {
b.p, hitline, version = currp, oldline, lineVersion
continue loop
}
// pass the token back to the monitor
if !<-c {
obj.Delete()
window.Flush()
return
}
c <- true
t = time.Nanoseconds() - t0
if t >= dt {
break
}
}
b.p = hitp
b.v = bouncev
}
}
// makeUnit makes a vector of unit-length parallel to v.
// It returns the unit vector and the original magnitude.
// NOTE(review): a zero vector yields mag 0 and NaN components — callers
// must ensure v is non-zero (randBall panics on that case).
func makeUnit(v realPoint) (realPoint, float64) {
mag := math.Sqrt(v.x*v.x + v.y*v.y)
return realPoint{v.x / mag, v.y / mag}, mag
}
// bounce ball travelling in direction av off line b.
// return the new unit vector.
// The reflection is computed in angle space: twice the line's angle minus
// the incoming angle gives the mirrored outgoing direction.
func boing(av realPoint, ln line) realPoint {
f := ln.p1.Sub(ln.p0)
d := math.Atan2(float64(f.Y), float64(f.X))*2 - math.Atan2(av.y, av.x)
p := realPoint{math.Cos(d), math.Sin(d)}
return p
}
// compute the intersection of lines a and b.
// b is assumed to be fixed, and a is indefinitely long
// but doesn't extend backwards from its starting point.
// a is defined by the starting point p and the unit vector v.
// Returns whether the ray hits the segment, the intersection point, and
// the distance from p along v.
func intersect(p, v realPoint, b line) (ok bool, pt realPoint, dist float64) {
const zero = 1e-6
// Cross product of b's direction with v; near-zero means parallel.
w := realPoint{float64(b.p1.X - b.p0.X), float64(b.p1.Y - b.p0.Y)}
det := w.x*v.y - v.x*w.y
if det > -zero && det < zero {
return
}
y21 := float64(b.p0.Y) - p.y
x21 := float64(b.p0.X) - p.x
dist = (w.x*y21 - w.y*x21) / det
if dist < 0.0 {
// Hit would be behind the ray's origin.
return
}
pt = realPoint{p.x + v.x*dist, p.y + v.y*dist}
// Sort the endpoints of the local copy so the box check is orientation-free.
if b.p0.X > b.p1.X {
b.p0.X, b.p1.X = b.p1.X, b.p0.X
}
if b.p0.Y > b.p1.Y {
b.p0.Y, b.p1.Y = b.p1.Y, b.p0.Y
}
// The point is on b's infinite line; restrict it to the segment's box.
ok = round(pt.x) >= b.p0.X &&
round(pt.x) <= b.p1.X &&
round(pt.y) >= b.p0.Y &&
round(pt.y) <= b.p1.Y
return
}
// round returns the integer nearest to x, with ties rounded away from zero.
func round(x float64) int {
	half := 0.5
	if x < 0 {
		half = -0.5
	}
	return int(x + half)
}
|
package broker
import (
"encoding/json"
"net/http"
"github.com/Sirupsen/logrus"
"github.com/nats-io/go-nats"
"github.com/pkg/errors"
"github.com/pborman/uuid"
"time"
"fmt"
)
// jsonError is the wire format for an error payload carried in a Container.
type jsonError struct {
Message string `json:"message"`
}
// Broker publishes and consumes versioned message Containers over NATS.
// NOTE(review): Logger is held by value; a *logrus.Logger would avoid
// copying logger state — confirm against callers before changing.
type Broker struct {
Logger logrus.Logger
N *nats.Conn
Version string
Timeout time.Duration
}
// Container is the envelope for every message exchanged via the Broker.
// JSON tags are single letters to keep messages compact.
type Container struct {
ID string `json:"i"`
Version string `json:"v"`
RequestID string `json:"r"`
Status int `json:"s"`
Payload interface{} `json:"p"`
}
// stackTracer matches github.com/pkg/errors values carrying a stack trace.
type stackTracer interface {
StackTrace() errors.StackTrace
}
// RID extracts the request ID from the X-REQUEST-ID header of r.
func (h *Broker) RID(r *http.Request) string {
	const header = "X-REQUEST-ID"
	return r.Header.Get(header)
}
// GetTimeout returns the configured request timeout, defaulting to five
// seconds when no timeout has been set.
func (h *Broker) GetTimeout() time.Duration {
	if h.Timeout != 0 {
		return h.Timeout
	}
	return 5 * time.Second
}
// GetVersion returns the configured version string, defaulting to "0.0.0".
func (h *Broker) GetVersion() string {
	if v := h.Version; v != "" {
		return v
	}
	return "0.0.0"
}
// Reply publishes e as a 200 OK Container on the message's reply subject.
func (h *Broker) Reply(m *nats.Msg, rid string, e interface{}) {
h.WriteCode(m.Reply, rid, http.StatusOK, e)
}
// WriteCode marshals e into a Container with the given status code and
// publishes it on the given subject; rid is echoed back as the request ID.
// Marshal and publish failures are reported via WriteErrorCode / the logger.
func (h *Broker) WriteCode(message string, rid string, code int, e interface{}) {
	p, err := json.Marshal(&Container{
		ID:        uuid.New(),
		Version:   h.Version,
		Status:    code,
		Payload:   e,
		RequestID: rid,
	})
	if err != nil {
		h.WriteErrorCode(message, rid, http.StatusInternalServerError, errors.Wrap(err, "Could not marshal container"))
		// BUG FIX: without this return, the nil payload p was still
		// published below after a marshal failure.
		return
	}
	if err := h.N.Publish(message, p); err != nil {
		h.Logger.WithError(err).WithField("request", rid).Errorln("Message can not be published.")
	}
}
// Parse decodes a NATS message into a Container, unmarshaling its payload
// into e. Statuses outside the 2xx range are re-read as a jsonError and
// reported as an error alongside the (always non-nil) container.
func (h *Broker) Parse(m *nats.Msg, e interface{}) (*Container, error) {
	var c = &Container{Payload: e}
	if err := json.Unmarshal(m.Data, c); err != nil {
		return c, errors.Wrap(err, "Could not unmarshal message container")
	}
	if c.Status < 200 || c.Status >= 300 {
		var e jsonError
		if err := json.Unmarshal(m.Data, &e); err != nil {
			return c, errors.Wrap(err, "Could not unmarshal message error")
		}
		// BUG FIX: the original format string had no verbs for its two
		// arguments ("...: ", c.Status, e.Message), dropping the message
		// text and emitting %!(EXTRA ...) noise instead.
		return c, errors.Errorf("An error (code: %d) occurred on the other side: %s", c.Status, e.Message)
	}
	return c, nil
}
// Request publishes in as a 200 OK Container on the given subject, waits up
// to GetTimeout() for a reply, and parses the reply payload into out.
func (h *Broker) Request(message string, rid string, in, out interface{}) (*Container, error) {
	env := &Container{
		ID:        uuid.New(),
		Version:   h.Version,
		Payload:   in,
		Status:    http.StatusOK,
		RequestID: rid,
	}
	p, err := json.Marshal(env)
	if err != nil {
		return nil, errors.Wrap(err, "")
	}
	rep, err := h.N.Request(message, p, h.GetTimeout())
	if err != nil {
		return nil, errors.Wrap(err, "")
	}
	return h.Parse(rep, out)
}
// Publish marshals in as a 200 OK Container and publishes it on message.
func (h *Broker) Publish(message string, rid string, in interface{}) error {
	env := &Container{
		ID:        uuid.New(),
		Version:   h.Version,
		Payload:   in,
		Status:    http.StatusOK,
		RequestID: rid,
	}
	p, err := json.Marshal(env)
	if err != nil {
		return errors.Wrap(err, "")
	}
	if err = h.N.Publish(message, p); err != nil {
		return errors.Wrap(err, "")
	}
	return nil
}
// WriteErrorCode logs err (including a stack trace when one is available)
// and publishes a jsonError container with the given status code on
// message. A zero code is coerced to 500.
func (h *Broker) WriteErrorCode(message string, rid string, code int, err error) {
if code == 0 {
code = http.StatusInternalServerError
}
// Prefer a stack trace from err itself, falling back to its cause.
var stack = "not available"
if e, ok := err.(stackTracer); ok {
stack = fmt.Sprintf("%+v", e.StackTrace())
} else if e, ok := errors.Cause(err).(stackTracer); ok {
stack = fmt.Sprintf("%+v", e.StackTrace())
}
h.Logger.WithError(err).WithField("request", rid).WithField("stack", stack).Errorln("An error occurred while sending the response.")
h.WriteCode(
message,
rid,
code,
&jsonError{
Message: err.Error(),
},
)
}
broker updates
package broker
import (
"encoding/json"
"net/http"
"github.com/Sirupsen/logrus"
"github.com/nats-io/go-nats"
"github.com/pkg/errors"
"github.com/pborman/uuid"
"time"
"fmt"
)
// jsonError is the wire format for an error payload carried in a Container.
type jsonError struct {
Message string `json:"message"`
}
// Broker publishes and consumes versioned message Containers over NATS.
// Construct one with New, or set the fields directly (zero Timeout and
// empty Version fall back to defaults via GetTimeout/GetVersion).
type Broker struct {
Logger *logrus.Logger
N *nats.Conn
Version string
Timeout time.Duration
}
// New constructs a Broker backed by the NATS connection n, with a fresh
// logrus logger, the given version string, and a five-second timeout.
func New(n *nats.Conn, version string) *Broker {
	b := &Broker{
		Logger:  logrus.New(),
		N:       n,
		Version: version,
	}
	b.Timeout = time.Second * 5
	return b
}
// Container is the envelope for every message exchanged via the Broker.
// JSON tags are single letters to keep messages compact.
type Container struct {
ID string `json:"i"`
Version string `json:"v"`
RequestID string `json:"r"`
Status int `json:"s"`
Payload interface{} `json:"p"`
}
// stackTracer matches github.com/pkg/errors values carrying a stack trace.
type stackTracer interface {
StackTrace() errors.StackTrace
}
// RID extracts the request ID from the X-REQUEST-ID header of r.
func (h *Broker) RID(r *http.Request) string {
return r.Header.Get("X-REQUEST-ID")
}
// GetTimeout returns the configured request timeout, defaulting to 5s.
func (h *Broker) GetTimeout() time.Duration {
if h.Timeout == 0 {
return time.Second * 5
}
return h.Timeout
}
// GetVersion returns the configured version string, defaulting to "0.0.0".
func (h *Broker) GetVersion() string {
if h.Version == "" {
return "0.0.0"
}
return h.Version
}
// Reply publishes e as a 200 OK Container on the message's reply subject.
func (h *Broker) Reply(m *nats.Msg, rid string, e interface{}) {
h.WriteCode(m.Reply, rid, http.StatusOK, e)
}
// WriteCode marshals e into a Container with the given status code and
// publishes it on the given subject; rid is echoed back as the request ID.
// Marshal and publish failures are reported via WriteErrorCode / the logger.
func (h *Broker) WriteCode(message string, rid string, code int, e interface{}) {
	p, err := json.Marshal(&Container{
		ID:        uuid.New(),
		Version:   h.Version,
		Status:    code,
		Payload:   e,
		RequestID: rid,
	})
	if err != nil {
		h.WriteErrorCode(message, rid, http.StatusInternalServerError, errors.Wrap(err, "Could not marshal container"))
		// BUG FIX: without this return, the nil payload p was still
		// published below after a marshal failure.
		return
	}
	if err := h.N.Publish(message, p); err != nil {
		h.Logger.WithError(err).WithField("request", rid).Errorln("Message can not be published.")
	}
}
// Parse decodes a NATS message into a Container.
// It unmarshals the envelope once to read the status; non-2xx statuses
// are decoded again with a jsonError payload target and reported as an
// error, otherwise the data is decoded a final time with e as the payload
// target. The returned *Container is always non-nil.
func (h *Broker) Parse(m *nats.Msg, e interface{}) (*Container, error) {
var c = &Container{}
if err := json.Unmarshal(m.Data, c); err != nil {
return c, errors.Wrap(err, "Could not unmarshal message container")
}
if c.Status < 200 || c.Status >= 300 {
// Error responses carry a jsonError in the payload slot.
var e jsonError
c = &Container{Payload: &e}
if err := json.Unmarshal(m.Data, c); err != nil {
return c, errors.Wrap(err, "Could not unmarshal message error")
}
return c, errors.Errorf("An error code (%d) occurred on the other side: %s", c.Status, e.Message)
}
// Success: decode once more, this time into the caller's payload target.
c = &Container{Payload: e}
if err := json.Unmarshal(m.Data, c); err != nil {
return c, errors.Wrap(err, "Could not unmarshal message container")
}
return c, nil
}
// Request publishes in as a 200 OK Container on the given subject, waits
// up to GetTimeout() for a reply, and parses the reply payload into out.
func (h *Broker) Request(message string, rid string, in, out interface{}) (*Container, error) {
p, err := json.Marshal(&Container{
ID: uuid.New(),
Version: h.Version,
Payload: in,
Status: http.StatusOK,
RequestID: rid,
})
if err != nil {
return nil, errors.Wrap(err, "")
}
rep, err := h.N.Request(message, p, h.GetTimeout())
if err != nil {
return nil, errors.Wrap(err, "")
}
return h.Parse(rep, out)
}
// Publish marshals in as a 200 OK Container and publishes it on message.
func (h *Broker) Publish(message string, rid string, in interface{}) (error) {
p, err := json.Marshal(&Container{
ID: uuid.New(),
Version: h.Version,
Payload: in,
Status: http.StatusOK,
RequestID: rid,
})
if err != nil {
return errors.Wrap(err, "")
}
if err := h.N.Publish(message, p); err != nil {
return errors.Wrap(err, "")
}
return nil
}
// MessageLogger wraps a NATS handler f, logging receipt and completion of
// every message together with its container, request ID, and subject.
func (h *Broker) MessageLogger(f func(m *nats.Msg)) func(m *nats.Msg) {
	return func(m *nats.Msg) {
		// Parse always returns a non-nil container, so c is safe below.
		c, err := h.Parse(m, nil)
		if err != nil {
			// BUG FIX: the parse error was silently discarded.
			h.Logger.WithError(err).WithField("subject", m.Subject).Warnln("Could not parse message container")
		}
		// Use the broker's configured logger instead of the package-level
		// logrus logger, for consistency with every other Broker method.
		h.Logger.WithField("id", c.ID).WithField("request", c.RequestID).WithField("subject", m.Subject).Info("Received message")
		f(m)
		h.Logger.WithField("id", c.ID).WithField("request", c.RequestID).WithField("subject", m.Subject).Info("Handled message")
	}
}
// WriteErrorCode logs err together with any available stack trace, then
// publishes a jsonError payload with the given status code (0 maps to 500).
func (h *Broker) WriteErrorCode(message string, rid string, code int, err error) {
	if code == 0 {
		code = http.StatusInternalServerError
	}
	// Prefer a stack trace from err itself; fall back to its cause.
	stack := "not available"
	tracer, ok := err.(stackTracer)
	if !ok {
		tracer, ok = errors.Cause(err).(stackTracer)
	}
	if ok {
		stack = fmt.Sprintf("%+v", tracer.StackTrace())
	}
	h.Logger.WithError(err).WithField("request", rid).WithField("stack", stack).Errorln("An error occurred while sending the response.")
	payload := &jsonError{Message: err.Error()}
	h.WriteCode(message, rid, code, payload)
}
|
// Package listener implements the listener pool and handlers for metrics.
package listener
import (
"bufio"
"net"
"os"
"strconv"
"strings"
"time"
"github.com/jeffpierce/cassabon/config"
"github.com/jeffpierce/cassabon/logging"
)
// CarbonMetric is the canonical representation of Carbon data.
type CarbonMetric struct {
Path string // Metric path
Value float64 // Metric Value
Timestamp float64 // Epoch timestamp
}
// CarbonTCP listens for incoming Carbon TCP traffic and dispatches it.
func CarbonTCP(addr string, port string) {
	// Resolve the address:port, and start listening for TCP connections.
	hostPort := net.JoinHostPort(addr, port)
	tcpaddr, err := net.ResolveTCPAddr("tcp4", hostPort)
	if err != nil {
		// BUG FIX: the resolve error was silently discarded, leaving a
		// nil tcpaddr for ListenTCP below.
		config.G.Log.System.LogFatal("Cannot resolve Carbon TCP address %s: %v", hostPort, err)
		os.Exit(3)
	}
	tcpListener, err := net.ListenTCP("tcp4", tcpaddr)
	if err != nil {
		// If we can't grab a port, we can't do our job. Log, whine, and crash.
		// BUG FIX: tcpListener is nil here; the original dereferenced it
		// (tcpListener.Addr()) for the log message and panicked.
		config.G.Log.System.LogFatal("Cannot listen for Carbon on TCP address %s: %v", hostPort, err)
		os.Exit(3)
	}
	defer tcpListener.Close()
	config.G.Log.System.LogInfo("Listening on %s TCP for Carbon plaintext protocol", tcpListener.Addr().String())
	// Start listener and pass incoming connections to handler.
	for {
		select {
		case <-config.G.Quit:
			config.G.Log.System.LogInfo("CarbonTCP received QUIT message")
			config.G.WG.Done()
			return
		default:
			// On receipt of a connection, spawn a goroutine to handle it.
			// The deadline makes Accept return regularly so Quit is polled.
			tcpListener.SetDeadline(time.Now().Add(5 * time.Second))
			if conn, err := tcpListener.Accept(); err == nil {
				go getTCPData(conn)
			} else {
				if err.(net.Error).Timeout() {
					config.G.Log.System.LogDebug("CarbonTCP Accept() timed out")
				} else {
					config.G.Log.System.LogWarn("CarbonTCP Accept() error: %v", err)
				}
			}
		}
	}
}
// getTCPData reads a line from a TCP connection and dispatches it.
func getTCPData(conn net.Conn) {
// Carbon metrics are terminated by newlines. Read line-by-line, and dispatch.
defer conn.Close()
scanner := bufio.NewScanner(conn)
for scanner.Scan() {
metricHandler(scanner.Text())
}
// NOTE(review): scanner.Err() is not checked — read errors are treated
// the same as a clean disconnect; confirm this is intended.
config.G.Log.Carbon.LogDebug("Returning from getTCPData")
}
// CarbonUDP listens for incoming Carbon UDP traffic and dispatches it.
func CarbonUDP(addr string, port string) {
	// Resolve the address:port, and start listening for UDP connections.
	hostPort := net.JoinHostPort(addr, port)
	udpaddr, err := net.ResolveUDPAddr("udp4", hostPort)
	if err != nil {
		// BUG FIX: the resolve error was silently discarded, leaving a
		// nil udpaddr for ListenUDP below.
		config.G.Log.System.LogFatal("Cannot resolve Carbon UDP address %s: %v", hostPort, err)
		os.Exit(3)
	}
	udpConn, err := net.ListenUDP("udp", udpaddr)
	if err != nil {
		// If we can't grab a port, we can't do our job. Log, whine, and crash.
		// BUG FIX: udpConn is nil here; the original called
		// udpConn.LocalAddr() for the log message and panicked.
		config.G.Log.System.LogFatal("Cannot listen for Carbon on UDP address %s: %v", hostPort, err)
		os.Exit(3)
	}
	defer udpConn.Close()
	config.G.Log.System.LogInfo("Listening on %s UDP for Carbon plaintext protocol", udpConn.LocalAddr().String())
	// Start reading UDP packets and pass data to handler.
	buf := make([]byte, 150)
	for {
		select {
		case <-config.G.Quit:
			config.G.Log.System.LogInfo("CarbonUDP received QUIT message")
			config.G.WG.Done()
			return
		default:
			udpConn.SetDeadline(time.Now().Add(5 * time.Second))
			bytesRead, _, err := udpConn.ReadFromUDP(buf)
			if err == nil {
				// BUG FIX: only pass the bytes actually read. Passing the
				// whole fixed-size buffer leaked stale bytes from earlier,
				// longer packets into the current datagram's lines.
				go getUDPData(string(buf[:bytesRead]))
			} else {
				if err.(net.Error).Timeout() {
					config.G.Log.System.LogDebug("CarbonUDP Read() timed out")
				} else {
					config.G.Log.System.LogDebug("CarbonUDP Read() error: %v", err)
				}
			}
		}
	}
}
// getUDPData scans data received from a UDP connection and dispatches it.
// Carbon metrics are terminated by newlines: read line-by-line, dispatch.
func getUDPData(buf string) {
	scanner := bufio.NewScanner(strings.NewReader(buf))
	for scanner.Scan() {
		b := scanner.Bytes()
		// Skip empty lines as well as NUL padding left over in the
		// fixed-size read buffer at end-of-data.
		// BUG FIX: guard len(b) > 0 before indexing — an empty line
		// previously caused an index-out-of-range panic on b[0].
		if len(b) > 0 && b[0] != byte(0) {
			metricHandler(scanner.Text())
		}
	}
	config.G.Log.Carbon.LogDebug("Returning from getUDPData")
}
// metricHandler validates a Carbon plaintext line ("path value timestamp"),
// converts it to a CarbonMetric, and records success/failure counters.
// Malformed lines are logged at Warn and dropped.
func metricHandler(line string) {
	// Examine metric to ensure that it's a valid carbon metric triplet.
	splitMetric := strings.Fields(line)
	if len(splitMetric) != 3 {
		// Log this as a Warn, because it's the client's error, not ours.
		config.G.Log.Carbon.LogWarn("Malformed metric, expected 3 fields, found %d: \"%s\"", len(splitMetric), line)
		logging.Statsd.Client.Inc("cassabon.carbon.received.failure", 1, 1.0)
		return
	}
	// Pull out the first field from the triplet.
	statPath := splitMetric[0]
	// Pull out and validate the second field from the triplet.
	val, err := strconv.ParseFloat(splitMetric[1], 64)
	if err != nil {
		// Typo fix in the log message: "cannnot" -> "cannot".
		config.G.Log.Carbon.LogWarn("Malformed metric, cannot parse value as float: \"%s\"", splitMetric[1])
		logging.Statsd.Client.Inc("cassabon.carbon.received.failure", 1, 1.0)
		return
	}
	// Pull out and validate the third field from the triplet.
	ts, err := strconv.ParseFloat(splitMetric[2], 64)
	if err != nil {
		config.G.Log.Carbon.LogWarn("Malformed metric, cannot parse timestamp as float: \"%s\"", splitMetric[2])
		logging.Statsd.Client.Inc("cassabon.carbon.received.failure", 1, 1.0)
		return
	}
	// Assemble into canonical struct and send to enqueueing worker.
	parsedMetric := CarbonMetric{statPath, val, ts}
	config.G.Log.Carbon.LogDebug("Woohoo! Pushing metric into channel: %v", parsedMetric)
	logging.Statsd.Client.Inc("cassabon.carbon.received.success", 1, 1.0)
}
Reassemble metrics lines that span buffer boundaries.
// Package listener implements the listener pool and handlers for metrics.
package listener
import (
"bufio"
"bytes"
"net"
"os"
"strconv"
"strings"
"time"
"github.com/jeffpierce/cassabon/config"
"github.com/jeffpierce/cassabon/logging"
)
// CarbonMetric is the canonical representation of Carbon data.
type CarbonMetric struct {
Path string // Metric path
Value float64 // Metric Value
Timestamp float64 // Epoch timestamp
}
// CarbonTCP listens for incoming Carbon TCP traffic and dispatches it.
func CarbonTCP(addr string, port string) {
	// Resolve the address:port, and start listening for TCP connections.
	hostPort := net.JoinHostPort(addr, port)
	tcpaddr, err := net.ResolveTCPAddr("tcp4", hostPort)
	if err != nil {
		// BUG FIX: the resolve error was silently discarded, leaving a
		// nil tcpaddr for ListenTCP below.
		config.G.Log.System.LogFatal("Cannot resolve Carbon TCP address %s: %v", hostPort, err)
		os.Exit(3)
	}
	tcpListener, err := net.ListenTCP("tcp4", tcpaddr)
	if err != nil {
		// If we can't grab a port, we can't do our job. Log, whine, and crash.
		// BUG FIX: tcpListener is nil here; the original dereferenced it
		// (tcpListener.Addr()) for the log message and panicked.
		config.G.Log.System.LogFatal("Cannot listen for Carbon on TCP address %s: %v", hostPort, err)
		os.Exit(3)
	}
	defer tcpListener.Close()
	config.G.Log.System.LogInfo("Listening on %s TCP for Carbon plaintext protocol", tcpListener.Addr().String())
	// Start listener and pass incoming connections to handler.
	for {
		select {
		case <-config.G.Quit:
			config.G.Log.System.LogInfo("CarbonTCP received QUIT message")
			config.G.WG.Done()
			return
		default:
			// On receipt of a connection, spawn a goroutine to handle it.
			// The deadline makes Accept return regularly so Quit is polled.
			tcpListener.SetDeadline(time.Now().Add(5 * time.Second))
			if conn, err := tcpListener.Accept(); err == nil {
				go getTCPData(conn)
			} else {
				if err.(net.Error).Timeout() {
					config.G.Log.System.LogDebug("CarbonTCP Accept() timed out")
				} else {
					config.G.Log.System.LogWarn("CarbonTCP Accept() error: %v", err)
				}
			}
		}
	}
}
// getTCPData reads newline-terminated Carbon metric lines from conn and
// dispatches each one to metricHandler until the peer disconnects.
func getTCPData(conn net.Conn) {
	defer conn.Close()
	lines := bufio.NewScanner(conn)
	for lines.Scan() {
		metricHandler(lines.Text())
	}
	config.G.Log.Carbon.LogDebug("Returning from getTCPData")
}
// CarbonUDP listens for incoming Carbon UDP traffic and dispatches it.
func CarbonUDP(addr string, port string) {
	// Resolve the address:port, and start listening for UDP connections.
	hostPort := net.JoinHostPort(addr, port)
	udpaddr, err := net.ResolveUDPAddr("udp4", hostPort)
	if err != nil {
		// BUG FIX: the resolve error was silently discarded, leaving a
		// nil udpaddr for ListenUDP below.
		config.G.Log.System.LogFatal("Cannot resolve Carbon UDP address %s: %v", hostPort, err)
		os.Exit(3)
	}
	udpConn, err := net.ListenUDP("udp", udpaddr)
	if err != nil {
		// If we can't grab a port, we can't do our job. Log, whine, and crash.
		// BUG FIX: udpConn is nil here; the original called
		// udpConn.LocalAddr() for the log message and panicked.
		config.G.Log.System.LogFatal("Cannot listen for Carbon on UDP address %s: %v", hostPort, err)
		os.Exit(3)
	}
	defer udpConn.Close()
	config.G.Log.System.LogInfo("Listening on %s UDP for Carbon plaintext protocol", udpConn.LocalAddr().String())
	/* Read UDP packets and pass data to handler.
	 *
	 * Individual metrics lines may be spread across packet boundaries, so we
	 * must avoid dispatching partial lines: they will likely be invalid,
	 * and certainly wrong.
	 *
	 * To resolve this, we only dispatch the part of the buffer up to the last
	 * newline, and save the remainder for prepending to the next buffer.
	 */
	line := ""                   // The (possibly concatenated) line to be dispatched
	buf := make([]byte, 4096)    // The buffer into which UDP messages will be read
	remBuf := make([]byte, 4096) // Holds data following the last newline seen
	remBytes := 0                // The number of data bytes in remBuf
	for {
		select {
		case <-config.G.Quit:
			config.G.Log.System.LogInfo("CarbonUDP received QUIT message")
			config.G.WG.Done()
			return
		default:
			udpConn.SetDeadline(time.Now().Add(5 * time.Second))
			bytesRead, _, err := udpConn.ReadFromUDP(buf)
			if err == nil {
				// Capture the position of the last newline in the input buffer.
				lastNewline := bytes.LastIndex(buf[:bytesRead], []byte("\n"))
				if lastNewline < 0 {
					// BUG FIX: a packet containing no newline made
					// lastNewline -1, and buf[:lastNewline] panicked.
					// Save the whole packet as remainder and wait for more.
					if remBytes+bytesRead <= len(remBuf) {
						copy(remBuf[remBytes:], buf[:bytesRead])
						remBytes += bytesRead
					} else {
						// Unterminated data exceeds the remainder buffer;
						// drop it rather than overflow.
						remBytes = 0
					}
					continue
				}
				if remBytes > 0 {
					// Concatenate previous remainder and current input.
					line = string(remBuf[:remBytes]) + string(buf[:lastNewline])
				} else {
					// Use current input up to last newline present.
					line = string(buf[:lastNewline])
				}
				// Is there a truncated metric in the current input buffer?
				if lastNewline < bytesRead-1 {
					// Save the unterminated data for prepending to next input.
					remBytes = (bytesRead - 1) - lastNewline
					copy(remBuf, buf[lastNewline+1:bytesRead])
				} else {
					// Current input buffer ends on a metrics boundary.
					remBytes = 0
				}
				go getUDPData(line)
			} else {
				if err.(net.Error).Timeout() {
					config.G.Log.System.LogDebug("CarbonUDP Read() timed out")
				} else {
					config.G.Log.System.LogWarn("CarbonUDP Read() error: %v", err)
				}
			}
		}
	}
}
// getUDPData scans data received from a UDP connection and dispatches it.
// buf contains zero or more complete newline-terminated metric lines, as
// reassembled by CarbonUDP.
func getUDPData(buf string) {
// Carbon metrics are terminated by newlines. Read line-by-line, and dispatch.
scanner := bufio.NewScanner(strings.NewReader(buf))
for scanner.Scan() {
metricHandler(scanner.Text())
}
config.G.Log.Carbon.LogDebug("Returning from getUDPData")
}
// metricHandler validates a Carbon plaintext line ("path value timestamp"),
// converts it to a CarbonMetric, and records success/failure counters.
// Malformed lines are logged at Warn and dropped.
func metricHandler(line string) {
	// Examine metric to ensure that it's a valid carbon metric triplet.
	splitMetric := strings.Fields(line)
	if len(splitMetric) != 3 {
		// Log this as a Warn, because it's the client's error, not ours.
		config.G.Log.Carbon.LogWarn("Malformed metric, expected 3 fields, found %d: \"%s\"", len(splitMetric), line)
		logging.Statsd.Client.Inc("cassabon.carbon.received.failure", 1, 1.0)
		return
	}
	// Pull out the first field from the triplet.
	statPath := splitMetric[0]
	// Pull out and validate the second field from the triplet.
	val, err := strconv.ParseFloat(splitMetric[1], 64)
	if err != nil {
		// Typo fix in the log message: "cannnot" -> "cannot".
		config.G.Log.Carbon.LogWarn("Malformed metric, cannot parse value as float: \"%s\"", splitMetric[1])
		logging.Statsd.Client.Inc("cassabon.carbon.received.failure", 1, 1.0)
		return
	}
	// Pull out and validate the third field from the triplet.
	ts, err := strconv.ParseFloat(splitMetric[2], 64)
	if err != nil {
		config.G.Log.Carbon.LogWarn("Malformed metric, cannot parse timestamp as float: \"%s\"", splitMetric[2])
		logging.Statsd.Client.Inc("cassabon.carbon.received.failure", 1, 1.0)
		return
	}
	// Assemble into canonical struct and send to enqueueing worker.
	parsedMetric := CarbonMetric{statPath, val, ts}
	config.G.Log.Carbon.LogDebug("Woohoo! Pushing metric into channel: %v", parsedMetric)
	logging.Statsd.Client.Inc("cassabon.carbon.received.success", 1, 1.0)
}
|
package docker
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
)
// TestBadRequest verifies that badRequest writes HTTP 400 and echoes the
// error message (with a trailing newline) as the response body.
func TestBadRequest(t *testing.T) {
var tests = []struct {
err error
want string
}{
{
fmt.Errorf(`BadRequest`),
`BadRequest
`,
},
}
for _, test := range tests {
writer := httptest.NewRecorder()
badRequest(writer, test.err)
if result := writer.Result().StatusCode; result != http.StatusBadRequest {
t.Errorf(`badRequest(%v) = %v, want %v`, test.err, result, http.StatusBadRequest)
}
// NOTE(review): writer.Result() is called twice, creating two Response
// values over the same recorded body buffer.
if result, _ := readBody(writer.Result().Body); string(result) != string(test.want) {
t.Errorf(`badRequest(%v) = %v, want %v`, test.err, string(result), string(test.want))
}
}
}
Add a test for the forbidden handler.
package docker
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
)
// TestBadRequest checks badRequest's status code (400) and that the body
// is the error text followed by a newline.
func TestBadRequest(t *testing.T) {
var tests = []struct {
err error
want string
}{
{
fmt.Errorf(`BadRequest`),
`BadRequest
`,
},
}
for _, test := range tests {
writer := httptest.NewRecorder()
badRequest(writer, test.err)
if result := writer.Result().StatusCode; result != http.StatusBadRequest {
t.Errorf(`badRequest(%v) = %v, want %v`, test.err, result, http.StatusBadRequest)
}
if result, _ := readBody(writer.Result().Body); string(result) != string(test.want) {
t.Errorf(`badRequest(%v) = %v, want %v`, test.err, string(result), string(test.want))
}
}
}
// TestForbidden verifies that forbidden writes an HTTP 403 status.
// The original table-driven scaffold held a single empty struct and added
// nothing, so it has been removed.
func TestForbidden(t *testing.T) {
	writer := httptest.NewRecorder()
	forbidden(writer)
	if result := writer.Result().StatusCode; result != http.StatusForbidden {
		t.Errorf(`forbidden() = %v, want %v`, result, http.StatusForbidden)
	}
}
|
// Copyright (c) 2013 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package base
import (
"github.com/couchbaselabs/go.assert"
"testing"
)
// TestFixJSONNumbers verifies that FixJSONNumbers converts whole-valued
// float64s to int64, leaves fractional floats, non-numbers, and values too
// large for exact conversion untouched, and recurses into slices and maps.
func TestFixJSONNumbers(t *testing.T) {
assert.DeepEquals(t, FixJSONNumbers(1), 1)
assert.DeepEquals(t, FixJSONNumbers(float64(1.23)), float64(1.23))
assert.DeepEquals(t, FixJSONNumbers(float64(123456)), int64(123456))
assert.DeepEquals(t, FixJSONNumbers(float64(123456789)), int64(123456789))
assert.DeepEquals(t, FixJSONNumbers(float64(12345678901234567890)),
float64(12345678901234567890))
assert.DeepEquals(t, FixJSONNumbers("foo"), "foo")
assert.DeepEquals(t, FixJSONNumbers([]interface{}{1, float64(123456)}),
[]interface{}{1, int64(123456)})
assert.DeepEquals(t, FixJSONNumbers(map[string]interface{}{"foo": float64(123456)}),
map[string]interface{}{"foo": int64(123456)})
}
// TestBackQuotedStrings verifies that ConvertBackQuotedStrings rewrites
// back-quoted strings into double-quoted JSON strings — escaping embedded
// newlines, double quotes and tabs — and leaves regular JSON untouched.
func TestBackQuotedStrings(t *testing.T) {
input := `{"foo": "bar"}`
output := ConvertBackQuotedStrings([]byte(input))
assert.Equals(t, string(output), input)
input = "{\"foo\": `bar`}"
output = ConvertBackQuotedStrings([]byte(input))
assert.Equals(t, string(output), `{"foo": "bar"}`)
input = "{\"foo\": `bar\nbaz\nboo`}"
output = ConvertBackQuotedStrings([]byte(input))
assert.Equals(t, string(output), `{"foo": "bar\nbaz\nboo"}`)
input = "{\"foo\": `bar\n\"baz\n\tboo`}"
output = ConvertBackQuotedStrings([]byte(input))
assert.Equals(t, string(output), `{"foo": "bar\n\"baz\n\tboo"}`)
input = "{\"foo\": `bar\n`, \"baz\": `howdy`}"
output = ConvertBackQuotedStrings([]byte(input))
assert.Equals(t, string(output), `{"foo": "bar\n", "baz": "howdy"}`)
}
Added a unit test for the removal of \r from back-quoted strings; this is required for the sync function in SG configs, which otherwise fail to parse.
// Copyright (c) 2013 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package base
import (
"github.com/couchbaselabs/go.assert"
"testing"
)
// TestFixJSONNumbers checks FixJSONNumbers' float64-to-int64 conversion of
// whole numbers, its pass-through of fractional floats, strings, and
// oversized values, and its recursion through slices and maps.
func TestFixJSONNumbers(t *testing.T) {
assert.DeepEquals(t, FixJSONNumbers(1), 1)
assert.DeepEquals(t, FixJSONNumbers(float64(1.23)), float64(1.23))
assert.DeepEquals(t, FixJSONNumbers(float64(123456)), int64(123456))
assert.DeepEquals(t, FixJSONNumbers(float64(123456789)), int64(123456789))
assert.DeepEquals(t, FixJSONNumbers(float64(12345678901234567890)),
float64(12345678901234567890))
assert.DeepEquals(t, FixJSONNumbers("foo"), "foo")
assert.DeepEquals(t, FixJSONNumbers([]interface{}{1, float64(123456)}),
[]interface{}{1, int64(123456)})
assert.DeepEquals(t, FixJSONNumbers(map[string]interface{}{"foo": float64(123456)}),
map[string]interface{}{"foo": int64(123456)})
}
// TestBackQuotedStrings checks ConvertBackQuotedStrings' rewriting of
// back-quoted strings to double-quoted JSON strings, including escaping of
// newlines, quotes and tabs, and (last case) stripping of \r characters.
func TestBackQuotedStrings(t *testing.T) {
input := `{"foo": "bar"}`
output := ConvertBackQuotedStrings([]byte(input))
assert.Equals(t, string(output), input)
input = "{\"foo\": `bar`}"
output = ConvertBackQuotedStrings([]byte(input))
assert.Equals(t, string(output), `{"foo": "bar"}`)
input = "{\"foo\": `bar\nbaz\nboo`}"
output = ConvertBackQuotedStrings([]byte(input))
assert.Equals(t, string(output), `{"foo": "bar\nbaz\nboo"}`)
input = "{\"foo\": `bar\n\"baz\n\tboo`}"
output = ConvertBackQuotedStrings([]byte(input))
assert.Equals(t, string(output), `{"foo": "bar\n\"baz\n\tboo"}`)
input = "{\"foo\": `bar\n`, \"baz\": `howdy`}"
output = ConvertBackQuotedStrings([]byte(input))
assert.Equals(t, string(output), `{"foo": "bar\n", "baz": "howdy"}`)
// \r characters inside back-quoted strings are removed, not escaped.
input = "{\"foo\": `bar\r\n`, \"baz\": `\r\nhowdy`}"
output = ConvertBackQuotedStrings([]byte(input))
assert.Equals(t, string(output), `{"foo": "bar\n", "baz": "\nhowdy"}`)
}
|
package jsonhl
import (
"bytes"
"github.com/dawi/jsont"
"io"
"strings"
)
// resetColor restores the terminal's default color after each token.
var resetColor = "\x1b[0m"
// Colors maps JSON token types to the ANSI escape used to print them.
type Colors map[jsont.TokenType]string
// DefaultColors is the default xterm-256 palette: structural tokens
// (braces, brackets, colon, comma) share one color, numbers share another,
// and unrecognised tokens get their own highlight color.
var DefaultColors = Colors{
jsont.ObjectStart: "\x1b[38;5;242m",
jsont.ObjectEnd: "\x1b[38;5;242m",
jsont.ArrayStart: "\x1b[38;5;242m",
jsont.ArrayEnd: "\x1b[38;5;242m",
jsont.Colon: "\x1b[38;5;242m",
jsont.Comma: "\x1b[38;5;242m",
jsont.FieldName: "\x1b[38;5;33m",
jsont.True: "\x1b[38;5;22m",
jsont.False: "\x1b[38;5;124m",
jsont.Null: "\x1b[38;5;124m",
jsont.Integer: "\x1b[38;5;117m",
jsont.Float: "\x1b[38;5;117m",
jsont.String: "\x1b[38;5;45m",
jsont.Unknown: "\x1b[38;5;1m",
}
// HighlightString returns jsonString with ANSI color escape codes
// inserted around each JSON token, plus any tokenizer or write error.
func HighlightString(jsonString string) (string, error) {
	b := &bytes.Buffer{}
	err := Highlight(strings.NewReader(jsonString), b)
	// Buffer.String is the direct accessor; the old string(b.Bytes())
	// performed an equivalent but needless extra conversion.
	return b.String(), err
}
// HighlightBytes returns jsonString with ANSI color escape codes
// inserted around each JSON token, plus any tokenizer or write error.
func HighlightBytes(jsonString []byte) ([]byte, error) {
	b := &bytes.Buffer{}
	// Read the bytes directly: the old strings.NewReader(string(jsonString))
	// copied the entire input just to build a reader.
	err := Highlight(bytes.NewReader(jsonString), b)
	return b.Bytes(), err
}
// Highlight reads JSON tokens from reader and writes them to writer,
// prefixing each token with the ANSI color registered for its type in
// DefaultColors and appending a reset code after it. Token types with
// no entry in DefaultColors get an empty prefix, so they are emitted
// uncolored (still followed by the reset code).
// Returns the first write error, otherwise whatever the tokenizer
// reports — presumably nil on a clean end of input; confirm against
// the jsont package documentation.
func Highlight(reader io.Reader, writer io.Writer) error {
	tokenizer := jsont.NewTokenizer(reader)
	for tokenizer.Next() {
		token := tokenizer.Token()
		color := DefaultColors[token.Type]
		if _, err := writer.Write([]byte(color + token.Value + resetColor)); err != nil {
			return err
		}
	}
	return tokenizer.Error()
}
Cleaner HighlightString and HighlightBytes functions
package jsonhl
import (
"bytes"
"github.com/dawi/jsont"
"io"
"strings"
)
var resetColor = "\x1b[0m"
type Colors map[jsont.TokenType]string
var DefaultColors = Colors{
jsont.ObjectStart: "\x1b[38;5;242m",
jsont.ObjectEnd: "\x1b[38;5;242m",
jsont.ArrayStart: "\x1b[38;5;242m",
jsont.ArrayEnd: "\x1b[38;5;242m",
jsont.Colon: "\x1b[38;5;242m",
jsont.Comma: "\x1b[38;5;242m",
jsont.FieldName: "\x1b[38;5;33m",
jsont.True: "\x1b[38;5;22m",
jsont.False: "\x1b[38;5;124m",
jsont.Null: "\x1b[38;5;124m",
jsont.Integer: "\x1b[38;5;117m",
jsont.Float: "\x1b[38;5;117m",
jsont.String: "\x1b[38;5;45m",
jsont.Unknown: "\x1b[38;5;1m",
}
// HighlightString returns jsonString with ANSI color escape codes
// added around each JSON token, plus any tokenizer or write error.
func HighlightString(jsonString string) (string, error) {
	var out bytes.Buffer
	err := Highlight(strings.NewReader(jsonString), &out)
	return out.String(), err
}
// HighlightBytes returns jsonString with ANSI color escape codes
// added around each JSON token, plus any tokenizer or write error.
func HighlightBytes(jsonString []byte) ([]byte, error) {
	// bytes.NewReader is a read-only view; bytes.NewBuffer takes
	// ownership of the caller's slice and permits writes to it.
	reader := bytes.NewReader(jsonString)
	writer := &bytes.Buffer{}
	err := Highlight(reader, writer)
	return writer.Bytes(), err
}
// Highlight streams JSON tokens from reader to writer, coloring each
// token with the ANSI escape registered for its type in DefaultColors
// and appending a reset code after it. Returns the first write error,
// otherwise the tokenizer's final error state.
func Highlight(reader io.Reader, writer io.Writer) error {
	tok := jsont.NewTokenizer(reader)
	for tok.Next() {
		t := tok.Token()
		colored := DefaultColors[t.Type] + t.Value + resetColor
		if _, err := io.WriteString(writer, colored); err != nil {
			return err
		}
	}
	return tok.Error()
}
|
/*
Copyright (c) 2017 Ross Oreto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package tree
import (
"fmt"
)
// Btree represents an AVL tree
type Btree struct {
root *Node
values []interface{}
len int
compare Comp
}
// CompareTo interface to define the compare method used to insert and find values
type CompareTo interface {
Comp(val interface{}) int8
}
// Comp compare function expressed as a type
type Comp func(v1, v2 interface{}) int8
// Node represents a node in the tree with a value, left and right children, and a height/balance of the node.
type Node struct {
Value interface{}
left, right *Node
height int8
}
// New returns a new btree which expects types that implement the CompareTo or Stringer Interfaces
func New() *Btree { return new(Btree).Init() }

// NewInt returns a new btree which expects int types
func NewInt() *Btree { return new(Btree).InitWithCompare(intComp) }

// NewString returns a new btree which expects string types
func NewString() *Btree { return new(Btree).InitWithCompare(stringComp) }

// NewUint returns a new btree which expects uint types
func NewUint() *Btree { return new(Btree).InitWithCompare(uintComp) }

// NewFloat32 returns a new btree which expects float32 types
func NewFloat32() *Btree { return new(Btree).InitWithCompare(float32Comp) }

// NewFloat64 returns a new btree which expects float64 types
func NewFloat64() *Btree { return new(Btree).InitWithCompare(float64Comp) }

// NewUintptr returns a new btree which expects uintptr types
func NewUintptr() *Btree { return new(Btree).InitWithCompare(uintptrComp) }

// NewRune returns a new btree which expects rune types
func NewRune() *Btree { return new(Btree).InitWithCompare(runeComp) }

// NewByte returns a new btree which expects byte types
func NewByte() *Btree { return new(Btree).InitWithCompare(byteComp) }

// NewComplex64 returns a new btree which expects complex64 types
func NewComplex64() *Btree { return new(Btree).InitWithCompare(complex64Comp) }

// NewComplex128 returns a new btree which expects complex128 types
func NewComplex128() *Btree { return new(Btree).InitWithCompare(complex128Comp) }

// NewStringPtr returns a new btree which expects *string types
func NewStringPtr() *Btree { return new(Btree).InitWithCompare(stringPtrComp) }

// NewUintPtr returns a new btree which expects *uint types
func NewUintPtr() *Btree { return new(Btree).InitWithCompare(uintPtrComp) }

// NewIntPtr returns a new btree which expects *int types
func NewIntPtr() *Btree { return new(Btree).InitWithCompare(intPtrComp) }

// NewBytePtr returns a new btree which expects *byte types
func NewBytePtr() *Btree { return new(Btree).InitWithCompare(bytePtrComp) }

// NewRunePtr returns a new btree which expects *rune types
func NewRunePtr() *Btree { return new(Btree).InitWithCompare(runePtrComp) }

// NewFloat32Ptr returns a new btree which expects *float32 types
func NewFloat32Ptr() *Btree { return new(Btree).InitWithCompare(float32PtrComp) }

// NewFloat64Ptr returns a new btree which expects *float64 types
func NewFloat64Ptr() *Btree { return new(Btree).InitWithCompare(float64PtrComp) }

// NewComplex32Ptr returns a new btree whose compare function asserts
// *float64 — NOTE(review): Go has no complex32 type; the name and the
// installed comparator disagree, so confirm the intended element type
// before using this constructor.
func NewComplex32Ptr() *Btree { return new(Btree).InitWithCompare(complex32PtrComp) }

// NewComplex64Ptr returns a new btree which expects *complex64 types
func NewComplex64Ptr() *Btree { return new(Btree).InitWithCompare(complex64PtrComp) }
// Init initializes all values/clears the tree using the default compare method and returns the tree pointer
func (t *Btree) Init() *Btree {
t.root = nil
t.values = nil
t.len = 0
t.compare = comp
return t
}
// InitWithCompare initializes all values/clears the tree using the specified compare method and returns the tree pointer
func (t *Btree) InitWithCompare(compare Comp) *Btree {
t.Init()
t.compare = compare
return t
}
// String returns a string representation of the tree values
func (t *Btree) String() string {
return fmt.Sprint(t.Values())
}
// Empty returns true if the tree is empty
func (t *Btree) Empty() bool {
return t.root == nil
}
// NotEmpty returns true if the tree is not empty
func (t *Btree) NotEmpty() bool {
return t.root != nil
}
func (t *Btree) balance() int8 {
if t.root != nil {
return balance(t.root)
}
return 0
}
// Insert inserts a new value into the tree and returns the tree pointer
func (t *Btree) Insert(value interface{}) *Btree {
added := false
t.root = insert(t.root, value, &added, t.compare)
if added {
t.len++
}
t.values = nil
return t
}
// insert adds value to the subtree rooted at n, sets *added to true
// when a new node was created (false when an equal value overwrote an
// existing node's Value), performs AVL rebalancing on the way back up
// the recursion, and returns the (possibly new) subtree root.
func insert(n *Node, value interface{}, added *bool, compare Comp) *Node {
	if n == nil {
		*added = true
		return (&Node{Value: value}).Init()
	}
	c := compare(value, n.Value)
	if c > 0 {
		n.right = insert(n.right, value, added, compare)
	} else if c < 0 {
		n.left = insert(n.left, value, added, compare)
	} else {
		// Equal: replace the stored value in place. Tree shape is
		// unchanged, so no height update or rebalancing is needed.
		n.Value = value
		*added = false
		return n
	}
	// Recompute this node's height, then rotate if its balance factor
	// left the AVL band [-1, 1]. The four cases are the standard
	// LL/LR/RR/RL rotations, chosen by re-comparing against the child.
	n.height = n.maxHeight() + 1
	c = balance(n)
	if c > 1 {
		c = compare(value, n.left.Value)
		if c < 0 {
			return n.rotateRight()
		} else if c > 0 {
			n.left = n.left.rotateLeft()
			return n.rotateRight()
		}
	} else if c < -1 {
		c = compare(value, n.right.Value)
		if c > 0 {
			return n.rotateLeft()
		} else if c < 0 {
			n.right = n.right.rotateRight()
			return n.rotateLeft()
		}
	}
	return n
}
// InsertAll inserts all the values into the tree and returns the tree pointer
func (t *Btree) InsertAll(values []interface{}) *Btree {
for _, v := range values {
t.Insert(v)
}
return t
}
// Contains returns true if the tree contains the specified value
func (t *Btree) Contains(value interface{}) bool {
return t.Get(value) != nil
}
// ContainsAny returns true if the tree contains any of the values
func (t *Btree) ContainsAny(values []interface{}) bool {
for _, v := range values {
if t.Contains(v) {
return true
}
}
return false
}
// ContainsAll returns true if the tree contains all of the values
func (t *Btree) ContainsAll(values []interface{}) bool {
for _, v := range values {
if !t.Contains(v) {
return false
}
}
return true
}
// Get returns the node value associated with the search value
func (t *Btree) Get(value interface{}) interface{} {
var node *Node
if t.root != nil {
node = t.root.get(value, t.compare)
}
if node != nil {
return node.Value
}
return nil
}
// Len return the number of nodes in the tree
func (t *Btree) Len() int {
return t.len
}
// Head returns the first value in the tree
func (t *Btree) Head() interface{} {
if t.root == nil {
return nil
}
var beginning = t.root
for beginning.left != nil {
beginning = beginning.left
}
if beginning == nil {
for beginning.right != nil {
beginning = beginning.right
}
}
if beginning != nil {
return beginning.Value
}
return nil
}
// Tail returns the last value in the tree
func (t *Btree) Tail() interface{} {
if t.root == nil {
return nil
}
var beginning = t.root
for beginning.right != nil {
beginning = beginning.right
}
if beginning == nil {
for beginning.left != nil {
beginning = beginning.left
}
}
if beginning != nil {
return beginning.Value
}
return nil
}
// Values returns a slice of all the values in tree in order
func (t *Btree) Values() []interface{} {
if t.values == nil {
t.values = make([]interface{}, t.len)
t.Ascend(func(n *Node, i int) bool {
t.values[i] = n.Value
return true
})
}
return t.values
}
// Delete deletes the node from the tree associated with the search value
func (t *Btree) Delete(value interface{}) *Btree {
deleted := false
t.root = deleteNode(t.root, value, &deleted, t.compare)
if deleted {
t.len--
}
t.values = nil
return t
}
// DeleteAll deletes the nodes from the tree associated with the search values
func (t *Btree) DeleteAll(values []interface{}) *Btree {
for _, v := range values {
t.Delete(v)
}
return t
}
// deleteNode removes value from the subtree rooted at n, sets *deleted
// to true when a node was actually removed, rebalances (AVL) on the
// way back up the recursion, and returns the new subtree root.
func deleteNode(n *Node, value interface{}, deleted *bool, compare Comp) *Node {
	if n == nil {
		return n
	}
	c := compare(value, n.Value)
	if c < 0 {
		n.left = deleteNode(n.left, value, deleted, compare)
	} else if c > 0 {
		n.right = deleteNode(n.right, value, deleted, compare)
	} else {
		// Found the node to remove.
		if n.left == nil {
			t := n.right
			n.Init()
			// BUG FIX: the leaf/single-child branches previously
			// returned without setting *deleted, so Btree.Delete never
			// decremented len for those removals.
			*deleted = true
			return t
		} else if n.right == nil {
			t := n.left
			n.Init()
			*deleted = true
			return t
		}
		// Two children: copy the in-order successor's value up, then
		// delete the successor from the right subtree.
		t := n.right.min()
		n.Value = t.Value
		n.right = deleteNode(n.right, t.Value, deleted, compare)
		*deleted = true
	}
	// Re-balance: recompute this node's height and rotate if its
	// balance factor left [-1, 1]. (The old `if n == nil` re-check
	// here was dead code — n cannot become nil at this point.)
	n.height = n.maxHeight() + 1
	bal := balance(n)
	if bal > 1 {
		if balance(n.left) >= 0 {
			return n.rotateRight()
		}
		n.left = n.left.rotateLeft()
		return n.rotateRight()
	} else if bal < -1 {
		if balance(n.right) <= 0 {
			return n.rotateLeft()
		}
		n.right = n.right.rotateRight()
		return n.rotateLeft()
	}
	return n
}
// Pop deletes the last node from the tree and returns its value
func (t *Btree) Pop() interface{} {
value := t.Tail()
if value != nil {
t.Delete(value)
}
return value
}
// Pull deletes the first node from the tree and returns its value
func (t *Btree) Pull() interface{} {
value := t.Head()
if value != nil {
t.Delete(value)
}
return value
}
// NodeIterator expresses the iterator function used for traversals
type NodeIterator func(n *Node, i int) bool
// Ascend performs an ascending order traversal of the tree calling the iterator function on each node
// the iterator will continue as long as the NodeIterator returns true
func (t *Btree) Ascend(iterator NodeIterator) {
var i int
if t.root != nil {
t.root.iterate(iterator, &i, true)
}
}
// Descend performs a descending order traversal of the tree using the iterator
// the iterator will continue as long as the NodeIterator returns true
func (t *Btree) Descend(iterator NodeIterator) {
var i int
if t.root != nil {
t.root.rIterate(iterator, &i, true)
}
}
// Debug prints out useful debug information about the tree for debugging purposes
func (t *Btree) Debug() {
fmt.Println("----------------------------------------------------------------------------------------------")
if t.Empty() {
fmt.Println("tree is empty")
} else {
fmt.Println(t.Len(), "elements")
}
t.Ascend(func(n *Node, i int) bool {
if t.root.Value == n.Value {
fmt.Print("ROOT ** ")
}
n.Debug()
return true
})
fmt.Println("----------------------------------------------------------------------------------------------")
}
// Init initializes the values of the node or clears the node and returns the node pointer
func (n *Node) Init() *Node {
n.height = 1
n.left = nil
n.right = nil
return n
}
// String returns a string representing the node
func (n *Node) String() string {
return fmt.Sprint(n.Value)
}
// Debug prints out useful debug information about the tree node for debugging purposes
func (n *Node) Debug() {
var children string
if n.left == nil && n.right == nil {
children = "no children |"
} else if n.left != nil && n.right != nil {
children = fmt.Sprint("left child:", n.left.String(), " right child:", n.right.String())
} else if n.right != nil {
children = fmt.Sprint("right child:", n.right.String())
} else {
children = fmt.Sprint("left child:", n.left.String())
}
fmt.Println(n.String(), "|", "height", n.height, "|", "balance", balance(n), "|", children)
}
// height reports the stored height of n; a nil node has height 0.
func height(n *Node) int8 {
	if n == nil {
		return 0
	}
	return n.height
}

// balance reports the AVL balance factor of n — the height of its left
// subtree minus the height of its right subtree. A nil node is balanced.
func balance(n *Node) int8 {
	if n == nil {
		return 0
	}
	return height(n.left) - height(n.right)
}
func (n *Node) get(val interface{}, compare Comp) *Node {
var node *Node
c := compare(val, n.Value)
if c < 0 {
if n.left != nil {
node = n.left.get(val, compare)
}
} else if c > 0 {
if n.right != nil {
node = n.right.get(val, compare)
}
} else {
node = n
}
return node
}
// rotateRight performs a right AVL rotation of the subtree rooted at
// n: n's left child becomes the new root with n as its right child,
// and the child's former right subtree becomes n's new left subtree.
// Heights of the two moved nodes are recomputed. Returns the new root.
func (n *Node) rotateRight() *Node {
	l := n.left
	// Rotation: l takes n's place; l's old right subtree moves under n.
	l.right, n.left = n, l.right
	// Update heights bottom-up — n is now below l.
	n.height = n.maxHeight() + 1
	l.height = l.maxHeight() + 1
	return l
}

// rotateLeft is the mirror of rotateRight: n's right child becomes the
// new root with n as its left child, and the child's former left
// subtree becomes n's new right subtree. Returns the new root.
func (n *Node) rotateLeft() *Node {
	r := n.right
	// Rotation: r takes n's place; r's old left subtree moves under n.
	r.left, n.right = n, r.left
	// Update heights bottom-up — n is now below r.
	n.height = n.maxHeight() + 1
	r.height = r.maxHeight() + 1
	return r
}
// iterate visits the subtree rooted at n in ascending (in-order)
// order, calling iterator with each node and the running index *i,
// which is incremented per visited node.
// NOTE(review): cont is passed by value, so when the iterator returns
// false deep inside a subtree the ancestors never observe it — the
// traversal stops within that subtree but resumes in later ones.
// Confirm whether early termination is intended to be global.
func (n *Node) iterate(iterator NodeIterator, i *int, cont bool) {
	if n != nil && cont {
		n.left.iterate(iterator, i, cont)
		cont = iterator(n, *i)
		*i++
		n.right.iterate(iterator, i, cont)
	}
}
// rIterate visits the subtree rooted at n in descending order, calling
// iterator with each node and the running index *i.
// BUG FIX: the recursive calls previously used iterate (the ascending
// traversal), so only the root-level ordering was reversed and Descend
// emitted subtrees in ascending order; recurse with rIterate instead.
func (n *Node) rIterate(iterator NodeIterator, i *int, cont bool) {
	if n != nil && cont {
		n.right.rIterate(iterator, i, cont)
		cont = iterator(n, *i)
		*i++
		n.left.rIterate(iterator, i, cont)
	}
}
// min returns the left-most (smallest) node of the subtree rooted at n.
func (n *Node) min() *Node {
	node := n
	for node.left != nil {
		node = node.left
	}
	return node
}

// maxHeight returns the larger of the two child subtree heights.
func (n *Node) maxHeight() int8 {
	lh, rh := height(n.left), height(n.right)
	if lh > rh {
		return lh
	}
	return rh
}
// intComp orders two int values: 1 if v1 > v2, -1 if v1 < v2, else 0.
func intComp(v1, v2 interface{}) int8 {
	a, b := v1.(int), v2.(int)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}

// stringComp orders two string values lexicographically.
func stringComp(v1, v2 interface{}) int8 {
	a, b := v1.(string), v2.(string)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}

// uintComp orders two uint values.
func uintComp(v1, v2 interface{}) int8 {
	a, b := v1.(uint), v2.(uint)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}

// float32Comp orders two float32 values.
func float32Comp(v1, v2 interface{}) int8 {
	a, b := v1.(float32), v2.(float32)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}

// float64Comp orders two float64 values.
func float64Comp(v1, v2 interface{}) int8 {
	a, b := v1.(float64), v2.(float64)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}

// uintptrComp orders two uintptr values.
func uintptrComp(v1, v2 interface{}) int8 {
	a, b := v1.(uintptr), v2.(uintptr)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}

// byteComp orders two byte values.
func byteComp(v1, v2 interface{}) int8 {
	a, b := v1.(byte), v2.(byte)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}

// runeComp orders two rune values.
func runeComp(v1, v2 interface{}) int8 {
	a, b := v1.(rune), v2.(rune)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// complex64Comp orders two complex64 values by real part, breaking
// ties on the imaginary part. Go's complex types have no < operator,
// so an arbitrary-but-consistent total order is imposed here.
// BUG FIX: this previously type-asserted float64, which panicked for
// the complex64 values that NewComplex64 advertises.
func complex64Comp(v1, v2 interface{}) int8 {
	t1, t2 := v1.(complex64), v2.(complex64)
	if real(t1) > real(t2) {
		return 1
	} else if real(t1) < real(t2) {
		return -1
	}
	if imag(t1) > imag(t2) {
		return 1
	} else if imag(t1) < imag(t2) {
		return -1
	}
	return 0
}

// complex128Comp orders two complex128 values by real part, breaking
// ties on the imaginary part (see complex64Comp).
// BUG FIX: previously asserted float64 and panicked for complex128.
func complex128Comp(v1, v2 interface{}) int8 {
	t1, t2 := v1.(complex128), v2.(complex128)
	if real(t1) > real(t2) {
		return 1
	} else if real(t1) < real(t2) {
		return -1
	}
	if imag(t1) > imag(t2) {
		return 1
	} else if imag(t1) < imag(t2) {
		return -1
	}
	return 0
}
// stringPtrComp orders two *string values by their pointees.
func stringPtrComp(v1, v2 interface{}) int8 {
	a, b := v1.(*string), v2.(*string)
	switch {
	case *a > *b:
		return 1
	case *a < *b:
		return -1
	}
	return 0
}

// intPtrComp orders two *int values by their pointees.
func intPtrComp(v1, v2 interface{}) int8 {
	a, b := v1.(*int), v2.(*int)
	switch {
	case *a > *b:
		return 1
	case *a < *b:
		return -1
	}
	return 0
}

// uintPtrComp orders two *uint values by their pointees.
func uintPtrComp(v1, v2 interface{}) int8 {
	a, b := v1.(*uint), v2.(*uint)
	switch {
	case *a > *b:
		return 1
	case *a < *b:
		return -1
	}
	return 0
}

// bytePtrComp orders two *byte values by their pointees.
func bytePtrComp(v1, v2 interface{}) int8 {
	a, b := v1.(*byte), v2.(*byte)
	switch {
	case *a > *b:
		return 1
	case *a < *b:
		return -1
	}
	return 0
}

// runePtrComp orders two *rune values by their pointees.
func runePtrComp(v1, v2 interface{}) int8 {
	a, b := v1.(*rune), v2.(*rune)
	switch {
	case *a > *b:
		return 1
	case *a < *b:
		return -1
	}
	return 0
}

// float32PtrComp orders two *float32 values by their pointees.
func float32PtrComp(v1, v2 interface{}) int8 {
	a, b := v1.(*float32), v2.(*float32)
	switch {
	case *a > *b:
		return 1
	case *a < *b:
		return -1
	}
	return 0
}

// float64PtrComp orders two *float64 values by their pointees.
func float64PtrComp(v1, v2 interface{}) int8 {
	a, b := v1.(*float64), v2.(*float64)
	switch {
	case *a > *b:
		return 1
	case *a < *b:
		return -1
	}
	return 0
}
// complex32PtrComp is the compare function installed by NewComplex32Ptr.
// NOTE(review): Go has no complex32 type, and this function asserts
// *float64 — it panics for any pointer-to-complex argument, so it only
// works if callers actually pass *float64 values. Confirm the intended
// element type before relying on trees built with NewComplex32Ptr.
func complex32PtrComp(v1, v2 interface{}) int8 {
	t1, t2 := v1.(*float64), v2.(*float64)
	if *t1 > *t2 {
		return 1
	} else if *t1 < *t2 {
		return -1
	} else {
		return 0
	}
}
// complex64PtrComp orders two *complex64 values by the pointees' real
// parts, breaking ties on the imaginary parts (complex numbers have no
// natural total order).
// BUG FIX: this previously type-asserted *float64, which panicked for
// the *complex64 values that NewComplex64Ptr advertises.
func complex64PtrComp(v1, v2 interface{}) int8 {
	t1, t2 := v1.(*complex64), v2.(*complex64)
	if real(*t1) > real(*t2) {
		return 1
	} else if real(*t1) < real(*t2) {
		return -1
	}
	if imag(*t1) > imag(*t2) {
		return 1
	} else if imag(*t1) < imag(*t2) {
		return -1
	}
	return 0
}
// comp is the default compare function installed by New. Values
// implementing CompareTo are compared via their own Comp method;
// otherwise values implementing fmt.Stringer are ordered by their
// String() output.
// NOTE(review): any other type falls through both cases and compares
// as equal (returns 0), so inserts would silently overwrite each other.
// The Stringer branch also asserts v2 as fmt.Stringer, which panics
// when v1 is a Stringer but v2 is not — confirm whether mixed-type
// trees are possible for callers of New.
func comp(v1, v2 interface{}) int8 {
	var c int8
	switch v1.(type) {
	case CompareTo:
		c = v1.(CompareTo).Comp(v2)
	case fmt.Stringer:
		s1, s2 := v1.(fmt.Stringer).String(), v2.(fmt.Stringer).String()
		if s1 > s2 {
			c = 1
		} else if s1 < s2 {
			c = -1
		} else {
			c = 0
		}
	}
	return c
}
format change
/*
Copyright (c) 2017 Ross Oreto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package tree
import (
"fmt"
)
// Btree represents an AVL tree
type Btree struct {
root *Node
values []interface{}
len int
compare Comp
}
// CompareTo interface to define the compare method used to insert and find values
type CompareTo interface {
Comp(val interface{}) int8
}
// Comp compare function expressed as a type
type Comp func(v1, v2 interface{}) int8
// Node represents a node in the tree with a value, left and right children, and a height/balance of the node.
type Node struct {
Value interface{}
left, right *Node
height int8
}
// New returns a new btree which expects types that implement the CompareTo or Stringer Interfaces
func New() *Btree { return new(Btree).Init() }
// NewInt returns a new btree which expects int types
func NewInt() *Btree { return new(Btree).InitWithCompare(intComp) }
// NewString returns a new btree which expects string types
func NewString() *Btree { return new(Btree).InitWithCompare(stringComp) }
// NewUint returns a new btree which expects uint types
func NewUint() *Btree { return new(Btree).InitWithCompare(uintComp) }
// NewFloat32 returns a new btree which expects float32 types
func NewFloat32() *Btree { return new(Btree).InitWithCompare(float32Comp) }
// NewFloat64 returns a new btree which expects float32 types
func NewFloat64() *Btree { return new(Btree).InitWithCompare(float64Comp) }
// NewUintptr returns a new btree which expects uintptr types
func NewUintptr() *Btree { return new(Btree).InitWithCompare(uintptrComp) }
// NewRune returns a new btree which expects rune types
func NewRune() *Btree { return new(Btree).InitWithCompare(runeComp) }
// NewByte returns a new btree which expects byte types
func NewByte() *Btree { return new(Btree).InitWithCompare(byteComp) }
// NewComplex64 returns a new btree which expects complex64 types
func NewComplex64() *Btree { return new(Btree).InitWithCompare(complex64Comp) }
// NewComplex128 returns a new btree which expects complex128 types
func NewComplex128() *Btree { return new(Btree).InitWithCompare(complex128Comp) }
// NewStringPtr returns a new btree which expects *string types
func NewStringPtr() *Btree { return new(Btree).InitWithCompare(stringPtrComp) }
// NewUintPtr returns a new btree which expects *uint types
func NewUintPtr() *Btree { return new(Btree).InitWithCompare(uintPtrComp) }
// NewIntPtr returns a new btree which expects *int types
func NewIntPtr() *Btree { return new(Btree).InitWithCompare(intPtrComp) }
// NewBytePtr returns a new btree which expects *byte types
func NewBytePtr() *Btree { return new(Btree).InitWithCompare(bytePtrComp) }
// NewRunePtr returns a new btree which expects *rune types
func NewRunePtr() *Btree { return new(Btree).InitWithCompare(runePtrComp) }
// NewFloat32Ptr returns a new btree which expects *flost32 types
func NewFloat32Ptr() *Btree { return new(Btree).InitWithCompare(float32PtrComp) }
// NewFloat64Ptr returns a new btree which expects *flost64 types
func NewFloat64Ptr() *Btree { return new(Btree).InitWithCompare(float64PtrComp) }
// NewComplex32Ptr returns a new btree which expects *complex32 types
func NewComplex32Ptr() *Btree { return new(Btree).InitWithCompare(complex32PtrComp) }
// NewComplex64Ptr returns a new btree which expects *complex64 types
func NewComplex64Ptr() *Btree { return new(Btree).InitWithCompare(complex64PtrComp) }
// Init initializes all values/clears the tree using the default compare method and returns the tree pointer
func (t *Btree) Init() *Btree {
t.root = nil
t.values = nil
t.len = 0
t.compare = comp
return t
}
// InitWithCompare initializes all values/clears the tree using the specified compare method and returns the tree pointer
func (t *Btree) InitWithCompare(compare Comp) *Btree {
t.Init()
t.compare = compare
return t
}
// String returns a string representation of the tree values
func (t *Btree) String() string {
return fmt.Sprint(t.Values())
}
// Empty returns true if the tree is empty
func (t *Btree) Empty() bool {
return t.root == nil
}
// NotEmpty returns true if the tree is not empty
func (t *Btree) NotEmpty() bool {
return t.root != nil
}
func (t *Btree) balance() int8 {
if t.root != nil {
return balance(t.root)
}
return 0
}
// Insert inserts a new value into the tree and returns the tree pointer
func (t *Btree) Insert(value interface{}) *Btree {
added := false
t.root = insert(t.root, value, &added, t.compare)
if added {
t.len++
}
t.values = nil
return t
}
func insert(n *Node, value interface{}, added *bool, compare Comp) *Node {
if n == nil {
*added = true
return (&Node{Value: value}).Init()
}
c := compare(value, n.Value)
if c > 0 {
n.right = insert(n.right, value, added, compare)
} else if c < 0 {
n.left = insert(n.left, value, added, compare)
} else {
n.Value = value
*added = false
return n
}
n.height = n.maxHeight() + 1
c = balance(n)
if c > 1 {
c = compare(value, n.left.Value)
if c < 0 {
return n.rotateRight()
} else if c > 0 {
n.left = n.left.rotateLeft()
return n.rotateRight()
}
} else if c < -1 {
c = compare(value, n.right.Value)
if c > 0 {
return n.rotateLeft()
} else if c < 0 {
n.right = n.right.rotateRight()
return n.rotateLeft()
}
}
return n
}
// InsertAll inserts all the values into the tree and returns the tree pointer
func (t *Btree) InsertAll(values []interface{}) *Btree {
for _, v := range values {
t.Insert(v)
}
return t
}
// Contains returns true if the tree contains the specified value
func (t *Btree) Contains(value interface{}) bool {
return t.Get(value) != nil
}
// ContainsAny returns true if the tree contains any of the values
func (t *Btree) ContainsAny(values []interface{}) bool {
for _, v := range values {
if t.Contains(v) {
return true
}
}
return false
}
// ContainsAll returns true if the tree contains all of the values
func (t *Btree) ContainsAll(values []interface{}) bool {
for _, v := range values {
if !t.Contains(v) {
return false
}
}
return true
}
// Get returns the node value associated with the search value
func (t *Btree) Get(value interface{}) interface{} {
var node *Node
if t.root != nil {
node = t.root.get(value, t.compare)
}
if node != nil {
return node.Value
}
return nil
}
// Len return the number of nodes in the tree
func (t *Btree) Len() int {
return t.len
}
// Head returns the first (left-most, smallest) value in the tree, or
// nil if the tree is empty.
func (t *Btree) Head() interface{} {
	if t.root == nil {
		return nil
	}
	node := t.root
	for node.left != nil {
		node = node.left
	}
	// node cannot be nil here: the walk started from a non-nil root.
	// The old `if node == nil { ... }` fallback was unreachable dead
	// code — and would have dereferenced a nil pointer had it ever run.
	return node.Value
}

// Tail returns the last (right-most, largest) value in the tree, or
// nil if the tree is empty.
func (t *Btree) Tail() interface{} {
	if t.root == nil {
		return nil
	}
	node := t.root
	for node.right != nil {
		node = node.right
	}
	return node.Value
}
// Values returns a slice of all the values in tree in order
func (t *Btree) Values() []interface{} {
if t.values == nil {
t.values = make([]interface{}, t.len)
t.Ascend(func(n *Node, i int) bool {
t.values[i] = n.Value
return true
})
}
return t.values
}
// Delete deletes the node from the tree associated with the search value
func (t *Btree) Delete(value interface{}) *Btree {
deleted := false
t.root = deleteNode(t.root, value, &deleted, t.compare)
if deleted {
t.len--
}
t.values = nil
return t
}
// DeleteAll deletes the nodes from the tree associated with the search values
func (t *Btree) DeleteAll(values []interface{}) *Btree {
for _, v := range values {
t.Delete(v)
}
return t
}
// deleteNode removes value from the subtree rooted at n, sets *deleted
// to true when a node was actually removed, rebalances (AVL) on the
// way back up the recursion, and returns the new subtree root.
func deleteNode(n *Node, value interface{}, deleted *bool, compare Comp) *Node {
	if n == nil {
		return n
	}
	c := compare(value, n.Value)
	if c < 0 {
		n.left = deleteNode(n.left, value, deleted, compare)
	} else if c > 0 {
		n.right = deleteNode(n.right, value, deleted, compare)
	} else {
		// Found the node to remove.
		if n.left == nil {
			t := n.right
			n.Init()
			// BUG FIX: the leaf/single-child branches previously
			// returned without setting *deleted, so Btree.Delete never
			// decremented len for those removals.
			*deleted = true
			return t
		} else if n.right == nil {
			t := n.left
			n.Init()
			*deleted = true
			return t
		}
		// Two children: copy the in-order successor's value up, then
		// delete the successor from the right subtree.
		t := n.right.min()
		n.Value = t.Value
		n.right = deleteNode(n.right, t.Value, deleted, compare)
		*deleted = true
	}
	// Re-balance: recompute this node's height and rotate if its
	// balance factor left [-1, 1]. (The old `if n == nil` re-check
	// here was dead code — n cannot become nil at this point.)
	n.height = n.maxHeight() + 1
	bal := balance(n)
	if bal > 1 {
		if balance(n.left) >= 0 {
			return n.rotateRight()
		}
		n.left = n.left.rotateLeft()
		return n.rotateRight()
	} else if bal < -1 {
		if balance(n.right) <= 0 {
			return n.rotateLeft()
		}
		n.right = n.right.rotateRight()
		return n.rotateLeft()
	}
	return n
}
// Pop deletes the last node from the tree and returns its value
func (t *Btree) Pop() interface{} {
value := t.Tail()
if value != nil {
t.Delete(value)
}
return value
}
// Pull deletes the first node from the tree and returns its value
func (t *Btree) Pull() interface{} {
value := t.Head()
if value != nil {
t.Delete(value)
}
return value
}
// NodeIterator expresses the iterator function used for traversals
type NodeIterator func(n *Node, i int) bool
// Ascend performs an ascending order traversal of the tree calling the iterator function on each node
// the iterator will continue as long as the NodeIterator returns true
func (t *Btree) Ascend(iterator NodeIterator) {
var i int
if t.root != nil {
t.root.iterate(iterator, &i, true)
}
}
// Descend performs a descending order traversal of the tree using the iterator
// the iterator will continue as long as the NodeIterator returns true
func (t *Btree) Descend(iterator NodeIterator) {
var i int
if t.root != nil {
t.root.rIterate(iterator, &i, true)
}
}
// Debug prints out useful debug information about the tree for debugging purposes
func (t *Btree) Debug() {
fmt.Println("----------------------------------------------------------------------------------------------")
if t.Empty() {
fmt.Println("tree is empty")
} else {
fmt.Println(t.Len(), "elements")
}
t.Ascend(func(n *Node, i int) bool {
if t.root.Value == n.Value {
fmt.Print("ROOT ** ")
}
n.Debug()
return true
})
fmt.Println("----------------------------------------------------------------------------------------------")
}
// Init initializes the values of the node or clears the node and returns the node pointer
func (n *Node) Init() *Node {
n.height = 1
n.left = nil
n.right = nil
return n
}
// String returns a string representing the node
func (n *Node) String() string {
return fmt.Sprint(n.Value)
}
// Debug prints out useful debug information about the tree node for debugging purposes
func (n *Node) Debug() {
var children string
if n.left == nil && n.right == nil {
children = "no children |"
} else if n.left != nil && n.right != nil {
children = fmt.Sprint("left child:", n.left.String(), " right child:", n.right.String())
} else if n.right != nil {
children = fmt.Sprint("right child:", n.right.String())
} else {
children = fmt.Sprint("left child:", n.left.String())
}
fmt.Println(n.String(), "|", "height", n.height, "|", "balance", balance(n), "|", children)
}
func height(n *Node) int8 {
if n != nil {
return n.height
}
return 0
}
func balance(n *Node) int8 {
if n == nil {
return 0
}
return height(n.left) - height(n.right)
}
func (n *Node) get(val interface{}, compare Comp) *Node {
var node *Node
c := compare(val, n.Value)
if c < 0 {
if n.left != nil {
node = n.left.get(val, compare)
}
} else if c > 0 {
if n.right != nil {
node = n.right.get(val, compare)
}
} else {
node = n
}
return node
}
func (n *Node) rotateRight() *Node {
l := n.left
// Rotation
l.right, n.left = n, l.right
// update heights
n.height = n.maxHeight() + 1
l.height = l.maxHeight() + 1
return l
}
func (n *Node) rotateLeft() *Node {
r := n.right
// Rotation
r.left, n.right = n, r.left
// update heights
n.height = n.maxHeight() + 1
r.height = r.maxHeight() + 1
return r
}
func (n *Node) iterate(iterator NodeIterator, i *int, cont bool) {
if n != nil && cont {
n.left.iterate(iterator, i, cont)
cont = iterator(n, *i)
*i++
n.right.iterate(iterator, i, cont)
}
}
// rIterate visits the subtree rooted at n in descending order, calling
// iterator with each node and the running index *i.
// BUG FIX: the recursive calls previously used iterate (the ascending
// traversal), so only the root-level ordering was reversed and Descend
// emitted subtrees in ascending order; recurse with rIterate instead.
func (n *Node) rIterate(iterator NodeIterator, i *int, cont bool) {
	if n != nil && cont {
		n.right.rIterate(iterator, i, cont)
		cont = iterator(n, *i)
		*i++
		n.left.rIterate(iterator, i, cont)
	}
}
// min follows left links to the smallest node in the subtree rooted at n.
func (n *Node) min() *Node {
	node := n
	for node.left != nil {
		node = node.left
	}
	return node
}
// maxHeight returns the larger of the two child subtree heights.
func (n *Node) maxHeight() int8 {
	lh := height(n.left)
	rh := height(n.right)
	if lh > rh {
		return lh
	}
	return rh
}
// intComp orders two int values boxed in interfaces: 1 if v1 > v2, -1 if
// v1 < v2, 0 when equal. Panics if either value is not an int.
func intComp(v1, v2 interface{}) int8 {
	a, b := v1.(int), v2.(int)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// stringComp orders two string values boxed in interfaces: 1 if v1 > v2,
// -1 if v1 < v2, 0 when equal. Panics if either value is not a string.
func stringComp(v1, v2 interface{}) int8 {
	a, b := v1.(string), v2.(string)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// uintComp orders two uint values boxed in interfaces: 1 if v1 > v2, -1 if
// v1 < v2, 0 when equal. Panics if either value is not a uint.
func uintComp(v1, v2 interface{}) int8 {
	a, b := v1.(uint), v2.(uint)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// float32Comp orders two float32 values boxed in interfaces: 1 if v1 > v2,
// -1 if v1 < v2, 0 otherwise. Panics if either value is not a float32.
func float32Comp(v1, v2 interface{}) int8 {
	a, b := v1.(float32), v2.(float32)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// float64Comp orders two float64 values boxed in interfaces: 1 if v1 > v2,
// -1 if v1 < v2, 0 otherwise. Panics if either value is not a float64.
func float64Comp(v1, v2 interface{}) int8 {
	a, b := v1.(float64), v2.(float64)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// uintptrComp orders two uintptr values boxed in interfaces: 1 if v1 > v2,
// -1 if v1 < v2, 0 when equal. Panics if either value is not a uintptr.
func uintptrComp(v1, v2 interface{}) int8 {
	a, b := v1.(uintptr), v2.(uintptr)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// byteComp orders two byte values boxed in interfaces: 1 if v1 > v2, -1 if
// v1 < v2, 0 when equal. Panics if either value is not a byte.
func byteComp(v1, v2 interface{}) int8 {
	a, b := v1.(byte), v2.(byte)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// runeComp orders two rune values boxed in interfaces: 1 if v1 > v2, -1 if
// v1 < v2, 0 when equal. Panics if either value is not a rune.
func runeComp(v1, v2 interface{}) int8 {
	a, b := v1.(rune), v2.(rune)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// complex64Comp orders two complex64 values boxed in interfaces by their
// real parts, breaking ties on the imaginary parts (complex numbers have
// no built-in ordering). Returns 1, -1, or 0. Panics if either value is
// not a complex64.
// The original asserted float64 here despite the name, so passing an
// actual complex64 value panicked.
func complex64Comp(v1, v2 interface{}) int8 {
	t1, t2 := v1.(complex64), v2.(complex64)
	switch {
	case real(t1) > real(t2):
		return 1
	case real(t1) < real(t2):
		return -1
	case imag(t1) > imag(t2):
		return 1
	case imag(t1) < imag(t2):
		return -1
	}
	return 0
}
// complex128Comp orders two complex128 values boxed in interfaces by their
// real parts, breaking ties on the imaginary parts (complex numbers have
// no built-in ordering). Returns 1, -1, or 0. Panics if either value is
// not a complex128.
// The original asserted float64 here despite the name, so passing an
// actual complex128 value panicked.
func complex128Comp(v1, v2 interface{}) int8 {
	t1, t2 := v1.(complex128), v2.(complex128)
	switch {
	case real(t1) > real(t2):
		return 1
	case real(t1) < real(t2):
		return -1
	case imag(t1) > imag(t2):
		return 1
	case imag(t1) < imag(t2):
		return -1
	}
	return 0
}
// stringPtrComp orders two *string values boxed in interfaces by their
// pointed-to strings: 1, -1, or 0. Panics on a non-*string or nil pointer.
func stringPtrComp(v1, v2 interface{}) int8 {
	a, b := *v1.(*string), *v2.(*string)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// intPtrComp orders two *int values boxed in interfaces by their
// pointed-to ints: 1, -1, or 0. Panics on a non-*int or nil pointer.
func intPtrComp(v1, v2 interface{}) int8 {
	a, b := *v1.(*int), *v2.(*int)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// uintPtrComp orders two *uint values boxed in interfaces by their
// pointed-to uints: 1, -1, or 0. Panics on a non-*uint or nil pointer.
func uintPtrComp(v1, v2 interface{}) int8 {
	a, b := *v1.(*uint), *v2.(*uint)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// bytePtrComp orders two *byte values boxed in interfaces by their
// pointed-to bytes: 1, -1, or 0. Panics on a non-*byte or nil pointer.
func bytePtrComp(v1, v2 interface{}) int8 {
	a, b := *v1.(*byte), *v2.(*byte)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// runePtrComp orders two *rune values boxed in interfaces by their
// pointed-to runes: 1, -1, or 0. Panics on a non-*rune or nil pointer.
func runePtrComp(v1, v2 interface{}) int8 {
	a, b := *v1.(*rune), *v2.(*rune)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// float32PtrComp orders two *float32 values boxed in interfaces by their
// pointed-to floats: 1, -1, or 0. Panics on a non-*float32 or nil pointer.
func float32PtrComp(v1, v2 interface{}) int8 {
	a, b := *v1.(*float32), *v2.(*float32)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// float64PtrComp orders two *float64 values boxed in interfaces by their
// pointed-to floats: 1, -1, or 0. Panics on a non-*float64 or nil pointer.
func float64PtrComp(v1, v2 interface{}) int8 {
	a, b := *v1.(*float64), *v2.(*float64)
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	}
	return 0
}
// complex32PtrComp compares two *float64 values through their pointees.
// NOTE(review): despite the "complex" name (and "complex32" is not a Go
// type), this comparator asserts *float64 and will panic if handed complex
// pointers — confirm the intended argument type against the call sites.
func complex32PtrComp(v1, v2 interface{}) int8 {
	t1, t2 := v1.(*float64), v2.(*float64)
	if *t1 > *t2 {
		return 1
	} else if *t1 < *t2 {
		return -1
	} else {
		return 0
	}
}
// complex64PtrComp compares two *float64 values through their pointees.
// NOTE(review): despite the "complex" name, this comparator asserts
// *float64 and will panic if handed a *complex64 — confirm the intended
// argument type against the call sites.
func complex64PtrComp(v1, v2 interface{}) int8 {
	t1, t2 := v1.(*float64), v2.(*float64)
	if *t1 > *t2 {
		return 1
	} else if *t1 < *t2 {
		return -1
	} else {
		return 0
	}
}
// comp compares two values when v1 implements either the CompareTo
// interface (delegating to its Comp method) or fmt.Stringer (ordering by
// the string forms). Values of any other type compare as equal (0).
func comp(v1, v2 interface{}) int8 {
	switch t := v1.(type) {
	case CompareTo:
		return t.Comp(v2)
	case fmt.Stringer:
		s1, s2 := t.String(), v2.(fmt.Stringer).String()
		switch {
		case s1 > s2:
			return 1
		case s1 < s2:
			return -1
		}
	}
	return 0
}
|
// Copyright 2018 Adam Tauber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package colly
import (
"strings"
"github.com/PuerkitoBio/goquery"
"golang.org/x/net/html"
)
// HTMLElement is the representation of a HTML tag.
type HTMLElement struct {
	// Name is the name of the tag
	Name string
	// Text is the text content of the element's node
	Text string
	// attributes holds the raw attributes of the underlying html.Node
	attributes []html.Attribute
	// Request is the request object of the element's HTML document
	Request *Request
	// Response is the Response object of the element's HTML document
	Response *Response
	// DOM is the goquery parsed DOM object of the page. DOM is relative
	// to the current HTMLElement
	DOM *goquery.Selection
	// Index stores the position of the current element within all the elements matched by an OnHTML callback
	Index int
}
// NewHTMLElementFromSelectionNode creates a HTMLElement from a goquery.Selection Node.
// idx is the element's position among all matches of the enclosing callback.
func NewHTMLElementFromSelectionNode(resp *Response, s *goquery.Selection, n *html.Node, idx int) *HTMLElement {
	return &HTMLElement{
		Name: n.Data, // tag name comes from the node's Data field
		Request: resp.Request,
		Response: resp,
		Text: goquery.NewDocumentFromNode(n).Text(),
		DOM: s,
		Index: idx,
		attributes: n.Attr,
	}
}
// Attr looks up attribute k on the element and returns its value, or the
// empty string when no such attribute exists.
func (h *HTMLElement) Attr(k string) string {
	for i := range h.attributes {
		if h.attributes[i].Key == k {
			return h.attributes[i].Val
		}
	}
	return ""
}
// ChildText returns the concatenated text content of the elements matching
// goquerySelector, with surrounding whitespace stripped.
func (h *HTMLElement) ChildText(goquerySelector string) string {
	text := h.DOM.Find(goquerySelector).Text()
	return strings.TrimSpace(text)
}
// ChildAttr returns the whitespace-stripped value of attrName on the first
// element matching goquerySelector, or "" when the attribute is absent.
func (h *HTMLElement) ChildAttr(goquerySelector, attrName string) string {
	attr, ok := h.DOM.Find(goquerySelector).Attr(attrName)
	if !ok {
		return ""
	}
	return strings.TrimSpace(attr)
}
// ChildAttrs collects the whitespace-stripped values of attrName from every
// element matching goquerySelector; elements without the attribute are
// skipped.
func (h *HTMLElement) ChildAttrs(goquerySelector, attrName string) []string {
	var res []string
	h.DOM.Find(goquerySelector).Each(func(_ int, s *goquery.Selection) {
		attr, ok := s.Attr(attrName)
		if !ok {
			return
		}
		res = append(res, strings.TrimSpace(attr))
	})
	return res
}
// ForEach iterates over the elements matched by the first argument
// and calls the callback function on every HTMLElement match.
func (h *HTMLElement) ForEach(goquerySelector string, callback func(int, *HTMLElement)) {
	// i numbers matches across all selections, not per selection.
	i := 0
	h.DOM.Find(goquerySelector).Each(func(_ int, s *goquery.Selection) {
		for _, n := range s.Nodes {
			callback(i, NewHTMLElementFromSelectionNode(h.Response, s, n, i))
			i++
		}
	})
}
// ForEachWithBreak iterates over the elements matched by the first argument
// and calls the callback function on every HTMLElement match.
// It is identical to ForEach except that it is possible to break
// out of the loop by returning false in the callback function. It returns the
// current Selection object.
func (h *HTMLElement) ForEachWithBreak(goquerySelector string, callback func(int, *HTMLElement) bool) {
	// i numbers matches across all selections, not per selection.
	i := 0
	h.DOM.Find(goquerySelector).EachWithBreak(func(_ int, s *goquery.Selection) bool {
		for _, n := range s.Nodes {
			// Within a selection, nodes are tried until the callback returns
			// true; if no node's callback returns true, the whole iteration
			// stops (the outer func returns false).
			if callback(i, NewHTMLElementFromSelectionNode(h.Response, s, n, i)) {
				i++
				return true
			}
		}
		return false
	})
}
Added a ChildTexts method to HTMLElement: it returns the stripped text content of all matching elements as a []string.
// Copyright 2018 Adam Tauber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package colly
import (
"strings"
"github.com/PuerkitoBio/goquery"
"golang.org/x/net/html"
)
// HTMLElement is the representation of a HTML tag.
type HTMLElement struct {
	// Name is the name of the tag
	Name string
	// Text is the text content of the element's node
	Text string
	// attributes holds the raw attributes of the underlying html.Node
	attributes []html.Attribute
	// Request is the request object of the element's HTML document
	Request *Request
	// Response is the Response object of the element's HTML document
	Response *Response
	// DOM is the goquery parsed DOM object of the page. DOM is relative
	// to the current HTMLElement
	DOM *goquery.Selection
	// Index stores the position of the current element within all the elements matched by an OnHTML callback
	Index int
}
// NewHTMLElementFromSelectionNode creates a HTMLElement from a goquery.Selection Node.
// idx is the element's position among all matches of the enclosing callback.
func NewHTMLElementFromSelectionNode(resp *Response, s *goquery.Selection, n *html.Node, idx int) *HTMLElement {
	return &HTMLElement{
		Name: n.Data, // tag name comes from the node's Data field
		Request: resp.Request,
		Response: resp,
		Text: goquery.NewDocumentFromNode(n).Text(),
		DOM: s,
		Index: idx,
		attributes: n.Attr,
	}
}
// Attr looks up attribute k on the element and returns its value, or the
// empty string when no such attribute exists.
func (h *HTMLElement) Attr(k string) string {
	for i := range h.attributes {
		if h.attributes[i].Key == k {
			return h.attributes[i].Val
		}
	}
	return ""
}
// ChildText returns the concatenated text content of the elements matching
// goquerySelector, with surrounding whitespace stripped.
func (h *HTMLElement) ChildText(goquerySelector string) string {
	text := h.DOM.Find(goquerySelector).Text()
	return strings.TrimSpace(text)
}
// ChildTexts collects the whitespace-stripped text content of every element
// matching goquerySelector, one entry per match.
func (h *HTMLElement) ChildTexts(goquerySelector string) []string {
	var texts []string
	h.DOM.Find(goquerySelector).Each(func(_ int, s *goquery.Selection) {
		texts = append(texts, strings.TrimSpace(s.Text()))
	})
	return texts
}
// ChildAttr returns the whitespace-stripped value of attrName on the first
// element matching goquerySelector, or "" when the attribute is absent.
func (h *HTMLElement) ChildAttr(goquerySelector, attrName string) string {
	attr, ok := h.DOM.Find(goquerySelector).Attr(attrName)
	if !ok {
		return ""
	}
	return strings.TrimSpace(attr)
}
// ChildAttrs collects the whitespace-stripped values of attrName from every
// element matching goquerySelector; elements without the attribute are
// skipped.
func (h *HTMLElement) ChildAttrs(goquerySelector, attrName string) []string {
	var res []string
	h.DOM.Find(goquerySelector).Each(func(_ int, s *goquery.Selection) {
		attr, ok := s.Attr(attrName)
		if !ok {
			return
		}
		res = append(res, strings.TrimSpace(attr))
	})
	return res
}
// ForEach iterates over the elements matched by the first argument
// and calls the callback function on every HTMLElement match.
func (h *HTMLElement) ForEach(goquerySelector string, callback func(int, *HTMLElement)) {
	// i numbers matches across all selections, not per selection.
	i := 0
	h.DOM.Find(goquerySelector).Each(func(_ int, s *goquery.Selection) {
		for _, n := range s.Nodes {
			callback(i, NewHTMLElementFromSelectionNode(h.Response, s, n, i))
			i++
		}
	})
}
// ForEachWithBreak iterates over the elements matched by the first argument
// and calls the callback function on every HTMLElement match.
// It is identical to ForEach except that it is possible to break
// out of the loop by returning false in the callback function. It returns the
// current Selection object.
func (h *HTMLElement) ForEachWithBreak(goquerySelector string, callback func(int, *HTMLElement) bool) {
	// i numbers matches across all selections, not per selection.
	i := 0
	h.DOM.Find(goquerySelector).EachWithBreak(func(_ int, s *goquery.Selection) bool {
		for _, n := range s.Nodes {
			// Within a selection, nodes are tried until the callback returns
			// true; if no node's callback returns true, the whole iteration
			// stops (the outer func returns false).
			if callback(i, NewHTMLElementFromSelectionNode(h.Response, s, n, i)) {
				i++
				return true
			}
		}
		return false
	})
}
|
package http
import (
"bytes"
"crypto/md5"
"encoding/hex"
"encoding/json"
"github.com/astaxie/beego/orm"
"github.com/bitly/go-simplejson"
"github.com/Cepave/query/g"
"io"
"io/ioutil"
"log"
"net/http"
"strconv"
"time"
)
// Endpoint mirrors a row of the "endpoint" table: a monitored host with its
// creation/modification timestamps and network address.
type Endpoint struct {
	Id int
	Endpoint string
	Ts int64
	T_create string
	T_modify string
	Ipv4 string
	Port string
}
// Grp mirrors a row of the falcon_portal "grp" (host group) table.
type Grp struct {
	Id int
	Grp_name string
	Create_user string
	Create_at string
	Come_from int
}
// Tpl mirrors a row of the falcon_portal "tpl" (template) table.
type Tpl struct {
	Id int
	Tpl_name string
	Parent_id int
	Action_id int
	Create_user string
	Create_at string
}
// Grp_tpl mirrors the group-to-template binding table.
type Grp_tpl struct {
	Id int
	Grp_id int
	Tpl_id int
	Bind_user string
}
// Grp_host mirrors the group-to-host binding table.
type Grp_host struct {
	Id int
	Grp_id int
	Host_id int
}
// getNow returns the current local time formatted as
// "2006-01-02 15:04:05" (MySQL DATETIME layout).
func getNow() string {
	return time.Now().Format("2006-01-02 15:04:05")
}
// getHostId extracts the "hostid" request parameter as a string; it returns
// the empty string when the key is absent or its value is nil.
func getHostId(params map[string]interface{}) string {
	if val, ok := params["hostid"]; ok && val != nil {
		return val.(string)
	}
	return ""
}
// getHostName extracts the host name from the "host" parameter, falling
// back to "name"; it returns the empty string when neither is present.
// The original only consulted "name" when the "host" key existed with a
// nil value, making the fallback unreachable for requests that omit
// "host" entirely.
func getHostName(params map[string]interface{}) string {
	if val, ok := params["host"]; ok && val != nil {
		return val.(string)
	}
	if val, ok := params["name"]; ok && val != nil {
		return val.(string)
	}
	return ""
}
// checkHostExist looks up a host by the "hostid" parameter (preferred) or
// by host name. Lookup failures are appended to result["error"]; a zero
// Endpoint is returned when no host is found.
func checkHostExist(params map[string]interface{}, result map[string]interface{}) Endpoint {
	var endpoint Endpoint
	o := orm.NewOrm()
	hostId := getHostId(params)
	hostName := getHostName(params)
	if hostId != "" {
		hostIdint, err := strconv.Atoi(hostId)
		if err != nil {
			setError(err.Error(), result)
		} else {
			// Assign to the outer endpoint: the original used `endpoint :=`
			// here, shadowing the variable returned below, so lookups by id
			// always returned the zero Endpoint even when o.Read succeeded.
			endpoint = Endpoint{Id: hostIdint}
			if err := o.Read(&endpoint); err != nil {
				setError(err.Error(), result)
			}
		}
	} else {
		err := o.QueryTable("endpoint").Filter("endpoint", hostName).One(&endpoint)
		if err == orm.ErrMultiRows {
			// Have multiple records
			setError("returned multiple rows", result)
		} else if err == orm.ErrNoRows {
			// No result
			setError("host not found", result)
		}
	}
	return endpoint
}
// setError logs msg and appends it to result["error"], which must already
// hold a []string.
func setError(msg string, result map[string]interface{}) {
	log.Println("Error =", msg)
	errs := result["error"].([]string)
	result["error"] = append(errs, msg)
}
// bindGroup replaces the host-to-group bindings for hostId with the groups
// listed in params["groups"]. Database and conversion errors are collected
// into result["error"]; the last processed group id is stored in
// args["groupId"] for the subsequent bindTemplate call.
func bindGroup(hostId int64, params map[string]interface{}, args map[string]string, result map[string]interface{}) {
	if _, ok := params["groups"]; !ok {
		return
	}
	o := orm.NewOrm()
	o.Using("falcon_portal")
	// Drop existing bindings for this host before re-binding.
	res, err := o.Raw("DELETE FROM falcon_portal.grp_host WHERE host_id=?", hostId).Exec()
	if err != nil {
		setError(err.Error(), result)
	} else if num, _ := res.RowsAffected(); num > 0 {
		log.Println("mysql row affected nums =", num)
	}
	groups := params["groups"].([]interface{})
	for _, group := range groups {
		groupId := group.(map[string]interface{})["groupid"].(string)
		args["groupId"] = groupId
		grp_id, err := strconv.Atoi(groupId)
		if err != nil {
			// The original discarded this conversion error silently.
			setError(err.Error(), result)
			continue
		}
		// A COUNT query must be read with QueryRow; the original ran the
		// SELECT through Exec and inspected RowsAffected, which does not
		// report the selected row count.
		var count int64
		err = o.Raw("SELECT COUNT(*) FROM falcon_portal.grp_host WHERE host_id=? AND grp_id=?", hostId, grp_id).QueryRow(&count)
		if err != nil {
			setError(err.Error(), result)
			continue
		}
		if count > 0 {
			log.Println("Record existed. count =", count)
			continue
		}
		// Record not existed. Insert new one.
		grp_host := Grp_host{
			Grp_id: grp_id,
			Host_id: int(hostId),
		}
		log.Println("grp_host =", grp_host)
		if _, err = o.Insert(&grp_host); err != nil {
			setError(err.Error(), result)
		}
	}
}
// bindTemplate binds the group recorded in args["groupId"] to every
// template listed in params["templates"]. Database and conversion errors
// are collected into result["error"]; the last processed template id is
// stored in args["templateId"].
func bindTemplate(params map[string]interface{}, args map[string]string, result map[string]interface{}) {
	if _, ok := params["templates"]; !ok {
		return
	}
	o := orm.NewOrm()
	o.Using("falcon_portal")
	grp_id, _ := strconv.Atoi(args["groupId"])
	templates := params["templates"].([]interface{})
	for _, template := range templates {
		templateId := template.(map[string]interface{})["templateid"].(string)
		args["templateId"] = templateId
		tpl_id, err := strconv.Atoi(templateId)
		if err != nil {
			// The original discarded this conversion error silently.
			setError(err.Error(), result)
			continue
		}
		// A COUNT query must be read with QueryRow; the original ran the
		// SELECT through Exec and inspected RowsAffected, which does not
		// report the selected row count.
		var count int64
		err = o.Raw("SELECT COUNT(*) FROM falcon_portal.grp_tpl WHERE grp_id=? AND tpl_id=?", grp_id, tpl_id).QueryRow(&count)
		if err != nil {
			setError(err.Error(), result)
			continue
		}
		if count > 0 {
			log.Println("Record existed. count =", count)
			continue
		}
		// Record not existed. Insert new one.
		grp_tpl := Grp_tpl{
			Grp_id: grp_id,
			Tpl_id: tpl_id,
			Bind_user: "zabbix",
		}
		log.Println("grp_tpl =", grp_tpl)
		if _, err = o.Insert(&grp_tpl); err != nil {
			setError(err.Error(), result)
		}
	}
}
/**
* @function name: func addHost(params map[string]interface{}, args map[string]string, result map[string]interface{})
* @description: This function inserts a host to "endpoint" table and binds the host to its group and template.
* @related issues: OWL-257, OWL-240
* @param: params map[string]interface{}
* @param: args map[string]string
* @param: result map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 12/21/2015
* @last modified: 01/01/2016
* @called by: func hostCreate(nodes map[string]interface{})
* func hostUpdate(nodes map[string]interface{})
*/
func addHost(params map[string]interface{}, args map[string]string, result map[string]interface{}) {
	hostName := getHostName(params)
	if len(hostName) > 0 {
		args["host"] = hostName
		ip := ""
		port := ""
		// Only the first entry of "interfaces" supplies the ip/port pair.
		if _, ok := params["interfaces"]; ok {
			interfaces := params["interfaces"].([]interface{})
			for i, arg := range interfaces {
				if i == 0 {
					ip = arg.(map[string]interface{})["ip"].(string)
					port = arg.(map[string]interface{})["port"].(string)
					args["ip"] = ip
					args["port"] = port
				}
			}
		}
		t := time.Now()
		timestamp := t.Unix()
		log.Println(timestamp)
		now := getNow()
		endpoint := Endpoint{
			Endpoint: hostName,
			Ts: timestamp,
			T_create: now,
			T_modify: now,
			Ipv4: ip,
		}
		if len(port) > 0 {
			endpoint.Port = port
		}
		log.Println("endpoint =", endpoint)
		o := orm.NewOrm()
		hostId, err := o.Insert(&endpoint)
		if err != nil {
			setError(err.Error(), result)
		} else {
			// On success, bind the new host to its group and template and
			// report the new id back Zabbix-style in result["hostids"].
			bindGroup(hostId, params, args, result)
			hostid := strconv.Itoa(int(hostId))
			hostids := [1]string{string(hostid)}
			result["hostids"] = hostids
			bindTemplate(params, args, result)
		}
	} else {
		setError("host name can not be null.", result)
	}
}
/**
* @function name: func hostCreate(nodes map[string]interface{})
* @description: This function gets host data for database insertion.
* @related issues: OWL-257, OWL-240, OWL-093, OWL-086, OWL-085
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/11/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
func hostCreate(nodes map[string]interface{}) {
	log.Println("func hostCreate()")
	params := nodes["params"].(map[string]interface{})
	// result["error"] accumulates error strings via setError.
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	endpoint := checkHostExist(params, result)
	if endpoint.Id > 0 {
		setError("host name existed: " + endpoint.Endpoint, result)
	} else {
		args := map[string]string {}
		addHost(params, args, result)
		// Optional inventory block: both MAC address fields concatenated.
		if _, ok := params["inventory"]; ok {
			inventory := params["inventory"].(map[string]interface{})
			macAddr := inventory["macaddress_a"].(string) + inventory["macaddress_b"].(string)
			args["macAddr"] = macAddr
		}
		log.Println("args =", args)
	}
	nodes["result"] = result
}
// unbindGroup deletes every grp_host row bound to hostId, recording any
// database error in result["error"].
func unbindGroup(hostId string, result map[string]interface{}) {
	o := orm.NewOrm()
	o.Using("falcon_portal")
	sql := "DELETE FROM grp_host WHERE host_id = ?"
	res, err := o.Raw(sql, hostId).Exec()
	if err != nil {
		// The original fell through to res.RowsAffected() after an error,
		// dereferencing a nil result and panicking.
		setError(err.Error(), result)
		return
	}
	num, _ := res.RowsAffected()
	log.Println("mysql row affected nums =", num)
}
/**
* @function name: func removeHost(hostIds []string, result map[string]interface{})
* @description: This function deletes host from "endpoint" table.
* @related issues: OWL-241
* @param: hostIds []string
* @param: result map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 01/01/2016
* @last modified: 01/01/2016
* @called by: func hostDelete(nodes map[string]interface{})
*/
func removeHost(hostIds []string, result map[string]interface{}) {
	o := orm.NewOrm()
	// hostids collects the ids that were actually deleted.
	hostids := []string{}
	for _, hostId := range hostIds {
		// Non-numeric ids are skipped silently.
		if id, err := strconv.Atoi(hostId); err == nil {
			num, err := o.Delete(&Endpoint{Id: id})
			if err != nil {
				setError(err.Error(), result)
			} else {
				if num > 0 {
					log.Println("RowsDeleted =", num)
					// Also drop the deleted host's group bindings.
					unbindGroup(hostId, result)
					hostids = append(hostids, hostId)
				}
			}
		}
	}
	result["hostids"] = hostids
}
/**
* @function name: func hostDelete(nodes map[string]interface{})
* @description: This function handles host.delete API requests.
* @related issues: OWL-257, OWL-241, OWL-093, OWL-086, OWL-085
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/11/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
func hostDelete(nodes map[string]interface{}) {
	params := nodes["params"].([]interface {})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	hostIds := []string{}
	hostId := ""
	for _, param := range params {
		if val, ok := param.(map[string]interface{})["host_id"]; ok {
			if val != nil {
				// host_id arrives as a json.Number; its literal text is the id.
				hostId = string(val.(json.Number))
				hostIds = append(hostIds, hostId)
			}
		}
	}
	removeHost(hostIds, result)
	nodes["result"] = result
}
/**
* @function name: func hostGet(nodes map[string]interface{})
* @description: This function gets existed host data.
* @related issues: OWL-257, OWL-254
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 12/29/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
func hostGet(nodes map[string]interface{}) {
	log.Println("func hostGet()")
	params := nodes["params"].(map[string]interface{})
	items := []interface{}{}
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	hostNames := []string{}
	queryAll := false
	// filter.host is a list of host names; the sentinel "_all_" switches to
	// a full-table query.
	if val, ok := params["filter"]; ok {
		filter := val.(map[string]interface{})
		if val, ok = filter["host"]; ok {
			for _, hostName := range val.([]interface{}) {
				if hostName.(string) == "_all_" {
					queryAll = true
				} else {
					hostNames = append(hostNames, hostName.(string))
				}
			}
		}
	}
	o := orm.NewOrm()
	if queryAll {
		var endpoints []*Endpoint
		num, err := o.QueryTable("endpoint").All(&endpoints)
		if err != nil {
			setError(err.Error(), result)
		} else {
			log.Println("num =", num)
			for _, endpoint := range endpoints {
				item := map[string]string {}
				// One group id per host; a missing binding leaves grp_id 0.
				var grp_id int
				o.Raw("SELECT grp_id FROM falcon_portal.grp_host WHERE host_id=?", endpoint.Id).QueryRow(&grp_id)
				item["hostid"] = strconv.Itoa(endpoint.Id)
				item["hostname"] = endpoint.Endpoint
				item["ip"] = endpoint.Ipv4
				item["groupid"] = strconv.Itoa(grp_id)
				items = append(items, item)
			}
		}
	} else {
		ip := ""
		hostId := ""
		groupId := ""
		var endpoint Endpoint
		for _, hostName := range hostNames {
			item := map[string]string {}
			// Reset per-host fields; unresolved hosts produce empty strings.
			ip = ""
			hostId = ""
			groupId = ""
			err := o.QueryTable("endpoint").Filter("endpoint", hostName).One(&endpoint)
			if err == orm.ErrMultiRows {
				setError("returned multiple rows", result)
			} else if err == orm.ErrNoRows {
				setError("host not found", result)
			} else if endpoint.Id > 0 {
				ip = endpoint.Ipv4
				var grp_id int
				o.Raw("SELECT grp_id FROM falcon_portal.grp_host WHERE host_id=?", endpoint.Id).QueryRow(&grp_id)
				log.Println("grp_id =", grp_id)
				hostId = strconv.Itoa(endpoint.Id)
				groupId = strconv.Itoa(grp_id)
			}
			item["hostid"] = hostId
			item["hostname"] = hostName
			item["ip"] = ip
			item["groupid"] = groupId
			items = append(items, item)
		}
	}
	log.Println("items =", items)
	result["items"] = items
	nodes["result"] = result
}
/**
* @function name: func hostUpdate(nodes map[string]interface{})
* @description: This function updates host data.
* @related issues: OWL-257, OWL-240, OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/23/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
func hostUpdate(nodes map[string]interface{}) {
	log.Println("func hostUpdate()")
	params := nodes["params"].(map[string]interface{})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	args := map[string]string {}
	endpoint := checkHostExist(params, result)
	if endpoint.Id > 0 {
		log.Println("host existed")
		hostId := endpoint.Id
		// Refresh the modification timestamp and write the row back.
		now := getNow()
		endpoint.T_modify = now
		o := orm.NewOrm()
		num, err := o.Update(&endpoint)
		if err != nil {
			setError(err.Error(), result)
		} else {
			log.Println("update hostId =", hostId)
			log.Println("mysql row affected nums =", num)
			// Re-bind group and template, then report the id Zabbix-style.
			bindGroup(int64(endpoint.Id), params, args, result)
			hostid := strconv.Itoa(endpoint.Id)
			hostids := [1]string{string(hostid)}
			result["hostids"] = hostids
			bindTemplate(params, args, result)
		}
	} else {
		log.Println("host not existed")
		// Unknown hosts are created instead of updated.
		addHost(params, args, result)
	}
	log.Println("args =", args)
	nodes["result"] = result
}
/**
* @function name: func hostgroupCreate(nodes map[string]interface{})
* @description: This function gets hostgroup data for database insertion.
* @related issues: OWL-257, OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/21/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
func hostgroupCreate(nodes map[string]interface{}) {
	log.Println("func hostgroupCreate()")
	params := nodes["params"].(map[string]interface{})
	hostgroupName := params["name"].(string)
	// Groups created through this bridge are attributed to the "zabbix" user.
	user := "zabbix"
	now := getNow()
	o := orm.NewOrm()
	o.Using("falcon_portal")
	grp := Grp{
		Grp_name: hostgroupName,
		Create_user: user,
		Create_at: now,
	}
	log.Println("grp =", grp)
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	id, err := o.Insert(&grp)
	if err != nil {
		setError(err.Error(), result)
	} else {
		// Report the new id back Zabbix-style in result["groupids"].
		groupid := strconv.Itoa(int(id))
		groupids := [1]string{string(groupid)}
		result["groupids"] = groupids
	}
	nodes["result"] = result
}
/**
* @function name: func hostgroupDelete(nodes map[string]interface{})
* @description: This function handles hostgroup.delete API requests.
* @related issues: OWL-257, OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/21/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
func hostgroupDelete(nodes map[string]interface{}) {
	log.Println("func hostgroupDelete()")
	params := nodes["params"].([]interface {})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	o := orm.NewOrm()
	o.Using("falcon_portal")
	// Cascade: delete the group row plus every table referencing grp_id.
	args := []interface{}{}
	args = append(args, "DELETE FROM falcon_portal.grp WHERE id=?")
	args = append(args, "DELETE FROM falcon_portal.grp_host WHERE grp_id=?")
	args = append(args, "DELETE FROM falcon_portal.grp_tpl WHERE grp_id=?")
	args = append(args, "DELETE FROM falcon_portal.plugin_dir WHERE grp_id=?")
	log.Println("args =", args)
	groupids := []string{}
	for _, sqlcmd := range args {
		for _, hostgroupId := range params {
			res, err := o.Raw(sqlcmd.(string), hostgroupId).Exec()
			if err != nil {
				setError(err.Error(), result)
			} else {
				num, _ := res.RowsAffected()
				// Only the grp-table delete marks a group as removed.
				// NOTE(review): hostgroupId is asserted to string here;
				// hostDelete treats ids as json.Number — confirm the
				// request format before relying on this assertion.
				if num > 0 && sqlcmd == "DELETE FROM falcon_portal.grp WHERE id=?" {
					groupids = append(groupids, hostgroupId.(string))
					log.Println("delete hostgroup id =", hostgroupId)
					log.Println("mysql row affected nums =", num)
				}
			}
		}
	}
	result["groupids"] = groupids
	nodes["result"] = result
}
/**
* @function name: func hostgroupGet(nodes map[string]interface{})
* @description: This function gets existed hostgroup data.
* @related issues: OWL-257, OWL-254
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 12/29/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
func hostgroupGet(nodes map[string]interface{}) {
	log.Println("func hostgroupGet()")
	params := nodes["params"].(map[string]interface{})
	items := []interface{}{}
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	groupNames := []string{}
	queryAll := false
	// filter.name is a list of group names; the sentinel "_all_" switches
	// to a full-table query.
	if val, ok := params["filter"]; ok {
		filter := val.(map[string]interface{})
		if val, ok = filter["name"]; ok {
			for _, groupName := range val.([]interface{}) {
				if groupName.(string) == "_all_" {
					queryAll = true
				} else {
					groupNames = append(groupNames, groupName.(string))
				}
			}
		}
	}
	groupId := ""
	o := orm.NewOrm()
	o.Using("falcon_portal")
	if queryAll {
		var grps []*Grp
		_, err := o.QueryTable("grp").All(&grps)
		if err != nil {
			setError(err.Error(), result)
		} else {
			for _, grp := range grps {
				item := map[string]string {}
				item["groupid"] = strconv.Itoa(grp.Id)
				item["groupname"] = grp.Grp_name
				items = append(items, item)
			}
		}
	} else {
		var grp Grp
		for _, groupName := range groupNames {
			item := map[string]string {}
			// Unresolved groups produce an empty groupid.
			groupId = ""
			err := o.QueryTable("grp").Filter("grp_name", groupName).One(&grp)
			if err == orm.ErrMultiRows {
				setError("returned multiple rows", result)
			} else if err == orm.ErrNoRows {
				setError("host group not found", result)
			} else if grp.Id > 0 {
				groupId = strconv.Itoa(grp.Id)
			}
			item["groupid"] = groupId
			item["groupname"] = groupName
			items = append(items, item)
		}
	}
	log.Println("result =", result)
	result["items"] = items
	nodes["result"] = result
}
/**
* @function name: func hostgroupUpdate(nodes map[string]interface{})
* @description: This function updates hostgroup data.
* @related issues: OWL-257, OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/21/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// hostgroupUpdate handles "hostgroup.update" API requests: it renames an
// existing host group row in falcon_portal.grp and stores the outcome in
// nodes["result"].
func hostgroupUpdate(nodes map[string]interface{}) {
	log.Println("func hostgroupUpdate()")
	params := nodes["params"].(map[string]interface{})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	// NOTE(review): on a bad groupid the error is recorded but execution
	// continues with hostgroupId == 0; the subsequent o.Read then fails too.
	hostgroupId, err := strconv.Atoi(params["groupid"].(string))
	if err != nil {
		setError(err.Error(), result)
	}
	o := orm.NewOrm()
	o.Using("falcon_portal")
	if _, ok := params["name"]; ok {
		hostgroupName := params["name"].(string)
		log.Println("hostgroupName =", hostgroupName)
		if hostgroupName != "" {
			// Load the existing row, then update only its name.
			grp := Grp{Id: hostgroupId}
			err := o.Read(&grp)
			if err != nil {
				setError(err.Error(), result)
			} else {
				grp.Grp_name = hostgroupName
				num, err := o.Update(&grp)
				if err != nil {
					setError(err.Error(), result)
				} else {
					if num > 0 {
						// Echo the updated ID back in Zabbix-style form.
						groupids := [1]string{strconv.Itoa(hostgroupId)}
						result["groupids"] = groupids
						log.Println("update groupid =", hostgroupId)
						log.Println("mysql row affected nums =", num)
					}
				}
			}
		}
	}
	nodes["result"] = result
}
/**
* @function name: func templateCreate(nodes map[string]interface{})
* @description: This function gets template data for database insertion.
* @related issues: OWL-257, OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/22/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// templateCreate handles "template.create" API requests: it inserts a new
// row into falcon_portal.tpl and binds the template to the requested host
// group via grp_tpl. The outcome is stored in nodes["result"].
func templateCreate(nodes map[string]interface{}) {
	log.Println("func templateCreate()")
	params := nodes["params"].(map[string]interface{})
	templateName := params["host"].(string)
	user := "zabbix"
	groups := params["groups"]
	groupid := groups.(map[string]interface{})["groupid"].(json.Number)
	hostgroupId := string(groupid)
	now := getNow()
	o := orm.NewOrm()
	o.Using("falcon_portal")
	tpl := Tpl{
		Tpl_name:    templateName,
		Create_user: user,
		Create_at:   now,
	}
	log.Println("tpl =", tpl)
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	id, err := o.Insert(&tpl)
	if err != nil {
		setError(err.Error(), result)
	} else {
		// strconv.Itoa already returns a string; the original's extra
		// string(...) conversion was redundant.
		templateId := strconv.Itoa(int(id))
		templateids := [1]string{templateId}
		result["templateids"] = templateids
		groupId, err := strconv.Atoi(hostgroupId)
		if err != nil {
			setError(err.Error(), result)
		} else {
			// Only bind the template when the group ID parsed successfully;
			// the original inserted a bogus Grp_id of 0 on parse failure.
			grp_tpl := Grp_tpl{
				Grp_id:    groupId,
				Tpl_id:    int(id),
				Bind_user: user,
			}
			log.Println("grp_tpl =", grp_tpl)
			_, err = o.Insert(&grp_tpl)
			if err != nil {
				setError(err.Error(), result)
			}
		}
	}
	nodes["result"] = result
}
/**
* @function name: func templateDelete(nodes map[string]interface{})
* @description: This function handles template.delete API requests.
* @related issues: OWL-257, OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/22/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// templateDelete handles "template.delete" API requests: it removes the
// template rows and their group bindings, collecting the IDs of templates
// actually deleted into result["templateids"].
func templateDelete(nodes map[string]interface{}) {
	log.Println("func templateDelete()")
	params := nodes["params"].([]interface{})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	o := orm.NewOrm()
	// Select the falcon_portal alias for consistency with hostgroupDelete;
	// the statements below are schema-qualified either way.
	o.Using("falcon_portal")
	// A plain []string avoids the interface{} boxing and type assertions of
	// the original.
	args := []string{
		"DELETE FROM falcon_portal.tpl WHERE id=?",
		"DELETE FROM falcon_portal.grp_tpl WHERE tpl_id=?",
	}
	log.Println("args =", args)
	templateids := []string{}
	for _, sqlcmd := range args {
		log.Println(sqlcmd)
		for _, templateId := range params {
			log.Println("templateId =", templateId)
			res, err := o.Raw(sqlcmd, templateId).Exec()
			if err != nil {
				setError(err.Error(), result)
			} else {
				num, _ := res.RowsAffected()
				// Only deletions from the tpl table itself count as a
				// deleted template.
				if num > 0 && sqlcmd == "DELETE FROM falcon_portal.tpl WHERE id=?" {
					templateids = append(templateids, templateId.(string))
					log.Println("delete template id =", templateId)
					log.Println("mysql row affected nums =", num)
				}
			}
		}
	}
	result["templateids"] = templateids
	nodes["result"] = result
}
/**
* @function name: func templateUpdate(nodes map[string]interface{}, rw http.ResponseWriter)
 * @description: This function updates template data (rename and/or group rebinding).
* @related issues: OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @param: rw http.ResponseWriter
* @return: void
* @author: Don Hsieh
* @since: 09/22/2015
* @last modified: 10/23/2015
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// templateUpdate handles "template.update" API requests: it renames a
// template and/or rebinds it to a new set of host groups, then renders the
// response itself (unlike the sibling handlers, which leave rendering to
// the caller via setResponse).
// NOTE(review): errors here are stored as [1]string overwrites rather than
// appended via setError like the other handlers — confirm before unifying.
func templateUpdate(nodes map[string]interface{}, rw http.ResponseWriter) {
	params := nodes["params"].(map[string]interface{})
	var result = make(map[string]interface{})
	templateId, err := strconv.Atoi(params["templateid"].(string))
	if err != nil {
		log.Println("Error =", err.Error())
		result["error"] = [1]string{string(err.Error())}
	}
	o := orm.NewOrm()
	database := "falcon_portal"
	o.Using(database)
	if _, ok := params["name"]; ok {
		templateName := params["name"].(string)
		log.Println("templateName =", templateName)
		if templateName != "" {
			// Rename: read the existing row and change only Tpl_name.
			tpl := Tpl{Id: templateId}
			log.Println("tpl =", tpl)
			err := o.Read(&tpl)
			if err != nil {
				log.Println("Error =", err.Error())
				result["error"] = [1]string{string(err.Error())}
			} else {
				log.Println("tpl =", tpl)
				tpl.Tpl_name = templateName
				log.Println("tpl =", tpl)
				num, err := o.Update(&tpl)
				if err != nil {
					log.Println("Error =", err.Error())
					result["error"] = [1]string{string(err.Error())}
				} else {
					if num > 0 {
						templateids := [1]string{strconv.Itoa(templateId)}
						result["templateids"] = templateids
						log.Println("update template id =", templateId)
						log.Println("mysql row affected nums =", num)
					}
				}
			}
		}
	}
	if _, ok := params["groups"]; ok {
		groups := params["groups"].([]interface{})
		log.Println("groups =", groups)
		count := 0
		for _, group := range groups {
			log.Println("group =", group)
			count += 1
		}
		log.Println("count =", count)
		if count > 0 {
			// Rebind: drop every existing group binding for this template,
			// then insert one grp_tpl row per requested group.
			user := "zabbix"
			sqlcmd := "DELETE FROM falcon_portal.grp_tpl WHERE tpl_id=?"
			res, err := o.Raw(sqlcmd, templateId).Exec()
			if err != nil {
				log.Println("Error =", err.Error())
				result["error"] = [1]string{string(err.Error())}
			} else {
				num, _ := res.RowsAffected()
				if num > 0 {
					log.Println("mysql row affected nums =", num)
				}
			}
			for _, group := range groups {
				log.Println("group =", group)
				// NOTE(review): the Atoi error is never checked here; a
				// non-numeric groupid silently binds grp_id 0.
				groupId, err := strconv.Atoi(group.(map[string]interface{})["groupid"].(string))
				log.Println("groupId =", groupId)
				grp_tpl := Grp_tpl{Grp_id: groupId, Tpl_id: templateId, Bind_user: user}
				log.Println("grp_tpl =", grp_tpl)
				_, err = o.Insert(&grp_tpl)
				if err != nil {
					log.Println("Error =", err.Error())
					result["error"] = [1]string{string(err.Error())}
				} else {
					templateids := [1]string{strconv.Itoa(templateId)}
					result["templateids"] = templateids
					log.Println("update template id =", templateId)
				}
			}
		}
	}
	// Strip the request params and render the response directly.
	resp := nodes
	delete(resp, "params")
	resp["result"] = result
	RenderJson(rw, resp)
}
/**
* @function name: func getFctoken() fctoken string
* @description: This function returns fctoken for API request.
* @related issues: OWL-159
* @param: void
* @return: fctoken string
* @author: Don Hsieh
* @since: 11/24/2015
* @last modified: 11/24/2015
* @called by: func apiAlert(rw http.ResponseWriter, req *http.Request)
* in query/http/zabbix.go
* func getMapValues(chartType string) map[string]interface{}
* in query/http/grafana.go
*/
// getFctoken derives the API token of the day: the md5 hex digest of the
// configured API token is prefixed with today's date (YYYYMMDD) and the
// combined string is md5-hashed again.
func getFctoken() string {
	hasher := md5.New()
	io.WriteString(hasher, g.Config().Api.Token)
	tokenHash := hex.EncodeToString(hasher.Sum(nil))
	today := time.Now().Format("20060102")
	hasher = md5.New()
	io.WriteString(hasher, today+tokenHash)
	return hex.EncodeToString(hasher.Sum(nil))
}
/**
* @function name: func apiAlert(rw http.ResponseWriter, req *http.Request)
* @description: This function handles alarm API request.
* @related issues: OWL-159, OWL-093
* @param: rw http.ResponseWriter
* @param: req *http.Request
* @return: void
* @author: Don Hsieh
* @since: 09/29/2015
* @last modified: 11/24/2015
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// apiAlert forwards an alarm notification to the configured event API and
// relays the upstream response body back to the caller. Query parameters
// supply the endpoint, time, strategy ID, metric, step, template ID,
// status and priority of the triggering event.
func apiAlert(rw http.ResponseWriter, req *http.Request) {
	fcname := g.Config().Api.Name
	fctoken := getFctoken()
	param := req.URL.Query()
	log.Println("param =", param)
	arr := param["endpoint"]
	hostname := arr[0]
	arr = param["time"]
	datetime := arr[0]
	arr = param["stra_id"]
	trigger_id, err := strconv.Atoi(arr[0])
	if err != nil {
		log.Println(err.Error())
	}
	arr = param["metric"]
	metric := arr[0]
	arr = param["step"]
	step := arr[0]
	arr = param["tpl_id"]
	tpl_id := arr[0]
	arr = param["status"]
	zabbix_status := arr[0]
	arr = param["priority"]
	zabbix_level := arr[0]
	summary := "[OWL] " + metric + "_" + step + "_" + zabbix_level
	args := map[string]interface{}{
		"summary":       summary,
		"zabbix_status": zabbix_status, // "PROBLEM",
		"zabbix_level":  "Information", // "Information" or "High"
		"trigger_id":    trigger_id,
		"host_ip":       "",
		"hostname":      hostname,
		"event_id":      tpl_id,
		"template_name": "Template Server Basic Monitor",
		"datetime":      datetime,
		"fcname":        fcname,
		"fctoken":       fctoken,
	}
	log.Println("args =", args)
	bs, err := json.Marshal(args)
	if err != nil {
		log.Println("Error =", err.Error())
	}
	url := g.Config().Api.Event
	log.Println("url =", url)
	// bs is already []byte; the original's []byte(bs) conversion was
	// redundant.
	reqAlert, err := http.NewRequest("POST", url, bytes.NewBuffer(bs))
	if err != nil {
		// On failure reqAlert is nil; bail out instead of dereferencing it
		// below (the original would panic here).
		log.Println("Error =", err.Error())
		return
	}
	reqAlert.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(reqAlert)
	if err != nil {
		// On transport failure resp is nil; the original still deferred
		// resp.Body.Close() and crashed. Return early instead.
		log.Println("Error =", err.Error())
		return
	}
	defer resp.Body.Close()
	log.Println("response Status =", resp.Status) // 200 OK TypeOf(resp.Status): string
	log.Println("response Headers =", resp.Header)
	body, _ := ioutil.ReadAll(resp.Body)
	log.Println("response Body =", string(body))
	rw.Header().Set("Content-Type", "application/json; charset=UTF-8")
	rw.Write(body)
}
/**
* @function name: func setResponse(rw http.ResponseWriter, resp map[string]interface{})
* @description: This function sets content of response and returns it.
* @related issues: OWL-257
* @param: rw http.ResponseWriter
* @param: resp map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 01/01/2016
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// setResponse strips the request bookkeeping keys from resp, flattens the
// result payload Zabbix-style (recorded errors replace the result; an
// "items" payload is returned bare), stamps the current time and renders
// the map as JSON.
func setResponse(rw http.ResponseWriter, resp map[string]interface{}) {
	// delete is a no-op on absent keys, so the original's existence checks
	// were redundant.
	delete(resp, "auth")
	delete(resp, "method")
	delete(resp, "params")
	result := resp["result"].(map[string]interface{})
	if val, ok := result["error"]; ok {
		errors := val.([]string)
		if len(errors) > 0 {
			// Any recorded error replaces the whole result.
			delete(resp, "result")
			resp["error"] = errors
		} else {
			delete(result, "error")
			if items, ok := result["items"]; ok {
				// "items" responses are returned unwrapped.
				resp["result"] = items
			}
		}
	}
	resp["time"] = getNow()
	RenderJson(rw, resp)
}
/**
* @function name: func apiParser(rw http.ResponseWriter, req *http.Request)
* @description: This function parses the method of API request.
* @related issues: OWL-254, OWL-085
* @param: rw http.ResponseWriter
* @param: req *http.Request
* @return: void
* @author: Don Hsieh
* @since: 09/11/2015
* @last modified: 12/29/2015
* @called by: http.HandleFunc("/api", apiParser)
* in func main()
*/
// apiParser is the /api entry point. An empty request body is treated as
// an alarm callback (apiAlert); otherwise the body is parsed as a
// Zabbix-style JSON RPC request and dispatched on its "method" field.
func apiParser(rw http.ResponseWriter, req *http.Request) {
	log.Println("func apiParser(rw http.ResponseWriter, req *http.Request)")
	buf := new(bytes.Buffer)
	buf.ReadFrom(req.Body)
	log.Println(buf.Len())
	if buf.Len() == 0 {
		apiAlert(rw, req)
		return
	}
	s := buf.String() // Does a complete copy of the bytes in the buffer.
	log.Println("s =", s)
	json, err := simplejson.NewJson(buf.Bytes())
	if err != nil {
		log.Println(err.Error())
	}
	var nodes = make(map[string]interface{})
	nodes, _ = json.Map()
	method := nodes["method"]
	log.Println(method)
	delete(nodes, "method")
	delete(nodes, "auth")
	// A switch is clearer than the original if/else-if ladder.
	switch method {
	case "host.create":
		hostCreate(nodes)
	case "host.delete":
		hostDelete(nodes)
	case "host.get":
		hostGet(nodes)
	case "host.update":
		hostUpdate(nodes)
	case "hostgroup.create":
		hostgroupCreate(nodes)
	case "hostgroup.delete":
		hostgroupDelete(nodes)
	case "hostgroup.get":
		hostgroupGet(nodes)
	case "hostgroup.update":
		hostgroupUpdate(nodes)
	case "template.create":
		templateCreate(nodes)
	case "template.delete":
		templateDelete(nodes)
	case "template.update":
		templateUpdate(nodes, rw)
	}
}
/**
* @function name: func configZabbixRoutes()
* @description: This function handles API requests.
* @related issues: OWL-093, OWL-085
* @param: void
* @return: void
* @author: Don Hsieh
* @since: 09/09/2015
* @last modified: 10/21/2015
* @called by: func Start()
* in http/http.go
*/
// configZabbixRoutes registers the Zabbix-compatible /api handler on the
// default HTTP mux.
func configZabbixRoutes() {
	http.HandleFunc("/api", apiParser)
}
[OWL-257] update func templateUpdate()
package http
import (
"bytes"
"crypto/md5"
"encoding/hex"
"encoding/json"
"github.com/astaxie/beego/orm"
"github.com/bitly/go-simplejson"
"github.com/Cepave/query/g"
"io"
"io/ioutil"
"log"
"net/http"
"strconv"
"time"
)
// Endpoint mirrors a row of the "endpoint" table: one monitored host with
// its timestamps and network address.
type Endpoint struct {
	Id       int
	Endpoint string // host name
	Ts       int64  // unix timestamp set when the row is inserted
	T_create string
	T_modify string
	Ipv4     string
	Port     string
}
// Grp mirrors a row of the falcon_portal.grp table: a host group.
type Grp struct {
	Id          int
	Grp_name    string
	Create_user string
	Create_at   string
	Come_from   int
}
// Tpl mirrors a row of the falcon_portal.tpl table: a monitoring template.
type Tpl struct {
	Id          int
	Tpl_name    string
	Parent_id   int
	Action_id   int
	Create_user string
	Create_at   string
}
// Grp_tpl binds a template (Tpl_id) to a host group (Grp_id).
type Grp_tpl struct {
	Id        int
	Grp_id    int
	Tpl_id    int
	Bind_user string
}
// Grp_host binds a host (Host_id) to a host group (Grp_id).
type Grp_host struct {
	Id      int
	Grp_id  int
	Host_id int
}
/**
* @function name: func getNow() string
* @description: This function gets string of current time.
* @related issues: OWL-093
* @param: void
 * @return: now string
* @author: Don Hsieh
* @since: 10/21/2015
* @last modified: 10/21/2015
* @called by: func hostCreate(nodes map[string]interface{})
* func hostgroupCreate(nodes map[string]interface{})
* func templateCreate(nodes map[string]interface{})
* func hostUpdate(nodes map[string]interface{})
* func setResponse(rw http.ResponseWriter, resp map[string]interface{})
*/
// getNow returns the current local time formatted as
// "2006-01-02 15:04:05" (MySQL DATETIME layout).
func getNow() string {
	return time.Now().Format("2006-01-02 15:04:05")
}
/**
* @function name: func getHostId(params map[string]interface{}) string
* @description: This function gets host ID.
* @related issues: OWL-240
* @param: params map[string]interface{}
* @return: hostId string
* @author: Don Hsieh
* @since: 12/16/2015
* @last modified: 12/16/2015
* @called by: func checkHostExist(params map[string]interface{}, result map[string]interface{}) Endpoint
*/
// getHostId extracts the "hostid" value from params, returning "" when the
// key is absent or nil.
func getHostId(params map[string]interface{}) string {
	val, ok := params["hostid"]
	if !ok || val == nil {
		return ""
	}
	return val.(string)
}
/**
* @function name: func getHostName(params map[string]interface{}) string
* @description: This function gets host name.
* @related issues: OWL-240
* @param: params map[string]interface{}
* @return: hostName string
* @author: Don Hsieh
* @since: 12/16/2015
* @last modified: 12/16/2015
* @called by: func checkHostExist(params map[string]interface{}, result map[string]interface{}) Endpoint
* func addHost(params map[string]interface{}, args map[string]string, result map[string]interface{})
*/
// getHostName extracts the host name from params, preferring the "host"
// key and falling back to "name". Returns "" when neither yields a value.
//
// Bug fix: the original only consulted "name" when "host" was present but
// nil; a request carrying only a "name" key returned "". Now "name" is
// used whenever "host" yields nothing.
func getHostName(params map[string]interface{}) string {
	if val, ok := params["host"]; ok && val != nil {
		return val.(string)
	}
	if val, ok := params["name"]; ok && val != nil {
		return val.(string)
	}
	return ""
}
/**
* @function name: func checkHostExist(params map[string]interface{}, result map[string]interface{}) Endpoint
* @description: This function checks if a host existed.
* @related issues: OWL-257, OWL-240
* @param: params map[string]interface{}
* @param: result map[string]interface{}
* @return: endpoint Endpoint
* @author: Don Hsieh
* @since: 12/16/2015
* @last modified: 01/01/2016
* @called by: func hostCreate(nodes map[string]interface{})
* func hostUpdate(nodes map[string]interface{})
*/
// checkHostExist looks up a host by ID (params["hostid"]) or, failing
// that, by name (params["host"]/["name"]) in the "endpoint" table.
// Lookup failures are appended to result["error"]; the Endpoint found
// (zero-valued when nothing matched) is returned.
func checkHostExist(params map[string]interface{}, result map[string]interface{}) Endpoint {
	var endpoint Endpoint
	o := orm.NewOrm()
	hostId := getHostId(params)
	hostName := getHostName(params)
	if hostId != "" {
		hostIdint, err := strconv.Atoi(hostId)
		if err != nil {
			setError(err.Error(), result)
		} else {
			// Assign to the outer endpoint: the original declared a new one
			// with ":=", so the row read here was discarded and callers
			// always saw a zero-valued Endpoint for ID lookups.
			endpoint = Endpoint{Id: hostIdint}
			if err := o.Read(&endpoint); err != nil {
				setError(err.Error(), result)
			}
		}
	} else {
		err := o.QueryTable("endpoint").Filter("endpoint", hostName).One(&endpoint)
		if err == orm.ErrMultiRows {
			// Have multiple records
			setError("returned multiple rows", result)
		} else if err == orm.ErrNoRows {
			// No result
			setError("host not found", result)
		}
	}
	return endpoint
}
/**
* @function name: func setError(error string, result map[string]interface{})
* @description: This function sets error message.
* @related issues: OWL-257
* @param: error string
* @param: result map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 01/01/2016
* @last modified: 01/01/2016
* @called by: func bindGroup(hostId int64, params map[string]interface{}, args map[string]string, result map[string]interface{})
* func bindTemplate(params map[string]interface{}, args map[string]string, result map[string]interface{})
* func addHost(hostName string, params map[string]interface{}, args map[string]string, result map[string]interface{})
* func hostCreate(nodes map[string]interface{})
*/
// setError logs the message and appends it to result["error"], which must
// already hold a []string.
func setError(error string, result map[string]interface{}) {
	log.Println("Error =", error)
	current := result["error"].([]string)
	result["error"] = append(current, error)
}
/**
* @function name: func bindGroup(hostId int64, params map[string]interface{}, args map[string]string, result map[string]interface{})
* @description: This function binds a host to a host group.
* @related issues: OWL-257, OWL-240
* @param: hostId int64
* @param: params map[string]interface{}
* @param: args map[string]string
* @param: result map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 12/15/2015
* @last modified: 01/01/2016
* @called by: func hostUpdate(nodes map[string]interface{})
* func addHost(hostName string, params map[string]interface{}, args map[string]string, result map[string]interface{})
*/
// bindGroup replaces the group bindings of host hostId with the groups in
// params["groups"]. Existing grp_host rows for the host are removed first;
// each requested (group, host) pair is then inserted unless it already
// exists. The last group ID seen is recorded in args["groupId"] for use by
// bindTemplate. Errors are appended to result["error"].
func bindGroup(hostId int64, params map[string]interface{}, args map[string]string, result map[string]interface{}) {
	if _, ok := params["groups"]; ok {
		o := orm.NewOrm()
		o.Using("falcon_portal")
		sqlcmd := "DELETE FROM falcon_portal.grp_host WHERE host_id=?"
		res, err := o.Raw(sqlcmd, hostId).Exec()
		if err != nil {
			setError(err.Error(), result)
		} else {
			num, _ := res.RowsAffected()
			if num > 0 {
				log.Println("mysql row affected nums =", num)
			}
		}
		groups := params["groups"].([]interface{})
		groupId := ""
		for _, group := range groups {
			groupId = group.(map[string]interface{})["groupid"].(string)
			args["groupId"] = groupId
			grp_id, err := strconv.Atoi(groupId)
			if err != nil {
				// The original discarded this error and would have bound
				// grp_id 0 on a malformed groupid.
				setError(err.Error(), result)
				continue
			}
			// The original ran this SELECT through Exec() and read
			// RowsAffected(), which is meaningless for a SELECT; fetch the
			// count properly with QueryRow so the duplicate check works.
			var count int
			sqlcmd := "SELECT COUNT(*) FROM falcon_portal.grp_host WHERE host_id=? AND grp_id=?"
			err = o.Raw(sqlcmd, hostId, grp_id).QueryRow(&count)
			if err != nil {
				setError(err.Error(), result)
			} else if count > 0 {
				log.Println("Record existed. count =", count)
			} else { // Record not existed. Insert new one.
				grp_host := Grp_host{
					Grp_id:  grp_id,
					Host_id: int(hostId),
				}
				log.Println("grp_host =", grp_host)
				_, err = o.Insert(&grp_host)
				if err != nil {
					setError(err.Error(), result)
				}
			}
		}
	}
}
/**
* @function name: func bindTemplate(params map[string]interface{}, args map[string]string, result map[string]interface{})
* @description: This function binds a host to a template.
* @related issues: OWL-257, OWL-240
* @param: params map[string]interface{}
* @param: args map[string]string
* @param: result map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 12/15/2015
* @last modified: 01/01/2016
* @called by: func hostUpdate(nodes map[string]interface{})
* func addHost(hostName string, params map[string]interface{}, args map[string]string, result map[string]interface{})
*/
// bindTemplate binds the templates in params["templates"] to the group
// recorded in args["groupId"] (set earlier by bindGroup). Each
// (group, template) pair is inserted into grp_tpl unless it already
// exists. Errors are appended to result["error"].
func bindTemplate(params map[string]interface{}, args map[string]string, result map[string]interface{}) {
	if _, ok := params["templates"]; ok {
		o := orm.NewOrm()
		o.Using("falcon_portal")
		groupId := args["groupId"]
		grp_id, _ := strconv.Atoi(groupId)
		templates := params["templates"].([]interface{})
		for _, template := range templates {
			templateId := template.(map[string]interface{})["templateid"].(string)
			tpl_id, err := strconv.Atoi(templateId)
			if err != nil {
				// The original ignored this error and would have bound
				// tpl_id 0 on a malformed templateid.
				setError(err.Error(), result)
				continue
			}
			args["templateId"] = templateId
			// Fetch the duplicate count with QueryRow; the original used
			// Exec()+RowsAffected(), which does not report SELECT results,
			// so the existence check never worked.
			var count int
			sqlcmd := "SELECT COUNT(*) FROM falcon_portal.grp_tpl WHERE grp_id=? AND tpl_id=?"
			err = o.Raw(sqlcmd, grp_id, tpl_id).QueryRow(&count)
			if err != nil {
				setError(err.Error(), result)
			} else if count > 0 {
				log.Println("Record existed. count =", count)
			} else { // Record not existed. Insert new one.
				grp_tpl := Grp_tpl{
					Grp_id:    grp_id,
					Tpl_id:    tpl_id,
					Bind_user: "zabbix",
				}
				log.Println("grp_tpl =", grp_tpl)
				_, err = o.Insert(&grp_tpl)
				if err != nil {
					setError(err.Error(), result)
				}
			}
		}
	}
}
/**
* @function name: func addHost(params map[string]interface{}, args map[string]string, result map[string]interface{})
* @description: This function inserts a host to "endpoint" table and binds the host to its group and template.
* @related issues: OWL-257, OWL-240
* @param: params map[string]interface{}
* @param: args map[string]string
* @param: result map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 12/21/2015
* @last modified: 01/01/2016
* @called by: func hostCreate(nodes map[string]interface{})
* func hostUpdate(nodes map[string]interface{})
*/
// addHost inserts a new host into the "endpoint" table and binds it to the
// requested group(s) and template(s). The host name comes from
// params["host"]/["name"]; the first entry of params["interfaces"]
// supplies the IP and port. Errors are appended to result["error"]; on
// success result["hostids"] holds the new ID.
func addHost(params map[string]interface{}, args map[string]string, result map[string]interface{}) {
	hostName := getHostName(params)
	if len(hostName) == 0 {
		setError("host name can not be null.", result)
		return
	}
	args["host"] = hostName
	ip := ""
	port := ""
	if _, ok := params["interfaces"]; ok {
		// Only the first interface entry is honored (the original looped
		// over all entries but acted solely on index 0).
		interfaces := params["interfaces"].([]interface{})
		if len(interfaces) > 0 {
			first := interfaces[0].(map[string]interface{})
			ip = first["ip"].(string)
			port = first["port"].(string)
			args["ip"] = ip
			args["port"] = port
		}
	}
	t := time.Now()
	timestamp := t.Unix()
	log.Println(timestamp)
	now := getNow()
	endpoint := Endpoint{
		Endpoint: hostName,
		Ts:       timestamp,
		T_create: now,
		T_modify: now,
		Ipv4:     ip,
	}
	if len(port) > 0 {
		endpoint.Port = port
	}
	log.Println("endpoint =", endpoint)
	o := orm.NewOrm()
	hostId, err := o.Insert(&endpoint)
	if err != nil {
		setError(err.Error(), result)
		return
	}
	bindGroup(hostId, params, args, result)
	// strconv.Itoa already returns a string; the original's extra
	// string(...) conversion was redundant.
	hostids := [1]string{strconv.Itoa(int(hostId))}
	result["hostids"] = hostids
	bindTemplate(params, args, result)
}
/**
* @function name: func hostCreate(nodes map[string]interface{})
* @description: This function gets host data for database insertion.
* @related issues: OWL-257, OWL-240, OWL-093, OWL-086, OWL-085
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/11/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// hostCreate handles "host.create" API requests: it refuses duplicate host
// names, otherwise inserts the host together with its group and template
// bindings, and stores the outcome in nodes["result"].
func hostCreate(nodes map[string]interface{}) {
	log.Println("func hostCreate()")
	params := nodes["params"].(map[string]interface{})
	result := map[string]interface{}{"error": []string{}}
	endpoint := checkHostExist(params, result)
	if endpoint.Id > 0 {
		setError("host name existed: "+endpoint.Endpoint, result)
	} else {
		args := map[string]string{}
		addHost(params, args, result)
		if val, ok := params["inventory"]; ok {
			// Record the concatenated MAC addresses for logging purposes.
			inventory := val.(map[string]interface{})
			args["macAddr"] = inventory["macaddress_a"].(string) + inventory["macaddress_b"].(string)
		}
		log.Println("args =", args)
	}
	nodes["result"] = result
}
/**
* @function name: func unbindGroup(hostId string, result map[string]interface{})
* @description: This function unbinds a host to a host group.
* @related issues: OWL-241
* @param: hostId string
* @param: result map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 01/01/2016
* @last modified: 01/01/2016
* @called by: func removeHost(hostIds []string, result map[string]interface{})
*/
// unbindGroup removes every grp_host binding for the given host ID.
// Errors are appended to result["error"].
func unbindGroup(hostId string, result map[string]interface{}) {
	o := orm.NewOrm()
	o.Using("falcon_portal")
	sql := "DELETE FROM grp_host WHERE host_id = ?"
	res, err := o.Raw(sql, hostId).Exec()
	if err != nil {
		// Bail out: on error res is nil, and the original went on to call
		// res.RowsAffected(), which panics.
		setError(err.Error(), result)
		return
	}
	num, _ := res.RowsAffected()
	log.Println("mysql row affected nums =", num)
}
/**
* @function name: func removeHost(hostIds []string, result map[string]interface{})
* @description: This function deletes host from "endpoint" table.
* @related issues: OWL-241
* @param: hostIds []string
* @param: result map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 01/01/2016
* @last modified: 01/01/2016
* @called by: func hostDelete(nodes map[string]interface{})
*/
// removeHost deletes the given host IDs from the "endpoint" table, drops
// their group bindings, and records the successfully removed IDs in
// result["hostids"].
func removeHost(hostIds []string, result map[string]interface{}) {
	o := orm.NewOrm()
	deleted := []string{}
	for _, hostId := range hostIds {
		id, err := strconv.Atoi(hostId)
		if err != nil {
			// Non-numeric IDs are skipped silently, as before.
			continue
		}
		num, err := o.Delete(&Endpoint{Id: id})
		if err != nil {
			setError(err.Error(), result)
			continue
		}
		if num > 0 {
			log.Println("RowsDeleted =", num)
			unbindGroup(hostId, result)
			deleted = append(deleted, hostId)
		}
	}
	result["hostids"] = deleted
}
/**
* @function name: func hostDelete(nodes map[string]interface{})
* @description: This function handles host.delete API requests.
* @related issues: OWL-257, OWL-241, OWL-093, OWL-086, OWL-085
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/11/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// hostDelete handles "host.delete" API requests: it collects the host_id
// values from the request params, removes those hosts, and stores the
// outcome in nodes["result"].
func hostDelete(nodes map[string]interface{}) {
	params := nodes["params"].([]interface{})
	result := map[string]interface{}{"error": []string{}}
	hostIds := []string{}
	for _, param := range params {
		val, ok := param.(map[string]interface{})["host_id"]
		if !ok || val == nil {
			continue
		}
		hostIds = append(hostIds, string(val.(json.Number)))
	}
	removeHost(hostIds, result)
	nodes["result"] = result
}
/**
* @function name: func hostGet(nodes map[string]interface{})
* @description: This function gets existed host data.
* @related issues: OWL-257, OWL-254
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 12/29/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// hostGet handles "host.get" API requests: it resolves host names to their
// endpoint rows (ID, IP and one bound group ID) and stores the items in
// nodes["result"]. A filter host of "_all_" returns every endpoint.
func hostGet(nodes map[string]interface{}) {
	log.Println("func hostGet()")
	params := nodes["params"].(map[string]interface{})
	items := []interface{}{}
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	hostNames := []string{}
	queryAll := false
	if val, ok := params["filter"]; ok {
		filter := val.(map[string]interface{})
		if val, ok = filter["host"]; ok {
			for _, hostName := range val.([]interface{}) {
				if hostName.(string) == "_all_" {
					queryAll = true
				} else {
					hostNames = append(hostNames, hostName.(string))
				}
			}
		}
	}
	o := orm.NewOrm()
	if queryAll {
		var endpoints []*Endpoint
		num, err := o.QueryTable("endpoint").All(&endpoints)
		if err != nil {
			setError(err.Error(), result)
		} else {
			log.Println("num =", num)
			for _, endpoint := range endpoints {
				item := map[string]string {}
				// Only one group ID is reported even when the host belongs
				// to several groups (QueryRow returns a single row).
				var grp_id int
				o.Raw("SELECT grp_id FROM falcon_portal.grp_host WHERE host_id=?", endpoint.Id).QueryRow(&grp_id)
				item["hostid"] = strconv.Itoa(endpoint.Id)
				item["hostname"] = endpoint.Endpoint
				item["ip"] = endpoint.Ipv4
				item["groupid"] = strconv.Itoa(grp_id)
				items = append(items, item)
			}
		}
	} else {
		ip := ""
		hostId := ""
		groupId := ""
		// NOTE(review): endpoint is reused across iterations; a lookup that
		// fails with an error other than ErrNoRows/ErrMultiRows may leave
		// stale fields from the previous host in it.
		var endpoint Endpoint
		for _, hostName := range hostNames {
			item := map[string]string {}
			// Reset per-iteration outputs before each lookup.
			ip = ""
			hostId = ""
			groupId = ""
			err := o.QueryTable("endpoint").Filter("endpoint", hostName).One(&endpoint)
			if err == orm.ErrMultiRows {
				setError("returned multiple rows", result)
			} else if err == orm.ErrNoRows {
				setError("host not found", result)
			} else if endpoint.Id > 0 {
				ip = endpoint.Ipv4
				var grp_id int
				o.Raw("SELECT grp_id FROM falcon_portal.grp_host WHERE host_id=?", endpoint.Id).QueryRow(&grp_id)
				log.Println("grp_id =", grp_id)
				hostId = strconv.Itoa(endpoint.Id)
				groupId = strconv.Itoa(grp_id)
			}
			item["hostid"] = hostId
			item["hostname"] = hostName
			item["ip"] = ip
			item["groupid"] = groupId
			items = append(items, item)
		}
	}
	log.Println("items =", items)
	result["items"] = items
	nodes["result"] = result
}
/**
* @function name: func hostUpdate(nodes map[string]interface{})
* @description: This function updates host data.
* @related issues: OWL-257, OWL-240, OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/23/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// hostUpdate handles "host.update" API requests: if the host exists its
// T_modify timestamp and bindings are refreshed, otherwise the host is
// created from scratch. The outcome is stored in nodes["result"].
func hostUpdate(nodes map[string]interface{}) {
	log.Println("func hostUpdate()")
	params := nodes["params"].(map[string]interface{})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	args := map[string]string{}
	endpoint := checkHostExist(params, result)
	if endpoint.Id > 0 {
		log.Println("host existed")
		hostId := endpoint.Id
		now := getNow()
		endpoint.T_modify = now
		o := orm.NewOrm()
		num, err := o.Update(&endpoint)
		if err != nil {
			setError(err.Error(), result)
		} else {
			log.Println("update hostId =", hostId)
			log.Println("mysql row affected nums =", num)
			bindGroup(int64(endpoint.Id), params, args, result)
			// strconv.Itoa already returns a string; the original's extra
			// string(...) conversion was redundant.
			hostids := [1]string{strconv.Itoa(endpoint.Id)}
			result["hostids"] = hostids
			bindTemplate(params, args, result)
		}
	} else {
		log.Println("host not existed")
		addHost(params, args, result)
	}
	log.Println("args =", args)
	nodes["result"] = result
}
/**
* @function name: func hostgroupCreate(nodes map[string]interface{})
* @description: This function gets hostgroup data for database insertion.
* @related issues: OWL-257, OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/21/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// hostgroupCreate handles "hostgroup.create" API requests: it inserts a
// new row into falcon_portal.grp and stores the new ID in
// result["groupids"].
func hostgroupCreate(nodes map[string]interface{}) {
	log.Println("func hostgroupCreate()")
	params := nodes["params"].(map[string]interface{})
	hostgroupName := params["name"].(string)
	user := "zabbix"
	now := getNow()
	o := orm.NewOrm()
	o.Using("falcon_portal")
	grp := Grp{
		Grp_name:    hostgroupName,
		Create_user: user,
		Create_at:   now,
	}
	log.Println("grp =", grp)
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	id, err := o.Insert(&grp)
	if err != nil {
		setError(err.Error(), result)
	} else {
		// strconv.Itoa already returns a string; the original's extra
		// string(...) conversion was redundant.
		groupids := [1]string{strconv.Itoa(int(id))}
		result["groupids"] = groupids
	}
	nodes["result"] = result
}
/**
* @function name: func hostgroupDelete(nodes map[string]interface{})
* @description: This function handles hostgroup.delete API requests.
* @related issues: OWL-257, OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/21/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// hostgroupDelete handles "hostgroup.delete" API requests: it removes the
// group rows plus their host, template and plugin bindings, collecting the
// IDs of groups actually deleted into result["groupids"].
func hostgroupDelete(nodes map[string]interface{}) {
	log.Println("func hostgroupDelete()")
	params := nodes["params"].([]interface{})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	o := orm.NewOrm()
	o.Using("falcon_portal")
	// A plain []string avoids the interface{} boxing and type assertions of
	// the original.
	args := []string{
		"DELETE FROM falcon_portal.grp WHERE id=?",
		"DELETE FROM falcon_portal.grp_host WHERE grp_id=?",
		"DELETE FROM falcon_portal.grp_tpl WHERE grp_id=?",
		"DELETE FROM falcon_portal.plugin_dir WHERE grp_id=?",
	}
	log.Println("args =", args)
	groupids := []string{}
	for _, sqlcmd := range args {
		for _, hostgroupId := range params {
			res, err := o.Raw(sqlcmd, hostgroupId).Exec()
			if err != nil {
				setError(err.Error(), result)
			} else {
				num, _ := res.RowsAffected()
				// Only deletions from the grp table itself count as a
				// deleted group.
				if num > 0 && sqlcmd == "DELETE FROM falcon_portal.grp WHERE id=?" {
					groupids = append(groupids, hostgroupId.(string))
					log.Println("delete hostgroup id =", hostgroupId)
					log.Println("mysql row affected nums =", num)
				}
			}
		}
	}
	result["groupids"] = groupids
	nodes["result"] = result
}
/**
* @function name: func hostgroupGet(nodes map[string]interface{})
* @description: This function gets existed hostgroup data.
* @related issues: OWL-257, OWL-254
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 12/29/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// hostgroupGet resolves host group names to ids. A filter name of "_all_"
// lists every group; otherwise each requested name is looked up individually.
// Results are placed in result["items"]; lookup failures go to result["error"].
func hostgroupGet(nodes map[string]interface{}) {
	log.Println("func hostgroupGet()")
	params := nodes["params"].(map[string]interface{})
	items := []interface{}{}
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	groupNames := []string{}
	queryAll := false
	if val, ok := params["filter"]; ok {
		filter := val.(map[string]interface{})
		if rawNames, found := filter["name"]; found {
			for _, rawName := range rawNames.([]interface{}) {
				name := rawName.(string)
				if name == "_all_" {
					queryAll = true
				} else {
					groupNames = append(groupNames, name)
				}
			}
		}
	}
	o := orm.NewOrm()
	o.Using("falcon_portal")
	if queryAll {
		var grps []*Grp
		if _, err := o.QueryTable("grp").All(&grps); err != nil {
			setError(err.Error(), result)
		} else {
			for _, grp := range grps {
				items = append(items, map[string]string{
					"groupid":   strconv.Itoa(grp.Id),
					"groupname": grp.Grp_name,
				})
			}
		}
	} else {
		var grp Grp
		for _, groupName := range groupNames {
			groupId := ""
			err := o.QueryTable("grp").Filter("grp_name", groupName).One(&grp)
			switch {
			case err == orm.ErrMultiRows:
				setError("returned multiple rows", result)
			case err == orm.ErrNoRows:
				setError("host group not found", result)
			case grp.Id > 0:
				groupId = strconv.Itoa(grp.Id)
			}
			items = append(items, map[string]string{
				"groupid":   groupId,
				"groupname": groupName,
			})
		}
	}
	log.Println("result =", result)
	result["items"] = items
	nodes["result"] = result
}
/**
* @function name: func hostgroupUpdate(nodes map[string]interface{})
* @description: This function updates hostgroup data.
* @related issues: OWL-257, OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/21/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// hostgroupUpdate renames an existing host group identified by
// params["groupid"]; the affected id is reported in result["groupids"].
func hostgroupUpdate(nodes map[string]interface{}) {
	log.Println("func hostgroupUpdate()")
	params := nodes["params"].(map[string]interface{})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	hostgroupId, err := strconv.Atoi(params["groupid"].(string))
	if err != nil {
		// A malformed groupid cannot address any row; report and stop instead
		// of continuing with the zero value as the original did.
		setError(err.Error(), result)
		nodes["result"] = result
		return
	}
	o := orm.NewOrm()
	o.Using("falcon_portal")
	if _, ok := params["name"]; ok {
		hostgroupName := params["name"].(string)
		log.Println("hostgroupName =", hostgroupName)
		if hostgroupName != "" {
			grp := Grp{Id: hostgroupId}
			if err := o.Read(&grp); err != nil {
				setError(err.Error(), result)
			} else {
				grp.Grp_name = hostgroupName
				num, err := o.Update(&grp)
				if err != nil {
					setError(err.Error(), result)
				} else if num > 0 {
					groupids := [1]string{strconv.Itoa(hostgroupId)}
					result["groupids"] = groupids
					log.Println("update groupid =", hostgroupId)
					log.Println("mysql row affected nums =", num)
				}
			}
		}
	}
	nodes["result"] = result
}
/**
* @function name: func templateCreate(nodes map[string]interface{})
* @description: This function gets template data for database insertion.
* @related issues: OWL-257, OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/22/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// templateCreate inserts a new template row (falcon_portal.tpl) and binds it
// to the host group given in params["groups"]; ids/errors go to
// nodes["result"].
func templateCreate(nodes map[string]interface{}) {
	log.Println("func templateCreate()")
	params := nodes["params"].(map[string]interface{})
	templateName := params["host"].(string)
	user := "zabbix"
	groups := params["groups"]
	groupid := groups.(map[string]interface{})["groupid"].(json.Number)
	hostgroupId := string(groupid)
	now := getNow()
	o := orm.NewOrm()
	o.Using("falcon_portal")
	tpl := Tpl{
		Tpl_name:    templateName,
		Create_user: user,
		Create_at:   now,
	}
	log.Println("tpl =", tpl)
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	id, err := o.Insert(&tpl)
	if err != nil {
		setError(err.Error(), result)
	} else {
		// strconv.Itoa already returns a string — no extra conversion needed.
		templateids := [1]string{strconv.Itoa(int(id))}
		result["templateids"] = templateids
		groupId, err := strconv.Atoi(hostgroupId)
		if err != nil {
			// An unparsable group id cannot be bound; record the error and stop
			// instead of inserting a grp_tpl row with Grp_id 0 as before.
			setError(err.Error(), result)
		} else {
			grp_tpl := Grp_tpl{
				Grp_id:    groupId,
				Tpl_id:    int(id),
				Bind_user: user,
			}
			log.Println("grp_tpl =", grp_tpl)
			if _, err = o.Insert(&grp_tpl); err != nil {
				setError(err.Error(), result)
			}
		}
	}
	nodes["result"] = result
}
/**
* @function name: func templateDelete(nodes map[string]interface{})
* @description: This function handles template.delete API requests.
* @related issues: OWL-257, OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/22/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// templateDelete handles template.delete: it removes templates and their host
// group bindings; deleted ids are reported in result["templateids"].
func templateDelete(nodes map[string]interface{}) {
	log.Println("func templateDelete()")
	params := nodes["params"].([]interface{})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	o := orm.NewOrm()
	// Use the same DB alias as every other falcon_portal handler; the original
	// omitted this, inconsistent with hostgroupDelete.
	o.Using("falcon_portal")
	sqlcmds := []string{
		"DELETE FROM falcon_portal.tpl WHERE id=?",
		"DELETE FROM falcon_portal.grp_tpl WHERE tpl_id=?",
	}
	log.Println("args =", sqlcmds)
	templateids := []string{}
	for _, sqlcmd := range sqlcmds {
		log.Println(sqlcmd)
		for _, templateId := range params {
			log.Println("templateId =", templateId)
			res, err := o.Raw(sqlcmd, templateId).Exec()
			if err != nil {
				setError(err.Error(), result)
				continue
			}
			num, _ := res.RowsAffected()
			// Only the tpl-table delete determines which ids actually existed.
			if num > 0 && sqlcmd == sqlcmds[0] {
				templateids = append(templateids, templateId.(string))
				log.Println("delete template id =", templateId)
				log.Println("mysql row affected nums =", num)
			}
		}
	}
	result["templateids"] = templateids
	nodes["result"] = result
}
/**
* @function name: func templateUpdate(nodes map[string]interface{})
* @description: This function updates template data.
* @related issues: OWL-257, OWL-093, OWL-086
* @param: nodes map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 09/22/2015
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// templateUpdate renames a template and/or rebinds it to a new set of host
// groups. params["templateid"] selects the template; params["name"] renames
// it; params["groups"] replaces all existing grp_tpl bindings.
func templateUpdate(nodes map[string]interface{}) {
	params := nodes["params"].(map[string]interface{})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	templateId, err := strconv.Atoi(params["templateid"].(string))
	if err != nil {
		// Without a valid template id nothing below can succeed; the original
		// continued with the zero value.
		setError(err.Error(), result)
		nodes["result"] = result
		return
	}
	o := orm.NewOrm()
	o.Using("falcon_portal")
	if _, ok := params["name"]; ok {
		templateName := params["name"].(string)
		log.Println("templateName =", templateName)
		if templateName != "" {
			tpl := Tpl{Id: templateId}
			if err := o.Read(&tpl); err != nil {
				setError(err.Error(), result)
			} else {
				log.Println("tpl =", tpl)
				tpl.Tpl_name = templateName
				num, err := o.Update(&tpl)
				if err != nil {
					setError(err.Error(), result)
				} else if num > 0 {
					templateids := [1]string{strconv.Itoa(templateId)}
					result["templateids"] = templateids
					log.Println("update template id =", templateId)
					log.Println("mysql row affected nums =", num)
				}
			}
		}
	}
	if _, ok := params["groups"]; ok {
		groups := params["groups"].([]interface{})
		log.Println("groups =", groups)
		count := len(groups) // was a hand-rolled counting loop
		log.Println("count =", count)
		if count > 0 {
			user := "zabbix"
			// Replace semantics: drop every existing binding, then re-insert.
			sqlcmd := "DELETE FROM falcon_portal.grp_tpl WHERE tpl_id=?"
			res, err := o.Raw(sqlcmd, templateId).Exec()
			if err != nil {
				setError(err.Error(), result)
			} else if num, _ := res.RowsAffected(); num > 0 {
				log.Println("mysql row affected nums =", num)
			}
			for _, group := range groups {
				log.Println("group =", group)
				groupId, err := strconv.Atoi(group.(map[string]interface{})["groupid"].(string))
				if err != nil {
					// Skip unparsable ids instead of binding Grp_id 0.
					setError(err.Error(), result)
					continue
				}
				log.Println("groupId =", groupId)
				grp_tpl := Grp_tpl{Grp_id: groupId, Tpl_id: templateId, Bind_user: user}
				log.Println("grp_tpl =", grp_tpl)
				if _, err = o.Insert(&grp_tpl); err != nil {
					setError(err.Error(), result)
				} else {
					templateids := [1]string{strconv.Itoa(templateId)}
					result["templateids"] = templateids
					log.Println("update template id =", templateId)
				}
			}
		}
	}
	nodes["result"] = result
}
/**
* @function name: func getFctoken() fctoken string
* @description: This function returns fctoken for API request.
* @related issues: OWL-159
* @param: void
* @return: fctoken string
* @author: Don Hsieh
* @since: 11/24/2015
* @last modified: 11/24/2015
* @called by: func apiAlert(rw http.ResponseWriter, req *http.Request)
* in query/http/zabbix.go
* func getMapValues(chartType string) map[string]interface{}
* in query/http/grafana.go
*/
// getFctoken derives the falcon API token: md5 of the configured token,
// prefixed with today's date (YYYYMMDD), then md5-hashed again.
func getFctoken() string {
	hasher := md5.New()
	io.WriteString(hasher, g.Config().Api.Token)
	firstHash := hex.EncodeToString(hasher.Sum(nil))
	datePrefix := time.Now().Format("20060102")
	hasher = md5.New()
	io.WriteString(hasher, datePrefix+firstHash)
	return hex.EncodeToString(hasher.Sum(nil))
}
/**
* @function name: func apiAlert(rw http.ResponseWriter, req *http.Request)
* @description: This function handles alarm API request.
* @related issues: OWL-159, OWL-093
* @param: rw http.ResponseWriter
* @param: req *http.Request
* @return: void
* @author: Don Hsieh
* @since: 09/29/2015
* @last modified: 11/24/2015
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// apiAlert forwards an alarm described by the request's query parameters to
// the configured falcon event endpoint and relays the endpoint's response.
func apiAlert(rw http.ResponseWriter, req *http.Request) {
	fcname := g.Config().Api.Name
	fctoken := getFctoken()
	param := req.URL.Query()
	log.Println("param =", param)
	// url.Values.Get returns "" for missing keys instead of panicking on an
	// empty-slice index as the original arr[0] pattern did.
	hostname := param.Get("endpoint")
	datetime := param.Get("time")
	trigger_id, err := strconv.Atoi(param.Get("stra_id"))
	if err != nil {
		log.Println(err.Error())
	}
	metric := param.Get("metric")
	step := param.Get("step")
	tpl_id := param.Get("tpl_id")
	zabbix_status := param.Get("status")
	zabbix_level := param.Get("priority")
	summary := "[OWL] " + metric + "_" + step + "_" + zabbix_level
	args := map[string]interface{}{
		"summary":       summary,
		"zabbix_status": zabbix_status, // "PROBLEM",
		"zabbix_level":  "Information", // "Information" or "High"
		"trigger_id":    trigger_id,
		"host_ip":       "",
		"hostname":      hostname,
		"event_id":      tpl_id,
		"template_name": "Template Server Basic Monitor",
		"datetime":      datetime,
		"fcname":        fcname,
		"fctoken":       fctoken,
	}
	log.Println("args =", args)
	bs, err := json.Marshal(args)
	if err != nil {
		log.Println("Error =", err.Error())
		return
	}
	url := g.Config().Api.Event
	log.Println("url =", url)
	reqAlert, err := http.NewRequest("POST", url, bytes.NewBuffer(bs))
	if err != nil {
		log.Println("Error =", err.Error())
		return
	}
	reqAlert.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(reqAlert)
	if err != nil {
		// Must return here: resp is nil on error and the original fell
		// through to defer resp.Body.Close(), causing a nil-pointer panic.
		log.Println("Error =", err.Error())
		return
	}
	defer resp.Body.Close()
	log.Println("response Status =", resp.Status) // 200 OK TypeOf(resp.Status): string
	log.Println("response Headers =", resp.Header)
	body, _ := ioutil.ReadAll(resp.Body)
	log.Println("response Body =", string(body))
	rw.Header().Set("Content-Type", "application/json; charset=UTF-8")
	rw.Write(body)
}
/**
* @function name: func setResponse(rw http.ResponseWriter, resp map[string]interface{})
* @description: This function sets content of response and returns it.
* @related issues: OWL-257
* @param: rw http.ResponseWriter
* @param: resp map[string]interface{}
* @return: void
* @author: Don Hsieh
* @since: 01/01/2016
* @last modified: 01/01/2016
* @called by: func apiParser(rw http.ResponseWriter, req *http.Request)
*/
// setResponse strips the request-only fields from resp, flattens the result
// (errors win over items), stamps the time and renders the map as JSON.
func setResponse(rw http.ResponseWriter, resp map[string]interface{}) {
	// delete on a missing key is a no-op, so no existence checks are needed.
	for _, key := range []string{"auth", "method", "params"} {
		delete(resp, key)
	}
	result := resp["result"].(map[string]interface{})
	if val, ok := result["error"]; ok {
		errs := val.([]string)
		if len(errs) > 0 {
			// Errors replace the whole result payload.
			delete(resp, "result")
			resp["error"] = errs
		} else {
			delete(result, "error")
			if items, found := result["items"]; found {
				resp["result"] = items
			}
		}
	}
	resp["time"] = getNow()
	RenderJson(rw, resp)
}
/**
* @function name: func apiParser(rw http.ResponseWriter, req *http.Request)
* @description: This function parses the method of API request.
* @related issues: OWL-254, OWL-085
* @param: rw http.ResponseWriter
* @param: req *http.Request
* @return: void
* @author: Don Hsieh
* @since: 09/11/2015
* @last modified: 12/29/2015
* @called by: http.HandleFunc("/api", apiParser)
* in func main()
*/
// apiParser dispatches Zabbix-style JSON-RPC requests on the "method" field.
// An empty body is treated as an alarm callback and handed to apiAlert.
func apiParser(rw http.ResponseWriter, req *http.Request) {
	log.Println("func apiParser(rw http.ResponseWriter, req *http.Request)")
	buf := new(bytes.Buffer)
	buf.ReadFrom(req.Body)
	log.Println(buf.Len())
	if buf.Len() == 0 {
		apiAlert(rw, req)
		return
	}
	s := buf.String() // Does a complete copy of the bytes in the buffer.
	log.Println("s =", s)
	// "parsed" instead of "json": the original shadowed the encoding/json
	// package name.
	parsed, err := simplejson.NewJson(buf.Bytes())
	if err != nil {
		// Without a parseable body there is no method to dispatch; the
		// original continued and would have dereferenced a nil simplejson.
		log.Println(err.Error())
		return
	}
	nodes, _ := parsed.Map()
	method := nodes["method"]
	log.Println(method)
	delete(nodes, "method")
	delete(nodes, "auth")
	switch method {
	case "host.create":
		hostCreate(nodes)
	case "host.delete":
		hostDelete(nodes)
	case "host.get":
		hostGet(nodes)
	case "host.update":
		hostUpdate(nodes)
	case "hostgroup.create":
		hostgroupCreate(nodes)
	case "hostgroup.delete":
		hostgroupDelete(nodes)
	case "hostgroup.get":
		hostgroupGet(nodes)
	case "hostgroup.update":
		hostgroupUpdate(nodes)
	case "template.create":
		templateCreate(nodes)
	case "template.delete":
		templateDelete(nodes)
	case "template.update":
		templateUpdate(nodes)
	}
}
/**
* @function name: func configZabbixRoutes()
* @description: This function handles API requests.
* @related issues: OWL-093, OWL-085
* @param: void
* @return: void
* @author: Don Hsieh
* @since: 09/09/2015
* @last modified: 10/21/2015
* @called by: func Start()
* in http/http.go
*/
// configZabbixRoutes registers the Zabbix-compatible /api endpoint on the
// default HTTP mux.
func configZabbixRoutes() {
	http.HandleFunc("/api", apiParser)
}
|
package toshl
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
)
// HTTPClient is an interface to define the client to access API resources
type HTTPClient interface {
	// Get fetches APIUrl (with optional queryString) and returns the body.
	Get(APIUrl, queryString string) (string, error)
	// Post creates a resource from JSONPayload and returns its new ID.
	Post(APIUrl, JSONPayload string) (string, error)
	// Update modifies a resource with JSONPayload and returns the body.
	Update(APIUrl, JSONPayload string) (string, error)
	// Delete removes the resource addressed by APIUrl.
	Delete(APIUrl string) error
}
// RestHTTPClient is a real implementation of the HTTPClient
type RestHTTPClient struct {
	// BaseURL is the API root; endpoint paths are appended after a "/".
	BaseURL string
	// Token is the bearer token sent in the Authorization header.
	Token string
	// Client is the underlying HTTP client used for every request.
	Client *http.Client
}
// setAuthenticationHeader attaches the bearer token to req.
func (c *RestHTTPClient) setAuthenticationHeader(req *http.Request) {
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.Token))
}
// setJSONContentTypeHeader marks the request body as JSON.
func (c *RestHTTPClient) setJSONContentTypeHeader(req *http.Request) {
	req.Header.Set("Content-Type", "application/json")
}
// setUserAgentHeader sets the library's User-Agent string on req.
func (c *RestHTTPClient) setUserAgentHeader(req *http.Request) {
	req.Header.Set("User-Agent", GetUserAgentString())
}
// getIDFromLocationHeader extracts the new resource's ID from the response's
// Location header.
func (c *RestHTTPClient) getIDFromLocationHeader(
	response *http.Response) (string, error) {
	locationHeader := response.Header.Get("Location")
	id, err := c.parseIDFromLocationHeader(locationHeader)
	if err != nil {
		// log.Fatal would terminate the caller's process from library code and
		// made the return below unreachable; log and propagate instead.
		log.Print("Location URL parsing: ", err)
		return "", err
	}
	return id, nil
}
// parseIDFromLocationHeader returns the last path segment of locationURL,
// which the API uses as the resource ID.
func (c *RestHTTPClient) parseIDFromLocationHeader(
	locationURL string) (string, error) {
	guid, err := url.Parse(locationURL)
	if err != nil {
		// log.Print, not log.Fatal: a malformed header must not kill the
		// process, and the error is already returned to the caller.
		log.Print("Location URL parsing: ", err)
		return "", err
	}
	values := strings.Split(guid.Path, "/")
	if len(values) > 1 {
		id := values[len(values)-1]
		return id, nil
	}
	return "", errors.New("Cannot parse resource ID")
}
// Get takes an API endpoint and return a JSON string
func (c *RestHTTPClient) Get(APIUrl, queryString string) (string, error) {
url := c.BaseURL + "/" + APIUrl
if queryString != "" {
url = url + "?" + queryString
}
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return "", err
}
// Set authorization token
c.setAuthenticationHeader(req)
// Set User-Agent header
c.setUserAgentHeader(req)
resp, err := c.Client.Do(req)
if err != nil {
log.Fatal("Do: ", err)
return "", err
}
defer resp.Body.Close()
bs, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal("ReadAll: ", err)
return "", err
}
return string(bs), nil
}
// Post takes an API endpoint and a JSON payload and return string ID
func (c *RestHTTPClient) Post(APIUrl, JSONPayload string) (string, error) {
url := c.BaseURL + "/" + APIUrl
var jsonStr = []byte(JSONPayload)
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
if err != nil {
log.Fatal("NewRequest: ", err)
return "", err
}
// Set authorization token
c.setAuthenticationHeader(req)
// Set JSON content type
c.setJSONContentTypeHeader(req)
// Set User-Agent header
c.setUserAgentHeader(req)
resp, err := c.Client.Do(req)
if err != nil {
log.Fatal("Do: ", err)
return "", err
}
defer resp.Body.Close()
// Parse Location header to get ID
id, err := c.getIDFromLocationHeader(resp)
if err != nil {
log.Fatal("Do: ", err)
return "", err
}
return id, nil
}
// Update takes an API endpoint and a JSON payload and update the resource
func (c *RestHTTPClient) Update(APIUrl, JSONPayload string) (string, error) {
url := c.BaseURL + "/" + APIUrl
var jsonStr = []byte(JSONPayload)
req, err := http.NewRequest("PUT", url, bytes.NewBuffer(jsonStr))
if err != nil {
log.Fatal("NewRequest: ", err)
return "", err
}
// Set authorization token
c.setAuthenticationHeader(req)
// Set JSON content type
c.setJSONContentTypeHeader(req)
// Set User-Agent header
c.setUserAgentHeader(req)
resp, err := c.Client.Do(req)
if err != nil {
log.Fatal("Do: ", err)
return "", err
}
defer resp.Body.Close()
bs, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal("ReadAll: ", err)
return "", err
}
return string(bs), nil
}
// Delete removes the Account having the ID specified in the endpoint
func (c *RestHTTPClient) Delete(APIUrl string) error {
url := c.BaseURL + "/" + APIUrl
req, err := http.NewRequest("DELETE", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return err
}
// Set authorization token
c.setAuthenticationHeader(req)
// Set User-Agent header
c.setUserAgentHeader(req)
_, err = c.Client.Do(req)
if err != nil {
log.Fatal("Do: ", err)
return err
}
return nil
}
Add possibility to set a Timeout for the http.Client
package toshl
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"time"
)
// HTTPClient is an interface to define the client to access API resources
type HTTPClient interface {
Get(APIUrl, queryString string) (string, error)
Post(APIUrl, JSONPayload string) (string, error)
Update(APIUrl, JSONPayload string) (string, error)
Delete(APIUrl string) error
}
// RestHTTPClient is a real implementation of the HTTPClient
type RestHTTPClient struct {
	// BaseURL is the API root; endpoint paths are appended after a "/".
	BaseURL string
	// Token is the bearer token sent in the Authorization header.
	Token string
	// Client is the underlying HTTP client used for every request.
	Client *http.Client
}
// setAuthenticationHeader attaches the bearer token to req.
func (c *RestHTTPClient) setAuthenticationHeader(req *http.Request) {
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.Token))
}
// setJSONContentTypeHeader marks the request body as JSON.
func (c *RestHTTPClient) setJSONContentTypeHeader(req *http.Request) {
	req.Header.Set("Content-Type", "application/json")
}
// setUserAgentHeader sets the library's User-Agent string on req.
func (c *RestHTTPClient) setUserAgentHeader(req *http.Request) {
	req.Header.Set("User-Agent", GetUserAgentString())
}
// getIDFromLocationHeader extracts the new resource's ID from the response's
// Location header.
func (c *RestHTTPClient) getIDFromLocationHeader(
	response *http.Response) (string, error) {
	locationHeader := response.Header.Get("Location")
	id, err := c.parseIDFromLocationHeader(locationHeader)
	if err != nil {
		// log.Fatal would terminate the caller's process from library code and
		// made the return below unreachable; log and propagate instead.
		log.Print("Location URL parsing: ", err)
		return "", err
	}
	return id, nil
}
// parseIDFromLocationHeader returns the last path segment of locationURL,
// which the API uses as the resource ID.
func (c *RestHTTPClient) parseIDFromLocationHeader(
	locationURL string) (string, error) {
	guid, err := url.Parse(locationURL)
	if err != nil {
		// log.Print, not log.Fatal: a malformed header must not kill the
		// process, and the error is already returned to the caller.
		log.Print("Location URL parsing: ", err)
		return "", err
	}
	values := strings.Split(guid.Path, "/")
	if len(values) > 1 {
		id := values[len(values)-1]
		return id, nil
	}
	return "", errors.New("Cannot parse resource ID")
}
// Get takes an API endpoint and return a JSON string
// Get performs an authenticated GET on BaseURL/APIUrl (plus an optional query
// string) and returns the raw response body.
func (c *RestHTTPClient) Get(APIUrl, queryString string) (string, error) {
	url := c.BaseURL + "/" + APIUrl
	if queryString != "" {
		url = url + "?" + queryString
	}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		log.Print("NewRequest: ", err) // was log.Fatal: a library must not exit the process
		return "", err
	}
	// Set authorization token
	c.setAuthenticationHeader(req)
	// Set User-Agent header
	c.setUserAgentHeader(req)
	resp, err := c.Client.Do(req)
	if err != nil {
		log.Print("Do: ", err)
		return "", err
	}
	defer resp.Body.Close()
	// NOTE(review): non-2xx responses are returned as plain body text; callers
	// cannot distinguish them — consider checking resp.StatusCode.
	bs, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Print("ReadAll: ", err)
		return "", err
	}
	return string(bs), nil
}
// Post takes an API endpoint and a JSON payload and return string ID
// Post sends JSONPayload to BaseURL/APIUrl and returns the ID parsed from the
// response's Location header.
func (c *RestHTTPClient) Post(APIUrl, JSONPayload string) (string, error) {
	url := c.BaseURL + "/" + APIUrl
	var jsonStr = []byte(JSONPayload)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
	if err != nil {
		log.Print("NewRequest: ", err) // was log.Fatal: a library must not exit the process
		return "", err
	}
	// Set authorization token
	c.setAuthenticationHeader(req)
	// Set JSON content type
	c.setJSONContentTypeHeader(req)
	// Set User-Agent header
	c.setUserAgentHeader(req)
	resp, err := c.Client.Do(req)
	if err != nil {
		log.Print("Do: ", err)
		return "", err
	}
	defer resp.Body.Close()
	// Parse Location header to get ID
	id, err := c.getIDFromLocationHeader(resp)
	if err != nil {
		// Correct label (the original logged "Do: " here) and no fatal exit.
		log.Print("getIDFromLocationHeader: ", err)
		return "", err
	}
	return id, nil
}
// Update takes an API endpoint and a JSON payload and update the resource
// Update sends a PUT with JSONPayload to BaseURL/APIUrl and returns the
// response body.
func (c *RestHTTPClient) Update(APIUrl, JSONPayload string) (string, error) {
	url := c.BaseURL + "/" + APIUrl
	var jsonStr = []byte(JSONPayload)
	req, err := http.NewRequest("PUT", url, bytes.NewBuffer(jsonStr))
	if err != nil {
		log.Print("NewRequest: ", err) // was log.Fatal: a library must not exit the process
		return "", err
	}
	// Set authorization token
	c.setAuthenticationHeader(req)
	// Set JSON content type
	c.setJSONContentTypeHeader(req)
	// Set User-Agent header
	c.setUserAgentHeader(req)
	resp, err := c.Client.Do(req)
	if err != nil {
		log.Print("Do: ", err)
		return "", err
	}
	defer resp.Body.Close()
	bs, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Print("ReadAll: ", err)
		return "", err
	}
	return string(bs), nil
}
// Delete removes the Account having the ID specified in the endpoint
// Delete removes the resource having the ID specified in the endpoint.
func (c *RestHTTPClient) Delete(APIUrl string) error {
	url := c.BaseURL + "/" + APIUrl
	req, err := http.NewRequest("DELETE", url, nil)
	if err != nil {
		log.Print("NewRequest: ", err) // was log.Fatal: a library must not exit the process
		return err
	}
	// Set authorization token
	c.setAuthenticationHeader(req)
	// Set User-Agent header
	c.setUserAgentHeader(req)
	resp, err := c.Client.Do(req)
	if err != nil {
		log.Print("Do: ", err)
		return err
	}
	// Close the response body so the transport can reuse the connection; the
	// original discarded the response, leaking the body.
	resp.Body.Close()
	return nil
}
// SetTimeoutSeconds sets an overall per-request timeout on the underlying
// http.Client. Assumes c.Client is already initialized — TODO confirm callers
// never invoke this on a zero-value RestHTTPClient.
func (c *RestHTTPClient) SetTimeoutSeconds(timeout int) {
	c.Client.Timeout = time.Duration(timeout) * time.Second
}
|
/*** Copyright (c) 2016, University of Florida Research Foundation, Inc. ***
*** For more information please refer to the LICENSE.md file ***/
package gorods
// #include "wrapper.h"
import "C"
import (
"fmt"
"html/template"
"log"
"mime"
"net/http"
"path/filepath"
"strconv"
"strings"
)
// FileServer returns an http.Handler that serves the iRODS tree described by
// opts (trailing slashes on opts.Path are ignored).
func FileServer(opts FSOptions) http.Handler {
	return &HttpHandler{
		client: opts.Client,
		path:   strings.TrimRight(opts.Path, "/"),
		opts:   opts,
	}
}
// FSOptions configures FileServer.
type FSOptions struct {
	// Client is the iRODS client used to open connections.
	Client *Client
	// Path is the iRODS base path to serve; a trailing "/" is stripped.
	Download bool_placeholder_removed
}
// HttpHandler implements http.Handler over an iRODS client; created by
// FileServer.
type HttpHandler struct {
	client *Client
	path   string
	opts   FSOptions
}
// check logs err when non-nil; shared helper for template parse/execute
// errors in ServeHTTP.
var check func(error) = func(err error) {
	if err != nil {
		log.Print(err)
	}
}
// tpl renders a collection listing with per-item metadata/ACL modals.
// Fix: the data-object row closed its cell with a stray duplicate </td>.
const tpl = `
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Collection: {{.Path}}</title>
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
<!-- Optional theme -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap-theme.min.css" integrity="sha384-rHyoN1iRsVXV4nD0JutlnGaslCJuC7uwjduW9SVrLvRYooPp2bWYgmgJQIXwl/Sp" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-3.1.1.min.js" integrity="sha256-hVVnYaiADRTO2PzUGmuLJr8BLUSjGIZsDYGmIJLv2b8=" crossorigin="anonymous"></script>
<!-- Latest compiled and minified JavaScript -->
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
<style type="text/css">
.table td.fit,
.table th.fit {
white-space: nowrap;
width: 1%;
}
</style>
<script type="text/javascript">
$(function() {
$('.show-meta-modal').click(function() {
$('.modal', $(this).parent()).modal('show');
});
});
</script>
</head>
<body>
<nav class="navbar navbar-default navbar-fixed-top">
<div class="container">
<div class="navbar-header">
<a class="navbar-brand" href="#">GoRODS HTTP FileServer</a>
</div>
<div id="navbar" class="navbar-collapse collapse">
<ul class="nav navbar-nav navbar-right">
{{range headerLinks}}
<li><a href="{{ index . "url" }}">{{ index . "name" }}</a></li>
{{end}}
</ul>
</div><!--/.nav-collapse -->
</div>
</nav>
<div class="container">
<br /><br /><br />
<h3>{{.Path}}</h3>
<table class="table table-hover">
<thead>
<tr>
<th>Name</th>
<th>Size</th>
<th>Type</th>
<th></th>
</tr>
</thead>
<tbody>
{{ $length := len headerLinks }}{{ if ne $length 0 }}
<tr>
<th><a href="..">..</a></th>
<td></td>
<td>Collection</td>
<td></td>
</tr>
{{ end }}
{{range .Collections}}
<tr>
<th><a href="{{.Name}}/">{{.Name}}</a></th>
<td>{{prettySize .Size}}</td>
<td>Collection</td>
<td>
<span style="cursor:pointer;color:#337ab7;" class="glyphicon glyphicon-th-list show-meta-modal"></span>
<!-- Modal -->
<div class="modal fade" tabindex="-1" role="dialog" aria-labelledby="myModalLabel">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
<h4 class="modal-title" id="myModalLabel">Collection "{{.Name}}"</h4>
</div>
<div class="modal-body">
<h4>Metadata</h4>
<table class="table table-hover">
<thead>
<tr>
<th>Attribute</th>
<th>Value</th>
<th>Units</th>
</tr>
</thead>
<tbody>
{{ $metas := colMeta . }}{{ range $metas }}
<tr>
<td>{{ index . "attribute" }}</td>
<td>{{ index . "value" }}</td>
<td>{{ index . "units" }}</td>
</tr>
{{else}}
<tr><td colspan="3" style="text-align:center;">No Metadata Found</td></tr>
{{ end }}
</tbody>
</table>
<h4>ACL</h4>
<table class="table table-hover">
<thead>
<tr>
<th>Name</th>
<th>Access Level</th>
<th>Type</th>
</tr>
</thead>
<tbody>
{{ range .ACL }}
<tr>
<td>{{ .AccessObject.Name }}</td>
<td>{{ getTypeString .AccessLevel }}</td>
<td>{{ getTypeString .Type }}</td>
</tr>
{{else}}
<tr><td colspan="3" style="text-align:center;">No ACLs Found</td></tr>
{{ end }}
</tbody>
</table>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
</td>
</tr>
{{end}}
{{range .DataObjs}}
<tr>
<th><a href="{{.Name}}">{{.Name}}</a></th>
<td>{{prettySize .Size}}</td>
<td>Data Object</td>
<td><a href="{{.Name}}?download=1"><span style="margin-right:10px;" class="glyphicon glyphicon-download-alt"></span></a>
<span style="cursor:pointer;color:#337ab7;" class="glyphicon glyphicon-th-list show-meta-modal"></span>
<!-- Modal -->
<div class="modal fade" tabindex="-1" role="dialog" aria-labelledby="myModalLabel">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
<h4 class="modal-title" id="myModalLabel">Data Object "{{.Name}}"</h4>
</div>
<div class="modal-body">
<h4>Metadata</h4>
<table class="table table-hover">
<thead>
<tr>
<th>Attribute</th>
<th>Value</th>
<th>Units</th>
</tr>
</thead>
<tbody>
{{ $metas := colMeta . }}{{ range $metas }}
<tr>
<td>{{ index . "attribute" }}</td>
<td>{{ index . "value" }}</td>
<td>{{ index . "units" }}</td>
</tr>
{{else}}
<tr><td colspan="3" style="text-align:center;">No Metadata Found</td></tr>
{{ end }}
</tbody>
</table>
<h4>ACL</h4>
<table class="table table-hover">
<thead>
<tr>
<th>Name</th>
<th>Access Level</th>
<th>Type</th>
</tr>
</thead>
<tbody>
{{ range .ACL }}
<tr>
<td>{{ .AccessObject.Name }}</td>
<td>{{ getTypeString .AccessLevel }}</td>
<td>{{ getTypeString .Type }}</td>
</tr>
{{else}}
<tr><td colspan="3" style="text-align:center;">No ACLs Found</td></tr>
{{ end }}
</tbody>
</table>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
</td>
</tr>
{{end}}
</tbody>
</table>
</div>
</body>
</html>
`
// ServeHTTP serves iRODS paths over HTTP: data objects are streamed (inline
// or as a forced download) and collections are rendered as an HTML listing
// using tpl.
func (handler *HttpHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
	handlerPath := strings.TrimRight(handler.path, "/")
	urlPath := strings.TrimRight(request.URL.Path, "/")
	// openPath is the iRODS path addressed by the request URL.
	openPath := strings.TrimRight(handlerPath+"/"+urlPath, "/")
	if er := handler.client.OpenConnection(func(con *Connection) {
		if objType, err := con.PathType(openPath); err == nil {
			if objType == DataObjType {
				// Stream a single data object.
				if obj, er := con.DataObject(openPath); er == nil {
					query := request.URL.Query()
					if handler.opts.Download || query.Get("download") != "" {
						// Forced download: attachment disposition + generic type.
						response.Header().Set("Content-Disposition", "attachment; filename="+obj.Name())
						response.Header().Set("Content-type", "application/octet-stream")
					} else {
						// Inline: guess the MIME type from the file extension,
						// falling back to application/octet-stream.
						var mimeType string
						ext := filepath.Ext(openPath)
						if ext != "" {
							mimeType = mime.TypeByExtension(ext)
							if mimeType == "" {
								log.Printf("Can't find mime type for %s extension", ext)
								mimeType = "application/octet-stream"
							}
						} else {
							mimeType = "application/octet-stream"
						}
						response.Header().Set("Content-type", mimeType)
					}
					response.Header().Set("Content-Length", strconv.FormatInt(obj.Size(), 10))
					// Stream in ~1 MB chunks so large objects are never fully
					// buffered in memory.
					if readEr := obj.ReadChunk(1024000, func(chunk []byte) {
						response.Write(chunk)
					}); readEr != nil {
						log.Print(readEr)
					}
				} else {
					log.Print(er)
				}
			} else if objType == CollectionType {
				// Canonicalize collection URLs to end in "/" so that the
				// relative links in the listing resolve correctly.
				uP := request.URL.Path
				if uP != "/" && uP != "" && uP[len(uP)-1:] != "/" {
					http.Redirect(response, request, (uP + "/"), http.StatusFound)
					return
				}
				if col, er := con.Collection(CollectionOptions{
					Path:      openPath,
					Recursive: false,
					GetRepls:  false,
				}); er == nil {
					response.Header().Set("Content-Type", "text/html")
					t, err := template.New("collectionList").Funcs(template.FuncMap{
						// prettySize renders a byte count in human-readable
						// binary units (KiB/MiB/GiB/TiB).
						"prettySize": func(size int64) string {
							if size < 1024 {
								return fmt.Sprintf("%v bytes", size)
							} else if size < 1048576 { // 1 MiB
								return fmt.Sprintf("%.1f KiB", float64(size)/1024.0)
							} else if size < 1073741824 { // 1 GiB
								return fmt.Sprintf("%.1f MiB", float64(size)/1048576.0)
							} else if size < 1099511627776 { // 1 TiB
								return fmt.Sprintf("%.1f GiB", float64(size)/1073741824.0)
							} else {
								return fmt.Sprintf("%.1f TiB", float64(size)/1099511627776.0)
							}
						},
						// headerLinks builds the breadcrumb trail for the path
						// below the handler root; empty at the root itself.
						"headerLinks": func() []map[string]string {
							headerLinks := make([]map[string]string, 0)
							if openPath == handlerPath {
								return headerLinks
							}
							p := strings.TrimPrefix(openPath, handlerPath+"/")
							frags := strings.Split(p, "/")
							for i := range frags {
								var path string
								if i > 0 {
									path = strings.Join(frags[0:i], "/") + "/"
								} else {
									path = ""
								}
								headerLinks = append(headerLinks, map[string]string{
									"name": frags[i],
									"url":  (handler.opts.StripPrefix + path + frags[i] + "/"),
								})
							}
							return headerLinks
						},
						// colMeta flattens an object's AVU metadata for the
						// template (used for both collections and data objects).
						"colMeta": func(col IRodsObj) []map[string]string {
							mc, err := col.Meta()
							if err != nil {
								fmt.Printf("%v\n", err)
							}
							metaMap := make([]map[string]string, 0)
							mc.Each(func(m *Meta) {
								metaMap = append(metaMap, map[string]string{
									"attribute": m.Attribute,
									"value":     m.Value,
									"units":     m.Units,
								})
							})
							return metaMap
						},
						"getTypeString": getTypeString,
					}).Parse(tpl)
					check(err)
					err = t.Execute(response, col)
					check(err)
				} else {
					log.Print(er)
				}
			}
		} else {
			// Path neither a data object nor a collection: plain 404 page.
			response.Header().Set("Content-Type", "text/html")
			response.WriteHeader(http.StatusNotFound)
			response.Write([]byte("<h3>404 Not Found: " + openPath + "</h3>"))
			log.Print(err)
		}
	}); er != nil {
		log.Print(er)
		return
	}
}
Simplified meta template code for HTTP interface
/*** Copyright (c) 2016, University of Florida Research Foundation, Inc. ***
*** For more information please refer to the LICENSE.md file ***/
package gorods
// #include "wrapper.h"
import "C"
import (
"fmt"
"html/template"
"log"
"mime"
"net/http"
"path/filepath"
"strconv"
"strings"
)
// FileServer builds an http.Handler that serves iRODS collections and data
// objects rooted at opts.Path, using opts.Client to open connections.
// Trailing slashes on the configured path are stripped so it can be joined
// with request paths later.
func FileServer(opts FSOptions) http.Handler {
	return &HttpHandler{
		client: opts.Client,
		path:   strings.TrimRight(opts.Path, "/"),
		opts:   opts,
	}
}
// FSOptions configures FileServer.
type FSOptions struct {
	// Client is the GoRODS client used to open an iRODS connection per request.
	Client *Client
	// Path is the iRODS collection that acts as the server's root.
	Path string
	// Download, when true, forces every data object to be served as an
	// attachment (Content-Disposition) instead of with a guessed MIME type.
	Download bool
	// StripPrefix is prepended to the breadcrumb link URLs rendered in the
	// collection listing; set it when the handler is mounted under a URL prefix.
	StripPrefix string
}
// HttpHandler implements http.Handler for iRODS paths; construct it via
// FileServer rather than directly.
type HttpHandler struct {
	client *Client   // connection source used on each request
	path   string    // root iRODS collection, trailing "/" trimmed
	opts   FSOptions // original options, kept for Download/StripPrefix
}
// check logs err when it is non-nil. It is used for template errors that
// occur after the response has already started streaming, where nothing
// better than logging can be done.
var check = func(err error) {
	if err == nil {
		return
	}
	log.Print(err)
}
// tpl is the Bootstrap-based HTML template used to render a collection
// listing. It must be parsed with the funcs prettySize, headerLinks, and
// getTypeString registered (see ServeHTTP). Fix: the data-object row closed
// its cell with a stray duplicate "</td></td>"; it now matches the
// collection row with a single "</td>".
const tpl = `
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Collection: {{.Path}}</title>
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
<!-- Optional theme -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap-theme.min.css" integrity="sha384-rHyoN1iRsVXV4nD0JutlnGaslCJuC7uwjduW9SVrLvRYooPp2bWYgmgJQIXwl/Sp" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-3.1.1.min.js" integrity="sha256-hVVnYaiADRTO2PzUGmuLJr8BLUSjGIZsDYGmIJLv2b8=" crossorigin="anonymous"></script>
<!-- Latest compiled and minified JavaScript -->
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
<style type="text/css">
.table td.fit,
.table th.fit {
white-space: nowrap;
width: 1%;
}
</style>
<script type="text/javascript">
$(function() {
$('.show-meta-modal').click(function() {
$('.modal', $(this).parent()).modal('show');
});
});
</script>
</head>
<body>
<nav class="navbar navbar-default navbar-fixed-top">
<div class="container">
<div class="navbar-header">
<a class="navbar-brand" href="#">GoRODS HTTP FileServer</a>
</div>
<div id="navbar" class="navbar-collapse collapse">
<ul class="nav navbar-nav navbar-right">
{{range headerLinks}}
<li><a href="{{ index . "url" }}">{{ index . "name" }}</a></li>
{{end}}
</ul>
</div><!--/.nav-collapse -->
</div>
</nav>
<div class="container">
<br /><br /><br />
<h3>{{.Path}}</h3>
<table class="table table-hover">
<thead>
<tr>
<th>Name</th>
<th>Size</th>
<th>Type</th>
<th></th>
</tr>
</thead>
<tbody>
{{ $length := len headerLinks }}{{ if ne $length 0 }}
<tr>
<th><a href="..">..</a></th>
<td></td>
<td>Collection</td>
<td></td>
</tr>
{{ end }}
{{range .Collections}}
<tr>
<th><a href="{{.Name}}/">{{.Name}}</a></th>
<td>{{prettySize .Size}}</td>
<td>Collection</td>
<td>
<span style="cursor:pointer;color:#337ab7;" class="glyphicon glyphicon-th-list show-meta-modal"></span>
<!-- Modal -->
<div class="modal fade" tabindex="-1" role="dialog" aria-labelledby="myModalLabel">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
<h4 class="modal-title" id="myModalLabel">Collection "{{.Name}}"</h4>
</div>
<div class="modal-body">
<h4>Metadata</h4>
<table class="table table-hover">
<thead>
<tr>
<th>Attribute</th>
<th>Value</th>
<th>Units</th>
</tr>
</thead>
<tbody>
{{ range .Meta.Metas }}
<tr>
<td>{{ .Attribute }}</td>
<td>{{ .Value }}</td>
<td>{{ .Units }}</td>
</tr>
{{else}}
<tr><td colspan="3" style="text-align:center;">No Metadata Found</td></tr>
{{ end }}
</tbody>
</table>
<h4>ACL</h4>
<table class="table table-hover">
<thead>
<tr>
<th>Name</th>
<th>Access Level</th>
<th>Type</th>
</tr>
</thead>
<tbody>
{{ range .ACL }}
<tr>
<td>{{ .AccessObject.Name }}</td>
<td>{{ getTypeString .AccessLevel }}</td>
<td>{{ getTypeString .Type }}</td>
</tr>
{{else}}
<tr><td colspan="3" style="text-align:center;">No ACLs Found</td></tr>
{{ end }}
</tbody>
</table>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
</td>
</tr>
{{end}}
{{range .DataObjs}}
<tr>
<th><a href="{{.Name}}">{{.Name}}</a></th>
<td>{{prettySize .Size}}</td>
<td>Data Object</td>
<td><a href="{{.Name}}?download=1"><span style="margin-right:10px;" class="glyphicon glyphicon-download-alt"></span></a>
<span style="cursor:pointer;color:#337ab7;" class="glyphicon glyphicon-th-list show-meta-modal"></span>
<!-- Modal -->
<div class="modal fade" tabindex="-1" role="dialog" aria-labelledby="myModalLabel">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
<h4 class="modal-title" id="myModalLabel">Data Object "{{.Name}}"</h4>
</div>
<div class="modal-body">
<h4>Metadata</h4>
<table class="table table-hover">
<thead>
<tr>
<th>Attribute</th>
<th>Value</th>
<th>Units</th>
</tr>
</thead>
<tbody>
{{ range .Meta.Metas }}
<tr>
<td>{{ .Attribute }}</td>
<td>{{ .Value }}</td>
<td>{{ .Units }}</td>
</tr>
{{else}}
<tr><td colspan="3" style="text-align:center;">No Metadata Found</td></tr>
{{ end }}
</tbody>
</table>
<h4>ACL</h4>
<table class="table table-hover">
<thead>
<tr>
<th>Name</th>
<th>Access Level</th>
<th>Type</th>
</tr>
</thead>
<tbody>
{{ range .ACL }}
<tr>
<td>{{ .AccessObject.Name }}</td>
<td>{{ getTypeString .AccessLevel }}</td>
<td>{{ getTypeString .Type }}</td>
</tr>
{{else}}
<tr><td colspan="3" style="text-align:center;">No ACLs Found</td></tr>
{{ end }}
</tbody>
</table>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
</td>
</tr>
{{end}}
</tbody>
</table>
</div>
</body>
</html>
`
// ServeHTTP implements http.Handler. It maps the request path onto an iRODS
// path under handler.path and either streams a data object back to the
// client or renders an HTML listing for a collection. Paths that cannot be
// resolved get a 404 page. Fix: a template parse error previously was only
// logged via check(err), after which Execute was still called on a nil
// *Template (a guaranteed panic); it now logs and returns.
func (handler *HttpHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {

	handlerPath := strings.TrimRight(handler.path, "/")
	urlPath := strings.TrimRight(request.URL.Path, "/")

	// Join the request path below the configured iRODS root.
	openPath := strings.TrimRight(handlerPath+"/"+urlPath, "/")

	if er := handler.client.OpenConnection(func(con *Connection) {
		if objType, err := con.PathType(openPath); err == nil {
			if objType == DataObjType {
				if obj, er := con.DataObject(openPath); er == nil {
					query := request.URL.Query()

					// Serve as an attachment when forced globally or
					// requested via ?download=...; otherwise guess a MIME
					// type from the file extension.
					if handler.opts.Download || query.Get("download") != "" {
						response.Header().Set("Content-Disposition", "attachment; filename="+obj.Name())
						response.Header().Set("Content-Type", "application/octet-stream")
					} else {
						var mimeType string

						ext := filepath.Ext(openPath)
						if ext != "" {
							mimeType = mime.TypeByExtension(ext)
							if mimeType == "" {
								log.Printf("Can't find mime type for %s extension", ext)
								mimeType = "application/octet-stream"
							}
						} else {
							mimeType = "application/octet-stream"
						}

						response.Header().Set("Content-Type", mimeType)
					}

					response.Header().Set("Content-Length", strconv.FormatInt(obj.Size(), 10))

					// Stream the object in ~1 MB chunks. Write errors are
					// intentionally ignored: headers have already been sent.
					if readEr := obj.ReadChunk(1024000, func(chunk []byte) {
						response.Write(chunk)
					}); readEr != nil {
						log.Print(readEr)
					}
				} else {
					log.Print(er)
				}
			} else if objType == CollectionType {

				// Collections must be addressed with a trailing slash so the
				// relative links in the listing resolve correctly.
				uP := request.URL.Path
				if uP != "/" && uP != "" && uP[len(uP)-1:] != "/" {
					http.Redirect(response, request, (uP + "/"), http.StatusFound)
					return
				}

				if col, er := con.Collection(CollectionOptions{
					Path:      openPath,
					Recursive: false,
					GetRepls:  false,
				}); er == nil {
					response.Header().Set("Content-Type", "text/html")

					// The template is parsed per request because headerLinks
					// closes over request-specific state (openPath).
					t, err := template.New("collectionList").Funcs(template.FuncMap{
						// prettySize renders a byte count as a human-readable string.
						"prettySize": func(size int64) string {
							if size < 1024 {
								return fmt.Sprintf("%v bytes", size)
							} else if size < 1048576 { // 1 MiB
								return fmt.Sprintf("%.1f KiB", float64(size)/1024.0)
							} else if size < 1073741824 { // 1 GiB
								return fmt.Sprintf("%.1f MiB", float64(size)/1048576.0)
							} else if size < 1099511627776 { // 1 TiB
								return fmt.Sprintf("%.1f GiB", float64(size)/1073741824.0)
							} else {
								return fmt.Sprintf("%.1f TiB", float64(size)/1099511627776.0)
							}
						},
						// headerLinks builds the breadcrumb entries for the
						// navbar, one per path fragment below the root.
						"headerLinks": func() []map[string]string {
							headerLinks := make([]map[string]string, 0)

							if openPath == handlerPath {
								return headerLinks
							}

							p := strings.TrimPrefix(openPath, handlerPath+"/")
							frags := strings.Split(p, "/")

							for i := range frags {
								var path string
								if i > 0 {
									path = strings.Join(frags[0:i], "/") + "/"
								} else {
									path = ""
								}

								headerLinks = append(headerLinks, map[string]string{
									"name": frags[i],
									"url":  (handler.opts.StripPrefix + path + frags[i] + "/"),
								})
							}

							return headerLinks
						},
						"getTypeString": getTypeString,
					}).Parse(tpl)
					if err != nil {
						// tpl is a package constant, so a parse error is a
						// programming bug; bail out rather than calling
						// Execute on a nil template, which would panic.
						log.Print(err)
						return
					}

					check(t.Execute(response, col))
				} else {
					log.Print(er)
				}
			}
		} else {
			// Path could not be resolved to a data object or collection.
			response.Header().Set("Content-Type", "text/html")
			response.WriteHeader(http.StatusNotFound)
			response.Write([]byte("<h3>404 Not Found: " + openPath + "</h3>"))
			log.Print(err)
		}
	}); er != nil {
		log.Print(er)
		return
	}
}
|
package sudoku
import (
"fmt"
"log"
"math"
"os"
"sync"
)
//The number of solves we should average the signals together for before asking them for their difficulty
//Note: this should be set to the num-solves parameter used to train the currently configured weights.
const _NUM_SOLVES_FOR_DIFFICULTY = 10

//The list of techniques that HumanSolve will use to try to solve the puzzle, with the oddball Guess split out.
var (
	//All of the 'normal' Techniques that will be used to solve the puzzle
	Techniques []SolveTechnique
	//The special GuessTechnique that is used only if no other techniques find options.
	GuessTechnique SolveTechnique
	//Every technique that HumanSolve could ever use, including the oddball Guess technique.
	AllTechniques []SolveTechnique
	//Every variant name for every TechniqueVariant that HumanSolve could ever use.
	AllTechniqueVariants []string
)

//The actual techniques are initialized in hs_techniques.go, and actually defined in hst_*.go files.

//Worst case scenario, how many times we'd call HumanSolve to get a difficulty.
const _MAX_DIFFICULTY_ITERATIONS = 50

//TODO: consider relaxing this even more.
//How close we have to get to the average to feel comfortable our difficulty is converging.
const _DIFFICULTY_CONVERGENCE = 0.005
//SolveDirections is a list of SolveSteps that, when applied in order to its
//Grid, would cause it to be solved (except if IsHint is true).
type SolveDirections struct {
	//A copy of the Grid when the SolveDirections was generated. Private;
	//grab a fresh copy via SolveDirections.Grid().
	gridSnapshot *Grid
	//The list of steps that, when applied in order, would cause the
	//SolveDirection's Grid() to be solved.
	Steps []*SolveStep
	//IsHint is whether the SolveDirections tells how to solve the given grid
	//or just what the next set of steps leading to a fill step is. If true,
	//the last step in Steps will be IsFill().
	IsHint bool
}
//SolveStep is a step to fill in a number in a cell or narrow down the possibilities in a cell to
//get it closer to being solved. SolveSteps model techniques that humans would use to solve a
//puzzle.
type SolveStep struct {
	//The technique that was used to identify that this step is logically valid at this point in the solution.
	Technique SolveTechnique
	//The cells that will be affected by the technique (either the number to fill in or possibilities to exclude).
	TargetCells CellSlice
	//The numbers we will remove (or, in the case of Fill, add) to the TargetCells.
	TargetNums IntSlice
	//The cells that together lead the technique to logically apply in this case; the cells behind the reasoning
	//why the TargetCells will be mutated in the way specified by this SolveStep.
	PointerCells CellSlice
	//The specific numbers in PointerCells that lead us to remove TargetNums from TargetCells.
	//This is only very rarely needed (at this time only for hiddenSubset techniques)
	PointerNums IntSlice
	//extra is a private place that information relevant to only specific techniques
	//can be stashed.
	extra interface{}
}
//TODO: consider passing a non-pointer humanSolveOptions so that mutations
//deeper in the solve stack don't matter.

//HumanSolveOptions configures how precisely the human solver should operate.
//Passing nil where a HumanSolveOptions is expected will use reasonable
//defaults. Note that the various human solve methods may mutate your options
//object.
type HumanSolveOptions struct {
	//At each step in solving the puzzle, how many candidate SolveSteps should
	//we generate before stopping the search for more? Higher values will give
	//more 'realistic' solves, but at the cost of *much* higher performance
	//costs. Also note that the difficulty may be wrong if the difficulty
	//model in use was trained on a different NumOptionsToCalculate.
	NumOptionsToCalculate int
	//Which techniques to try at each step of the puzzle, sorted in the order
	//to try them out (generally from cheapest to most expensive). A value of
	//nil will use Techniques (the default). Any GuessTechniques will be
	//ignored.
	TechniquesToUse []SolveTechnique
	//NoGuess specifies that even if no other techniques work, the HumanSolve
	//should not fall back on guessing, and instead just return failure.
	NoGuess bool

	//TODO: figure out how to test that we do indeed use different values of
	//numOptionsToCalculate.

	//TODO: add a TwiddleChainDissimilarity bool.

	//The following are flags only used for testing.

	//When we reenter back into humanSolveHelper after making a guess, should
	//we keep the provided TechniquesToUse, or revert back to this set of
	//techniques? (If nil, don't change them) Mainly useful for the case where
	//we want to test that Hint works well when it returns a guess.
	techniquesToUseAfterGuess []SolveTechnique
}
//Grid returns a snapshot of the grid at the time this SolveDirections was
//generated. Returns a fresh copy every time.
func (self SolveDirections) Grid() *Grid {
	//NOTE(review): the receiver here is by value, unlike the pointer
	//receivers used elsewhere; the original TODO ("this is the only pointer
	//receiver method on SolveDirections") appears to have that backwards —
	//confirm intent before changing.
	return self.gridSnapshot.Copy()
}
//Default resets the given HumanSolveOptions to reasonable defaults and
//returns the receiver for convenience, allowing the pattern
//`options := (&HumanSolveOptions{}).Default()`.
func (self *HumanSolveOptions) Default() *HumanSolveOptions {
	//TODO: the (&HumanSolveOptions{}).Default() pattern is a bit weird.
	//consider just doing a package global DefaultHumanSolveOptions.

	//Overwrite every field — including zero-valued ones — because the
	//receiver is not necessarily freshly initialized.
	*self = HumanSolveOptions{
		NumOptionsToCalculate:     15,
		TechniquesToUse:           Techniques,
		NoGuess:                   false,
		techniquesToUseAfterGuess: nil,
	}
	return self
}
//validate normalizes the options object so every field holds a legal value:
//nil technique lists fall back to the package default, the candidate count
//is clamped to at least one, and guess techniques are stripped. Returns the
//receiver for convenience.
func (self *HumanSolveOptions) validate() *HumanSolveOptions {

	if self.TechniquesToUse == nil {
		self.TechniquesToUse = Techniques
	}

	if self.NumOptionsToCalculate < 1 {
		self.NumOptionsToCalculate = 1
	}

	//Guessing is handled specially by the solver, so any GuessTechnique in
	//the caller's list is invalid; filter them out, preserving order.
	var kept []SolveTechnique
	for _, t := range self.TechniquesToUse {
		if t != GuessTechnique {
			kept = append(kept, t)
		}
	}
	self.TechniquesToUse = kept

	return self
}
//IsUseful returns true if this SolveStep, when applied to the given grid, would do useful work--that is, it would
//either fill a previously unfilled number, or cull previously un-culled possibilities. This is useful to ensure
//HumanSolve doesn't get in a loop of applying the same useless steps.
func (self *SolveStep) IsUseful(grid *Grid) bool {

	//Returns true IFF calling Apply with this step and the given grid would result in some useful work. Does not modify the grid.

	//All of this logic is substantially recreated in Apply.

	if self.Technique == nil {
		return false
	}

	//TODO: test this.
	if self.Technique.IsFill() {
		if len(self.TargetCells) == 0 || len(self.TargetNums) == 0 {
			return false
		}
		cell := self.TargetCells[0].InGrid(grid)
		//A fill is only useful if it would actually change the cell's number.
		return self.TargetNums[0] != cell.Number()
	} else {
		//A cull step is useful if at least one target number is still marked
		//possible in at least one target cell.
		useful := false
		for _, cell := range self.TargetCells {
			gridCell := cell.InGrid(grid)
			for _, exclude := range self.TargetNums {
				//It's right to use Possible because it includes the logic of "it's not possible if there's a number in there already"
				//TODO: ensure the comment above is correct logically.
				if gridCell.Possible(exclude) {
					useful = true
				}
			}
		}
		return useful
	}
}
//Apply does the solve operation to the Grid that is defined by the configuration of the SolveStep, mutating the
//grid and bringing it one step closer to being solved.
func (self *SolveStep) Apply(grid *Grid) {
	//All of this logic is substantially recreated in IsUseful.
	if self.Technique.IsFill() {
		//A fill step with no target cell or number is a no-op.
		if len(self.TargetCells) == 0 || len(self.TargetNums) == 0 {
			return
		}
		cell := self.TargetCells[0].InGrid(grid)
		cell.SetNumber(self.TargetNums[0])
	} else {
		//A cull step marks every TargetNum as excluded in every target cell.
		for _, cell := range self.TargetCells {
			gridCell := cell.InGrid(grid)
			for _, exclude := range self.TargetNums {
				gridCell.SetExcluded(exclude, true)
			}
		}
	}
}
//Description returns a human-readable sentence describing what the SolveStep instructs the user to do, and what reasoning
//it used to decide that this step was logically valid to apply.
func (self *SolveStep) Description() string {
	var action string
	if self.Technique.IsFill() {
		action = fmt.Sprintf("We put %s in cell %s ", self.TargetNums.Description(), self.TargetCells.Description())
	} else {
		//TODO: pluralize based on length of lists.
		action = fmt.Sprintf("We remove the possibilities %s from cells %s ", self.TargetNums.Description(), self.TargetCells.Description())
	}
	return action + "because " + self.Technique.Description(self) + "."
}
//HumanLikelihood is how likely a user would be to pick this step when compared with other possible steps.
//Generally inversely related to difficulty (but not perfectly).
//This value will be used to pick which technique to apply when compared with other candidates.
//Based on the technique's HumanLikelihood, possibly attenuated by this particular step's variant
//or specifics.
func (self *SolveStep) HumanLikelihood() float64 {
	//TODO: attenuate by variant
	//Delegates entirely to the step's Technique.
	return self.Technique.humanLikelihood(self)
}
//TechniqueVariant returns the name of the precise variant of the Technique
//that this step represents. This information is useful for figuring out
//which weight to apply when calculating overall difficulty. A Technique would have
//variants (as opposed to simply other Techniques) when the work to calculate all
//variants is the same, but the difficulty of produced steps may vary due to some
//property of the technique. Forcing Chains is the canonical example.
func (self *SolveStep) TechniqueVariant() string {
	//Defer to the Technique.variant implementation entirely.
	//This allows us to most easily share code for the simple case.
	return self.Technique.variant(self)
}
//normalize puts the step in a known, deterministic state, which eases testing.
func (self *SolveStep) normalize() {
	//Different techniques will want to normalize steps in different ways, so
	//delegate to the step's own Technique.
	self.Technique.normalizeStep(self)
}
//HumanSolution returns the SolveDirections that represent how a human would
//solve this puzzle. It does not mutate the grid. If options is nil, will use
//reasonable defaults.
func (self *Grid) HumanSolution(options *HumanSolveOptions) *SolveDirections {
	//Solve a copy so the receiver grid is left untouched.
	clone := self.Copy()
	defer clone.Done()
	return clone.HumanSolve(options)
}
/*
* The HumanSolve method is very complex due to guessing logic.
*
* Without guessing, the approach is very straightforward. Every move either fills a cell
* or removes possibilities. But nothing does anything contradictory, so if they diverge
* in path, it doesn't matter--they're still working towards the same end state (denoted by @)
*
*
*
* |
* /|\
* / | \
* | | |
* \ | /
* \ | /
* \|/
* |
* V
* @
*
*
* In human solve, we first try the cheap techniques, and if we can't find enough options, we then additionally try
* the expensive set of techniques. But both cheap and expensive techniques are similar in that they move us
* towards the end state.
*
* For simplicity, we'll just show paths like this as a single line, even though realistically they could diverge arbitrarily,
* before converging on the end state.
*
* This all changes when you introduce branching, because at a branch point you could have chosen the wrong path
* and at some point down that path you will discover an invalidity, which tells you you chose wrong, and
* you'll have to unwind.
*
* Let's explore a puzzle that needs one branch point.
*
 * We explore with normal techniques until we run into a point where none of the normal techniques work.
* This is a DIRE point, and in some cases we might just give up. But we have one last thing to try:
* branching.
* We then run the guess technique, which proposes multiple guess steps (big O's, in this diagram) that we could take.
*
* The technique will choose cells with only a small number of possibilities, to reduce the branching factor.
*
* |
* |
* V
* O O O O O ...
*
* We will randomly pick one cell, and then explore all of its possibilities.
* CRUCIALLY, at a branch point, we never have to pick another cell to explore its possibilities; for each cell,
 * if you plug in each of the possibilities and solve forward, it must result in either an invalidity (at which
* point you try another possibility, or if they're all gone you unwind if there's a branch point above), or
* you picked correctly and the solution lies that way. But it's never the case that picking THIS cell won't uncover
* either the invalidity or the solution.
* So in reality, when we come to a branch point, we can choose one cell to focus on and throw out all of the others.
*
* |
* |
* V
* O
*
* But within that cell, there are multiple possibilty branches to consider.
*
*
* |
* |
* V
* O
* / \
* 1 3
* / \
* | |
*
* We go through each in turn and play forward until we find either an invalidity or a solution.
* Within each branch, we use the normal techniques as normal--remember it's actually branching but
* converging, like in the first diagram.
*
* |
* |
* V
* O
* / \
* 1 3
* / \
* | |
* X @
*
* When we uncover an invalidity, we unwind back to the branch point and then try the next possibility.
* We should never have to unwind above the top branch, because down one of the branches (possibly somewhere deep)
* There MUST be a solution (assuming the puzzle is valid)
* Obviously if we find the solution on our branch, we're good.
*
 * But what happens if we run out of normal techniques down one of our branches and have to branch again?
*
* Nothing much changes, except that you DO unravel if you uncover that all of the possibilities down this
* side lead to invalidities. You just never unravel past the first branch point.
*
* |
* |
* V
* O
* / \
* 1 3
* / \
* | |
* O O
* / \ / \
* 4 5 6 7
* / | | \
* | | | |
* X X X @
*
* Down one of the paths MUST lie a solution.
*
* The search will fail if we have a max depth limit of branching to try, because then we might not discover a
* solution down one of the branches. A good sanity point is DIM*DIM branch points is the absolute highest; an
* assert at that level makes sense.
*
* In this implementation, humanSolveHelper does the work of exploring any branch up to a point where a guess must happen.
* If we run out of ideas on a branch, we call into guess helper, which will pick a guess and then try all of the versions of it
 * until finding one that works. This keeps humanSolveHelper pretty straightforward and keeps most of the complex guess logic out.
*/
//HumanSolve is the workhorse of the package. It solves the puzzle much like a
//human would, applying complex logic techniques iteratively to find a
//sequence of steps that a reasonable human might apply to solve the puzzle.
//HumanSolve is an expensive operation because at each step it identifies all
//of the valid logic rules it could apply and then selects between them based
//on various weightings. HumanSolve endeavors to find the most realistic human
//solution it can by using a large number of possible techniques with
//realistic weights, as well as by doing things like being more likely to pick
//a cell that is in the same row/cell/block as the last filled cell. Returns
//nil if the puzzle does not have a single valid solution. If options is nil,
//will use reasonable defaults. Mutates the grid.
func (self *Grid) HumanSolve(options *HumanSolveOptions) *SolveDirections {
	//endConditionSolved == true: keep searching until the whole grid is solved.
	return humanSolveHelper(self, options, true)
}
//Hint returns a chain of SolveDirections, containing exactly one
//IsFill step at the end, that is a reasonable next step to move the puzzle
//towards being completed. It is effectively a hint to the user about what
//Fill step to do next, and why it's logically implied; the truncated return
//value of HumanSolve. Returns nil if the puzzle has multiple solutions or is
//otherwise invalid. If options is nil, will use reasonable defaults. Does not
//mutate the grid.
func (self *Grid) Hint(options *HumanSolveOptions) *SolveDirections {

	//TODO: return HintDirections instead of SolveDirections

	//TODO: test that non-fill steps before the last one are necessary to unlock
	//the fill step at the end (cull them if not), and test that.

	clone := self.Copy()
	defer clone.Done()

	result := humanSolveHelper(clone, options, false)

	//humanSolveHelper returns nil for multi-solution puzzles; guard before
	//setting IsHint, which would otherwise panic on a nil pointer.
	if result == nil {
		return nil
	}

	result.IsHint = true

	return result
}
//humanSolveHelper does most of the set up for both HumanSolve and Hint.
func humanSolveHelper(grid *Grid, options *HumanSolveOptions, endConditionSolved bool) *SolveDirections {
	//Short circuit solving if it has multiple solutions.
	if grid.HasMultipleSolutions() {
		log.Println("Grid exited early due to multiple solutions:", grid)
		return nil
	}

	if options == nil {
		options = (&HumanSolveOptions{}).Default()
	}

	options.validate()

	//Snapshot before the searcher mutates the grid.
	snapshot := grid.Copy()

	steps := humanSolveNonGuessSearcher(grid, options, endConditionSolved)

	//NOTE(review): when the searcher fails, steps is nil and we still return
	//a non-nil SolveDirections with empty Steps — confirm callers expect
	//that rather than a nil result.
	return &SolveDirections{snapshot, steps, false}
}
//Do we even need a helper here? Can't we just make HumanSolve actually humanSolveHelper?
//The core worker of human solve, it does all of the solving between branch points.
//It repeatedly runs the configured techniques, picks one candidate step by
//weighted random choice, and applies it, until the end condition is reached:
//grid solved (endConditionSolved) or a fill step emitted (hint mode). If it
//stalls it falls back to guessing unless options.NoGuess is set.
func humanSolveNonGuessSearcher(grid *Grid, options *HumanSolveOptions, endConditionSolved bool) []*SolveStep {

	var results []*SolveStep

	//Note: trying these all in parallel is much slower (~15x) than doing them in sequence.
	//The reason is that in sequence we bailed early as soon as we found one step; now we try them all.

	var lastStep *SolveStep

	//Is this the first time through the loop?
	firstRun := true

	for firstRun || (endConditionSolved && !grid.Solved()) || (!endConditionSolved && lastStep != nil && !lastStep.Technique.IsFill()) {
		firstRun = false

		if grid.Invalid() {
			//We must have been in a branch and found an invalidity.
			//Bail immediately.
			return nil
		}

		possibilities := runTechniques(options.TechniquesToUse, grid, options.NumOptionsToCalculate)

		//Now pick one to apply.
		if len(possibilities) == 0 {
			//Hmm, didn't find any possibilities. We failed. :-(
			break
		}

		//TODO: consider if we should stop picking techniques based on their weight here.
		//Now that Find returns a slice instead of a single, we're already much more likely to select an "easy" technique. ... Right?

		possibilitiesWeights := make([]float64, len(possibilities))
		for i, possibility := range possibilities {
			possibilitiesWeights[i] = possibility.HumanLikelihood()
		}
		//Bias the choice towards steps 'chained' to the previous one.
		tweakChainedStepsWeights(lastStep, possibilities, possibilitiesWeights)
		step := possibilities[randomIndexWithInvertedWeights(possibilitiesWeights)]

		results = append(results, step)
		lastStep = step
		step.Apply(grid)

	}
	if (endConditionSolved && !grid.Solved()) || (!endConditionSolved && (lastStep == nil || !lastStep.Technique.IsFill())) {
		//We couldn't solve the puzzle.
		//But let's do one last ditch effort and try guessing.
		//But first... are we allowed to guess?
		if options.NoGuess {
			//guess not... :-)
			return nil
		}
		guessSteps := humanSolveGuessSearcher(grid, options, endConditionSolved)
		if len(guessSteps) == 0 {
			//Okay, we just totally failed.
			return nil
		}
		return append(results, guessSteps...)
	}
	return results
}
//Called when we have run out of options at a given state and need to guess.
//It takes one guess step from GuessTechnique, then tries each candidate
//number in turn on a copy of the grid until one branch solves cleanly.
func humanSolveGuessSearcher(grid *Grid, options *HumanSolveOptions, endConditionSolved bool) []*SolveStep {

	//Yes, using DIM*DIM is a gross hack... I really should be calling Find inside a goroutine...
	results := make(chan *SolveStep, DIM*DIM)
	done := make(chan bool)

	if options.techniquesToUseAfterGuess != nil {
		options.TechniquesToUse = options.techniquesToUseAfterGuess
	}

	//TODO: consider doing a normal solve forward from here to figure out what the right branch is and just do that.

	//Find is meant to be run in a goroutine; it won't complete until it's searched everything.
	GuessTechnique.Find(grid, results, done)

	close(done)

	var guess *SolveStep

	//TODO: test cases where we expect multiple results...
	//Non-blocking receive: take the first guess step if any was produced.
	select {
	case guess = <-results:
	default:
		//Couldn't find a guess step, oddly enough.
		return nil
	}

	//We'll just take the first guess step and forget about the other ones.

	//The guess technique passes back the other nums as PointerNums, which is a hack.
	//Unpack them and then nil it out to prevent confusing other people in the future with them.
	otherNums := guess.PointerNums
	guess.PointerNums = nil

	var gridCopy *Grid

	for {
		gridCopy = grid.Copy()

		guess.Apply(gridCopy)

		//Even if endConditionSolved is true, this guess we will return will be an IsFill,
		//thus terminating the search. From here on out all we're doing is verifying that
		//we picked the right branch at the guess if endConditionSolved is not true.
		solveSteps := humanSolveNonGuessSearcher(gridCopy, options, true)

		if len(solveSteps) != 0 {
			//Success!
			//Make ourselves look like that grid (to pass back the state of what the solution was) and return.
			grid.replace(gridCopy)
			gridCopy.Done()
			if endConditionSolved {
				return append([]*SolveStep{guess}, solveSteps...)
			} else {
				//Since we're trying to find a hint that terminates in an IsFill step,
				//and this guess IS the IsFill step, and we've verified that this
				//guess we chose is correct, just return the guess step back up.
				return []*SolveStep{guess}
			}
		}

		//We need to try the next solution.

		if len(otherNums) == 0 {
			//No more numbers to try. We failed!
			break
		}

		nextNum := otherNums[0]
		otherNums = otherNums[1:]

		//Stuff it into the TargetNums for the branch step.
		guess.TargetNums = IntSlice{nextNum}

		gridCopy.Done()
	}
	gridCopy.Done()
	//We failed to find anything (which should never happen...)
	return nil
}
//This function will tweak weights quite a bit to make it more likely that we will pick a subsequent step that
//is 'related' to the last step. For example, if the last step had targetCells that shared a row, then a step with
//target cells in that same row will be more likely this step. This captures the fact that humans, in practice,
//will have 'chains' of steps that are all related. Fix: the mismatch log
//message misspelled "lengths".
func tweakChainedStepsWeights(lastStep *SolveStep, possibilities []*SolveStep, weights []float64) {

	//The two slices must be parallel; bail rather than index out of range.
	if len(possibilities) != len(weights) {
		log.Println("Mismatched lengths of weights and possibilities: ", possibilities, weights)
		return
	}

	//Nothing to do without a previous step to chain from, or no candidates.
	if lastStep == nil || len(possibilities) == 0 {
		return
	}

	for i, possibility := range possibilities {
		//Tweak every weight by how related they are.
		//Remember: these are INVERTED weights, so tweaking them down is BETTER.

		//TODO: consider attenuating the effect of this; chaining is nice but shouldn't totally change the calculation for hard techniques.
		//It turns out that we probably want to STRENGTHEN the effect.
		//Logically we should be attenuating Dissimilarity here, but for some reason the math.Pow(dissimilarity, 10) doesn't actually
		//appear to work here, which is maddening.
		weights[i] *= possibility.TargetCells.chainDissimilarity(lastStep.TargetCells)
	}
}
//runTechniques runs the Find method of the given techniques (a batch at a time,
//in parallel, against a copy of grid) and collects candidate steps until it has
//more than numRequestedSteps or every technique is exhausted.
func runTechniques(techniques []SolveTechnique, grid *Grid, numRequestedSteps int) []*SolveStep {
/*
This function went from being a mere convenience function to
being a complex piece of multi-threaded code.
The basic idea is to parellelize all of the technique's.Find
work.
Each technique is designed so it will bail early if we tell it
(via closing the done channel) we've already got enough steps
found.
We only want to spin up numTechniquesToStartByDefault # of
techniques at a time, because likely we'll find enough steps
before getting to the harder (and generally more expensive to
calculate) techniques if earlier ones fail.
There is one thread for each currently running technique's
Find. The main thread collects results and figures out when it
has enough that all of the other threads can stop searching
(or, when it hears that no more results will be coming in and
it should just stop). There are two other threads. One waits
until the waitgroup is all done and then signals that back to
the main thread by closing resultsChan. The other thread is
notified every time a technique thread is done, and decides
whether or not it should start a new technique thread now. The
interplay of those last two threads is very timing sensitive;
if wg.Done were called before we'd started up the new
technique, we could return from the whole endeavor before
getting enough steps collected.
*/
//Treat any non-positive request as a request for a single step.
if numRequestedSteps < 1 {
numRequestedSteps = 1
}
//We make a copy of the grid to search on to avoid race conditions where
// main thread has already returned up to humanSolveHelper, but not all of the techniques have gotten
//the message and freak out a bit because the grid starts changing under them.
//NOTE(review): gridCopy is never returned via Done(); presumably deliberate since
//late-finishing techniques may still be reading it after we return — confirm this
//doesn't leak pooled grids.
gridCopy := grid.Copy()
//TODO: make this configurable, and figure out what the optimal values are
numTechniquesToStartByDefault := 10
//Handle the case where we were given a short list of techniques.
if len(techniques) < numTechniquesToStartByDefault {
numTechniquesToStartByDefault = len(techniques)
}
//Leave some room in resultsChan so all of the techniques don't have to block as often
//waiting for the mainthread to clear resultsChan. Leads to a 20% reduction in time compared
//to unbuffered.
//We'll close this channel to signal the collector that no more results are coming.
resultsChan := make(chan *SolveStep, len(techniques))
done := make(chan bool)
//Deliberately unbuffered; we want it to run sync inside of startTechnique
//the thread that's waiting on it will pass its own chan that it should send to when it's done
techniqueFinished := make(chan chan bool)
var wg sync.WaitGroup
//The next technique to spin up
nextTechniqueIndex := 0
//We'll be kicking off this routine from multiple places so just define it once
startTechnique := func(theTechnique SolveTechnique) {
theTechnique.Find(gridCopy, resultsChan, done)
//This is where a new technique should be kicked off, if one's going to be, before we tell the waitgroup that we're done.
//We need to communicate synchronously with that thread
comms := make(chan bool)
techniqueFinished <- comms
//Wait to hear back that a new technique is started, if one is going to be.
<-comms
//Okay, now the other thread has either started a new technique going, or hasn't.
wg.Done()
}
var results []*SolveStep
//Get the first batch of techniques going
wg.Add(numTechniquesToStartByDefault)
//Since Techniques is in sorted order, we're starting off with the easiest techniques.
for nextTechniqueIndex = 0; nextTechniqueIndex < numTechniquesToStartByDefault; nextTechniqueIndex++ {
go startTechnique(techniques[nextTechniqueIndex])
}
//Listen for when all items are done and signal the collector to stop collecting
go func() {
wg.Wait()
//All of the techniques must be done here; no one can send on resultsChan at this point.
//Signal to the collector that it should break out.
close(resultsChan)
close(techniqueFinished)
}()
//The thread that will kick off new techinques
go func() {
for {
returnChan, ok := <-techniqueFinished
if !ok {
//If channel is closed, that's our cue to die.
return
}
//Start a technique here, if we're going to.
//First, check if the collector has signaled that we're all done
select {
case <-done:
//Don't start a new one
default:
//Potentially start a new technique going as things aren't shutting down yet.
//Is there another technique?
if nextTechniqueIndex < len(techniques) {
wg.Add(1)
go startTechnique(techniques[nextTechniqueIndex])
//Next time we're considering starting a new technique, start the next one
nextTechniqueIndex++
}
}
//Tell our caller that we're done
returnChan <- true
}
}()
//Collect the results as long as more are coming
OuterLoop:
for {
result, ok := <-resultsChan
if !ok {
//resultsChan was closed, which is our signal that no more results are coming and we should break
break OuterLoop
}
results = append(results, result)
//Do we have enough steps accumulated?
//NOTE(review): this only stops once numRequestedSteps+1 results are collected;
//confirm whether >= (exactly numRequestedSteps) was intended.
if len(results) > numRequestedSteps {
//Communicate to all still-running routines that they can stop
close(done)
break OuterLoop
}
}
return results
}
//Difficulty returns a value between 0.0 and 1.0, representing how hard the puzzle would be
//for a human to solve. This is an EXTREMELY expensive method (although repeated calls without
//mutating the grid return a cached value quickly). It human solves the puzzle, extracts signals
//out of the solveDirections, and then passes those signals into a machine-learned model that
//was trained on hundreds of thousands of solves by real users in order to generate a candidate difficulty.
//It then repeats the process multiple times until the difficulty number begins to converge to
//an average.
func (self *Grid) Difficulty() float64 {
	//TODO: test that the memoization works (that is, the cached value is thrown out if the grid is modified)
	//It's hard to test because self.calcluateDifficulty(true) is so expensive to run, which is
	//why the meat of the method is split out separately.
	if self == nil {
		return 0.0
	}
	//A cached value of 0.0 doubles as "not yet computed". In the (rare!) case where a grid's
	//actual difficulty is 0.0 we simply recompute and get the same value back.
	if self.cachedDifficulty != 0.0 {
		return self.cachedDifficulty
	}
	self.cachedDifficulty = self.calcluateDifficulty(true)
	return self.cachedDifficulty
}
//calcluateDifficulty computes the difficulty of the grid: a number between 0.0 and 1.0.
//This can be an extremely expensive method. Do not call repeatedly!
//This is a probabilistic measure; repeated calls may return different numbers, although
//generally we wait for the results to converge. We solve the same puzzle up to
//_MAX_DIFFICULTY_ITERATIONS times, averaging the per-solve difficulties until the running
//average stops moving by more than _DIFFICULTY_CONVERGENCE. When accurate is false (testing),
//we only do a single iteration so it goes fast.
func (self *Grid) calcluateDifficulty(accurate bool) float64 {
	self.HasMultipleSolutions()
	iterations := _MAX_DIFFICULTY_ITERATIONS
	if !accurate {
		iterations = 1
	}
	var total, average, previousAverage float64
	for i := 1; i <= iterations; i++ {
		total += gridDifficultyHelper(self)
		average = total / float64(i)
		//Bail out early once the running average has settled.
		if math.Abs(average-previousAverage) < _DIFFICULTY_CONVERGENCE {
			return average
		}
		previousAverage = average
	}
	//We weren't converging... oh well!
	return average
}
//gridDifficultyHelper will HumanSolve _NUM_SOLVES_FOR_DIFFICULTY times, then average the
//signals together, then give the difficulty for THAT. This is more accurate because the
//weights were trained on such averaged signals.
func gridDifficultyHelper(grid *Grid) float64 {
	signalsChan := make(chan DifficultySignals, _NUM_SOLVES_FOR_DIFFICULTY)
	//Might as well run all of the human solutions in parallel.
	for i := 0; i < _NUM_SOLVES_FOR_DIFFICULTY; i++ {
		go func(gridToUse *Grid) {
			solution := gridToUse.HumanSolution(nil)
			if solution == nil {
				//A nil solution here means the generator handed us a broken grid; that's fatal.
				log.Println("A generated grid turned out to have mutiple solutions (or otherwise return nil), indicating a very serious error:", gridToUse.DataString())
				os.Exit(1)
			}
			signalsChan <- solution.Signals()
		}(grid)
	}
	//Sum every solve's signals together...
	aggregate := DifficultySignals{}
	for i := 0; i < _NUM_SOLVES_FOR_DIFFICULTY; i++ {
		aggregate.sum(<-signalsChan)
	}
	//...then average each signal value.
	for key := range aggregate {
		aggregate[key] /= _NUM_SOLVES_FOR_DIFFICULTY
	}
	return aggregate.difficulty()
}
Remove an extra debugging logging statement.
package sudoku
import (
"fmt"
"log"
"math"
"os"
"sync"
)
//The number of solves we should average the signals together for before asking them for their difficulty
//Note: this should be set to the num-solves parameter used to train the currently configured weights.
const _NUM_SOLVES_FOR_DIFFICULTY = 10
//The list of techniques that HumanSolve will use to try to solve the puzzle, with the oddball Guess split out.
var (
//All of the 'normal' Techniques that will be used to solve the puzzle
Techniques []SolveTechnique
//The special GuessTechnique that is used only if no other techniques find options.
GuessTechnique SolveTechnique
//Every technique that HumanSolve could ever use, including the oddball Guess technique.
AllTechniques []SolveTechnique
//Every variant name for every TechniqueVariant that HumanSolve could ever use.
AllTechniqueVariants []string
)
//The actual techniques are initialized in hs_techniques.go, and actually defined in hst_*.go files.
//Worst case scenario, how many times we'd call HumanSolve to get a difficulty.
const _MAX_DIFFICULTY_ITERATIONS = 50
//TODO: consider relaxing this even more.
//How close we have to get to the average to feel comfortable our difficulty is converging.
//Compared against the absolute change in the running average between iterations.
const _DIFFICULTY_CONVERGENCE = 0.005
//SolveDirections is a list of SolveSteps that, when applied in order to its
//Grid, would cause it to be solved (except if IsHint is true).
type SolveDirections struct {
//A copy of the Grid when the SolveDirections was generated. Grab a
//reference from SolveDirections.Grid(), which returns a fresh copy.
gridSnapshot *Grid
//The list of steps that, when applied in order, would cause the
//SolveDirection's Grid() to be solved.
Steps []*SolveStep
//IsHint is whether the SolveDirections tells how to solve the given grid
//or just what the next set of steps leading to a fill step is. If true,
//the last step in Steps will be IsFill().
IsHint bool
}
//SolveStep is a step to fill in a number in a cell or narrow down the possibilities in a cell to
//get it closer to being solved. SolveSteps model techniques that humans would use to solve a
//puzzle.
type SolveStep struct {
//The technique that was used to identify that this step is logically valid at this point in the solution.
Technique SolveTechnique
//The cells that will be affected by the technique (either the number to fill in or possibilities to exclude).
TargetCells CellSlice
//The numbers we will remove (or, in the case of Fill, add) to the TargetCells.
TargetNums IntSlice
//The cells that together lead the technique to logically apply in this case; the cells behind the reasoning
//why the TargetCells will be mutated in the way specified by this SolveStep.
PointerCells CellSlice
//The specific numbers in PointerCells that lead us to remove TargetNums from TargetCells.
//This is only very rarely needed (at this time only for hiddenSubset techniques)
PointerNums IntSlice
//extra is a private place that information relevant to only specific techniques
//can be stashed.
extra interface{}
}
//TODO: consider passing a non-pointer humanSolveOptions so that mutations
//deeper in the solve stack don't matter.
//HumanSolveOptions configures how precisely the human solver should operate.
//Passing nil where a HumanSolveOptions is expected will use reasonable
//defaults. Note that the various human solve methods may mutate your options
//object.
type HumanSolveOptions struct {
//At each step in solving the puzzle, how many candidate SolveSteps should
//we generate before stopping the search for more? Higher values will give
//more 'realistic' solves, but at the cost of *much* higher performance
//costs. Also note that the difficulty may be wrong if the difficulty
//model in use was trained on a different NumOptionsToCalculate.
NumOptionsToCalculate int
//Which techniques to try at each step of the puzzle, sorted in the order
//to try them out (generally from cheapest to most expensive). A value of
//nil will use Techniques (the default). Any GuessTechniques will be
//ignored.
TechniquesToUse []SolveTechnique
//NoGuess specifies that even if no other techniques work, the HumanSolve
//should not fall back on guessing, and instead just return failure.
NoGuess bool
//TODO: figure out how to test that we do indeed use different values of
//numOptionsToCalculate.
//TODO: add a TwiddleChainDissimilarity bool.
//The following are flags only used for testing.
//When we reenter back into humanSolveHelper after making a guess, should
//we keep the provided TechniquesToUse, or revert back to this set of
//techniques? (If nil, don't change them) Mainly useful for the case where
//we want to test that Hint works well when it returns a guess.
techniquesToUseAfterGuess []SolveTechnique
}
//Grid returns a snapshot of the grid at the time this SolveDirections was
//generated. Returns a fresh copy every time.
func (self SolveDirections) Grid() *Grid {
	//TODO: this is the only non-pointer receiver method on SolveDirections.
	snapshot := self.gridSnapshot
	return snapshot.Copy()
}
//Default sets the given HumanSolveOptions to have reasonable defaults. Returns itself
//for convenience, allowing `options := (&HumanSolveOptions{}).Default()`
func (self *HumanSolveOptions) Default() *HumanSolveOptions {
	//TODO: the (&HumanSolveOptions{}).Default() pattern is a bit weird.
	//consider just doing a package global DefaultHumanSolveOptions.
	//Assigning a fresh struct resets every field, including zero-valued ones
	//like NoGuess and techniquesToUseAfterGuess, since the options object
	//isn't necessarily default initialized.
	*self = HumanSolveOptions{
		NumOptionsToCalculate: 15,
		TechniquesToUse:       Techniques,
	}
	return self
}
//validate modifies the options object to make sure all of the options are set
//in a legal way. Returns itself for convenience.
func (self *HumanSolveOptions) validate() *HumanSolveOptions {
	if self.TechniquesToUse == nil {
		self.TechniquesToUse = Techniques
	}
	if self.NumOptionsToCalculate < 1 {
		self.NumOptionsToCalculate = 1
	}
	//Strip out any GuessTechniques that might be in there, because they are
	//invalid here. We build a new slice (rather than filtering in place) so
	//we never mutate a slice the caller may have handed us.
	var legalTechniques []SolveTechnique
	for _, technique := range self.TechniquesToUse {
		if technique != GuessTechnique {
			legalTechniques = append(legalTechniques, technique)
		}
	}
	self.TechniquesToUse = legalTechniques
	return self
}
//IsUseful returns true if this SolveStep, when applied to the given grid, would do useful work--that is, it would
//either fill a previously unfilled number, or cull previously un-culled possibilities. This is useful to ensure
//HumanSolve doesn't get in a loop of applying the same useless steps. Does not modify the grid.
//All of this logic is substantially recreated in Apply.
func (self *SolveStep) IsUseful(grid *Grid) bool {
	if self.Technique == nil {
		return false
	}
	//TODO: test this.
	if self.Technique.IsFill() {
		if len(self.TargetCells) == 0 || len(self.TargetNums) == 0 {
			return false
		}
		//Filling is only useful if the cell doesn't already hold that number.
		cell := self.TargetCells[0].InGrid(grid)
		return self.TargetNums[0] != cell.Number()
	}
	//Non-fill: useful iff at least one target possibility is still open.
	useful := false
	for _, cell := range self.TargetCells {
		gridCell := cell.InGrid(grid)
		for _, exclude := range self.TargetNums {
			//It's right to use Possible because it includes the logic of "it's not possible if there's a number in there already"
			//TODO: ensure the comment above is correct logically.
			if gridCell.Possible(exclude) {
				useful = true
			}
		}
	}
	return useful
}
//Apply does the solve operation to the Grid that is defined by the configuration of the SolveStep, mutating the
//grid and bringing it one step closer to being solved.
//All of this logic is substantially recreated in IsUseful.
func (self *SolveStep) Apply(grid *Grid) {
	if self.Technique.IsFill() {
		//Fill: set the single target number in the single target cell.
		if len(self.TargetCells) == 0 || len(self.TargetNums) == 0 {
			return
		}
		self.TargetCells[0].InGrid(grid).SetNumber(self.TargetNums[0])
		return
	}
	//Non-fill: exclude each target number from each target cell.
	for _, cell := range self.TargetCells {
		gridCell := cell.InGrid(grid)
		for _, num := range self.TargetNums {
			gridCell.SetExcluded(num, true)
		}
	}
}
//Description returns a human-readable sentence describing what the SolveStep instructs the user to do, and what reasoning
//it used to decide that this step was logically valid to apply.
func (self *SolveStep) Description() string {
	var action string
	if self.Technique.IsFill() {
		action = fmt.Sprintf("We put %s in cell %s ", self.TargetNums.Description(), self.TargetCells.Description())
	} else {
		//TODO: pluralize based on length of lists.
		action = fmt.Sprintf("We remove the possibilities %s from cells %s ", self.TargetNums.Description(), self.TargetCells.Description())
	}
	return action + "because " + self.Technique.Description(self) + "."
}
//HumanLikelihood is how likely a user would be to pick this step when compared with other possible steps.
//Generally inversely related to difficulty (but not perfectly).
//This value will be used to pick which technique to apply when compared with other candidates.
//Based on the technique's HumanLikelihood, possibly attenuated by this particular step's variant
//or specifics.
func (self *SolveStep) HumanLikelihood() float64 {
//TODO: attenuate by variant
//Delegate entirely to the technique; it gets the step so it can inspect specifics.
return self.Technique.humanLikelihood(self)
}
//TechniqueVariant returns the name of the precise variant of the Technique
//that this step represents. This information is useful for figuring out
//which weight to apply when calculating overall difficulty. A Technique would have
//variants (as opposed to simply other Techniques) when the work to calculate all
//variants is the same, but the difficulty of produced steps may vary due to some
//property of the technique. Forcing Chains is the canonical example.
func (self *SolveStep) TechniqueVariant() string {
//Defer to the Technique.variant implementation entirely.
//This allows us to most easily share code for the simple case.
return self.Technique.variant(self)
}
//normalize puts the step in a known, deterministic state, which eases testing.
func (self *SolveStep) normalize() {
//Different techniques will want to normalize steps in different ways.
self.Technique.normalizeStep(self)
}
//HumanSolution returns the SolveDirections that represent how a human would
//solve this puzzle. It does not mutate the grid. If options is nil, will use
//reasonable defaults.
func (self *Grid) HumanSolution(options *HumanSolveOptions) *SolveDirections {
	//Solve on a clone so the receiver is left untouched; release the clone when done.
	workingGrid := self.Copy()
	defer workingGrid.Done()
	return workingGrid.HumanSolve(options)
}
/*
* The HumanSolve method is very complex due to guessing logic.
*
* Without guessing, the approach is very straightforward. Every move either fills a cell
* or removes possibilities. But nothing does anything contradictory, so if they diverge
* in path, it doesn't matter--they're still working towards the same end state (denoted by @)
*
*
*
* |
* /|\
* / | \
* | | |
* \ | /
* \ | /
* \|/
* |
* V
* @
*
*
* In human solve, we first try the cheap techniques, and if we can't find enough options, we then additionally try
* the expensive set of techniques. But both cheap and expensive techniques are similar in that they move us
* towards the end state.
*
* For simplicity, we'll just show paths like this as a single line, even though realistically they could diverge arbitrarily,
* before converging on the end state.
*
* This all changes when you introduce branching, because at a branch point you could have chosen the wrong path
* and at some point down that path you will discover an invalidity, which tells you you chose wrong, and
* you'll have to unwind.
*
* Let's explore a puzzle that needs one branch point.
*
* We explore with normal techniques until we run into a point where none of the normal techniques work.
* This is a DIRE point, and in some cases we might just give up. But we have one last thing to try:
* branching.
* We then run the guess technique, which proposes multiple guess steps (big O's, in this diagram) that we could take.
*
* The technique will choose cells with only a small number of possibilities, to reduce the branching factor.
*
* |
* |
* V
* O O O O O ...
*
* We will randomly pick one cell, and then explore all of its possibilities.
* CRUCIALLY, at a branch point, we never have to pick another cell to explore its possibilities; for each cell,
* if you plug in each of the possibilities and solve forward, it must result in either an invalidity (at which
* point you try another possibility, or if they're all gone you unwind if there's a branch point above), or
* you picked correctly and the solution lies that way. But it's never the case that picking THIS cell won't uncover
* either the invalidity or the solution.
* So in reality, when we come to a branch point, we can choose one cell to focus on and throw out all of the others.
*
* |
* |
* V
* O
*
* But within that cell, there are multiple possibility branches to consider.
*
*
* |
* |
* V
* O
* / \
* 1 3
* / \
* | |
*
* We go through each in turn and play forward until we find either an invalidity or a solution.
* Within each branch, we use the normal techniques as normal--remember it's actually branching but
* converging, like in the first diagram.
*
* |
* |
* V
* O
* / \
* 1 3
* / \
* | |
* X @
*
* When we uncover an invalidity, we unwind back to the branch point and then try the next possibility.
* We should never have to unwind above the top branch, because down one of the branches (possibly somewhere deep)
* There MUST be a solution (assuming the puzzle is valid)
* Obviously if we find the solution on our branch, we're good.
*
* But what happens if we run out of normal techniques down one of our branches and have to branch again?
*
* Nothing much changes, except that you DO unravel if you uncover that all of the possibilities down this
* side lead to invalidities. You just never unravel past the first branch point.
*
* |
* |
* V
* O
* / \
* 1 3
* / \
* | |
* O O
* / \ / \
* 4 5 6 7
* / | | \
* | | | |
* X X X @
*
* Down one of the paths MUST lie a solution.
*
* The search will fail if we have a max depth limit of branching to try, because then we might not discover a
* solution down one of the branches. A good sanity point is DIM*DIM branch points is the absolute highest; an
* assert at that level makes sense.
*
* In this implementation, humanSolveHelper does the work of exploring any branch up to a point where a guess must happen.
* If we run out of ideas on a branch, we call into guess helper, which will pick a guess and then try all of the versions of it
* until finding one that works. This keeps humanSolveHelper pretty straightforward and keeps most of the complex guess logic out.
*/
//HumanSolve is the workhorse of the package. It solves the puzzle much like a
//human would, applying complex logic techniques iteratively to find a
//sequence of steps that a reasonable human might apply to solve the puzzle.
//HumanSolve is an expensive operation because at each step it identifies all
//of the valid logic rules it could apply and then selects between them based
//on various weightings. HumanSolve endeavors to find the most realistic human
//solution it can by using a large number of possible techniques with
//realistic weights, as well as by doing things like being more likely to pick
//a cell that is in the same row/cell/block as the last filled cell. Returns
//nil if the puzzle does not have a single valid solution. If options is nil,
//will use reasonable defaults. Mutates the grid.
func (self *Grid) HumanSolve(options *HumanSolveOptions) *SolveDirections {
//All of the work happens in humanSolveHelper; true means solve to completion
//(as opposed to Hint's stop-at-first-fill-step end condition).
return humanSolveHelper(self, options, true)
}
//Hint returns a chain of SolveDirections, containing exactly one
//IsFill step at the end, that is a reasonable next step to move the puzzle
//towards being completed. It is effectively a hint to the user about what
//Fill step to do next, and why it's logically implied; the truncated return
//value of HumanSolve. Returns nil if the puzzle has multiple solutions or is
//otherwise invalid. If options is nil, will use reasonable defaults. Does not
//mutate the grid.
func (self *Grid) Hint(options *HumanSolveOptions) *SolveDirections {
	//TODO: return HintDirections instead of SolveDirections
	//TODO: test that non-fill steps before the last one are necessary to unlock
	//the fill step at the end (cull them if not), and test that.
	clone := self.Copy()
	defer clone.Done()
	result := humanSolveHelper(clone, options, false)
	//humanSolveHelper returns nil when the puzzle has multiple solutions.
	//Guard against that before touching result, since dereferencing a nil
	//result here would panic.
	if result == nil {
		return nil
	}
	result.IsHint = true
	return result
}
//humanSolveHelper does most of the set up for both HumanSolve and Hint.
//Returns nil if the grid has multiple solutions; otherwise runs the searcher
//and wraps its steps together with a snapshot of the starting grid.
func humanSolveHelper(grid *Grid, options *HumanSolveOptions, endConditionSolved bool) *SolveDirections {
//Short circuit solving if it has multiple solutions.
if grid.HasMultipleSolutions() {
return nil
}
if options == nil {
options = (&HumanSolveOptions{}).Default()
}
options.validate()
//The snapshot is owned by the returned SolveDirections (see SolveDirections.Grid()).
snapshot := grid.Copy()
steps := humanSolveNonGuessSearcher(grid, options, endConditionSolved)
//NOTE(review): when the searcher fails, steps is nil but we still return a
//non-nil SolveDirections with nil Steps — confirm callers expect that.
return &SolveDirections{snapshot, steps, false}
}
//Do we even need a helper here? Can't we just make HumanSolve actually humanSolveHelper?
//humanSolveNonGuessSearcher is the core worker of human solve: it does all of the solving
//between branch points. When endConditionSolved is true it loops until the grid is solved;
//otherwise it stops after the first IsFill step (the Hint case). Falls back on
//humanSolveGuessSearcher if the normal techniques run dry (unless options.NoGuess).
//Mutates grid; returns nil on failure.
func humanSolveNonGuessSearcher(grid *Grid, options *HumanSolveOptions, endConditionSolved bool) []*SolveStep {
var results []*SolveStep
//Note: trying these all in parallel is much slower (~15x) than doing them in sequence.
//The reason is that in sequence we bailed early as soon as we found one step; now we try them all.
var lastStep *SolveStep
//Is this the first time through the loop?
firstRun := true
for firstRun || (endConditionSolved && !grid.Solved()) || (!endConditionSolved && lastStep != nil && !lastStep.Technique.IsFill()) {
firstRun = false
if grid.Invalid() {
//We must have been in a branch and found an invalidity.
//Bail immediately.
return nil
}
possibilities := runTechniques(options.TechniquesToUse, grid, options.NumOptionsToCalculate)
//Now pick one to apply.
if len(possibilities) == 0 {
//Hmm, didn't find any possibilities. We failed. :-(
break
}
//TODO: consider if we should stop picking techniques based on their weight here.
//Now that Find returns a slice instead of a single, we're already much more likely to select an "easy" technique. ... Right?
possibilitiesWeights := make([]float64, len(possibilities))
for i, possibility := range possibilities {
possibilitiesWeights[i] = possibility.HumanLikelihood()
}
//Bias toward steps 'chained' to the last one, like a human would.
tweakChainedStepsWeights(lastStep, possibilities, possibilitiesWeights)
step := possibilities[randomIndexWithInvertedWeights(possibilitiesWeights)]
results = append(results, step)
lastStep = step
step.Apply(grid)
}
if (endConditionSolved && !grid.Solved()) || (!endConditionSolved && (lastStep == nil || !lastStep.Technique.IsFill())) {
//We couldn't solve the puzzle.
//But let's do one last ditch effort and try guessing.
//But first... are we allowed to guess?
if options.NoGuess {
//guess not... :-)
return nil
}
guessSteps := humanSolveGuessSearcher(grid, options, endConditionSolved)
if len(guessSteps) == 0 {
//Okay, we just totally failed.
return nil
}
return append(results, guessSteps...)
}
return results
}
//humanSolveGuessSearcher is called when we have run out of options at a given state and need to guess.
//It asks GuessTechnique for one guess cell, then tries each of that cell's candidate numbers in
//turn, solving forward from each until one branch succeeds. On success, grid is mutated into the
//winning branch's state.
func humanSolveGuessSearcher(grid *Grid, options *HumanSolveOptions, endConditionSolved bool) []*SolveStep {
//Yes, using DIM*DIM is a gross hack... I really should be calling Find inside a goroutine...
results := make(chan *SolveStep, DIM*DIM)
done := make(chan bool)
//Testing hook: optionally swap the technique set once we're past the guess.
if options.techniquesToUseAfterGuess != nil {
options.TechniquesToUse = options.techniquesToUseAfterGuess
}
//TODO: consider doing a normal solve forward from here to figure out what the right branch is and just do that.
//Find is meant to be run in a goroutine; it won't complete until it's searched everything.
GuessTechnique.Find(grid, results, done)
close(done)
var guess *SolveStep
//TODO: test cases where we expect multiple results...
select {
case guess = <-results:
default:
//Couldn't find a guess step, oddly enough.
return nil
}
//We'll just take the first guess step and forget about the other ones.
//The guess technique passes back the other nums as PointerNums, which is a hack.
//Unpack them and then nil it out to prevent confusing other people in the future with them.
otherNums := guess.PointerNums
guess.PointerNums = nil
var gridCopy *Grid
//Try each candidate number for the guess cell until one branch solves forward.
for {
gridCopy = grid.Copy()
guess.Apply(gridCopy)
//Even if endConditionSolved is true, this guess we will return will be an IsFill,
//thus terminating the search. From here on out all we're doing is verifying that
//we picked the right branch at the guess if endConditionSolved is not true.
solveSteps := humanSolveNonGuessSearcher(gridCopy, options, true)
if len(solveSteps) != 0 {
//Success!
//Make ourselves look like that grid (to pass back the state of what the solution was) and return.
grid.replace(gridCopy)
gridCopy.Done()
if endConditionSolved {
return append([]*SolveStep{guess}, solveSteps...)
} else {
//Since we're trying to find a hint that terminates in an IsFill step,
//and this guess IS the IsFill step, and we've verified that this
//guess we chose is correct, just return the guess step back up.
return []*SolveStep{guess}
}
}
//We need to try the next solution.
if len(otherNums) == 0 {
//No more numbers to try. We failed!
break
}
nextNum := otherNums[0]
otherNums = otherNums[1:]
//Stuff it into the TargetNums for the branch step.
guess.TargetNums = IntSlice{nextNum}
gridCopy.Done()
}
gridCopy.Done()
//We failed to find anything (which should never happen...)
return nil
}
//tweakChainedStepsWeights tweaks weights quite a bit to make it more likely that we will pick a
//subsequent step that is 'related' to the last step. For example, if the last step had targetCells
//that shared a row, then a step with target cells in that same row will be more likely this step.
//This captures the fact that humans, in practice, will have 'chains' of steps that are all related.
//weights is modified in place; lastStep and possibilities are only read.
func tweakChainedStepsWeights(lastStep *SolveStep, possibilities []*SolveStep, weights []float64) {
	if len(possibilities) != len(weights) {
		log.Println("Mismatched lengths of weights and possibilities: ", possibilities, weights)
		return
	}
	//Without a previous step to chain from (or with nothing to weight), there's nothing to do.
	if lastStep == nil || len(possibilities) == 0 {
		return
	}
	for i, possibility := range possibilities {
		//Tweak every weight by how related they are.
		//Remember: these are INVERTED weights, so tweaking them down is BETTER.
		//TODO: consider attentuating the effect of this; chaining is nice but shouldn't totally change the calculation for hard techniques.
		//It turns out that we probably want to STRENGTHEN the effect.
		//Logically we should be attenuating Dissimilarity here, but for some reason the math.Pow(dissimilairty, 10) doesn't actually
		//appear to work here, which is maddening.
		weights[i] *= possibility.TargetCells.chainDissimilarity(lastStep.TargetCells)
	}
}
//runTechniques runs the techniques' Find methods in parallel against a copy
//of grid and collects found steps until it has more than numRequestedSteps
//(or no more results will come). See the block comment below for the thread
//choreography; the interplay is deliberately timing sensitive.
func runTechniques(techniques []SolveTechnique, grid *Grid, numRequestedSteps int) []*SolveStep {
	/*
		This function went from being a mere convenience function to
		being a complex piece of multi-threaded code.
		The basic idea is to parellelize all of the technique's.Find
		work.
		Each technique is designed so it will bail early if we tell it
		(via closing the done channel) we've already got enough steps
		found.
		We only want to spin up numTechniquesToStartByDefault # of
		techniques at a time, because likely we'll find enough steps
		before getting to the harder (and generally more expensive to
		calculate) techniques if earlier ones fail.
		There is one thread for each currently running technique's
		Find. The main thread collects results and figures out when it
		has enough that all of the other threads can stop searching
		(or, when it hears that no more results will be coming in and
		it should just stop). There are two other threads. One waits
		until the waitgroup is all done and then signals that back to
		the main thread by closing resultsChan. The other thread is
		notified every time a technique thread is done, and decides
		whether or not it should start a new technique thread now. The
		interplay of those last two threads is very timing sensitive;
		if wg.Done were called before we'd started up the new
		technique, we could return from the whole endeavor before
		getting enough steps collected.
	*/
	//Clamp to at least one requested step so the loop below terminates sanely.
	if numRequestedSteps < 1 {
		numRequestedSteps = 1
	}
	//We make a copy of the grid to search on to avoid race conditions where
	// main thread has already returned up to humanSolveHelper, but not all of the techinques have gotten
	//the message and freak out a bit because the grid starts changing under them.
	gridCopy := grid.Copy()
	//TODO: make this configurable, and figure out what the optimal values are
	numTechniquesToStartByDefault := 10
	//Handle the case where we were given a short list of techniques.
	if len(techniques) < numTechniquesToStartByDefault {
		numTechniquesToStartByDefault = len(techniques)
	}
	//Leave some room in resultsChan so all of the techniques don't have to block as often
	//waiting for the mainthread to clear resultsChan. Leads to a 20% reduction in time compared
	//to unbuffered.
	//We'll close this channel to signal the collector that no more results are coming.
	resultsChan := make(chan *SolveStep, len(techniques))
	done := make(chan bool)
	//Deliberately unbuffered; we want it to run sync inside of startTechnique
	//the thread that's waiting on it will pass its own chan that it should send to when it's done
	techniqueFinished := make(chan chan bool)
	var wg sync.WaitGroup
	//The next technique to spin up
	nextTechniqueIndex := 0
	//We'll be kicking off this routine from multiple places so just define it once
	startTechnique := func(theTechnique SolveTechnique) {
		theTechnique.Find(gridCopy, resultsChan, done)
		//This is where a new technique should be kicked off, if one's going to be, before we tell the waitgroup that we're done.
		//We need to communicate synchronously with that thread
		comms := make(chan bool)
		techniqueFinished <- comms
		//Wait to hear back that a new technique is started, if one is going to be.
		<-comms
		//Okay, now the other thread has either started a new technique going, or hasn't.
		wg.Done()
	}
	var results []*SolveStep
	//Get the first batch of techniques going
	wg.Add(numTechniquesToStartByDefault)
	//Since Techniques is in sorted order, we're starting off with the easiest techniques.
	for nextTechniqueIndex = 0; nextTechniqueIndex < numTechniquesToStartByDefault; nextTechniqueIndex++ {
		go startTechnique(techniques[nextTechniqueIndex])
	}
	//Listen for when all items are done and signal the collector to stop collecting
	go func() {
		wg.Wait()
		//All of the techniques must be done here; no one can send on resultsChan at this point.
		//Signal to the collector that it should break out.
		close(resultsChan)
		close(techniqueFinished)
	}()
	//The thread that will kick off new techinques
	go func() {
		for {
			returnChan, ok := <-techniqueFinished
			if !ok {
				//If channel is closed, that's our cue to die.
				return
			}
			//Start a technique here, if we're going to.
			//First, check if the collector has signaled that we're all done
			select {
			case <-done:
				//Don't start a new one
			default:
				//Potentially start a new technique going as things aren't shutting down yet.
				//Is there another technique?
				if nextTechniqueIndex < len(techniques) {
					wg.Add(1)
					go startTechnique(techniques[nextTechniqueIndex])
					//Next time we're considering starting a new technique, start the next one
					nextTechniqueIndex++
				}
			}
			//Tell our caller that we're done
			returnChan <- true
		}
	}()
	//Collect the results as long as more are coming
OuterLoop:
	for {
		result, ok := <-resultsChan
		if !ok {
			//resultsChan was closed, which is our signal that no more results are coming and we should break
			break OuterLoop
		}
		results = append(results, result)
		//Do we have enough steps accumulate?
		//NOTE(review): '>' means we actually collect numRequestedSteps+1 steps
		//before signaling done — confirm this off-by-one is intentional.
		if len(results) > numRequestedSteps {
			//Communicate to all still-running routines that they can stop
			close(done)
			break OuterLoop
		}
	}
	return results
}
//Difficulty returns a value between 0.0 and 1.0 representing how hard the
//puzzle would be for a human to solve. The first call is EXTREMELY expensive:
//it human-solves the puzzle, extracts signals out of the solveDirections, and
//feeds them into a machine-learned model trained on hundreds of thousands of
//real user solves, repeating until the number converges. The result is then
//memoized on the grid, so repeated calls without mutating the grid are cheap.
func (self *Grid) Difficulty() float64 {
	if self == nil {
		return 0.0
	}
	//Memoization note: a grid whose true difficulty is exactly 0.0 (rare!)
	//defeats the cache, but the worst case is just recomputing the same value.
	//TODO: test that the cached value is thrown out if the grid is modified;
	//hard to test because calcluateDifficulty(true) is so expensive to run.
	if self.cachedDifficulty != 0.0 {
		return self.cachedDifficulty
	}
	self.cachedDifficulty = self.calcluateDifficulty(true)
	return self.cachedDifficulty
}
//calcluateDifficulty is the expensive core of Difficulty: it human-solves the
//puzzle repeatedly and keeps a running average of the per-solve difficulties,
//returning early once the average converges. The measure is probabilistic, so
//repeated calls may return (slightly) different numbers between 0.0 and 1.0.
//When accurate is false (used in testing) only a single iteration is run.
func (self *Grid) calcluateDifficulty(accurate bool) float64 {
	self.HasMultipleSolutions()
	iterations := _MAX_DIFFICULTY_ITERATIONS
	if !accurate {
		iterations = 1
	}
	var total, previous float64
	average := 0.0
	for i := 1; i <= iterations; i++ {
		total += gridDifficultyHelper(self)
		average = total / float64(i)
		if math.Abs(average-previous) < _DIFFICULTY_CONVERGENCE {
			//Already converged; return early.
			return average
		}
		previous = average
	}
	//We never converged... oh well! Return the last running average.
	return average
}
//gridDifficultyHelper runs HumanSolve _NUM_SOLVES_FOR_DIFFICULTY times (in
//parallel), averages the signals together, and returns the difficulty of THAT
//average. This is more accurate because the model weights were trained on
//such averaged signals.
func gridDifficultyHelper(grid *Grid) float64 {
	collector := make(chan DifficultySignals, _NUM_SOLVES_FOR_DIFFICULTY)
	//Might as well run all of the human solutions in parallel
	for i := 0; i < _NUM_SOLVES_FOR_DIFFICULTY; i++ {
		go func(gridToUse *Grid) {
			solution := gridToUse.HumanSolution(nil)
			if solution == nil {
				//A nil solution indicates a broken generated grid; this is
				//unrecoverable, so log and exit hard.
				log.Println("A generated grid turned out to have multiple solutions (or otherwise return nil), indicating a very serious error:", gridToUse.DataString())
				os.Exit(1)
			}
			collector <- solution.Signals()
		}(grid)
	}
	combinedSignals := DifficultySignals{}
	for i := 0; i < _NUM_SOLVES_FOR_DIFFICULTY; i++ {
		combinedSignals.sum(<-collector)
	}
	//Now average all of the signal values
	for key := range combinedSignals {
		combinedSignals[key] /= _NUM_SOLVES_FOR_DIFFICULTY
	}
	return combinedSignals.difficulty()
}
|
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tag
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"strings"
"unicode/utf16"
)
// DefaultUTF16WithBOMByteOrder is the byte order used when the "UTF16 with BOM" encoding
// is specified without a corresponding BOM in the data.
var DefaultUTF16WithBOMByteOrder binary.ByteOrder = binary.LittleEndian
// ID3v2.2.0 frames (see http://id3.org/id3v2-00, sec 4).
// Maps the three-character v2.2 frame ID to its human-readable description.
var id3v22Frames = map[string]string{
	"BUF": "Recommended buffer size",
	"CNT": "Play counter",
	"COM": "Comments",
	"CRA": "Audio encryption",
	"CRM": "Encrypted meta frame",
	"ETC": "Event timing codes",
	"EQU": "Equalization",
	"GEO": "General encapsulated object",
	"IPL": "Involved people list",
	"LNK": "Linked information",
	"MCI": "Music CD Identifier",
	"MLL": "MPEG location lookup table",
	"PIC": "Attached picture",
	"POP": "Popularimeter",
	"REV": "Reverb",
	"RVA": "Relative volume adjustment",
	"SLT": "Synchronized lyric/text",
	"STC": "Synced tempo codes",
	"TAL": "Album/Movie/Show title",
	"TBP": "BPM (Beats Per Minute)",
	"TCM": "Composer",
	"TCO": "Content type",
	"TCR": "Copyright message",
	"TDA": "Date",
	"TDY": "Playlist delay",
	"TEN": "Encoded by",
	"TFT": "File type",
	"TIM": "Time",
	"TKE": "Initial key",
	"TLA": "Language(s)",
	"TLE": "Length",
	"TMT": "Media type",
	"TOA": "Original artist(s)/performer(s)",
	"TOF": "Original filename",
	"TOL": "Original Lyricist(s)/text writer(s)",
	"TOR": "Original release year",
	"TOT": "Original album/Movie/Show title",
	"TP1": "Lead artist(s)/Lead performer(s)/Soloist(s)/Performing group",
	"TP2": "Band/Orchestra/Accompaniment",
	"TP3": "Conductor/Performer refinement",
	"TP4": "Interpreted, remixed, or otherwise modified by",
	"TPA": "Part of a set",
	"TPB": "Publisher",
	"TRC": "ISRC (International Standard Recording Code)",
	"TRD": "Recording dates",
	"TRK": "Track number/Position in set",
	"TSI": "Size",
	"TSS": "Software/hardware and settings used for encoding",
	"TT1": "Content group description",
	"TT2": "Title/Songname/Content description",
	"TT3": "Subtitle/Description refinement",
	"TXT": "Lyricist/text writer",
	"TXX": "User defined text information frame",
	"TYE": "Year",
	"UFI": "Unique file identifier",
	"ULT": "Unsychronized lyric/text transcription",
	"WAF": "Official audio file webpage",
	"WAR": "Official artist/performer webpage",
	"WAS": "Official audio source webpage",
	"WCM": "Commercial information",
	"WCP": "Copyright/Legal information",
	"WPB": "Publishers official webpage",
	"WXX": "User defined URL link frame",
}
// ID3v2.3.0 frames (see http://id3.org/id3v2.3.0#Declared_ID3v2_frames).
var id3v23Frames = map[string]string{
"AENC": "Audio encryption]",
"APIC": "Attached picture",
"COMM": "Comments",
"COMR": "Commercial frame",
"ENCR": "Encryption method registration",
"EQUA": "Equalization",
"ETCO": "Event timing codes",
"GEOB": "General encapsulated object",
"GRID": "Group identification registration",
"IPLS": "Involved people list",
"LINK": "Linked information",
"MCDI": "Music CD identifier",
"MLLT": "MPEG location lookup table",
"OWNE": "Ownership frame",
"PRIV": "Private frame",
"PCNT": "Play counter",
"POPM": "Popularimeter",
"POSS": "Position synchronisation frame",
"RBUF": "Recommended buffer size",
"RVAD": "Relative volume adjustment",
"RVRB": "Reverb",
"SYLT": "Synchronized lyric/text",
"SYTC": "Synchronized tempo codes",
"TALB": "Album/Movie/Show title",
"TBPM": "BPM (beats per minute)",
"TCMP": "iTunes Compilation Flag",
"TCOM": "Composer",
"TCON": "Content type",
"TCOP": "Copyright message",
"TDAT": "Date",
"TDLY": "Playlist delay",
"TENC": "Encoded by",
"TEXT": "Lyricist/Text writer",
"TFLT": "File type",
"TIME": "Time",
"TIT1": "Content group description",
"TIT2": "Title/songname/content description",
"TIT3": "Subtitle/Description refinement",
"TKEY": "Initial key",
"TLAN": "Language(s)",
"TLEN": "Length",
"TMED": "Media type",
"TOAL": "Original album/movie/show title",
"TOFN": "Original filename",
"TOLY": "Original lyricist(s)/text writer(s)",
"TOPE": "Original artist(s)/performer(s)",
"TORY": "Original release year",
"TOWN": "File owner/licensee",
"TPE1": "Lead performer(s)/Soloist(s)",
"TPE2": "Band/orchestra/accompaniment",
"TPE3": "Conductor/performer refinement",
"TPE4": "Interpreted, remixed, or otherwise modified by",
"TPOS": "Part of a set",
"TPUB": "Publisher",
"TRCK": "Track number/Position in set",
"TRDA": "Recording dates",
"TRSN": "Internet radio station name",
"TRSO": "Internet radio station owner",
"TSIZ": "Size",
"TSO2": "iTunes uses this for Album Artist sort order",
"TSOC": "iTunes uses this for Composer sort order",
"TSRC": "ISRC (international standard recording code)",
"TSSE": "Software/Hardware and settings used for encoding",
"TYER": "Year",
"TXXX": "User defined text information frame",
"UFID": "Unique file identifier",
"USER": "Terms of use",
"USLT": "Unsychronized lyric/text transcription",
"WCOM": "Commercial information",
"WCOP": "Copyright/Legal information",
"WOAF": "Official audio file webpage",
"WOAR": "Official artist/performer webpage",
"WOAS": "Official audio source webpage",
"WORS": "Official internet radio station homepage",
"WPAY": "Payment",
"WPUB": "Publishers official webpage",
"WXXX": "User defined URL link frame",
}
// ID3v2.4.0 frames (see http://id3.org/id3v2.4.0-frames, sec 4).
// Maps the four-character v2.4 frame ID to its human-readable description.
var id3v24Frames = map[string]string{
	"AENC": "Audio encryption",
	"APIC": "Attached picture",
	"ASPI": "Audio seek point index",
	"COMM": "Comments",
	"COMR": "Commercial frame",
	"ENCR": "Encryption method registration",
	"EQU2": "Equalisation (2)",
	"ETCO": "Event timing codes",
	"GEOB": "General encapsulated object",
	"GRID": "Group identification registration",
	"LINK": "Linked information",
	"MCDI": "Music CD identifier",
	"MLLT": "MPEG location lookup table",
	"OWNE": "Ownership frame",
	"PRIV": "Private frame",
	"PCNT": "Play counter",
	"POPM": "Popularimeter",
	"POSS": "Position synchronisation frame",
	"RBUF": "Recommended buffer size",
	"RVA2": "Relative volume adjustment (2)",
	"RVRB": "Reverb",
	"SEEK": "Seek frame",
	"SIGN": "Signature frame",
	"SYLT": "Synchronised lyric/text",
	"SYTC": "Synchronised tempo codes",
	"TALB": "Album/Movie/Show title",
	"TBPM": "BPM (beats per minute)",
	"TCMP": "iTunes Compilation Flag",
	"TCOM": "Composer",
	"TCON": "Content type",
	"TCOP": "Copyright message",
	"TDEN": "Encoding time",
	"TDLY": "Playlist delay",
	"TDOR": "Original release time",
	"TDRC": "Recording time",
	"TDRL": "Release time",
	"TDTG": "Tagging time",
	"TENC": "Encoded by",
	"TEXT": "Lyricist/Text writer",
	"TFLT": "File type",
	"TIPL": "Involved people list",
	"TIT1": "Content group description",
	"TIT2": "Title/songname/content description",
	"TIT3": "Subtitle/Description refinement",
	"TKEY": "Initial key",
	"TLAN": "Language(s)",
	"TLEN": "Length",
	"TMCL": "Musician credits list",
	"TMED": "Media type",
	"TMOO": "Mood",
	"TOAL": "Original album/movie/show title",
	"TOFN": "Original filename",
	"TOLY": "Original lyricist(s)/text writer(s)",
	"TOPE": "Original artist(s)/performer(s)",
	"TOWN": "File owner/licensee",
	"TPE1": "Lead performer(s)/Soloist(s)",
	"TPE2": "Band/orchestra/accompaniment",
	"TPE3": "Conductor/performer refinement",
	"TPE4": "Interpreted, remixed, or otherwise modified by",
	"TPOS": "Part of a set",
	"TPRO": "Produced notice",
	"TPUB": "Publisher",
	"TRCK": "Track number/Position in set",
	"TRSN": "Internet radio station name",
	"TRSO": "Internet radio station owner",
	"TSO2": "iTunes uses this for Album Artist sort order",
	"TSOA": "Album sort order",
	"TSOC": "iTunes uses this for Composer sort order",
	"TSOP": "Performer sort order",
	"TSOT": "Title sort order",
	"TSRC": "ISRC (international standard recording code)",
	"TSSE": "Software/Hardware and settings used for encoding",
	"TSST": "Set subtitle",
	"TXXX": "User defined text information frame",
	"UFID": "Unique file identifier",
	"USER": "Terms of use",
	"USLT": "Unsynchronised lyric/text transcription",
	"WCOM": "Commercial information",
	"WCOP": "Copyright/Legal information",
	"WOAF": "Official audio file webpage",
	"WOAR": "Official artist/performer webpage",
	"WOAS": "Official audio source webpage",
	"WORS": "Official Internet radio station homepage",
	"WPAY": "Payment",
	"WPUB": "Publishers official webpage",
	"WXXX": "User defined URL link frame",
}
// ID3 frames that are defined in the specs, indexed by tag Format so that
// lookups can be done generically (see validID3Frame).
var id3Frames = map[Format]map[string]string{
	ID3v2_2: id3v22Frames,
	ID3v2_3: id3v23Frames,
	ID3v2_4: id3v24Frames,
}
// validID3Frame reports whether name is a frame ID declared by the spec for
// the given ID3 version. Unknown versions yield false.
func validID3Frame(version Format, name string) bool {
	if names, known := id3Frames[version]; known {
		_, defined := names[name]
		return defined
	}
	return false
}
// readWFrame decodes a W-frame (URL) body. Frame text is always encoded in
// ISO-8859-1, so we prepend encoding byte 0 and reuse the T-frame reader.
func readWFrame(b []byte) (string, error) {
	return readTFrame(append([]byte{0}, b...))
}
// readTFrame decodes a T-frame (text) body: the first byte selects the text
// encoding and the remainder is the payload. Embedded NUL bytes are stripped
// from the decoded result.
func readTFrame(b []byte) (string, error) {
	if len(b) == 0 {
		return "", nil
	}
	txt, err := decodeText(b[0], b[1:])
	if err != nil {
		return "", err
	}
	return strings.Replace(txt, "\x00", "", -1), nil
}
func decodeText(enc byte, b []byte) (string, error) {
if len(b) == 0 {
return "", nil
}
switch enc {
case 0: // ISO-8859-1
return decodeISO8859(b), nil
case 1: // UTF-16 with byte order marker
if len(b) == 1 {
return "", nil
}
return decodeUTF16WithBOM(b), nil
case 2: // UTF-16 without byte order (assuming BigEndian)
if len(b) == 1 {
return "", nil
}
return decodeUTF16(b, binary.BigEndian), nil
case 3: // UTF-8
return string(b), nil
default: // Fallback to ISO-8859-1
return decodeISO8859(b), nil
}
}
// encodingDelim returns the NUL terminator for the given text encoding:
// a double zero byte for the UTF-16 encodings (1 and 2), a single zero byte
// for everything else (see decodeText). The error is always nil; it is kept
// in the signature for callers.
func encodingDelim(enc byte) ([]byte, error) {
	if enc == 1 || enc == 2 {
		return []byte{0, 0}, nil
	}
	return []byte{0}, nil
}
// dataSplit splits b into at most two pieces around the encoding's NUL
// delimiter. For UTF-16 encodings an odd cut can leave a stray leading zero
// on the second piece; that extra byte is trimmed.
func dataSplit(b []byte, enc byte) ([][]byte, error) {
	delim, err := encodingDelim(enc)
	if err != nil {
		return nil, err
	}
	parts := bytes.SplitN(b, delim, 2)
	if len(parts) == 2 && len(parts[1]) > 0 && parts[1][0] == 0 {
		// There was a double (or triple) 0 and we cut too early.
		parts[1] = parts[1][1:]
	}
	return parts, nil
}
// decodeISO8859 converts ISO-8859-1 bytes to a UTF-8 string; each byte maps
// directly to the Unicode code point of the same value.
func decodeISO8859(b []byte) string {
	var sb strings.Builder
	sb.Grow(len(b))
	for _, x := range b {
		sb.WriteRune(rune(x))
	}
	return sb.String()
}
// decodeUTF16WithBOM decodes UTF-16 text that may begin with a byte order
// marker; without a BOM, DefaultUTF16WithBOMByteOrder is assumed.
// Inputs shorter than two bytes previously panicked on b[0]/b[1]; they now
// safely fall through to the default byte order.
func decodeUTF16WithBOM(b []byte) string {
	bo := DefaultUTF16WithBOMByteOrder
	if len(b) >= 2 {
		switch {
		case b[0] == 0xFE && b[1] == 0xFF:
			bo = binary.BigEndian
			b = b[2:]
		case b[0] == 0xFF && b[1] == 0xFE:
			bo = binary.LittleEndian
			b = b[2:]
		}
	}
	return decodeUTF16(b, bo)
}
func decodeUTF16(b []byte, bo binary.ByteOrder) string {
s := make([]uint16, 0, len(b)/2)
for i := 0; i < len(b); i += 2 {
s = append(s, bo.Uint16(b[i:i+2]))
}
return string(utf16.Decode(s))
}
// Comm is the text payload of COMM, UFID, TXXX, WXXX and USLT frames: a text
// with a description and a specified language. For WXXX, TXXX and UFID the
// Language field is left empty.
type Comm struct {
	Language    string
	Description string
	Text        string
}

// String returns a string representation of the underlying Comm instance;
// when a language is set, the text is summarized by its newline count.
func (t Comm) String() string {
	if t.Language == "" {
		return fmt.Sprintf("Text{Description: '%v', %v}", t.Description, t.Text)
	}
	return fmt.Sprintf("Text{Lang: '%v', Description: '%v', %v lines}",
		t.Language, t.Description, strings.Count(t.Text, "\n"))
}
// IDv2.{3,4}
// -- Header
// <Header for 'Unsynchronised lyrics/text transcription', ID: "USLT">
// <Header for 'Comment', ID: "COMM">
// -- readTextWithDescrFrame(data, true, true)
// Text encoding $xx
// Language $xx xx xx
// Content descriptor <text string according to encoding> $00 (00)
// Lyrics/text <full text string according to encoding>
// -- Header
// <Header for 'User defined text information frame', ID: "TXXX">
// <Header for 'User defined URL link frame', ID: "WXXX">
// -- readTextWithDescrFrame(data, false, <isDataEncoded>)
// Text encoding $xx
// Description <text string according to encoding> $00 (00)
// Value <text string according to encoding>
// readTextWithDescrFrame parses a frame laid out as: encoding byte, optional
// 3-byte language (hasLang), NUL-terminated description, then the text. When
// encoded is false the text portion is decoded as ISO-8859-1 regardless of
// the encoding byte. Short inputs now return an error instead of panicking
// (previously b[0] on empty input and b[:3] on a missing language panicked).
func readTextWithDescrFrame(b []byte, hasLang bool, encoded bool) (*Comm, error) {
	if len(b) == 0 {
		return nil, errors.New("frame too short: missing encoding byte")
	}
	enc := b[0]
	b = b[1:]
	c := &Comm{}
	if hasLang {
		if len(b) < 3 {
			return nil, errors.New("frame too short: missing language")
		}
		c.Language = string(b[:3])
		b = b[3:]
	}
	descTextSplit, err := dataSplit(b, enc)
	if err != nil {
		return nil, err
	}
	desc, err := decodeText(enc, descTextSplit[0])
	if err != nil {
		return nil, fmt.Errorf("error decoding tag description text: %v", err)
	}
	c.Description = desc
	if len(descTextSplit) == 1 {
		return c, nil
	}
	if !encoded {
		enc = byte(0)
	}
	text, err := decodeText(enc, descTextSplit[1])
	if err != nil {
		return nil, fmt.Errorf("error decoding tag text: %v", err)
	}
	c.Text = text
	return c, nil
}
// UFID is composed of a provider (frequently a URL) and a binary identifier.
// The identifier can be text (MusicBrainz uses text, but it need not be).
type UFID struct {
	Provider   string
	Identifier []byte
}

// String renders the UFID as "provider (identifier)".
func (u UFID) String() string {
	return fmt.Sprintf("%v (%v)", u.Provider, string(u.Identifier))
}

// readUFID splits a UFID frame body into the NUL-terminated provider and the
// identifier that follows it.
func readUFID(b []byte) (*UFID, error) {
	pieces := bytes.SplitN(b, []byte{0}, 2)
	if len(pieces) != 2 {
		return nil, errors.New("expected to split UFID data into 2 pieces")
	}
	return &UFID{
		Provider:   string(pieces[0]),
		Identifier: pieces[1],
	}, nil
}
// pictureTypes maps the APIC/PIC picture-type byte to its human-readable
// description (ID3v2 spec, "Attached picture" frame).
var pictureTypes = map[byte]string{
	0x00: "Other",
	0x01: "32x32 pixels 'file icon' (PNG only)",
	0x02: "Other file icon",
	0x03: "Cover (front)",
	0x04: "Cover (back)",
	0x05: "Leaflet page",
	0x06: "Media (e.g. lable side of CD)",
	0x07: "Lead artist/lead performer/soloist",
	0x08: "Artist/performer",
	0x09: "Conductor",
	0x0A: "Band/Orchestra",
	0x0B: "Composer",
	0x0C: "Lyricist/text writer",
	0x0D: "Recording Location",
	0x0E: "During recording",
	0x0F: "During performance",
	0x10: "Movie/video screen capture",
	0x11: "A bright coloured fish",
	0x12: "Illustration",
	0x13: "Band/artist logotype",
	0x14: "Publisher/Studio logotype",
}
// Picture is a type which represents an attached picture extracted from metadata.
type Picture struct {
	Ext         string // Extension of the picture file.
	MIMEType    string // MIMEType of the picture.
	Type        string // Type of the picture (see pictureTypes).
	Description string // Description.
	Data        []byte // Raw picture data.
}

// String returns a string representation of the underlying Picture instance;
// the raw data is summarized by its byte length rather than printed.
func (p Picture) String() string {
	return fmt.Sprintf("Picture{Ext: %v, MIMEType: %v, Type: %v, Description: %v, Data.Size: %v}",
		p.Ext, p.MIMEType, p.Type, p.Description, len(p.Data))
}
// IDv2.2
// -- Header
// Attached picture "PIC"
// Frame size $xx xx xx
// -- readPICFrame
// Text encoding $xx
// Image format $xx xx xx
// Picture type $xx
// Description <textstring> $00 (00)
// Picture data <binary data>
// readPICFrame parses an ID3v2.2 PIC frame: encoding byte, 3-byte image
// format, picture-type byte, NUL-terminated description, then picture data.
// Frames that are too short or missing the description terminator now return
// an error instead of panicking on out-of-range indexing.
func readPICFrame(b []byte) (*Picture, error) {
	if len(b) < 5 {
		return nil, errors.New("invalid PIC frame: too short")
	}
	enc := b[0]
	ext := string(b[1:4])
	picType := b[4]
	descDataSplit, err := dataSplit(b[5:], enc)
	if err != nil {
		return nil, err
	}
	if len(descDataSplit) != 2 {
		// No description terminator: descDataSplit[1] below would panic.
		return nil, errors.New("error decoding PIC description text: missing terminator")
	}
	desc, err := decodeText(enc, descDataSplit[0])
	if err != nil {
		return nil, fmt.Errorf("error decoding PIC description text: %v", err)
	}
	var mimeType string
	switch ext {
	case "jpeg", "jpg":
		mimeType = "image/jpeg"
	case "png":
		mimeType = "image/png"
	}
	return &Picture{
		Ext:         ext,
		MIMEType:    mimeType,
		Type:        pictureTypes[picType],
		Description: desc,
		Data:        descDataSplit[1],
	}, nil
}
// IDv2.{3,4}
// -- Header
// <Header for 'Attached picture', ID: "APIC">
// -- readAPICFrame
// Text encoding $xx
// MIME type <text string> $00
// Picture type $xx
// Description <text string according to encoding> $00 (00)
// Picture data <binary data>
// readAPICFrame parses an ID3v2.{3,4} APIC frame: encoding byte,
// NUL-terminated MIME type, picture-type byte, NUL-terminated description,
// then picture data. Returns an error instead of panicking when the MIME
// type or description terminator is missing (see #28) or the frame is too
// short.
func readAPICFrame(b []byte) (*Picture, error) {
	if len(b) == 0 {
		return nil, errors.New("invalid APIC frame: empty")
	}
	enc := b[0]
	mimeDataSplit := bytes.SplitN(b[1:], []byte{0}, 2)
	if len(mimeDataSplit) != 2 {
		// No NUL terminator after the MIME type: mimeDataSplit[1] would panic.
		return nil, errors.New("error decoding APIC mimetype: missing terminator")
	}
	mimeType := string(mimeDataSplit[0])
	b = mimeDataSplit[1]
	if len(b) == 0 {
		return nil, errors.New("invalid APIC frame: missing picture type")
	}
	picType := b[0]
	descDataSplit, err := dataSplit(b[1:], enc)
	if err != nil {
		return nil, err
	}
	if len(descDataSplit) != 2 {
		return nil, errors.New("error decoding APIC description text: missing terminator")
	}
	desc, err := decodeText(enc, descDataSplit[0])
	if err != nil {
		return nil, fmt.Errorf("error decoding APIC description text: %v", err)
	}
	var ext string
	switch mimeType {
	case "image/jpeg":
		ext = "jpg"
	case "image/png":
		ext = "png"
	}
	return &Picture{
		Ext:         ext,
		MIMEType:    mimeType,
		Type:        pictureTypes[picType],
		Description: desc,
		Data:        descDataSplit[1],
	}, nil
}
Fix bug with invalid APIC mimetype
Return an error, rather than panicking, if the mimetype
cannot be decoded. See #28.
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tag
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"strings"
"unicode/utf16"
)
// DefaultUTF16WithBOMByteOrder is the byte order used when the "UTF16 with BOM" encoding
// is specified without a corresponding BOM in the data.
var DefaultUTF16WithBOMByteOrder binary.ByteOrder = binary.LittleEndian
// ID3v2.2.0 frames (see http://id3.org/id3v2-00, sec 4).
var id3v22Frames = map[string]string{
"BUF": "Recommended buffer size",
"CNT": "Play counter",
"COM": "Comments",
"CRA": "Audio encryption",
"CRM": "Encrypted meta frame",
"ETC": "Event timing codes",
"EQU": "Equalization",
"GEO": "General encapsulated object",
"IPL": "Involved people list",
"LNK": "Linked information",
"MCI": "Music CD Identifier",
"MLL": "MPEG location lookup table",
"PIC": "Attached picture",
"POP": "Popularimeter",
"REV": "Reverb",
"RVA": "Relative volume adjustment",
"SLT": "Synchronized lyric/text",
"STC": "Synced tempo codes",
"TAL": "Album/Movie/Show title",
"TBP": "BPM (Beats Per Minute)",
"TCM": "Composer",
"TCO": "Content type",
"TCR": "Copyright message",
"TDA": "Date",
"TDY": "Playlist delay",
"TEN": "Encoded by",
"TFT": "File type",
"TIM": "Time",
"TKE": "Initial key",
"TLA": "Language(s)",
"TLE": "Length",
"TMT": "Media type",
"TOA": "Original artist(s)/performer(s)",
"TOF": "Original filename",
"TOL": "Original Lyricist(s)/text writer(s)",
"TOR": "Original release year",
"TOT": "Original album/Movie/Show title",
"TP1": "Lead artist(s)/Lead performer(s)/Soloist(s)/Performing group",
"TP2": "Band/Orchestra/Accompaniment",
"TP3": "Conductor/Performer refinement",
"TP4": "Interpreted, remixed, or otherwise modified by",
"TPA": "Part of a set",
"TPB": "Publisher",
"TRC": "ISRC (International Standard Recording Code)",
"TRD": "Recording dates",
"TRK": "Track number/Position in set",
"TSI": "Size",
"TSS": "Software/hardware and settings used for encoding",
"TT1": "Content group description",
"TT2": "Title/Songname/Content description",
"TT3": "Subtitle/Description refinement",
"TXT": "Lyricist/text writer",
"TXX": "User defined text information frame",
"TYE": "Year",
"UFI": "Unique file identifier",
"ULT": "Unsychronized lyric/text transcription",
"WAF": "Official audio file webpage",
"WAR": "Official artist/performer webpage",
"WAS": "Official audio source webpage",
"WCM": "Commercial information",
"WCP": "Copyright/Legal information",
"WPB": "Publishers official webpage",
"WXX": "User defined URL link frame",
}
// ID3v2.3.0 frames (see http://id3.org/id3v2.3.0#Declared_ID3v2_frames).
var id3v23Frames = map[string]string{
"AENC": "Audio encryption]",
"APIC": "Attached picture",
"COMM": "Comments",
"COMR": "Commercial frame",
"ENCR": "Encryption method registration",
"EQUA": "Equalization",
"ETCO": "Event timing codes",
"GEOB": "General encapsulated object",
"GRID": "Group identification registration",
"IPLS": "Involved people list",
"LINK": "Linked information",
"MCDI": "Music CD identifier",
"MLLT": "MPEG location lookup table",
"OWNE": "Ownership frame",
"PRIV": "Private frame",
"PCNT": "Play counter",
"POPM": "Popularimeter",
"POSS": "Position synchronisation frame",
"RBUF": "Recommended buffer size",
"RVAD": "Relative volume adjustment",
"RVRB": "Reverb",
"SYLT": "Synchronized lyric/text",
"SYTC": "Synchronized tempo codes",
"TALB": "Album/Movie/Show title",
"TBPM": "BPM (beats per minute)",
"TCMP": "iTunes Compilation Flag",
"TCOM": "Composer",
"TCON": "Content type",
"TCOP": "Copyright message",
"TDAT": "Date",
"TDLY": "Playlist delay",
"TENC": "Encoded by",
"TEXT": "Lyricist/Text writer",
"TFLT": "File type",
"TIME": "Time",
"TIT1": "Content group description",
"TIT2": "Title/songname/content description",
"TIT3": "Subtitle/Description refinement",
"TKEY": "Initial key",
"TLAN": "Language(s)",
"TLEN": "Length",
"TMED": "Media type",
"TOAL": "Original album/movie/show title",
"TOFN": "Original filename",
"TOLY": "Original lyricist(s)/text writer(s)",
"TOPE": "Original artist(s)/performer(s)",
"TORY": "Original release year",
"TOWN": "File owner/licensee",
"TPE1": "Lead performer(s)/Soloist(s)",
"TPE2": "Band/orchestra/accompaniment",
"TPE3": "Conductor/performer refinement",
"TPE4": "Interpreted, remixed, or otherwise modified by",
"TPOS": "Part of a set",
"TPUB": "Publisher",
"TRCK": "Track number/Position in set",
"TRDA": "Recording dates",
"TRSN": "Internet radio station name",
"TRSO": "Internet radio station owner",
"TSIZ": "Size",
"TSO2": "iTunes uses this for Album Artist sort order",
"TSOC": "iTunes uses this for Composer sort order",
"TSRC": "ISRC (international standard recording code)",
"TSSE": "Software/Hardware and settings used for encoding",
"TYER": "Year",
"TXXX": "User defined text information frame",
"UFID": "Unique file identifier",
"USER": "Terms of use",
"USLT": "Unsychronized lyric/text transcription",
"WCOM": "Commercial information",
"WCOP": "Copyright/Legal information",
"WOAF": "Official audio file webpage",
"WOAR": "Official artist/performer webpage",
"WOAS": "Official audio source webpage",
"WORS": "Official internet radio station homepage",
"WPAY": "Payment",
"WPUB": "Publishers official webpage",
"WXXX": "User defined URL link frame",
}
// ID3v2.4.0 frames (see http://id3.org/id3v2.4.0-frames, sec 4).
var id3v24Frames = map[string]string{
"AENC": "Audio encryption",
"APIC": "Attached picture",
"ASPI": "Audio seek point index",
"COMM": "Comments",
"COMR": "Commercial frame",
"ENCR": "Encryption method registration",
"EQU2": "Equalisation (2)",
"ETCO": "Event timing codes",
"GEOB": "General encapsulated object",
"GRID": "Group identification registration",
"LINK": "Linked information",
"MCDI": "Music CD identifier",
"MLLT": "MPEG location lookup table",
"OWNE": "Ownership frame",
"PRIV": "Private frame",
"PCNT": "Play counter",
"POPM": "Popularimeter",
"POSS": "Position synchronisation frame",
"RBUF": "Recommended buffer size",
"RVA2": "Relative volume adjustment (2)",
"RVRB": "Reverb",
"SEEK": "Seek frame",
"SIGN": "Signature frame",
"SYLT": "Synchronised lyric/text",
"SYTC": "Synchronised tempo codes",
"TALB": "Album/Movie/Show title",
"TBPM": "BPM (beats per minute)",
"TCMP": "iTunes Compilation Flag",
"TCOM": "Composer",
"TCON": "Content type",
"TCOP": "Copyright message",
"TDEN": "Encoding time",
"TDLY": "Playlist delay",
"TDOR": "Original release time",
"TDRC": "Recording time",
"TDRL": "Release time",
"TDTG": "Tagging time",
"TENC": "Encoded by",
"TEXT": "Lyricist/Text writer",
"TFLT": "File type",
"TIPL": "Involved people list",
"TIT1": "Content group description",
"TIT2": "Title/songname/content description",
"TIT3": "Subtitle/Description refinement",
"TKEY": "Initial key",
"TLAN": "Language(s)",
"TLEN": "Length",
"TMCL": "Musician credits list",
"TMED": "Media type",
"TMOO": "Mood",
"TOAL": "Original album/movie/show title",
"TOFN": "Original filename",
"TOLY": "Original lyricist(s)/text writer(s)",
"TOPE": "Original artist(s)/performer(s)",
"TOWN": "File owner/licensee",
"TPE1": "Lead performer(s)/Soloist(s)",
"TPE2": "Band/orchestra/accompaniment",
"TPE3": "Conductor/performer refinement",
"TPE4": "Interpreted, remixed, or otherwise modified by",
"TPOS": "Part of a set",
"TPRO": "Produced notice",
"TPUB": "Publisher",
"TRCK": "Track number/Position in set",
"TRSN": "Internet radio station name",
"TRSO": "Internet radio station owner",
"TSO2": "iTunes uses this for Album Artist sort order",
"TSOA": "Album sort order",
"TSOC": "iTunes uses this for Composer sort order",
"TSOP": "Performer sort order",
"TSOT": "Title sort order",
"TSRC": "ISRC (international standard recording code)",
"TSSE": "Software/Hardware and settings used for encoding",
"TSST": "Set subtitle",
"TXXX": "User defined text information frame",
"UFID": "Unique file identifier",
"USER": "Terms of use",
"USLT": "Unsynchronised lyric/text transcription",
"WCOM": "Commercial information",
"WCOP": "Copyright/Legal information",
"WOAF": "Official audio file webpage",
"WOAR": "Official artist/performer webpage",
"WOAS": "Official audio source webpage",
"WORS": "Official Internet radio station homepage",
"WPAY": "Payment",
"WPUB": "Publishers official webpage",
"WXXX": "User defined URL link frame",
}
// ID3 frames that are defined in the specs.
// Maps each supported tag Format to its table of valid frames
// (4-char frame ID -> human-readable description).
var id3Frames = map[Format]map[string]string{
	ID3v2_2: id3v22Frames,
	ID3v2_3: id3v23Frames,
	ID3v2_4: id3v24Frames,
}
// validID3Frame reports whether name is a frame ID defined by the
// spec for the given ID3 tag format.
func validID3Frame(version Format, name string) bool {
	if frames, ok := id3Frames[version]; ok {
		_, ok = frames[name]
		return ok
	}
	return false
}
// readWFrame decodes a W*** (URL link) frame. Frame text is always
// encoded in ISO-8859-1, so a zero encoding byte is prepended before
// delegating to readTFrame.
func readWFrame(b []byte) (string, error) {
	data := make([]byte, 0, len(b)+1)
	data = append(data, 0) // encoding byte: ISO-8859-1
	data = append(data, b...)
	return readTFrame(data)
}
// readTFrame decodes a T*** (text information) frame: the first byte
// selects the text encoding and the remainder is the payload.
// Embedded NUL separators are stripped from the decoded string.
func readTFrame(b []byte) (string, error) {
	if len(b) == 0 {
		return "", nil
	}
	txt, err := decodeText(b[0], b[1:])
	if err != nil {
		return "", err
	}
	return strings.Replace(txt, "\x00", "", -1), nil
}
func decodeText(enc byte, b []byte) (string, error) {
if len(b) == 0 {
return "", nil
}
switch enc {
case 0: // ISO-8859-1
return decodeISO8859(b), nil
case 1: // UTF-16 with byte order marker
if len(b) == 1 {
return "", nil
}
return decodeUTF16WithBOM(b), nil
case 2: // UTF-16 without byte order (assuming BigEndian)
if len(b) == 1 {
return "", nil
}
return decodeUTF16(b, binary.BigEndian), nil
case 3: // UTF-8
return string(b), nil
default: // Fallback to ISO-8859-1
return decodeISO8859(b), nil
}
}
func encodingDelim(enc byte) ([]byte, error) {
switch enc {
case 0, 3: // see decodeText above
return []byte{0}, nil
case 1, 2: // see decodeText above
return []byte{0, 0}, nil
default: // see decodeText above
return []byte{0}, nil
}
}
// dataSplit splits b into at most two parts around the first
// encoding-appropriate NUL terminator (see encodingDelim). When the
// terminator was actually a double (or triple) NUL, SplitN cuts one
// byte too early and a stray 0 is left at the head of the second
// part; that byte is dropped.
func dataSplit(b []byte, enc byte) ([][]byte, error) {
	delim, err := encodingDelim(enc)
	if err != nil {
		return nil, err
	}
	parts := bytes.SplitN(b, delim, 2)
	if len(parts) == 2 && len(parts[1]) > 0 && parts[1][0] == 0 {
		// there was a double (or triple) 0 and we cut too early
		parts[1] = parts[1][1:]
	}
	return parts, nil
}
// decodeISO8859 converts ISO-8859-1 (Latin-1) bytes to a UTF-8
// string. Each byte value maps directly to the Unicode code point of
// the same value.
func decodeISO8859(b []byte) string {
	var sb strings.Builder
	sb.Grow(len(b))
	for _, x := range b {
		sb.WriteRune(rune(x))
	}
	return sb.String()
}
// decodeUTF16WithBOM decodes UTF-16 text that may begin with a byte
// order marker. When a BOM is present it selects the byte order and
// is stripped; otherwise DefaultUTF16WithBOMByteOrder is assumed.
//
// Fix: the original indexed b[0] and b[1] unconditionally, panicking
// on inputs shorter than two bytes; such inputs now fall through to
// the default byte order.
func decodeUTF16WithBOM(b []byte) string {
	bo := DefaultUTF16WithBOMByteOrder
	if len(b) >= 2 {
		switch {
		case b[0] == 0xFE && b[1] == 0xFF:
			bo = binary.BigEndian
			b = b[2:]
		case b[0] == 0xFF && b[1] == 0xFE:
			bo = binary.LittleEndian
			b = b[2:]
		}
	}
	return decodeUTF16(b, bo)
}
func decodeUTF16(b []byte, bo binary.ByteOrder) string {
s := make([]uint16, 0, len(b)/2)
for i := 0; i < len(b); i += 2 {
s = append(s, bo.Uint16(b[i:i+2]))
}
return string(utf16.Decode(s))
}
// Comm is a type used in COMM, UFID, TXXX, WXXX and USLT tag.
// It's a text with a description and a specified language
// For WXXX, TXXX and UFID, we don't set a Language
type Comm struct {
	Language    string
	Description string
	Text        string
}

// String returns a string representation of the underlying Comm instance.
// With a language set, the text is summarized as a newline count;
// without one, the full text is included.
func (t Comm) String() string {
	if t.Language == "" {
		return fmt.Sprintf("Text{Description: '%v', %v}", t.Description, t.Text)
	}
	lines := strings.Count(t.Text, "\n")
	return fmt.Sprintf("Text{Lang: '%v', Description: '%v', %v lines}",
		t.Language, t.Description, lines)
}
// IDv2.{3,4}
// -- Header
// <Header for 'Unsynchronised lyrics/text transcription', ID: "USLT">
// <Header for 'Comment', ID: "COMM">
// -- readTextWithDescrFrame(data, true, true)
// Text encoding       $xx
// Language            $xx xx xx
// Content descriptor  <text string according to encoding> $00 (00)
// Lyrics/text         <full text string according to encoding>
// -- Header
// <Header for 'User defined text information frame', ID: "TXXX">
// <Header for 'User defined URL link frame', ID: "WXXX">
// -- readTextWithDescrFrame(data, false, <isDataEncoded>)
// Text encoding  $xx
// Description    <text string according to encoding> $00 (00)
// Value          <text string according to encoding>
//
// readTextWithDescrFrame parses a frame carrying a description plus a
// text value. hasLang selects the 3-byte language field; encoded
// selects whether the value uses the frame's text encoding (false
// forces ISO-8859-1, as for WXXX URLs).
//
// Fix: the original indexed b[0] and sliced b[:3] without length
// checks, panicking on truncated frames; those now return errors.
func readTextWithDescrFrame(b []byte, hasLang bool, encoded bool) (*Comm, error) {
	if len(b) == 0 {
		return nil, fmt.Errorf("error decoding tag description text: no data")
	}
	enc := b[0]
	b = b[1:]
	c := &Comm{}
	if hasLang {
		if len(b) < 3 {
			return nil, fmt.Errorf("error decoding tag description text: no language code")
		}
		c.Language = string(b[:3])
		b = b[3:]
	}
	descTextSplit, err := dataSplit(b, enc)
	if err != nil {
		return nil, err
	}
	desc, err := decodeText(enc, descTextSplit[0])
	if err != nil {
		return nil, fmt.Errorf("error decoding tag description text: %v", err)
	}
	c.Description = desc
	if len(descTextSplit) == 1 {
		// No terminator was found: the whole payload was the description.
		return c, nil
	}
	if !encoded {
		enc = byte(0) // force ISO-8859-1 for the value
	}
	text, err := decodeText(enc, descTextSplit[1])
	if err != nil {
		return nil, fmt.Errorf("error decoding tag text: %v", err)
	}
	c.Text = text
	return c, nil
}
// UFID is composed of a provider (frequently a URL and a binary identifier)
// The identifier can be a text (Musicbrainz use texts, but not necessary)
type UFID struct {
	Provider   string
	Identifier []byte
}

// String returns "provider (identifier)" with the identifier rendered
// as text.
func (u UFID) String() string {
	return fmt.Sprintf("%v (%v)", u.Provider, string(u.Identifier))
}

// readUFID splits a UFID frame at the first NUL into the provider and
// the raw identifier.
func readUFID(b []byte) (*UFID, error) {
	i := bytes.IndexByte(b, 0)
	if i < 0 {
		return nil, errors.New("expected to split UFID data into 2 pieces")
	}
	return &UFID{
		Provider:   string(b[:i]),
		Identifier: b[i+1:],
	}, nil
}
// pictureTypes maps the APIC/PIC picture-type byte to the description
// given in the ID3v2 specification. Strings are kept verbatim from
// the spec (including the spec's own "lable" spelling at 0x06).
var pictureTypes = map[byte]string{
	0x00: "Other",
	0x01: "32x32 pixels 'file icon' (PNG only)",
	0x02: "Other file icon",
	0x03: "Cover (front)",
	0x04: "Cover (back)",
	0x05: "Leaflet page",
	0x06: "Media (e.g. lable side of CD)",
	0x07: "Lead artist/lead performer/soloist",
	0x08: "Artist/performer",
	0x09: "Conductor",
	0x0A: "Band/Orchestra",
	0x0B: "Composer",
	0x0C: "Lyricist/text writer",
	0x0D: "Recording Location",
	0x0E: "During recording",
	0x0F: "During performance",
	0x10: "Movie/video screen capture",
	0x11: "A bright coloured fish",
	0x12: "Illustration",
	0x13: "Band/artist logotype",
	0x14: "Publisher/Studio logotype",
}
// Picture is a type which represents an attached picture extracted from metadata.
type Picture struct {
	Ext         string // Extension of the picture file.
	MIMEType    string // MIMEType of the picture.
	Type        string // Type of the picture (see pictureTypes).
	Description string // Description.
	Data        []byte // Raw picture data.
}

// String returns a string representation of the underlying Picture instance.
// The raw data is summarized by its size rather than dumped.
func (p Picture) String() string {
	return fmt.Sprintf(
		"Picture{Ext: %v, MIMEType: %v, Type: %v, Description: %v, Data.Size: %v}",
		p.Ext, p.MIMEType, p.Type, p.Description, len(p.Data),
	)
}
// IDv2.2
// -- Header
// Attached picture   "PIC"
// Frame size         $xx xx xx
// -- readPICFrame
// Text encoding      $xx
// Image format       $xx xx xx
// Picture type       $xx
// Description        <textstring> $00 (00)
// Picture data       <binary data>
//
// readPICFrame parses an ID3v2.2 attached-picture frame.
//
// Fix: the original indexed b[0..4] and descDataSplit[1] without
// checks, panicking on truncated or malformed frames; those cases
// now return errors.
func readPICFrame(b []byte) (*Picture, error) {
	if len(b) < 5 {
		return nil, fmt.Errorf("error decoding PIC frame: data too short")
	}
	enc := b[0]
	ext := string(b[1:4])
	picType := b[4]
	descDataSplit, err := dataSplit(b[5:], enc)
	if err != nil {
		return nil, err
	}
	if len(descDataSplit) != 2 {
		return nil, fmt.Errorf("error decoding PIC frame: no picture data after description")
	}
	desc, err := decodeText(enc, descDataSplit[0])
	if err != nil {
		return nil, fmt.Errorf("error decoding PIC description text: %v", err)
	}
	var mimeType string
	switch ext {
	case "jpeg", "jpg":
		mimeType = "image/jpeg"
	case "png":
		mimeType = "image/png"
	}
	return &Picture{
		Ext:         ext,
		MIMEType:    mimeType,
		Type:        pictureTypes[picType],
		Description: desc,
		Data:        descDataSplit[1],
	}, nil
}
// IDv2.{3,4}
// -- Header
// <Header for 'Attached picture', ID: "APIC">
// -- readAPICFrame
// Text encoding   $xx
// MIME type       <text string> $00
// Picture type    $xx
// Description     <text string according to encoding> $00 (00)
// Picture data    <binary data>
//
// readAPICFrame parses an ID3v2.3/2.4 attached-picture frame.
//
// Fix: the original indexed mimeDataSplit[1] and descDataSplit[1]
// without verifying that a terminator was found, panicking on
// malformed frames; those cases now return errors.
func readAPICFrame(b []byte) (*Picture, error) {
	if len(b) == 0 {
		return nil, fmt.Errorf("error decoding APIC frame: no data")
	}
	enc := b[0]
	mimeDataSplit := bytes.SplitN(b[1:], []byte{0}, 2)
	if len(mimeDataSplit) != 2 {
		return nil, fmt.Errorf("error decoding APIC mimetype")
	}
	mimeType := string(mimeDataSplit[0])
	b = mimeDataSplit[1]
	if len(b) < 1 {
		return nil, fmt.Errorf("error decoding APIC mimetype")
	}
	picType := b[0]
	descDataSplit, err := dataSplit(b[1:], enc)
	if err != nil {
		return nil, err
	}
	if len(descDataSplit) != 2 {
		return nil, fmt.Errorf("error decoding APIC frame: no picture data after description")
	}
	desc, err := decodeText(enc, descDataSplit[0])
	if err != nil {
		return nil, fmt.Errorf("error decoding APIC description text: %v", err)
	}
	var ext string
	switch mimeType {
	case "image/jpeg":
		ext = "jpg"
	case "image/png":
		ext = "png"
	}
	return &Picture{
		Ext:         ext,
		MIMEType:    mimeType,
		Type:        pictureTypes[picType],
		Description: desc,
		Data:        descDataSplit[1],
	}, nil
}
// ---- file boundary (concatenation artifact) ----
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package ids
import (
"bytes"
"reflect"
"testing"
)
// TestID checks that Key, Prefix and Bytes round-trip the underlying
// 32-byte hash and that Prefix neither mutates the ID nor varies
// between calls with the same argument.
func TestID(t *testing.T) {
	hash := [32]byte{24}
	id := NewID(hash)
	if key := id.Key(); !bytes.Equal(hash[:], key[:]) {
		t.Fatalf("ID.Key returned wrong bytes")
	}
	prefixed := id.Prefix(0)
	if key := id.Key(); !bytes.Equal(hash[:], key[:]) {
		t.Fatalf("ID.Prefix mutated the ID")
	}
	if nextPrefix := id.Prefix(0); !prefixed.Equals(nextPrefix) {
		t.Fatalf("ID.Prefix not consistent") // typo fix: was "consistant"
	}
	if b := id.Bytes(); !bytes.Equal(hash[:], b) {
		t.Fatalf("ID.Bytes returned wrong bytes")
	}
}
// TestIDBit checks Bit(i) across the first byte boundary: each ID has
// exactly one bit set, and Bit must report it for positions 0..8.
func TestIDBit(t *testing.T) {
	id0 := NewID([32]byte{1 << 0})
	id1 := NewID([32]byte{1 << 1})
	id2 := NewID([32]byte{1 << 2})
	id3 := NewID([32]byte{1 << 3})
	id4 := NewID([32]byte{1 << 4})
	id5 := NewID([32]byte{1 << 5})
	id6 := NewID([32]byte{1 << 6})
	id7 := NewID([32]byte{1 << 7})
	id8 := NewID([32]byte{0, 1 << 0}) // bit 8 lives in the second byte
	if id0.Bit(0) != 1 {
		t.Fatalf("Wrong bit")
	} else if id1.Bit(1) != 1 {
		t.Fatalf("Wrong bit")
	} else if id2.Bit(2) != 1 {
		t.Fatalf("Wrong bit")
	} else if id3.Bit(3) != 1 {
		t.Fatalf("Wrong bit")
	} else if id4.Bit(4) != 1 {
		t.Fatalf("Wrong bit")
	} else if id5.Bit(5) != 1 {
		t.Fatalf("Wrong bit")
	} else if id6.Bit(6) != 1 {
		t.Fatalf("Wrong bit")
	} else if id7.Bit(7) != 1 {
		t.Fatalf("Wrong bit")
	} else if id8.Bit(8) != 1 {
		t.Fatalf("Wrong bit")
	}
}
// TestFromString checks that FromString is the inverse of ID.String.
func TestFromString(t *testing.T) {
	key := [32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}
	id := NewID(key)
	idStr := id.String()
	id2, err := FromString(idStr)
	if err != nil {
		t.Fatal(err)
	}
	if id.Key() != id2.Key() {
		t.Fatal("Expected FromString to be inverse of String but it wasn't")
	}
}
// TestIDFromStringError checks that FromString rejects strings that
// are not valid encoded IDs.
func TestIDFromStringError(t *testing.T) {
	tests := []struct {
		in string
	}{
		{""},
		{"foo"},
		{"foobar"},
	}
	for _, tt := range tests {
		t.Run(tt.in, func(t *testing.T) {
			_, err := FromString(tt.in)
			if err == nil {
				t.Error("Unexpected success")
			}
		})
	}
}
// TestIDMarshalJSON checks JSON encoding: the zero ID marshals to
// null, a populated ID to its quoted string form.
func TestIDMarshalJSON(t *testing.T) {
	tests := []struct {
		label string
		in    ID
		out   []byte
		err   error
	}{
		{"ID{}", ID{}, []byte("null"), nil},
		{"ID(\"ava labs\")",
			NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
			[]byte("\"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7\""),
			nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.label, func(t *testing.T) {
			out, err := tt.in.MarshalJSON()
			if err != tt.err {
				t.Errorf("Expected err %s, got error %v", tt.err, err)
			} else if !bytes.Equal(out, tt.out) {
				t.Errorf("got %q, expected %q", out, tt.out)
			}
		})
	}
}
// TestIDUnmarshalJSON checks JSON decoding, the inverse of
// TestIDMarshalJSON. The foo.ID != nil guard skips the key comparison
// for the null case (ID presumably wraps a pointer — the nil zero
// value has no key).
func TestIDUnmarshalJSON(t *testing.T) {
	tests := []struct {
		label string
		in    []byte
		out   ID
		err   error
	}{
		{"ID{}", []byte("null"), ID{}, nil},
		{"ID(\"ava labs\")",
			[]byte("\"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7\""),
			NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
			nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.label, func(t *testing.T) {
			foo := ID{}
			err := foo.UnmarshalJSON(tt.in)
			if err != tt.err {
				t.Errorf("Expected err %s, got error %v", tt.err, err)
			} else if foo.ID != nil && foo.Key() != tt.out.Key() {
				t.Errorf("got %q, expected %q", foo.Key(), tt.out.Key())
			}
		})
	}
}
// TestIDHex checks the hex encoding of an ID. A 32-byte ID must
// encode to exactly 64 hex characters.
//
// Fix: the previous expected string contained a stray extra "00"
// (66 characters), which cannot be the encoding of a 32-byte value.
func TestIDHex(t *testing.T) {
	id := NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'})
	expected := "617661206c616273000000000000000000000000000000000000000000000000"
	actual := id.Hex()
	if actual != expected {
		t.Fatalf("got %s, expected %s", actual, expected)
	}
}
// TestIDString checks String: the zero ID renders as "nil", a
// populated ID as its base58-style encoded form.
func TestIDString(t *testing.T) {
	tests := []struct {
		label    string
		id       ID
		expected string
	}{
		{"ID{}", ID{}, "nil"},
		{"ID{[32]byte{24}}", NewID([32]byte{24}), "Ba3mm8Ra8JYYebeZ9p7zw1ayorDbeD1euwxhgzSLsncKqGoNt"},
	}
	for _, tt := range tests {
		t.Run(tt.label, func(t *testing.T) {
			result := tt.id.String()
			if result != tt.expected {
				t.Errorf("got %q, expected %q", result, tt.expected)
			}
		})
	}
}
// TestSortIDs checks that SortIDs orders IDs by byte value
// ('W' < 'a' < 'e' in ASCII).
func TestSortIDs(t *testing.T) {
	ids := []ID{
		NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'W', 'a', 'l', 'l', 'e', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
	}
	SortIDs(ids)
	expected := []ID{
		NewID([32]byte{'W', 'a', 'l', 'l', 'e', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
	}
	if !reflect.DeepEqual(ids, expected) {
		t.Fatal("[]ID was not sorted lexicographically") // typo fix: was "lexographically"
	}
}
// TestIsSortedAndUnique checks IsSortedAndUniqueIDs against an
// unsorted slice, a slice with duplicates, and a sorted unique slice.
func TestIsSortedAndUnique(t *testing.T) {
	unsorted := []ID{
		NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
	}
	if IsSortedAndUniqueIDs(unsorted) {
		t.Fatal("Wrongly accepted unsorted IDs")
	}
	duplicated := []ID{
		NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
	}
	if IsSortedAndUniqueIDs(duplicated) {
		t.Fatal("Wrongly accepted duplicated IDs")
	}
	sorted := []ID{
		NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
	}
	if !IsSortedAndUniqueIDs(sorted) {
		t.Fatal("Wrongly rejected sorted, unique IDs")
	}
}
// testcase fix (commit-message artifact from concatenation)
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package ids
import (
"bytes"
"reflect"
"testing"
)
// TestID checks that Key, Prefix and Bytes round-trip the underlying
// 32-byte hash and that Prefix neither mutates the ID nor varies
// between calls with the same argument.
func TestID(t *testing.T) {
	hash := [32]byte{24}
	id := NewID(hash)
	if key := id.Key(); !bytes.Equal(hash[:], key[:]) {
		t.Fatalf("ID.Key returned wrong bytes")
	}
	prefixed := id.Prefix(0)
	if key := id.Key(); !bytes.Equal(hash[:], key[:]) {
		t.Fatalf("ID.Prefix mutated the ID")
	}
	if nextPrefix := id.Prefix(0); !prefixed.Equals(nextPrefix) {
		t.Fatalf("ID.Prefix not consistent") // typo fix: was "consistant"
	}
	if b := id.Bytes(); !bytes.Equal(hash[:], b) {
		t.Fatalf("ID.Bytes returned wrong bytes")
	}
}
// TestIDBit checks Bit(i) across the first byte boundary: each ID has
// exactly one bit set, and Bit must report it for positions 0..8.
func TestIDBit(t *testing.T) {
	id0 := NewID([32]byte{1 << 0})
	id1 := NewID([32]byte{1 << 1})
	id2 := NewID([32]byte{1 << 2})
	id3 := NewID([32]byte{1 << 3})
	id4 := NewID([32]byte{1 << 4})
	id5 := NewID([32]byte{1 << 5})
	id6 := NewID([32]byte{1 << 6})
	id7 := NewID([32]byte{1 << 7})
	id8 := NewID([32]byte{0, 1 << 0}) // bit 8 lives in the second byte
	if id0.Bit(0) != 1 {
		t.Fatalf("Wrong bit")
	} else if id1.Bit(1) != 1 {
		t.Fatalf("Wrong bit")
	} else if id2.Bit(2) != 1 {
		t.Fatalf("Wrong bit")
	} else if id3.Bit(3) != 1 {
		t.Fatalf("Wrong bit")
	} else if id4.Bit(4) != 1 {
		t.Fatalf("Wrong bit")
	} else if id5.Bit(5) != 1 {
		t.Fatalf("Wrong bit")
	} else if id6.Bit(6) != 1 {
		t.Fatalf("Wrong bit")
	} else if id7.Bit(7) != 1 {
		t.Fatalf("Wrong bit")
	} else if id8.Bit(8) != 1 {
		t.Fatalf("Wrong bit")
	}
}
// TestFromString checks that FromString is the inverse of ID.String.
func TestFromString(t *testing.T) {
	key := [32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}
	id := NewID(key)
	idStr := id.String()
	id2, err := FromString(idStr)
	if err != nil {
		t.Fatal(err)
	}
	if id.Key() != id2.Key() {
		t.Fatal("Expected FromString to be inverse of String but it wasn't")
	}
}
// TestIDFromStringError checks that FromString rejects strings that
// are not valid encoded IDs.
func TestIDFromStringError(t *testing.T) {
	tests := []struct {
		in string
	}{
		{""},
		{"foo"},
		{"foobar"},
	}
	for _, tt := range tests {
		t.Run(tt.in, func(t *testing.T) {
			_, err := FromString(tt.in)
			if err == nil {
				t.Error("Unexpected success")
			}
		})
	}
}
// TestIDMarshalJSON checks JSON encoding: the zero ID marshals to
// null, a populated ID to its quoted string form.
func TestIDMarshalJSON(t *testing.T) {
	tests := []struct {
		label string
		in    ID
		out   []byte
		err   error
	}{
		{"ID{}", ID{}, []byte("null"), nil},
		{"ID(\"ava labs\")",
			NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
			[]byte("\"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7\""),
			nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.label, func(t *testing.T) {
			out, err := tt.in.MarshalJSON()
			if err != tt.err {
				t.Errorf("Expected err %s, got error %v", tt.err, err)
			} else if !bytes.Equal(out, tt.out) {
				t.Errorf("got %q, expected %q", out, tt.out)
			}
		})
	}
}
// TestIDUnmarshalJSON checks JSON decoding, the inverse of
// TestIDMarshalJSON. The foo.ID != nil guard skips the key comparison
// for the null case (ID presumably wraps a pointer — the nil zero
// value has no key).
func TestIDUnmarshalJSON(t *testing.T) {
	tests := []struct {
		label string
		in    []byte
		out   ID
		err   error
	}{
		{"ID{}", []byte("null"), ID{}, nil},
		{"ID(\"ava labs\")",
			[]byte("\"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7\""),
			NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
			nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.label, func(t *testing.T) {
			foo := ID{}
			err := foo.UnmarshalJSON(tt.in)
			if err != tt.err {
				t.Errorf("Expected err %s, got error %v", tt.err, err)
			} else if foo.ID != nil && foo.Key() != tt.out.Key() {
				t.Errorf("got %q, expected %q", foo.Key(), tt.out.Key())
			}
		})
	}
}
// TestIDHex checks the hex encoding of an ID: a 32-byte ID encodes to
// exactly 64 hex characters.
func TestIDHex(t *testing.T) {
	id := NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'})
	expected := "617661206c616273000000000000000000000000000000000000000000000000"
	actual := id.Hex()
	if actual != expected {
		t.Fatalf("got %s, expected %s", actual, expected)
	}
}
// TestIDString checks String: the zero ID renders as "nil", a
// populated ID as its base58-style encoded form.
func TestIDString(t *testing.T) {
	tests := []struct {
		label    string
		id       ID
		expected string
	}{
		{"ID{}", ID{}, "nil"},
		{"ID{[32]byte{24}}", NewID([32]byte{24}), "Ba3mm8Ra8JYYebeZ9p7zw1ayorDbeD1euwxhgzSLsncKqGoNt"},
	}
	for _, tt := range tests {
		t.Run(tt.label, func(t *testing.T) {
			result := tt.id.String()
			if result != tt.expected {
				t.Errorf("got %q, expected %q", result, tt.expected)
			}
		})
	}
}
// TestSortIDs checks that SortIDs orders IDs by byte value
// ('W' < 'a' < 'e' in ASCII).
func TestSortIDs(t *testing.T) {
	ids := []ID{
		NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'W', 'a', 'l', 'l', 'e', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
	}
	SortIDs(ids)
	expected := []ID{
		NewID([32]byte{'W', 'a', 'l', 'l', 'e', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
	}
	if !reflect.DeepEqual(ids, expected) {
		t.Fatal("[]ID was not sorted lexicographically") // typo fix: was "lexographically"
	}
}
// TestIsSortedAndUnique checks IsSortedAndUniqueIDs against an
// unsorted slice, a slice with duplicates, and a sorted unique slice.
func TestIsSortedAndUnique(t *testing.T) {
	unsorted := []ID{
		NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
	}
	if IsSortedAndUniqueIDs(unsorted) {
		t.Fatal("Wrongly accepted unsorted IDs")
	}
	duplicated := []ID{
		NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
	}
	if IsSortedAndUniqueIDs(duplicated) {
		t.Fatal("Wrongly accepted duplicated IDs")
	}
	sorted := []ID{
		NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
		NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}),
	}
	if !IsSortedAndUniqueIDs(sorted) {
		t.Fatal("Wrongly rejected sorted, unique IDs")
	}
}
// ---- file boundary (concatenation artifact) ----
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package image
import (
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// ValidateLayout walks through the file tree given by src and
// validates the manifest pointed to by the given refs
// or returns an error if the validation failed.
// out may be nil to suppress progress output.
func ValidateLayout(src string, refs []string, out *log.Logger) error {
	return validate(newPathWalker(src), refs, out)
}
// Validate walks through the given .tar file and
// validates the manifest pointed to by the given refs
// or returns an error if the validation failed.
// out may be nil to suppress progress output.
func Validate(tarFile string, refs []string, out *log.Logger) error {
	f, err := os.Open(tarFile)
	if err != nil {
		return errors.Wrap(err, "unable to open file")
	}
	defer f.Close()
	return validate(newTarWalker(f), refs, out)
}
// validRefMediaTypes lists the media types a reference descriptor may
// point to: an image manifest or a manifest list.
var validRefMediaTypes = []string{
	v1.MediaTypeImageManifest,
	v1.MediaTypeImageManifestList,
}
// validate resolves each requested ref (or every listed reference when
// refs is empty), checks its descriptor against the allowed manifest
// media types, and validates the manifest it points to. out may be
// nil, in which case nothing is logged.
//
// Fix: the "no descriptors" warning called out.Print unconditionally,
// panicking on a nil logger even though the success path already
// guards with out != nil.
func validate(w walker, refs []string, out *log.Logger) error {
	ds, err := listReferences(w)
	if err != nil {
		return err
	}
	if len(refs) == 0 && len(ds) == 0 {
		// TODO(runcom): ugly, we'll need a better way and library
		// to express log levels.
		// see https://github.com/opencontainers/image-spec/issues/288
		if out != nil {
			out.Print("WARNING: no descriptors found")
		}
	}
	if len(refs) == 0 {
		for ref := range ds {
			refs = append(refs, ref)
		}
	}
	for _, ref := range refs {
		d, ok := ds[ref]
		if !ok {
			// TODO(runcom):
			// soften this error to a warning if the user didn't ask for any specific reference
			// with --ref but she's just validating the whole image.
			return fmt.Errorf("reference %s not found", ref)
		}
		if err = d.validate(w, validRefMediaTypes); err != nil {
			return err
		}
		m, err := findManifest(w, d)
		if err != nil {
			return err
		}
		if err := m.validate(w); err != nil {
			return err
		}
		if out != nil {
			out.Printf("reference %q: OK", ref)
		}
	}
	return nil
}
// UnpackLayout walks through the file tree given by src and
// using the layers specified in the manifest pointed to by the given ref
// and unpacks all layers in the given destination directory
// or returns an error if the unpacking failed.
func UnpackLayout(src, dest, ref string) error {
	return unpack(newPathWalker(src), dest, ref)
}
// Unpack walks through the given .tar file and
// using the layers specified in the manifest pointed to by the given ref
// and unpacks all layers in the given destination directory
// or returns an error if the unpacking failed.
func Unpack(tarFile, dest, ref string) error {
	f, err := os.Open(tarFile)
	if err != nil {
		return errors.Wrap(err, "unable to open file")
	}
	defer f.Close()
	return unpack(newTarWalker(f), dest, ref)
}
// unpack resolves refName to a descriptor, validates both the
// descriptor and the manifest it points to, then unpacks the
// manifest's layers into dest.
func unpack(w walker, dest, refName string) error {
	ref, err := findDescriptor(w, refName)
	if err != nil {
		return err
	}
	if err = ref.validate(w, validRefMediaTypes); err != nil {
		return err
	}
	m, err := findManifest(w, ref)
	if err != nil {
		return err
	}
	if err = m.validate(w); err != nil {
		return err
	}
	return m.unpack(w, dest)
}
// CreateRuntimeBundleLayout walks through the file tree given by src and
// creates an OCI runtime bundle in the given destination dest
// or returns an error if the unpacking failed.
func CreateRuntimeBundleLayout(src, dest, ref, root string) error {
	return createRuntimeBundle(newPathWalker(src), dest, ref, root)
}
// CreateRuntimeBundle walks through the given .tar file and
// creates an OCI runtime bundle in the given destination dest
// or returns an error if the unpacking failed.
func CreateRuntimeBundle(tarFile, dest, ref, root string) error {
	f, err := os.Open(tarFile)
	if err != nil {
		return errors.Wrap(err, "unable to open file")
	}
	defer f.Close()
	return createRuntimeBundle(newTarWalker(f), dest, ref, root)
}
// createRuntimeBundle resolves refName, validates the descriptor and
// manifest, unpacks the image layers into dest/rootfs, and writes the
// runtime spec derived from the image config to dest/config.json.
func createRuntimeBundle(w walker, dest, refName, rootfs string) error {
	ref, err := findDescriptor(w, refName)
	if err != nil {
		return err
	}
	if err = ref.validate(w, validRefMediaTypes); err != nil {
		return err
	}
	m, err := findManifest(w, ref)
	if err != nil {
		return err
	}
	if err = m.validate(w); err != nil {
		return err
	}
	c, err := findConfig(w, &m.Config)
	if err != nil {
		return err
	}
	err = m.unpack(w, filepath.Join(dest, rootfs))
	if err != nil {
		return err
	}
	spec, err := c.runtimeSpec(rootfs)
	if err != nil {
		return err
	}
	// NOTE(review): the error from f.Close() is discarded by the
	// defer; a write buffered at close time could be lost silently.
	f, err := os.Create(filepath.Join(dest, "config.json"))
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewEncoder(f).Encode(spec)
}
// image: fix typo in docstring (commit-message artifact from concatenation)
// Signed-off-by: Jonathan Boulle <0d47477beef1d882e8812885f3dcf66c4a47c346@gmail.com>
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package image
import (
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// ValidateLayout walks through the file tree given by src and
// validates the manifest pointed to by the given refs
// or returns an error if the validation failed.
// out may be nil to suppress progress output.
func ValidateLayout(src string, refs []string, out *log.Logger) error {
	return validate(newPathWalker(src), refs, out)
}
// Validate walks through the given .tar file and
// validates the manifest pointed to by the given refs
// or returns an error if the validation failed.
// out may be nil to suppress progress output.
func Validate(tarFile string, refs []string, out *log.Logger) error {
	f, err := os.Open(tarFile)
	if err != nil {
		return errors.Wrap(err, "unable to open file")
	}
	defer f.Close()
	return validate(newTarWalker(f), refs, out)
}
// validRefMediaTypes lists the media types a reference descriptor may
// point to: an image manifest or a manifest list.
var validRefMediaTypes = []string{
	v1.MediaTypeImageManifest,
	v1.MediaTypeImageManifestList,
}
// validate resolves each requested ref (or every listed reference when
// refs is empty), checks its descriptor against the allowed manifest
// media types, and validates the manifest it points to. out may be
// nil, in which case nothing is logged.
//
// Fix: the "no descriptors" warning called out.Print unconditionally,
// panicking on a nil logger even though the success path already
// guards with out != nil.
func validate(w walker, refs []string, out *log.Logger) error {
	ds, err := listReferences(w)
	if err != nil {
		return err
	}
	if len(refs) == 0 && len(ds) == 0 {
		// TODO(runcom): ugly, we'll need a better way and library
		// to express log levels.
		// see https://github.com/opencontainers/image-spec/issues/288
		if out != nil {
			out.Print("WARNING: no descriptors found")
		}
	}
	if len(refs) == 0 {
		for ref := range ds {
			refs = append(refs, ref)
		}
	}
	for _, ref := range refs {
		d, ok := ds[ref]
		if !ok {
			// TODO(runcom):
			// soften this error to a warning if the user didn't ask for any specific reference
			// with --ref but she's just validating the whole image.
			return fmt.Errorf("reference %s not found", ref)
		}
		if err = d.validate(w, validRefMediaTypes); err != nil {
			return err
		}
		m, err := findManifest(w, d)
		if err != nil {
			return err
		}
		if err := m.validate(w); err != nil {
			return err
		}
		if out != nil {
			out.Printf("reference %q: OK", ref)
		}
	}
	return nil
}
// UnpackLayout walks through the file tree given by src and
// using the layers specified in the manifest pointed to by the given ref
// and unpacks all layers in the given destination directory
// or returns an error if the unpacking failed.
func UnpackLayout(src, dest, ref string) error {
	return unpack(newPathWalker(src), dest, ref)
}
// Unpack walks through the given .tar file and
// using the layers specified in the manifest pointed to by the given ref
// and unpacks all layers in the given destination directory
// or returns an error if the unpacking failed.
func Unpack(tarFile, dest, ref string) error {
	f, err := os.Open(tarFile)
	if err != nil {
		return errors.Wrap(err, "unable to open file")
	}
	defer f.Close()
	return unpack(newTarWalker(f), dest, ref)
}
// unpack resolves refName to a descriptor, validates both the
// descriptor and the manifest it points to, then unpacks the
// manifest's layers into dest.
func unpack(w walker, dest, refName string) error {
	ref, err := findDescriptor(w, refName)
	if err != nil {
		return err
	}
	if err = ref.validate(w, validRefMediaTypes); err != nil {
		return err
	}
	m, err := findManifest(w, ref)
	if err != nil {
		return err
	}
	if err = m.validate(w); err != nil {
		return err
	}
	return m.unpack(w, dest)
}
// CreateRuntimeBundleLayout walks through the file tree given by src and
// creates an OCI runtime bundle in the given destination dest
// or returns an error if the unpacking failed.
func CreateRuntimeBundleLayout(src, dest, ref, root string) error {
	return createRuntimeBundle(newPathWalker(src), dest, ref, root)
}
// CreateRuntimeBundle walks through the given .tar file and
// creates an OCI runtime bundle in the given destination dest
// or returns an error if the unpacking failed.
func CreateRuntimeBundle(tarFile, dest, ref, root string) error {
	f, err := os.Open(tarFile)
	if err != nil {
		return errors.Wrap(err, "unable to open file")
	}
	defer f.Close()
	return createRuntimeBundle(newTarWalker(f), dest, ref, root)
}
// createRuntimeBundle resolves refName, validates the descriptor and
// manifest, unpacks the image layers into dest/rootfs, and writes the
// runtime spec derived from the image config to dest/config.json.
func createRuntimeBundle(w walker, dest, refName, rootfs string) error {
	ref, err := findDescriptor(w, refName)
	if err != nil {
		return err
	}
	if err = ref.validate(w, validRefMediaTypes); err != nil {
		return err
	}
	m, err := findManifest(w, ref)
	if err != nil {
		return err
	}
	if err = m.validate(w); err != nil {
		return err
	}
	c, err := findConfig(w, &m.Config)
	if err != nil {
		return err
	}
	err = m.unpack(w, filepath.Join(dest, rootfs))
	if err != nil {
		return err
	}
	spec, err := c.runtimeSpec(rootfs)
	if err != nil {
		return err
	}
	// NOTE(review): the error from f.Close() is discarded by the
	// defer; a write buffered at close time could be lost silently.
	f, err := os.Create(filepath.Join(dest, "config.json"))
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewEncoder(f).Encode(spec)
}
// ---- file boundary (concatenation artifact) ----
// Copyright 2013 The Go-IMAP Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imap
import (
"bytes"
"fmt"
"io"
"strconv"
)
// ParserError indicates a problem with the server response format. This could
// be the result of an unsupported extension or nonstandard server behavior.
type ParserError struct {
	Info   string // Short message explaining the problem
	Line   []byte // Full or partial response line, starting with the tag
	Offset int    // Parser offset, starting at 0
}

// Error renders the problem, truncating the offending line to rawLimit
// bytes (with a trailing "...") when it is long.
func (err *ParserError) Error() string {
	if err.Line == nil {
		return "imap: " + err.Info
	}
	line := err.Line
	ellipsis := ""
	if len(line) > rawLimit {
		line = line[:rawLimit]
		ellipsis = "..."
	}
	return fmt.Sprintf("imap: %s at offset %d of %+q%s",
		err.Info, err.Offset, line, ellipsis)
}
// readerInput is the interface for reading all parts of a response. This
// interface is implemented by transport. Raw reads (io.Reader) pull
// literal bytes; ReadLine pulls one CRLF-terminated line.
type readerInput interface {
	io.Reader
	ReadLine() (line []byte, err error)
}
// reader creates rawResponse structs and provides additional lines and literals
// to the parser when requested.
type reader struct {
	readerInput
	LiteralReader
	tagid []byte // Tag prefix expected in command completion responses ([A-Z]+)
	order int64  // Response order counter
}
// rawResponse is an intermediate response form used to construct full Response
// objects. The struct returned by reader.Next() contains the start of the next
// response up to the first literal string, if there is one. The parser reads
// literals and additional lines as needed via reader.More(), which appends new
// bytes to line and tail. The current parser position can be calculated as
// len(raw.line) - len(raw.tail).
type rawResponse struct {
	*Response
	*reader
	line []byte // Full response line without literals or CRLFs
	tail []byte // Unconsumed line ending (parser state)
}
// newReader returns a reader configured to accept tagged responses beginning
// with tagid. The tag prefix must be non-empty, upper-case ASCII; any
// other input (including nil in/lr) panics.
func newReader(in readerInput, lr LiteralReader, tagid string) *reader {
	if in == nil || lr == nil || tagid == "" {
		panic("imap: bad arguments to newReader")
	}
	for i := 0; i < len(tagid); i++ {
		if c := tagid[i]; c < 'A' || c > 'Z' {
			panic("imap: bad tagid format")
		}
	}
	return &reader{in, lr, []byte(tagid), 0}
}
// Next returns the next unparsed server response, or any data read prior to an
// error. If an error is returned and rsp != nil, the connection should be
// terminated because the client and server are no longer synchronized.
func (r *reader) Next() (raw *rawResponse, err error) {
	raw = &rawResponse{reader: r}
	if raw.line, err = r.ReadLine(); err != nil {
		// A read error with no partial data yields (nil, err);
		// partial data is returned so the caller can inspect it.
		if len(raw.line) == 0 {
			raw = nil
		}
	} else if tag := r.tag(raw.line); tag != "" {
		// Valid tag: assign a monotonically increasing order number
		// and position tail just past "<tag> ".
		r.order++
		raw.Response = &Response{Order: r.order, Raw: raw.line, Tag: tag}
		raw.tail = raw.line[len(tag)+1:]
	} else {
		err = &ProtocolError{"bad response tag", raw.line}
	}
	return
}
// More returns the next literal string and reads one more line from the server.
// The literal bytes are limited to i.Len; the continuation line (if any)
// is appended to raw.line with raw.tail repositioned at its start.
func (r *reader) More(raw *rawResponse, i LiteralInfo) (l Literal, err error) {
	src := io.LimitedReader{R: r, N: int64(i.Len)}
	if l, err = r.ReadLiteral(&src, i); l != nil {
		raw.Literals = append(raw.Literals, l)
		if err == nil {
			var line []byte
			if line, err = r.ReadLine(); len(line) > 0 { // ok if err != nil
				pos := raw.pos()
				raw.line = append(raw.line, line...)
				raw.tail = raw.line[pos:]
				raw.Raw = raw.line
			}
		}
	} else if err == nil {
		// Sanity check for user-provided ReadLiteral implementations
		panic("imap: ReadLiteral returned (nil, nil)")
	}
	return
}
// tag verifies that line is a valid start of a new server response and returns
// the full response tag. Valid tags are "*" (untagged status/data), "+"
// (continuation request), and strings in the format "{r.tagid}[0-9]+" (command
// completion). The tag must be followed by a space.
func (r *reader) tag(line []byte) string {
	if n := bytes.IndexByte(line, ' '); n == 1 {
		if c := line[0]; c == '*' || c == '+' {
			return string(c)
		}
	} else if i := len(r.tagid); i < n && bytes.Equal(line[:i], r.tagid) {
		// Note: when no space exists, n is -1 and i < n fails,
		// so the function correctly falls through to "".
		for _, c := range line[i:n] {
			if c < '0' || c > '9' {
				return ""
			}
		}
		return string(line[:n])
	}
	return ""
}
// Error returned by parseCondition to indicate that rsp.Type != Status.
// It is a sentinel compared by identity in Parse, never shown to users.
var errNotStatus error = &ParserError{Info: "not a status response"}
// Parse converts rawResponse into a full Response object by calling parseX
// methods, which gradually consume raw.tail. Untagged ("*") responses
// become Status or Data, "+" becomes Continue, and tagged responses
// become Done; anything left in raw.tail afterwards is an error.
// Parse is single-use: raw.Response is cleared on return.
func (raw *rawResponse) Parse() (rsp *Response, err error) {
	if raw.Response == nil {
		return nil, &ParserError{"unparsable response", raw.line, 0}
	}
	switch rsp = raw.Response; rsp.Tag {
	case "*":
		// errNotStatus distinguishes data responses from status ones.
		if err = raw.parseCondition(OK | NO | BAD | PREAUTH | BYE); err == nil {
			rsp.Type = Status
			err = raw.parseStatus()
		} else if err == errNotStatus {
			rsp.Type = Data
			rsp.Fields, err = raw.parseFields(nul)
			if len(rsp.Fields) == 0 && err == nil {
				err = raw.error("empty data response", 0)
			}
		}
	case "+":
		rsp.Type = Continue
		raw.parseContinue()
	default:
		if err = raw.parseCondition(OK | NO | BAD); err == nil {
			rsp.Type = Done
			err = raw.parseStatus()
		} else if err == errNotStatus {
			err = &ParserError{"unknown response type", raw.line, 0}
		}
	}
	if len(raw.tail) > 0 && err == nil {
		err = raw.unexpected(0)
	}
	raw.Response = nil
	return
}
// pos reports how many bytes of raw.line the parser has consumed so far,
// i.e. the current parser position within the response line.
func (raw *rawResponse) pos() int {
	consumed := len(raw.line) - len(raw.tail)
	return consumed
}
// error builds a ParserError describing a problem with the response. The
// given offset is relative to raw.tail and is translated to an absolute
// position within raw.line.
func (raw *rawResponse) error(info string, off int) error {
	abs := raw.pos() + off
	return &ParserError{Info: info, Line: raw.line, Offset: abs}
}
// unexpected builds a ParserError reporting an unexpected byte at the given
// offset (relative to raw.tail).
func (raw *rawResponse) unexpected(off int) error {
	b := raw.line[raw.pos()+off]
	msg := fmt.Sprintf("unexpected %+q", b)
	return raw.error(msg, off)
}
// missing builds a ParserError reporting an absent required character or
// section at the given offset (relative to raw.tail). A byte value is quoted
// in the message; anything else is formatted with %v.
func (raw *rawResponse) missing(v interface{}, off int) error {
	format := "missing %v"
	if _, isByte := v.(byte); isByte {
		format = "missing %+q"
	}
	return raw.error(fmt.Sprintf(format, v), off)
}
// Valid status conditions, in the byte form matched against incoming
// responses and the corresponding RespStatus flag.
var bStatus = []struct {
	b []byte
	s RespStatus
}{
	{[]byte("OK"), OK},
	{[]byte("NO"), NO},
	{[]byte("BAD"), BAD},
	{[]byte("PREAUTH"), PREAUTH},
	{[]byte("BYE"), BYE},
}
// parseCondition extracts the status condition if raw is a status response
// (ABNF: resp-cond-*). errNotStatus is returned for all other response types.
func (raw *rawResponse) parseCondition(accept RespStatus) error {
outer:
	for _, v := range bStatus {
		if n := len(v.b); n <= len(raw.tail) {
			for i, c := range v.b {
				if raw.tail[i]&0xDF != c { // &0xDF converts [a-z] to upper case
					continue outer
				}
			}
			if n == len(raw.tail) {
				// The condition matched but the line ends without the
				// mandatory SP separator.
				return raw.missing("SP", n)
			} else if raw.tail[n] == ' ' {
				if accept&v.s == 0 {
					return raw.error("unacceptable status condition", 0)
				}
				raw.Status = v.s
				raw.tail = raw.tail[n+1:]
				return nil
			}
			// Assume data response with a matching prefix (e.g. "* NOT STATUS")
			break
		}
	}
	return errNotStatus
}
// parseStatus extracts the optional response code and required text after the
// status condition (ABNF: resp-text).
func (raw *rawResponse) parseStatus() error {
	// Optional bracketed response code, e.g. "[UIDVALIDITY 3857529045]".
	if len(raw.tail) > 0 && raw.tail[0] == '[' {
		var err error
		raw.tail = raw.tail[1:]
		if raw.Fields, err = raw.parseFields(']'); err != nil {
			return err
		} else if len(raw.Fields) == 0 {
			return raw.error("empty response code", -1)
		} else if len(raw.tail) == 0 {
			// Some servers do not send any text after the response code
			// (e.g. "* OK [UNSEEN 1]"). This is not allowed, according to RFC
			// 3501 ABNF, but we accept it for compatibility with other clients.
			raw.tail = nil
			return nil
		} else if raw.tail[0] != ' ' {
			return raw.missing("SP", 0)
		}
		raw.tail = raw.tail[1:]
	}
	if len(raw.tail) == 0 {
		return raw.missing("status text", 0)
	}
	raw.Info = string(raw.tail)
	raw.tail = nil
	return nil
}
// parseContinue extracts the text or Base64 data from a continuation request
// (ABNF: continue-req). Base64 data is saved in its original form to raw.Info,
// and decoded as []byte into raw.Fields[0].
func (raw *rawResponse) parseContinue() {
	if n := len(raw.tail); n == 0 {
		// Empty continuation: treat as zero-length Base64 data.
		raw.Label = "BASE64"
		raw.Fields = []Field{[]byte(nil)}
	} else if n&3 == 0 {
		// Valid Base64 is always a multiple of 4 bytes long; try to decode.
		if b, err := b64dec(raw.tail); err == nil {
			raw.Label = "BASE64"
			raw.Fields = []Field{b}
		}
	}
	// ABNF uses resp-text, but section 7.5 states "The remainder of this
	// response is a line of text." Assume that response codes are not allowed.
	raw.Info = string(raw.tail)
	raw.tail = nil
}
// parseFields extracts as many data fields from raw.tail as possible until it
// finds the stop byte in a delimiter position. An error is returned if the stop
// byte is not found. NUL stop causes all of raw.tail to be consumed (NUL does
// not appear anywhere in raw.line - checked by transport).
func (raw *rawResponse) parseFields(stop byte) (fields []Field, err error) {
	if len(raw.tail) > 0 && raw.tail[0] == stop {
		// Empty parenthesized list, BODY[] and friends, or an error
		raw.tail = raw.tail[1:]
		return
	}
	for len(raw.tail) > 0 && err == nil {
		var f Field
		switch raw.next() {
		case QuotedString:
			f, err = raw.parseQuotedString()
		case LiteralString:
			f, err = raw.parseLiteralString()
		case List:
			raw.tail = raw.tail[1:]
			f, err = raw.parseFields(')') // Recurse for nested lists
		default:
			// astring rules apply only in data responses outside of a
			// response code (stop != ']').
			f, err = raw.parseAtom(raw.Type == Data && stop != ']')
		}
		if err == nil || f != nil {
			fields = append(fields, f)
		}
		// Delimiter
		if len(raw.tail) > 0 && err == nil {
			switch raw.tail[0] {
			case ' ':
				if len(raw.tail) > 1 {
					raw.tail = raw.tail[1:]
				} else {
					// Trailing SP with nothing after it
					err = raw.unexpected(0)
				}
			case stop:
				raw.tail = raw.tail[1:]
				return
			case '(':
				// body-type-mpart is 1*body without a space in between
				if len(raw.tail) == 1 {
					err = raw.unexpected(0)
				}
			default:
				err = raw.unexpected(0)
			}
		}
	}
	if stop != nul && err == nil {
		err = raw.missing(stop, 0)
	}
	return
}
// next classifies the upcoming response field by looking at the first one or
// two bytes of raw.tail. The default classification is Atom, which covers
// atoms, numbers, and NILs.
func (raw *rawResponse) next() FieldType {
	c := raw.tail[0]
	if c == '"' {
		return QuotedString
	}
	if c == '{' {
		return LiteralString
	}
	if c == '(' {
		return List
	}
	// Two-byte prefixes: RFC 5738 utf8-quoted (`*"`) and RFC 3516
	// literal8 (`~{`).
	if len(raw.tail) >= 2 {
		if c == '*' && raw.tail[1] == '"' {
			return QuotedString
		}
		if c == '~' && raw.tail[1] == '{' {
			return LiteralString
		}
	}
	return Atom
}
// parseQuotedString returns the next quoted string. The string stays quoted,
// but validation is performed to ensure that subsequent calls to Unquote() are
// successful.
func (raw *rawResponse) parseQuotedString() (f Field, err error) {
	start := 1
	if raw.tail[0] == '*' {
		// RFC 5738 utf8-quoted: skip the leading '*' as well as the quote.
		start++
	}
	escaped := false
	for n, c := range raw.tail[start:] {
		if escaped {
			escaped = false
		} else if c == '\\' {
			escaped = true
		} else if c == '"' {
			// Closing quote found; include it in the validated slice.
			n += start + 1
			if _, ok := UnquoteBytes(raw.tail[:n]); ok {
				f = string(raw.tail[:n])
				raw.tail = raw.tail[n:]
				return
			}
			break
		}
	}
	err = raw.error("bad quoted string", 0)
	return
}
// parseLiteralString returns the next literal string. The octet count should be
// the last field in raw.tail. An additional line of text will be appended to
// raw.line and raw.tail after the literal is received.
func (raw *rawResponse) parseLiteralString() (f Field, err error) {
	var info LiteralInfo
	start := 1
	if raw.tail[0] == '~' {
		// RFC 3516 literal8 ("~{n}"): binary content.
		info.Bin = true
		start++
	}
	n := len(raw.tail) - 1
	if n-start < 1 || raw.tail[n] != '}' {
		err = raw.unexpected(0)
		return
	}
	oc, err := strconv.ParseUint(string(raw.tail[start:n]), 10, 32)
	if err != nil {
		err = raw.error("bad literal octet count", start)
		return
	}
	info.Len = uint32(oc)
	// More reads the literal plus the next line; only then is the "{n}"
	// prefix consumed from the (now re-derived) tail.
	if f, err = raw.More(raw, info); err == nil {
		raw.tail = raw.tail[n+1:]
	}
	return
}
// atomSpecials identifies ASCII characters that either may not appear in atoms
// or require special handling (ABNF: ATOM-CHAR).
var atomSpecials [char]bool

func init() {
	// atom-specials + '[' to provide special handling for BODY[...]
	s := []byte{'(', ')', '{', ' ', '%', '*', '"', '[', '\\', ']', '\x7F'}
	for c := byte(0); c < char; c++ {
		// Control characters and the listed specials are flagged.
		atomSpecials[c] = c < ctl || bytes.IndexByte(s, c) >= 0
	}
}
// parseAtom returns the next atom, number, or NIL. The syntax rules are relaxed
// to treat sequences such as "BODY[...]<...>" as a single atom. Numbers are
// converted to uint32, NIL is converted to nil, everything else becomes a
// string. Flags (e.g. "\Seen") are converted to title case, other strings are
// left in their original form.
func (raw *rawResponse) parseAtom(astring bool) (f Field, err error) {
	n, flag := 0, false
	for end := len(raw.tail); n < end; n++ {
		if c := raw.tail[n]; c >= char || atomSpecials[c] {
			switch c {
			case '\\':
				if n == 0 {
					flag = true
					astring = false
					continue // ABNF: flag (e.g. `\Seen`)
				}
			case '*':
				if n == 1 && flag {
					n++ // ABNF: flag-perm (`\*`), end of atom
				}
			case '[':
				if n == 4 && bytes.EqualFold(raw.tail[:4], []byte("BODY")) {
					pos := raw.pos()
					raw.tail = raw.tail[n+1:] // Temporary shift for parseFields
					// TODO: Literals between '[' and ']' are handled correctly,
					// but only the octet count will make it into the returned
					// atom. Would any server actually send a literal here, and
					// is it a problem to discard it since the client already
					// knows what was requested?
					if _, err = raw.parseFields(']'); err != nil {
						return
					}
					// Recompute the atom length to cover "BODY[...]".
					n = raw.pos() - pos - 1
					raw.tail = raw.line[pos:] // Undo temporary shift
					end = len(raw.tail)
					astring = false
				}
				continue // ABNF: fetch-att ("BODY[...]<...>"), atom, or astring
			case ']':
				if astring {
					continue // ABNF: ASTRING-CHAR
				}
			}
			break // raw.tail[n] is a delimiter or an unexpected byte
		}
	}
	// Atom must have at least one character, two if it starts with a backslash
	if n < 2 && (n == 0 || flag) {
		err = raw.unexpected(0)
		return
	}
	// Take whatever was found, let parseFields report delimiter errors
	atom := raw.tail[:n]
	if norm := normalize(atom); flag {
		f = norm
	} else if norm != "NIL" {
		// Numeric atoms become uint32 when they fit; overflow falls back
		// to the string form below.
		if c := norm[0]; '0' <= c && c <= '9' {
			if ui, err := strconv.ParseUint(norm, 10, 32); err == nil {
				f = uint32(ui)
			}
		}
		if f == nil {
			// First non-numeric atom in the response labels it (e.g. "EXISTS").
			if raw.Label == "" {
				raw.Label = norm
			}
			f = string(atom)
		}
	}
	raw.tail = raw.tail[n:]
	return
}
// normalize returns a normalized string copy of an atom. Non-flag atoms are
// converted to upper case. Flags are converted to title case (e.g. `\Seen`).
func normalize(atom []byte) string {
	isFlag := len(atom) > 1 && atom[0] == '\\'
	// caseAt returns the desired case bit for position i: 0 means upper
	// case, 0x20 means lower case. Flags keep upper case only for the
	// first letter after the backslash.
	caseAt := func(i int) byte {
		if isFlag && i > 1 {
			return 0x20
		}
		return 0
	}
	// First pass: detect whether any letter is in the wrong case.
	changed := false
	for i, c := range atom {
		if u := c & 0xDF; 'A' <= u && u <= 'Z' && c&0x20 != caseAt(i) {
			changed = true
			break
		}
	}
	if !changed {
		return string(atom) // Fast path: no changes
	}
	// Second pass: build the normalized copy without touching the input.
	norm := make([]byte, len(atom))
	for i, c := range atom {
		if u := c & 0xDF; 'A' <= u && u <= 'Z' {
			norm[i] = u | caseAt(i)
		} else {
			norm[i] = c
		}
	}
	return string(norm)
}
Better parsing
// Copyright 2013 The Go-IMAP Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imap
import (
"bytes"
"fmt"
"io"
"strconv"
)
// ParserError indicates a problem with the server response format. This could
// be the result of an unsupported extension or nonstandard server behavior.
// It implements the error interface.
type ParserError struct {
	Info   string // Short message explaining the problem
	Line   []byte // Full or partial response line, starting with the tag
	Offset int    // Parser offset, starting at 0
}
// Error formats the parser error for display, truncating overly long
// response lines to rawLimit bytes.
func (err *ParserError) Error() string {
	if err.Line == nil {
		return "imap: " + err.Info
	}
	line := err.Line
	ellipsis := ""
	if len(line) > rawLimit {
		line = line[:rawLimit]
		ellipsis = "..."
	}
	return fmt.Sprintf("imap: %s at offset %d of %+q%s",
		err.Info, err.Offset, line, ellipsis)
}
// readerInput is the interface for reading all parts of a response. This
// interface is implemented by transport. The io.Reader side supplies literal
// bytes; ReadLine supplies one CRLF-terminated line at a time.
type readerInput interface {
	io.Reader
	ReadLine() (line []byte, err error)
}
// reader creates rawResponse structs and provides additional lines and literals
// to the parser when requested.
type reader struct {
	readerInput   // Line/literal input (implemented by transport)
	LiteralReader // User-provided literal decoder

	tagid []byte // Tag prefix expected in command completion responses ([A-Z]+)
	order int64  // Response order counter
}
// rawResponse is an intermediate response form used to construct full Response
// objects. The struct returned by reader.Next() contains the start of the next
// response up to the first literal string, if there is one. The parser reads
// literals and additional lines as needed via reader.More(), which appends new
// bytes to line and tail. The current parser position can be calculated as
// len(raw.line) - len(raw.tail).
type rawResponse struct {
	*Response // Response under construction; cleared by Parse
	*reader   // Source of additional lines and literals

	line []byte // Full response line without literals or CRLFs
	tail []byte // Unconsumed line ending (parser state)
}
// newReader returns a reader configured to accept tagged responses beginning
// with tagid, which must be a non-empty string of upper-case ASCII letters.
// It panics on invalid arguments.
func newReader(in readerInput, lr LiteralReader, tagid string) *reader {
	if in == nil || lr == nil || tagid == "" {
		panic("imap: bad arguments to newReader")
	}
	for i := 0; i < len(tagid); i++ {
		if c := tagid[i]; c < 'A' || c > 'Z' {
			panic("imap: bad tagid format")
		}
	}
	return &reader{readerInput: in, LiteralReader: lr, tagid: []byte(tagid)}
}
// Next returns the next unparsed server response, or any data read prior to an
// error. If an error is returned and rsp != nil, the connection should be
// terminated because the client and server are no longer synchronized.
func (r *reader) Next() (raw *rawResponse, err error) {
	raw = &rawResponse{reader: r}
	if raw.line, err = r.ReadLine(); err != nil {
		// Keep any partial data for the caller to inspect; a completely
		// empty read means there is nothing worth returning.
		if len(raw.line) == 0 {
			raw = nil
		}
	} else if tag := r.tag(raw.line); tag != "" {
		r.order++
		raw.Response = &Response{Order: r.order, Raw: raw.line, Tag: tag}
		raw.tail = raw.line[len(tag)+1:] // Skip the tag and the following SP
	} else {
		err = &ProtocolError{"bad response tag", raw.line}
	}
	return
}
// More returns the next literal string and reads one more line from the server.
func (r *reader) More(raw *rawResponse, i LiteralInfo) (l Literal, err error) {
	src := io.LimitedReader{R: r, N: int64(i.Len)}
	if l, err = r.ReadLiteral(&src, i); l != nil {
		raw.Literals = append(raw.Literals, l)
		if err == nil {
			var line []byte
			if line, err = r.ReadLine(); len(line) > 0 { // ok if err != nil
				// Splice the continuation line onto raw.line and re-derive
				// raw.tail so the parser position survives the literal.
				pos := raw.pos()
				raw.line = append(raw.line, line...)
				raw.tail = raw.line[pos:]
				raw.Raw = raw.line
			}
		}
	} else if err == nil {
		// Sanity check for user-provided ReadLiteral implementations
		panic("imap: ReadLiteral returned (nil, nil)")
	}
	return
}
// tag verifies that line is a valid start of a new server response and returns
// the full response tag. Valid tags are "*" (untagged status/data), "+"
// (continuation request), and strings in the format "{r.tagid}[0-9]+" (command
// completion). The tag must be followed by a space.
func (r *reader) tag(line []byte) string {
	if n := bytes.IndexByte(line, ' '); n == 1 {
		if c := line[0]; c == '*' || c == '+' {
			return string(c)
		}
	} else if i := len(r.tagid); i < n && bytes.Equal(line[:i], r.tagid) {
		// Note: n == -1 (no SP at all) fails the i < n test above.
		// Everything between the tagid prefix and the SP must be digits.
		for _, c := range line[i:n] {
			if c < '0' || c > '9' {
				return ""
			}
		}
		return string(line[:n])
	}
	return ""
}
// Error returned by parseCondition to indicate that rsp.Type != Status. It is
// a sentinel compared by identity and is never returned to the user.
var errNotStatus error = &ParserError{Info: "not a status response"}
// Parse converts rawResponse into a full Response object by calling parseX
// methods, which gradually consume raw.tail. It may only be called once per
// response; raw.Response is cleared on return.
func (raw *rawResponse) Parse() (rsp *Response, err error) {
	if raw.Response == nil {
		return nil, &ParserError{"unparsable response", raw.line, 0}
	}
	switch rsp = raw.Response; rsp.Tag {
	case "*":
		// Untagged: either a status condition (OK/NO/BAD/PREAUTH/BYE) or data.
		if err = raw.parseCondition(OK | NO | BAD | PREAUTH | BYE); err == nil {
			rsp.Type = Status
			err = raw.parseStatus()
		} else if err == errNotStatus {
			rsp.Type = Data
			rsp.Fields, err = raw.parseFields(nul)
			if len(rsp.Fields) == 0 && err == nil {
				err = raw.error("empty data response", 0)
			}
		}
	case "+":
		rsp.Type = Continue
		raw.parseContinue()
	default:
		// Tagged command completion must carry OK, NO, or BAD.
		if err = raw.parseCondition(OK | NO | BAD); err == nil {
			rsp.Type = Done
			err = raw.parseStatus()
		} else if err == errNotStatus {
			err = &ParserError{"unknown response type", raw.line, 0}
		}
	}
	// Anything left unconsumed at this point is a syntax error.
	if len(raw.tail) > 0 && err == nil {
		err = raw.unexpected(0)
	}
	raw.Response = nil
	return
}
// pos reports how many bytes of raw.line the parser has consumed so far,
// i.e. the current parser position within the response line.
func (raw *rawResponse) pos() int {
	consumed := len(raw.line) - len(raw.tail)
	return consumed
}
// error builds a ParserError describing a problem with the response. The
// given offset is relative to raw.tail and is translated to an absolute
// position within raw.line.
func (raw *rawResponse) error(info string, off int) error {
	abs := raw.pos() + off
	return &ParserError{Info: info, Line: raw.line, Offset: abs}
}
// unexpected builds a ParserError reporting an unexpected byte at the given
// offset (relative to raw.tail).
func (raw *rawResponse) unexpected(off int) error {
	b := raw.line[raw.pos()+off]
	msg := fmt.Sprintf("unexpected %+q", b)
	return raw.error(msg, off)
}
// missing builds a ParserError reporting an absent required character or
// section at the given offset (relative to raw.tail). A byte value is quoted
// in the message; anything else is formatted with %v.
func (raw *rawResponse) missing(v interface{}, off int) error {
	format := "missing %v"
	if _, isByte := v.(byte); isByte {
		format = "missing %+q"
	}
	return raw.error(fmt.Sprintf(format, v), off)
}
// Valid status conditions, in the byte form matched against incoming
// responses and the corresponding RespStatus flag.
var bStatus = []struct {
	b []byte
	s RespStatus
}{
	{[]byte("OK"), OK},
	{[]byte("NO"), NO},
	{[]byte("BAD"), BAD},
	{[]byte("PREAUTH"), PREAUTH},
	{[]byte("BYE"), BYE},
}
// parseCondition extracts the status condition if raw is a status response
// (ABNF: resp-cond-*). errNotStatus is returned for all other response types.
func (raw *rawResponse) parseCondition(accept RespStatus) error {
outer:
	for _, v := range bStatus {
		if n := len(v.b); n <= len(raw.tail) {
			for i, c := range v.b {
				if raw.tail[i]&0xDF != c { // &0xDF converts [a-z] to upper case
					continue outer
				}
			}
			if n == len(raw.tail) {
				// The condition matched but the line ends without the
				// mandatory SP separator.
				return raw.missing("SP", n)
			} else if raw.tail[n] == ' ' {
				if accept&v.s == 0 {
					return raw.error("unacceptable status condition", 0)
				}
				raw.Status = v.s
				raw.tail = raw.tail[n+1:]
				return nil
			}
			// Assume data response with a matching prefix (e.g. "* NOT STATUS")
			break
		}
	}
	return errNotStatus
}
// parseStatus extracts the optional response code and required text after the
// status condition (ABNF: resp-text).
func (raw *rawResponse) parseStatus() error {
	// Optional bracketed response code, e.g. "[UIDVALIDITY 3857529045]".
	if len(raw.tail) > 0 && raw.tail[0] == '[' {
		var err error
		raw.tail = raw.tail[1:]
		if raw.Fields, err = raw.parseFields(']'); err != nil {
			return err
		} else if len(raw.Fields) == 0 {
			return raw.error("empty response code", -1)
		} else if len(raw.tail) == 0 {
			// Some servers do not send any text after the response code
			// (e.g. "* OK [UNSEEN 1]"). This is not allowed, according to RFC
			// 3501 ABNF, but we accept it for compatibility with other clients.
			raw.tail = nil
			return nil
		} else if raw.tail[0] != ' ' {
			return raw.missing("SP", 0)
		}
		raw.tail = raw.tail[1:]
	}
	if len(raw.tail) == 0 {
		return raw.missing("status text", 0)
	}
	raw.Info = string(raw.tail)
	raw.tail = nil
	return nil
}
// parseContinue extracts the text or Base64 data from a continuation request
// (ABNF: continue-req). Base64 data is saved in its original form to raw.Info,
// and decoded as []byte into raw.Fields[0].
func (raw *rawResponse) parseContinue() {
	if n := len(raw.tail); n == 0 {
		// Empty continuation: treat as zero-length Base64 data.
		raw.Label = "BASE64"
		raw.Fields = []Field{[]byte(nil)}
	} else if n&3 == 0 {
		// Valid Base64 is always a multiple of 4 bytes long; try to decode.
		if b, err := b64dec(raw.tail); err == nil {
			raw.Label = "BASE64"
			raw.Fields = []Field{b}
		}
	}
	// ABNF uses resp-text, but section 7.5 states "The remainder of this
	// response is a line of text." Assume that response codes are not allowed.
	raw.Info = string(raw.tail)
	raw.tail = nil
}
// parseFields extracts as many data fields from raw.tail as possible until it
// finds the stop byte in a delimiter position. An error is returned if the stop
// byte is not found. NUL stop causes all of raw.tail to be consumed (NUL does
// not appear anywhere in raw.line - checked by transport).
func (raw *rawResponse) parseFields(stop byte) (fields []Field, err error) {
	if len(raw.tail) > 0 && raw.tail[0] == stop {
		// Empty parenthesized list, BODY[] and friends, or an error
		raw.tail = raw.tail[1:]
		return
	}
	for len(raw.tail) > 0 && err == nil {
		var f Field
		switch raw.next() {
		case QuotedString:
			f, err = raw.parseQuotedString()
		case LiteralString:
			f, err = raw.parseLiteralString()
		case List:
			raw.tail = raw.tail[1:]
			f, err = raw.parseFields(')') // Recurse for nested lists
		default:
			// astring rules apply only in data responses outside of a
			// response code (stop != ']').
			f, err = raw.parseAtom(raw.Type == Data && stop != ']')
		}
		if err == nil || f != nil {
			fields = append(fields, f)
		}
		// Delimiter
		if len(raw.tail) > 0 && err == nil {
			switch raw.tail[0] {
			case ' ':
				if len(raw.tail) > 1 {
					raw.tail = raw.tail[1:]
				} else {
					// Trailing SP with nothing after it
					err = raw.unexpected(0)
				}
			case stop:
				raw.tail = raw.tail[1:]
				return
			case '(':
				// body-type-mpart is 1*body without a space in between
				if len(raw.tail) == 1 {
					err = raw.unexpected(0)
				}
			default:
				err = raw.unexpected(0)
			}
		}
	}
	if stop != nul && err == nil {
		err = raw.missing(stop, 0)
	}
	return
}
// next classifies the upcoming response field by looking at the first one or
// two bytes of raw.tail. The default classification is Atom, which covers
// atoms, numbers, and NILs.
func (raw *rawResponse) next() FieldType {
	c := raw.tail[0]
	if c == '"' {
		return QuotedString
	}
	if c == '{' {
		return LiteralString
	}
	if c == '(' {
		return List
	}
	// Two-byte prefixes: RFC 5738 utf8-quoted (`*"`) and RFC 3516
	// literal8 (`~{`).
	if len(raw.tail) >= 2 {
		if c == '*' && raw.tail[1] == '"' {
			return QuotedString
		}
		if c == '~' && raw.tail[1] == '{' {
			return LiteralString
		}
	}
	return Atom
}
// parseQuotedString returns the next quoted string. The string stays quoted,
// but validation is performed to ensure that subsequent calls to Unquote() are
// successful.
func (raw *rawResponse) parseQuotedString() (f Field, err error) {
	start := 1
	if raw.tail[0] == '*' {
		// RFC 5738 utf8-quoted: skip the leading '*' as well as the quote.
		start++
	}
	escaped := false
	for n, c := range raw.tail[start:] {
		if escaped {
			escaped = false
		} else if c == '\\' {
			escaped = true
		} else if c == '"' {
			// Closing quote found; include it in the validated slice.
			n += start + 1
			if _, ok := UnquoteBytes(raw.tail[:n]); ok {
				f = string(raw.tail[:n])
				raw.tail = raw.tail[n:]
				return
			}
			break
		}
	}
	err = raw.error("bad quoted string", 0)
	return
}
// parseLiteralString returns the next literal string. The octet count should be
// the last field in raw.tail. An additional line of text will be appended to
// raw.line and raw.tail after the literal is received.
func (raw *rawResponse) parseLiteralString() (f Field, err error) {
	var info LiteralInfo
	start := 1
	if raw.tail[0] == '~' {
		// RFC 3516 literal8 ("~{n}"): binary content.
		info.Bin = true
		start++
	}
	n := len(raw.tail) - 1
	if n-start < 1 || raw.tail[n] != '}' {
		err = raw.unexpected(0)
		return
	}
	oc, err := strconv.ParseUint(string(raw.tail[start:n]), 10, 32)
	if err != nil {
		err = raw.error("bad literal octet count", start)
		return
	}
	info.Len = uint32(oc)
	// More reads the literal plus the next line; only then is the "{n}"
	// prefix consumed from the (now re-derived) tail.
	if f, err = raw.More(raw, info); err == nil {
		raw.tail = raw.tail[n+1:]
	}
	return
}
// atomSpecials identifies ASCII characters that either may not appear in atoms
// or require special handling (ABNF: ATOM-CHAR).
var atomSpecials [char]bool

func init() {
	// atom-specials + '[' to provide special handling for BODY[...]
	s := []byte{'(', ')', '{', ' ', '%', '*', '"', '[', '\\', ']', '\x7F'}
	for c := byte(0); c < char; c++ {
		// Control characters and the listed specials are flagged.
		atomSpecials[c] = c < ctl || bytes.IndexByte(s, c) >= 0
	}
}
// parseAtom returns the next atom, number, or NIL. The syntax rules are relaxed
// to treat sequences such as "BODY[...]<...>" as a single atom. Numbers are
// converted to uint32, NIL is converted to nil, everything else becomes a
// string. Flags (e.g. "\Seen") are converted to title case, other strings are
// left in their original form.
func (raw *rawResponse) parseAtom(astring bool) (f Field, err error) {
	n, flag := 0, false
	for end := len(raw.tail); n < end; n++ {
		if c := raw.tail[n]; c >= char || atomSpecials[c] {
			switch c {
			case '\\':
				if n == 0 {
					flag = true
					astring = false
					continue // ABNF: flag (e.g. `\Seen`)
				}
			case '*':
				if n == 1 && flag {
					n++ // ABNF: flag-perm (`\*`), end of atom
				}
			case '[':
				if n == 4 && bytes.EqualFold(raw.tail[:4], []byte("BODY")) {
					pos := raw.pos()
					raw.tail = raw.tail[n+1:] // Temporary shift for parseFields
					// TODO: Literals between '[' and ']' are handled correctly,
					// but only the octet count will make it into the returned
					// atom. Would any server actually send a literal here, and
					// is it a problem to discard it since the client already
					// knows what was requested?
					if _, err = raw.parseFields(']'); err != nil {
						return
					}
					// Recompute the atom length to cover "BODY[...]".
					n = raw.pos() - pos - 1
					raw.tail = raw.line[pos:] // Undo temporary shift
					end = len(raw.tail)
					astring = false
				}
				continue // ABNF: fetch-att ("BODY[...]<...>"), atom, or astring
			case ']':
				if astring {
					continue // ABNF: ASTRING-CHAR
				}
			}
			break // raw.tail[n] is a delimiter or an unexpected byte
		}
	}
	// Atom must have at least one character, two if it starts with a backslash
	if n < 2 && (n == 0 || flag) {
		err = raw.unexpected(0)
		return
	}
	// Take whatever was found, let parseFields report delimiter errors
	atom := raw.tail[:n]
	if norm := normalize(atom); flag {
		f = norm
	} else if norm != "NIL" {
		// Numeric atoms become uint32 when they fit; overflow falls back
		// to the string form below.
		if c := norm[0]; '0' <= c && c <= '9' {
			if ui, err := strconv.ParseUint(norm, 10, 32); err == nil {
				f = uint32(ui)
			}
		}
		if f == nil {
			// First non-numeric atom in the response labels it (e.g. "EXISTS").
			if raw.Label == "" {
				raw.Label = norm
			}
			f = string(atom)
		}
	}
	raw.tail = raw.tail[n:]
	return
}
// normalize returns a normalized string copy of an atom. Non-flag atoms are
// converted to upper case. Flags are converted to title case (e.g. `\Seen`).
// The input slice is never modified.
func normalize(atom []byte) string {
	norm := []byte(nil)
	want := byte(0) // Want upper case
	for i, c := range atom {
		have := c & 0x20
		if c &= 0xDF; 'A' <= c && c <= 'Z' && have != want {
			// BUG FIX: previously `norm = atom`, which made the second
			// loop rewrite the caller's buffer in place. atom aliases
			// raw.line (and thus Response.Raw), and parseAtom returns
			// string(atom) for non-flag atoms, so in-place mutation
			// destroyed the "original form" this function promises to
			// preserve. Allocate a fresh buffer instead.
			norm = make([]byte, len(atom))
			break
		} else if i == 1 && atom[0] == '\\' {
			want = 0x20 // Want lower case starting at i == 2
		}
	}
	if norm == nil {
		return string(atom) // Fast path: no changes
	}
	want = 0
	for i, c := range atom {
		if c &= 0xDF; 'A' <= c && c <= 'Z' {
			norm[i] = c | want
		} else {
			norm[i] = atom[i]
		}
		if i == 1 && atom[0] == '\\' {
			want = 0x20
		}
	}
	return string(norm)
}
|
// http://golang.org/src/pkg/crypto/tls/generate_cert.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package shared
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
"log"
"math/big"
"net"
"os"
"os/user"
"path"
"time"
)
// CertInfo is the representation of a Certificate in the API.
type CertInfo struct {
	Certificate string `json:"certificate"` // PEM-encoded certificate data
	Fingerprint string `json:"fingerprint"` // Certificate fingerprint
	Type        string `json:"type"`        // Certificate type
}
/*
 * Generate a list of names for which the certificate will be valid.
 * This will include the hostname and ip address
 */
func mynames() ([]string, error) {
	h, err := os.Hostname()
	if err != nil {
		return nil, err
	}

	ret := []string{h}

	ifs, err := net.Interfaces()
	if err != nil {
		return nil, err
	}

	for _, iface := range ifs {
		// Loopback addresses are not useful names for a certificate.
		if IsLoopback(&iface) {
			continue
		}

		addrs, err := iface.Addrs()
		if err != nil {
			return nil, err
		}

		for _, addr := range addrs {
			// NOTE(review): addr.String() may include a CIDR suffix
			// (e.g. "10.0.0.1/24"), which net.ParseIP in GenerateMemCert
			// would reject, sending such entries to DNSNames — confirm
			// this is the intended behavior.
			ret = append(ret, addr.String())
		}
	}

	return ret, nil
}
// FindOrGenCert ensures a certificate/key pair exists at the given paths,
// generating a new pair when either file is absent.
func FindOrGenCert(certf string, keyf string) error {
	if PathExists(certf) && PathExists(keyf) {
		return nil
	}

	// First run: neither file exists yet, so generate both.
	return GenCert(certf, keyf)
}
// GenCert will create and populate a certificate file and a key file,
// creating any missing parent directories. The key file is written with
// mode 0600 since it holds the private key.
func GenCert(certf string, keyf string) error {
	/* Create the basenames if needed */
	dir := path.Dir(certf)
	err := os.MkdirAll(dir, 0750)
	if err != nil {
		return err
	}
	dir = path.Dir(keyf)
	err = os.MkdirAll(dir, 0750)
	if err != nil {
		return err
	}

	certBytes, keyBytes, err := GenerateMemCert()
	if err != nil {
		return err
	}

	certOut, err := os.Create(certf)
	if err != nil {
		// Was log.Fatalf, which exited the whole process from a library
		// function and made the return below unreachable; log and return
		// instead, matching the key-file error handling.
		log.Printf("failed to open %s for writing: %s", certf, err)
		return err
	}
	// Check write/close errors so a partially written certificate file
	// does not go unnoticed.
	if _, err := certOut.Write(certBytes); err != nil {
		certOut.Close()
		return err
	}
	if err := certOut.Close(); err != nil {
		return err
	}

	keyOut, err := os.OpenFile(keyf, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		log.Printf("failed to open %s for writing: %s", keyf, err)
		return err
	}
	if _, err := keyOut.Write(keyBytes); err != nil {
		keyOut.Close()
		return err
	}
	return keyOut.Close()
}
// GenerateMemCert creates a certificate and key pair, returning them as byte
// arrays in memory.
func GenerateMemCert() ([]byte, []byte, error) {
privk, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
log.Fatalf("failed to generate key")
return nil, nil, err
}
hosts, err := mynames()
if err != nil {
log.Fatalf("Failed to get my hostname")
return nil, nil, err
}
validFrom := time.Now()
validTo := validFrom.Add(10 * 365 * 24 * time.Hour)
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
log.Fatalf("failed to generate serial number: %s", err)
return nil, nil, err
}
userEntry, err := user.Current()
var username string
if err == nil {
username = userEntry.Username
if username == "" {
username = "UNKNOWN"
}
} else {
username = "UNKNOWN"
}
hostname, err := os.Hostname()
if err != nil {
hostname = "UNKNOWN"
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
Organization: []string{"linuxcontainers.org"},
CommonName: fmt.Sprintf("%s@%s", username, hostname),
},
NotBefore: validFrom,
NotAfter: validTo,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
for _, h := range hosts {
if ip := net.ParseIP(h); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
} else {
template.DNSNames = append(template.DNSNames, h)
}
}
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privk.PublicKey, privk)
if err != nil {
log.Fatalf("Failed to create certificate: %s", err)
return nil, nil, err
}
cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
key := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privk)})
return cert, key, nil
}
// ReadCert loads and parses the PEM-encoded x509 certificate stored at fpath.
func ReadCert(fpath string) (*x509.Certificate, error) {
	cf, err := ioutil.ReadFile(fpath)
	if err != nil {
		return nil, err
	}

	certBlock, _ := pem.Decode(cf)
	if certBlock == nil {
		// pem.Decode returns a nil block when no PEM data is found;
		// without this check an invalid certificate file caused a nil
		// pointer dereference below.
		return nil, fmt.Errorf("Invalid certificate file")
	}

	return x509.ParseCertificate(certBlock.Bytes)
}
Detect invalid certificate files
Closes #1977
Signed-off-by: Stéphane Graber <089afc6d81f66f1168a9849e15660feae286e024@ubuntu.com>
// http://golang.org/src/pkg/crypto/tls/generate_cert.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package shared
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
"log"
"math/big"
"net"
"os"
"os/user"
"path"
"time"
)
// CertInfo is the representation of a Certificate in the API.
type CertInfo struct {
	Certificate string `json:"certificate"` // PEM-encoded certificate data
	Fingerprint string `json:"fingerprint"` // Certificate fingerprint
	Type        string `json:"type"`        // Certificate type
}
/*
 * Generate a list of names for which the certificate will be valid.
 * This will include the hostname and ip address
 */
func mynames() ([]string, error) {
	h, err := os.Hostname()
	if err != nil {
		return nil, err
	}

	ret := []string{h}

	ifs, err := net.Interfaces()
	if err != nil {
		return nil, err
	}

	for _, iface := range ifs {
		// Loopback addresses are not useful names for a certificate.
		if IsLoopback(&iface) {
			continue
		}

		addrs, err := iface.Addrs()
		if err != nil {
			return nil, err
		}

		for _, addr := range addrs {
			// NOTE(review): addr.String() may include a CIDR suffix
			// (e.g. "10.0.0.1/24"), which net.ParseIP in GenerateMemCert
			// would reject, sending such entries to DNSNames — confirm
			// this is the intended behavior.
			ret = append(ret, addr.String())
		}
	}

	return ret, nil
}
// FindOrGenCert ensures a certificate/key pair exists at the given paths,
// generating a new pair when either file is absent.
func FindOrGenCert(certf string, keyf string) error {
	if PathExists(certf) && PathExists(keyf) {
		return nil
	}

	// First run: neither file exists yet, so generate both.
	return GenCert(certf, keyf)
}
// GenCert will create and populate a certificate file and a key file,
// creating any missing parent directories. The key file is written with
// mode 0600 since it holds the private key.
func GenCert(certf string, keyf string) error {
	/* Create the basenames if needed */
	dir := path.Dir(certf)
	err := os.MkdirAll(dir, 0750)
	if err != nil {
		return err
	}
	dir = path.Dir(keyf)
	err = os.MkdirAll(dir, 0750)
	if err != nil {
		return err
	}

	certBytes, keyBytes, err := GenerateMemCert()
	if err != nil {
		return err
	}

	certOut, err := os.Create(certf)
	if err != nil {
		// Was log.Fatalf, which exited the whole process from a library
		// function and made the return below unreachable; log and return
		// instead, matching the key-file error handling.
		log.Printf("failed to open %s for writing: %s", certf, err)
		return err
	}
	// Check write/close errors so a partially written certificate file
	// does not go unnoticed.
	if _, err := certOut.Write(certBytes); err != nil {
		certOut.Close()
		return err
	}
	if err := certOut.Close(); err != nil {
		return err
	}

	keyOut, err := os.OpenFile(keyf, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		log.Printf("failed to open %s for writing: %s", keyf, err)
		return err
	}
	if _, err := keyOut.Write(keyBytes); err != nil {
		keyOut.Close()
		return err
	}
	return keyOut.Close()
}
// GenerateMemCert creates a certificate and key pair, returning them as byte
// arrays in memory.
func GenerateMemCert() ([]byte, []byte, error) {
privk, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
log.Fatalf("failed to generate key")
return nil, nil, err
}
hosts, err := mynames()
if err != nil {
log.Fatalf("Failed to get my hostname")
return nil, nil, err
}
validFrom := time.Now()
validTo := validFrom.Add(10 * 365 * 24 * time.Hour)
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
log.Fatalf("failed to generate serial number: %s", err)
return nil, nil, err
}
userEntry, err := user.Current()
var username string
if err == nil {
username = userEntry.Username
if username == "" {
username = "UNKNOWN"
}
} else {
username = "UNKNOWN"
}
hostname, err := os.Hostname()
if err != nil {
hostname = "UNKNOWN"
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
Organization: []string{"linuxcontainers.org"},
CommonName: fmt.Sprintf("%s@%s", username, hostname),
},
NotBefore: validFrom,
NotAfter: validTo,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
for _, h := range hosts {
if ip := net.ParseIP(h); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
} else {
template.DNSNames = append(template.DNSNames, h)
}
}
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privk.PublicKey, privk)
if err != nil {
log.Fatalf("Failed to create certificate: %s", err)
return nil, nil, err
}
cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
key := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privk)})
return cert, key, nil
}
// ReadCert loads a PEM-encoded x509 certificate from the file at
// fpath. It returns an error when the file cannot be read, when it
// contains no PEM block, or when the block is not a valid certificate.
func ReadCert(fpath string) (*x509.Certificate, error) {
	raw, err := ioutil.ReadFile(fpath)
	if err != nil {
		return nil, err
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		return nil, fmt.Errorf("Invalid certificate file")
	}
	return x509.ParseCertificate(block.Bytes)
}
|
package main
import (
"errors"
"io/ioutil"
"net/http"
"runtime"
"strconv"
"strings"
"github.com/inconshreveable/go-update"
)
// VERSION is the currently-running release; it is compared against the
// remote manifest's first line to decide whether an update exists.
const VERSION = "0.1.0"
// Updates work like this: each version is stored in a folder on a Linode
// server operated by the developers. The most recent version is stored in
// current/. The folder contains the files changed by the update, as well as a
// MANIFEST file that contains the version number and a file listing. To check
// for an update, we first read the version number from current/MANIFEST. If
// the version is newer, we download and apply the files listed in the update
// manifest.
//
// NOTE(review): updates are fetched from a hard-coded IP over plain
// HTTP with no signature verification (see the TODOs in applyUpdate) —
// an on-path attacker could serve a malicious binary.
var updateURL = "http://23.239.14.98/releases/" + runtime.GOOS + "_" + runtime.GOARCH
// newerVersion reports whether version is "greater than" VERSION,
// comparing numeric segments left to right ("1.10" > "1.9"). When all
// common segments are equal, a version with extra segments is treated
// as newer ("0.1.0.1" > "0.1.0"); equal versions return false.
func newerVersion(version string) bool {
	nums := strings.Split(version, ".")
	curNums := strings.Split(VERSION, ".")
	for i := range nums {
		// Guard: the original indexed the local segments unchecked and
		// panicked when the remote version had more dots than VERSION.
		if i >= len(curNums) {
			return true
		}
		// inputs are trusted, so no need to check the error
		ni, _ := strconv.Atoi(nums[i])
		ci, _ := strconv.Atoi(curNums[i])
		if ni != ci {
			return ni > ci
		}
	}
	// version is equal to (or a prefix of) VERSION
	return false
}
// fetchManifest requests and parses the update manifest for the given
// version directory. It returns the manifest as a slice of lines: the
// first line is the version number, the remaining lines are file names.
func fetchManifest(version string) (lines []string, err error) {
	resp, err := http.Get(updateURL + "/" + version + "/MANIFEST")
	if err != nil {
		return
	}
	defer resp.Body.Close()
	// Without this check a 404 page would be parsed as a manifest.
	if resp.StatusCode != http.StatusOK {
		err = errors.New("could not fetch MANIFEST: " + resp.Status)
		return
	}
	manifest, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return
	}
	lines = strings.Split(strings.TrimSpace(string(manifest)), "\n")
	// strings.Split never returns an empty slice, so the original
	// len(lines) == 0 check was dead code; inspect the content instead.
	if len(lines) == 0 || lines[0] == "" {
		lines = nil
		err = errors.New("could not parse MANIFEST file")
	}
	return
}
// checkForUpdate checks a centralized server for a more recent version of
// Sia. If an update is available, it returns true, along with the newer
// version.
func checkForUpdate() (bool, string, error) {
	manifest, err := fetchManifest("current")
	if err != nil {
		return false, "", err
	}
	// The manifest's first line holds the remote version number.
	remote := manifest[0]
	isNewer := newerVersion(remote)
	return isNewer, remote, nil
}
// applyUpdate downloads and applies an update.
//
// The manifest's first line is the version; each subsequent line names
// a file to fetch from the release directory and install over the
// current one via go-update.
//
// NOTE(review): "err, _ =" appears to follow the old go-update API,
// which returns the error as the FIRST value (err, errRecover) —
// confirm against the vendored library before "fixing" the order.
// NOTE(review): files arrive over plain HTTP with no signature check;
// see the TODOs below.
//
// TODO: lots of room for improvement here.
// - binary diffs
// - signed updates
// - zipped updates
func applyUpdate(version string) (err error) {
	manifest, err := fetchManifest(version)
	if err != nil {
		return
	}
	// manifest[0] is the version string; files start at index 1.
	for _, file := range manifest[1:] {
		err, _ = update.New().Target(file).FromUrl(updateURL + "/" + version + "/" + file)
		if err != nil {
			// TODO: revert prior successful updates?
			return
		}
	}
	// the binary must always be updated, because if nothing else, the version
	// number has to be bumped.
	// TODO: should it be siad.exe on Windows?
	err, _ = update.New().FromUrl(updateURL + "/" + version + "/siad")
	if err != nil {
		return
	}
	return
}
bump version number
package main
import (
"errors"
"io/ioutil"
"net/http"
"runtime"
"strconv"
"strings"
"github.com/inconshreveable/go-update"
)
// VERSION is the currently-running release; it is compared against the
// remote manifest's first line to decide whether an update exists.
const VERSION = "0.2.0"
// Updates work like this: each version is stored in a folder on a Linode
// server operated by the developers. The most recent version is stored in
// current/. The folder contains the files changed by the update, as well as a
// MANIFEST file that contains the version number and a file listing. To check
// for an update, we first read the version number from current/MANIFEST. If
// the version is newer, we download and apply the files listed in the update
// manifest.
//
// NOTE(review): updates are fetched from a hard-coded IP over plain
// HTTP with no signature verification (see the TODOs in applyUpdate) —
// an on-path attacker could serve a malicious binary.
var updateURL = "http://23.239.14.98/releases/" + runtime.GOOS + "_" + runtime.GOARCH
// newerVersion reports whether version is "greater than" VERSION,
// comparing numeric segments left to right ("1.10" > "1.9"). When all
// common segments are equal, a version with extra segments is treated
// as newer ("0.2.0.1" > "0.2.0"); equal versions return false.
func newerVersion(version string) bool {
	nums := strings.Split(version, ".")
	curNums := strings.Split(VERSION, ".")
	for i := range nums {
		// Guard: the original indexed the local segments unchecked and
		// panicked when the remote version had more dots than VERSION.
		if i >= len(curNums) {
			return true
		}
		// inputs are trusted, so no need to check the error
		ni, _ := strconv.Atoi(nums[i])
		ci, _ := strconv.Atoi(curNums[i])
		if ni != ci {
			return ni > ci
		}
	}
	// version is equal to (or a prefix of) VERSION
	return false
}
// fetchManifest requests and parses the update manifest for the given
// version directory. It returns the manifest as a slice of lines: the
// first line is the version number, the remaining lines are file names.
func fetchManifest(version string) (lines []string, err error) {
	resp, err := http.Get(updateURL + "/" + version + "/MANIFEST")
	if err != nil {
		return
	}
	defer resp.Body.Close()
	// Without this check a 404 page would be parsed as a manifest.
	if resp.StatusCode != http.StatusOK {
		err = errors.New("could not fetch MANIFEST: " + resp.Status)
		return
	}
	manifest, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return
	}
	lines = strings.Split(strings.TrimSpace(string(manifest)), "\n")
	// strings.Split never returns an empty slice, so the original
	// len(lines) == 0 check was dead code; inspect the content instead.
	if len(lines) == 0 || lines[0] == "" {
		lines = nil
		err = errors.New("could not parse MANIFEST file")
	}
	return
}
// checkForUpdate checks a centralized server for a more recent version of
// Sia. If an update is available, it returns true, along with the newer
// version.
func checkForUpdate() (bool, string, error) {
	manifest, err := fetchManifest("current")
	if err != nil {
		return false, "", err
	}
	// The manifest's first line holds the remote version number.
	remote := manifest[0]
	isNewer := newerVersion(remote)
	return isNewer, remote, nil
}
// applyUpdate downloads and applies an update.
//
// The manifest's first line is the version; each subsequent line names
// a file to fetch from the release directory and install over the
// current one via go-update.
//
// NOTE(review): "err, _ =" appears to follow the old go-update API,
// which returns the error as the FIRST value (err, errRecover) —
// confirm against the vendored library before "fixing" the order.
// NOTE(review): files arrive over plain HTTP with no signature check;
// see the TODOs below.
//
// TODO: lots of room for improvement here.
// - binary diffs
// - signed updates
// - zipped updates
func applyUpdate(version string) (err error) {
	manifest, err := fetchManifest(version)
	if err != nil {
		return
	}
	// manifest[0] is the version string; files start at index 1.
	for _, file := range manifest[1:] {
		err, _ = update.New().Target(file).FromUrl(updateURL + "/" + version + "/" + file)
		if err != nil {
			// TODO: revert prior successful updates?
			return
		}
	}
	// the binary must always be updated, because if nothing else, the version
	// number has to be bumped.
	// TODO: should it be siad.exe on Windows?
	err, _ = update.New().FromUrl(updateURL + "/" + version + "/siad")
	if err != nil {
		return
	}
	return
}
|
package simra
import (
"github.com/pankona/gomo-simra/simra/fps"
"github.com/pankona/gomo-simra/simra/internal/peer"
)
// Simraer represents an interface of simra instance.
// It is the public surface of the engine: lifecycle (Start/SetScene),
// sprite management, virtual screen sizing, touch input, and
// collision listener registration.
type Simraer interface {
	// Start needs to call to enable all function belong to simra package.
	Start(onStart, onStop func())
	// SetScene sets a driver as a scene.
	// If a driver is already set, it is replaced with new one.
	SetScene(driver Driver)
	// NewSprite returns an instance of Spriter
	NewSprite() Spriter
	// AddSprite adds a sprite to current scene with empty texture.
	AddSprite(s Spriter)
	// RemoveSprite removes specified sprite from current scene.
	// Removed sprite will be disappeared.
	RemoveSprite(s Spriter)
	// SetDesiredScreenSize configures virtual screen size.
	// This function must be called at least once before calling Start.
	SetDesiredScreenSize(w, h float32)
	// AddTouchListener registers a listener for notifying touch event.
	// Event is notified when "screen" is touched.
	AddTouchListener(listener peer.TouchListener)
	// RemoveTouchListener unregisters a listener for notifying touch event.
	RemoveTouchListener(listener peer.TouchListener)
	// AddCollisionListener add a callback function that is called on
	// collision is detected between c1 and c2.
	AddCollisionListener(c1, c2 Collider, listener CollisionListener)
	// RemoveAllCollisionListener removes all registered listeners
	RemoveAllCollisionListener()
}
// collisionMap associates a pair of colliders with the listener to
// notify when they overlap.
type collisionMap struct {
	c1 Collider
	c2 Collider
	listener CollisionListener
}
// simra is the concrete implementation of the Simraer interface.
// It holds the active scene driver, registered collision listeners,
// and the GL / sprite-container peers plus user lifecycle callbacks.
type simra struct {
	driver Driver
	comap []*collisionMap
	gl peer.GLer
	spritecontainer peer.SpriteContainerer
	onStart func()
	onStop func()
}
// TODO: don't declare as package global.
// sim is the package-wide singleton handed out by GetInstance.
var sim = &simra{
	comap: make([]*collisionMap, 0),
}
// GetInstance returns instance of Simra.
// It is necessary to call this function to get Simra instance
// since Simra is single instance.
func GetInstance() Simraer {
	return sim
}
// point is a 2D integer coordinate used for collision corner tests.
type point struct {
	x, y int
}
// onUpdate runs once per frame: drives the current scene (if any),
// performs collision detection/notification, then redraws sprites.
func (sim *simra) onUpdate() {
	if sim.driver != nil {
		sim.driver.Drive()
	}
	sim.collisionCheckAndNotify()
	sim.gl.Update(sim.spritecontainer)
}
// onStopped drops the current scene driver and finalizes the GL peer.
// NOTE(review): nothing in this file calls onStopped — confirm it is
// wired up elsewhere, or it may be dead code.
func (sim *simra) onStopped() {
	peer.LogDebug("IN")
	sim.driver = nil
	sim.gl.Finalize()
	peer.LogDebug("OUT")
}
// onGomoStart is invoked by the gomo peer once the GL context is
// available; it initializes GL and forwards to the user's onStart.
func (sim *simra) onGomoStart(glc *peer.GLContext) {
	sim.gl.Initialize(glc)
	sim.onStart()
}
// onGomoStop resets the sprite container, finalizes GL, and forwards
// to the user's onStop callback.
func (sim *simra) onGomoStop() {
	sim.spritecontainer.Initialize(sim.gl)
	sim.gl.Finalize()
	sim.onStop()
}
// Start needs to call to enable all function belong to simra package.
// It creates the GL peer and sprite container, stores the user
// callbacks, registers the lifecycle hooks with the gomo peer, and
// enters the main loop via gomo.Start.
func (sim *simra) Start(onStart, onStop func()) {
	peer.LogDebug("IN")
	gl := peer.NewGLPeer()
	sc := peer.GetSpriteContainer()
	sc.Initialize(gl)
	sim.gl = gl
	sim.spritecontainer = sc
	sim.onStart = onStart
	sim.onStop = onStop
	gomo := peer.GetGomo()
	gomo.Initialize(sim.onGomoStart, sim.onGomoStop, sim.onUpdate)
	gomo.Start()
	peer.LogDebug("OUT")
}
// SetScene sets a driver as a scene.
// If a driver is already set, it is replaced with new one.
// NOTE(review): RemoveSprites and Initialize are each invoked twice in
// this sequence; the repetition looks redundant — confirm whether the
// peer layer requires it before simplifying.
func (sim *simra) SetScene(driver Driver) {
	peer.LogDebug("IN")
	sim.spritecontainer.RemoveSprites()
	sim.gl.Reset()
	sim.spritecontainer.Initialize(sim.gl)
	peer.GetTouchPeer().RemoveAllTouchListeners()
	sim.spritecontainer.RemoveSprites()
	sim.driver = driver
	sim.spritecontainer.Initialize(sim.gl)
	// A blank sprite whose animation callback is fps.Progress keeps
	// the FPS counter ticking every frame.
	sim.spritecontainer.AddSprite(&peer.Sprite{}, nil, fps.Progress)
	driver.Initialize()
	peer.LogDebug("OUT")
}
// NewSprite returns an instance of Sprite
// bound to this simra instance, with an empty animation-set map.
func (sim *simra) NewSprite() Spriter {
	return &sprite{
		simra: sim,
		animationSets: map[string]*AnimationSet{},
	}
}
// AddSprite adds a sprite to current scene with empty texture.
func (sim *simra) AddSprite(s Spriter) {
	sp := s.(*sprite) // panics if s is not the concrete *sprite type
	sim.spritecontainer.AddSprite(&sp.Sprite, nil, nil)
}
// RemoveSprite removes specified sprite from current scene.
// Removed sprite will be disappeared.
func (sim *simra) RemoveSprite(s Spriter) {
	sp := s.(*sprite)
	sp.texture = nil // drop the texture reference
	sim.spritecontainer.RemoveSprite(&sp.Sprite)
}
// SetDesiredScreenSize configures virtual screen size.
// This function must be called at least once before calling Start.
func (sim *simra) SetDesiredScreenSize(w, h float32) {
	ss := peer.GetScreenSizePeer()
	ss.SetDesiredScreenSize(w, h)
}
// AddTouchListener registers a listener for notifying touch event.
// Event is notified when "screen" is touched.
// Note: touch listeners are held by the global touch peer, not by
// this simra instance.
func (sim *simra) AddTouchListener(listener peer.TouchListener) {
	peer.GetTouchPeer().AddTouchListener(listener)
}
// RemoveTouchListener unregisters a listener for notifying touch event.
func (sim *simra) RemoveTouchListener(listener peer.TouchListener) {
	peer.GetTouchPeer().RemoveTouchListener(listener)
}
// AddCollisionListener add a callback function that is called on
// collision is detected between c1 and c2.
// NOTE(review): the listener slice is unguarded (see TODO); concurrent
// registration would race with collisionCheckAndNotify.
func (sim *simra) AddCollisionListener(c1, c2 Collider, listener CollisionListener) {
	// TODO: exclusive controll
	LogDebug("IN")
	sim.comap = append(sim.comap, &collisionMap{c1, c2, listener})
	LogDebug("OUT")
}
// removeCollisionMap removes registered collision entries matching c.
// An entry is removed when it is the same object as c, or when BOTH of
// its colliders equal c's.
//
// Bug fix: the original kept v only when c.c1 != v.c1 && c.c2 != v.c2
// && v != c, which also discarded any entry sharing just ONE collider
// with c — unrelated listeners vanished as a side effect.
func (sim *simra) removeCollisionMap(c *collisionMap) {
	result := make([]*collisionMap, 0, len(sim.comap))
	for _, v := range sim.comap {
		if v == c || (v.c1 == c.c1 && v.c2 == c.c2) {
			continue // exact match: drop it
		}
		result = append(result, v)
	}
	sim.comap = result
}
// RemoveAllCollisionListener removes all registered listeners
// by dropping the whole collision map. A later AddCollisionListener
// re-creates it via append on the nil slice.
func (sim *simra) RemoveAllCollisionListener() {
	LogDebug("IN")
	sim.comap = nil
	LogDebug("OUT")
}
// collisionCheckAndNotify performs per-frame axis-aligned collision
// detection over all registered collider pairs: a pair collides when
// any corner of c1's rectangle lies inside c2's rectangle.
//
// NOTE(review): corner-in-rectangle misses the case where c2 sits
// entirely inside c1 (all of c1's corners outside c2) — confirm that
// cannot happen for the registered colliders.
// NOTE(review): the early "return" stops after the FIRST colliding
// pair, so at most one listener fires per frame — verify intended.
func (sim *simra) collisionCheckAndNotify() {
	//LogDebug("IN")
	// check collision
	for _, v := range sim.comap {
		// TODO: refactor around here...
		x1, y1, w1, h1 := v.c1.GetXYWH()
		x2, y2, w2, h2 := v.c2.GetXYWH()
		// c1's four corners: centre +/- half extents.
		p1 := &point{x1 - w1/2, y1 + h1/2}
		p2 := &point{x1 + w1/2, y1 + h1/2}
		p3 := &point{x1 - w1/2, y1 - h1/2}
		p4 := &point{x1 + w1/2, y1 - h1/2}
		if p1.x >= (x2-w2/2) && p1.x <= (x2+w2/2) &&
			p1.y >= (y2-h2/2) && p1.y <= (y2+h2/2) {
			v.listener.OnCollision(v.c1, v.c2)
			return
		}
		if p2.x >= (x2-w2/2) && p2.x <= (x2+w2/2) &&
			p2.y >= (y2-h2/2) && p2.y <= (y2+h2/2) {
			v.listener.OnCollision(v.c1, v.c2)
			return
		}
		if p3.x >= (x2-w2/2) && p3.x <= (x2+w2/2) &&
			p3.y >= (y2-h2/2) && p3.y <= (y2+h2/2) {
			v.listener.OnCollision(v.c1, v.c2)
			return
		}
		if p4.x >= (x2-w2/2) && p4.x <= (x2+w2/2) &&
			p4.y >= (y2-h2/2) && p4.y <= (y2+h2/2) {
			v.listener.OnCollision(v.c1, v.c2)
			return
		}
	}
	//LogDebug("OUT")
}
// RemoveCollisionListener removes a collision map by specified collider instance.
// The listener field of the probe struct is nil; matching is done on
// the collider pair (see removeCollisionMap).
func (sim *simra) RemoveCollisionListener(c1, c2 Collider) {
	// TODO: exclusive controll
	LogDebug("IN")
	sim.removeCollisionMap(&collisionMap{c1, c2, nil})
	LogDebug("OUT")
}
// comapLength reports how many collision listeners are registered
// (presumably a test helper — it has no callers in this file).
func (sim *simra) comapLength() int {
	return len(sim.comap)
}
// LogDebug prints logs.
// From simra, just call peer.LogDebug.
// This is disabled at Release Build.
func LogDebug(format string, a ...interface{}) {
	peer.LogDebug(format, a...)
}
// LogError prints logs.
// From simra, just call peer.LogError.
// This is never disabled even for Release build.
func LogError(format string, a ...interface{}) {
	peer.LogError(format, a...)
}
add NewSimra constructor so simra can be used as a regular (non-singleton) object, allowing multiple instances
package simra
import (
"github.com/pankona/gomo-simra/simra/fps"
"github.com/pankona/gomo-simra/simra/internal/peer"
)
// Simraer represents an interface of simra instance
type Simraer interface {
// Start needs to call to enable all function belong to simra package.
Start(onStart, onStop func())
// SetScene sets a driver as a scene.
// If a driver is already set, it is replaced with new one.
SetScene(driver Driver)
// NewSprite returns an instance of Spriter
NewSprite() Spriter
// AddSprite adds a sprite to current scene with empty texture.
AddSprite(s Spriter)
// RemoveSprite removes specified sprite from current scene.
// Removed sprite will be disappeared.
RemoveSprite(s Spriter)
// SetDesiredScreenSize configures virtual screen size.
// This function must be called at least once before calling Start.
SetDesiredScreenSize(w, h float32)
// AddTouchListener registers a listener for notifying touch event.
// Event is notified when "screen" is touched.
AddTouchListener(listener peer.TouchListener)
// RemoveTouchListener unregisters a listener for notifying touch event.
RemoveTouchListener(listener peer.TouchListener)
// AddCollisionListener add a callback function that is called on
// collision is detected between c1 and c2.
AddCollisionListener(c1, c2 Collider, listener CollisionListener)
// RemoveAllCollisionListener removes all registered listeners
RemoveAllCollisionListener()
}
type collisionMap struct {
c1 Collider
c2 Collider
listener CollisionListener
}
// Simra is a struct that provides API interface of simra
type simra struct {
driver Driver
comap []*collisionMap
gl peer.GLer
spritecontainer peer.SpriteContainerer
onStart func()
onStop func()
}
// TODO: don't declare as package global.
var sim = &simra{
comap: make([]*collisionMap, 0),
}
// GetInstance returns instance of Simra.
// It is necessary to call this function to get Simra instance
// since Simra is single instance.
func GetInstance() Simraer {
	return sim
}
// NewSimra returns an instance of Simraer
// that is independent of the package-level singleton, allowing
// multiple engine instances to coexist.
func NewSimra() Simraer {
	return &simra{
		comap: make([]*collisionMap, 0),
	}
}
type point struct {
x, y int
}
func (sim *simra) onUpdate() {
if sim.driver != nil {
sim.driver.Drive()
}
sim.collisionCheckAndNotify()
sim.gl.Update(sim.spritecontainer)
}
func (sim *simra) onStopped() {
peer.LogDebug("IN")
sim.driver = nil
sim.gl.Finalize()
peer.LogDebug("OUT")
}
func (sim *simra) onGomoStart(glc *peer.GLContext) {
sim.gl.Initialize(glc)
sim.onStart()
}
func (sim *simra) onGomoStop() {
sim.spritecontainer.Initialize(sim.gl)
sim.gl.Finalize()
sim.onStop()
}
// Start needs to call to enable all function belong to simra package.
func (sim *simra) Start(onStart, onStop func()) {
peer.LogDebug("IN")
gl := peer.NewGLPeer()
sc := peer.GetSpriteContainer()
sc.Initialize(gl)
sim.gl = gl
sim.spritecontainer = sc
sim.onStart = onStart
sim.onStop = onStop
gomo := peer.GetGomo()
gomo.Initialize(sim.onGomoStart, sim.onGomoStop, sim.onUpdate)
gomo.Start()
peer.LogDebug("OUT")
}
// SetScene sets a driver as a scene.
// If a driver is already set, it is replaced with new one.
func (sim *simra) SetScene(driver Driver) {
peer.LogDebug("IN")
sim.spritecontainer.RemoveSprites()
sim.gl.Reset()
sim.spritecontainer.Initialize(sim.gl)
peer.GetTouchPeer().RemoveAllTouchListeners()
sim.spritecontainer.RemoveSprites()
sim.driver = driver
sim.spritecontainer.Initialize(sim.gl)
sim.spritecontainer.AddSprite(&peer.Sprite{}, nil, fps.Progress)
driver.Initialize()
peer.LogDebug("OUT")
}
// NewSprite returns an instance of Sprite
func (sim *simra) NewSprite() Spriter {
return &sprite{
simra: sim,
animationSets: map[string]*AnimationSet{},
}
}
// AddSprite adds a sprite to current scene with empty texture.
func (sim *simra) AddSprite(s Spriter) {
sp := s.(*sprite)
sim.spritecontainer.AddSprite(&sp.Sprite, nil, nil)
}
// RemoveSprite removes specified sprite from current scene.
// Removed sprite will be disappeared.
func (sim *simra) RemoveSprite(s Spriter) {
sp := s.(*sprite)
sp.texture = nil
sim.spritecontainer.RemoveSprite(&sp.Sprite)
}
// SetDesiredScreenSize configures virtual screen size.
// This function must be called at least once before calling Start.
func (sim *simra) SetDesiredScreenSize(w, h float32) {
ss := peer.GetScreenSizePeer()
ss.SetDesiredScreenSize(w, h)
}
// AddTouchListener registers a listener for notifying touch event.
// Event is notified when "screen" is touched.
func (sim *simra) AddTouchListener(listener peer.TouchListener) {
peer.GetTouchPeer().AddTouchListener(listener)
}
// RemoveTouchListener unregisters a listener for notifying touch event.
func (sim *simra) RemoveTouchListener(listener peer.TouchListener) {
peer.GetTouchPeer().RemoveTouchListener(listener)
}
// AddCollisionListener add a callback function that is called on
// collision is detected between c1 and c2.
func (sim *simra) AddCollisionListener(c1, c2 Collider, listener CollisionListener) {
// TODO: exclusive controll
LogDebug("IN")
sim.comap = append(sim.comap, &collisionMap{c1, c2, listener})
LogDebug("OUT")
}
// removeCollisionMap removes registered collision entries matching c.
// An entry is removed when it is the same object as c, or when BOTH of
// its colliders equal c's.
//
// Bug fix: the original kept v only when c.c1 != v.c1 && c.c2 != v.c2
// && v != c, which also discarded any entry sharing just ONE collider
// with c — unrelated listeners vanished as a side effect.
func (sim *simra) removeCollisionMap(c *collisionMap) {
	result := make([]*collisionMap, 0, len(sim.comap))
	for _, v := range sim.comap {
		if v == c || (v.c1 == c.c1 && v.c2 == c.c2) {
			continue // exact match: drop it
		}
		result = append(result, v)
	}
	sim.comap = result
}
// RemoveAllCollisionListener removes all registered listeners
func (sim *simra) RemoveAllCollisionListener() {
LogDebug("IN")
sim.comap = nil
LogDebug("OUT")
}
func (sim *simra) collisionCheckAndNotify() {
//LogDebug("IN")
// check collision
for _, v := range sim.comap {
// TODO: refactor around here...
x1, y1, w1, h1 := v.c1.GetXYWH()
x2, y2, w2, h2 := v.c2.GetXYWH()
p1 := &point{x1 - w1/2, y1 + h1/2}
p2 := &point{x1 + w1/2, y1 + h1/2}
p3 := &point{x1 - w1/2, y1 - h1/2}
p4 := &point{x1 + w1/2, y1 - h1/2}
if p1.x >= (x2-w2/2) && p1.x <= (x2+w2/2) &&
p1.y >= (y2-h2/2) && p1.y <= (y2+h2/2) {
v.listener.OnCollision(v.c1, v.c2)
return
}
if p2.x >= (x2-w2/2) && p2.x <= (x2+w2/2) &&
p2.y >= (y2-h2/2) && p2.y <= (y2+h2/2) {
v.listener.OnCollision(v.c1, v.c2)
return
}
if p3.x >= (x2-w2/2) && p3.x <= (x2+w2/2) &&
p3.y >= (y2-h2/2) && p3.y <= (y2+h2/2) {
v.listener.OnCollision(v.c1, v.c2)
return
}
if p4.x >= (x2-w2/2) && p4.x <= (x2+w2/2) &&
p4.y >= (y2-h2/2) && p4.y <= (y2+h2/2) {
v.listener.OnCollision(v.c1, v.c2)
return
}
}
//LogDebug("OUT")
}
// RemoveCollisionListener removes a collision map by specified collider instance.
func (sim *simra) RemoveCollisionListener(c1, c2 Collider) {
// TODO: exclusive controll
LogDebug("IN")
sim.removeCollisionMap(&collisionMap{c1, c2, nil})
LogDebug("OUT")
}
func (sim *simra) comapLength() int {
return len(sim.comap)
}
// LogDebug prints logs.
// From simra, just call peer.LogDebug.
// This is disabled at Release Build.
func LogDebug(format string, a ...interface{}) {
peer.LogDebug(format, a...)
}
// LogError prints logs.
// From simra, just call peer.LogError.
// This is never disabled even for Release build.
func LogError(format string, a ...interface{}) {
peer.LogError(format, a...)
}
|
package chardet
// Recognizer for single byte charset family.
// charMap folds each raw input byte to a canonical byte (0x00 entries
// are dropped by parseNgram); ngram is a sorted 64-entry table of the
// language's most frequent three-byte sequences.
type recognizerSingleByte struct {
	charset string
	// hasC1ByteCharset, when non-empty, is reported instead of charset
	// if the input contains C1 bytes (e.g. windows-1252 vs ISO-8859-1).
	hasC1ByteCharset string
	language string
	charMap *[256]byte
	ngram *[64]uint32
}
// Match scores the input against this recognizer's n-gram table and
// returns the detected charset (or its C1-byte variant when the input
// contains C1 bytes), language, and a confidence value.
func (r *recognizerSingleByte) Match(input *recognizerInput) recognizerOutput {
	name := r.charset
	if input.hasC1Bytes && len(r.hasC1ByteCharset) > 0 {
		name = r.hasC1ByteCharset
	}
	confidence := r.parseNgram(input.input)
	return recognizerOutput{
		Charset:    name,
		Language:   r.language,
		Confidence: confidence,
	}
}
// ngramState accumulates a rolling three-byte n-gram over mapped input
// bytes and counts how many of those n-grams occur in a reference
// frequency table.
type ngramState struct {
	ngram       uint32 // rolling window holding the last 3 bytes
	ignoreSpace bool   // previous byte was a space; collapse runs of spaces
	ngramCount  uint32 // n-grams examined
	ngramHit    uint32 // n-grams found in the table
	table       *[64]uint32 // sorted ascending; searched by lookup()
}

// newNgramState returns a fresh ngramState over the given sorted table.
func newNgramState(table *[64]uint32) *ngramState {
	return &ngramState{
		ngram:       0,
		ignoreSpace: false,
		ngramCount:  0,
		ngramHit:    0,
		table:       table,
	}
}

// AddByte shifts b into the rolling 3-byte window and looks the window
// up in the table. A space immediately following a space is skipped so
// runs of whitespace count once.
func (s *ngramState) AddByte(b byte) {
	const ngramMask = 0xFFFFFF
	if !(b == 0x20 && s.ignoreSpace) {
		// BUG FIX: mask the whole shifted window, not just b. In the
		// original "(s.ngram << 8) | uint32(b)&ngramMask", Go's
		// precedence applies the mask only to b (a no-op on a byte),
		// so the window grew past 24 bits and lookups after the third
		// byte could never match a 3-byte table entry.
		s.ngram = ((s.ngram << 8) | uint32(b)) & ngramMask
		s.ngramCount++
		if s.lookup() {
			s.ngramHit++
		}
	}
	// (The original also assigned ignoreSpace inside the branch above;
	// that value was immediately overwritten here, i.e. dead code.)
	s.ignoreSpace = (b == 0x20)
}

// HitRate returns the fraction of examined n-grams found in the table.
func (s *ngramState) HitRate() float32 {
	if s.ngramCount == 0 {
		return 0
	}
	return float32(s.ngramHit) / float32(s.ngramCount)
}

// lookup binary-searches the sorted 64-entry table for the current
// n-gram and reports whether it is present.
func (s *ngramState) lookup() bool {
	var index int
	if s.table[index+32] <= s.ngram {
		index += 32
	}
	if s.table[index+16] <= s.ngram {
		index += 16
	}
	if s.table[index+8] <= s.ngram {
		index += 8
	}
	if s.table[index+4] <= s.ngram {
		index += 4
	}
	if s.table[index+2] <= s.ngram {
		index += 2
	}
	if s.table[index+1] <= s.ngram {
		index += 1
	}
	if s.table[index] > s.ngram {
		index -= 1
	}
	if index < 0 || s.table[index] != s.ngram {
		return false
	}
	return true
}
// parseNgram runs the recognizer's n-gram statistics over input and
// maps the resulting hit rate to a confidence value in [0, 100).
func (r *recognizerSingleByte) parseNgram(input []byte) int {
	state := newNgramState(r.ngram)
	for _, raw := range input {
		mapped := r.charMap[raw]
		if mapped == 0 {
			continue // byte has no representation in this charset
		}
		state.AddByte(mapped)
	}
	state.AddByte(0x20) // flush: terminate the final word
	rate := state.HitRate()
	if rate > 0.33 {
		return 98 // cap confidence below certainty
	}
	return int(rate * 300)
}
var charMap_8859_1 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0xAA, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0xB5, 0x20, 0x20,
0x20, 0x20, 0xBA, 0x20, 0x20, 0x20, 0x20, 0x20,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
}
var ngrams_8859_1_en = [64]uint32{
0x206120, 0x20616E, 0x206265, 0x20636F, 0x20666F, 0x206861, 0x206865, 0x20696E, 0x206D61, 0x206F66, 0x207072, 0x207265, 0x207361, 0x207374, 0x207468, 0x20746F,
0x207768, 0x616964, 0x616C20, 0x616E20, 0x616E64, 0x617320, 0x617420, 0x617465, 0x617469, 0x642061, 0x642074, 0x652061, 0x652073, 0x652074, 0x656420, 0x656E74,
0x657220, 0x657320, 0x666F72, 0x686174, 0x686520, 0x686572, 0x696420, 0x696E20, 0x696E67, 0x696F6E, 0x697320, 0x6E2061, 0x6E2074, 0x6E6420, 0x6E6720, 0x6E7420,
0x6F6620, 0x6F6E20, 0x6F7220, 0x726520, 0x727320, 0x732061, 0x732074, 0x736169, 0x737420, 0x742074, 0x746572, 0x746861, 0x746865, 0x74696F, 0x746F20, 0x747320,
}
var ngrams_8859_1_da = [64]uint32{
0x206166, 0x206174, 0x206465, 0x20656E, 0x206572, 0x20666F, 0x206861, 0x206920, 0x206D65, 0x206F67, 0x2070E5, 0x207369, 0x207374, 0x207469, 0x207669, 0x616620,
0x616E20, 0x616E64, 0x617220, 0x617420, 0x646520, 0x64656E, 0x646572, 0x646574, 0x652073, 0x656420, 0x656465, 0x656E20, 0x656E64, 0x657220, 0x657265, 0x657320,
0x657420, 0x666F72, 0x676520, 0x67656E, 0x676572, 0x696765, 0x696C20, 0x696E67, 0x6B6520, 0x6B6B65, 0x6C6572, 0x6C6967, 0x6C6C65, 0x6D6564, 0x6E6465, 0x6E6520,
0x6E6720, 0x6E6765, 0x6F6720, 0x6F6D20, 0x6F7220, 0x70E520, 0x722064, 0x722065, 0x722073, 0x726520, 0x737465, 0x742073, 0x746520, 0x746572, 0x74696C, 0x766572,
}
var ngrams_8859_1_de = [64]uint32{
0x20616E, 0x206175, 0x206265, 0x206461, 0x206465, 0x206469, 0x206569, 0x206765, 0x206861, 0x20696E, 0x206D69, 0x207363, 0x207365, 0x20756E, 0x207665, 0x20766F,
0x207765, 0x207A75, 0x626572, 0x636820, 0x636865, 0x636874, 0x646173, 0x64656E, 0x646572, 0x646965, 0x652064, 0x652073, 0x65696E, 0x656974, 0x656E20, 0x657220,
0x657320, 0x67656E, 0x68656E, 0x687420, 0x696368, 0x696520, 0x696E20, 0x696E65, 0x697420, 0x6C6963, 0x6C6C65, 0x6E2061, 0x6E2064, 0x6E2073, 0x6E6420, 0x6E6465,
0x6E6520, 0x6E6720, 0x6E6765, 0x6E7465, 0x722064, 0x726465, 0x726569, 0x736368, 0x737465, 0x742064, 0x746520, 0x74656E, 0x746572, 0x756E64, 0x756E67, 0x766572,
}
var ngrams_8859_1_es = [64]uint32{
0x206120, 0x206361, 0x20636F, 0x206465, 0x20656C, 0x20656E, 0x206573, 0x20696E, 0x206C61, 0x206C6F, 0x207061, 0x20706F, 0x207072, 0x207175, 0x207265, 0x207365,
0x20756E, 0x207920, 0x612063, 0x612064, 0x612065, 0x61206C, 0x612070, 0x616369, 0x61646F, 0x616C20, 0x617220, 0x617320, 0x6369F3, 0x636F6E, 0x646520, 0x64656C,
0x646F20, 0x652064, 0x652065, 0x65206C, 0x656C20, 0x656E20, 0x656E74, 0x657320, 0x657374, 0x69656E, 0x69F36E, 0x6C6120, 0x6C6F73, 0x6E2065, 0x6E7465, 0x6F2064,
0x6F2065, 0x6F6E20, 0x6F7220, 0x6F7320, 0x706172, 0x717565, 0x726120, 0x726573, 0x732064, 0x732065, 0x732070, 0x736520, 0x746520, 0x746F20, 0x756520, 0xF36E20,
}
var ngrams_8859_1_fr = [64]uint32{
0x206175, 0x20636F, 0x206461, 0x206465, 0x206475, 0x20656E, 0x206574, 0x206C61, 0x206C65, 0x207061, 0x20706F, 0x207072, 0x207175, 0x207365, 0x20736F, 0x20756E,
0x20E020, 0x616E74, 0x617469, 0x636520, 0x636F6E, 0x646520, 0x646573, 0x647520, 0x652061, 0x652063, 0x652064, 0x652065, 0x65206C, 0x652070, 0x652073, 0x656E20,
0x656E74, 0x657220, 0x657320, 0x657420, 0x657572, 0x696F6E, 0x697320, 0x697420, 0x6C6120, 0x6C6520, 0x6C6573, 0x6D656E, 0x6E2064, 0x6E6520, 0x6E7320, 0x6E7420,
0x6F6E20, 0x6F6E74, 0x6F7572, 0x717565, 0x72206C, 0x726520, 0x732061, 0x732064, 0x732065, 0x73206C, 0x732070, 0x742064, 0x746520, 0x74696F, 0x756520, 0x757220,
}
var ngrams_8859_1_it = [64]uint32{
0x20616C, 0x206368, 0x20636F, 0x206465, 0x206469, 0x206520, 0x20696C, 0x20696E, 0x206C61, 0x207065, 0x207072, 0x20756E, 0x612063, 0x612064, 0x612070, 0x612073,
0x61746F, 0x636865, 0x636F6E, 0x64656C, 0x646920, 0x652061, 0x652063, 0x652064, 0x652069, 0x65206C, 0x652070, 0x652073, 0x656C20, 0x656C6C, 0x656E74, 0x657220,
0x686520, 0x692061, 0x692063, 0x692064, 0x692073, 0x696120, 0x696C20, 0x696E20, 0x696F6E, 0x6C6120, 0x6C6520, 0x6C6920, 0x6C6C61, 0x6E6520, 0x6E6920, 0x6E6F20,
0x6E7465, 0x6F2061, 0x6F2064, 0x6F2069, 0x6F2073, 0x6F6E20, 0x6F6E65, 0x706572, 0x726120, 0x726520, 0x736920, 0x746120, 0x746520, 0x746920, 0x746F20, 0x7A696F,
}
var ngrams_8859_1_nl = [64]uint32{
0x20616C, 0x206265, 0x206461, 0x206465, 0x206469, 0x206565, 0x20656E, 0x206765, 0x206865, 0x20696E, 0x206D61, 0x206D65, 0x206F70, 0x207465, 0x207661, 0x207665,
0x20766F, 0x207765, 0x207A69, 0x61616E, 0x616172, 0x616E20, 0x616E64, 0x617220, 0x617420, 0x636874, 0x646520, 0x64656E, 0x646572, 0x652062, 0x652076, 0x65656E,
0x656572, 0x656E20, 0x657220, 0x657273, 0x657420, 0x67656E, 0x686574, 0x696520, 0x696E20, 0x696E67, 0x697320, 0x6E2062, 0x6E2064, 0x6E2065, 0x6E2068, 0x6E206F,
0x6E2076, 0x6E6465, 0x6E6720, 0x6F6E64, 0x6F6F72, 0x6F7020, 0x6F7220, 0x736368, 0x737465, 0x742064, 0x746520, 0x74656E, 0x746572, 0x76616E, 0x766572, 0x766F6F,
}
var ngrams_8859_1_no = [64]uint32{
0x206174, 0x206176, 0x206465, 0x20656E, 0x206572, 0x20666F, 0x206861, 0x206920, 0x206D65, 0x206F67, 0x2070E5, 0x207365, 0x20736B, 0x20736F, 0x207374, 0x207469,
0x207669, 0x20E520, 0x616E64, 0x617220, 0x617420, 0x646520, 0x64656E, 0x646574, 0x652073, 0x656420, 0x656E20, 0x656E65, 0x657220, 0x657265, 0x657420, 0x657474,
0x666F72, 0x67656E, 0x696B6B, 0x696C20, 0x696E67, 0x6B6520, 0x6B6B65, 0x6C6520, 0x6C6C65, 0x6D6564, 0x6D656E, 0x6E2073, 0x6E6520, 0x6E6720, 0x6E6765, 0x6E6E65,
0x6F6720, 0x6F6D20, 0x6F7220, 0x70E520, 0x722073, 0x726520, 0x736F6D, 0x737465, 0x742073, 0x746520, 0x74656E, 0x746572, 0x74696C, 0x747420, 0x747465, 0x766572,
}
var ngrams_8859_1_pt = [64]uint32{
0x206120, 0x20636F, 0x206461, 0x206465, 0x20646F, 0x206520, 0x206573, 0x206D61, 0x206E6F, 0x206F20, 0x207061, 0x20706F, 0x207072, 0x207175, 0x207265, 0x207365,
0x20756D, 0x612061, 0x612063, 0x612064, 0x612070, 0x616465, 0x61646F, 0x616C20, 0x617220, 0x617261, 0x617320, 0x636F6D, 0x636F6E, 0x646120, 0x646520, 0x646F20,
0x646F73, 0x652061, 0x652064, 0x656D20, 0x656E74, 0x657320, 0x657374, 0x696120, 0x696361, 0x6D656E, 0x6E7465, 0x6E746F, 0x6F2061, 0x6F2063, 0x6F2064, 0x6F2065,
0x6F2070, 0x6F7320, 0x706172, 0x717565, 0x726120, 0x726573, 0x732061, 0x732064, 0x732065, 0x732070, 0x737461, 0x746520, 0x746F20, 0x756520, 0xE36F20, 0xE7E36F,
}
var ngrams_8859_1_sv = [64]uint32{
0x206174, 0x206176, 0x206465, 0x20656E, 0x2066F6, 0x206861, 0x206920, 0x20696E, 0x206B6F, 0x206D65, 0x206F63, 0x2070E5, 0x20736B, 0x20736F, 0x207374, 0x207469,
0x207661, 0x207669, 0x20E472, 0x616465, 0x616E20, 0x616E64, 0x617220, 0x617474, 0x636820, 0x646520, 0x64656E, 0x646572, 0x646574, 0x656420, 0x656E20, 0x657220,
0x657420, 0x66F672, 0x67656E, 0x696C6C, 0x696E67, 0x6B6120, 0x6C6C20, 0x6D6564, 0x6E2073, 0x6E6120, 0x6E6465, 0x6E6720, 0x6E6765, 0x6E696E, 0x6F6368, 0x6F6D20,
0x6F6E20, 0x70E520, 0x722061, 0x722073, 0x726120, 0x736B61, 0x736F6D, 0x742073, 0x746120, 0x746520, 0x746572, 0x74696C, 0x747420, 0x766172, 0xE47220, 0xF67220,
}
// newRecognizer_8859_1 builds an ISO-8859-1 recognizer for the given
// language, sharing the common charMap and using the language's
// n-gram table. Inputs containing C1 bytes are reported as
// windows-1252 instead (see recognizerSingleByte.Match).
func newRecognizer_8859_1(language string, ngram *[64]uint32) *recognizerSingleByte {
	return &recognizerSingleByte{
		charset: "ISO-8859-1",
		hasC1ByteCharset: "windows-1252",
		language: language,
		charMap: &charMap_8859_1,
		ngram: ngram,
	}
}
// Per-language ISO-8859-1 recognizer constructors; each pairs the
// shared charMap with that language's n-gram frequency table.
func newRecognizer_8859_1_en() *recognizerSingleByte {
	return newRecognizer_8859_1("en", &ngrams_8859_1_en)
}
func newRecognizer_8859_1_da() *recognizerSingleByte {
	return newRecognizer_8859_1("da", &ngrams_8859_1_da)
}
func newRecognizer_8859_1_de() *recognizerSingleByte {
	return newRecognizer_8859_1("de", &ngrams_8859_1_de)
}
func newRecognizer_8859_1_es() *recognizerSingleByte {
	return newRecognizer_8859_1("es", &ngrams_8859_1_es)
}
func newRecognizer_8859_1_fr() *recognizerSingleByte {
	return newRecognizer_8859_1("fr", &ngrams_8859_1_fr)
}
func newRecognizer_8859_1_it() *recognizerSingleByte {
	return newRecognizer_8859_1("it", &ngrams_8859_1_it)
}
func newRecognizer_8859_1_nl() *recognizerSingleByte {
	return newRecognizer_8859_1("nl", &ngrams_8859_1_nl)
}
func newRecognizer_8859_1_no() *recognizerSingleByte {
	return newRecognizer_8859_1("no", &ngrams_8859_1_no)
}
func newRecognizer_8859_1_pt() *recognizerSingleByte {
	return newRecognizer_8859_1("pt", &ngrams_8859_1_pt)
}
func newRecognizer_8859_1_sv() *recognizerSingleByte {
	return newRecognizer_8859_1("sv", &ngrams_8859_1_sv)
}
// charMap_8859_2 folds each raw ISO-8859-2 input byte for trigram matching:
// ASCII A-Z is lowercased, the accented upper-case range (0xC0-0xDF) is folded
// onto its lower-case range (0xE0-0xFF), and bytes with no letter mapping
// collapse to 0x20 (space). NOTE(review): index 0x27 (apostrophe) maps to
// 0x00 rather than 0x20 — apparently a distinct marker; confirm in the
// matching loop.
var charMap_8859_2 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0xB1, 0x20, 0xB3, 0x20, 0xB5, 0xB6, 0x20,
0x20, 0xB9, 0xBA, 0xBB, 0xBC, 0x20, 0xBE, 0xBF,
0x20, 0xB1, 0x20, 0xB3, 0x20, 0xB5, 0xB6, 0xB7,
0x20, 0xB9, 0xBA, 0xBB, 0xBC, 0x20, 0xBE, 0xBF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0x20,
}
// ngrams_8859_2_cs: 64 byte trigrams of Czech text in ISO-8859-2, packed
// big-endian into the low 3 bytes of each uint32, listed ascending.
var ngrams_8859_2_cs = [64]uint32{
0x206120, 0x206279, 0x20646F, 0x206A65, 0x206E61, 0x206E65, 0x206F20, 0x206F64, 0x20706F, 0x207072, 0x2070F8, 0x20726F, 0x207365, 0x20736F, 0x207374, 0x20746F,
0x207620, 0x207679, 0x207A61, 0x612070, 0x636520, 0x636820, 0x652070, 0x652073, 0x652076, 0x656D20, 0x656EED, 0x686F20, 0x686F64, 0x697374, 0x6A6520, 0x6B7465,
0x6C6520, 0x6C6920, 0x6E6120, 0x6EE920, 0x6EEC20, 0x6EED20, 0x6F2070, 0x6F646E, 0x6F6A69, 0x6F7374, 0x6F7520, 0x6F7661, 0x706F64, 0x706F6A, 0x70726F, 0x70F865,
0x736520, 0x736F75, 0x737461, 0x737469, 0x73746E, 0x746572, 0x746EED, 0x746F20, 0x752070, 0xBE6520, 0xE16EED, 0xE9686F, 0xED2070, 0xED2073, 0xED6D20, 0xF86564,
}
// ngrams_8859_2_hu: as above, for Hungarian.
var ngrams_8859_2_hu = [64]uint32{
0x206120, 0x20617A, 0x206265, 0x206567, 0x20656C, 0x206665, 0x206861, 0x20686F, 0x206973, 0x206B65, 0x206B69, 0x206BF6, 0x206C65, 0x206D61, 0x206D65, 0x206D69,
0x206E65, 0x20737A, 0x207465, 0x20E973, 0x612061, 0x61206B, 0x61206D, 0x612073, 0x616B20, 0x616E20, 0x617A20, 0x62616E, 0x62656E, 0x656779, 0x656B20, 0x656C20,
0x656C65, 0x656D20, 0x656E20, 0x657265, 0x657420, 0x657465, 0x657474, 0x677920, 0x686F67, 0x696E74, 0x697320, 0x6B2061, 0x6BF67A, 0x6D6567, 0x6D696E, 0x6E2061,
0x6E616B, 0x6E656B, 0x6E656D, 0x6E7420, 0x6F6779, 0x732061, 0x737A65, 0x737A74, 0x737AE1, 0x73E967, 0x742061, 0x747420, 0x74E173, 0x7A6572, 0xE16E20, 0xE97320,
}
// ngrams_8859_2_pl: as above, for Polish.
var ngrams_8859_2_pl = [64]uint32{
0x20637A, 0x20646F, 0x206920, 0x206A65, 0x206B6F, 0x206D61, 0x206D69, 0x206E61, 0x206E69, 0x206F64, 0x20706F, 0x207072, 0x207369, 0x207720, 0x207769, 0x207779,
0x207A20, 0x207A61, 0x612070, 0x612077, 0x616E69, 0x636820, 0x637A65, 0x637A79, 0x646F20, 0x647A69, 0x652070, 0x652073, 0x652077, 0x65207A, 0x65676F, 0x656A20,
0x656D20, 0x656E69, 0x676F20, 0x696120, 0x696520, 0x69656A, 0x6B6120, 0x6B6920, 0x6B6965, 0x6D6965, 0x6E6120, 0x6E6961, 0x6E6965, 0x6F2070, 0x6F7761, 0x6F7769,
0x706F6C, 0x707261, 0x70726F, 0x70727A, 0x727A65, 0x727A79, 0x7369EA, 0x736B69, 0x737461, 0x776965, 0x796368, 0x796D20, 0x7A6520, 0x7A6965, 0x7A7920, 0xF37720,
}
// ngrams_8859_2_ro: as above, for Romanian.
var ngrams_8859_2_ro = [64]uint32{
0x206120, 0x206163, 0x206361, 0x206365, 0x20636F, 0x206375, 0x206465, 0x206469, 0x206C61, 0x206D61, 0x207065, 0x207072, 0x207365, 0x2073E3, 0x20756E, 0x20BA69,
0x20EE6E, 0x612063, 0x612064, 0x617265, 0x617420, 0x617465, 0x617520, 0x636172, 0x636F6E, 0x637520, 0x63E320, 0x646520, 0x652061, 0x652063, 0x652064, 0x652070,
0x652073, 0x656120, 0x656920, 0x656C65, 0x656E74, 0x657374, 0x692061, 0x692063, 0x692064, 0x692070, 0x696520, 0x696920, 0x696E20, 0x6C6120, 0x6C6520, 0x6C6F72,
0x6C7569, 0x6E6520, 0x6E7472, 0x6F7220, 0x70656E, 0x726520, 0x726561, 0x727520, 0x73E320, 0x746520, 0x747275, 0x74E320, 0x756920, 0x756C20, 0xBA6920, 0xEE6E20,
}
// newRecognizer_8859_2 builds a single-byte recognizer for the given language
// over the shared ISO-8859-2 char map, matching against the supplied trigram
// table. hasC1ByteCharset names windows-1250, presumably reported instead
// when C1-range bytes appear — confirm in the matcher.
func newRecognizer_8859_2(language string, ngram *[64]uint32) *recognizerSingleByte {
	r := &recognizerSingleByte{}
	r.charset = "ISO-8859-2"
	r.hasC1ByteCharset = "windows-1250"
	r.language = language
	r.charMap = &charMap_8859_2
	r.ngram = ngram
	return r
}
// Per-language ISO-8859-2 recognizer constructors.
//
// BUG FIX: these four previously delegated to newRecognizer_8859_1, which
// paired the Central-European (ISO-8859-2) trigram tables with the
// ISO-8859-1 char map and reported the wrong charset/windows fallback.
// They must go through newRecognizer_8859_2.
func newRecognizer_8859_2_cs() *recognizerSingleByte {
	return newRecognizer_8859_2("cs", &ngrams_8859_2_cs)
}
func newRecognizer_8859_2_hu() *recognizerSingleByte {
	return newRecognizer_8859_2("hu", &ngrams_8859_2_hu)
}
func newRecognizer_8859_2_pl() *recognizerSingleByte {
	return newRecognizer_8859_2("pl", &ngrams_8859_2_pl)
}
func newRecognizer_8859_2_ro() *recognizerSingleByte {
	return newRecognizer_8859_2("ro", &ngrams_8859_2_ro)
}
// charMap_8859_5 folds raw ISO-8859-5 (Cyrillic) input bytes for trigram
// matching: ASCII A-Z is lowercased, the upper-case Cyrillic range
// (0xB0-0xCF) is folded onto the lower-case range (0xD0-0xEF), and unmapped
// bytes collapse to 0x20 (space). NOTE(review): index 0x27 maps to 0x00, as
// in the other charMaps — confirm its role in the matching loop.
var charMap_8859_5 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0x20, 0xFE, 0xFF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0x20, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0x20, 0xFE, 0xFF,
}
// ngrams_8859_5_ru: 64 byte trigrams of Russian text in ISO-8859-5, packed
// big-endian into the low 3 bytes of each uint32, listed ascending.
var ngrams_8859_5_ru = [64]uint32{
0x20D220, 0x20D2DE, 0x20D4DE, 0x20D7D0, 0x20D820, 0x20DAD0, 0x20DADE, 0x20DDD0, 0x20DDD5, 0x20DED1, 0x20DFDE, 0x20DFE0, 0x20E0D0, 0x20E1DE, 0x20E1E2, 0x20E2DE,
0x20E7E2, 0x20EDE2, 0xD0DDD8, 0xD0E2EC, 0xD3DE20, 0xD5DBEC, 0xD5DDD8, 0xD5E1E2, 0xD5E220, 0xD820DF, 0xD8D520, 0xD8D820, 0xD8EF20, 0xDBD5DD, 0xDBD820, 0xDBECDD,
0xDDD020, 0xDDD520, 0xDDD8D5, 0xDDD8EF, 0xDDDE20, 0xDDDED2, 0xDE20D2, 0xDE20DF, 0xDE20E1, 0xDED220, 0xDED2D0, 0xDED3DE, 0xDED920, 0xDEDBEC, 0xDEDC20, 0xDEE1E2,
0xDFDEDB, 0xDFE0D5, 0xDFE0D8, 0xDFE0DE, 0xE0D0D2, 0xE0D5D4, 0xE1E2D0, 0xE1E2D2, 0xE1E2D8, 0xE1EF20, 0xE2D5DB, 0xE2DE20, 0xE2DEE0, 0xE2EC20, 0xE7E2DE, 0xEBE520,
}
// newRecognizer_8859_5 builds a single-byte recognizer for the given language
// over the shared ISO-8859-5 char map. Unlike the 8859-1/2 variants it sets
// no hasC1ByteCharset fallback.
func newRecognizer_8859_5(language string, ngram *[64]uint32) *recognizerSingleByte {
	r := &recognizerSingleByte{}
	r.charset = "ISO-8859-5"
	r.language = language
	r.charMap = &charMap_8859_5
	r.ngram = ngram
	return r
}

// newRecognizer_8859_5_ru: Russian in ISO-8859-5.
func newRecognizer_8859_5_ru() *recognizerSingleByte {
	return newRecognizer_8859_5("ru", &ngrams_8859_5_ru)
}
// charMap_8859_6 folds raw ISO-8859-6 (Arabic) input bytes for trigram
// matching: ASCII A-Z is lowercased, the Arabic letter ranges (0xC1-0xDA,
// 0xE0-0xEA) map to themselves, and unmapped bytes collapse to 0x20 (space).
// Arabic has no letter case, so no case folding is involved here.
// NOTE(review): index 0x27 maps to 0x00, as in the other charMaps.
var charMap_8859_6 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
0xD8, 0xD9, 0xDA, 0x20, 0x20, 0x20, 0x20, 0x20,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
}
// ngrams_8859_6_ar: 64 byte trigrams of Arabic text in ISO-8859-6, packed
// big-endian into the low 3 bytes of each uint32, listed ascending.
var ngrams_8859_6_ar = [64]uint32{
0x20C7E4, 0x20C7E6, 0x20C8C7, 0x20D9E4, 0x20E1EA, 0x20E4E4, 0x20E5E6, 0x20E8C7, 0xC720C7, 0xC7C120, 0xC7CA20, 0xC7D120, 0xC7E420, 0xC7E4C3, 0xC7E4C7, 0xC7E4C8,
0xC7E4CA, 0xC7E4CC, 0xC7E4CD, 0xC7E4CF, 0xC7E4D3, 0xC7E4D9, 0xC7E4E2, 0xC7E4E5, 0xC7E4E8, 0xC7E4EA, 0xC7E520, 0xC7E620, 0xC7E6CA, 0xC820C7, 0xC920C7, 0xC920E1,
0xC920E4, 0xC920E5, 0xC920E8, 0xCA20C7, 0xCF20C7, 0xCFC920, 0xD120C7, 0xD1C920, 0xD320C7, 0xD920C7, 0xD9E4E9, 0xE1EA20, 0xE420C7, 0xE4C920, 0xE4E920, 0xE4EA20,
0xE520C7, 0xE5C720, 0xE5C920, 0xE5E620, 0xE620C7, 0xE720C7, 0xE7C720, 0xE8C7E4, 0xE8E620, 0xE920C7, 0xEA20C7, 0xEA20E5, 0xEA20E8, 0xEAC920, 0xEAD120, 0xEAE620,
}
// newRecognizer_8859_6 builds a single-byte recognizer for the given language
// over the shared ISO-8859-6 (Arabic) char map; no C1 fallback charset is set.
func newRecognizer_8859_6(language string, ngram *[64]uint32) *recognizerSingleByte {
	r := &recognizerSingleByte{}
	r.charset = "ISO-8859-6"
	r.language = language
	r.charMap = &charMap_8859_6
	r.ngram = ngram
	return r
}

// newRecognizer_8859_6_ar: Arabic in ISO-8859-6.
func newRecognizer_8859_6_ar() *recognizerSingleByte {
	return newRecognizer_8859_6("ar", &ngrams_8859_6_ar)
}
// charMap_8859_7 folds raw ISO-8859-7 (Greek) input bytes for trigram
// matching: ASCII A-Z is lowercased, upper-case Greek letters (0xC0-0xDB
// range) are folded onto the lower-case range (0xE0-0xFB), accented capitals
// at 0xB6-0xBF fold onto their accented lower-case forms, and unmapped bytes
// collapse to 0x20 (space). NOTE(review): index 0x27 maps to 0x00.
var charMap_8859_7 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0xA1, 0xA2, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0xDC, 0x20,
0xDD, 0xDE, 0xDF, 0x20, 0xFC, 0x20, 0xFD, 0xFE,
0xC0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0x20, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xDC, 0xDD, 0xDE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0x20,
}
// ngrams_8859_7_el: 64 byte trigrams of Greek text in ISO-8859-7, packed
// big-endian into the low 3 bytes of each uint32, listed ascending.
var ngrams_8859_7_el = [64]uint32{
0x20E1ED, 0x20E1F0, 0x20E3E9, 0x20E4E9, 0x20E5F0, 0x20E720, 0x20EAE1, 0x20ECE5, 0x20EDE1, 0x20EF20, 0x20F0E1, 0x20F0EF, 0x20F0F1, 0x20F3F4, 0x20F3F5, 0x20F4E7,
0x20F4EF, 0xDFE120, 0xE120E1, 0xE120F4, 0xE1E920, 0xE1ED20, 0xE1F0FC, 0xE1F220, 0xE3E9E1, 0xE5E920, 0xE5F220, 0xE720F4, 0xE7ED20, 0xE7F220, 0xE920F4, 0xE9E120,
0xE9EADE, 0xE9F220, 0xEAE1E9, 0xEAE1F4, 0xECE520, 0xED20E1, 0xED20E5, 0xED20F0, 0xEDE120, 0xEFF220, 0xEFF520, 0xF0EFF5, 0xF0F1EF, 0xF0FC20, 0xF220E1, 0xF220E5,
0xF220EA, 0xF220F0, 0xF220F4, 0xF3E520, 0xF3E720, 0xF3F4EF, 0xF4E120, 0xF4E1E9, 0xF4E7ED, 0xF4E7F2, 0xF4E9EA, 0xF4EF20, 0xF4EFF5, 0xF4F9ED, 0xF9ED20, 0xFEED20,
}
// newRecognizer_8859_7 builds a single-byte recognizer for the given language
// over the shared ISO-8859-7 char map; windows-1253 is the charset named when
// C1-range bytes occur (hasC1ByteCharset).
func newRecognizer_8859_7(language string, ngram *[64]uint32) *recognizerSingleByte {
	r := &recognizerSingleByte{}
	r.charset = "ISO-8859-7"
	r.hasC1ByteCharset = "windows-1253"
	r.language = language
	r.charMap = &charMap_8859_7
	r.ngram = ngram
	return r
}

// newRecognizer_8859_7_el: Greek in ISO-8859-7.
func newRecognizer_8859_7_el() *recognizerSingleByte {
	return newRecognizer_8859_7("el", &ngrams_8859_7_el)
}
// charMap_8859_8 folds raw ISO-8859-8 (Hebrew) input bytes for trigram
// matching: ASCII A-Z is lowercased, Hebrew letters (0xE0-0xFA) map to
// themselves (Hebrew has no case), and unmapped bytes collapse to 0x20
// (space). NOTE(review): index 0x27 maps to 0x00, as in the other charMaps.
var charMap_8859_8 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0xB5, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0x20, 0x20, 0x20, 0x20, 0x20,
}
// ngrams_8859_8_I_he: 64 byte trigrams for Hebrew in ISO-8859-8-I (logical
// order), packed big-endian into the low 3 bytes of each uint32, ascending.
var ngrams_8859_8_I_he = [64]uint32{
0x20E0E5, 0x20E0E7, 0x20E0E9, 0x20E0FA, 0x20E1E9, 0x20E1EE, 0x20E4E0, 0x20E4E5, 0x20E4E9, 0x20E4EE, 0x20E4F2, 0x20E4F9, 0x20E4FA, 0x20ECE0, 0x20ECE4, 0x20EEE0,
0x20F2EC, 0x20F9EC, 0xE0FA20, 0xE420E0, 0xE420E1, 0xE420E4, 0xE420EC, 0xE420EE, 0xE420F9, 0xE4E5E0, 0xE5E020, 0xE5ED20, 0xE5EF20, 0xE5F820, 0xE5FA20, 0xE920E4,
0xE9E420, 0xE9E5FA, 0xE9E9ED, 0xE9ED20, 0xE9EF20, 0xE9F820, 0xE9FA20, 0xEC20E0, 0xEC20E4, 0xECE020, 0xECE420, 0xED20E0, 0xED20E1, 0xED20E4, 0xED20EC, 0xED20EE,
0xED20F9, 0xEEE420, 0xEF20E4, 0xF0E420, 0xF0E920, 0xF0E9ED, 0xF2EC20, 0xF820E4, 0xF8E9ED, 0xF9EC20, 0xFA20E0, 0xFA20E1, 0xFA20E4, 0xFA20EC, 0xFA20EE, 0xFA20F9,
}
// ngrams_8859_8_he: as above, for ISO-8859-8 (visual order) Hebrew — note
// the trigram set differs from the -I table, reflecting reversed text order.
var ngrams_8859_8_he = [64]uint32{
0x20E0E5, 0x20E0EC, 0x20E4E9, 0x20E4EC, 0x20E4EE, 0x20E4F0, 0x20E9F0, 0x20ECF2, 0x20ECF9, 0x20EDE5, 0x20EDE9, 0x20EFE5, 0x20EFE9, 0x20F8E5, 0x20F8E9, 0x20FAE0,
0x20FAE5, 0x20FAE9, 0xE020E4, 0xE020EC, 0xE020ED, 0xE020FA, 0xE0E420, 0xE0E5E4, 0xE0EC20, 0xE0EE20, 0xE120E4, 0xE120ED, 0xE120FA, 0xE420E4, 0xE420E9, 0xE420EC,
0xE420ED, 0xE420EF, 0xE420F8, 0xE420FA, 0xE4EC20, 0xE5E020, 0xE5E420, 0xE7E020, 0xE9E020, 0xE9E120, 0xE9E420, 0xEC20E4, 0xEC20ED, 0xEC20FA, 0xECF220, 0xECF920,
0xEDE9E9, 0xEDE9F0, 0xEDE9F8, 0xEE20E4, 0xEE20ED, 0xEE20FA, 0xEEE120, 0xEEE420, 0xF2E420, 0xF920E4, 0xF920ED, 0xF920FA, 0xF9E420, 0xFAE020, 0xFAE420, 0xFAE5E9,
}
// newRecognizer_8859_8 builds a Hebrew-family recognizer over the shared
// ISO-8859-8 char map; windows-1255 is named when C1-range bytes occur.
func newRecognizer_8859_8(language string, ngram *[64]uint32) *recognizerSingleByte {
	r := &recognizerSingleByte{}
	r.charset = "ISO-8859-8"
	r.hasC1ByteCharset = "windows-1255"
	r.language = language
	r.charMap = &charMap_8859_8
	r.ngram = ngram
	return r
}

// newRecognizer_8859_8_I_he: logical-order Hebrew; reuses the 8859-8 setup
// but overrides the reported charset name to ISO-8859-8-I.
func newRecognizer_8859_8_I_he() *recognizerSingleByte {
	rec := newRecognizer_8859_8("he", &ngrams_8859_8_I_he)
	rec.charset = "ISO-8859-8-I"
	return rec
}

// newRecognizer_8859_8_he: visual-order Hebrew in plain ISO-8859-8.
func newRecognizer_8859_8_he() *recognizerSingleByte {
	return newRecognizer_8859_8("he", &ngrams_8859_8_he)
}
// charMap_8859_9 folds raw ISO-8859-9 (Turkish) input bytes for trigram
// matching: ASCII A-Z is lowercased and the accented upper-case range
// (0xC0-0xDF) is folded onto the lower-case range. Note index 0xDD (Turkish
// dotted capital I) folds to ASCII 0x69 'i', unlike the generic Latin fold.
// NOTE(review): index 0x27 maps to 0x00, as in the other charMaps.
var charMap_8859_9 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0xAA, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0xB5, 0x20, 0x20,
0x20, 0x20, 0xBA, 0x20, 0x20, 0x20, 0x20, 0x20,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0x69, 0xFE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
}
// ngrams_8859_9_tr: 64 byte trigrams of Turkish text in ISO-8859-9, packed
// big-endian into the low 3 bytes of each uint32, listed ascending.
var ngrams_8859_9_tr = [64]uint32{
0x206261, 0x206269, 0x206275, 0x206461, 0x206465, 0x206765, 0x206861, 0x20696C, 0x206B61, 0x206B6F, 0x206D61, 0x206F6C, 0x207361, 0x207461, 0x207665, 0x207961,
0x612062, 0x616B20, 0x616C61, 0x616D61, 0x616E20, 0x616EFD, 0x617220, 0x617261, 0x6172FD, 0x6173FD, 0x617961, 0x626972, 0x646120, 0x646520, 0x646920, 0x652062,
0x65206B, 0x656469, 0x656E20, 0x657220, 0x657269, 0x657369, 0x696C65, 0x696E20, 0x696E69, 0x697220, 0x6C616E, 0x6C6172, 0x6C6520, 0x6C6572, 0x6E2061, 0x6E2062,
0x6E206B, 0x6E6461, 0x6E6465, 0x6E6520, 0x6E6920, 0x6E696E, 0x6EFD20, 0x72696E, 0x72FD6E, 0x766520, 0x796120, 0x796F72, 0xFD6E20, 0xFD6E64, 0xFD6EFD, 0xFDF0FD,
}
// newRecognizer_8859_9 builds a single-byte recognizer for the given language
// over the shared ISO-8859-9 char map; windows-1254 is named when C1-range
// bytes occur (hasC1ByteCharset).
func newRecognizer_8859_9(language string, ngram *[64]uint32) *recognizerSingleByte {
	r := &recognizerSingleByte{}
	r.charset = "ISO-8859-9"
	r.hasC1ByteCharset = "windows-1254"
	r.language = language
	r.charMap = &charMap_8859_9
	r.ngram = ngram
	return r
}

// newRecognizer_8859_9_tr: Turkish in ISO-8859-9.
func newRecognizer_8859_9_tr() *recognizerSingleByte {
	return newRecognizer_8859_9("tr", &ngrams_8859_9_tr)
}
// charMap_windows_1256 folds raw windows-1256 (Arabic) input bytes for
// trigram matching: ASCII A-Z is lowercased, Arabic letters map to
// themselves, a handful of 0x80-0x9F code points are kept, and unmapped
// bytes collapse to 0x20 (space). NOTE(review): index 0x27 maps to 0x00.
var charMap_windows_1256 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x81, 0x20, 0x83, 0x20, 0x20, 0x20, 0x20,
0x88, 0x20, 0x8A, 0x20, 0x9C, 0x8D, 0x8E, 0x8F,
0x90, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x98, 0x20, 0x9A, 0x20, 0x9C, 0x20, 0x20, 0x9F,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0xAA, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0xB5, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0x20,
0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0x20, 0x20, 0x20, 0x20, 0xF4, 0x20, 0x20, 0x20,
0x20, 0xF9, 0x20, 0xFB, 0xFC, 0x20, 0x20, 0xFF,
}
// ngrams_windows_1256: 64 byte trigrams of Arabic text in windows-1256,
// packed big-endian into the low 3 bytes of each uint32, listed ascending.
var ngrams_windows_1256 = [64]uint32{
0x20C7E1, 0x20C7E4, 0x20C8C7, 0x20DAE1, 0x20DDED, 0x20E1E1, 0x20E3E4, 0x20E6C7, 0xC720C7, 0xC7C120, 0xC7CA20, 0xC7D120, 0xC7E120, 0xC7E1C3, 0xC7E1C7, 0xC7E1C8,
0xC7E1CA, 0xC7E1CC, 0xC7E1CD, 0xC7E1CF, 0xC7E1D3, 0xC7E1DA, 0xC7E1DE, 0xC7E1E3, 0xC7E1E6, 0xC7E1ED, 0xC7E320, 0xC7E420, 0xC7E4CA, 0xC820C7, 0xC920C7, 0xC920DD,
0xC920E1, 0xC920E3, 0xC920E6, 0xCA20C7, 0xCF20C7, 0xCFC920, 0xD120C7, 0xD1C920, 0xD320C7, 0xDA20C7, 0xDAE1EC, 0xDDED20, 0xE120C7, 0xE1C920, 0xE1EC20, 0xE1ED20,
0xE320C7, 0xE3C720, 0xE3C920, 0xE3E420, 0xE420C7, 0xE520C7, 0xE5C720, 0xE6C7E1, 0xE6E420, 0xEC20C7, 0xED20C7, 0xED20E3, 0xED20E6, 0xEDC920, 0xEDD120, 0xEDE420,
}
// newRecognizer_windows_1256 builds the recognizer for Arabic text encoded
// in windows-1256; no C1 fallback charset is set (1256 already uses C1).
func newRecognizer_windows_1256() *recognizerSingleByte {
	r := &recognizerSingleByte{}
	r.charset = "windows-1256"
	r.language = "ar"
	r.charMap = &charMap_windows_1256
	r.ngram = &ngrams_windows_1256
	return r
}
// charMap_windows_1251 folds raw windows-1251 (Cyrillic) input bytes for
// trigram matching: ASCII A-Z is lowercased, the upper-case Cyrillic range
// (0xC0-0xDF) is folded onto the lower-case range (0xE0-0xFF), and the
// 0x80-0xBF rows fold the assorted upper-case/extra Cyrillic letters onto
// their lower-case forms; unmapped bytes collapse to 0x20 (space).
// NOTE(review): index 0x27 maps to 0x00, as in the other charMaps.
var charMap_windows_1251 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x90, 0x83, 0x20, 0x83, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x9A, 0x20, 0x9C, 0x9D, 0x9E, 0x9F,
0x90, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x9A, 0x20, 0x9C, 0x9D, 0x9E, 0x9F,
0x20, 0xA2, 0xA2, 0xBC, 0x20, 0xB4, 0x20, 0x20,
0xB8, 0x20, 0xBA, 0x20, 0x20, 0x20, 0x20, 0xBF,
0x20, 0x20, 0xB3, 0xB3, 0xB4, 0xB5, 0x20, 0x20,
0xB8, 0x20, 0xBA, 0x20, 0xBC, 0xBE, 0xBE, 0xBF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
}
// ngrams_windows_1251: 64 byte trigrams of Russian text in windows-1251,
// packed big-endian into the low 3 bytes of each uint32, ascending. The
// trigrams mirror ngrams_8859_5_ru with Cyrillic bytes shifted to the
// windows-1251 layout.
var ngrams_windows_1251 = [64]uint32{
0x20E220, 0x20E2EE, 0x20E4EE, 0x20E7E0, 0x20E820, 0x20EAE0, 0x20EAEE, 0x20EDE0, 0x20EDE5, 0x20EEE1, 0x20EFEE, 0x20EFF0, 0x20F0E0, 0x20F1EE, 0x20F1F2, 0x20F2EE,
0x20F7F2, 0x20FDF2, 0xE0EDE8, 0xE0F2FC, 0xE3EE20, 0xE5EBFC, 0xE5EDE8, 0xE5F1F2, 0xE5F220, 0xE820EF, 0xE8E520, 0xE8E820, 0xE8FF20, 0xEBE5ED, 0xEBE820, 0xEBFCED,
0xEDE020, 0xEDE520, 0xEDE8E5, 0xEDE8FF, 0xEDEE20, 0xEDEEE2, 0xEE20E2, 0xEE20EF, 0xEE20F1, 0xEEE220, 0xEEE2E0, 0xEEE3EE, 0xEEE920, 0xEEEBFC, 0xEEEC20, 0xEEF1F2,
0xEFEEEB, 0xEFF0E5, 0xEFF0E8, 0xEFF0EE, 0xF0E0E2, 0xF0E5E4, 0xF1F2E0, 0xF1F2E2, 0xF1F2E8, 0xF1FF20, 0xF2E5EB, 0xF2EE20, 0xF2EEF0, 0xF2FC20, 0xF7F2EE, 0xFBF520,
}
// newRecognizer_windows_1251 builds the recognizer for Russian text encoded
// in windows-1251 (a Cyrillic code page).
//
// BUG FIX: the language tag was "ar"; windows-1251 is Cyrillic and its ngram
// table here is the Russian trigram set (the ISO-8859-5 "ru" trigrams shifted
// to windows-1251 byte values), so the correct tag is "ru" — matching the
// KOI8-R recognizer in this file.
func newRecognizer_windows_1251() *recognizerSingleByte {
	return &recognizerSingleByte{
		charset:  "windows-1251",
		language: "ru",
		charMap:  &charMap_windows_1251,
		ngram:    &ngrams_windows_1251,
	}
}
// charMap_KOI8_R folds raw KOI8-R (Russian) input bytes for trigram
// matching: ASCII A-Z is lowercased and the upper-case Cyrillic range
// (0xE0-0xFF) is folded onto the lower-case range (0xC0-0xDF); in KOI8-R,
// unlike ISO/windows layouts, lower case sits BELOW upper case. Unmapped
// bytes collapse to 0x20 (space). NOTE(review): index 0x27 maps to 0x00.
var charMap_KOI8_R = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0xA3, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0xA3, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
}
// ngrams_KOI8_R: 64 byte trigrams of Russian text in KOI8-R, packed
// big-endian into the low 3 bytes of each uint32, listed ascending.
var ngrams_KOI8_R = [64]uint32{
0x20C4CF, 0x20C920, 0x20CBC1, 0x20CBCF, 0x20CEC1, 0x20CEC5, 0x20CFC2, 0x20D0CF, 0x20D0D2, 0x20D2C1, 0x20D3CF, 0x20D3D4, 0x20D4CF, 0x20D720, 0x20D7CF, 0x20DAC1,
0x20DCD4, 0x20DED4, 0xC1CEC9, 0xC1D4D8, 0xC5CCD8, 0xC5CEC9, 0xC5D3D4, 0xC5D420, 0xC7CF20, 0xC920D0, 0xC9C520, 0xC9C920, 0xC9D120, 0xCCC5CE, 0xCCC920, 0xCCD8CE,
0xCEC120, 0xCEC520, 0xCEC9C5, 0xCEC9D1, 0xCECF20, 0xCECFD7, 0xCF20D0, 0xCF20D3, 0xCF20D7, 0xCFC7CF, 0xCFCA20, 0xCFCCD8, 0xCFCD20, 0xCFD3D4, 0xCFD720, 0xCFD7C1,
0xD0CFCC, 0xD0D2C5, 0xD0D2C9, 0xD0D2CF, 0xD2C1D7, 0xD2C5C4, 0xD3D120, 0xD3D4C1, 0xD3D4C9, 0xD3D4D7, 0xD4C5CC, 0xD4CF20, 0xD4CFD2, 0xD4D820, 0xD9C820, 0xDED4CF,
}
// newRecognizer_KOI8_R builds the recognizer for Russian text in KOI8-R.
func newRecognizer_KOI8_R() *recognizerSingleByte {
	r := &recognizerSingleByte{}
	r.charset = "KOI8-R"
	r.language = "ru"
	r.charMap = &charMap_KOI8_R
	r.ngram = &ngrams_KOI8_R
	return r
}
// charMap_IBM424_he folds raw IBM424 (EBCDIC Hebrew) input bytes for trigram
// matching. The filler byte here is 0x40 (EBCDIC space) rather than 0x20;
// Hebrew letter code points map to themselves and the "upper" 0xC1-0xE9
// Latin rows fold onto the 0x81-0xA9 rows. NOTE(review): index 0x7D (EBCDIC
// apostrophe) maps to 0x00 — the EBCDIC counterpart of the 0x27→0x00
// mapping in the ASCII-based tables; confirm in the matching loop.
var charMap_IBM424_he = [256]byte{
/* -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -A -B -C -D -E -F */
/* 0- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 1- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 2- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 3- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 4- */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 5- */ 0x40, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 6- */ 0x40, 0x40, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 7- */ 0x40, 0x71, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x00, 0x40, 0x40,
/* 8- */ 0x40, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 9- */ 0x40, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* A- */ 0xA0, 0x40, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* B- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* C- */ 0x40, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* D- */ 0x40, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* E- */ 0x40, 0x40, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* F- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
}
// ngrams_IBM424_he_rtl: 64 byte trigrams for Hebrew in IBM424 stored in
// right-to-left (visual) order; packed big-endian into the low 3 bytes of
// each uint32, listed ascending.
var ngrams_IBM424_he_rtl = [64]uint32{
0x404146, 0x404148, 0x404151, 0x404171, 0x404251, 0x404256, 0x404541, 0x404546, 0x404551, 0x404556, 0x404562, 0x404569, 0x404571, 0x405441, 0x405445, 0x405641,
0x406254, 0x406954, 0x417140, 0x454041, 0x454042, 0x454045, 0x454054, 0x454056, 0x454069, 0x454641, 0x464140, 0x465540, 0x465740, 0x466840, 0x467140, 0x514045,
0x514540, 0x514671, 0x515155, 0x515540, 0x515740, 0x516840, 0x517140, 0x544041, 0x544045, 0x544140, 0x544540, 0x554041, 0x554042, 0x554045, 0x554054, 0x554056,
0x554069, 0x564540, 0x574045, 0x584540, 0x585140, 0x585155, 0x625440, 0x684045, 0x685155, 0x695440, 0x714041, 0x714042, 0x714045, 0x714054, 0x714056, 0x714069,
}
// ngrams_IBM424_he_ltr: as above, for text stored in left-to-right (logical)
// order — note the differing trigram set.
var ngrams_IBM424_he_ltr = [64]uint32{
0x404146, 0x404154, 0x404551, 0x404554, 0x404556, 0x404558, 0x405158, 0x405462, 0x405469, 0x405546, 0x405551, 0x405746, 0x405751, 0x406846, 0x406851, 0x407141,
0x407146, 0x407151, 0x414045, 0x414054, 0x414055, 0x414071, 0x414540, 0x414645, 0x415440, 0x415640, 0x424045, 0x424055, 0x424071, 0x454045, 0x454051, 0x454054,
0x454055, 0x454057, 0x454068, 0x454071, 0x455440, 0x464140, 0x464540, 0x484140, 0x514140, 0x514240, 0x514540, 0x544045, 0x544055, 0x544071, 0x546240, 0x546940,
0x555151, 0x555158, 0x555168, 0x564045, 0x564055, 0x564071, 0x564240, 0x564540, 0x624540, 0x694045, 0x694055, 0x694071, 0x694540, 0x714140, 0x714540, 0x714651,
}
// newRecognizer_IBM424_he builds a Hebrew recognizer over the shared IBM424
// (EBCDIC) char map; the caller supplies the reported charset name and the
// direction-specific trigram table.
func newRecognizer_IBM424_he(charset string, ngram *[64]uint32) *recognizerSingleByte {
	r := &recognizerSingleByte{}
	r.charset = charset
	r.language = "he"
	r.charMap = &charMap_IBM424_he
	r.ngram = ngram
	return r
}

// Direction-specific IBM424 Hebrew constructors (visual vs. logical order).
func newRecognizer_IBM424_he_rtl() *recognizerSingleByte {
	return newRecognizer_IBM424_he("IBM424_rtl", &ngrams_IBM424_he_rtl)
}
func newRecognizer_IBM424_he_ltr() *recognizerSingleByte {
	return newRecognizer_IBM424_he("IBM424_ltr", &ngrams_IBM424_he_ltr)
}
// charMap_IBM420_ar folds raw IBM420 (EBCDIC Arabic) input bytes for trigram
// matching. The filler byte is 0x40 (EBCDIC space); Arabic letter code
// points largely map to themselves and the upper-case Latin rows (0xC1-0xE9)
// fold onto the lower-case rows (0x81-0xA9). NOTE(review): unlike IBM424,
// no byte maps to 0x00 here — confirm whether that is intentional.
var charMap_IBM420_ar = [256]byte{
/* -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -A -B -C -D -E -F */
/* 0- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 1- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 2- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 3- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 4- */ 0x40, 0x40, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 5- */ 0x40, 0x51, 0x52, 0x40, 0x40, 0x55, 0x56, 0x57, 0x58, 0x59, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 6- */ 0x40, 0x40, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 7- */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 8- */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
/* 9- */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F,
/* A- */ 0xA0, 0x40, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
/* B- */ 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0x40, 0x40, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
/* C- */ 0x40, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x40, 0xCB, 0x40, 0xCD, 0x40, 0xCF,
/* D- */ 0x40, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
/* E- */ 0x40, 0x40, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xEA, 0xEB, 0x40, 0xED, 0xEE, 0xEF,
/* F- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0xFB, 0xFC, 0xFD, 0xFE, 0x40,
}
// ngrams_IBM420_ar_rtl: 64 byte trigrams for Arabic in IBM420 stored in
// right-to-left (visual) order; packed big-endian into the low 3 bytes of
// each uint32, listed ascending.
var ngrams_IBM420_ar_rtl = [64]uint32{
0x4056B1, 0x4056BD, 0x405856, 0x409AB1, 0x40ABDC, 0x40B1B1, 0x40BBBD, 0x40CF56, 0x564056, 0x564640, 0x566340, 0x567540, 0x56B140, 0x56B149, 0x56B156, 0x56B158,
0x56B163, 0x56B167, 0x56B169, 0x56B173, 0x56B178, 0x56B19A, 0x56B1AD, 0x56B1BB, 0x56B1CF, 0x56B1DC, 0x56BB40, 0x56BD40, 0x56BD63, 0x584056, 0x624056, 0x6240AB,
0x6240B1, 0x6240BB, 0x6240CF, 0x634056, 0x734056, 0x736240, 0x754056, 0x756240, 0x784056, 0x9A4056, 0x9AB1DA, 0xABDC40, 0xB14056, 0xB16240, 0xB1DA40, 0xB1DC40,
0xBB4056, 0xBB5640, 0xBB6240, 0xBBBD40, 0xBD4056, 0xBF4056, 0xBF5640, 0xCF56B1, 0xCFBD40, 0xDA4056, 0xDC4056, 0xDC40BB, 0xDC40CF, 0xDC6240, 0xDC7540, 0xDCBD40,
}
// ngrams_IBM420_ar_ltr: as above, for text stored in left-to-right (logical)
// order — note the differing trigram set.
var ngrams_IBM420_ar_ltr = [64]uint32{
0x404656, 0x4056BB, 0x4056BF, 0x406273, 0x406275, 0x4062B1, 0x4062BB, 0x4062DC, 0x406356, 0x407556, 0x4075DC, 0x40B156, 0x40BB56, 0x40BD56, 0x40BDBB, 0x40BDCF,
0x40BDDC, 0x40DAB1, 0x40DCAB, 0x40DCB1, 0x49B156, 0x564056, 0x564058, 0x564062, 0x564063, 0x564073, 0x564075, 0x564078, 0x56409A, 0x5640B1, 0x5640BB, 0x5640BD,
0x5640BF, 0x5640DA, 0x5640DC, 0x565840, 0x56B156, 0x56CF40, 0x58B156, 0x63B156, 0x63BD56, 0x67B156, 0x69B156, 0x73B156, 0x78B156, 0x9AB156, 0xAB4062, 0xADB156,
0xB14062, 0xB15640, 0xB156CF, 0xB19A40, 0xB1B140, 0xBB4062, 0xBB40DC, 0xBBB156, 0xBD5640, 0xBDBB40, 0xCF4062, 0xCF40DC, 0xCFB156, 0xDAB19A, 0xDCAB40, 0xDCB156,
}
// newRecognizer_IBM420_ar builds a recognizer for the EBCDIC IBM420 Arabic
// code page, reported under the given charset label.
func newRecognizer_IBM420_ar(charset string, ngram *[64]uint32) *recognizerSingleByte {
	rec := &recognizerSingleByte{
		language: "ar",
		charset:  charset,
		charMap:  &charMap_IBM420_ar,
		ngram:    ngram,
	}
	return rec
}

// newRecognizer_IBM420_ar_rtl binds the right-to-left trigram table.
func newRecognizer_IBM420_ar_rtl() *recognizerSingleByte {
	return newRecognizer_IBM420_ar("IBM420_rtl", &ngrams_IBM420_ar_rtl)
}

// newRecognizer_IBM420_ar_ltr binds the left-to-right trigram table.
func newRecognizer_IBM420_ar_ltr() *recognizerSingleByte {
	return newRecognizer_IBM420_ar("IBM420_ltr", &ngrams_IBM420_ar_ltr)
}
// Fix one more bug
package chardet
// Recognizer for single byte charset family
type recognizerSingleByte struct {
charset string // charset name reported on a match
hasC1ByteCharset string // alternate name reported when the input contains C1 bytes (e.g. windows-125x); empty disables the override
language string // language code reported on a match
charMap *[256]byte // input byte -> n-gram class byte; 0x00 entries are dropped entirely by parseNgram
ngram *[64]uint32 // trigram reference table, sorted ascending (binary-searched by ngramState.lookup)
}
// Match scores the input against this recognizer's trigram model and
// reports the detected charset, language, and a confidence value.
func (r *recognizerSingleByte) Match(input *recognizerInput) recognizerOutput {
	name := r.charset
	// When the input contains C1 bytes and an alternate charset is
	// configured, report that superset charset instead.
	if input.hasC1Bytes && len(r.hasC1ByteCharset) > 0 {
		name = r.hasC1ByteCharset
	}
	return recognizerOutput{
		Charset:    name,
		Confidence: r.parseNgram(input.input),
		Language:   r.language,
	}
}
// ngramState slides a three-byte window over the mapped input stream and
// counts how many of the resulting trigrams occur in the reference table.
type ngramState struct {
ngram uint32 // low three bytes hold the current trigram window
ignoreSpace bool // true right after a space, so runs of spaces collapse to one
ngramCount, ngramHit uint32 // trigrams scanned / trigrams found in table
table *[64]uint32 // reference table, sorted ascending; binary-searched in lookup
}
// newNgramState returns a fresh scanner state bound to the given sorted
// trigram table.
func newNgramState(table *[64]uint32) *ngramState {
	// The zero value is correct for every field except the table pointer,
	// so only the table needs to be set explicitly.
	return &ngramState{table: table}
}
// AddByte feeds one mapped byte into the rolling three-byte window.
// Consecutive spaces are collapsed: a 0x20 byte is skipped when the
// previous byte was also 0x20. Each accepted byte extends the window,
// increments the sample count, and increments the hit count when the
// current trigram is present in the reference table.
func (s *ngramState) AddByte(b byte) {
	const ngramMask = 0xFFFFFF // keep only the three most recent bytes
	if !(b == 0x20 && s.ignoreSpace) {
		s.ngram = ((s.ngram << 8) | uint32(b)) & ngramMask
		// A dead store of ignoreSpace (derived from the ngram value) used
		// to sit here; it was unconditionally overwritten below before any
		// read, so it has been removed.
		s.ngramCount++
		if s.lookup() {
			s.ngramHit++
		}
	}
	s.ignoreSpace = (b == 0x20)
}
// HitRate reports the fraction of scanned trigrams that were found in the
// reference table; it is 0 before anything has been scanned.
func (s *ngramState) HitRate() float32 {
	if s.ngramCount > 0 {
		return float32(s.ngramHit) / float32(s.ngramCount)
	}
	return 0
}
// lookup reports whether the current trigram occurs in the sorted
// 64-entry reference table.
func (s *ngramState) lookup() bool {
	// Binary search over 64 sorted entries, expressed as a stride loop
	// (strides 32, 16, 8, 4, 2, 1 — the same probe sequence as the
	// original unrolled form). Afterwards pos is the index of the
	// largest entry <= ngram, or stays at 0 if every entry is larger.
	pos := 0
	for stride := 32; stride > 0; stride >>= 1 {
		if s.table[pos+stride] <= s.ngram {
			pos += stride
		}
	}
	if s.table[pos] > s.ngram {
		pos-- // table[0] > ngram: no candidate
	}
	return pos >= 0 && s.table[pos] == s.ngram
}
// parseNgram maps the raw input through the recognizer's byte-class map,
// streams it through an ngramState, and converts the trigram hit rate
// into an integer confidence value (capped at 98).
func (r *recognizerSingleByte) parseNgram(input []byte) int {
	scanner := newNgramState(r.ngram)
	for _, raw := range input {
		// Bytes mapped to 0 are dropped; everything else feeds the window.
		if mapped := r.charMap[raw]; mapped != 0 {
			scanner.AddByte(mapped)
		}
	}
	scanner.AddByte(0x20) // flush the final trigram with a trailing space
	rate := scanner.HitRate()
	if rate > 0.33 {
		return 98
	}
	return int(rate * 300)
}
// charMap_8859_1: byte-class map for ISO-8859-1. ASCII and Latin-1 letters
// fold to their lowercase forms, 0x27 (apostrophe) maps to 0x00 (dropped by
// parseNgram), and everything else maps to 0x20 (word boundary).
var charMap_8859_1 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0xAA, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0xB5, 0x20, 0x20,
0x20, 0x20, 0xBA, 0x20, 0x20, 0x20, 0x20, 0x20,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
}

// Per-language trigram reference tables for ISO-8859-1 text (mapped-byte
// form). Each is sorted ascending, as required by ngramState.lookup.
var ngrams_8859_1_en = [64]uint32{
0x206120, 0x20616E, 0x206265, 0x20636F, 0x20666F, 0x206861, 0x206865, 0x20696E, 0x206D61, 0x206F66, 0x207072, 0x207265, 0x207361, 0x207374, 0x207468, 0x20746F,
0x207768, 0x616964, 0x616C20, 0x616E20, 0x616E64, 0x617320, 0x617420, 0x617465, 0x617469, 0x642061, 0x642074, 0x652061, 0x652073, 0x652074, 0x656420, 0x656E74,
0x657220, 0x657320, 0x666F72, 0x686174, 0x686520, 0x686572, 0x696420, 0x696E20, 0x696E67, 0x696F6E, 0x697320, 0x6E2061, 0x6E2074, 0x6E6420, 0x6E6720, 0x6E7420,
0x6F6620, 0x6F6E20, 0x6F7220, 0x726520, 0x727320, 0x732061, 0x732074, 0x736169, 0x737420, 0x742074, 0x746572, 0x746861, 0x746865, 0x74696F, 0x746F20, 0x747320,
}
var ngrams_8859_1_da = [64]uint32{
0x206166, 0x206174, 0x206465, 0x20656E, 0x206572, 0x20666F, 0x206861, 0x206920, 0x206D65, 0x206F67, 0x2070E5, 0x207369, 0x207374, 0x207469, 0x207669, 0x616620,
0x616E20, 0x616E64, 0x617220, 0x617420, 0x646520, 0x64656E, 0x646572, 0x646574, 0x652073, 0x656420, 0x656465, 0x656E20, 0x656E64, 0x657220, 0x657265, 0x657320,
0x657420, 0x666F72, 0x676520, 0x67656E, 0x676572, 0x696765, 0x696C20, 0x696E67, 0x6B6520, 0x6B6B65, 0x6C6572, 0x6C6967, 0x6C6C65, 0x6D6564, 0x6E6465, 0x6E6520,
0x6E6720, 0x6E6765, 0x6F6720, 0x6F6D20, 0x6F7220, 0x70E520, 0x722064, 0x722065, 0x722073, 0x726520, 0x737465, 0x742073, 0x746520, 0x746572, 0x74696C, 0x766572,
}
var ngrams_8859_1_de = [64]uint32{
0x20616E, 0x206175, 0x206265, 0x206461, 0x206465, 0x206469, 0x206569, 0x206765, 0x206861, 0x20696E, 0x206D69, 0x207363, 0x207365, 0x20756E, 0x207665, 0x20766F,
0x207765, 0x207A75, 0x626572, 0x636820, 0x636865, 0x636874, 0x646173, 0x64656E, 0x646572, 0x646965, 0x652064, 0x652073, 0x65696E, 0x656974, 0x656E20, 0x657220,
0x657320, 0x67656E, 0x68656E, 0x687420, 0x696368, 0x696520, 0x696E20, 0x696E65, 0x697420, 0x6C6963, 0x6C6C65, 0x6E2061, 0x6E2064, 0x6E2073, 0x6E6420, 0x6E6465,
0x6E6520, 0x6E6720, 0x6E6765, 0x6E7465, 0x722064, 0x726465, 0x726569, 0x736368, 0x737465, 0x742064, 0x746520, 0x74656E, 0x746572, 0x756E64, 0x756E67, 0x766572,
}
var ngrams_8859_1_es = [64]uint32{
0x206120, 0x206361, 0x20636F, 0x206465, 0x20656C, 0x20656E, 0x206573, 0x20696E, 0x206C61, 0x206C6F, 0x207061, 0x20706F, 0x207072, 0x207175, 0x207265, 0x207365,
0x20756E, 0x207920, 0x612063, 0x612064, 0x612065, 0x61206C, 0x612070, 0x616369, 0x61646F, 0x616C20, 0x617220, 0x617320, 0x6369F3, 0x636F6E, 0x646520, 0x64656C,
0x646F20, 0x652064, 0x652065, 0x65206C, 0x656C20, 0x656E20, 0x656E74, 0x657320, 0x657374, 0x69656E, 0x69F36E, 0x6C6120, 0x6C6F73, 0x6E2065, 0x6E7465, 0x6F2064,
0x6F2065, 0x6F6E20, 0x6F7220, 0x6F7320, 0x706172, 0x717565, 0x726120, 0x726573, 0x732064, 0x732065, 0x732070, 0x736520, 0x746520, 0x746F20, 0x756520, 0xF36E20,
}
var ngrams_8859_1_fr = [64]uint32{
0x206175, 0x20636F, 0x206461, 0x206465, 0x206475, 0x20656E, 0x206574, 0x206C61, 0x206C65, 0x207061, 0x20706F, 0x207072, 0x207175, 0x207365, 0x20736F, 0x20756E,
0x20E020, 0x616E74, 0x617469, 0x636520, 0x636F6E, 0x646520, 0x646573, 0x647520, 0x652061, 0x652063, 0x652064, 0x652065, 0x65206C, 0x652070, 0x652073, 0x656E20,
0x656E74, 0x657220, 0x657320, 0x657420, 0x657572, 0x696F6E, 0x697320, 0x697420, 0x6C6120, 0x6C6520, 0x6C6573, 0x6D656E, 0x6E2064, 0x6E6520, 0x6E7320, 0x6E7420,
0x6F6E20, 0x6F6E74, 0x6F7572, 0x717565, 0x72206C, 0x726520, 0x732061, 0x732064, 0x732065, 0x73206C, 0x732070, 0x742064, 0x746520, 0x74696F, 0x756520, 0x757220,
}
var ngrams_8859_1_it = [64]uint32{
0x20616C, 0x206368, 0x20636F, 0x206465, 0x206469, 0x206520, 0x20696C, 0x20696E, 0x206C61, 0x207065, 0x207072, 0x20756E, 0x612063, 0x612064, 0x612070, 0x612073,
0x61746F, 0x636865, 0x636F6E, 0x64656C, 0x646920, 0x652061, 0x652063, 0x652064, 0x652069, 0x65206C, 0x652070, 0x652073, 0x656C20, 0x656C6C, 0x656E74, 0x657220,
0x686520, 0x692061, 0x692063, 0x692064, 0x692073, 0x696120, 0x696C20, 0x696E20, 0x696F6E, 0x6C6120, 0x6C6520, 0x6C6920, 0x6C6C61, 0x6E6520, 0x6E6920, 0x6E6F20,
0x6E7465, 0x6F2061, 0x6F2064, 0x6F2069, 0x6F2073, 0x6F6E20, 0x6F6E65, 0x706572, 0x726120, 0x726520, 0x736920, 0x746120, 0x746520, 0x746920, 0x746F20, 0x7A696F,
}
var ngrams_8859_1_nl = [64]uint32{
0x20616C, 0x206265, 0x206461, 0x206465, 0x206469, 0x206565, 0x20656E, 0x206765, 0x206865, 0x20696E, 0x206D61, 0x206D65, 0x206F70, 0x207465, 0x207661, 0x207665,
0x20766F, 0x207765, 0x207A69, 0x61616E, 0x616172, 0x616E20, 0x616E64, 0x617220, 0x617420, 0x636874, 0x646520, 0x64656E, 0x646572, 0x652062, 0x652076, 0x65656E,
0x656572, 0x656E20, 0x657220, 0x657273, 0x657420, 0x67656E, 0x686574, 0x696520, 0x696E20, 0x696E67, 0x697320, 0x6E2062, 0x6E2064, 0x6E2065, 0x6E2068, 0x6E206F,
0x6E2076, 0x6E6465, 0x6E6720, 0x6F6E64, 0x6F6F72, 0x6F7020, 0x6F7220, 0x736368, 0x737465, 0x742064, 0x746520, 0x74656E, 0x746572, 0x76616E, 0x766572, 0x766F6F,
}
var ngrams_8859_1_no = [64]uint32{
0x206174, 0x206176, 0x206465, 0x20656E, 0x206572, 0x20666F, 0x206861, 0x206920, 0x206D65, 0x206F67, 0x2070E5, 0x207365, 0x20736B, 0x20736F, 0x207374, 0x207469,
0x207669, 0x20E520, 0x616E64, 0x617220, 0x617420, 0x646520, 0x64656E, 0x646574, 0x652073, 0x656420, 0x656E20, 0x656E65, 0x657220, 0x657265, 0x657420, 0x657474,
0x666F72, 0x67656E, 0x696B6B, 0x696C20, 0x696E67, 0x6B6520, 0x6B6B65, 0x6C6520, 0x6C6C65, 0x6D6564, 0x6D656E, 0x6E2073, 0x6E6520, 0x6E6720, 0x6E6765, 0x6E6E65,
0x6F6720, 0x6F6D20, 0x6F7220, 0x70E520, 0x722073, 0x726520, 0x736F6D, 0x737465, 0x742073, 0x746520, 0x74656E, 0x746572, 0x74696C, 0x747420, 0x747465, 0x766572,
}
var ngrams_8859_1_pt = [64]uint32{
0x206120, 0x20636F, 0x206461, 0x206465, 0x20646F, 0x206520, 0x206573, 0x206D61, 0x206E6F, 0x206F20, 0x207061, 0x20706F, 0x207072, 0x207175, 0x207265, 0x207365,
0x20756D, 0x612061, 0x612063, 0x612064, 0x612070, 0x616465, 0x61646F, 0x616C20, 0x617220, 0x617261, 0x617320, 0x636F6D, 0x636F6E, 0x646120, 0x646520, 0x646F20,
0x646F73, 0x652061, 0x652064, 0x656D20, 0x656E74, 0x657320, 0x657374, 0x696120, 0x696361, 0x6D656E, 0x6E7465, 0x6E746F, 0x6F2061, 0x6F2063, 0x6F2064, 0x6F2065,
0x6F2070, 0x6F7320, 0x706172, 0x717565, 0x726120, 0x726573, 0x732061, 0x732064, 0x732065, 0x732070, 0x737461, 0x746520, 0x746F20, 0x756520, 0xE36F20, 0xE7E36F,
}
var ngrams_8859_1_sv = [64]uint32{
0x206174, 0x206176, 0x206465, 0x20656E, 0x2066F6, 0x206861, 0x206920, 0x20696E, 0x206B6F, 0x206D65, 0x206F63, 0x2070E5, 0x20736B, 0x20736F, 0x207374, 0x207469,
0x207661, 0x207669, 0x20E472, 0x616465, 0x616E20, 0x616E64, 0x617220, 0x617474, 0x636820, 0x646520, 0x64656E, 0x646572, 0x646574, 0x656420, 0x656E20, 0x657220,
0x657420, 0x66F672, 0x67656E, 0x696C6C, 0x696E67, 0x6B6120, 0x6C6C20, 0x6D6564, 0x6E2073, 0x6E6120, 0x6E6465, 0x6E6720, 0x6E6765, 0x6E696E, 0x6F6368, 0x6F6D20,
0x6F6E20, 0x70E520, 0x722061, 0x722073, 0x726120, 0x736B61, 0x736F6D, 0x742073, 0x746120, 0x746520, 0x746572, 0x74696C, 0x747420, 0x766172, 0xE47220, 0xF67220,
}
// newRecognizer_8859_1 builds an ISO-8859-1 recognizer for the given
// language; inputs containing C1 bytes are reported as windows-1252.
func newRecognizer_8859_1(language string, ngram *[64]uint32) *recognizerSingleByte {
	rec := &recognizerSingleByte{
		charset:          "ISO-8859-1",
		hasC1ByteCharset: "windows-1252",
		charMap:          &charMap_8859_1,
		ngram:            ngram,
		language:         language,
	}
	return rec
}
// Per-language ISO-8859-1 recognizer constructors; each binds one of the
// trigram tables defined above to the shared charMap_8859_1.
func newRecognizer_8859_1_en() *recognizerSingleByte {
return newRecognizer_8859_1("en", &ngrams_8859_1_en)
}
func newRecognizer_8859_1_da() *recognizerSingleByte {
return newRecognizer_8859_1("da", &ngrams_8859_1_da)
}
func newRecognizer_8859_1_de() *recognizerSingleByte {
return newRecognizer_8859_1("de", &ngrams_8859_1_de)
}
func newRecognizer_8859_1_es() *recognizerSingleByte {
return newRecognizer_8859_1("es", &ngrams_8859_1_es)
}
func newRecognizer_8859_1_fr() *recognizerSingleByte {
return newRecognizer_8859_1("fr", &ngrams_8859_1_fr)
}
func newRecognizer_8859_1_it() *recognizerSingleByte {
return newRecognizer_8859_1("it", &ngrams_8859_1_it)
}
func newRecognizer_8859_1_nl() *recognizerSingleByte {
return newRecognizer_8859_1("nl", &ngrams_8859_1_nl)
}
func newRecognizer_8859_1_no() *recognizerSingleByte {
return newRecognizer_8859_1("no", &ngrams_8859_1_no)
}
func newRecognizer_8859_1_pt() *recognizerSingleByte {
return newRecognizer_8859_1("pt", &ngrams_8859_1_pt)
}
func newRecognizer_8859_1_sv() *recognizerSingleByte {
return newRecognizer_8859_1("sv", &ngrams_8859_1_sv)
}
// charMap_8859_2: byte-class map for ISO-8859-2 (Central European).
// Letters fold to lowercase, 0x27 maps to 0x00 (dropped by parseNgram),
// everything else to 0x20 (word boundary).
var charMap_8859_2 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0xB1, 0x20, 0xB3, 0x20, 0xB5, 0xB6, 0x20,
0x20, 0xB9, 0xBA, 0xBB, 0xBC, 0x20, 0xBE, 0xBF,
0x20, 0xB1, 0x20, 0xB3, 0x20, 0xB5, 0xB6, 0xB7,
0x20, 0xB9, 0xBA, 0xBB, 0xBC, 0x20, 0xBE, 0xBF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0x20,
}

// Per-language trigram reference tables for ISO-8859-2 text (mapped-byte
// form), sorted ascending for ngramState.lookup.
var ngrams_8859_2_cs = [64]uint32{
0x206120, 0x206279, 0x20646F, 0x206A65, 0x206E61, 0x206E65, 0x206F20, 0x206F64, 0x20706F, 0x207072, 0x2070F8, 0x20726F, 0x207365, 0x20736F, 0x207374, 0x20746F,
0x207620, 0x207679, 0x207A61, 0x612070, 0x636520, 0x636820, 0x652070, 0x652073, 0x652076, 0x656D20, 0x656EED, 0x686F20, 0x686F64, 0x697374, 0x6A6520, 0x6B7465,
0x6C6520, 0x6C6920, 0x6E6120, 0x6EE920, 0x6EEC20, 0x6EED20, 0x6F2070, 0x6F646E, 0x6F6A69, 0x6F7374, 0x6F7520, 0x6F7661, 0x706F64, 0x706F6A, 0x70726F, 0x70F865,
0x736520, 0x736F75, 0x737461, 0x737469, 0x73746E, 0x746572, 0x746EED, 0x746F20, 0x752070, 0xBE6520, 0xE16EED, 0xE9686F, 0xED2070, 0xED2073, 0xED6D20, 0xF86564,
}
var ngrams_8859_2_hu = [64]uint32{
0x206120, 0x20617A, 0x206265, 0x206567, 0x20656C, 0x206665, 0x206861, 0x20686F, 0x206973, 0x206B65, 0x206B69, 0x206BF6, 0x206C65, 0x206D61, 0x206D65, 0x206D69,
0x206E65, 0x20737A, 0x207465, 0x20E973, 0x612061, 0x61206B, 0x61206D, 0x612073, 0x616B20, 0x616E20, 0x617A20, 0x62616E, 0x62656E, 0x656779, 0x656B20, 0x656C20,
0x656C65, 0x656D20, 0x656E20, 0x657265, 0x657420, 0x657465, 0x657474, 0x677920, 0x686F67, 0x696E74, 0x697320, 0x6B2061, 0x6BF67A, 0x6D6567, 0x6D696E, 0x6E2061,
0x6E616B, 0x6E656B, 0x6E656D, 0x6E7420, 0x6F6779, 0x732061, 0x737A65, 0x737A74, 0x737AE1, 0x73E967, 0x742061, 0x747420, 0x74E173, 0x7A6572, 0xE16E20, 0xE97320,
}
var ngrams_8859_2_pl = [64]uint32{
0x20637A, 0x20646F, 0x206920, 0x206A65, 0x206B6F, 0x206D61, 0x206D69, 0x206E61, 0x206E69, 0x206F64, 0x20706F, 0x207072, 0x207369, 0x207720, 0x207769, 0x207779,
0x207A20, 0x207A61, 0x612070, 0x612077, 0x616E69, 0x636820, 0x637A65, 0x637A79, 0x646F20, 0x647A69, 0x652070, 0x652073, 0x652077, 0x65207A, 0x65676F, 0x656A20,
0x656D20, 0x656E69, 0x676F20, 0x696120, 0x696520, 0x69656A, 0x6B6120, 0x6B6920, 0x6B6965, 0x6D6965, 0x6E6120, 0x6E6961, 0x6E6965, 0x6F2070, 0x6F7761, 0x6F7769,
0x706F6C, 0x707261, 0x70726F, 0x70727A, 0x727A65, 0x727A79, 0x7369EA, 0x736B69, 0x737461, 0x776965, 0x796368, 0x796D20, 0x7A6520, 0x7A6965, 0x7A7920, 0xF37720,
}
var ngrams_8859_2_ro = [64]uint32{
0x206120, 0x206163, 0x206361, 0x206365, 0x20636F, 0x206375, 0x206465, 0x206469, 0x206C61, 0x206D61, 0x207065, 0x207072, 0x207365, 0x2073E3, 0x20756E, 0x20BA69,
0x20EE6E, 0x612063, 0x612064, 0x617265, 0x617420, 0x617465, 0x617520, 0x636172, 0x636F6E, 0x637520, 0x63E320, 0x646520, 0x652061, 0x652063, 0x652064, 0x652070,
0x652073, 0x656120, 0x656920, 0x656C65, 0x656E74, 0x657374, 0x692061, 0x692063, 0x692064, 0x692070, 0x696520, 0x696920, 0x696E20, 0x6C6120, 0x6C6520, 0x6C6F72,
0x6C7569, 0x6E6520, 0x6E7472, 0x6F7220, 0x70656E, 0x726520, 0x726561, 0x727520, 0x73E320, 0x746520, 0x747275, 0x74E320, 0x756920, 0x756C20, 0xBA6920, 0xEE6E20,
}
// newRecognizer_8859_2 builds an ISO-8859-2 recognizer for the given
// language; inputs containing C1 bytes are reported as windows-1250.
func newRecognizer_8859_2(language string, ngram *[64]uint32) *recognizerSingleByte {
	rec := &recognizerSingleByte{
		charset:          "ISO-8859-2",
		hasC1ByteCharset: "windows-1250",
		charMap:          &charMap_8859_2,
		ngram:            ngram,
		language:         language,
	}
	return rec
}
// Per-language ISO-8859-2 recognizer constructors.
//
// BUG FIX: these four constructors previously delegated to
// newRecognizer_8859_1, which paired the Central-European trigram tables
// with the Latin-1 byte map and reported the wrong charset
// ("ISO-8859-1"/"windows-1252"). They must delegate to
// newRecognizer_8859_2 so that charMap_8859_2 and the
// "ISO-8859-2"/"windows-1250" names are used.
func newRecognizer_8859_2_cs() *recognizerSingleByte {
	return newRecognizer_8859_2("cs", &ngrams_8859_2_cs)
}
func newRecognizer_8859_2_hu() *recognizerSingleByte {
	return newRecognizer_8859_2("hu", &ngrams_8859_2_hu)
}
func newRecognizer_8859_2_pl() *recognizerSingleByte {
	return newRecognizer_8859_2("pl", &ngrams_8859_2_pl)
}
func newRecognizer_8859_2_ro() *recognizerSingleByte {
	return newRecognizer_8859_2("ro", &ngrams_8859_2_ro)
}
// charMap_8859_5: byte-class map for ISO-8859-5 (Cyrillic). Latin and
// Cyrillic letters fold to one case, 0x27 maps to 0x00 (dropped by
// parseNgram), everything else to 0x20 (word boundary).
var charMap_8859_5 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0x20, 0xFE, 0xFF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0x20, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0x20, 0xFE, 0xFF,
}

// ngrams_8859_5_ru: Russian trigram reference table (mapped-byte form),
// sorted ascending for ngramState.lookup.
var ngrams_8859_5_ru = [64]uint32{
0x20D220, 0x20D2DE, 0x20D4DE, 0x20D7D0, 0x20D820, 0x20DAD0, 0x20DADE, 0x20DDD0, 0x20DDD5, 0x20DED1, 0x20DFDE, 0x20DFE0, 0x20E0D0, 0x20E1DE, 0x20E1E2, 0x20E2DE,
0x20E7E2, 0x20EDE2, 0xD0DDD8, 0xD0E2EC, 0xD3DE20, 0xD5DBEC, 0xD5DDD8, 0xD5E1E2, 0xD5E220, 0xD820DF, 0xD8D520, 0xD8D820, 0xD8EF20, 0xDBD5DD, 0xDBD820, 0xDBECDD,
0xDDD020, 0xDDD520, 0xDDD8D5, 0xDDD8EF, 0xDDDE20, 0xDDDED2, 0xDE20D2, 0xDE20DF, 0xDE20E1, 0xDED220, 0xDED2D0, 0xDED3DE, 0xDED920, 0xDEDBEC, 0xDEDC20, 0xDEE1E2,
0xDFDEDB, 0xDFE0D5, 0xDFE0D8, 0xDFE0DE, 0xE0D0D2, 0xE0D5D4, 0xE1E2D0, 0xE1E2D2, 0xE1E2D8, 0xE1EF20, 0xE2D5DB, 0xE2DE20, 0xE2DEE0, 0xE2EC20, 0xE7E2DE, 0xEBE520,
}
// newRecognizer_8859_5 builds an ISO-8859-5 (Cyrillic) recognizer for the
// given language. No C1 fallback charset is configured.
func newRecognizer_8859_5(language string, ngram *[64]uint32) *recognizerSingleByte {
	rec := &recognizerSingleByte{
		charset:  "ISO-8859-5",
		charMap:  &charMap_8859_5,
		ngram:    ngram,
		language: language,
	}
	return rec
}

// newRecognizer_8859_5_ru binds the Russian trigram table.
func newRecognizer_8859_5_ru() *recognizerSingleByte {
	return newRecognizer_8859_5("ru", &ngrams_8859_5_ru)
}
// charMap_8859_6: byte-class map for ISO-8859-6 (Arabic). ASCII letters
// fold to lowercase, Arabic letters map to themselves, 0x27 maps to 0x00
// (dropped by parseNgram), everything else to 0x20 (word boundary).
var charMap_8859_6 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
0xD8, 0xD9, 0xDA, 0x20, 0x20, 0x20, 0x20, 0x20,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
}

// ngrams_8859_6_ar: Arabic trigram reference table (mapped-byte form),
// sorted ascending for ngramState.lookup.
var ngrams_8859_6_ar = [64]uint32{
0x20C7E4, 0x20C7E6, 0x20C8C7, 0x20D9E4, 0x20E1EA, 0x20E4E4, 0x20E5E6, 0x20E8C7, 0xC720C7, 0xC7C120, 0xC7CA20, 0xC7D120, 0xC7E420, 0xC7E4C3, 0xC7E4C7, 0xC7E4C8,
0xC7E4CA, 0xC7E4CC, 0xC7E4CD, 0xC7E4CF, 0xC7E4D3, 0xC7E4D9, 0xC7E4E2, 0xC7E4E5, 0xC7E4E8, 0xC7E4EA, 0xC7E520, 0xC7E620, 0xC7E6CA, 0xC820C7, 0xC920C7, 0xC920E1,
0xC920E4, 0xC920E5, 0xC920E8, 0xCA20C7, 0xCF20C7, 0xCFC920, 0xD120C7, 0xD1C920, 0xD320C7, 0xD920C7, 0xD9E4E9, 0xE1EA20, 0xE420C7, 0xE4C920, 0xE4E920, 0xE4EA20,
0xE520C7, 0xE5C720, 0xE5C920, 0xE5E620, 0xE620C7, 0xE720C7, 0xE7C720, 0xE8C7E4, 0xE8E620, 0xE920C7, 0xEA20C7, 0xEA20E5, 0xEA20E8, 0xEAC920, 0xEAD120, 0xEAE620,
}
// newRecognizer_8859_6 builds an ISO-8859-6 (Arabic) recognizer for the
// given language. No C1 fallback charset is configured.
func newRecognizer_8859_6(language string, ngram *[64]uint32) *recognizerSingleByte {
	rec := &recognizerSingleByte{
		charset:  "ISO-8859-6",
		charMap:  &charMap_8859_6,
		ngram:    ngram,
		language: language,
	}
	return rec
}

// newRecognizer_8859_6_ar binds the Arabic trigram table.
func newRecognizer_8859_6_ar() *recognizerSingleByte {
	return newRecognizer_8859_6("ar", &ngrams_8859_6_ar)
}
// charMap_8859_7: byte-class map for ISO-8859-7 (Greek). Letters fold to
// one case, 0x27 maps to 0x00 (dropped by parseNgram), everything else to
// 0x20 (word boundary).
var charMap_8859_7 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0xA1, 0xA2, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0xDC, 0x20,
0xDD, 0xDE, 0xDF, 0x20, 0xFC, 0x20, 0xFD, 0xFE,
0xC0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0x20, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xDC, 0xDD, 0xDE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0x20,
}

// ngrams_8859_7_el: Greek trigram reference table (mapped-byte form),
// sorted ascending for ngramState.lookup.
var ngrams_8859_7_el = [64]uint32{
0x20E1ED, 0x20E1F0, 0x20E3E9, 0x20E4E9, 0x20E5F0, 0x20E720, 0x20EAE1, 0x20ECE5, 0x20EDE1, 0x20EF20, 0x20F0E1, 0x20F0EF, 0x20F0F1, 0x20F3F4, 0x20F3F5, 0x20F4E7,
0x20F4EF, 0xDFE120, 0xE120E1, 0xE120F4, 0xE1E920, 0xE1ED20, 0xE1F0FC, 0xE1F220, 0xE3E9E1, 0xE5E920, 0xE5F220, 0xE720F4, 0xE7ED20, 0xE7F220, 0xE920F4, 0xE9E120,
0xE9EADE, 0xE9F220, 0xEAE1E9, 0xEAE1F4, 0xECE520, 0xED20E1, 0xED20E5, 0xED20F0, 0xEDE120, 0xEFF220, 0xEFF520, 0xF0EFF5, 0xF0F1EF, 0xF0FC20, 0xF220E1, 0xF220E5,
0xF220EA, 0xF220F0, 0xF220F4, 0xF3E520, 0xF3E720, 0xF3F4EF, 0xF4E120, 0xF4E1E9, 0xF4E7ED, 0xF4E7F2, 0xF4E9EA, 0xF4EF20, 0xF4EFF5, 0xF4F9ED, 0xF9ED20, 0xFEED20,
}
// newRecognizer_8859_7 builds an ISO-8859-7 (Greek) recognizer for the
// given language; inputs containing C1 bytes are reported as windows-1253.
func newRecognizer_8859_7(language string, ngram *[64]uint32) *recognizerSingleByte {
	rec := &recognizerSingleByte{
		charset:          "ISO-8859-7",
		hasC1ByteCharset: "windows-1253",
		charMap:          &charMap_8859_7,
		ngram:            ngram,
		language:         language,
	}
	return rec
}

// newRecognizer_8859_7_el binds the Greek trigram table.
func newRecognizer_8859_7_el() *recognizerSingleByte {
	return newRecognizer_8859_7("el", &ngrams_8859_7_el)
}
// charMap_8859_8: byte-class map for ISO-8859-8 (Hebrew). ASCII letters
// fold to lowercase, Hebrew letters map to themselves, 0x27 maps to 0x00
// (dropped by parseNgram), everything else to 0x20 (word boundary).
var charMap_8859_8 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0xB5, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0x20, 0x20, 0x20, 0x20, 0x20,
}

// ngrams_8859_8_I_he: Hebrew trigram table for the logical-order
// (ISO-8859-8-I) variant, sorted ascending for ngramState.lookup.
var ngrams_8859_8_I_he = [64]uint32{
0x20E0E5, 0x20E0E7, 0x20E0E9, 0x20E0FA, 0x20E1E9, 0x20E1EE, 0x20E4E0, 0x20E4E5, 0x20E4E9, 0x20E4EE, 0x20E4F2, 0x20E4F9, 0x20E4FA, 0x20ECE0, 0x20ECE4, 0x20EEE0,
0x20F2EC, 0x20F9EC, 0xE0FA20, 0xE420E0, 0xE420E1, 0xE420E4, 0xE420EC, 0xE420EE, 0xE420F9, 0xE4E5E0, 0xE5E020, 0xE5ED20, 0xE5EF20, 0xE5F820, 0xE5FA20, 0xE920E4,
0xE9E420, 0xE9E5FA, 0xE9E9ED, 0xE9ED20, 0xE9EF20, 0xE9F820, 0xE9FA20, 0xEC20E0, 0xEC20E4, 0xECE020, 0xECE420, 0xED20E0, 0xED20E1, 0xED20E4, 0xED20EC, 0xED20EE,
0xED20F9, 0xEEE420, 0xEF20E4, 0xF0E420, 0xF0E920, 0xF0E9ED, 0xF2EC20, 0xF820E4, 0xF8E9ED, 0xF9EC20, 0xFA20E0, 0xFA20E1, 0xFA20E4, 0xFA20EC, 0xFA20EE, 0xFA20F9,
}

// ngrams_8859_8_he: Hebrew trigram table for the visual-order variant,
// sorted ascending for ngramState.lookup.
var ngrams_8859_8_he = [64]uint32{
0x20E0E5, 0x20E0EC, 0x20E4E9, 0x20E4EC, 0x20E4EE, 0x20E4F0, 0x20E9F0, 0x20ECF2, 0x20ECF9, 0x20EDE5, 0x20EDE9, 0x20EFE5, 0x20EFE9, 0x20F8E5, 0x20F8E9, 0x20FAE0,
0x20FAE5, 0x20FAE9, 0xE020E4, 0xE020EC, 0xE020ED, 0xE020FA, 0xE0E420, 0xE0E5E4, 0xE0EC20, 0xE0EE20, 0xE120E4, 0xE120ED, 0xE120FA, 0xE420E4, 0xE420E9, 0xE420EC,
0xE420ED, 0xE420EF, 0xE420F8, 0xE420FA, 0xE4EC20, 0xE5E020, 0xE5E420, 0xE7E020, 0xE9E020, 0xE9E120, 0xE9E420, 0xEC20E4, 0xEC20ED, 0xEC20FA, 0xECF220, 0xECF920,
0xEDE9E9, 0xEDE9F0, 0xEDE9F8, 0xEE20E4, 0xEE20ED, 0xEE20FA, 0xEEE120, 0xEEE420, 0xF2E420, 0xF920E4, 0xF920ED, 0xF920FA, 0xF9E420, 0xFAE020, 0xFAE420, 0xFAE5E9,
}
// newRecognizer_8859_8 builds an ISO-8859-8 (Hebrew) recognizer for the
// given language; inputs containing C1 bytes are reported as windows-1255.
func newRecognizer_8859_8(language string, ngram *[64]uint32) *recognizerSingleByte {
	rec := &recognizerSingleByte{
		charset:          "ISO-8859-8",
		hasC1ByteCharset: "windows-1255",
		charMap:          &charMap_8859_8,
		ngram:            ngram,
		language:         language,
	}
	return rec
}

// newRecognizer_8859_8_I_he builds the logical-order Hebrew recognizer:
// it reuses the common constructor and overrides the reported charset
// name to "ISO-8859-8-I".
func newRecognizer_8859_8_I_he() *recognizerSingleByte {
	rec := newRecognizer_8859_8("he", &ngrams_8859_8_I_he)
	rec.charset = "ISO-8859-8-I"
	return rec
}

// newRecognizer_8859_8_he binds the visual-order Hebrew trigram table.
func newRecognizer_8859_8_he() *recognizerSingleByte {
	return newRecognizer_8859_8("he", &ngrams_8859_8_he)
}
// charMap_8859_9: byte-class map for ISO-8859-9 (Turkish). Letters fold
// to lowercase (note 0xFD -> 0x69, folding onto 'i'), 0x27 maps to 0x00
// (dropped by parseNgram), everything else to 0x20 (word boundary).
var charMap_8859_9 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0xAA, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0xB5, 0x20, 0x20,
0x20, 0x20, 0xBA, 0x20, 0x20, 0x20, 0x20, 0x20,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0x69, 0xFE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
}

// ngrams_8859_9_tr: Turkish trigram reference table (mapped-byte form),
// sorted ascending for ngramState.lookup.
var ngrams_8859_9_tr = [64]uint32{
0x206261, 0x206269, 0x206275, 0x206461, 0x206465, 0x206765, 0x206861, 0x20696C, 0x206B61, 0x206B6F, 0x206D61, 0x206F6C, 0x207361, 0x207461, 0x207665, 0x207961,
0x612062, 0x616B20, 0x616C61, 0x616D61, 0x616E20, 0x616EFD, 0x617220, 0x617261, 0x6172FD, 0x6173FD, 0x617961, 0x626972, 0x646120, 0x646520, 0x646920, 0x652062,
0x65206B, 0x656469, 0x656E20, 0x657220, 0x657269, 0x657369, 0x696C65, 0x696E20, 0x696E69, 0x697220, 0x6C616E, 0x6C6172, 0x6C6520, 0x6C6572, 0x6E2061, 0x6E2062,
0x6E206B, 0x6E6461, 0x6E6465, 0x6E6520, 0x6E6920, 0x6E696E, 0x6EFD20, 0x72696E, 0x72FD6E, 0x766520, 0x796120, 0x796F72, 0xFD6E20, 0xFD6E64, 0xFD6EFD, 0xFDF0FD,
}
// newRecognizer_8859_9 builds an ISO-8859-9 (Turkish) recognizer for the
// given language; inputs containing C1 bytes are reported as windows-1254.
func newRecognizer_8859_9(language string, ngram *[64]uint32) *recognizerSingleByte {
	rec := &recognizerSingleByte{
		charset:          "ISO-8859-9",
		hasC1ByteCharset: "windows-1254",
		charMap:          &charMap_8859_9,
		ngram:            ngram,
		language:         language,
	}
	return rec
}

// newRecognizer_8859_9_tr binds the Turkish trigram table.
func newRecognizer_8859_9_tr() *recognizerSingleByte {
	return newRecognizer_8859_9("tr", &ngrams_8859_9_tr)
}
var charMap_windows_1256 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x81, 0x20, 0x83, 0x20, 0x20, 0x20, 0x20,
0x88, 0x20, 0x8A, 0x20, 0x9C, 0x8D, 0x8E, 0x8F,
0x90, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x98, 0x20, 0x9A, 0x20, 0x9C, 0x20, 0x20, 0x9F,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0xAA, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0xB5, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0x20,
0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0x20, 0x20, 0x20, 0x20, 0xF4, 0x20, 0x20, 0x20,
0x20, 0xF9, 0x20, 0xFB, 0xFC, 0x20, 0x20, 0xFF,
}
var ngrams_windows_1256 = [64]uint32{
0x20C7E1, 0x20C7E4, 0x20C8C7, 0x20DAE1, 0x20DDED, 0x20E1E1, 0x20E3E4, 0x20E6C7, 0xC720C7, 0xC7C120, 0xC7CA20, 0xC7D120, 0xC7E120, 0xC7E1C3, 0xC7E1C7, 0xC7E1C8,
0xC7E1CA, 0xC7E1CC, 0xC7E1CD, 0xC7E1CF, 0xC7E1D3, 0xC7E1DA, 0xC7E1DE, 0xC7E1E3, 0xC7E1E6, 0xC7E1ED, 0xC7E320, 0xC7E420, 0xC7E4CA, 0xC820C7, 0xC920C7, 0xC920DD,
0xC920E1, 0xC920E3, 0xC920E6, 0xCA20C7, 0xCF20C7, 0xCFC920, 0xD120C7, 0xD1C920, 0xD320C7, 0xDA20C7, 0xDAE1EC, 0xDDED20, 0xE120C7, 0xE1C920, 0xE1EC20, 0xE1ED20,
0xE320C7, 0xE3C720, 0xE3C920, 0xE3E420, 0xE420C7, 0xE520C7, 0xE5C720, 0xE6C7E1, 0xE6E420, 0xEC20C7, 0xED20C7, 0xED20E3, 0xED20E6, 0xEDC920, 0xEDD120, 0xEDE420,
}
func newRecognizer_windows_1256() *recognizerSingleByte {
return &recognizerSingleByte{
charset: "windows-1256",
language: "ar",
charMap: &charMap_windows_1256,
ngram: &ngrams_windows_1256,
}
}
var charMap_windows_1251 = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x90, 0x83, 0x20, 0x83, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x9A, 0x20, 0x9C, 0x9D, 0x9E, 0x9F,
0x90, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x9A, 0x20, 0x9C, 0x9D, 0x9E, 0x9F,
0x20, 0xA2, 0xA2, 0xBC, 0x20, 0xB4, 0x20, 0x20,
0xB8, 0x20, 0xBA, 0x20, 0x20, 0x20, 0x20, 0xBF,
0x20, 0x20, 0xB3, 0xB3, 0xB4, 0xB5, 0x20, 0x20,
0xB8, 0x20, 0xBA, 0x20, 0xBC, 0xBE, 0xBE, 0xBF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
}
var ngrams_windows_1251 = [64]uint32{
0x20E220, 0x20E2EE, 0x20E4EE, 0x20E7E0, 0x20E820, 0x20EAE0, 0x20EAEE, 0x20EDE0, 0x20EDE5, 0x20EEE1, 0x20EFEE, 0x20EFF0, 0x20F0E0, 0x20F1EE, 0x20F1F2, 0x20F2EE,
0x20F7F2, 0x20FDF2, 0xE0EDE8, 0xE0F2FC, 0xE3EE20, 0xE5EBFC, 0xE5EDE8, 0xE5F1F2, 0xE5F220, 0xE820EF, 0xE8E520, 0xE8E820, 0xE8FF20, 0xEBE5ED, 0xEBE820, 0xEBFCED,
0xEDE020, 0xEDE520, 0xEDE8E5, 0xEDE8FF, 0xEDEE20, 0xEDEEE2, 0xEE20E2, 0xEE20EF, 0xEE20F1, 0xEEE220, 0xEEE2E0, 0xEEE3EE, 0xEEE920, 0xEEEBFC, 0xEEEC20, 0xEEF1F2,
0xEFEEEB, 0xEFF0E5, 0xEFF0E8, 0xEFF0EE, 0xF0E0E2, 0xF0E5E4, 0xF1F2E0, 0xF1F2E2, 0xF1F2E8, 0xF1FF20, 0xF2E5EB, 0xF2EE20, 0xF2EEF0, 0xF2FC20, 0xF7F2EE, 0xFBF520,
}
func newRecognizer_windows_1251() *recognizerSingleByte {
return &recognizerSingleByte{
charset: "windows-1251",
language: "ar",
charMap: &charMap_windows_1251,
ngram: &ngrams_windows_1251,
}
}
var charMap_KOI8_R = [256]byte{
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0xA3, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0xA3, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
}
var ngrams_KOI8_R = [64]uint32{
0x20C4CF, 0x20C920, 0x20CBC1, 0x20CBCF, 0x20CEC1, 0x20CEC5, 0x20CFC2, 0x20D0CF, 0x20D0D2, 0x20D2C1, 0x20D3CF, 0x20D3D4, 0x20D4CF, 0x20D720, 0x20D7CF, 0x20DAC1,
0x20DCD4, 0x20DED4, 0xC1CEC9, 0xC1D4D8, 0xC5CCD8, 0xC5CEC9, 0xC5D3D4, 0xC5D420, 0xC7CF20, 0xC920D0, 0xC9C520, 0xC9C920, 0xC9D120, 0xCCC5CE, 0xCCC920, 0xCCD8CE,
0xCEC120, 0xCEC520, 0xCEC9C5, 0xCEC9D1, 0xCECF20, 0xCECFD7, 0xCF20D0, 0xCF20D3, 0xCF20D7, 0xCFC7CF, 0xCFCA20, 0xCFCCD8, 0xCFCD20, 0xCFD3D4, 0xCFD720, 0xCFD7C1,
0xD0CFCC, 0xD0D2C5, 0xD0D2C9, 0xD0D2CF, 0xD2C1D7, 0xD2C5C4, 0xD3D120, 0xD3D4C1, 0xD3D4C9, 0xD3D4D7, 0xD4C5CC, 0xD4CF20, 0xD4CFD2, 0xD4D820, 0xD9C820, 0xDED4CF,
}
func newRecognizer_KOI8_R() *recognizerSingleByte {
return &recognizerSingleByte{
charset: "KOI8-R",
language: "ru",
charMap: &charMap_KOI8_R,
ngram: &ngrams_KOI8_R,
}
}
var charMap_IBM424_he = [256]byte{
/* -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -A -B -C -D -E -F */
/* 0- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 1- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 2- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 3- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 4- */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 5- */ 0x40, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 6- */ 0x40, 0x40, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 7- */ 0x40, 0x71, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x00, 0x40, 0x40,
/* 8- */ 0x40, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 9- */ 0x40, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* A- */ 0xA0, 0x40, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* B- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* C- */ 0x40, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* D- */ 0x40, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* E- */ 0x40, 0x40, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* F- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
}
var ngrams_IBM424_he_rtl = [64]uint32{
0x404146, 0x404148, 0x404151, 0x404171, 0x404251, 0x404256, 0x404541, 0x404546, 0x404551, 0x404556, 0x404562, 0x404569, 0x404571, 0x405441, 0x405445, 0x405641,
0x406254, 0x406954, 0x417140, 0x454041, 0x454042, 0x454045, 0x454054, 0x454056, 0x454069, 0x454641, 0x464140, 0x465540, 0x465740, 0x466840, 0x467140, 0x514045,
0x514540, 0x514671, 0x515155, 0x515540, 0x515740, 0x516840, 0x517140, 0x544041, 0x544045, 0x544140, 0x544540, 0x554041, 0x554042, 0x554045, 0x554054, 0x554056,
0x554069, 0x564540, 0x574045, 0x584540, 0x585140, 0x585155, 0x625440, 0x684045, 0x685155, 0x695440, 0x714041, 0x714042, 0x714045, 0x714054, 0x714056, 0x714069,
}
var ngrams_IBM424_he_ltr = [64]uint32{
0x404146, 0x404154, 0x404551, 0x404554, 0x404556, 0x404558, 0x405158, 0x405462, 0x405469, 0x405546, 0x405551, 0x405746, 0x405751, 0x406846, 0x406851, 0x407141,
0x407146, 0x407151, 0x414045, 0x414054, 0x414055, 0x414071, 0x414540, 0x414645, 0x415440, 0x415640, 0x424045, 0x424055, 0x424071, 0x454045, 0x454051, 0x454054,
0x454055, 0x454057, 0x454068, 0x454071, 0x455440, 0x464140, 0x464540, 0x484140, 0x514140, 0x514240, 0x514540, 0x544045, 0x544055, 0x544071, 0x546240, 0x546940,
0x555151, 0x555158, 0x555168, 0x564045, 0x564055, 0x564071, 0x564240, 0x564540, 0x624540, 0x694045, 0x694055, 0x694071, 0x694540, 0x714140, 0x714540, 0x714651,
}
func newRecognizer_IBM424_he(charset string, ngram *[64]uint32) *recognizerSingleByte {
return &recognizerSingleByte{
charset: charset,
language: "he",
charMap: &charMap_IBM424_he,
ngram: ngram,
}
}
func newRecognizer_IBM424_he_rtl() *recognizerSingleByte {
return newRecognizer_IBM424_he("IBM424_rtl", &ngrams_IBM424_he_rtl)
}
func newRecognizer_IBM424_he_ltr() *recognizerSingleByte {
return newRecognizer_IBM424_he("IBM424_ltr", &ngrams_IBM424_he_ltr)
}
var charMap_IBM420_ar = [256]byte{
/* -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -A -B -C -D -E -F */
/* 0- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 1- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 2- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 3- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 4- */ 0x40, 0x40, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 5- */ 0x40, 0x51, 0x52, 0x40, 0x40, 0x55, 0x56, 0x57, 0x58, 0x59, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 6- */ 0x40, 0x40, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 7- */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
/* 8- */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
/* 9- */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F,
/* A- */ 0xA0, 0x40, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
/* B- */ 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0x40, 0x40, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
/* C- */ 0x40, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x40, 0xCB, 0x40, 0xCD, 0x40, 0xCF,
/* D- */ 0x40, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
/* E- */ 0x40, 0x40, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xEA, 0xEB, 0x40, 0xED, 0xEE, 0xEF,
/* F- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0xFB, 0xFC, 0xFD, 0xFE, 0x40,
}
var ngrams_IBM420_ar_rtl = [64]uint32{
0x4056B1, 0x4056BD, 0x405856, 0x409AB1, 0x40ABDC, 0x40B1B1, 0x40BBBD, 0x40CF56, 0x564056, 0x564640, 0x566340, 0x567540, 0x56B140, 0x56B149, 0x56B156, 0x56B158,
0x56B163, 0x56B167, 0x56B169, 0x56B173, 0x56B178, 0x56B19A, 0x56B1AD, 0x56B1BB, 0x56B1CF, 0x56B1DC, 0x56BB40, 0x56BD40, 0x56BD63, 0x584056, 0x624056, 0x6240AB,
0x6240B1, 0x6240BB, 0x6240CF, 0x634056, 0x734056, 0x736240, 0x754056, 0x756240, 0x784056, 0x9A4056, 0x9AB1DA, 0xABDC40, 0xB14056, 0xB16240, 0xB1DA40, 0xB1DC40,
0xBB4056, 0xBB5640, 0xBB6240, 0xBBBD40, 0xBD4056, 0xBF4056, 0xBF5640, 0xCF56B1, 0xCFBD40, 0xDA4056, 0xDC4056, 0xDC40BB, 0xDC40CF, 0xDC6240, 0xDC7540, 0xDCBD40,
}
var ngrams_IBM420_ar_ltr = [64]uint32{
0x404656, 0x4056BB, 0x4056BF, 0x406273, 0x406275, 0x4062B1, 0x4062BB, 0x4062DC, 0x406356, 0x407556, 0x4075DC, 0x40B156, 0x40BB56, 0x40BD56, 0x40BDBB, 0x40BDCF,
0x40BDDC, 0x40DAB1, 0x40DCAB, 0x40DCB1, 0x49B156, 0x564056, 0x564058, 0x564062, 0x564063, 0x564073, 0x564075, 0x564078, 0x56409A, 0x5640B1, 0x5640BB, 0x5640BD,
0x5640BF, 0x5640DA, 0x5640DC, 0x565840, 0x56B156, 0x56CF40, 0x58B156, 0x63B156, 0x63BD56, 0x67B156, 0x69B156, 0x73B156, 0x78B156, 0x9AB156, 0xAB4062, 0xADB156,
0xB14062, 0xB15640, 0xB156CF, 0xB19A40, 0xB1B140, 0xBB4062, 0xBB40DC, 0xBBB156, 0xBD5640, 0xBDBB40, 0xCF4062, 0xCF40DC, 0xCFB156, 0xDAB19A, 0xDCAB40, 0xDCB156,
}
func newRecognizer_IBM420_ar(charset string, ngram *[64]uint32) *recognizerSingleByte {
return &recognizerSingleByte{
charset: charset,
language: "ar",
charMap: &charMap_IBM420_ar,
ngram: ngram,
}
}
func newRecognizer_IBM420_ar_rtl() *recognizerSingleByte {
return newRecognizer_IBM420_ar("IBM420_rtl", &ngrams_IBM420_ar_rtl)
}
func newRecognizer_IBM420_ar_ltr() *recognizerSingleByte {
return newRecognizer_IBM420_ar("IBM420_ltr", &ngrams_IBM420_ar_ltr)
}
|
fix GPO missing flags
|
package outbox
import (
"github.com/cloudfoundry-incubator/runtime-schema/bbs"
steno "github.com/cloudfoundry/gosteno"
"github.com/cloudfoundry/yagnats"
)
func Listen(bbs bbs.StagerBBS, natsClient yagnats.NATSClient, logger *steno.Logger) {
for {
runOnces, _, errs := bbs.WatchForCompletedRunOnce()
dance:
for {
select {
case runOnce := <-runOnces:
err := bbs.ResolveRunOnce(runOnce)
if err == nil {
natsClient.Publish(runOnce.ReplyTo, []byte("{}"))
}
case err := <-errs:
logger.Warnf("error watching for completions: %s\n", err)
break dance
}
}
}
}
improve runonce logging
package outbox
import (
"github.com/cloudfoundry-incubator/runtime-schema/bbs"
steno "github.com/cloudfoundry/gosteno"
"github.com/cloudfoundry/yagnats"
)
func Listen(bbs bbs.StagerBBS, natsClient yagnats.NATSClient, logger *steno.Logger) {
for {
runOnces, _, errs := bbs.WatchForCompletedRunOnce()
dance:
for {
select {
case runOnce := <-runOnces:
logger.Infod(map[string]interface{}{
"guid": runOnce.Guid,
}, "stager.resolve.runonce")
err := bbs.ResolveRunOnce(runOnce)
if err == nil {
natsClient.Publish(runOnce.ReplyTo, []byte("{}"))
logger.Infod(map[string]interface{}{
"guid": runOnce.Guid,
"reply-to": runOnce.ReplyTo,
}, "stager.resolve.runonce.success")
} else {
logger.Errord(map[string]interface{}{
"guid": runOnce.Guid,
"error": err.Error(),
}, "stager.resolve.runonce.failed")
}
case err := <-errs:
logger.Warnf("error watching for completions: %s\n", err)
break dance
}
}
}
}
|
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build ignore
package main
import (
"archive/tar"
"archive/zip"
"bytes"
"compress/gzip"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"os/user"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"syscall"
"text/template"
"time"
)
var (
versionRe = regexp.MustCompile(`-[0-9]{1,3}-g[0-9a-f]{5,10}`)
goarch string
goos string
noupgrade bool
version string
goVersion float64
race bool
)
type target struct {
name string
buildPkg string
binaryName string
archiveFiles []archiveFile
debianFiles []archiveFile
tags []string
}
type archiveFile struct {
src string
dst string
perm os.FileMode
}
var targets = map[string]target{
"all": {
// Only valid for the "build" and "install" commands as it lacks all
// the archive creation stuff.
buildPkg: "./cmd/...",
},
"syncthing": {
// The default target for "build", "install", "tar", "zip", "deb", etc.
name: "syncthing",
buildPkg: "./cmd/syncthing",
binaryName: "syncthing", // .exe will be added automatically for Windows builds
archiveFiles: []archiveFile{
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
{src: "README.md", dst: "README.txt", perm: 0644},
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
// All files from etc/ and extra/ added automatically in init().
},
debianFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing/AUTHORS.txt", perm: 0644},
{src: "man/syncthing.1", dst: "deb/usr/share/man/man1/syncthing.1", perm: 0644},
{src: "man/syncthing-config.5", dst: "deb/usr/share/man/man5/syncthing-config.5", perm: 0644},
{src: "man/syncthing-stignore.5", dst: "deb/usr/share/man/man5/syncthing-stignore.5", perm: 0644},
{src: "man/syncthing-device-ids.7", dst: "deb/usr/share/man/man7/syncthing-device-ids.7", perm: 0644},
{src: "man/syncthing-event-api.7", dst: "deb/usr/share/man/man7/syncthing-event-api.7", perm: 0644},
{src: "man/syncthing-faq.7", dst: "deb/usr/share/man/man7/syncthing-faq.7", perm: 0644},
{src: "man/syncthing-networking.7", dst: "deb/usr/share/man/man7/syncthing-networking.7", perm: 0644},
{src: "man/syncthing-rest-api.7", dst: "deb/usr/share/man/man7/syncthing-rest-api.7", perm: 0644},
{src: "man/syncthing-security.7", dst: "deb/usr/share/man/man7/syncthing-security.7", perm: 0644},
{src: "man/syncthing-versioning.7", dst: "deb/usr/share/man/man7/syncthing-versioning.7", perm: 0644},
{src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0644},
{src: "etc/linux-systemd/system/syncthing-resume.service", dst: "deb/lib/systemd/system/syncthing-resume.service", perm: 0644},
{src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0644},
},
},
"discosrv": {
name: "discosrv",
buildPkg: "./cmd/discosrv",
binaryName: "discosrv", // .exe will be added automatically for Windows builds
archiveFiles: []archiveFile{
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
{src: "cmd/discosrv/README.md", dst: "README.txt", perm: 0644},
{src: "cmd/discosrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
},
debianFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "cmd/discosrv/README.md", dst: "deb/usr/share/doc/discosrv/README.txt", perm: 0644},
{src: "cmd/discosrv/LICENSE", dst: "deb/usr/share/doc/discosrv/LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "deb/usr/share/doc/discosrv/AUTHORS.txt", perm: 0644},
},
tags: []string{"purego"},
},
"relaysrv": {
name: "relaysrv",
buildPkg: "./cmd/relaysrv",
binaryName: "relaysrv", // .exe will be added automatically for Windows builds
archiveFiles: []archiveFile{
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
{src: "cmd/relaysrv/README.md", dst: "README.txt", perm: 0644},
{src: "cmd/relaysrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
},
debianFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "cmd/relaysrv/README.md", dst: "deb/usr/share/doc/relaysrv/README.txt", perm: 0644},
{src: "cmd/relaysrv/LICENSE", dst: "deb/usr/share/doc/relaysrv/LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "deb/usr/share/doc/relaysrv/AUTHORS.txt", perm: 0644},
},
},
}
func init() {
// The "syncthing" target includes a few more files found in the "etc"
// and "extra" dirs.
syncthingPkg := targets["syncthing"]
for _, file := range listFiles("etc") {
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
}
for _, file := range listFiles("extra") {
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
}
for _, file := range listFiles("extra") {
syncthingPkg.debianFiles = append(syncthingPkg.debianFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0644})
}
targets["syncthing"] = syncthingPkg
}
const minGoVersion = 1.3
func main() {
log.SetOutput(os.Stdout)
log.SetFlags(0)
if os.Getenv("GOPATH") == "" {
setGoPath()
}
// We use Go 1.5+ vendoring.
os.Setenv("GO15VENDOREXPERIMENT", "1")
// Set path to $GOPATH/bin:$PATH so that we can for sure find tools we
// might have installed during "build.go setup".
os.Setenv("PATH", fmt.Sprintf("%s%cbin%c%s", os.Getenv("GOPATH"), os.PathSeparator, os.PathListSeparator, os.Getenv("PATH")))
parseFlags()
switch goarch {
case "386", "amd64", "arm", "arm64", "ppc64", "ppc64le":
break
default:
log.Printf("Unknown goarch %q; proceed with caution!", goarch)
}
goVersion, _ = checkRequiredGoVersion()
// Invoking build.go with no parameters at all is equivalent to "go run
// build.go install all" as that builds everything (incrementally),
// which is what you want for maximum error checking during development.
if flag.NArg() == 0 {
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
}
install(targets["all"], tags)
vet("cmd", "lib")
lint("./cmd/...")
lint("./lib/...")
return
}
// Otherwise, with any command given but not a target, the target is
// "syncthing". So "go run build.go install" is "go run build.go install
// syncthing" etc.
targetName := "syncthing"
if flag.NArg() > 1 {
targetName = flag.Arg(1)
}
target, ok := targets[targetName]
if !ok {
log.Fatalln("Unknown target", target)
}
cmd := flag.Arg(0)
switch cmd {
case "setup":
setup()
case "install":
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
}
install(target, tags)
case "build":
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
}
build(target, tags)
case "test":
test("./lib/...", "./cmd/...")
case "bench":
bench("./lib/...", "./cmd/...")
case "assets":
rebuildAssets()
case "xdr":
xdr()
case "translate":
translate()
case "transifex":
transifex()
case "tar":
buildTar(target)
case "zip":
buildZip(target)
case "deb":
buildDeb(target)
case "clean":
clean()
case "vet":
vet("build.go")
vet("cmd", "lib")
case "lint":
lint(".")
lint("./cmd/...")
lint("./lib/...")
if isGometalinterInstalled() {
dirs := []string{".", "./cmd/...", "./lib/..."}
gometalinter("deadcode", dirs, "test/util.go")
gometalinter("structcheck", dirs)
gometalinter("varcheck", dirs)
}
default:
log.Fatalf("Unknown command %q", cmd)
}
}
// setGoPath sets GOPATH correctly with the assumption that we are
// in $GOPATH/src/github.com/syncthing/syncthing.
func setGoPath() {
cwd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
gopath := filepath.Clean(filepath.Join(cwd, "../../../../"))
log.Println("GOPATH is", gopath)
os.Setenv("GOPATH", gopath)
}
func parseFlags() {
flag.StringVar(&goarch, "goarch", runtime.GOARCH, "GOARCH")
flag.StringVar(&goos, "goos", runtime.GOOS, "GOOS")
flag.BoolVar(&noupgrade, "no-upgrade", noupgrade, "Disable upgrade functionality")
flag.StringVar(&version, "version", getVersion(), "Set compiled in version string")
flag.BoolVar(&race, "race", race, "Use race detector")
flag.Parse()
}
func checkRequiredGoVersion() (float64, bool) {
re := regexp.MustCompile(`go(\d+\.\d+)`)
ver := runtime.Version()
if m := re.FindStringSubmatch(ver); len(m) == 2 {
vs := string(m[1])
// This is a standard go build. Verify that it's new enough.
f, err := strconv.ParseFloat(vs, 64)
if err != nil {
log.Printf("*** Couldn't parse Go version out of %q.\n*** This isn't known to work, proceed on your own risk.", vs)
return 0, false
}
if f < 1.5 {
log.Printf("*** Go version %.01f doesn't support the vendoring mechanism.\n*** Ensure correct dependencies in your $GOPATH.", f)
} else if f < minGoVersion {
log.Fatalf("*** Go version %.01f is less than required %.01f.\n*** This is known not to work, not proceeding.", f, minGoVersion)
}
return f, true
}
log.Printf("*** Unknown Go version %q.\n*** This isn't known to work, proceed on your own risk.", ver)
return 0, false
}
func setup() {
runPrint("go", "get", "-v", "golang.org/x/tools/cmd/cover")
runPrint("go", "get", "-v", "golang.org/x/net/html")
runPrint("go", "get", "-v", "github.com/FiloSottile/gvt")
runPrint("go", "get", "-v", "github.com/axw/gocov/gocov")
runPrint("go", "get", "-v", "github.com/AlekSi/gocov-xml")
runPrint("go", "get", "-v", "bitbucket.org/tebeka/go2xunit")
runPrint("go", "get", "-v", "github.com/alecthomas/gometalinter")
runPrint("go", "get", "-v", "github.com/mitchellh/go-wordwrap")
}
func test(pkgs ...string) {
lazyRebuildAssets()
useRace := runtime.GOARCH == "amd64"
switch runtime.GOOS {
case "darwin", "linux", "freebsd", "windows":
default:
useRace = false
}
if useRace {
runPrint("go", append([]string{"test", "-short", "-race", "-timeout", "60s"}, pkgs...)...)
} else {
runPrint("go", append([]string{"test", "-short", "-timeout", "60s"}, pkgs...)...)
}
}
func bench(pkgs ...string) {
lazyRebuildAssets()
runPrint("go", append([]string{"test", "-run", "NONE", "-bench", "."}, pkgs...)...)
}
func install(target target, tags []string) {
lazyRebuildAssets()
tags = append(target.tags, tags...)
cwd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
os.Setenv("GOBIN", filepath.Join(cwd, "bin"))
args := []string{"install", "-v", "-ldflags", ldflags()}
if len(tags) > 0 {
args = append(args, "-tags", strings.Join(tags, ","))
}
if race {
args = append(args, "-race")
}
args = append(args, target.buildPkg)
os.Setenv("GOOS", goos)
os.Setenv("GOARCH", goarch)
runPrint("go", args...)
}
func build(target target, tags []string) {
lazyRebuildAssets()
tags = append(target.tags, tags...)
rmr(target.binaryName)
args := []string{"build", "-i", "-v", "-ldflags", ldflags()}
if len(tags) > 0 {
args = append(args, "-tags", strings.Join(tags, ","))
}
if race {
args = append(args, "-race")
}
args = append(args, target.buildPkg)
os.Setenv("GOOS", goos)
os.Setenv("GOARCH", goarch)
runPrint("go", args...)
}
func buildTar(target target) {
name := archiveName(target)
filename := name + ".tar.gz"
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
name += "-noupgrade"
}
build(target, tags)
if goos == "darwin" {
macosCodesign(target.binaryName)
}
for i := range target.archiveFiles {
target.archiveFiles[i].src = strings.Replace(target.archiveFiles[i].src, "{{binary}}", target.binaryName, 1)
target.archiveFiles[i].dst = strings.Replace(target.archiveFiles[i].dst, "{{binary}}", target.binaryName, 1)
target.archiveFiles[i].dst = name + "/" + target.archiveFiles[i].dst
}
tarGz(filename, target.archiveFiles)
log.Println(filename)
}
func buildZip(target target) {
target.binaryName += ".exe"
name := archiveName(target)
filename := name + ".zip"
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
name += "-noupgrade"
}
build(target, tags)
for i := range target.archiveFiles {
target.archiveFiles[i].src = strings.Replace(target.archiveFiles[i].src, "{{binary}}", target.binaryName, 1)
target.archiveFiles[i].dst = strings.Replace(target.archiveFiles[i].dst, "{{binary}}", target.binaryName, 1)
target.archiveFiles[i].dst = name + "/" + target.archiveFiles[i].dst
}
zipFile(filename, target.archiveFiles)
log.Println(filename)
}
func buildDeb(target target) {
os.RemoveAll("deb")
// "goarch" here is set to whatever the Debian packages expect. We correct
// "it to what we actually know how to build and keep the Debian variant
// "name in "debarch".
debarch := goarch
switch goarch {
case "i386":
goarch = "386"
case "armel", "armhf":
goarch = "arm"
}
build(target, []string{"noupgrade"})
for i := range target.debianFiles {
target.debianFiles[i].src = strings.Replace(target.debianFiles[i].src, "{{binary}}", target.binaryName, 1)
target.debianFiles[i].dst = strings.Replace(target.debianFiles[i].dst, "{{binary}}", target.binaryName, 1)
}
for _, af := range target.debianFiles {
if err := copyFile(af.src, af.dst, af.perm); err != nil {
log.Fatal(err)
}
}
os.MkdirAll("deb/DEBIAN", 0755)
data := map[string]string{
"name": target.name,
"arch": debarch,
"version": version[1:],
"date": time.Now().Format(time.RFC1123),
}
debTemplateFiles := append(listFiles("debtpl/common"), listFiles("debtpl/"+target.name)...)
for _, file := range debTemplateFiles {
tpl, err := template.New(filepath.Base(file)).ParseFiles(file)
if err != nil {
log.Fatal(err)
}
outFile := filepath.Join("deb/DEBIAN", filepath.Base(file))
out, err := os.Create(outFile)
if err != nil {
log.Fatal(err)
}
if err := tpl.Execute(out, data); err != nil {
log.Fatal(err)
}
if err := out.Close(); err != nil {
log.Fatal(err)
}
info, _ := os.Lstat(file)
os.Chmod(outFile, info.Mode())
}
}
func copyFile(src, dst string, perm os.FileMode) error {
dstDir := filepath.Dir(dst)
os.MkdirAll(dstDir, 0755) // ignore error
srcFd, err := os.Open(src)
if err != nil {
return err
}
defer srcFd.Close()
dstFd, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, perm)
if err != nil {
return err
}
defer dstFd.Close()
_, err = io.Copy(dstFd, srcFd)
return err
}
func listFiles(dir string) []string {
var res []string
filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if fi.Mode().IsRegular() {
res = append(res, path)
}
return nil
})
return res
}
func rebuildAssets() {
runPipe("lib/auto/gui.files.go", "go", "run", "script/genassets.go", "gui")
}
func lazyRebuildAssets() {
if shouldRebuildAssets() {
rebuildAssets()
}
}
func shouldRebuildAssets() bool {
info, err := os.Stat("lib/auto/gui.files.go")
if err != nil {
// If the file doesn't exist, we must rebuild it
return true
}
// Check if any of the files in gui/ are newer than the asset file. If
// so we should rebuild it.
currentBuild := info.ModTime()
assetsAreNewer := false
filepath.Walk("gui", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if assetsAreNewer {
return nil
}
assetsAreNewer = info.ModTime().After(currentBuild)
return nil
})
return assetsAreNewer
}
func xdr() {
runPrint("go", "generate", "./lib/discover", "./lib/db", "./lib/protocol", "./lib/relay/protocol")
}
func translate() {
os.Chdir("gui/default/assets/lang")
runPipe("lang-en-new.json", "go", "run", "../../../../script/translate.go", "lang-en.json", "../../../")
os.Remove("lang-en.json")
err := os.Rename("lang-en-new.json", "lang-en.json")
if err != nil {
log.Fatal(err)
}
os.Chdir("../../../..")
}
func transifex() {
os.Chdir("gui/default/assets/lang")
runPrint("go", "run", "../../../../script/transifexdl.go")
}
func clean() {
rmr("bin")
rmr(filepath.Join(os.Getenv("GOPATH"), fmt.Sprintf("pkg/%s_%s/github.com/syncthing", goos, goarch)))
}
func ldflags() string {
sep := '='
if goVersion > 0 && goVersion < 1.5 {
sep = ' '
}
b := new(bytes.Buffer)
b.WriteString("-w")
fmt.Fprintf(b, " -X main.Version%c%s", sep, version)
fmt.Fprintf(b, " -X main.BuildStamp%c%d", sep, buildStamp())
fmt.Fprintf(b, " -X main.BuildUser%c%s", sep, buildUser())
fmt.Fprintf(b, " -X main.BuildHost%c%s", sep, buildHost())
return b.String()
}
func rmr(paths ...string) {
for _, path := range paths {
log.Println("rm -r", path)
os.RemoveAll(path)
}
}
func getReleaseVersion() (string, error) {
fd, err := os.Open("RELEASE")
if err != nil {
return "", err
}
defer fd.Close()
bs, err := ioutil.ReadAll(fd)
if err != nil {
return "", err
}
return string(bytes.TrimSpace(bs)), nil
}
func getGitVersion() (string, error) {
v, err := runError("git", "describe", "--always", "--dirty")
if err != nil {
return "", err
}
v = versionRe.ReplaceAllFunc(v, func(s []byte) []byte {
s[0] = '+'
return s
})
return string(v), nil
}
func getVersion() string {
// First try for a RELEASE file,
if ver, err := getReleaseVersion(); err == nil {
return ver
}
// ... then see if we have a Git tag.
if ver, err := getGitVersion(); err == nil {
if strings.Contains(ver, "-") {
// The version already contains a hash and stuff. See if we can
// find a current branch name to tack onto it as well.
return ver + getBranchSuffix()
}
return ver
}
// This seems to be a dev build.
return "unknown-dev"
}
// getBranchSuffix returns "-branchname" for the current Git branch, or
// "" for master builds, detached heads, empty output, or branch names
// containing characters outside [a-zA-Z0-9_.-].
func getBranchSuffix() string {
	bs, err := runError("git", "branch", "-a", "--contains")
	if err != nil {
		return ""
	}

	branches := strings.Split(string(bs), "\n")
	if len(branches) == 0 {
		return ""
	}

	branch := ""
	for i, candidate := range branches {
		if strings.HasPrefix(candidate, "*") {
			// This is the current branch. Select it!
			branch = strings.TrimLeft(candidate, " \t*")
			break
		} else if i == 0 {
			// Otherwise the first branch in the list will do.
			// (Bug fix: this previously trimmed the empty "branch"
			// variable instead of "candidate", so the fallback never
			// selected anything.)
			branch = strings.TrimSpace(candidate)
		}
	}

	if branch == "" {
		return ""
	}

	// The branch name may be on the form "remotes/origin/foo" from which we
	// just want "foo".
	parts := strings.Split(branch, "/")
	if len(parts) == 0 || len(parts[len(parts)-1]) == 0 {
		return ""
	}
	branch = parts[len(parts)-1]
	if branch == "master" {
		// master builds are the default.
		return ""
	}

	validBranchRe := regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
	if !validBranchRe.MatchString(branch) {
		// There's some odd stuff in the branch name. Better skip it.
		return ""
	}

	return "-" + branch
}
// buildStamp returns the Unix timestamp of the current Git commit, or
// the current time when not building from a Git checkout or when the
// git output cannot be parsed (previously a parse failure silently
// produced a zero stamp).
func buildStamp() int64 {
	bs, err := runError("git", "show", "-s", "--format=%ct")
	if err != nil {
		return time.Now().Unix()
	}
	s, err := strconv.ParseInt(string(bs), 10, 64)
	if err != nil {
		return time.Now().Unix()
	}
	return s
}
func buildUser() string {
u, err := user.Current()
if err != nil {
return "unknown-user"
}
return strings.Replace(u.Username, " ", "-", -1)
}
func buildHost() string {
h, err := os.Hostname()
if err != nil {
return "unknown-host"
}
return h
}
func buildArch() string {
os := goos
if os == "darwin" {
os = "macosx"
}
return fmt.Sprintf("%s-%s", os, goarch)
}
func archiveName(target target) string {
return fmt.Sprintf("%s-%s-%s", target.name, buildArch(), version)
}
func runError(cmd string, args ...string) ([]byte, error) {
ecmd := exec.Command(cmd, args...)
bs, err := ecmd.CombinedOutput()
return bytes.TrimSpace(bs), err
}
func runPrint(cmd string, args ...string) {
log.Println(cmd, strings.Join(args, " "))
ecmd := exec.Command(cmd, args...)
ecmd.Stdout = os.Stdout
ecmd.Stderr = os.Stderr
err := ecmd.Run()
if err != nil {
log.Fatal(err)
}
}
func runPipe(file, cmd string, args ...string) {
log.Println(cmd, strings.Join(args, " "), ">", file)
fd, err := os.Create(file)
if err != nil {
log.Fatal(err)
}
ecmd := exec.Command(cmd, args...)
ecmd.Stdout = fd
ecmd.Stderr = os.Stderr
err = ecmd.Run()
if err != nil {
log.Fatal(err)
}
fd.Close()
}
func tarGz(out string, files []archiveFile) {
fd, err := os.Create(out)
if err != nil {
log.Fatal(err)
}
gw := gzip.NewWriter(fd)
tw := tar.NewWriter(gw)
for _, f := range files {
sf, err := os.Open(f.src)
if err != nil {
log.Fatal(err)
}
info, err := sf.Stat()
if err != nil {
log.Fatal(err)
}
h := &tar.Header{
Name: f.dst,
Size: info.Size(),
Mode: int64(info.Mode()),
ModTime: info.ModTime(),
}
err = tw.WriteHeader(h)
if err != nil {
log.Fatal(err)
}
_, err = io.Copy(tw, sf)
if err != nil {
log.Fatal(err)
}
sf.Close()
}
err = tw.Close()
if err != nil {
log.Fatal(err)
}
err = gw.Close()
if err != nil {
log.Fatal(err)
}
err = fd.Close()
if err != nil {
log.Fatal(err)
}
}
// zipFile creates a zip archive at out containing the given files.
// Files whose destination ends in ".txt" have their line endings
// converted to CRLF for Windows users; other files are copied
// verbatim. Any error is fatal.
func zipFile(out string, files []archiveFile) {
	fd, err := os.Create(out)
	if err != nil {
		log.Fatal(err)
	}
	zw := zip.NewWriter(fd)
	for _, f := range files {
		sf, err := os.Open(f.src)
		if err != nil {
			log.Fatal(err)
		}
		info, err := sf.Stat()
		if err != nil {
			log.Fatal(err)
		}
		fh, err := zip.FileInfoHeader(info)
		if err != nil {
			log.Fatal(err)
		}
		fh.Name = f.dst
		fh.Method = zip.Deflate
		if strings.HasSuffix(f.dst, ".txt") {
			// Text file. Read it and convert line endings.
			bs, err := ioutil.ReadAll(sf)
			if err != nil {
				log.Fatal(err)
			}
			// Bug fix: CRLF is "\r\n"; the old code produced "\n\r".
			bs = bytes.Replace(bs, []byte{'\n'}, []byte{'\r', '\n'}, -1)
			// The conversion changed the size; update the header.
			fh.UncompressedSize = uint32(len(bs))
			fh.UncompressedSize64 = uint64(len(bs))
			of, err := zw.CreateHeader(fh)
			if err != nil {
				log.Fatal(err)
			}
			if _, err := of.Write(bs); err != nil {
				log.Fatal(err)
			}
		} else {
			// Binary file. Copy verbatim.
			of, err := zw.CreateHeader(fh)
			if err != nil {
				log.Fatal(err)
			}
			_, err = io.Copy(of, sf)
			if err != nil {
				log.Fatal(err)
			}
		}
		// Close each source file as we go instead of leaking the
		// descriptors until process exit.
		sf.Close()
	}
	err = zw.Close()
	if err != nil {
		log.Fatal(err)
	}
	err = fd.Close()
	if err != nil {
		log.Fatal(err)
	}
}
func vet(dirs ...string) {
params := []string{"tool", "vet", "-all"}
params = append(params, dirs...)
bs, err := runError("go", params...)
if len(bs) > 0 {
log.Printf("%s", bs)
}
if err != nil {
if exitStatus(err) == 3 {
// Exit code 3, the "vet" tool is not installed
return
}
// A genuine error exit from the vet tool.
log.Fatal(err)
}
}
func lint(pkg string) {
bs, err := runError("golint", pkg)
if err != nil {
log.Println(`- No golint, not linting. Try "go get -u github.com/golang/lint/golint".`)
return
}
analCommentPolicy := regexp.MustCompile(`exported (function|method|const|type|var) [^\s]+ should have comment`)
for _, line := range bytes.Split(bs, []byte("\n")) {
if analCommentPolicy.Match(line) {
continue
}
if len(line) > 0 {
log.Printf("%s", line)
}
}
}
func macosCodesign(file string) {
if pass := os.Getenv("CODESIGN_KEYCHAIN_PASS"); pass != "" {
bs, err := runError("security", "unlock-keychain", "-p", pass)
if err != nil {
log.Println("Codesign: unlocking keychain failed:", string(bs))
return
}
}
if id := os.Getenv("CODESIGN_IDENTITY"); id != "" {
bs, err := runError("codesign", "-s", id, file)
if err != nil {
log.Println("Codesign: signing failed:", string(bs))
return
}
log.Println("Codesign: successfully signed", file)
}
}
func exitStatus(err error) int {
if err, ok := err.(*exec.ExitError); ok {
if ws, ok := err.ProcessState.Sys().(syscall.WaitStatus); ok {
return ws.ExitStatus()
}
}
return -1
}
func isGometalinterInstalled() bool {
if _, err := runError("gometalinter", "--disable-all"); err != nil {
log.Println("gometalinter is not installed")
return false
}
return true
}
func gometalinter(linter string, dirs []string, excludes ...string) {
params := []string{"--disable-all"}
params = append(params, fmt.Sprintf("--deadline=%ds", 60))
params = append(params, "--enable="+linter)
for _, exclude := range excludes {
params = append(params, "--exclude="+exclude)
}
for _, dir := range dirs {
params = append(params, dir)
}
bs, err := runError("gometalinter", params...)
if len(bs) > 0 {
log.Printf("%s", bs)
}
if err != nil {
log.Printf("%v", err)
}
}
build: Use purego tags on 'all' target
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build ignore
package main
import (
"archive/tar"
"archive/zip"
"bytes"
"compress/gzip"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"os/user"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"syscall"
"text/template"
"time"
)
var (
versionRe = regexp.MustCompile(`-[0-9]{1,3}-g[0-9a-f]{5,10}`)
goarch string
goos string
noupgrade bool
version string
goVersion float64
race bool
)
type target struct {
name string
buildPkg string
binaryName string
archiveFiles []archiveFile
debianFiles []archiveFile
tags []string
}
type archiveFile struct {
src string
dst string
perm os.FileMode
}
var targets = map[string]target{
"all": {
// Only valid for the "build" and "install" commands as it lacks all
// the archive creation stuff.
buildPkg: "./cmd/...",
tags: []string{"purego"},
},
"syncthing": {
// The default target for "build", "install", "tar", "zip", "deb", etc.
name: "syncthing",
buildPkg: "./cmd/syncthing",
binaryName: "syncthing", // .exe will be added automatically for Windows builds
archiveFiles: []archiveFile{
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
{src: "README.md", dst: "README.txt", perm: 0644},
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
// All files from etc/ and extra/ added automatically in init().
},
debianFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing/AUTHORS.txt", perm: 0644},
{src: "man/syncthing.1", dst: "deb/usr/share/man/man1/syncthing.1", perm: 0644},
{src: "man/syncthing-config.5", dst: "deb/usr/share/man/man5/syncthing-config.5", perm: 0644},
{src: "man/syncthing-stignore.5", dst: "deb/usr/share/man/man5/syncthing-stignore.5", perm: 0644},
{src: "man/syncthing-device-ids.7", dst: "deb/usr/share/man/man7/syncthing-device-ids.7", perm: 0644},
{src: "man/syncthing-event-api.7", dst: "deb/usr/share/man/man7/syncthing-event-api.7", perm: 0644},
{src: "man/syncthing-faq.7", dst: "deb/usr/share/man/man7/syncthing-faq.7", perm: 0644},
{src: "man/syncthing-networking.7", dst: "deb/usr/share/man/man7/syncthing-networking.7", perm: 0644},
{src: "man/syncthing-rest-api.7", dst: "deb/usr/share/man/man7/syncthing-rest-api.7", perm: 0644},
{src: "man/syncthing-security.7", dst: "deb/usr/share/man/man7/syncthing-security.7", perm: 0644},
{src: "man/syncthing-versioning.7", dst: "deb/usr/share/man/man7/syncthing-versioning.7", perm: 0644},
{src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0644},
{src: "etc/linux-systemd/system/syncthing-resume.service", dst: "deb/lib/systemd/system/syncthing-resume.service", perm: 0644},
{src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0644},
},
},
"discosrv": {
name: "discosrv",
buildPkg: "./cmd/discosrv",
binaryName: "discosrv", // .exe will be added automatically for Windows builds
archiveFiles: []archiveFile{
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
{src: "cmd/discosrv/README.md", dst: "README.txt", perm: 0644},
{src: "cmd/discosrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
},
debianFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "cmd/discosrv/README.md", dst: "deb/usr/share/doc/discosrv/README.txt", perm: 0644},
{src: "cmd/discosrv/LICENSE", dst: "deb/usr/share/doc/discosrv/LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "deb/usr/share/doc/discosrv/AUTHORS.txt", perm: 0644},
},
tags: []string{"purego"},
},
"relaysrv": {
name: "relaysrv",
buildPkg: "./cmd/relaysrv",
binaryName: "relaysrv", // .exe will be added automatically for Windows builds
archiveFiles: []archiveFile{
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
{src: "cmd/relaysrv/README.md", dst: "README.txt", perm: 0644},
{src: "cmd/relaysrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
},
debianFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "cmd/relaysrv/README.md", dst: "deb/usr/share/doc/relaysrv/README.txt", perm: 0644},
{src: "cmd/relaysrv/LICENSE", dst: "deb/usr/share/doc/relaysrv/LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "deb/usr/share/doc/relaysrv/AUTHORS.txt", perm: 0644},
},
},
}
func init() {
// The "syncthing" target includes a few more files found in the "etc"
// and "extra" dirs.
syncthingPkg := targets["syncthing"]
for _, file := range listFiles("etc") {
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
}
for _, file := range listFiles("extra") {
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
}
for _, file := range listFiles("extra") {
syncthingPkg.debianFiles = append(syncthingPkg.debianFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0644})
}
targets["syncthing"] = syncthingPkg
}
const minGoVersion = 1.3
// main dispatches the build command given on the command line. With no
// arguments it performs an incremental "install all" plus vet and lint,
// which is the developer default.
func main() {
	log.SetOutput(os.Stdout)
	log.SetFlags(0)
	if os.Getenv("GOPATH") == "" {
		setGoPath()
	}
	// We use Go 1.5+ vendoring.
	os.Setenv("GO15VENDOREXPERIMENT", "1")
	// Set path to $GOPATH/bin:$PATH so that we can for sure find tools we
	// might have installed during "build.go setup".
	os.Setenv("PATH", fmt.Sprintf("%s%cbin%c%s", os.Getenv("GOPATH"), os.PathSeparator, os.PathListSeparator, os.Getenv("PATH")))
	parseFlags()
	switch goarch {
	case "386", "amd64", "arm", "arm64", "ppc64", "ppc64le":
		break
	default:
		log.Printf("Unknown goarch %q; proceed with caution!", goarch)
	}
	goVersion, _ = checkRequiredGoVersion()
	// Invoking build.go with no parameters at all is equivalent to "go run
	// build.go install all" as that builds everything (incrementally),
	// which is what you want for maximum error checking during development.
	if flag.NArg() == 0 {
		var tags []string
		if noupgrade {
			tags = []string{"noupgrade"}
		}
		install(targets["all"], tags)
		vet("cmd", "lib")
		lint("./cmd/...")
		lint("./lib/...")
		return
	}
	// Otherwise, with any command given but not a target, the target is
	// "syncthing". So "go run build.go install" is "go run build.go install
	// syncthing" etc.
	targetName := "syncthing"
	if flag.NArg() > 1 {
		targetName = flag.Arg(1)
	}
	target, ok := targets[targetName]
	if !ok {
		// Bug fix: report the name the user typed, not the zero-value
		// target struct that the lookup produced.
		log.Fatalln("Unknown target", targetName)
	}
	cmd := flag.Arg(0)
	switch cmd {
	case "setup":
		setup()
	case "install":
		var tags []string
		if noupgrade {
			tags = []string{"noupgrade"}
		}
		install(target, tags)
	case "build":
		var tags []string
		if noupgrade {
			tags = []string{"noupgrade"}
		}
		build(target, tags)
	case "test":
		test("./lib/...", "./cmd/...")
	case "bench":
		bench("./lib/...", "./cmd/...")
	case "assets":
		rebuildAssets()
	case "xdr":
		xdr()
	case "translate":
		translate()
	case "transifex":
		transifex()
	case "tar":
		buildTar(target)
	case "zip":
		buildZip(target)
	case "deb":
		buildDeb(target)
	case "clean":
		clean()
	case "vet":
		vet("build.go")
		vet("cmd", "lib")
	case "lint":
		lint(".")
		lint("./cmd/...")
		lint("./lib/...")
		if isGometalinterInstalled() {
			dirs := []string{".", "./cmd/...", "./lib/..."}
			gometalinter("deadcode", dirs, "test/util.go")
			gometalinter("structcheck", dirs)
			gometalinter("varcheck", dirs)
		}
	default:
		log.Fatalf("Unknown command %q", cmd)
	}
}
// setGoPath sets GOPATH correctly with the assumption that we are
// in $GOPATH/src/github.com/syncthing/syncthing.
func setGoPath() {
cwd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
gopath := filepath.Clean(filepath.Join(cwd, "../../../../"))
log.Println("GOPATH is", gopath)
os.Setenv("GOPATH", gopath)
}
func parseFlags() {
flag.StringVar(&goarch, "goarch", runtime.GOARCH, "GOARCH")
flag.StringVar(&goos, "goos", runtime.GOOS, "GOOS")
flag.BoolVar(&noupgrade, "no-upgrade", noupgrade, "Disable upgrade functionality")
flag.StringVar(&version, "version", getVersion(), "Set compiled in version string")
flag.BoolVar(&race, "race", race, "Use race detector")
flag.Parse()
}
func checkRequiredGoVersion() (float64, bool) {
re := regexp.MustCompile(`go(\d+\.\d+)`)
ver := runtime.Version()
if m := re.FindStringSubmatch(ver); len(m) == 2 {
vs := string(m[1])
// This is a standard go build. Verify that it's new enough.
f, err := strconv.ParseFloat(vs, 64)
if err != nil {
log.Printf("*** Couldn't parse Go version out of %q.\n*** This isn't known to work, proceed on your own risk.", vs)
return 0, false
}
if f < 1.5 {
log.Printf("*** Go version %.01f doesn't support the vendoring mechanism.\n*** Ensure correct dependencies in your $GOPATH.", f)
} else if f < minGoVersion {
log.Fatalf("*** Go version %.01f is less than required %.01f.\n*** This is known not to work, not proceeding.", f, minGoVersion)
}
return f, true
}
log.Printf("*** Unknown Go version %q.\n*** This isn't known to work, proceed on your own risk.", ver)
return 0, false
}
func setup() {
runPrint("go", "get", "-v", "golang.org/x/tools/cmd/cover")
runPrint("go", "get", "-v", "golang.org/x/net/html")
runPrint("go", "get", "-v", "github.com/FiloSottile/gvt")
runPrint("go", "get", "-v", "github.com/axw/gocov/gocov")
runPrint("go", "get", "-v", "github.com/AlekSi/gocov-xml")
runPrint("go", "get", "-v", "bitbucket.org/tebeka/go2xunit")
runPrint("go", "get", "-v", "github.com/alecthomas/gometalinter")
runPrint("go", "get", "-v", "github.com/mitchellh/go-wordwrap")
}
func test(pkgs ...string) {
lazyRebuildAssets()
useRace := runtime.GOARCH == "amd64"
switch runtime.GOOS {
case "darwin", "linux", "freebsd", "windows":
default:
useRace = false
}
if useRace {
runPrint("go", append([]string{"test", "-short", "-race", "-timeout", "60s"}, pkgs...)...)
} else {
runPrint("go", append([]string{"test", "-short", "-timeout", "60s"}, pkgs...)...)
}
}
func bench(pkgs ...string) {
lazyRebuildAssets()
runPrint("go", append([]string{"test", "-run", "NONE", "-bench", "."}, pkgs...)...)
}
func install(target target, tags []string) {
lazyRebuildAssets()
tags = append(target.tags, tags...)
cwd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
os.Setenv("GOBIN", filepath.Join(cwd, "bin"))
args := []string{"install", "-v", "-ldflags", ldflags()}
if len(tags) > 0 {
args = append(args, "-tags", strings.Join(tags, ","))
}
if race {
args = append(args, "-race")
}
args = append(args, target.buildPkg)
os.Setenv("GOOS", goos)
os.Setenv("GOARCH", goarch)
runPrint("go", args...)
}
func build(target target, tags []string) {
lazyRebuildAssets()
tags = append(target.tags, tags...)
rmr(target.binaryName)
args := []string{"build", "-i", "-v", "-ldflags", ldflags()}
if len(tags) > 0 {
args = append(args, "-tags", strings.Join(tags, ","))
}
if race {
args = append(args, "-race")
}
args = append(args, target.buildPkg)
os.Setenv("GOOS", goos)
os.Setenv("GOARCH", goarch)
runPrint("go", args...)
}
func buildTar(target target) {
name := archiveName(target)
filename := name + ".tar.gz"
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
name += "-noupgrade"
}
build(target, tags)
if goos == "darwin" {
macosCodesign(target.binaryName)
}
for i := range target.archiveFiles {
target.archiveFiles[i].src = strings.Replace(target.archiveFiles[i].src, "{{binary}}", target.binaryName, 1)
target.archiveFiles[i].dst = strings.Replace(target.archiveFiles[i].dst, "{{binary}}", target.binaryName, 1)
target.archiveFiles[i].dst = name + "/" + target.archiveFiles[i].dst
}
tarGz(filename, target.archiveFiles)
log.Println(filename)
}
func buildZip(target target) {
target.binaryName += ".exe"
name := archiveName(target)
filename := name + ".zip"
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
name += "-noupgrade"
}
build(target, tags)
for i := range target.archiveFiles {
target.archiveFiles[i].src = strings.Replace(target.archiveFiles[i].src, "{{binary}}", target.binaryName, 1)
target.archiveFiles[i].dst = strings.Replace(target.archiveFiles[i].dst, "{{binary}}", target.binaryName, 1)
target.archiveFiles[i].dst = name + "/" + target.archiveFiles[i].dst
}
zipFile(filename, target.archiveFiles)
log.Println(filename)
}
// buildDeb assembles a Debian package tree under "deb/" for the given
// target: binary, docs, systemd units, and templated DEBIAN control
// files. Any error is fatal.
func buildDeb(target target) {
	os.RemoveAll("deb")
	// "goarch" here is set to whatever the Debian packages expect. We
	// correct it to what we actually know how to build and keep the
	// Debian variant name in "debarch".
	debarch := goarch
	switch goarch {
	case "i386":
		goarch = "386"
	case "armel", "armhf":
		goarch = "arm"
	}
	build(target, []string{"noupgrade"})
	// Expand the {{binary}} placeholder in the packaged file lists.
	for i := range target.debianFiles {
		target.debianFiles[i].src = strings.Replace(target.debianFiles[i].src, "{{binary}}", target.binaryName, 1)
		target.debianFiles[i].dst = strings.Replace(target.debianFiles[i].dst, "{{binary}}", target.binaryName, 1)
	}
	for _, af := range target.debianFiles {
		if err := copyFile(af.src, af.dst, af.perm); err != nil {
			log.Fatal(err)
		}
	}
	os.MkdirAll("deb/DEBIAN", 0755)
	// Data available to the DEBIAN control file templates. version[1:]
	// strips the leading "v" from the version string.
	data := map[string]string{
		"name":    target.name,
		"arch":    debarch,
		"version": version[1:],
		"date":    time.Now().Format(time.RFC1123),
	}
	debTemplateFiles := append(listFiles("debtpl/common"), listFiles("debtpl/"+target.name)...)
	for _, file := range debTemplateFiles {
		tpl, err := template.New(filepath.Base(file)).ParseFiles(file)
		if err != nil {
			log.Fatal(err)
		}
		outFile := filepath.Join("deb/DEBIAN", filepath.Base(file))
		out, err := os.Create(outFile)
		if err != nil {
			log.Fatal(err)
		}
		if err := tpl.Execute(out, data); err != nil {
			log.Fatal(err)
		}
		if err := out.Close(); err != nil {
			log.Fatal(err)
		}
		// Propagate the template's file mode to the generated file.
		// Bug fix: a failed Lstat was previously ignored and would
		// have panicked on info.Mode() with a nil FileInfo.
		info, err := os.Lstat(file)
		if err != nil {
			log.Fatal(err)
		}
		os.Chmod(outFile, info.Mode())
	}
}
func copyFile(src, dst string, perm os.FileMode) error {
dstDir := filepath.Dir(dst)
os.MkdirAll(dstDir, 0755) // ignore error
srcFd, err := os.Open(src)
if err != nil {
return err
}
defer srcFd.Close()
dstFd, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, perm)
if err != nil {
return err
}
defer dstFd.Close()
_, err = io.Copy(dstFd, srcFd)
return err
}
func listFiles(dir string) []string {
var res []string
filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if fi.Mode().IsRegular() {
res = append(res, path)
}
return nil
})
return res
}
func rebuildAssets() {
runPipe("lib/auto/gui.files.go", "go", "run", "script/genassets.go", "gui")
}
func lazyRebuildAssets() {
if shouldRebuildAssets() {
rebuildAssets()
}
}
func shouldRebuildAssets() bool {
info, err := os.Stat("lib/auto/gui.files.go")
if err != nil {
// If the file doesn't exist, we must rebuild it
return true
}
// Check if any of the files in gui/ are newer than the asset file. If
// so we should rebuild it.
currentBuild := info.ModTime()
assetsAreNewer := false
filepath.Walk("gui", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if assetsAreNewer {
return nil
}
assetsAreNewer = info.ModTime().After(currentBuild)
return nil
})
return assetsAreNewer
}
func xdr() {
runPrint("go", "generate", "./lib/discover", "./lib/db", "./lib/protocol", "./lib/relay/protocol")
}
// translate regenerates gui/default/assets/lang/lang-en.json from the
// current GUI sources via script/translate.go.
func translate() {
	// Fatal on chdir failure instead of silently running the pipeline
	// in the wrong directory and clobbering files there.
	if err := os.Chdir("gui/default/assets/lang"); err != nil {
		log.Fatal(err)
	}
	runPipe("lang-en-new.json", "go", "run", "../../../../script/translate.go", "lang-en.json", "../../../")
	os.Remove("lang-en.json")
	err := os.Rename("lang-en-new.json", "lang-en.json")
	if err != nil {
		log.Fatal(err)
	}
	if err := os.Chdir("../../../.."); err != nil {
		log.Fatal(err)
	}
}
func transifex() {
os.Chdir("gui/default/assets/lang")
runPrint("go", "run", "../../../../script/transifexdl.go")
}
func clean() {
rmr("bin")
rmr(filepath.Join(os.Getenv("GOPATH"), fmt.Sprintf("pkg/%s_%s/github.com/syncthing", goos, goarch)))
}
func ldflags() string {
sep := '='
if goVersion > 0 && goVersion < 1.5 {
sep = ' '
}
b := new(bytes.Buffer)
b.WriteString("-w")
fmt.Fprintf(b, " -X main.Version%c%s", sep, version)
fmt.Fprintf(b, " -X main.BuildStamp%c%d", sep, buildStamp())
fmt.Fprintf(b, " -X main.BuildUser%c%s", sep, buildUser())
fmt.Fprintf(b, " -X main.BuildHost%c%s", sep, buildHost())
return b.String()
}
func rmr(paths ...string) {
for _, path := range paths {
log.Println("rm -r", path)
os.RemoveAll(path)
}
}
func getReleaseVersion() (string, error) {
fd, err := os.Open("RELEASE")
if err != nil {
return "", err
}
defer fd.Close()
bs, err := ioutil.ReadAll(fd)
if err != nil {
return "", err
}
return string(bytes.TrimSpace(bs)), nil
}
func getGitVersion() (string, error) {
v, err := runError("git", "describe", "--always", "--dirty")
if err != nil {
return "", err
}
v = versionRe.ReplaceAllFunc(v, func(s []byte) []byte {
s[0] = '+'
return s
})
return string(v), nil
}
func getVersion() string {
// First try for a RELEASE file,
if ver, err := getReleaseVersion(); err == nil {
return ver
}
// ... then see if we have a Git tag.
if ver, err := getGitVersion(); err == nil {
if strings.Contains(ver, "-") {
// The version already contains a hash and stuff. See if we can
// find a current branch name to tack onto it as well.
return ver + getBranchSuffix()
}
return ver
}
// This seems to be a dev build.
return "unknown-dev"
}
// getBranchSuffix returns "-branchname" for the current Git branch, or
// "" for master builds, detached heads, empty output, or branch names
// containing characters outside [a-zA-Z0-9_.-].
func getBranchSuffix() string {
	bs, err := runError("git", "branch", "-a", "--contains")
	if err != nil {
		return ""
	}

	branches := strings.Split(string(bs), "\n")
	if len(branches) == 0 {
		return ""
	}

	branch := ""
	for i, candidate := range branches {
		if strings.HasPrefix(candidate, "*") {
			// This is the current branch. Select it!
			branch = strings.TrimLeft(candidate, " \t*")
			break
		} else if i == 0 {
			// Otherwise the first branch in the list will do.
			// (Bug fix: this previously trimmed the empty "branch"
			// variable instead of "candidate", so the fallback never
			// selected anything.)
			branch = strings.TrimSpace(candidate)
		}
	}

	if branch == "" {
		return ""
	}

	// The branch name may be on the form "remotes/origin/foo" from which we
	// just want "foo".
	parts := strings.Split(branch, "/")
	if len(parts) == 0 || len(parts[len(parts)-1]) == 0 {
		return ""
	}
	branch = parts[len(parts)-1]
	if branch == "master" {
		// master builds are the default.
		return ""
	}

	validBranchRe := regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
	if !validBranchRe.MatchString(branch) {
		// There's some odd stuff in the branch name. Better skip it.
		return ""
	}

	return "-" + branch
}
// buildStamp returns the Unix timestamp of the current Git commit, or
// the current time when not building from a Git checkout or when the
// git output cannot be parsed (previously a parse failure silently
// produced a zero stamp).
func buildStamp() int64 {
	bs, err := runError("git", "show", "-s", "--format=%ct")
	if err != nil {
		return time.Now().Unix()
	}
	s, err := strconv.ParseInt(string(bs), 10, 64)
	if err != nil {
		return time.Now().Unix()
	}
	return s
}
func buildUser() string {
u, err := user.Current()
if err != nil {
return "unknown-user"
}
return strings.Replace(u.Username, " ", "-", -1)
}
func buildHost() string {
h, err := os.Hostname()
if err != nil {
return "unknown-host"
}
return h
}
func buildArch() string {
os := goos
if os == "darwin" {
os = "macosx"
}
return fmt.Sprintf("%s-%s", os, goarch)
}
func archiveName(target target) string {
return fmt.Sprintf("%s-%s-%s", target.name, buildArch(), version)
}
func runError(cmd string, args ...string) ([]byte, error) {
ecmd := exec.Command(cmd, args...)
bs, err := ecmd.CombinedOutput()
return bytes.TrimSpace(bs), err
}
func runPrint(cmd string, args ...string) {
log.Println(cmd, strings.Join(args, " "))
ecmd := exec.Command(cmd, args...)
ecmd.Stdout = os.Stdout
ecmd.Stderr = os.Stderr
err := ecmd.Run()
if err != nil {
log.Fatal(err)
}
}
func runPipe(file, cmd string, args ...string) {
log.Println(cmd, strings.Join(args, " "), ">", file)
fd, err := os.Create(file)
if err != nil {
log.Fatal(err)
}
ecmd := exec.Command(cmd, args...)
ecmd.Stdout = fd
ecmd.Stderr = os.Stderr
err = ecmd.Run()
if err != nil {
log.Fatal(err)
}
fd.Close()
}
func tarGz(out string, files []archiveFile) {
fd, err := os.Create(out)
if err != nil {
log.Fatal(err)
}
gw := gzip.NewWriter(fd)
tw := tar.NewWriter(gw)
for _, f := range files {
sf, err := os.Open(f.src)
if err != nil {
log.Fatal(err)
}
info, err := sf.Stat()
if err != nil {
log.Fatal(err)
}
h := &tar.Header{
Name: f.dst,
Size: info.Size(),
Mode: int64(info.Mode()),
ModTime: info.ModTime(),
}
err = tw.WriteHeader(h)
if err != nil {
log.Fatal(err)
}
_, err = io.Copy(tw, sf)
if err != nil {
log.Fatal(err)
}
sf.Close()
}
err = tw.Close()
if err != nil {
log.Fatal(err)
}
err = gw.Close()
if err != nil {
log.Fatal(err)
}
err = fd.Close()
if err != nil {
log.Fatal(err)
}
}
// zipFile creates a zip archive at out containing the given files.
// Files whose destination ends in ".txt" have their line endings
// converted to CRLF for Windows users; other files are copied
// verbatim. Any error is fatal.
func zipFile(out string, files []archiveFile) {
	fd, err := os.Create(out)
	if err != nil {
		log.Fatal(err)
	}
	zw := zip.NewWriter(fd)
	for _, f := range files {
		sf, err := os.Open(f.src)
		if err != nil {
			log.Fatal(err)
		}
		info, err := sf.Stat()
		if err != nil {
			log.Fatal(err)
		}
		fh, err := zip.FileInfoHeader(info)
		if err != nil {
			log.Fatal(err)
		}
		fh.Name = f.dst
		fh.Method = zip.Deflate
		if strings.HasSuffix(f.dst, ".txt") {
			// Text file. Read it and convert line endings.
			bs, err := ioutil.ReadAll(sf)
			if err != nil {
				log.Fatal(err)
			}
			// Bug fix: CRLF is "\r\n"; the old code produced "\n\r".
			bs = bytes.Replace(bs, []byte{'\n'}, []byte{'\r', '\n'}, -1)
			// The conversion changed the size; update the header.
			fh.UncompressedSize = uint32(len(bs))
			fh.UncompressedSize64 = uint64(len(bs))
			of, err := zw.CreateHeader(fh)
			if err != nil {
				log.Fatal(err)
			}
			if _, err := of.Write(bs); err != nil {
				log.Fatal(err)
			}
		} else {
			// Binary file. Copy verbatim.
			of, err := zw.CreateHeader(fh)
			if err != nil {
				log.Fatal(err)
			}
			_, err = io.Copy(of, sf)
			if err != nil {
				log.Fatal(err)
			}
		}
		// Close each source file as we go instead of leaking the
		// descriptors until process exit.
		sf.Close()
	}
	err = zw.Close()
	if err != nil {
		log.Fatal(err)
	}
	err = fd.Close()
	if err != nil {
		log.Fatal(err)
	}
}
func vet(dirs ...string) {
params := []string{"tool", "vet", "-all"}
params = append(params, dirs...)
bs, err := runError("go", params...)
if len(bs) > 0 {
log.Printf("%s", bs)
}
if err != nil {
if exitStatus(err) == 3 {
// Exit code 3, the "vet" tool is not installed
return
}
// A genuine error exit from the vet tool.
log.Fatal(err)
}
}
func lint(pkg string) {
bs, err := runError("golint", pkg)
if err != nil {
log.Println(`- No golint, not linting. Try "go get -u github.com/golang/lint/golint".`)
return
}
analCommentPolicy := regexp.MustCompile(`exported (function|method|const|type|var) [^\s]+ should have comment`)
for _, line := range bytes.Split(bs, []byte("\n")) {
if analCommentPolicy.Match(line) {
continue
}
if len(line) > 0 {
log.Printf("%s", line)
}
}
}
func macosCodesign(file string) {
if pass := os.Getenv("CODESIGN_KEYCHAIN_PASS"); pass != "" {
bs, err := runError("security", "unlock-keychain", "-p", pass)
if err != nil {
log.Println("Codesign: unlocking keychain failed:", string(bs))
return
}
}
if id := os.Getenv("CODESIGN_IDENTITY"); id != "" {
bs, err := runError("codesign", "-s", id, file)
if err != nil {
log.Println("Codesign: signing failed:", string(bs))
return
}
log.Println("Codesign: successfully signed", file)
}
}
func exitStatus(err error) int {
if err, ok := err.(*exec.ExitError); ok {
if ws, ok := err.ProcessState.Sys().(syscall.WaitStatus); ok {
return ws.ExitStatus()
}
}
return -1
}
func isGometalinterInstalled() bool {
if _, err := runError("gometalinter", "--disable-all"); err != nil {
log.Println("gometalinter is not installed")
return false
}
return true
}
func gometalinter(linter string, dirs []string, excludes ...string) {
params := []string{"--disable-all"}
params = append(params, fmt.Sprintf("--deadline=%ds", 60))
params = append(params, "--enable="+linter)
for _, exclude := range excludes {
params = append(params, "--exclude="+exclude)
}
for _, dir := range dirs {
params = append(params, dir)
}
bs, err := runError("gometalinter", params...)
if len(bs) > 0 {
log.Printf("%s", bs)
}
if err != nil {
log.Printf("%v", err)
}
}
|
Fix divide by zero in common.DemoHeader.FrameTime (#109)
* Return 0 in common.DemoHeader.FrameTime if PlaybackFrames is 0 to avoid dividing by zero
Co-Authored-By: marksamman <da2eb6183c9fa7ea83988ee64046b05ce0ddaae1@gmail.com>
|
// Input/output handling for GoAWK interpreter
package interp
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strconv"
"strings"
. "github.com/benhoyt/goawk/internal/ast"
. "github.com/benhoyt/goawk/lexer"
)
// printLine writes line to writer followed by the output record
// separator (ORS), returning the first write error encountered.
func (p *interp) printLine(writer io.Writer, line string) error {
	if err := writeOutput(writer, line); err != nil {
		return err
	}
	return writeOutput(writer, p.outputRecordSep)
}
// Implement a buffered version of WriteCloser so output is buffered
// when redirecting to a file (eg: print >"out")
type bufferedWriteCloser struct {
	*bufio.Writer // buffers writes; must be flushed before closing
	io.Closer     // underlying destination (file or pipe) to close
}
// newBufferedWriteClose wraps w in a bufferedWriteCloser sized to the
// interpreter's standard output buffer size.
// NOTE(review): the name looks like a typo for newBufferedWriteCloser;
// renaming would touch all call sites, so it is left as-is here.
func newBufferedWriteClose(w io.WriteCloser) *bufferedWriteCloser {
	writer := bufio.NewWriterSize(w, outputBufSize)
	return &bufferedWriteCloser{writer, w}
}
func (wc *bufferedWriteCloser) Close() error {
err := wc.Writer.Flush()
if err != nil {
return err
}
return wc.Closer.Close()
}
// Determine the output stream for given redirect token and
// destination (file or pipe name). Streams are cached per name in
// p.outputStreams, so repeated prints to the same target reuse the
// same writer.
func (p *interp) getOutputStream(redirect Token, dest Expr) (io.Writer, error) {
	if redirect == ILLEGAL {
		// Token "ILLEGAL" means send to standard output
		return p.output, nil
	}
	destValue, err := p.eval(dest)
	if err != nil {
		return nil, err
	}
	name := p.toString(destValue)
	// A name may be a reader or a writer, never both at once.
	if _, ok := p.inputStreams[name]; ok {
		return nil, newError("can't write to reader stream")
	}
	if w, ok := p.outputStreams[name]; ok {
		return w, nil
	}
	switch redirect {
	case GREATER, APPEND:
		// Write or append to file
		flags := os.O_CREATE | os.O_WRONLY
		if redirect == GREATER {
			flags |= os.O_TRUNC
		} else {
			flags |= os.O_APPEND
		}
		w, err := os.OpenFile(name, flags, 0644)
		if err != nil {
			return nil, newError("output redirection error: %s", err)
		}
		buffered := newBufferedWriteClose(w)
		p.outputStreams[name] = buffered
		return buffered, nil
	case PIPE:
		// Pipe to command
		cmd := exec.Command("sh", "-c", name)
		w, err := cmd.StdinPipe()
		if err != nil {
			return nil, newError("error connecting to stdin pipe: %v", err)
		}
		stdout, err := cmd.StdoutPipe()
		if err != nil {
			return nil, newError("error connecting to stdout pipe: %v", err)
		}
		stderr, err := cmd.StderrPipe()
		if err != nil {
			return nil, newError("error connecting to stderr pipe: %v", err)
		}
		err = cmd.Start()
		if err != nil {
			// Start failure (eg: sh not found) is reported, and output
			// is silently discarded rather than erroring the program.
			fmt.Fprintln(p.errorOutput, err)
			return ioutil.Discard, nil
		}
		// Relay the command's stdout/stderr asynchronously; the
		// command itself is reaped in closeAll via cmd.Wait().
		go func() {
			io.Copy(p.output, stdout)
		}()
		go func() {
			io.Copy(p.errorOutput, stderr)
		}()
		p.commands[name] = cmd
		p.outputStreams[name] = w
		return w, nil
	default:
		// Should never happen
		panic(fmt.Sprintf("unexpected redirect type %s", redirect))
	}
}
// Get input Scanner to use for "getline" based on file name.
// The open file and its scanner are cached per name so repeated
// getlines continue from the current position.
func (p *interp) getInputScannerFile(name string) (*bufio.Scanner, error) {
	if _, ok := p.outputStreams[name]; ok {
		return nil, newError("can't read from writer stream")
	}
	if _, ok := p.inputStreams[name]; ok {
		return p.scanners[name], nil
	}
	r, err := os.Open(name)
	if err != nil {
		return nil, newError("input redirection error: %s", err)
	}
	scanner := p.newScanner(r)
	p.scanners[name] = scanner
	p.inputStreams[name] = r
	return scanner, nil
}

// Get input Scanner to use for "getline" based on pipe name.
// The shell command is started once per name and cached; its stdout
// feeds the scanner, its stdin is fed from p.stdin, and stderr is
// relayed to p.errorOutput.
func (p *interp) getInputScannerPipe(name string) (*bufio.Scanner, error) {
	if _, ok := p.outputStreams[name]; ok {
		return nil, newError("can't read from writer stream")
	}
	if _, ok := p.inputStreams[name]; ok {
		return p.scanners[name], nil
	}
	cmd := exec.Command("sh", "-c", name)
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return nil, newError("error connecting to stdin pipe: %v", err)
	}
	r, err := cmd.StdoutPipe()
	if err != nil {
		return nil, newError("error connecting to stdout pipe: %v", err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, newError("error connecting to stderr pipe: %v", err)
	}
	err = cmd.Start()
	if err != nil {
		// Report the failure and hand back an empty scanner so the
		// getline simply sees end of input.
		fmt.Fprintln(p.errorOutput, err)
		return bufio.NewScanner(strings.NewReader("")), nil
	}
	go func() {
		io.Copy(stdin, p.stdin)
		stdin.Close()
	}()
	go func() {
		io.Copy(p.errorOutput, stderr)
	}()
	scanner := p.newScanner(r)
	p.commands[name] = cmd
	p.inputStreams[name] = r
	p.scanners[name] = scanner
	return scanner, nil
}
// Create a new buffered Scanner for reading input records.
// The split function is chosen from p.recordSep (RS): "\n" keeps the
// bufio default, "" splits on blank lines, anything else splits on a
// single byte.
func (p *interp) newScanner(input io.Reader) *bufio.Scanner {
	scanner := bufio.NewScanner(input)
	switch p.recordSep {
	case "\n":
		// Scanner default is to split on newlines
	case "":
		// Empty string for RS means split on \n\n (blank lines)
		scanner.Split(scanLinesBlank)
	default:
		// NOTE(review): only the first byte of a multi-char RS is
		// honored here — confirm that's the intended POSIX behavior.
		splitter := byteSplitter{p.recordSep[0]}
		scanner.Split(splitter.scan)
	}
	// inputBufSize and maxRecordLength are package-level limits.
	buffer := make([]byte, inputBufSize)
	scanner.Buffer(buffer, maxRecordLength)
	return scanner
}
// Copied from bufio/scan.go in the stdlib: I guess it's a bit more
// efficient than bytes.TrimSuffix(data, []byte("\r"))
func dropCR(data []byte) []byte {
	if len(data) > 0 && data[len(data)-1] == '\r' {
		return data[:len(data)-1]
	}
	return data
}

// dropLF strips a single trailing '\n', if present.
func dropLF(data []byte) []byte {
	if len(data) > 0 && data[len(data)-1] == '\n' {
		return data[:len(data)-1]
	}
	return data
}

// scanLinesBlank is the bufio.SplitFunc used when RS=="": records are
// separated by blank lines ("\n\n", or "\n\r\n" on Windows), and runs
// of newlines around a record are consumed.
func scanLinesBlank(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	// Skip newlines at beginning of data
	i := 0
	for i < len(data) && (data[i] == '\n' || data[i] == '\r') {
		i++
	}
	if i >= len(data) {
		// At end of data after newlines, skip entire data block
		return i, nil, nil
	}
	start := i
	// Try to find two consecutive newlines (or \n\r\n for Windows)
	for ; i < len(data); i++ {
		if data[i] != '\n' {
			continue
		}
		end := i
		if i+1 < len(data) && data[i+1] == '\n' {
			i += 2
			for i < len(data) && (data[i] == '\n' || data[i] == '\r') {
				i++ // Skip newlines at end of record
			}
			return i, dropCR(data[start:end]), nil
		}
		if i+2 < len(data) && data[i+1] == '\r' && data[i+2] == '\n' {
			i += 3
			for i < len(data) && (data[i] == '\n' || data[i] == '\r') {
				i++ // Skip newlines at end of record
			}
			return i, dropCR(data[start:end]), nil
		}
	}
	// If we're at EOF, we have one final record; return it.
	// Bug fix: slice from 'start' so newlines skipped at the beginning
	// are not included in the last record (RS="" with blank lines
	// before the final record).
	if atEOF {
		return len(data), dropCR(dropLF(data[start:])), nil
	}
	// Request more data
	return 0, nil, nil
}
// byteSplitter provides a bufio.SplitFunc that breaks input records on
// a single separator byte.
type byteSplitter struct {
	sep byte
}

// scan implements the bufio.SplitFunc contract for the separator byte.
func (s byteSplitter) scan(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if i := bytes.IndexByte(data, s.sep); i >= 0 {
		// Full separator-terminated record; consume the separator too.
		return i + 1, data[:i], nil
	}
	if atEOF {
		if len(data) == 0 {
			// Nothing left to emit.
			return 0, nil, nil
		}
		// Final record without a trailing separator.
		return len(data), data, nil
	}
	// Need more input before we can emit a record.
	return 0, nil, nil
}
// Setup for a new input file with given name (empty string if stdin);
// resets the per-file line counter (FNR).
func (p *interp) setFile(filename string) {
	p.filename = filename
	p.fileLineNum = 0
}

// Setup for a new input line (but don't parse it into fields till we
// need to)
func (p *interp) setLine(line string) {
	p.line = line
	p.haveFields = false
}

// Ensure that the current line is parsed into fields, splitting it
// into fields if it hasn't been already
func (p *interp) ensureFields() {
	if p.haveFields {
		return
	}
	p.haveFields = true
	if p.fieldSep == " " {
		// FS space (default) means split fields on any whitespace
		p.fields = strings.Fields(p.line)
	} else if p.line == "" {
		// An empty record has no fields regardless of FS.
		p.fields = nil
	} else {
		// Split on FS as a regex
		p.fields = p.fieldSepRegex.Split(p.line, -1)
		// Special case for when RS=="" and FS is single character,
		// split on newline in addition to FS. See more here:
		// https://www.gnu.org/software/gawk/manual/html_node/Multiple-Line.html
		if p.recordSep == "" && len(p.fieldSep) == 1 {
			fields := make([]string, 0, len(p.fields))
			for _, field := range p.fields {
				lines := strings.Split(field, "\n")
				for _, line := range lines {
					trimmed := strings.TrimSuffix(line, "\r")
					fields = append(fields, trimmed)
				}
			}
			p.fields = fields
		}
	}
	// numFields backs the NF variable.
	p.numFields = len(p.fields)
}
// Fetch next line (record) of input from current input file, opening
// next input file if done with previous one. Returns io.EOF when all
// ARGV inputs are exhausted.
func (p *interp) nextLine() (string, error) {
	for {
		if p.scanner == nil {
			// No current scanner: close the previous file (if any) and
			// advance to the next input source.
			if prevInput, ok := p.input.(io.Closer); ok && p.input != p.stdin {
				// Previous input is file, close it
				prevInput.Close()
			}
			if p.filenameIndex >= p.argc && !p.hadFiles {
				// Moved past number of ARGV args and haven't seen
				// any files yet, use stdin
				p.input = p.stdin
				p.setFile("")
				p.hadFiles = true
			} else {
				if p.filenameIndex >= p.argc {
					// Done with ARGV args, all done with input
					return "", io.EOF
				}
				// Fetch next filename from ARGV
				index := strconv.Itoa(p.filenameIndex)
				argvIndex := p.program.Arrays["ARGV"]
				filename := p.toString(p.getArrayValue(ScopeGlobal, argvIndex, index))
				p.filenameIndex++
				// Is it actually a var=value assignment?
				matches := varRegex.FindStringSubmatch(filename)
				if len(matches) >= 3 {
					// Yep, set variable to value and keep going
					err := p.setVarByName(matches[1], matches[2])
					if err != nil {
						return "", err
					}
					continue
				} else if filename == "" {
					// ARGV arg is empty string, skip
					p.input = nil
					continue
				} else if filename == "-" {
					// ARGV arg is "-" meaning stdin
					p.input = p.stdin
					p.setFile("")
				} else {
					// A regular file name, open it
					input, err := os.Open(filename)
					if err != nil {
						return "", err
					}
					p.input = input
					p.setFile(filename)
					p.hadFiles = true
				}
			}
			p.scanner = p.newScanner(p.input)
		}
		if p.scanner.Scan() {
			// We scanned some input, break and return it
			break
		}
		if err := p.scanner.Err(); err != nil {
			return "", fmt.Errorf("error reading from input: %s", err)
		}
		// Signal loop to move onto next file
		p.scanner = nil
	}
	// Got a line (record) of input, return it; NR and FNR are bumped here.
	p.lineNum++
	p.fileLineNum++
	return p.scanner.Text(), nil
}
// Write output string to given writer, producing correct line endings
// on Windows (CR LF). crlfNewline is a package-level flag.
func writeOutput(w io.Writer, s string) error {
	if crlfNewline {
		// First normalize to \n, then convert all newlines to \r\n
		// (on Windows). NOTE: creating two new strings is almost
		// certainly slow; would be better to create a custom Writer.
		s = strings.Replace(s, "\r\n", "\n", -1)
		s = strings.Replace(s, "\n", "\r\n", -1)
	}
	_, err := io.WriteString(w, s)
	return err
}

// Close all streams, commands, etc (after program execution).
// Individual close/wait errors are deliberately ignored: this is
// best-effort teardown.
func (p *interp) closeAll() {
	if prevInput, ok := p.input.(io.Closer); ok {
		prevInput.Close()
	}
	for _, r := range p.inputStreams {
		_ = r.Close()
	}
	for _, w := range p.outputStreams {
		_ = w.Close()
	}
	for _, cmd := range p.commands {
		_ = cmd.Wait()
	}
	// flushOutput/flushError are only set when the interp wrapped the
	// writers in *bufio.Writer itself, so these assertions are safe.
	if p.flushOutput {
		p.output.(*bufio.Writer).Flush()
	}
	if p.flushError {
		p.errorOutput.(*bufio.Writer).Flush()
	}
}
Fix RS="" when there are newlines at the start of the last record (rsnul1nl)
// Input/output handling for GoAWK interpreter
package interp
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strconv"
"strings"
. "github.com/benhoyt/goawk/internal/ast"
. "github.com/benhoyt/goawk/lexer"
)
// Print a line of output followed by a newline
// (p.outputRecordSep holds the ORS value, "\n" by default).
func (p *interp) printLine(writer io.Writer, line string) error {
	err := writeOutput(writer, line)
	if err != nil {
		return err
	}
	// Separator goes through writeOutput too, so both parts get the
	// same newline normalization on Windows.
	return writeOutput(writer, p.outputRecordSep)
}

// Implement a buffered version of WriteCloser so output is buffered
// when redirecting to a file (eg: print >"out")
type bufferedWriteCloser struct {
	*bufio.Writer // buffers writes to the underlying destination
	io.Closer     // the original destination (file or pipe)
}

// newBufferedWriteClose wraps w with a buffer of the package-level
// outputBufSize.
func newBufferedWriteClose(w io.WriteCloser) *bufferedWriteCloser {
	writer := bufio.NewWriterSize(w, outputBufSize)
	return &bufferedWriteCloser{writer, w}
}

// Close flushes buffered output before closing the underlying stream;
// a flush error takes precedence over the close error.
func (wc *bufferedWriteCloser) Close() error {
	err := wc.Writer.Flush()
	if err != nil {
		return err
	}
	return wc.Closer.Close()
}
// Determine the output stream for given redirect token and
// destination (file or pipe name). Streams are cached per name in
// p.outputStreams, so repeated prints to the same target reuse the
// same writer.
func (p *interp) getOutputStream(redirect Token, dest Expr) (io.Writer, error) {
	if redirect == ILLEGAL {
		// Token "ILLEGAL" means send to standard output
		return p.output, nil
	}
	destValue, err := p.eval(dest)
	if err != nil {
		return nil, err
	}
	name := p.toString(destValue)
	// A name may be a reader or a writer, never both at once.
	if _, ok := p.inputStreams[name]; ok {
		return nil, newError("can't write to reader stream")
	}
	if w, ok := p.outputStreams[name]; ok {
		return w, nil
	}
	switch redirect {
	case GREATER, APPEND:
		// Write or append to file
		flags := os.O_CREATE | os.O_WRONLY
		if redirect == GREATER {
			flags |= os.O_TRUNC
		} else {
			flags |= os.O_APPEND
		}
		w, err := os.OpenFile(name, flags, 0644)
		if err != nil {
			return nil, newError("output redirection error: %s", err)
		}
		buffered := newBufferedWriteClose(w)
		p.outputStreams[name] = buffered
		return buffered, nil
	case PIPE:
		// Pipe to command
		cmd := exec.Command("sh", "-c", name)
		w, err := cmd.StdinPipe()
		if err != nil {
			return nil, newError("error connecting to stdin pipe: %v", err)
		}
		stdout, err := cmd.StdoutPipe()
		if err != nil {
			return nil, newError("error connecting to stdout pipe: %v", err)
		}
		stderr, err := cmd.StderrPipe()
		if err != nil {
			return nil, newError("error connecting to stderr pipe: %v", err)
		}
		err = cmd.Start()
		if err != nil {
			// Start failure (eg: sh not found) is reported, and output
			// is silently discarded rather than erroring the program.
			fmt.Fprintln(p.errorOutput, err)
			return ioutil.Discard, nil
		}
		// Relay the command's stdout/stderr asynchronously; the
		// command itself is reaped in closeAll via cmd.Wait().
		go func() {
			io.Copy(p.output, stdout)
		}()
		go func() {
			io.Copy(p.errorOutput, stderr)
		}()
		p.commands[name] = cmd
		p.outputStreams[name] = w
		return w, nil
	default:
		// Should never happen
		panic(fmt.Sprintf("unexpected redirect type %s", redirect))
	}
}
// Get input Scanner to use for "getline" based on file name.
// The open file and its scanner are cached per name so repeated
// getlines continue from the current position.
func (p *interp) getInputScannerFile(name string) (*bufio.Scanner, error) {
	if _, ok := p.outputStreams[name]; ok {
		return nil, newError("can't read from writer stream")
	}
	if _, ok := p.inputStreams[name]; ok {
		return p.scanners[name], nil
	}
	r, err := os.Open(name)
	if err != nil {
		return nil, newError("input redirection error: %s", err)
	}
	scanner := p.newScanner(r)
	p.scanners[name] = scanner
	p.inputStreams[name] = r
	return scanner, nil
}

// Get input Scanner to use for "getline" based on pipe name.
// The shell command is started once per name and cached; its stdout
// feeds the scanner, its stdin is fed from p.stdin, and stderr is
// relayed to p.errorOutput.
func (p *interp) getInputScannerPipe(name string) (*bufio.Scanner, error) {
	if _, ok := p.outputStreams[name]; ok {
		return nil, newError("can't read from writer stream")
	}
	if _, ok := p.inputStreams[name]; ok {
		return p.scanners[name], nil
	}
	cmd := exec.Command("sh", "-c", name)
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return nil, newError("error connecting to stdin pipe: %v", err)
	}
	r, err := cmd.StdoutPipe()
	if err != nil {
		return nil, newError("error connecting to stdout pipe: %v", err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, newError("error connecting to stderr pipe: %v", err)
	}
	err = cmd.Start()
	if err != nil {
		// Report the failure and hand back an empty scanner so the
		// getline simply sees end of input.
		fmt.Fprintln(p.errorOutput, err)
		return bufio.NewScanner(strings.NewReader("")), nil
	}
	go func() {
		io.Copy(stdin, p.stdin)
		stdin.Close()
	}()
	go func() {
		io.Copy(p.errorOutput, stderr)
	}()
	scanner := p.newScanner(r)
	p.commands[name] = cmd
	p.inputStreams[name] = r
	p.scanners[name] = scanner
	return scanner, nil
}
// Create a new buffered Scanner for reading input records.
// The split function is chosen from p.recordSep (RS): "\n" keeps the
// bufio default, "" splits on blank lines, anything else splits on a
// single byte.
func (p *interp) newScanner(input io.Reader) *bufio.Scanner {
	scanner := bufio.NewScanner(input)
	switch p.recordSep {
	case "\n":
		// Scanner default is to split on newlines
	case "":
		// Empty string for RS means split on \n\n (blank lines)
		scanner.Split(scanLinesBlank)
	default:
		// NOTE(review): only the first byte of a multi-char RS is
		// honored here — confirm that's the intended POSIX behavior.
		splitter := byteSplitter{p.recordSep[0]}
		scanner.Split(splitter.scan)
	}
	// inputBufSize and maxRecordLength are package-level limits.
	buffer := make([]byte, inputBufSize)
	scanner.Buffer(buffer, maxRecordLength)
	return scanner
}
// dropCR strips one trailing carriage return, if present.
// (Same trick as bufio/scan.go; cheaper than bytes.TrimSuffix for the
// one-byte case.)
func dropCR(data []byte) []byte {
	if n := len(data); n > 0 && data[n-1] == '\r' {
		return data[:n-1]
	}
	return data
}

// dropLF strips one trailing line feed, if present.
func dropLF(data []byte) []byte {
	if n := len(data); n > 0 && data[n-1] == '\n' {
		return data[:n-1]
	}
	return data
}
// scanLinesBlank is the bufio.SplitFunc used when RS=="": records are
// separated by blank lines ("\n\n", or "\n\r\n" on Windows), and runs
// of newlines around a record are consumed.
func scanLinesBlank(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	// Skip newlines at beginning of data
	i := 0
	for i < len(data) && (data[i] == '\n' || data[i] == '\r') {
		i++
	}
	if i >= len(data) {
		// At end of data after newlines, skip entire data block
		return i, nil, nil
	}
	start := i
	// Try to find two consecutive newlines (or \n\r\n for Windows)
	for ; i < len(data); i++ {
		if data[i] != '\n' {
			continue
		}
		end := i
		if i+1 < len(data) && data[i+1] == '\n' {
			i += 2
			for i < len(data) && (data[i] == '\n' || data[i] == '\r') {
				i++ // Skip newlines at end of record
			}
			return i, dropCR(data[start:end]), nil
		}
		if i+2 < len(data) && data[i+1] == '\r' && data[i+2] == '\n' {
			i += 3
			for i < len(data) && (data[i] == '\n' || data[i] == '\r') {
				i++ // Skip newlines at end of record
			}
			return i, dropCR(data[start:end]), nil
		}
	}
	// If we're at EOF, we have one final record; return it
	// (sliced from 'start' so leading newlines aren't included).
	if atEOF {
		return len(data), dropCR(dropLF(data[start:])), nil
	}
	// Request more data
	return 0, nil, nil
}
// byteSplitter provides a bufio.SplitFunc that breaks input records on
// a single separator byte.
type byteSplitter struct {
	sep byte
}

// scan implements the bufio.SplitFunc contract for the separator byte.
func (s byteSplitter) scan(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if i := bytes.IndexByte(data, s.sep); i >= 0 {
		// Full separator-terminated record; consume the separator too.
		return i + 1, data[:i], nil
	}
	if atEOF {
		if len(data) == 0 {
			// Nothing left to emit.
			return 0, nil, nil
		}
		// Final record without a trailing separator.
		return len(data), data, nil
	}
	// Need more input before we can emit a record.
	return 0, nil, nil
}
// Setup for a new input file with given name (empty string if stdin);
// resets the per-file line counter (FNR).
func (p *interp) setFile(filename string) {
	p.filename = filename
	p.fileLineNum = 0
}

// Setup for a new input line (but don't parse it into fields till we
// need to)
func (p *interp) setLine(line string) {
	p.line = line
	p.haveFields = false
}

// Ensure that the current line is parsed into fields, splitting it
// into fields if it hasn't been already
func (p *interp) ensureFields() {
	if p.haveFields {
		return
	}
	p.haveFields = true
	if p.fieldSep == " " {
		// FS space (default) means split fields on any whitespace
		p.fields = strings.Fields(p.line)
	} else if p.line == "" {
		// An empty record has no fields regardless of FS.
		p.fields = nil
	} else {
		// Split on FS as a regex
		p.fields = p.fieldSepRegex.Split(p.line, -1)
		// Special case for when RS=="" and FS is single character,
		// split on newline in addition to FS. See more here:
		// https://www.gnu.org/software/gawk/manual/html_node/Multiple-Line.html
		if p.recordSep == "" && len(p.fieldSep) == 1 {
			fields := make([]string, 0, len(p.fields))
			for _, field := range p.fields {
				lines := strings.Split(field, "\n")
				for _, line := range lines {
					trimmed := strings.TrimSuffix(line, "\r")
					fields = append(fields, trimmed)
				}
			}
			p.fields = fields
		}
	}
	// numFields backs the NF variable.
	p.numFields = len(p.fields)
}
// Fetch next line (record) of input from current input file, opening
// next input file if done with previous one. Returns io.EOF when all
// ARGV inputs are exhausted.
func (p *interp) nextLine() (string, error) {
	for {
		if p.scanner == nil {
			// No current scanner: close the previous file (if any) and
			// advance to the next input source.
			if prevInput, ok := p.input.(io.Closer); ok && p.input != p.stdin {
				// Previous input is file, close it
				prevInput.Close()
			}
			if p.filenameIndex >= p.argc && !p.hadFiles {
				// Moved past number of ARGV args and haven't seen
				// any files yet, use stdin
				p.input = p.stdin
				p.setFile("")
				p.hadFiles = true
			} else {
				if p.filenameIndex >= p.argc {
					// Done with ARGV args, all done with input
					return "", io.EOF
				}
				// Fetch next filename from ARGV
				index := strconv.Itoa(p.filenameIndex)
				argvIndex := p.program.Arrays["ARGV"]
				filename := p.toString(p.getArrayValue(ScopeGlobal, argvIndex, index))
				p.filenameIndex++
				// Is it actually a var=value assignment?
				matches := varRegex.FindStringSubmatch(filename)
				if len(matches) >= 3 {
					// Yep, set variable to value and keep going
					err := p.setVarByName(matches[1], matches[2])
					if err != nil {
						return "", err
					}
					continue
				} else if filename == "" {
					// ARGV arg is empty string, skip
					p.input = nil
					continue
				} else if filename == "-" {
					// ARGV arg is "-" meaning stdin
					p.input = p.stdin
					p.setFile("")
				} else {
					// A regular file name, open it
					input, err := os.Open(filename)
					if err != nil {
						return "", err
					}
					p.input = input
					p.setFile(filename)
					p.hadFiles = true
				}
			}
			p.scanner = p.newScanner(p.input)
		}
		if p.scanner.Scan() {
			// We scanned some input, break and return it
			break
		}
		if err := p.scanner.Err(); err != nil {
			return "", fmt.Errorf("error reading from input: %s", err)
		}
		// Signal loop to move onto next file
		p.scanner = nil
	}
	// Got a line (record) of input, return it; NR and FNR are bumped here.
	p.lineNum++
	p.fileLineNum++
	return p.scanner.Text(), nil
}
// Write output string to given writer, producing correct line endings
// on Windows (CR LF). crlfNewline is a package-level flag.
func writeOutput(w io.Writer, s string) error {
	if crlfNewline {
		// First normalize to \n, then convert all newlines to \r\n
		// (on Windows). NOTE: creating two new strings is almost
		// certainly slow; would be better to create a custom Writer.
		s = strings.Replace(s, "\r\n", "\n", -1)
		s = strings.Replace(s, "\n", "\r\n", -1)
	}
	_, err := io.WriteString(w, s)
	return err
}

// Close all streams, commands, etc (after program execution).
// Individual close/wait errors are deliberately ignored: this is
// best-effort teardown.
func (p *interp) closeAll() {
	if prevInput, ok := p.input.(io.Closer); ok {
		prevInput.Close()
	}
	for _, r := range p.inputStreams {
		_ = r.Close()
	}
	for _, w := range p.outputStreams {
		_ = w.Close()
	}
	for _, cmd := range p.commands {
		_ = cmd.Wait()
	}
	// flushOutput/flushError are only set when the interp wrapped the
	// writers in *bufio.Writer itself, so these assertions are safe.
	if p.flushOutput {
		p.output.(*bufio.Writer).Flush()
	}
	if p.flushError {
		p.errorOutput.(*bufio.Writer).Flush()
	}
}
|
change the interval field of metadata plugin to execution_interval
|
package main
import (
"database/sql"
"encoding/xml"
"flag"
irc "github.com/fluffle/goirc/client"
_ "github.com/go-sql-driver/mysql"
"io/ioutil"
"log"
"net/http"
"strings"
)
// Config holds the bot settings loaded from config.xml.
type Config struct {
	Channel string // IRC channel to join (eg: "#chan")
	DBConn  string // MySQL DSN for the message log
	BotName string // nick used when connecting
}

// config is populated once in main from config.xml.
var config Config
// sendUrl fetches url and, if the response contains a <title> element,
// sends "Title: <contents>" to the IRC channel. Fetch errors are
// silently ignored (best-effort, as before).
func sendUrl(channel, url string, conn *irc.Conn) {
	resp, err := http.Get(url)
	if err != nil {
		return
	}
	// Bug fix: the body was never closed, leaking the connection.
	defer resp.Body.Close()
	respbody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return
	}
	stringbody := string(respbody)
	titlestart := strings.Index(stringbody, "<title>")
	titleend := strings.Index(stringbody, "</title>")
	// Bug fix: the original tested titlestart twice, so a page with
	// "<title>" but no "</title>" sliced with titleend == -1 and
	// panicked. Also guard against "</title>" appearing first.
	if titlestart != -1 && titleend != -1 && titleend > titlestart {
		title := string(respbody[titlestart+7 : titleend])
		title = "Title: " + title
		conn.Privmsg(channel, title)
	}
}
// handleMessage is the PRIVMSG handler: it announces titles for any
// http(s) links in the message and logs the message to MySQL.
func handleMessage(conn *irc.Conn, line *irc.Line) {
	for _, word := range strings.Split(line.Args[1], " ") {
		word = strings.TrimSpace(word)
		if strings.HasPrefix(word, "http") {
			go sendUrl(line.Args[0], word, conn)
		}
	}
	db, err := sql.Open("mysql", config.DBConn)
	if err != nil {
		// Bug fix: previously execution fell through with an unusable
		// handle and still called db.Exec/db.Close on it; bail instead.
		log.Println(err)
		return
	}
	defer db.Close()
	_, err = db.Exec("insert into messages (Nick, Ident, Host, Src, Cmd, Channel, Message, Time) values (?, ?, ?, ?, ?, ?, ?, ?)", line.Nick, line.Ident, line.Host, line.Src, line.Cmd, line.Args[0], line.Args[1], line.Time)
	if err != nil {
		log.Println(err)
	}
}
// main loads config.xml, connects to freenode, and runs until the
// server disconnects us.
func main() {
	flag.Parse()
	xmlFile, err := ioutil.ReadFile("config.xml")
	if err != nil {
		log.Fatal(err)
	}
	// Bug fix: the unmarshal error was ignored, so a malformed config
	// silently produced an empty Config (empty channel and nick).
	if err := xml.Unmarshal(xmlFile, &config); err != nil {
		log.Fatal(err)
	}
	log.Printf("Joining channel %s", config.Channel)
	c := irc.SimpleClient(config.BotName)
	c.AddHandler(irc.CONNECTED,
		func(conn *irc.Conn, line *irc.Line) {
			conn.Join(config.Channel)
			log.Println("Connected!")
		})
	quit := make(chan bool)
	c.AddHandler(irc.DISCONNECTED,
		func(conn *irc.Conn, line *irc.Line) { quit <- true })
	c.AddHandler("PRIVMSG", handleMessage)
	if err := c.Connect("irc.freenode.net"); err != nil {
		// Bug fix: log.Fatalln does not interpret format verbs; Fatalf does.
		log.Fatalf("Connection error: %s\n", err)
	}
	<-quit
}
Unescape HTML so I don't get funky codes
package main
import (
"database/sql"
"encoding/xml"
"flag"
irc "github.com/fluffle/goirc/client"
_ "github.com/go-sql-driver/mysql"
"io/ioutil"
"log"
"net/http"
"strings"
"html"
)
// Config holds the bot settings loaded from config.xml.
type Config struct {
	Channel string // IRC channel to join (eg: "#chan")
	DBConn  string // MySQL DSN for the message log
	BotName string // nick used when connecting
}

// config is populated once in main from config.xml.
var config Config
// sendUrl fetches url and, if the response contains a <title> element,
// sends "Title: <contents>" (HTML entities unescaped) to the channel.
// Fetch errors are silently ignored (best-effort, as before).
func sendUrl(channel, url string, conn *irc.Conn) {
	resp, err := http.Get(url)
	if err != nil {
		return
	}
	// Bug fix: the body was never closed, leaking the connection.
	defer resp.Body.Close()
	respbody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return
	}
	stringbody := string(respbody)
	titlestart := strings.Index(stringbody, "<title>")
	titleend := strings.Index(stringbody, "</title>")
	// Bug fix: the original tested titlestart twice, so a page with
	// "<title>" but no "</title>" sliced with titleend == -1 and
	// panicked. Also guard against "</title>" appearing first.
	if titlestart != -1 && titleend != -1 && titleend > titlestart {
		title := string(respbody[titlestart+7 : titleend])
		title = "Title: " + html.UnescapeString(title)
		conn.Privmsg(channel, title)
	}
}
// handleMessage is the PRIVMSG handler: it announces titles for any
// http(s) links in the message and logs the message to MySQL.
func handleMessage(conn *irc.Conn, line *irc.Line) {
	for _, word := range strings.Split(line.Args[1], " ") {
		word = strings.TrimSpace(word)
		if strings.HasPrefix(word, "http") {
			go sendUrl(line.Args[0], word, conn)
		}
	}
	db, err := sql.Open("mysql", config.DBConn)
	if err != nil {
		// Bug fix: previously execution fell through with an unusable
		// handle and still called db.Exec/db.Close on it; bail instead.
		log.Println(err)
		return
	}
	defer db.Close()
	_, err = db.Exec("insert into messages (Nick, Ident, Host, Src, Cmd, Channel, Message, Time) values (?, ?, ?, ?, ?, ?, ?, ?)", line.Nick, line.Ident, line.Host, line.Src, line.Cmd, line.Args[0], line.Args[1], line.Time)
	if err != nil {
		log.Println(err)
	}
}
// main loads config.xml, connects to freenode, and runs until the
// server disconnects us.
func main() {
	flag.Parse()
	xmlFile, err := ioutil.ReadFile("config.xml")
	if err != nil {
		log.Fatal(err)
	}
	// Bug fix: the unmarshal error was ignored, so a malformed config
	// silently produced an empty Config (empty channel and nick).
	if err := xml.Unmarshal(xmlFile, &config); err != nil {
		log.Fatal(err)
	}
	log.Printf("Joining channel %s", config.Channel)
	c := irc.SimpleClient(config.BotName)
	c.AddHandler(irc.CONNECTED,
		func(conn *irc.Conn, line *irc.Line) {
			conn.Join(config.Channel)
			log.Println("Connected!")
		})
	quit := make(chan bool)
	c.AddHandler(irc.DISCONNECTED,
		func(conn *irc.Conn, line *irc.Line) { quit <- true })
	c.AddHandler("PRIVMSG", handleMessage)
	if err := c.Connect("irc.freenode.net"); err != nil {
		// Bug fix: log.Fatalln does not interpret format verbs; Fatalf does.
		log.Fatalf("Connection error: %s\n", err)
	}
	<-quit
}
|
package index
import (
"bufio"
"crypto/sha512"
"encoding/hex"
"github.com/tywkeene/autobd/options"
"io"
"io/ioutil"
"os"
"path"
"time"
)
// Index describes one file or directory in a recursive directory
// snapshot. Directories carry their children in Files; regular files
// carry a SHA512 checksum.
type Index struct {
	Name     string            `json:"name"`
	Checksum string            `json:"checksum,omitempty"` // empty for directories
	Size     int64             `json:"size"`
	ModTime  time.Time         `json:"lastModified"`
	Mode     os.FileMode       `json:"fileMode"`
	IsDir    bool              `json:"isDir"`
	Files    map[string]*Index `json:"files,omitempty"` // children, set for directories only
}
//Generate a sha512 checksum for 'path'
func GetChecksum(path string) (string, error) {
file, err := os.Open(path)
if err != nil {
return "", err
}
defer file.Close()
stats, err := file.Stat()
if err != nil {
return "", err
}
size := stats.Size()
raw := make([]byte, size)
buffer := bufio.NewReader(file)
_, err = buffer.Read(raw)
hash := sha512.New()
io.WriteString(hash, string(raw))
checksum := hex.EncodeToString(hash.Sum(nil))
return checksum, nil
}
// NewIndex builds an Index entry for a single file or directory.
// Directories get an empty checksum; files are checksummed via
// GetChecksum.
// NOTE(review): a checksum error makes this return nil, silently
// dropping the entry (callers store the result without a nil check) —
// confirm this is intended.
func NewIndex(name string, size int64, modtime time.Time, mode os.FileMode, isDir bool) *Index {
	var checksum string
	var err error
	if isDir == false {
		checksum, err = GetChecksum(name)
		if err != nil {
			return nil
		}
	} else {
		checksum = ""
	}
	return &Index{name, checksum, size, modtime, mode, isDir, nil}
}
//Recursively generate an index for dirPath, keyed by joined child path.
//Root and parent paths are clamped to "./" to keep indexing inside the
//served tree; the node metadata file is skipped.
func GetIndex(dirPath string) (map[string]*Index, error) {
	if dirPath == "/" || dirPath == "../" || dirPath == ".." {
		dirPath = "./"
	}
	list, err := ioutil.ReadDir(dirPath)
	if err != nil {
		return nil, err
	}
	index := make(map[string]*Index)
	for _, child := range list {
		if child.Name() == options.Config.NodeMetadataFile {
			continue
		}
		childPath := path.Join(dirPath, child.Name())
		index[childPath] = NewIndex(childPath, child.Size(), child.ModTime(), child.Mode(), child.IsDir())
		if child.IsDir() == true {
			// Recurse and attach the subtree to this entry.
			childContent, err := GetIndex(childPath)
			if err != nil {
				return nil, err
			}
			index[childPath].Files = childContent
		}
	}
	return index, nil
}
Fixes #51
Buffered read of input file for SHA512 calculation instead of
loading the entire file into memory first.
package index
import (
"bufio"
"crypto/sha512"
"encoding/hex"
"io/ioutil"
"os"
"path"
"time"
"github.com/tywkeene/autobd/options"
)
// Index describes one file or directory in a recursive directory
// snapshot. Directories carry their children in Files; regular files
// carry a SHA512 checksum.
type Index struct {
	Name     string            `json:"name"`
	Checksum string            `json:"checksum,omitempty"` // empty for directories
	Size     int64             `json:"size"`
	ModTime  time.Time         `json:"lastModified"`
	Mode     os.FileMode       `json:"fileMode"`
	IsDir    bool              `json:"isDir"`
	Files    map[string]*Index `json:"files,omitempty"` // children, set for directories only
}
// GetChecksum returns the SHA512 hash of the file at 'path'.
func GetChecksum(path string) (string, error) {
file, err := os.Open(path)
if err != nil {
return "", err
}
defer file.Close()
hash := sha512.New()
buf := bufio.NewReader(file)
_, err = buf.WriteTo(hash)
if err != nil {
return "", err
}
sum := hex.EncodeToString(hash.Sum(nil))
return sum, nil
}
// NewIndex builds an Index entry for a single file or directory.
// Directories get an empty checksum; files are checksummed via
// GetChecksum.
// NOTE(review): a checksum error makes this return nil, silently
// dropping the entry (callers store the result without a nil check) —
// confirm this is intended.
func NewIndex(name string, size int64, modtime time.Time, mode os.FileMode, isDir bool) *Index {
	var checksum string
	var err error
	if isDir == false {
		checksum, err = GetChecksum(name)
		if err != nil {
			return nil
		}
	} else {
		checksum = ""
	}
	return &Index{name, checksum, size, modtime, mode, isDir, nil}
}
//Recursively generate an index for dirPath, keyed by joined child path.
//Root and parent paths are clamped to "./" to keep indexing inside the
//served tree; the node metadata file is skipped.
func GetIndex(dirPath string) (map[string]*Index, error) {
	if dirPath == "/" || dirPath == "../" || dirPath == ".." {
		dirPath = "./"
	}
	list, err := ioutil.ReadDir(dirPath)
	if err != nil {
		return nil, err
	}
	index := make(map[string]*Index)
	for _, child := range list {
		if child.Name() == options.Config.NodeMetadataFile {
			continue
		}
		childPath := path.Join(dirPath, child.Name())
		index[childPath] = NewIndex(childPath, child.Size(), child.ModTime(), child.Mode(), child.IsDir())
		if child.IsDir() == true {
			// Recurse and attach the subtree to this entry.
			childContent, err := GetIndex(childPath)
			if err != nil {
				return nil, err
			}
			index[childPath].Files = childContent
		}
	}
	return index, nil
}
|
golint
|
package inject_test
import (
"testing"
"github.com/daaku/go.inject"
)
// Answerable is the small interface the test fixtures implement.
type Answerable interface {
	Answer() int
}

// TypeAnswerStruct is the leaf fixture: no inject tags of its own.
type TypeAnswerStruct struct {
	answer  int
	private int
}

func (t *TypeAnswerStruct) Answer() int {
	return t.answer
}

// TypeNestedStruct depends on TypeAnswerStruct via an inject tag,
// giving the graph one level of nesting to resolve.
type TypeNestedStruct struct {
	A *TypeAnswerStruct `inject:""`
}

func (t *TypeNestedStruct) Answer() int {
	return t.A.Answer()
}
// TestRequireTag verifies that only fields carrying an `inject` tag
// are populated: untagged v.A must stay nil while tagged v.B is filled.
func TestRequireTag(t *testing.T) {
	var v struct {
		A *TypeAnswerStruct
		B *TypeNestedStruct `inject:""`
	}
	if err := inject.Populate(&v); err != nil {
		t.Fatal(err)
	}
	if v.A != nil {
		t.Fatal("v.A is not nil")
	}
	if v.B == nil {
		t.Fatal("v.B is nil")
	}
}

// TypeWithNonPointerInject tags a plain int, which inject cannot fill.
type TypeWithNonPointerInject struct {
	A int `inject:""`
}

// TestErrorOnNonPointerInject expects the unsupported-field error for
// an inject tag on a non-pointer field.
func TestErrorOnNonPointerInject(t *testing.T) {
	var a TypeWithNonPointerInject
	err := inject.Populate(&a)
	if err == nil {
		t.Fatalf("expected error for %+v", a)
	}
	const msg = "found inject tag on unsupported field A in type *inject_test.TypeWithNonPointerInject"
	if err.Error() != msg {
		t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
	}
}

// TypeWithNonPointerStructInject tags *int: a pointer, but not to a struct.
type TypeWithNonPointerStructInject struct {
	A *int `inject:""`
}

// TestErrorOnNonPointerStructInject expects the unsupported-field
// error for a pointer-to-non-struct field.
func TestErrorOnNonPointerStructInject(t *testing.T) {
	var a TypeWithNonPointerStructInject
	err := inject.Populate(&a)
	if err == nil {
		t.Fatalf("expected error for %+v", a)
	}
	const msg = "found inject tag on unsupported field A in type *inject_test.TypeWithNonPointerStructInject"
	if err.Error() != msg {
		t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
	}
}

// TestInjectSimple checks basic population and that the two injected
// fields share a single *TypeAnswerStruct instance.
func TestInjectSimple(t *testing.T) {
	var v struct {
		A *TypeAnswerStruct `inject:""`
		B *TypeNestedStruct `inject:""`
	}
	if err := inject.Populate(&v); err != nil {
		t.Fatal(err)
	}
	if v.A == nil {
		t.Fatal("v.A is nil")
	}
	if v.B == nil {
		t.Fatal("v.B is nil")
	}
	if v.B.A == nil {
		t.Fatal("v.B.A is nil")
	}
	if v.A != v.B.A {
		t.Fatal("got different instances of A")
	}
}
// TestDoesNotOverwrite verifies that a field set before Populate is
// left untouched.
func TestDoesNotOverwrite(t *testing.T) {
	a := &TypeAnswerStruct{}
	var v struct {
		A *TypeAnswerStruct `inject:""`
		B *TypeNestedStruct `inject:""`
	}
	v.A = a
	if err := inject.Populate(&v); err != nil {
		t.Fatal(err)
	}
	if v.A != a {
		t.Fatal("original A was lost")
	}
	if v.B == nil {
		t.Fatal("v.B is nil")
	}
}

// TestPrivate verifies that `inject:"private"` gets its own instance
// rather than the shared singleton used by other injections.
func TestPrivate(t *testing.T) {
	var v struct {
		A *TypeAnswerStruct `inject:"private"`
		B *TypeNestedStruct `inject:""`
	}
	if err := inject.Populate(&v); err != nil {
		t.Fatal(err)
	}
	if v.A == nil {
		t.Fatal("v.A is nil")
	}
	if v.B == nil {
		t.Fatal("v.B is nil")
	}
	if v.B.A == nil {
		t.Fatal("v.B.A is nil")
	}
	if v.A == v.B.A {
		t.Fatal("got the same A")
	}
}

// TypeWithJustColon deliberately carries the malformed tag `inject:`
// (no quoted value) to exercise tag validation.
type TypeWithJustColon struct {
	A *TypeAnswerStruct `inject:`
}

// TestTagWithJustColon expects the tag-format error for the above.
func TestTagWithJustColon(t *testing.T) {
	var a TypeWithJustColon
	err := inject.Populate(&a)
	if err == nil {
		t.Fatalf("expected error for %+v", a)
	}
	const msg = "unexpected tag format `inject:` for field A in type *inject_test.TypeWithJustColon"
	if err.Error() != msg {
		t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
	}
}

// TypeWithOpenQuote deliberately carries an unterminated tag value.
type TypeWithOpenQuote struct {
	A *TypeAnswerStruct `inject:"`
}

// TestTagWithOpenQuote expects the tag-format error for the above.
func TestTagWithOpenQuote(t *testing.T) {
	var a TypeWithOpenQuote
	err := inject.Populate(&a)
	if err == nil {
		t.Fatalf("expected error for %+v", a)
	}
	const msg = "unexpected tag format `inject:\"` for field A in type *inject_test.TypeWithOpenQuote"
	if err.Error() != msg {
		t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
	}
}
// TestProvideNonPointer verifies Provide rejects a bare value.
func TestProvideNonPointer(t *testing.T) {
	var g inject.Graph
	var i int
	err := g.Provide(&inject.Object{Value: i})
	if err == nil {
		t.Fatal("expected error")
	}
	const msg = "expected unnamed object value to be a pointer to a struct but got type int with value 0"
	if err.Error() != msg {
		t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
	}
}

// TestProvideNonPointerStruct verifies Provide rejects a pointer that
// does not point at a struct.
func TestProvideNonPointerStruct(t *testing.T) {
	var g inject.Graph
	var i *int
	err := g.Provide(&inject.Object{Value: i})
	if err == nil {
		t.Fatal("expected error")
	}
	const msg = "expected unnamed object value to be a pointer to a struct but got type *int with value <nil>"
	if err.Error() != msg {
		t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
	}
}

// TestProvideTwoOfTheSame verifies a duplicate unnamed instance of the
// same type is rejected on the second Provide.
func TestProvideTwoOfTheSame(t *testing.T) {
	var g inject.Graph
	a := TypeAnswerStruct{}
	err := g.Provide(&inject.Object{Value: &a})
	if err != nil {
		t.Fatal(err)
	}
	err = g.Provide(&inject.Object{Value: &a})
	if err == nil {
		t.Fatal("expected error")
	}
	const msg = "provided two unnamed instances of type *inject_test.TypeAnswerStruct"
	if err.Error() != msg {
		t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
	}
}

// TestProvideTwoOfTheSameWithPopulate checks the same duplicate error
// surfaces through the Populate convenience wrapper.
func TestProvideTwoOfTheSameWithPopulate(t *testing.T) {
	a := TypeAnswerStruct{}
	err := inject.Populate(&a, &a)
	if err == nil {
		t.Fatal("expected error")
	}
	const msg = "provided two unnamed instances of type *inject_test.TypeAnswerStruct"
	if err.Error() != msg {
		t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
	}
}

// TestProvideTwoWithTheSameName verifies duplicate named instances are
// rejected regardless of type.
func TestProvideTwoWithTheSameName(t *testing.T) {
	var g inject.Graph
	const name = "foo"
	a := TypeAnswerStruct{}
	err := g.Provide(&inject.Object{Value: &a, Name: name})
	if err != nil {
		t.Fatal(err)
	}
	err = g.Provide(&inject.Object{Value: &a, Name: name})
	if err == nil {
		t.Fatal("expected error")
	}
	const msg = "provided two instances named foo"
	if err.Error() != msg {
		t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
	}
}
func TestNamedInstanceWithDependencies(t *testing.T) {
var g inject.Graph
a := &TypeNestedStruct{}
if err := g.Provide(&inject.Object{Value: a, Name: "foo"}); err != nil {
t.Fatal(err)
}
var c struct {
A *TypeNestedStruct `inject:"foo"`
}
if err := g.Provide(&inject.Object{Value: &c}); err != nil {
t.Fatal(err)
}
if err := g.Populate(); err != nil {
t.Fatal(err)
}
if c.A.A == nil {
t.Fatal("c.A.A was not injected")
}
}
func TestTwoNamedInstances(t *testing.T) {
var g inject.Graph
a := &TypeAnswerStruct{}
b := &TypeAnswerStruct{}
if err := g.Provide(&inject.Object{Value: a, Name: "foo"}); err != nil {
t.Fatal(err)
}
if err := g.Provide(&inject.Object{Value: b, Name: "bar"}); err != nil {
t.Fatal(err)
}
var c struct {
A *TypeAnswerStruct `inject:"foo"`
B *TypeAnswerStruct `inject:"bar"`
}
if err := g.Provide(&inject.Object{Value: &c}); err != nil {
t.Fatal(err)
}
if err := g.Populate(); err != nil {
t.Fatal(err)
}
if c.A != a {
t.Fatal("did not find expected c.A")
}
if c.B != b {
t.Fatal("did not find expected c.B")
}
}
type TypeWithMissingNamed struct {
A *TypeAnswerStruct `inject:"foo"`
}
func TestTagWithMissingNamed(t *testing.T) {
var a TypeWithMissingNamed
err := inject.Populate(&a)
if err == nil {
t.Fatalf("expected error for %+v", a)
}
const msg = "did not find object named foo required by field A in type *inject_test.TypeWithMissingNamed"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
func TestCompleteProvides(t *testing.T) {
var g inject.Graph
var v struct {
A *TypeAnswerStruct `inject:""`
}
if err := g.Provide(&inject.Object{Value: &v, Complete: true}); err != nil {
t.Fatal(err)
}
if err := g.Populate(); err != nil {
t.Fatal(err)
}
if v.A != nil {
t.Fatal("v.A was not nil")
}
}
func TestCompleteNamedProvides(t *testing.T) {
var g inject.Graph
var v struct {
A *TypeAnswerStruct `inject:""`
}
if err := g.Provide(&inject.Object{Value: &v, Complete: true, Name: "foo"}); err != nil {
t.Fatal(err)
}
if err := g.Populate(); err != nil {
t.Fatal(err)
}
if v.A != nil {
t.Fatal("v.A was not nil")
}
}
type TypeInjectInterfaceMissing struct {
Answerable Answerable `inject:""`
}
func TestInjectInterfaceMissing(t *testing.T) {
var v TypeInjectInterfaceMissing
err := inject.Populate(&v)
if err == nil {
t.Fatal("did not find expected error")
}
const msg = "found no assignable value for field Answerable in type *inject_test.TypeInjectInterfaceMissing"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeInjectInterface struct {
Answerable Answerable `inject:""`
A *TypeAnswerStruct `inject:""`
}
func TestInjectInterface(t *testing.T) {
var v TypeInjectInterface
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.Answerable == nil || v.Answerable != v.A {
t.Fatalf(
"expected the same but got Answerable = %T %+v / A = %T %+v",
v.Answerable,
v.Answerable,
v.A,
v.A,
)
}
}
type TypeWithInvalidNamedType struct {
A *TypeNestedStruct `inject:"foo"`
}
func TestInvalidNamedInstanceType(t *testing.T) {
var g inject.Graph
a := &TypeAnswerStruct{}
if err := g.Provide(&inject.Object{Value: a, Name: "foo"}); err != nil {
t.Fatal(err)
}
var c TypeWithInvalidNamedType
if err := g.Provide(&inject.Object{Value: &c}); err != nil {
t.Fatal(err)
}
err := g.Populate()
if err == nil {
t.Fatal("did not find expected error")
}
const msg = "object named foo of type *inject_test.TypeNestedStruct is not assignable to field A in type *inject_test.TypeWithInvalidNamedType"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeWithInjectOnPrivateField struct {
a *TypeAnswerStruct `inject:""`
}
func TestInjectOnPrivateField(t *testing.T) {
var a TypeWithInjectOnPrivateField
err := inject.Populate(&a)
if err == nil {
t.Fatal("did not find expected error")
}
const msg = "inject requested on unexported field a in type *inject_test.TypeWithInjectOnPrivateField"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeWithInjectOnPrivateInterfaceField struct {
a Answerable `inject:""`
}
func TestInjectOnPrivateInterfaceField(t *testing.T) {
var a TypeWithInjectOnPrivateField
err := inject.Populate(&a)
if err == nil {
t.Fatal("did not find expected error")
}
const msg = "inject requested on unexported field a in type *inject_test.TypeWithInjectOnPrivateField"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeInjectPrivateInterface struct {
Answerable Answerable `inject:"private"`
B *TypeNestedStruct `inject:""`
}
func TestInjectPrivateInterface(t *testing.T) {
var v TypeInjectPrivateInterface
err := inject.Populate(&v)
if err == nil {
t.Fatal("did not find expected error")
}
const msg = "found private inject tag on interface field Answerable in type *inject_test.TypeInjectPrivateInterface"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeInjectTwoSatisfyInterface struct {
Answerable Answerable `inject:""`
A *TypeAnswerStruct `inject:""`
B *TypeNestedStruct `inject:""`
}
func TestInjectTwoSatisfyInterface(t *testing.T) {
var v TypeInjectTwoSatisfyInterface
err := inject.Populate(&v)
if err == nil {
t.Fatal("did not find expected error")
}
const msg = "found two assignable values for field Answerable in type *inject_test.TypeInjectTwoSatisfyInterface. one type *inject_test.TypeAnswerStruct with value &{0 0} and another type *inject_test.TypeNestedStruct with value <*inject_test.TypeNestedStruct Value>"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeInjectNamedTwoSatisfyInterface struct {
Answerable Answerable `inject:""`
A *TypeAnswerStruct `inject:""`
B *TypeNestedStruct `inject:""`
}
func TestInjectNamedTwoSatisfyInterface(t *testing.T) {
var g inject.Graph
var v TypeInjectNamedTwoSatisfyInterface
if err := g.Provide(&inject.Object{Name: "foo", Value: &v}); err != nil {
t.Fatal(err)
}
err := g.Populate()
if err == nil {
t.Fatal("was expecting error")
}
const msg = "found two assignable values for field Answerable in type *inject_test.TypeInjectNamedTwoSatisfyInterface. one type *inject_test.TypeAnswerStruct with value &{0 0} and another type *inject_test.TypeNestedStruct with value <*inject_test.TypeNestedStruct Value>"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeWithInjectNamedOnPrivateInterfaceField struct {
a Answerable `inject:""`
}
func TestInjectNamedOnPrivateInterfaceField(t *testing.T) {
var g inject.Graph
var v TypeWithInjectNamedOnPrivateInterfaceField
if err := g.Provide(&inject.Object{Name: "foo", Value: &v}); err != nil {
t.Fatal(err)
}
err := g.Populate()
if err == nil {
t.Fatal("was expecting error")
}
const msg = "inject requested on unexported field a in type *inject_test.TypeWithInjectNamedOnPrivateInterfaceField"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeWithNonPointerNamedInject struct {
A int `inject:"foo"`
}
func TestErrorOnNonPointerNamedInject(t *testing.T) {
var g inject.Graph
if err := g.Provide(&inject.Object{Name: "foo", Value: 42}); err != nil {
t.Fatal(err)
}
var v TypeWithNonPointerNamedInject
if err := g.Provide(&inject.Object{Value: &v}); err != nil {
t.Fatal(err)
}
if err := g.Populate(); err != nil {
t.Fatal(err)
}
if v.A != 42 {
t.Fatalf("expected v.A = 42 but got %d", v.A)
}
}
func TestInjectInline(t *testing.T) {
var v struct {
Inline struct {
A *TypeAnswerStruct `inject:""`
B *TypeNestedStruct `inject:""`
} `inject:""`
}
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.Inline.A == nil {
t.Fatal("v.Inline.A is nil")
}
if v.Inline.B == nil {
t.Fatal("v.Inline.B is nil")
}
if v.Inline.B.A == nil {
t.Fatal("v.Inline.B.A is nil")
}
if v.Inline.A != v.Inline.B.A {
t.Fatal("got different instances of A")
}
}
type TypeWithInlineStructWithPrivate struct {
Inline struct {
A *TypeAnswerStruct `inject:""`
B *TypeNestedStruct `inject:""`
} `inject:"private"`
}
func TestInjectInlinePrivate(t *testing.T) {
var v TypeWithInlineStructWithPrivate
err := inject.Populate(&v)
if err == nil {
t.Fatal("was expecting an error")
}
const msg = "cannot use private inject on inline struct on field Inline in type *inject_test.TypeWithInlineStructWithPrivate"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeWithStructValue struct {
Inline TypeNestedStruct `inject:""`
}
func TestInjectWithStructValue(t *testing.T) {
var v TypeWithStructValue
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.Inline.A == nil {
t.Fatal("v.Inline.A is nil")
}
}
func TestPrivateIsFollowed(t *testing.T) {
var v struct {
A *TypeNestedStruct `inject:"private"`
}
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.A.A == nil {
t.Fatal("v.A.A is nil")
}
}
func TestDoesNotOverwriteInterface(t *testing.T) {
a := &TypeAnswerStruct{}
var v struct {
A Answerable `inject:""`
B *TypeNestedStruct `inject:""`
}
v.A = a
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.A != a {
t.Fatal("original A was lost")
}
if v.B == nil {
t.Fatal("v.B is nil")
}
}
func TestInterfaceIncludingPrivate(t *testing.T) {
var v struct {
A Answerable `inject:""`
B *TypeNestedStruct `inject:"private"`
C *TypeAnswerStruct `inject:""`
}
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.A == nil {
t.Fatal("v.A is nil")
}
if v.B == nil {
t.Fatal("v.B is nil")
}
if v.C == nil {
t.Fatal("v.C is nil")
}
if v.A != v.C {
t.Fatal("v.A != v.C")
}
if v.A == v.B {
t.Fatal("v.A == v.B")
}
}
func TestInjectMap(t *testing.T) {
var v struct {
A map[string]int `inject:"private"`
}
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.A == nil {
t.Fatal("v.A is nil")
}
}
func TestInjectMapWithoutPrivate(t *testing.T) {
var v struct {
A map[string]int `inject:""`
}
err := inject.Populate(&v)
if err == nil {
t.Fatalf("expected error for %+v", v)
}
const msg = `inject on map field A in type *struct { A map[string]int "inject:\"\"" } must be named or private`
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
Use a named type in TestInjectMapWithoutPrivate so the expected error message references a stable type name.
package inject_test
import (
"testing"
"github.com/daaku/go.inject"
)
// Answerable is implemented by test types that can produce an answer value.
// It is used to exercise interface-field injection.
type Answerable interface {
	Answer() int
}
// TypeAnswerStruct is a leaf test type with no inject-tagged fields; its
// unexported fields ensure injection never touches private state.
type TypeAnswerStruct struct {
	answer  int
	private int
}
// Answer returns the stored answer value, satisfying Answerable.
func (t *TypeAnswerStruct) Answer() int {
	return t.answer
}
// TypeNestedStruct depends on TypeAnswerStruct via an inject tag, giving the
// tests a two-level dependency graph.
type TypeNestedStruct struct {
	A *TypeAnswerStruct `inject:""`
}
// Answer delegates to the injected dependency, satisfying Answerable.
func (t *TypeNestedStruct) Answer() int {
	return t.A.Answer()
}
func TestRequireTag(t *testing.T) {
var v struct {
A *TypeAnswerStruct
B *TypeNestedStruct `inject:""`
}
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.A != nil {
t.Fatal("v.A is not nil")
}
if v.B == nil {
t.Fatal("v.B is nil")
}
}
type TypeWithNonPointerInject struct {
A int `inject:""`
}
func TestErrorOnNonPointerInject(t *testing.T) {
var a TypeWithNonPointerInject
err := inject.Populate(&a)
if err == nil {
t.Fatalf("expected error for %+v", a)
}
const msg = "found inject tag on unsupported field A in type *inject_test.TypeWithNonPointerInject"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeWithNonPointerStructInject struct {
A *int `inject:""`
}
func TestErrorOnNonPointerStructInject(t *testing.T) {
var a TypeWithNonPointerStructInject
err := inject.Populate(&a)
if err == nil {
t.Fatalf("expected error for %+v", a)
}
const msg = "found inject tag on unsupported field A in type *inject_test.TypeWithNonPointerStructInject"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
// TestInjectSimple checks the basic case: both tagged fields are populated
// and the shared dependency is a single instance.
func TestInjectSimple(t *testing.T) {
	var v struct {
		A *TypeAnswerStruct `inject:""`
		B *TypeNestedStruct `inject:""`
	}
	if err := inject.Populate(&v); err != nil {
		t.Fatal(err)
	}
	switch {
	case v.A == nil:
		t.Fatal("v.A is nil")
	case v.B == nil:
		t.Fatal("v.B is nil")
	case v.B.A == nil:
		t.Fatal("v.B.A is nil")
	case v.A != v.B.A:
		t.Fatal("got different instances of A")
	}
}
func TestDoesNotOverwrite(t *testing.T) {
a := &TypeAnswerStruct{}
var v struct {
A *TypeAnswerStruct `inject:""`
B *TypeNestedStruct `inject:""`
}
v.A = a
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.A != a {
t.Fatal("original A was lost")
}
if v.B == nil {
t.Fatal("v.B is nil")
}
}
func TestPrivate(t *testing.T) {
var v struct {
A *TypeAnswerStruct `inject:"private"`
B *TypeNestedStruct `inject:""`
}
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.A == nil {
t.Fatal("v.A is nil")
}
if v.B == nil {
t.Fatal("v.B is nil")
}
if v.B.A == nil {
t.Fatal("v.B.A is nil")
}
if v.A == v.B.A {
t.Fatal("got the same A")
}
}
type TypeWithJustColon struct {
A *TypeAnswerStruct `inject:`
}
func TestTagWithJustColon(t *testing.T) {
var a TypeWithJustColon
err := inject.Populate(&a)
if err == nil {
t.Fatalf("expected error for %+v", a)
}
const msg = "unexpected tag format `inject:` for field A in type *inject_test.TypeWithJustColon"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeWithOpenQuote struct {
A *TypeAnswerStruct `inject:"`
}
func TestTagWithOpenQuote(t *testing.T) {
var a TypeWithOpenQuote
err := inject.Populate(&a)
if err == nil {
t.Fatalf("expected error for %+v", a)
}
const msg = "unexpected tag format `inject:\"` for field A in type *inject_test.TypeWithOpenQuote"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
func TestProvideNonPointer(t *testing.T) {
var g inject.Graph
var i int
err := g.Provide(&inject.Object{Value: i})
if err == nil {
t.Fatal("expected error")
}
const msg = "expected unnamed object value to be a pointer to a struct but got type int with value 0"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
func TestProvideNonPointerStruct(t *testing.T) {
var g inject.Graph
var i *int
err := g.Provide(&inject.Object{Value: i})
if err == nil {
t.Fatal("expected error")
}
const msg = "expected unnamed object value to be a pointer to a struct but got type *int with value <nil>"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
func TestProvideTwoOfTheSame(t *testing.T) {
var g inject.Graph
a := TypeAnswerStruct{}
err := g.Provide(&inject.Object{Value: &a})
if err != nil {
t.Fatal(err)
}
err = g.Provide(&inject.Object{Value: &a})
if err == nil {
t.Fatal("expected error")
}
const msg = "provided two unnamed instances of type *inject_test.TypeAnswerStruct"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
func TestProvideTwoOfTheSameWithPopulate(t *testing.T) {
a := TypeAnswerStruct{}
err := inject.Populate(&a, &a)
if err == nil {
t.Fatal("expected error")
}
const msg = "provided two unnamed instances of type *inject_test.TypeAnswerStruct"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
func TestProvideTwoWithTheSameName(t *testing.T) {
var g inject.Graph
const name = "foo"
a := TypeAnswerStruct{}
err := g.Provide(&inject.Object{Value: &a, Name: name})
if err != nil {
t.Fatal(err)
}
err = g.Provide(&inject.Object{Value: &a, Name: name})
if err == nil {
t.Fatal("expected error")
}
const msg = "provided two instances named foo"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
func TestNamedInstanceWithDependencies(t *testing.T) {
var g inject.Graph
a := &TypeNestedStruct{}
if err := g.Provide(&inject.Object{Value: a, Name: "foo"}); err != nil {
t.Fatal(err)
}
var c struct {
A *TypeNestedStruct `inject:"foo"`
}
if err := g.Provide(&inject.Object{Value: &c}); err != nil {
t.Fatal(err)
}
if err := g.Populate(); err != nil {
t.Fatal(err)
}
if c.A.A == nil {
t.Fatal("c.A.A was not injected")
}
}
func TestTwoNamedInstances(t *testing.T) {
var g inject.Graph
a := &TypeAnswerStruct{}
b := &TypeAnswerStruct{}
if err := g.Provide(&inject.Object{Value: a, Name: "foo"}); err != nil {
t.Fatal(err)
}
if err := g.Provide(&inject.Object{Value: b, Name: "bar"}); err != nil {
t.Fatal(err)
}
var c struct {
A *TypeAnswerStruct `inject:"foo"`
B *TypeAnswerStruct `inject:"bar"`
}
if err := g.Provide(&inject.Object{Value: &c}); err != nil {
t.Fatal(err)
}
if err := g.Populate(); err != nil {
t.Fatal(err)
}
if c.A != a {
t.Fatal("did not find expected c.A")
}
if c.B != b {
t.Fatal("did not find expected c.B")
}
}
type TypeWithMissingNamed struct {
A *TypeAnswerStruct `inject:"foo"`
}
func TestTagWithMissingNamed(t *testing.T) {
var a TypeWithMissingNamed
err := inject.Populate(&a)
if err == nil {
t.Fatalf("expected error for %+v", a)
}
const msg = "did not find object named foo required by field A in type *inject_test.TypeWithMissingNamed"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
func TestCompleteProvides(t *testing.T) {
var g inject.Graph
var v struct {
A *TypeAnswerStruct `inject:""`
}
if err := g.Provide(&inject.Object{Value: &v, Complete: true}); err != nil {
t.Fatal(err)
}
if err := g.Populate(); err != nil {
t.Fatal(err)
}
if v.A != nil {
t.Fatal("v.A was not nil")
}
}
func TestCompleteNamedProvides(t *testing.T) {
var g inject.Graph
var v struct {
A *TypeAnswerStruct `inject:""`
}
if err := g.Provide(&inject.Object{Value: &v, Complete: true, Name: "foo"}); err != nil {
t.Fatal(err)
}
if err := g.Populate(); err != nil {
t.Fatal(err)
}
if v.A != nil {
t.Fatal("v.A was not nil")
}
}
type TypeInjectInterfaceMissing struct {
Answerable Answerable `inject:""`
}
func TestInjectInterfaceMissing(t *testing.T) {
var v TypeInjectInterfaceMissing
err := inject.Populate(&v)
if err == nil {
t.Fatal("did not find expected error")
}
const msg = "found no assignable value for field Answerable in type *inject_test.TypeInjectInterfaceMissing"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeInjectInterface struct {
Answerable Answerable `inject:""`
A *TypeAnswerStruct `inject:""`
}
func TestInjectInterface(t *testing.T) {
var v TypeInjectInterface
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.Answerable == nil || v.Answerable != v.A {
t.Fatalf(
"expected the same but got Answerable = %T %+v / A = %T %+v",
v.Answerable,
v.Answerable,
v.A,
v.A,
)
}
}
type TypeWithInvalidNamedType struct {
A *TypeNestedStruct `inject:"foo"`
}
func TestInvalidNamedInstanceType(t *testing.T) {
var g inject.Graph
a := &TypeAnswerStruct{}
if err := g.Provide(&inject.Object{Value: a, Name: "foo"}); err != nil {
t.Fatal(err)
}
var c TypeWithInvalidNamedType
if err := g.Provide(&inject.Object{Value: &c}); err != nil {
t.Fatal(err)
}
err := g.Populate()
if err == nil {
t.Fatal("did not find expected error")
}
const msg = "object named foo of type *inject_test.TypeNestedStruct is not assignable to field A in type *inject_test.TypeWithInvalidNamedType"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeWithInjectOnPrivateField struct {
a *TypeAnswerStruct `inject:""`
}
func TestInjectOnPrivateField(t *testing.T) {
var a TypeWithInjectOnPrivateField
err := inject.Populate(&a)
if err == nil {
t.Fatal("did not find expected error")
}
const msg = "inject requested on unexported field a in type *inject_test.TypeWithInjectOnPrivateField"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeWithInjectOnPrivateInterfaceField struct {
a Answerable `inject:""`
}
func TestInjectOnPrivateInterfaceField(t *testing.T) {
var a TypeWithInjectOnPrivateField
err := inject.Populate(&a)
if err == nil {
t.Fatal("did not find expected error")
}
const msg = "inject requested on unexported field a in type *inject_test.TypeWithInjectOnPrivateField"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeInjectPrivateInterface struct {
Answerable Answerable `inject:"private"`
B *TypeNestedStruct `inject:""`
}
func TestInjectPrivateInterface(t *testing.T) {
var v TypeInjectPrivateInterface
err := inject.Populate(&v)
if err == nil {
t.Fatal("did not find expected error")
}
const msg = "found private inject tag on interface field Answerable in type *inject_test.TypeInjectPrivateInterface"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeInjectTwoSatisfyInterface struct {
Answerable Answerable `inject:""`
A *TypeAnswerStruct `inject:""`
B *TypeNestedStruct `inject:""`
}
func TestInjectTwoSatisfyInterface(t *testing.T) {
var v TypeInjectTwoSatisfyInterface
err := inject.Populate(&v)
if err == nil {
t.Fatal("did not find expected error")
}
const msg = "found two assignable values for field Answerable in type *inject_test.TypeInjectTwoSatisfyInterface. one type *inject_test.TypeAnswerStruct with value &{0 0} and another type *inject_test.TypeNestedStruct with value <*inject_test.TypeNestedStruct Value>"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeInjectNamedTwoSatisfyInterface struct {
Answerable Answerable `inject:""`
A *TypeAnswerStruct `inject:""`
B *TypeNestedStruct `inject:""`
}
func TestInjectNamedTwoSatisfyInterface(t *testing.T) {
var g inject.Graph
var v TypeInjectNamedTwoSatisfyInterface
if err := g.Provide(&inject.Object{Name: "foo", Value: &v}); err != nil {
t.Fatal(err)
}
err := g.Populate()
if err == nil {
t.Fatal("was expecting error")
}
const msg = "found two assignable values for field Answerable in type *inject_test.TypeInjectNamedTwoSatisfyInterface. one type *inject_test.TypeAnswerStruct with value &{0 0} and another type *inject_test.TypeNestedStruct with value <*inject_test.TypeNestedStruct Value>"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeWithInjectNamedOnPrivateInterfaceField struct {
a Answerable `inject:""`
}
func TestInjectNamedOnPrivateInterfaceField(t *testing.T) {
var g inject.Graph
var v TypeWithInjectNamedOnPrivateInterfaceField
if err := g.Provide(&inject.Object{Name: "foo", Value: &v}); err != nil {
t.Fatal(err)
}
err := g.Populate()
if err == nil {
t.Fatal("was expecting error")
}
const msg = "inject requested on unexported field a in type *inject_test.TypeWithInjectNamedOnPrivateInterfaceField"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeWithNonPointerNamedInject struct {
A int `inject:"foo"`
}
func TestErrorOnNonPointerNamedInject(t *testing.T) {
var g inject.Graph
if err := g.Provide(&inject.Object{Name: "foo", Value: 42}); err != nil {
t.Fatal(err)
}
var v TypeWithNonPointerNamedInject
if err := g.Provide(&inject.Object{Value: &v}); err != nil {
t.Fatal(err)
}
if err := g.Populate(); err != nil {
t.Fatal(err)
}
if v.A != 42 {
t.Fatalf("expected v.A = 42 but got %d", v.A)
}
}
func TestInjectInline(t *testing.T) {
var v struct {
Inline struct {
A *TypeAnswerStruct `inject:""`
B *TypeNestedStruct `inject:""`
} `inject:""`
}
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.Inline.A == nil {
t.Fatal("v.Inline.A is nil")
}
if v.Inline.B == nil {
t.Fatal("v.Inline.B is nil")
}
if v.Inline.B.A == nil {
t.Fatal("v.Inline.B.A is nil")
}
if v.Inline.A != v.Inline.B.A {
t.Fatal("got different instances of A")
}
}
type TypeWithInlineStructWithPrivate struct {
Inline struct {
A *TypeAnswerStruct `inject:""`
B *TypeNestedStruct `inject:""`
} `inject:"private"`
}
func TestInjectInlinePrivate(t *testing.T) {
var v TypeWithInlineStructWithPrivate
err := inject.Populate(&v)
if err == nil {
t.Fatal("was expecting an error")
}
const msg = "cannot use private inject on inline struct on field Inline in type *inject_test.TypeWithInlineStructWithPrivate"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
type TypeWithStructValue struct {
Inline TypeNestedStruct `inject:""`
}
func TestInjectWithStructValue(t *testing.T) {
var v TypeWithStructValue
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.Inline.A == nil {
t.Fatal("v.Inline.A is nil")
}
}
func TestPrivateIsFollowed(t *testing.T) {
var v struct {
A *TypeNestedStruct `inject:"private"`
}
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.A.A == nil {
t.Fatal("v.A.A is nil")
}
}
func TestDoesNotOverwriteInterface(t *testing.T) {
a := &TypeAnswerStruct{}
var v struct {
A Answerable `inject:""`
B *TypeNestedStruct `inject:""`
}
v.A = a
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.A != a {
t.Fatal("original A was lost")
}
if v.B == nil {
t.Fatal("v.B is nil")
}
}
func TestInterfaceIncludingPrivate(t *testing.T) {
var v struct {
A Answerable `inject:""`
B *TypeNestedStruct `inject:"private"`
C *TypeAnswerStruct `inject:""`
}
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.A == nil {
t.Fatal("v.A is nil")
}
if v.B == nil {
t.Fatal("v.B is nil")
}
if v.C == nil {
t.Fatal("v.C is nil")
}
if v.A != v.C {
t.Fatal("v.A != v.C")
}
if v.A == v.B {
t.Fatal("v.A == v.B")
}
}
func TestInjectMap(t *testing.T) {
var v struct {
A map[string]int `inject:"private"`
}
if err := inject.Populate(&v); err != nil {
t.Fatal(err)
}
if v.A == nil {
t.Fatal("v.A is nil")
}
}
type TypeInjectWithMapWithoutPrivate struct {
A map[string]int `inject:""`
}
func TestInjectMapWithoutPrivate(t *testing.T) {
var v TypeInjectWithMapWithoutPrivate
err := inject.Populate(&v)
if err == nil {
t.Fatalf("expected error for %+v", v)
}
const msg = "inject on map field A in type *inject_test.TypeInjectWithMapWithoutPrivate must be named or private"
if err.Error() != msg {
t.Fatalf("expected:\n%s\nactual:\n%s", msg, err.Error())
}
}
|
package scanner
import (
"encoding/gob"
"errors"
"fmt"
"github.com/Symantec/Dominator/lib/image"
"github.com/Symantec/Dominator/objectserver"
"os"
"path"
"syscall"
)
// loadImageDataBase builds an in-memory ImageDataBase by scanning baseDir for
// gob-encoded image files, backed by the given object server.
// Returns an error if baseDir cannot be stat'ed, is not a directory, or the
// recursive scan fails.
func loadImageDataBase(baseDir string, objSrv objectserver.ObjectServer) (
	*ImageDataBase, error) {
	fi, err := os.Stat(baseDir)
	if err != nil {
		// fmt.Errorf instead of errors.New(fmt.Sprintf(...)); no trailing
		// newline in error strings per Go convention.
		return nil, fmt.Errorf("cannot stat: %s: %s", baseDir, err)
	}
	if !fi.IsDir() {
		return nil, errors.New(baseDir + " is not a directory")
	}
	imdb := new(ImageDataBase)
	imdb.baseDir = baseDir
	imdb.imageMap = make(map[string]*image.Image)
	imdb.objectServer = objSrv
	if err := imdb.scanDirectory(""); err != nil {
		return nil, err
	}
	return imdb, nil
}
// scanDirectory recursively walks dirname (relative to imdb.baseDir), loading
// every regular file as an image and descending into subdirectories.
// Entries that vanish mid-scan (ENOENT) are skipped rather than fatal.
func (imdb *ImageDataBase) scanDirectory(dirname string) error {
	file, err := os.Open(path.Join(imdb.baseDir, dirname))
	if err != nil {
		return err
	}
	names, err := file.Readdirnames(-1)
	file.Close()
	if err != nil {
		// BUG FIX: the Readdirnames error was previously ignored, which could
		// silently produce a partial (or empty) listing.
		return err
	}
	for _, name := range names {
		filename := path.Join(dirname, name)
		var stat syscall.Stat_t
		err := syscall.Lstat(path.Join(imdb.baseDir, filename), &stat)
		if err != nil {
			if err == syscall.ENOENT {
				continue
			}
			return err
		}
		if stat.Mode&syscall.S_IFMT == syscall.S_IFDIR {
			err = imdb.scanDirectory(filename)
		} else if stat.Mode&syscall.S_IFMT == syscall.S_IFREG {
			err = imdb.loadFile(filename)
		}
		if err != nil {
			if err == syscall.ENOENT {
				continue
			}
			return err
		}
	}
	return nil
}
// loadFile decodes the gob-encoded image stored at filename (relative to
// imdb.baseDir) and registers it in the image map under that filename.
func (imdb *ImageDataBase) loadFile(filename string) error {
	file, err := os.Open(path.Join(imdb.baseDir, filename))
	if err != nil {
		return err
	}
	defer file.Close()
	// Named img (not image) so the imported image package is not shadowed.
	var img image.Image
	if err := gob.NewDecoder(file).Decode(&img); err != nil {
		return err
	}
	imdb.imageMap[filename] = &img
	return nil
}
Rebuild pointers when loading image from persistent store.
package scanner
import (
"encoding/gob"
"errors"
"fmt"
"github.com/Symantec/Dominator/lib/image"
"github.com/Symantec/Dominator/objectserver"
"os"
"path"
"syscall"
)
// loadImageDataBase builds an in-memory ImageDataBase by scanning baseDir for
// gob-encoded image files, backed by the given object server.
// Returns an error if baseDir cannot be stat'ed, is not a directory, or the
// recursive scan fails.
func loadImageDataBase(baseDir string, objSrv objectserver.ObjectServer) (
	*ImageDataBase, error) {
	fi, err := os.Stat(baseDir)
	if err != nil {
		// fmt.Errorf instead of errors.New(fmt.Sprintf(...)); no trailing
		// newline in error strings per Go convention.
		return nil, fmt.Errorf("cannot stat: %s: %s", baseDir, err)
	}
	if !fi.IsDir() {
		return nil, errors.New(baseDir + " is not a directory")
	}
	imdb := new(ImageDataBase)
	imdb.baseDir = baseDir
	imdb.imageMap = make(map[string]*image.Image)
	imdb.objectServer = objSrv
	if err := imdb.scanDirectory(""); err != nil {
		return nil, err
	}
	return imdb, nil
}
// scanDirectory recursively walks dirname (relative to imdb.baseDir), loading
// every regular file as an image and descending into subdirectories.
// Entries that vanish mid-scan (ENOENT) are skipped rather than fatal.
func (imdb *ImageDataBase) scanDirectory(dirname string) error {
	file, err := os.Open(path.Join(imdb.baseDir, dirname))
	if err != nil {
		return err
	}
	names, err := file.Readdirnames(-1)
	file.Close()
	if err != nil {
		// BUG FIX: the Readdirnames error was previously ignored, which could
		// silently produce a partial (or empty) listing.
		return err
	}
	for _, name := range names {
		filename := path.Join(dirname, name)
		var stat syscall.Stat_t
		err := syscall.Lstat(path.Join(imdb.baseDir, filename), &stat)
		if err != nil {
			if err == syscall.ENOENT {
				continue
			}
			return err
		}
		if stat.Mode&syscall.S_IFMT == syscall.S_IFDIR {
			err = imdb.scanDirectory(filename)
		} else if stat.Mode&syscall.S_IFMT == syscall.S_IFREG {
			err = imdb.loadFile(filename)
		}
		if err != nil {
			if err == syscall.ENOENT {
				continue
			}
			return err
		}
	}
	return nil
}
// loadFile decodes the gob-encoded image stored at filename (relative to
// imdb.baseDir), rebuilds the filesystem's internal pointers (gob does not
// persist them), and registers the image in the map under that filename.
func (imdb *ImageDataBase) loadFile(filename string) error {
	file, err := os.Open(path.Join(imdb.baseDir, filename))
	if err != nil {
		return err
	}
	defer file.Close()
	// Named img (not image) so the imported image package is not shadowed.
	var img image.Image
	if err := gob.NewDecoder(file).Decode(&img); err != nil {
		return err
	}
	img.FileSystem.RebuildPointers()
	imdb.imageMap[filename] = &img
	return nil
}
|
// Write your event handlers here.
package main
import (
"github.com/ying32/govcl/vcl"
)
//::private::
// TMainFormFields holds private fields mixed into TMainForm by the govcl
// code generator (see the ::private:: marker). Currently empty.
type TMainFormFields struct {
}
// OnFormCreate initializes the wizard: hides all tab headers and selects
// the first page.
func (f *TMainForm) OnFormCreate(sender vcl.IObject) {
	f.hideAllTab()
	f.PageControl1.SetActivePageIndex(0)
}
// hideAllTab hides the tab header of every page so navigation happens only
// through the prev/next actions.
func (f *TMainForm) hideAllTab() {
	var i int32
	for i = 0; i < f.PageControl1.PageCount(); i++ {
		f.PageControl1.Pages(i).SetTabVisible(false)
	}
}
// OnActPagePrevExecute switches to the previous wizard page.
func (f *TMainForm) OnActPagePrevExecute(sender vcl.IObject) {
	f.PageControl1.SetActivePageIndex(f.PageControl1.ActivePageIndex() - 1)
}
// OnActPagePrevUpdate enables the "previous" action only when not on the
// first page.
func (f *TMainForm) OnActPagePrevUpdate(sender vcl.IObject) {
	vcl.ActionFromObj(sender).SetEnabled(f.PageControl1.ActivePageIndex() > 0)
}
// OnActPageNextExecute switches to the next wizard page.
func (f *TMainForm) OnActPageNextExecute(sender vcl.IObject) {
	f.PageControl1.SetActivePageIndex(f.PageControl1.ActivePageIndex() + 1)
}
// OnActPageNextUpdate enables the "next" action only when not on the last
// page.
func (f *TMainForm) OnActPageNextUpdate(sender vcl.IObject) {
	vcl.ActionFromObj(sender).SetEnabled(f.PageControl1.ActivePageIndex() < f.PageControl1.PageCount()-1)
}
Adjust the PageControlWizard example
调整PageControlWizard例子
// Write your event handlers here.
package main
import (
"github.com/ying32/govcl/vcl"
)
//::private::
// TMainFormFields holds private fields mixed into TMainForm by the govcl
// code generator (see the ::private:: marker). Currently empty.
type TMainFormFields struct {
}
// OnFormCreate initializes the wizard: hides all tabs/sheets and shows the
// first page via setPage(0).
func (f *TMainForm) OnFormCreate(sender vcl.IObject) {
	f.hideAllTab()
	f.setPage(0)
}
// hideAllTab hides every page's tab header and the sheet itself; setPage
// later makes only the active sheet visible.
func (f *TMainForm) hideAllTab() {
	var i int32
	for i = 0; i < f.PageControl1.PageCount(); i++ {
		sheet := f.PageControl1.Pages(i)
		sheet.SetTabVisible(false)
		sheet.SetVisible(false)
	}
}
// setPage moves the wizard by idx pages: -1 = previous, 1 = next,
// 0 = jump to the first page. Any other value is ignored.
// Sheets start hidden (see hideAllTab), so visibility is toggled manually.
// Bounds are not checked here; the prev/next action Update handlers keep
// curIdx+idx in range.
func (f *TMainForm) setPage(idx int32) {
	if idx != 0 && idx != -1 && idx != 1 {
		return
	}
	if idx == 0 {
		// Initial state: activate and show page 0.
		f.PageControl1.SetActivePageIndex(0)
		sheet := f.PageControl1.Pages(0)
		sheet.SetVisible(true)
		return
	}
	// Hide the current sheet, then activate and show the neighbour.
	curIdx := f.PageControl1.ActivePageIndex()
	sheet := f.PageControl1.Pages(curIdx)
	sheet.SetVisible(false)
	f.PageControl1.SetActivePageIndex(curIdx + idx)
	sheet = f.PageControl1.Pages(curIdx + idx)
	sheet.SetVisible(true)
}
// OnActPagePrevExecute switches to the previous wizard page.
func (f *TMainForm) OnActPagePrevExecute(sender vcl.IObject) {
	//f.PageControl1.SetActivePageIndex(f.PageControl1.ActivePageIndex() - 1)
	f.setPage(-1)
}
// OnActPagePrevUpdate enables the "previous" action only when not on the
// first page.
func (f *TMainForm) OnActPagePrevUpdate(sender vcl.IObject) {
	vcl.ActionFromObj(sender).SetEnabled(f.PageControl1.ActivePageIndex() > 0)
}
// OnActPageNextExecute steps the wizard one page forward.
func (f *TMainForm) OnActPageNextExecute(sender vcl.IObject) {
	f.setPage(1)
}
// OnActPageNextUpdate enables the "next" action only while a page exists
// after the active one.
func (f *TMainForm) OnActPageNextUpdate(sender vcl.IObject) {
	canGoForward := f.PageControl1.ActivePageIndex() < f.PageControl1.PageCount()-1
	vcl.ActionFromObj(sender).SetEnabled(canGoForward)
}
|
// This is a "stub" file. It's a little start on your solution.
// It's not a complete solution though; you have to write some code.
// Package acronym should have a package comment that summarizes what it's about.
// https://golang.org/doc/effective_go.html#commentary
package acronym
// Abbreviate returns the acronym for the given phrase. This is a stub that
// currently returns the empty string; implement it to pass the test suite.
//
// The earlier body-less duplicate `func Abbreviate(string) string` declaration
// was removed: Go forbids redeclaring a function in the same package, and a
// declaration without a body is only valid for externally-implemented
// (assembly/linkname) functions, so the file did not compile.
func Abbreviate(s string) string {
	return ""
}
acronym: Remove duplicated function definition (#987)
A previous PR (#860) introduced a duplicated definition for the Abbreviate function in the acronym exercise.
// This is a "stub" file. It's a little start on your solution.
// It's not a complete solution though; you have to write some code.
// Package acronym should have a package comment that summarizes what it's about.
// https://golang.org/doc/effective_go.html#commentary
package acronym
// Abbreviate returns the acronym for the given phrase.
// This is a starter stub: it returns the empty string so the exercise's test
// suite can drive a real implementation.
func Abbreviate(s string) string {
	return ""
}
|
// Copyright 2015 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package engine
import (
"encoding/json"
"fmt"
"math/rand"
"sort"
"strings"
"testing"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/api/pubsub/v1"
"go.chromium.org/gae/impl/memory"
ds "go.chromium.org/gae/service/datastore"
tq "go.chromium.org/gae/service/taskqueue"
"go.chromium.org/luci/common/clock"
"go.chromium.org/luci/common/clock/testclock"
"go.chromium.org/luci/common/data/rand/mathrand"
"go.chromium.org/luci/common/data/stringset"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/common/retry/transient"
"go.chromium.org/luci/server/auth"
"go.chromium.org/luci/server/auth/authtest"
"go.chromium.org/luci/server/secrets/testsecrets"
"go.chromium.org/luci/scheduler/appengine/acl"
"go.chromium.org/luci/scheduler/appengine/catalog"
"go.chromium.org/luci/scheduler/appengine/messages"
"go.chromium.org/luci/scheduler/appengine/task"
"go.chromium.org/luci/scheduler/appengine/task/noop"
. "github.com/smartystreets/goconvey/convey"
. "go.chromium.org/luci/common/testing/assertions"
)
// TestGetAllProjects verifies that Engine.GetAllProjects returns the sorted
// distinct project IDs of enabled jobs (projects whose only jobs are disabled
// are omitted, as "xyz" below demonstrates).
func TestGetAllProjects(t *testing.T) {
	Convey("works", t, func() {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		// Empty datastore -> no projects.
		projects, err := e.GetAllProjects(c)
		So(err, ShouldBeNil)
		So(len(projects), ShouldEqual, 0)

		// Non empty.
		So(ds.Put(c,
			&Job{JobID: "abc/1", ProjectID: "abc", Enabled: true},
			&Job{JobID: "abc/2", ProjectID: "abc", Enabled: true},
			&Job{JobID: "def/1", ProjectID: "def", Enabled: true},
			&Job{JobID: "xyz/1", ProjectID: "xyz", Enabled: false},
		), ShouldBeNil)
		ds.GetTestable(c).CatchupIndexes()
		// "xyz" is absent because its only job is disabled.
		projects, err = e.GetAllProjects(c)
		So(err, ShouldBeNil)
		So(projects, ShouldResemble, []string{"abc", "def"})
	})
}
// TestUpdateProjectJobs covers the job-sync flow: adding a definition
// schedules a tick timer task, re-adding the same revision is a noop,
// changing the schedule reschedules the tick, and removing the definition
// moves the job to the DISABLED state.
//
// NOTE(review): the TickNonce literals below depend on the deterministic
// PRNG seeded by newTestContext; reordering engine calls would change them.
func TestUpdateProjectJobs(t *testing.T) {
	Convey("works", t, func() {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		// Doing nothing.
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{})

		// Adding a new job (ticks every 5 sec).
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev1",
				Schedule: "*/5 * * * * * *",
				Acls:     acl.GrantsByRole{Readers: []string{"group:r"}, Owners: []string{"groups:o"}},
			}}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{
			{
				JobID:     "abc/1",
				ProjectID: "abc",
				Revision:  "rev1",
				Enabled:   true,
				Acls:      acl.GrantsByRole{Readers: []string{"group:r"}, Owners: []string{"groups:o"}},
				Schedule:  "*/5 * * * * * *",
				State: JobState{
					State:     "SCHEDULED",
					TickNonce: 6278013164014963328,
					TickTime:  epoch.Add(5 * time.Second),
				},
			},
		})

		// Enqueued timer task to launch it.
		task := ensureOneTask(c, "timers-q")
		So(task.Path, ShouldEqual, "/timers")
		So(task.ETA, ShouldResemble, epoch.Add(5*time.Second))
		tq.GetTestable(c).ResetTasks()

		// Re-adding the same job with the exact same config revision -> noop.
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev1",
				Schedule: "*/5 * * * * * *",
			}}), ShouldBeNil)
		ensureZeroTasks(c, "timers-q")
		ensureZeroTasks(c, "invs-q")

		// Changing schedule to tick earlier -> rescheduled.
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev2",
				Schedule: "*/1 * * * * * *",
			}}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{
			{
				JobID:     "abc/1",
				ProjectID: "abc",
				Revision:  "rev2",
				Enabled:   true,
				Schedule:  "*/1 * * * * * *",
				State: JobState{
					State:     "SCHEDULED",
					TickNonce: 886585524575582446,
					TickTime:  epoch.Add(1 * time.Second),
				},
			},
		})

		// Enqueued timer task to launch it.
		task = ensureOneTask(c, "timers-q")
		So(task.Path, ShouldEqual, "/timers")
		So(task.ETA, ShouldResemble, epoch.Add(1*time.Second))
		tq.GetTestable(c).ResetTasks()

		// Removed -> goes to disabled state.
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{
			{
				JobID:     "abc/1",
				ProjectID: "abc",
				Revision:  "rev2",
				Enabled:   false,
				Schedule:  "*/1 * * * * * *",
				State: JobState{
					State: "DISABLED",
				},
			},
		})
		ensureZeroTasks(c, "timers-q")
		ensureZeroTasks(c, "invs-q")
	})
}
// TestTransactionRetries checks that UpdateProjectJobs survives a bounded
// number of datastore transaction collisions (enqueuing exactly one timer
// task), and surfaces a transient-tagged error when collisions never stop.
func TestTransactionRetries(t *testing.T) {
	Convey("retry works", t, func() {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		// Adding a new job with transaction retry, should enqueue one task.
		ds.GetTestable(c).SetTransactionRetryCount(2)
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev1",
				Schedule: "*/5 * * * * * *",
			}}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{
			{
				JobID:     "abc/1",
				ProjectID: "abc",
				Revision:  "rev1",
				Enabled:   true,
				Schedule:  "*/5 * * * * * *",
				State: JobState{
					State:     "SCHEDULED",
					TickNonce: 928953616732700780,
					TickTime:  epoch.Add(5 * time.Second),
				},
			},
		})

		// Enqueued timer task to launch it (exactly one, despite the retries).
		task := ensureOneTask(c, "timers-q")
		So(task.Path, ShouldEqual, "/timers")
		So(task.ETA, ShouldResemble, epoch.Add(5*time.Second))
		tq.GetTestable(c).ResetTasks()
	})

	Convey("collision is handled", t, func() {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		// Pretend collision happened in all retries.
		ds.GetTestable(c).SetTransactionRetryCount(15)
		err := e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev1",
				Schedule: "*/5 * * * * * *",
			}})
		// The failure is transient (retryable) and leaves no partial state.
		So(transient.Tag.In(err), ShouldBeTrue)
		So(allJobs(c), ShouldResemble, []Job{})
		ensureZeroTasks(c, "timers-q")
		ensureZeroTasks(c, "invs-q")
	})
}
// TestResetAllJobsOnDevServer verifies that ResetAllJobsOnDevServer
// reschedules an existing job: after the clock moves forward, the job gets a
// fresh TickNonce and a TickTime computed from the new "now".
func TestResetAllJobsOnDevServer(t *testing.T) {
	Convey("works", t, func() {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev1",
				Schedule: "*/5 * * * * * *",
			}}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{
			{
				JobID:     "abc/1",
				ProjectID: "abc",
				Revision:  "rev1",
				Enabled:   true,
				Schedule:  "*/5 * * * * * *",
				State: JobState{
					State:     "SCHEDULED",
					TickNonce: 6278013164014963328,
					TickTime:  epoch.Add(5 * time.Second),
				},
			},
		})

		clock.Get(c).(testclock.TestClock).Add(1 * time.Minute)

		// ResetAllJobsOnDevServer should reschedule the job: new nonce, and
		// the tick moves to the next multiple of 5s after epoch+60s.
		So(e.ResetAllJobsOnDevServer(c), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{
			{
				JobID:     "abc/1",
				ProjectID: "abc",
				Revision:  "rev1",
				Enabled:   true,
				Schedule:  "*/5 * * * * * *",
				State: JobState{
					State:     "SCHEDULED",
					TickNonce: 886585524575582446,
					TickTime:  epoch.Add(65 * time.Second),
				},
			},
		})
	})
}
// TestFullFlow walks a job through its whole life cycle: definition, tick,
// queueing, a failed launch attempt (retried), a successful second attempt,
// and the final return to the SCHEDULED state. The first (failed) invocation
// is marked Failed once the retry starts.
//
// NOTE(review): invocation IDs, tick nonces, and the "[22:42:05.000]" debug
// log timestamps are all derived from the deterministic test clock/PRNG;
// reordering engine calls would invalidate these literals.
func TestFullFlow(t *testing.T) {
	Convey("full flow", t, func() {
		c := newTestContext(epoch)
		e, mgr := newTestEngine()
		taskBytes := noopTaskBytes()

		// expectedJobs builds the single-job expectation with the given state,
		// since only the JobState differs between the checkpoints below.
		expectedJobs := func(state JobState) []Job {
			return []Job{
				{
					JobID:     "abc/1",
					ProjectID: "abc",
					Revision:  "rev1",
					Enabled:   true,
					Schedule:  "*/5 * * * * * *",
					Task:      taskBytes,
					State:     state,
				},
			}
		}

		// Adding a new job (ticks every 5 sec).
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev1",
				Schedule: "*/5 * * * * * *",
				Task:     taskBytes,
			}}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, expectedJobs(JobState{
			State:     "SCHEDULED",
			TickNonce: 6278013164014963328,
			TickTime:  epoch.Add(5 * time.Second),
		}))

		// Enqueued timer task to launch it.
		tsk := ensureOneTask(c, "timers-q")
		So(tsk.Path, ShouldEqual, "/timers")
		So(tsk.ETA, ShouldResemble, epoch.Add(5*time.Second))
		tq.GetTestable(c).ResetTasks()

		// Tick time comes, the tick task is executed, job is added to queue.
		clock.Get(c).(testclock.TestClock).Add(5 * time.Second)
		So(e.ExecuteSerializedAction(c, tsk.Payload, 0), ShouldBeNil)

		// Job is in queued state now.
		So(allJobs(c), ShouldResemble, expectedJobs(JobState{
			State:           "QUEUED",
			TickNonce:       886585524575582446,
			TickTime:        epoch.Add(10 * time.Second),
			InvocationNonce: 928953616732700780,
			InvocationTime:  epoch.Add(5 * time.Second),
		}))

		// Next tick task is added.
		tickTask := ensureOneTask(c, "timers-q")
		So(tickTask.Path, ShouldEqual, "/timers")
		So(tickTask.ETA, ShouldResemble, epoch.Add(10*time.Second))

		// Invocation task (ETA is 1 sec in the future).
		invTask := ensureOneTask(c, "invs-q")
		So(invTask.Path, ShouldEqual, "/invs")
		So(invTask.ETA, ShouldResemble, epoch.Add(6*time.Second))
		tq.GetTestable(c).ResetTasks()

		// Time to run the job and it fails to launch with a transient error.
		mgr.launchTask = func(ctx context.Context, ctl task.Controller, triggers []task.Trigger) error {
			// Check data provided via the controller.
			So(ctl.JobID(), ShouldEqual, "abc/1")
			So(ctl.InvocationID(), ShouldEqual, int64(9200093518582198800))
			So(ctl.InvocationNonce(), ShouldEqual, int64(928953616732700780))
			So(ctl.Task(), ShouldResemble, &messages.NoopTask{})
			ctl.DebugLog("oops, fail")
			return errors.New("oops", transient.Tag)
		}
		So(transient.Tag.In(e.ExecuteSerializedAction(c, invTask.Payload, 0)), ShouldBeTrue)

		// Still in QUEUED state, but with InvocationID assigned.
		jobs := allJobs(c)
		So(jobs, ShouldResemble, expectedJobs(JobState{
			State:           "QUEUED",
			TickNonce:       886585524575582446,
			TickTime:        epoch.Add(10 * time.Second),
			InvocationNonce: 928953616732700780,
			InvocationTime:  epoch.Add(5 * time.Second),
			InvocationID:    9200093518582198800,
		}))
		jobKey := ds.KeyForObj(c, &jobs[0])

		// Check Invocation fields. It indicates that the attempt has failed and
		// will be retried.
		inv := Invocation{ID: 9200093518582198800, JobKey: jobKey}
		So(ds.Get(c, &inv), ShouldBeNil)
		inv.JobKey = nil // for easier ShouldResemble below
		debugLog := inv.DebugLog
		inv.DebugLog = ""
		So(inv, ShouldResemble, Invocation{
			ID:              9200093518582198800,
			InvocationNonce: 928953616732700780,
			Revision:        "rev1",
			Started:         epoch.Add(5 * time.Second),
			Task:            taskBytes,
			DebugLog:        "",
			Status:          task.StatusRetrying,
			MutationsCount:  1,
		})
		So(debugLog, ShouldContainSubstring, "[22:42:05.000] Invocation initiated (attempt 1)")
		So(debugLog, ShouldContainSubstring, "[22:42:05.000] oops, fail")
		So(debugLog, ShouldContainSubstring, "[22:42:05.000] The invocation will be retried")

		// The job is still in QUEUED state.
		So(allJobs(c), ShouldResemble, expectedJobs(JobState{
			State:           "QUEUED",
			TickNonce:       886585524575582446,
			TickTime:        epoch.Add(10 * time.Second),
			InvocationNonce: 928953616732700780,
			InvocationTime:  epoch.Add(5 * time.Second),
			InvocationID:    9200093518582198800,
		}))

		// Second attempt. Now starts, hangs midway, then finishes.
		mgr.launchTask = func(ctx context.Context, ctl task.Controller, triggers []task.Trigger) error {
			// Make sure Save() checkpoints the progress.
			ctl.DebugLog("Starting")
			ctl.State().Status = task.StatusRunning
			So(ctl.Save(ctx), ShouldBeNil)

			// After first Save the job and the invocation are in running state.
			So(allJobs(c), ShouldResemble, expectedJobs(JobState{
				State:                "RUNNING",
				TickNonce:            886585524575582446,
				TickTime:             epoch.Add(10 * time.Second),
				InvocationNonce:      928953616732700780,
				InvocationRetryCount: 1,
				InvocationTime:       epoch.Add(5 * time.Second),
				InvocationID:         9200093518582296192,
			}))
			inv := Invocation{ID: 9200093518582296192, JobKey: jobKey}
			So(ds.Get(c, &inv), ShouldBeNil)
			inv.JobKey = nil // for easier ShouldResemble below
			So(inv, ShouldResemble, Invocation{
				ID:              9200093518582296192,
				InvocationNonce: 928953616732700780,
				Revision:        "rev1",
				Started:         epoch.Add(5 * time.Second),
				Task:            taskBytes,
				DebugLog:        "[22:42:05.000] Invocation initiated (attempt 2)\n[22:42:05.000] Starting\n",
				RetryCount:      1,
				Status:          task.StatusRunning,
				MutationsCount:  1,
			})

			// Noop save, just for the code coverage.
			So(ctl.Save(ctx), ShouldBeNil)

			// Change state to the final one.
			ctl.State().Status = task.StatusSucceeded
			ctl.State().ViewURL = "http://view_url"
			ctl.State().TaskData = []byte("blah")
			return nil
		}
		So(e.ExecuteSerializedAction(c, invTask.Payload, 1), ShouldBeNil)

		// After final save.
		inv = Invocation{ID: 9200093518582296192, JobKey: jobKey}
		So(ds.Get(c, &inv), ShouldBeNil)
		inv.JobKey = nil // for easier ShouldResemble below
		debugLog = inv.DebugLog
		inv.DebugLog = ""
		So(inv, ShouldResemble, Invocation{
			ID:              9200093518582296192,
			InvocationNonce: 928953616732700780,
			Revision:        "rev1",
			Started:         epoch.Add(5 * time.Second),
			Finished:        epoch.Add(5 * time.Second),
			Task:            taskBytes,
			DebugLog:        "",
			RetryCount:      1,
			Status:          task.StatusSucceeded,
			ViewURL:         "http://view_url",
			TaskData:        []byte("blah"),
			MutationsCount:  2,
		})
		So(debugLog, ShouldContainSubstring, "[22:42:05.000] Invocation initiated (attempt 2)")
		So(debugLog, ShouldContainSubstring, "[22:42:05.000] Starting")
		So(debugLog, ShouldContainSubstring, "with status SUCCEEDED")

		// Previous invocation is aborted now (in Failed state).
		inv = Invocation{ID: 9200093518582198800, JobKey: jobKey}
		So(ds.Get(c, &inv), ShouldBeNil)
		inv.JobKey = nil // for easier ShouldResemble below
		debugLog = inv.DebugLog
		inv.DebugLog = ""
		So(inv, ShouldResemble, Invocation{
			ID:              9200093518582198800,
			InvocationNonce: 928953616732700780,
			Revision:        "rev1",
			Started:         epoch.Add(5 * time.Second),
			Finished:        epoch.Add(5 * time.Second),
			Task:            taskBytes,
			DebugLog:        "",
			Status:          task.StatusFailed,
			MutationsCount:  2,
		})
		So(debugLog, ShouldContainSubstring,
			"[22:42:05.000] New invocation is starting (9200093518582296192), marking this one as failed")

		// Job is in scheduled state again.
		So(allJobs(c), ShouldResemble, expectedJobs(JobState{
			State:     "SCHEDULED",
			TickNonce: 886585524575582446,
			TickTime:  epoch.Add(10 * time.Second),
			PrevTime:  epoch.Add(5 * time.Second),
		}))
	})
}
// TestForceInvocation checks manual triggering of a suspended job: only an
// owner may trigger (non-owners get ErrNoSuchJob, hiding the job's
// existence), and the returned future resolves to the invocation ID once the
// queued launch actually executes.
func TestForceInvocation(t *testing.T) {
	Convey("full flow", t, func() {
		c := newTestContext(epoch)
		e, mgr := newTestEngine()

		So(ds.Put(c, &Job{
			JobID:     "abc/1",
			ProjectID: "abc",
			Enabled:   true,
			Schedule:  "triggered",
			Task:      noopTaskBytes(),
			State:     JobState{State: JobStateSuspended},
			Acls:      acl.GrantsByRole{Owners: []string{"one@example.com"}},
		}), ShouldBeNil)

		ctxOne := auth.WithState(c, &authtest.FakeState{Identity: "user:one@example.com"})
		ctxTwo := auth.WithState(c, &authtest.FakeState{Identity: "user:two@example.com"})

		// Only owner can trigger (non-owner gets ErrNoSuchJob, not a
		// permission error, so the job's existence is not leaked).
		fut, err := e.ForceInvocation(ctxTwo, "abc/1")
		So(err, ShouldEqual, ErrNoSuchJob)

		// Triggers something.
		fut, err = e.ForceInvocation(ctxOne, "abc/1")
		So(err, ShouldBeNil)
		So(fut, ShouldNotBeNil)

		// No invocation yet (the launch has not run).
		invID, err := fut.InvocationID(ctxOne)
		So(err, ShouldBeNil)
		So(invID, ShouldEqual, 0)

		// But the launch is queued.
		invTask := ensureOneTask(c, "invs-q")
		So(invTask.Path, ShouldEqual, "/invs")
		tq.GetTestable(c).ResetTasks()

		// Launch it.
		var startedInvID int64
		mgr.launchTask = func(ctx context.Context, ctl task.Controller, _ []task.Trigger) error {
			startedInvID = ctl.InvocationID()
			ctl.State().Status = task.StatusRunning
			return nil
		}
		So(e.ExecuteSerializedAction(c, invTask.Payload, 0), ShouldBeNil)
		ds.GetTestable(c).CatchupIndexes()

		// The invocation ID is now available.
		invID, err = fut.InvocationID(ctxOne)
		So(err, ShouldBeNil)
		So(invID, ShouldEqual, startedInvID)
	})
}
// TestFullTriggeredFlow exercises the triggering pipeline: a cron "trigger"
// job emits triggers while running, and those triggers fan out to the
// configured triggered jobs, which then launch with the (de-duplicated by ID)
// trigger list delivered to their task managers.
func TestFullTriggeredFlow(t *testing.T) {
	Convey("full triggered flow", t, func() {
		c := newTestContext(epoch)
		e, mgr := newTestEngine()
		taskBytes := noopTaskBytes()

		// Create a new triggering noop job (ticks every 5 sec).
		jobsDefinitions := []catalog.Definition{
			{
				JobID:           "abc/1",
				Revision:        "rev1",
				Schedule:        "*/5 * * * * * *",
				Task:            taskBytes,
				Flavor:          catalog.JobFlavorTrigger,
				TriggeredJobIDs: []string{"abc/2-triggered", "abc/3-triggered"},
			},
		}
		// And also jobs 2, 3 to be triggered by job 1.
		for i := 2; i <= 3; i++ {
			jobsDefinitions = append(jobsDefinitions, catalog.Definition{
				JobID:    fmt.Sprintf("abc/%d-triggered", i),
				Revision: "rev1",
				Schedule: "triggered",
				Task:     taskBytes,
				Flavor:   catalog.JobFlavorTriggered,
			})
		}
		So(e.UpdateProjectJobs(c, "abc", jobsDefinitions), ShouldBeNil)

		// Enqueued timer task to launch it.
		tsk := ensureOneTask(c, "timers-q")
		So(tsk.Path, ShouldEqual, "/timers")
		So(tsk.ETA, ShouldResemble, epoch.Add(5*time.Second))
		tq.GetTestable(c).ResetTasks()

		// Tick time comes, the tick task is executed, job is added to queue.
		clock.Get(c).(testclock.TestClock).Add(5 * time.Second)
		So(e.ExecuteSerializedAction(c, tsk.Payload, 0), ShouldBeNil)

		// Job1 is in queued state now.
		job1 := getJob(c, "abc/1")
		So(job1.Flavor, ShouldEqual, catalog.JobFlavorTrigger)
		So(job1.TriggeredJobIDs, ShouldResemble, []string{"abc/2-triggered", "abc/3-triggered"})
		So(job1.State.State, ShouldEqual, JobStateQueued)

		// Next tick task is added.
		tickTask := ensureOneTask(c, "timers-q")
		So(tickTask.Path, ShouldEqual, "/timers")
		So(tickTask.ETA, ShouldResemble, epoch.Add(10*time.Second))

		// Invocation task (ETA is 1 sec in the future).
		invTask := ensureOneTask(c, "invs-q")
		So(invTask.Path, ShouldEqual, "/invs")
		So(invTask.ETA, ShouldResemble, epoch.Add(6*time.Second))
		tq.GetTestable(c).ResetTasks()

		var invID int64 // set inside launchTask once invocation is known.
		mgr.launchTask = func(ctx context.Context, ctl task.Controller, _ []task.Trigger) error {
			// Make sure Save() checkpoints the progress.
			ctl.DebugLog("Starting")
			ctl.State().Status = task.StatusRunning
			So(ctl.Save(ctx), ShouldBeNil)

			// After first Save the job and the invocation are in running state.
			j1 := getJob(c, "abc/1")
			So(j1.State.State, ShouldEqual, JobStateRunning)
			invID = j1.State.InvocationID
			inv, err := e.getInvocation(c, "abc/1", invID)
			So(err, ShouldBeNil)
			So(inv.TriggeredJobIDs, ShouldResemble, []string{"abc/2-triggered", "abc/3-triggered"})
			So(inv.DebugLog, ShouldEqual, "[22:42:05.000] Invocation initiated (attempt 1)\n[22:42:05.000] Starting\n")
			So(inv.Status, ShouldEqual, task.StatusRunning)
			So(inv.MutationsCount, ShouldEqual, 1)

			// Two triggers with the same ID: the second payload supersedes the
			// first; receivers see a single "trg" trigger.
			ctl.EmitTrigger(ctx, task.Trigger{ID: "trg", Payload: []byte("note the trigger id")})
			ctl.EmitTrigger(ctx, task.Trigger{ID: "trg", Payload: []byte("different payload")})

			// Change state to the final one.
			ctl.State().Status = task.StatusSucceeded
			ctl.State().ViewURL = "http://view_url"
			ctl.State().TaskData = []byte("blah")
			return nil
		}
		So(e.ExecuteSerializedAction(c, invTask.Payload, 0), ShouldBeNil)

		// After final save.
		inv, err := e.getInvocation(c, "abc/1", invID)
		So(err, ShouldBeNil)
		So(inv.Status, ShouldEqual, task.StatusSucceeded)
		So(inv.MutationsCount, ShouldEqual, 2)
		So(inv.DebugLog, ShouldContainSubstring, "[22:42:05.000] Emitting a trigger trg") // twice.
		for _, triggerTask := range popAllTasks(c, "invs-q") {
			So(e.ExecuteSerializedAction(c, triggerTask.Payload, 0), ShouldBeNil)
		}

		// Triggers should result in new invocations for previously suspended jobs.
		So(getJob(c, "abc/2-triggered").State.State, ShouldEqual, JobStateQueued)
		So(getJob(c, "abc/3-triggered").State.State, ShouldEqual, JobStateQueued)

		// Prepare to track triggers passed to task launchers.
		deliveredTriggers := map[string][]string{}
		mgr.launchTask = func(ctx context.Context, ctl task.Controller, triggers []task.Trigger) error {
			So(deliveredTriggers, ShouldNotContainKey, ctl.JobID())
			ids := make([]string, 0, len(triggers))
			for _, t := range triggers {
				ids = append(ids, t.ID)
			}
			sort.Strings(ids) // For deterministic tests.
			deliveredTriggers[ctl.JobID()] = ids
			ctl.State().Status = task.StatusSucceeded
			return nil
		}
		// Actually execute task launching.
		for _, t := range popAllTasks(c, "invs-q") {
			So(e.ExecuteSerializedAction(c, t.Payload, 0), ShouldBeNil)
		}
		So(deliveredTriggers, ShouldResemble, map[string][]string{
			"abc/2-triggered": {"trg"}, "abc/3-triggered": {"trg"},
		})
	})
}
// TestGenerateInvocationID checks two properties of invocation IDs: IDs
// generated at the same instant do not collide, and an ID generated later in
// time is numerically smaller than an earlier one (so ascending datastore
// order yields most-recent-first).
func TestGenerateInvocationID(t *testing.T) {
	Convey("generateInvocationID does not collide", t, func() {
		c := newTestContext(epoch)
		k := ds.NewKey(c, "Job", "", 123, nil)

		// Bunch of ids generated at the exact same moment in time do not collide.
		ids := map[int64]struct{}{}
		for i := 0; i < 20; i++ {
			id, err := generateInvocationID(c, k)
			So(err, ShouldBeNil)
			ids[id] = struct{}{}
		}
		So(len(ids), ShouldEqual, 20)
	})

	Convey("generateInvocationID gen IDs with most recent first", t, func() {
		c := newTestContext(epoch)
		k := ds.NewKey(c, "Job", "", 123, nil)

		older, err := generateInvocationID(c, k)
		So(err, ShouldBeNil)

		clock.Get(c).(testclock.TestClock).Add(5 * time.Second)

		newer, err := generateInvocationID(c, k)
		So(err, ShouldBeNil)

		// Later-in-time IDs are smaller numbers.
		So(newer, ShouldBeLessThan, older)
	})
}
// TestQueries covers the read-only, ACL-aware query APIs (GetAllProjects,
// GetVisibleJobs, GetVisibleProjectJobs, GetVisibleJob,
// ListVisibleInvocations, GetVisibleInvocation) against a fixed set of jobs
// with varying ACLs and a set of caller identities.
//
// Fix: the `test` closure in the "GetAllProjects ignores ACLs" sub-test used
// the outer `c` instead of its own `ctx` parameter, so the identity under
// test was never actually varied.
func TestQueries(t *testing.T) {
	Convey("with mock data", t, func() {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		// ACL presets: public (readable by all), readable by "some" group,
		// owned by a single user, and admin-only.
		aclPublic := acl.GrantsByRole{Readers: []string{"group:all"}, Owners: []string{"group:administrators"}}
		aclSome := acl.GrantsByRole{Readers: []string{"group:some"}}
		aclOne := acl.GrantsByRole{Owners: []string{"one@example.com"}}
		aclAdmin := acl.GrantsByRole{Readers: []string{"group:administrators"}, Owners: []string{"group:administrators"}}

		ctxAnon := auth.WithState(c, &authtest.FakeState{
			Identity:       "anonymous:anonymous",
			IdentityGroups: []string{"all"},
		})
		ctxOne := auth.WithState(c, &authtest.FakeState{
			Identity:       "user:one@example.com",
			IdentityGroups: []string{"all"},
		})
		ctxSome := auth.WithState(c, &authtest.FakeState{
			Identity:       "user:some@example.com",
			IdentityGroups: []string{"all", "some"},
		})
		ctxAdmin := auth.WithState(c, &authtest.FakeState{
			Identity:       "user:admin@example.com",
			IdentityGroups: []string{"administrators", "all"},
		})

		So(ds.Put(c,
			&Job{JobID: "abc/1", ProjectID: "abc", Enabled: true, Acls: aclOne},
			&Job{JobID: "abc/2", ProjectID: "abc", Enabled: true, Acls: aclSome},
			&Job{JobID: "abc/3", ProjectID: "abc", Enabled: true, Acls: aclPublic},
			&Job{JobID: "def/1", ProjectID: "def", Enabled: true, Acls: aclPublic},
			&Job{JobID: "def/2", ProjectID: "def", Enabled: false, Acls: aclPublic},
			&Job{JobID: "secret/1", ProjectID: "secret", Enabled: true, Acls: aclAdmin},
		), ShouldBeNil)

		job1 := ds.NewKey(c, "Job", "abc/1", 0, nil)
		job2 := ds.NewKey(c, "Job", "abc/2", 0, nil)
		job3 := ds.NewKey(c, "Job", "abc/3", 0, nil)
		So(ds.Put(c,
			&Invocation{ID: 1, JobKey: job1, InvocationNonce: 123},
			&Invocation{ID: 2, JobKey: job1, InvocationNonce: 123},
			&Invocation{ID: 3, JobKey: job1},
			&Invocation{ID: 1, JobKey: job2},
			&Invocation{ID: 2, JobKey: job2},
			&Invocation{ID: 3, JobKey: job2},
			&Invocation{ID: 1, JobKey: job3},
		), ShouldBeNil)

		ds.GetTestable(c).CatchupIndexes()

		Convey("GetAllProjects ignores ACLs and CurrentIdentity", func() {
			test := func(ctx context.Context) {
				// Use the passed ctx: using the outer `c` here would make all
				// three calls below identical, defeating the sub-test.
				r, err := e.GetAllProjects(ctx)
				So(err, ShouldBeNil)
				So(r, ShouldResemble, []string{"abc", "def", "secret"})
			}
			test(c)
			test(ctxAnon)
			test(ctxAdmin)
		})

		Convey("GetVisibleJobs works", func() {
			get := func(ctx context.Context) []string {
				jobs, err := e.GetVisibleJobs(ctx)
				So(err, ShouldBeNil)
				return sortedJobIds(jobs)
			}

			Convey("Anonymous users see only public jobs", func() {
				// Only 3 jobs with default ACLs granting READER access to everyone, but
				// def/2 is disabled and so shouldn't be returned.
				So(get(ctxAnon), ShouldResemble, []string{"abc/3", "def/1"})
			})

			Convey("Owners can see their own jobs + public jobs", func() {
				// abc/1 is owned by one@example.com.
				So(get(ctxOne), ShouldResemble, []string{"abc/1", "abc/3", "def/1"})
			})

			Convey("Explicit readers", func() {
				So(get(ctxSome), ShouldResemble, []string{"abc/2", "abc/3", "def/1"})
			})

			Convey("Admins have implicit READER access to all jobs", func() {
				So(get(ctxAdmin), ShouldResemble, []string{"abc/1", "abc/2", "abc/3", "def/1", "secret/1"})
			})
		})

		Convey("GetProjectJobsRA works", func() {
			get := func(ctx context.Context, project string) []string {
				jobs, err := e.GetVisibleProjectJobs(ctx, project)
				So(err, ShouldBeNil)
				return sortedJobIds(jobs)
			}

			Convey("Anonymous can still see public jobs", func() {
				So(get(ctxAnon, "def"), ShouldResemble, []string{"def/1"})
			})

			Convey("Admin have implicit READER access to all jobs", func() {
				So(get(ctxAdmin, "abc"), ShouldResemble, []string{"abc/1", "abc/2", "abc/3"})
			})

			Convey("Owners can still see their jobs", func() {
				So(get(ctxOne, "abc"), ShouldResemble, []string{"abc/1", "abc/3"})
			})

			Convey("Readers can see their jobs", func() {
				So(get(ctxSome, "abc"), ShouldResemble, []string{"abc/2", "abc/3"})
			})
		})

		Convey("GetVisibleJob works", func() {
			_, err := e.GetVisibleJob(ctxAdmin, "missing/job")
			So(err, ShouldEqual, ErrNoSuchJob)
			_, err = e.GetVisibleJob(ctxAnon, "abc/1") // no READER permission.
			So(err, ShouldEqual, ErrNoSuchJob)
			_, err = e.GetVisibleJob(ctxAnon, "def/2") // not enabled, hence not visible.
			So(err, ShouldEqual, ErrNoSuchJob)
			job, err := e.GetVisibleJob(ctxAnon, "def/1") // OK.
			So(job, ShouldNotBeNil)
			So(err, ShouldBeNil)
		})

		Convey("ListVisibleInvocations works", func() {
			Convey("Anonymous can't see non-public job invocations", func() {
				_, _, err := e.ListVisibleInvocations(ctxAnon, "abc/1", 2, "")
				So(err, ShouldResemble, ErrNoSuchJob)
			})

			Convey("With paging", func() {
				invs, cursor, err := e.ListVisibleInvocations(ctxOne, "abc/1", 2, "")
				So(err, ShouldBeNil)
				So(len(invs), ShouldEqual, 2)
				So(invs[0].ID, ShouldEqual, 1)
				So(invs[1].ID, ShouldEqual, 2)
				So(cursor, ShouldNotEqual, "")

				invs, cursor, err = e.ListVisibleInvocations(ctxOne, "abc/1", 2, cursor)
				So(err, ShouldBeNil)
				So(len(invs), ShouldEqual, 1)
				So(invs[0].ID, ShouldEqual, 3)
				So(cursor, ShouldEqual, "")
			})
		})

		Convey("GetInvocation works", func() {
			Convey("Anonymous can't see non-public job invocation", func() {
				_, err := e.GetVisibleInvocation(ctxAnon, "abc/1", 1)
				So(err, ShouldResemble, ErrNoSuchInvocation)
			})

			Convey("NoSuchInvocation", func() {
				_, err := e.GetVisibleInvocation(ctxAdmin, "missing/job", 1)
				So(err, ShouldResemble, ErrNoSuchInvocation)
			})

			Convey("Reader sees", func() {
				inv, err := e.GetVisibleInvocation(ctxOne, "abc/1", 1)
				So(inv, ShouldNotBeNil)
				So(err, ShouldBeNil)
			})
		})
	})
}
// TestRecordOverrun verifies that recordOverrun creates a single "overrun"
// Invocation entity under the job, with StatusOverrun and a debug log
// describing the missed launch.
//
// Consistency fix: `ShouldBeNil` replaces `ShouldEqual, nil` for the error
// check, matching the idiom used everywhere else in this file.
func TestRecordOverrun(t *testing.T) {
	Convey("RecordOverrun works", t, func(ctx C) {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		job := &Job{JobID: "abc/1"}
		So(ds.Put(c, job), ShouldBeNil)
		So(e.recordOverrun(c, "abc/1", 1, 0), ShouldBeNil)
		ds.GetTestable(c).CatchupIndexes()

		q := ds.NewQuery("Invocation").Ancestor(ds.KeyForObj(c, job))
		var all []Invocation
		So(ds.GetAll(c, q, &all), ShouldBeNil)
		So(all, ShouldResemble, []Invocation{
			{
				ID:       9200093523825174512,
				JobKey:   ds.KeyForObj(c, job),
				Started:  epoch,
				Finished: epoch,
				Status:   task.StatusOverrun,
				DebugLog: "[22:42:00.000] New invocation should be starting now, but previous one is still starting\n" +
					"[22:42:00.000] Total overruns thus far: 1\n",
			}})
	})
}
// TestPrepareTopic checks that taskController.PrepareTopic derives the topic
// and subscription names from the task manager and publisher, uses pull mode
// on the dev server (empty push URL), and configures the PubSub topic only
// once per (invocation, publisher) pair.
func TestPrepareTopic(t *testing.T) {
	Convey("PrepareTopic works", t, func(ctx C) {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		// Intercept the real PubSub setup and just validate its arguments.
		pubSubCalls := 0
		e.configureTopic = func(c context.Context, topic, sub, pushURL, publisher string) error {
			pubSubCalls++
			ctx.So(topic, ShouldEqual, "projects/app/topics/dev-scheduler+noop+some~publisher.com")
			ctx.So(sub, ShouldEqual, "projects/app/subscriptions/dev-scheduler+noop+some~publisher.com")
			ctx.So(pushURL, ShouldEqual, "") // pull on dev server
			ctx.So(publisher, ShouldEqual, "some@publisher.com")
			return nil
		}

		ctl := &taskController{
			ctx:     c,
			eng:     e,
			manager: &noop.TaskManager{},
			saved: Invocation{
				ID:     123456,
				JobKey: ds.NewKey(c, "Job", "job_id", 0, nil),
			},
		}
		ctl.populateState()

		// Once.
		topic, token, err := ctl.PrepareTopic(c, "some@publisher.com")
		So(err, ShouldBeNil)
		So(topic, ShouldEqual, "projects/app/topics/dev-scheduler+noop+some~publisher.com")
		So(token, ShouldNotEqual, "")
		So(pubSubCalls, ShouldEqual, 1)

		// Again. 'configureTopic' should not be called anymore.
		_, _, err = ctl.PrepareTopic(c, "some@publisher.com")
		So(err, ShouldBeNil)
		So(pubSubCalls, ShouldEqual, 1)
	})
}
// TestProcessPubSubPush checks routing of PubSub push notifications: a
// message with a valid auth token reaches the task manager's
// handleNotification, a bad token is rejected, and a token referencing a
// deleted invocation yields a non-transient (fatal) error.
//
// Fixes: the local variable previously named `task` shadowed the imported
// `task` package (renamed to taskBlob), and the `ds.Delete` error return was
// silently discarded (now asserted nil so the scenario setup is validated).
func TestProcessPubSubPush(t *testing.T) {
	Convey("with mock invocation", t, func() {
		c := newTestContext(epoch)
		e, mgr := newTestEngine()

		So(ds.Put(c, &Job{
			JobID:     "abc/1",
			ProjectID: "abc",
			Enabled:   true,
		}), ShouldBeNil)

		taskBlob, err := proto.Marshal(&messages.TaskDefWrapper{
			Noop: &messages.NoopTask{},
		})
		So(err, ShouldBeNil)

		inv := Invocation{
			ID:     1,
			JobKey: ds.NewKey(c, "Job", "abc/1", 0, nil),
			Task:   taskBlob,
		}
		So(ds.Put(c, &inv), ShouldBeNil)

		// Skip talking to PubSub for real.
		e.configureTopic = func(c context.Context, topic, sub, pushURL, publisher string) error {
			return nil
		}

		ctl, err := controllerForInvocation(c, e, &inv)
		So(err, ShouldBeNil)

		// Grab the working auth token.
		_, token, err := ctl.PrepareTopic(c, "some@publisher.com")
		So(err, ShouldBeNil)
		So(token, ShouldNotEqual, "")

		Convey("ProcessPubSubPush works", func() {
			msg := struct {
				Message pubsub.PubsubMessage `json:"message"`
			}{
				Message: pubsub.PubsubMessage{
					Attributes: map[string]string{"auth_token": token},
					Data:       "blah",
				},
			}
			blob, err := json.Marshal(&msg)
			So(err, ShouldBeNil)
			handled := false
			mgr.handleNotification = func(ctx context.Context, msg *pubsub.PubsubMessage) error {
				So(msg.Data, ShouldEqual, "blah")
				handled = true
				return nil
			}
			So(e.ProcessPubSubPush(c, blob), ShouldBeNil)
			So(handled, ShouldBeTrue)
		})

		Convey("ProcessPubSubPush handles bad token", func() {
			msg := struct {
				Message pubsub.PubsubMessage `json:"message"`
			}{
				Message: pubsub.PubsubMessage{
					Attributes: map[string]string{"auth_token": token + "blah"},
					Data:       "blah",
				},
			}
			blob, err := json.Marshal(&msg)
			So(err, ShouldBeNil)
			So(e.ProcessPubSubPush(c, blob), ShouldErrLike, "bad token")
		})

		Convey("ProcessPubSubPush handles missing invocation", func() {
			// Assert the delete succeeded; an ignored failure here would make
			// the test silently validate the wrong scenario.
			So(ds.Delete(c, ds.KeyForObj(c, &inv)), ShouldBeNil)
			msg := pubsub.PubsubMessage{
				Attributes: map[string]string{"auth_token": token},
			}
			blob, err := json.Marshal(&msg)
			So(err, ShouldBeNil)
			So(transient.Tag.In(e.ProcessPubSubPush(c, blob)), ShouldBeFalse)
		})
	})
}
// TestAborts covers AbortInvocation and AbortJob, including ACL checks:
// anonymous and reader identities must be rejected, only the owner may abort.
func TestAborts(t *testing.T) {
	Convey("with mock invocation", t, func() {
		c := newTestContext(epoch)
		e, mgr := newTestEngine()

		// Contexts with different caller identities, used to exercise ACLs.
		ctxAnon := auth.WithState(c, &authtest.FakeState{
			Identity: "anonymous:anonymous",
		})
		ctxReader := auth.WithState(c, &authtest.FakeState{
			Identity:       "user:reader@example.com",
			IdentityGroups: []string{"readers"},
		})
		ctxOwner := auth.WithState(c, &authtest.FakeState{
			Identity:       "user:owner@example.com",
			IdentityGroups: []string{"owners"},
		})

		// A job in "QUEUED" state (about to run an invocation).
		const jobID = "abc/1"
		const invNonce = int64(12345)
		prepareQueuedJob(c, jobID, invNonce)

		// launchInv starts the queued invocation, verifies it is running and
		// tracked by the job entity, and returns the invocation ID.
		launchInv := func() int64 {
			var invID int64
			mgr.launchTask = func(ctx context.Context, ctl task.Controller, triggers []task.Trigger) error {
				invID = ctl.InvocationID()
				ctl.State().Status = task.StatusRunning
				So(ctl.Save(ctx), ShouldBeNil)
				return nil
			}
			So(e.startInvocation(c, jobID, invNonce, "", nil, 0), ShouldBeNil)

			// It is alive and the job entity tracks it.
			inv, err := e.getInvocation(c, jobID, invID)
			So(err, ShouldBeNil)
			So(inv.Status, ShouldEqual, task.StatusRunning)
			job, err := e.getJob(c, jobID)
			So(err, ShouldBeNil)
			So(job.State.State, ShouldEqual, JobStateRunning)
			So(job.State.InvocationID, ShouldEqual, invID)
			return invID
		}

		Convey("AbortInvocation works", func() {
			// Actually launch the queued invocation.
			invID := launchInv()

			// Try to kill it w/o permission.
			So(e.AbortInvocation(c, jobID, invID), ShouldNotBeNil) // No current identity.
			So(e.AbortInvocation(ctxAnon, jobID, invID), ShouldResemble, ErrNoSuchJob)
			So(e.AbortInvocation(ctxReader, jobID, invID), ShouldResemble, ErrNoOwnerPermission)

			// Now kill it.
			So(e.AbortInvocation(ctxOwner, jobID, invID), ShouldBeNil)

			// It is dead.
			inv, err := e.getInvocation(c, jobID, invID)
			So(err, ShouldBeNil)
			So(inv.Status, ShouldEqual, task.StatusAborted)

			// The job moved on with its life.
			job, err := e.getJob(c, jobID)
			So(err, ShouldBeNil)
			So(job.State.State, ShouldEqual, JobStateSuspended)
			So(job.State.InvocationID, ShouldEqual, 0)
		})

		Convey("AbortJob kills running invocation", func() {
			// Actually launch the queued invocation.
			invID := launchInv()

			// Try to kill it w/o permission.
			So(e.AbortJob(c, jobID), ShouldNotBeNil) // No current identity.
			So(e.AbortJob(ctxAnon, jobID), ShouldResemble, ErrNoSuchJob)
			So(e.AbortJob(ctxReader, jobID), ShouldResemble, ErrNoOwnerPermission)

			// Kill it.
			So(e.AbortJob(ctxOwner, jobID), ShouldBeNil)

			// It is dead.
			inv, err := e.getInvocation(c, jobID, invID)
			So(err, ShouldBeNil)
			So(inv.Status, ShouldEqual, task.StatusAborted)

			// The job moved on with its life.
			job, err := e.getJob(c, jobID)
			So(err, ShouldBeNil)
			So(job.State.State, ShouldEqual, JobStateSuspended)
			So(job.State.InvocationID, ShouldEqual, 0)
		})

		Convey("AbortJob kills queued invocation", func() {
			// No launchInv() here: the invocation is still only queued.
			So(e.AbortJob(ctxOwner, jobID), ShouldBeNil)

			// The job moved on with its life.
			job, err := e.getJob(c, jobID)
			So(err, ShouldBeNil)
			So(job.State.State, ShouldEqual, JobStateSuspended)
			So(job.State.InvocationID, ShouldEqual, 0)
		})

		Convey("AbortJob fails on non-existing job", func() {
			So(e.AbortJob(ctxOwner, "not/exists"), ShouldResemble, ErrNoSuchJob)
		})
	})
}
// TestAddTimer checks that Controller.AddTimer enqueues a task on the timers
// queue, that firing the timer is routed to the manager's handleTimer
// callback, and that timers added by an already-finished invocation are
// dropped.
func TestAddTimer(t *testing.T) {
	Convey("with mock job", t, func() {
		c := newTestContext(epoch)
		e, mgr := newTestEngine()

		// A job in "QUEUED" state (about to run an invocation).
		const jobID = "abc/1"
		const invNonce = int64(12345)
		prepareQueuedJob(c, jobID, invNonce)

		Convey("AddTimer works", func() {
			// Start an invocation that adds a timer.
			mgr.launchTask = func(ctx context.Context, ctl task.Controller, triggers []task.Trigger) error {
				ctl.AddTimer(ctx, time.Minute, "timer-name", []byte{1, 2, 3})
				ctl.State().Status = task.StatusRunning
				return nil
			}
			So(e.startInvocation(c, jobID, invNonce, "", nil, 0), ShouldBeNil)

			// The job is running.
			job, err := e.getJob(c, jobID)
			So(err, ShouldBeNil)
			So(job.State.State, ShouldEqual, JobStateRunning)

			// Added a task to the timers task queue.
			tasks := tq.GetTestable(c).GetScheduledTasks()["timers-q"]
			So(len(tasks), ShouldEqual, 1)
			// Pull the single task out of the scheduled-tasks map.
			var tqt *tq.Task
			for _, tqt = range tasks {
			}
			So(tqt.ETA, ShouldResemble, clock.Now(c).Add(time.Minute))

			// Verify task body. The InvID is deterministic because the test
			// context uses a seeded pseudo-random source.
			payload := actionTaskPayload{}
			So(json.Unmarshal(tqt.Payload, &payload), ShouldBeNil)
			So(payload, ShouldResemble, actionTaskPayload{
				JobID: "abc/1",
				InvID: 9200093523825174512,
				InvTimer: &invocationTimer{
					Delay:   time.Minute,
					Name:    "timer-name",
					Payload: []byte{1, 2, 3},
				},
			})

			// Clear the queue.
			tq.GetTestable(c).ResetTasks()

			// Time comes to execute the task.
			mgr.handleTimer = func(ctx context.Context, ctl task.Controller, name string, payload []byte) error {
				So(name, ShouldEqual, "timer-name")
				So(payload, ShouldResemble, []byte{1, 2, 3})
				// This timer must be ignored: the invocation finishes below.
				ctl.AddTimer(ctx, time.Minute, "ignored-timer", nil)
				ctl.State().Status = task.StatusSucceeded
				return nil
			}
			clock.Get(c).(testclock.TestClock).Add(time.Minute)
			So(e.ExecuteSerializedAction(c, tqt.Payload, 0), ShouldBeNil)

			// The job has finished (by timer handler). Moves back to SUSPENDED state.
			job, err = e.getJob(c, jobID)
			So(err, ShouldBeNil)
			So(job.State.State, ShouldEqual, JobStateSuspended)

			// No new timers added for finished job.
			tasks = tq.GetTestable(c).GetScheduledTasks()["timers-q"]
			So(len(tasks), ShouldEqual, 0)
		})
	})
}
// TestTrimDebugLog checks Invocation.trimDebugLog: small logs are left
// untouched, oversized logs are cut once in the middle (keeping head and
// tail lines), and a single line longer than the limit is truncated.
func TestTrimDebugLog(t *testing.T) {
	ctx := clock.Set(context.Background(), testclock.New(epoch))
	junk := strings.Repeat("a", 1000)

	// genLines builds a debug log containing lines numbered [start, end),
	// each ~1KB long.
	genLines := func(start, end int) string {
		inv := Invocation{}
		for i := start; i < end; i++ {
			inv.debugLog(ctx, "Line %d - %s", i, junk)
		}
		return inv.DebugLog
	}

	Convey("small log is not trimmed", t, func() {
		inv := Invocation{
			DebugLog: genLines(0, 100),
		}
		inv.trimDebugLog()
		So(inv.DebugLog, ShouldEqual, genLines(0, 100))
	})

	Convey("huge log is trimmed", t, func() {
		inv := Invocation{
			DebugLog: genLines(0, 500),
		}
		inv.trimDebugLog()
		// First 94 lines and last 100 lines survive; the middle is replaced
		// by a single cut marker.
		So(inv.DebugLog, ShouldEqual,
			genLines(0, 94)+"--- the log has been cut here ---\n"+genLines(400, 500))
	})

	Convey("writing lines to huge log and trimming", t, func() {
		inv := Invocation{
			DebugLog: genLines(0, 500),
		}
		inv.trimDebugLog()
		for i := 0; i < 10; i++ {
			inv.debugLog(ctx, "Line %d - %s", i, junk)
			inv.trimDebugLog()
		}
		// Still single cut only. New 10 lines are at the end.
		So(inv.DebugLog, ShouldEqual,
			genLines(0, 94)+"--- the log has been cut here ---\n"+genLines(410, 500)+genLines(0, 10))
	})

	Convey("one huge line", t, func() {
		inv := Invocation{
			DebugLog: strings.Repeat("z", 300000),
		}
		inv.trimDebugLog()
		// A single line with no newlines is truncated to exactly the size
		// limit, with the cut marker appended.
		const msg = "\n--- the log has been cut here ---\n"
		So(inv.DebugLog, ShouldEqual, strings.Repeat("z", debugLogSizeLimit-len(msg))+msg)
	})
}
////
// newTestContext returns a testing context with an in-memory datastore, a
// test clock frozen at 'now', a deterministically seeded math/rand source
// (this is what makes nonces and invocation IDs in tests reproducible),
// test secrets, and the two task queues used by the engine.
func newTestContext(now time.Time) context.Context {
	c := memory.Use(context.Background())
	c = clock.Set(c, testclock.New(now))
	c = mathrand.Set(c, rand.New(rand.NewSource(1000)))
	c = testsecrets.Use(c)

	// Composite index on (Enabled, ProjectID) for Job queries.
	ds.GetTestable(c).AddIndexes(&ds.IndexDefinition{
		Kind: "Job",
		SortBy: []ds.IndexColumn{
			{Property: "Enabled"},
			{Property: "ProjectID"},
		},
	})
	ds.GetTestable(c).CatchupIndexes()

	// Queues matching the names passed to NewEngine in newTestEngine.
	tq.GetTestable(c).CreateQueue("timers-q")
	tq.GetTestable(c).CreateQueue("invs-q")
	return c
}
// newTestEngine creates an engineImpl wired to a catalog with a single
// registered fakeTaskManager, returning both so tests can install task
// callbacks on the manager.
func newTestEngine() (*engineImpl, *fakeTaskManager) {
	mgr := &fakeTaskManager{}
	cat := catalog.New("scheduler.cfg")
	cat.RegisterTaskManager(mgr)
	return NewEngine(Config{
		Catalog:              cat,
		TimersQueuePath:      "/timers",
		TimersQueueName:      "timers-q",
		InvocationsQueuePath: "/invs",
		InvocationsQueueName: "invs-q",
		PubSubPushPath:       "/push-url",
	}).(*engineImpl), mgr
}
////
// fakeTaskManager implements task.Manager interface.
//
// Test cases install only the callbacks they expect to be invoked; calling
// a method whose callback is nil panics, making unexpected calls obvious.
type fakeTaskManager struct {
	launchTask         func(ctx context.Context, ctl task.Controller, triggers []task.Trigger) error
	handleNotification func(ctx context.Context, msg *pubsub.PubsubMessage) error
	handleTimer        func(ctx context.Context, ctl task.Controller, name string, payload []byte) error
}
// Name is part of task.Manager interface.
func (m *fakeTaskManager) Name() string {
	return "fake"
}
// ProtoMessageType is part of task.Manager interface. The fake handles
// NoopTask messages.
func (m *fakeTaskManager) ProtoMessageType() proto.Message {
	return (*messages.NoopTask)(nil)
}
// Traits is part of task.Manager interface. The fake uses default traits.
func (m *fakeTaskManager) Traits() task.Traits {
	return task.Traits{}
}
// ValidateProtoMessage is part of task.Manager interface. The fake accepts
// any message.
func (m *fakeTaskManager) ValidateProtoMessage(msg proto.Message) error {
	return nil
}
// LaunchTask is part of task.Manager interface. It delegates to the
// launchTask callback installed by the test case (panics if none is set).
func (m *fakeTaskManager) LaunchTask(c context.Context, ctl task.Controller, triggers []task.Trigger) error {
	return m.launchTask(c, ctl, triggers)
}
// AbortTask is part of task.Manager interface. The fake does nothing.
func (m *fakeTaskManager) AbortTask(c context.Context, ctl task.Controller) error {
	return nil
}
// HandleNotification is part of task.Manager interface. It delegates to the
// handleNotification callback installed by the test case.
func (m *fakeTaskManager) HandleNotification(c context.Context, ctl task.Controller, msg *pubsub.PubsubMessage) error {
	return m.handleNotification(c, msg)
}
// HandleTimer is part of task.Manager interface. It delegates to the
// handleTimer callback installed by the test case.
//
// Uses a pointer receiver for consistency with every other fakeTaskManager
// method (the original mixed a value receiver in; the fake is always used
// as *fakeTaskManager, so the interface is still satisfied).
func (m *fakeTaskManager) HandleTimer(c context.Context, ctl task.Controller, name string, payload []byte) error {
	return m.handleTimer(c, ctl, name, payload)
}
////
// sortedJobIds returns the distinct IDs of the given jobs, sorted
// alphabetically.
func sortedJobIds(jobs []*Job) []string {
	seen := stringset.New(len(jobs))
	for _, job := range jobs {
		seen.Add(job.JobID)
	}
	sorted := seen.ToSlice()
	sort.Strings(sorted)
	return sorted
}
// prepareQueuedJob makes datastore entries for a job in QUEUED state.
//
// The job is a triggered noop task whose ACLs reference the "owners" and
// "readers" groups used by the FakeState contexts in tests. Panics on
// errors, since this is test setup code.
func prepareQueuedJob(c context.Context, jobID string, invNonce int64) {
	taskBlob, err := proto.Marshal(&messages.TaskDefWrapper{
		Noop: &messages.NoopTask{},
	})
	if err != nil {
		panic(err)
	}
	// ProjectID is the part of the job ID before the "/".
	chunks := strings.Split(jobID, "/")
	err = ds.Put(c, &Job{
		JobID:     jobID,
		ProjectID: chunks[0],
		Enabled:   true,
		Acls:      acl.GrantsByRole{Owners: []string{"group:owners"}, Readers: []string{"group:readers"}},
		Task:      taskBlob,
		Schedule:  "triggered",
		State: JobState{
			State:           JobStateQueued,
			InvocationNonce: invNonce,
		},
	})
	if err != nil {
		panic(err)
	}
}
// noopTaskBytes returns a serialized TaskDefWrapper holding a NoopTask.
//
// Panics on marshaling errors instead of silently ignoring them, matching
// how prepareQueuedJob handles the same failure.
func noopTaskBytes() []byte {
	buf, err := proto.Marshal(&messages.TaskDefWrapper{Noop: &messages.NoopTask{}})
	if err != nil {
		panic(err)
	}
	return buf
}
// allJobs fetches all Job entities from the datastore, panicking on errors.
// Zero time fields are normalized so callers can compare results with
// ShouldResemble against literal Job structs.
func allJobs(c context.Context) []Job {
	ds.GetTestable(c).CatchupIndexes()
	entities := []Job{}
	if err := ds.GetAll(c, ds.NewQuery("Job"), &entities); err != nil {
		panic(err)
	}
	// Strip UTC location pointers from zero time.Time{} so that ShouldResemble
	// can compare it to default time.Time{}. nil location is UTC too.
	for i := range entities {
		ent := &entities[i]
		if ent.State.InvocationTime.IsZero() {
			ent.State.InvocationTime = time.Time{}
		}
		if ent.State.TickTime.IsZero() {
			ent.State.TickTime = time.Time{}
		}
	}
	return entities
}
// getJob returns the Job with the given ID from the datastore, panicking if
// it does not exist (this is a test helper; a missing job is a test bug).
func getJob(c context.Context, jobID string) Job {
	for _, job := range allJobs(c) {
		if job.JobID == jobID {
			return job
		}
	}
	// Fixed typo in the panic message ("no such jobs" -> "no such job").
	panic(fmt.Errorf("no such job %s", jobID))
}
// ensureZeroTasks asserts that the task queue 'q' has no scheduled tasks.
func ensureZeroTasks(c context.Context, q string) {
	tqt := tq.GetTestable(c)
	tasks := tqt.GetScheduledTasks()[q]
	// len() of a nil map is 0, so the original's separate nil check was
	// redundant.
	So(len(tasks), ShouldEqual, 0)
}
// ensureOneTask asserts that the task queue 'q' holds exactly one scheduled
// task and returns it.
func ensureOneTask(c context.Context, q string) *tq.Task {
	scheduled := tq.GetTestable(c).GetScheduledTasks()[q]
	So(len(scheduled), ShouldEqual, 1)
	for _, tsk := range scheduled {
		return tsk
	}
	return nil // unreachable when the assertion above holds
}
// popAllTasks returns all tasks scheduled on the queue 'q' and resets the
// testable task queue, so subsequent assertions see only newly added tasks.
func popAllTasks(c context.Context, q string) []*tq.Task {
	tqt := tq.GetTestable(c)
	// Snapshot once: the original called GetScheduledTasks() twice for no
	// benefit.
	scheduled := tqt.GetScheduledTasks()[q]
	tasks := make([]*tq.Task, 0, len(scheduled))
	for _, t := range scheduled {
		tasks = append(tasks, t)
	}
	tqt.ResetTasks()
	return tasks
}
scheduler: use explicit taskqueue in anticipation of tq lib usage.
R=9f116ddb1b24f6fc1916a676eb17161b6c07dfc1@chromium.org
Change-Id: Ie82d0eb42937d77184cf4b2cd55a256266ee8bed
Reviewed-on: https://chromium-review.googlesource.com/671217
Commit-Queue: Andrii Shyshkalov <a30c74fa30536fe7ea81ed6dec202e35e149e1fd@chromium.org>
Reviewed-by: Vadim Shtayura <9f116ddb1b24f6fc1916a676eb17161b6c07dfc1@chromium.org>
// Copyright 2015 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package engine
import (
"encoding/json"
"fmt"
"math/rand"
"sort"
"strings"
"testing"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/api/pubsub/v1"
"go.chromium.org/gae/impl/memory"
ds "go.chromium.org/gae/service/datastore"
"go.chromium.org/gae/service/taskqueue"
"go.chromium.org/luci/common/clock"
"go.chromium.org/luci/common/clock/testclock"
"go.chromium.org/luci/common/data/rand/mathrand"
"go.chromium.org/luci/common/data/stringset"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/common/retry/transient"
"go.chromium.org/luci/server/auth"
"go.chromium.org/luci/server/auth/authtest"
"go.chromium.org/luci/server/secrets/testsecrets"
"go.chromium.org/luci/scheduler/appengine/acl"
"go.chromium.org/luci/scheduler/appengine/catalog"
"go.chromium.org/luci/scheduler/appengine/messages"
"go.chromium.org/luci/scheduler/appengine/task"
"go.chromium.org/luci/scheduler/appengine/task/noop"
. "github.com/smartystreets/goconvey/convey"
. "go.chromium.org/luci/common/testing/assertions"
)
// TestGetAllProjects checks that GetAllProjects returns the distinct project
// IDs that have at least one enabled job.
func TestGetAllProjects(t *testing.T) {
	Convey("works", t, func() {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		// Empty.
		projects, err := e.GetAllProjects(c)
		So(err, ShouldBeNil)
		So(len(projects), ShouldEqual, 0)

		// Non empty.
		So(ds.Put(c,
			&Job{JobID: "abc/1", ProjectID: "abc", Enabled: true},
			&Job{JobID: "abc/2", ProjectID: "abc", Enabled: true},
			&Job{JobID: "def/1", ProjectID: "def", Enabled: true},
			&Job{JobID: "xyz/1", ProjectID: "xyz", Enabled: false},
		), ShouldBeNil)
		ds.GetTestable(c).CatchupIndexes()
		projects, err = e.GetAllProjects(c)
		So(err, ShouldBeNil)
		// "xyz" is excluded: its only job is disabled.
		So(projects, ShouldResemble, []string{"abc", "def"})
	})
}
// TestUpdateProjectJobs covers adding, no-op re-adding, rescheduling, and
// removing jobs via UpdateProjectJobs, verifying both datastore state and
// the timer tasks enqueued by each change.
//
// Tick nonces in the expected states are deterministic because the test
// context seeds the pseudo-random source.
func TestUpdateProjectJobs(t *testing.T) {
	Convey("works", t, func() {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		// Doing nothing.
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{})

		// Adding a new job (ticks every 5 sec).
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev1",
				Schedule: "*/5 * * * * * *",
				Acls:     acl.GrantsByRole{Readers: []string{"group:r"}, Owners: []string{"groups:o"}},
			}}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{
			{
				JobID:     "abc/1",
				ProjectID: "abc",
				Revision:  "rev1",
				Enabled:   true,
				Acls:      acl.GrantsByRole{Readers: []string{"group:r"}, Owners: []string{"groups:o"}},
				Schedule:  "*/5 * * * * * *",
				State: JobState{
					State:     "SCHEDULED",
					TickNonce: 6278013164014963328,
					TickTime:  epoch.Add(5 * time.Second),
				},
			},
		})

		// Enqueued timer task to launch it.
		// NOTE(review): the local "task" shadows the imported "task" package
		// for the rest of this closure; the package is not used here.
		task := ensureOneTask(c, "timers-q")
		So(task.Path, ShouldEqual, "/timers")
		So(task.ETA, ShouldResemble, epoch.Add(5*time.Second))
		taskqueue.GetTestable(c).ResetTasks()

		// Re-adding the same job with the exact same config revision -> noop.
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev1",
				Schedule: "*/5 * * * * * *",
			}}), ShouldBeNil)
		ensureZeroTasks(c, "timers-q")
		ensureZeroTasks(c, "invs-q")

		// Changing schedule to tick earlier -> rescheduled.
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev2",
				Schedule: "*/1 * * * * * *",
			}}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{
			{
				JobID:     "abc/1",
				ProjectID: "abc",
				Revision:  "rev2",
				Enabled:   true,
				Schedule:  "*/1 * * * * * *",
				State: JobState{
					State:     "SCHEDULED",
					TickNonce: 886585524575582446,
					TickTime:  epoch.Add(1 * time.Second),
				},
			},
		})

		// Enqueued timer task to launch it.
		task = ensureOneTask(c, "timers-q")
		So(task.Path, ShouldEqual, "/timers")
		So(task.ETA, ShouldResemble, epoch.Add(1*time.Second))
		taskqueue.GetTestable(c).ResetTasks()

		// Removed -> goes to disabled state.
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{
			{
				JobID:     "abc/1",
				ProjectID: "abc",
				Revision:  "rev2",
				Enabled:   false,
				Schedule:  "*/1 * * * * * *",
				State: JobState{
					State: "DISABLED",
				},
			},
		})
		ensureZeroTasks(c, "timers-q")
		ensureZeroTasks(c, "invs-q")
	})
}
// TestTransactionRetries checks that UpdateProjectJobs survives datastore
// transaction retries, and that exhausting all retries surfaces a transient
// error while leaving no partial state behind.
func TestTransactionRetries(t *testing.T) {
	Convey("retry works", t, func() {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		// Adding a new job with transaction retry, should enqueue one task.
		ds.GetTestable(c).SetTransactionRetryCount(2)
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev1",
				Schedule: "*/5 * * * * * *",
			}}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{
			{
				JobID:     "abc/1",
				ProjectID: "abc",
				Revision:  "rev1",
				Enabled:   true,
				Schedule:  "*/5 * * * * * *",
				State: JobState{
					State:     "SCHEDULED",
					TickNonce: 928953616732700780,
					TickTime:  epoch.Add(5 * time.Second),
				},
			},
		})

		// Enqueued timer task to launch it (exactly one, despite retries).
		task := ensureOneTask(c, "timers-q")
		So(task.Path, ShouldEqual, "/timers")
		So(task.ETA, ShouldResemble, epoch.Add(5*time.Second))
		taskqueue.GetTestable(c).ResetTasks()
	})

	Convey("collision is handled", t, func() {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		// Pretend collision happened in all retries.
		ds.GetTestable(c).SetTransactionRetryCount(15)
		err := e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev1",
				Schedule: "*/5 * * * * * *",
			}})
		So(transient.Tag.In(err), ShouldBeTrue)
		// Nothing was committed and no tasks were enqueued.
		So(allJobs(c), ShouldResemble, []Job{})
		ensureZeroTasks(c, "timers-q")
		ensureZeroTasks(c, "invs-q")
	})
}
// TestResetAllJobsOnDevServer checks that ResetAllJobsOnDevServer
// reschedules an existing job: after the clock advances, the tick nonce is
// regenerated and the tick time is recomputed relative to the new now.
func TestResetAllJobsOnDevServer(t *testing.T) {
	Convey("works", t, func() {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev1",
				Schedule: "*/5 * * * * * *",
			}}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{
			{
				JobID:     "abc/1",
				ProjectID: "abc",
				Revision:  "rev1",
				Enabled:   true,
				Schedule:  "*/5 * * * * * *",
				State: JobState{
					State:     "SCHEDULED",
					TickNonce: 6278013164014963328,
					TickTime:  epoch.Add(5 * time.Second),
				},
			},
		})

		clock.Get(c).(testclock.TestClock).Add(1 * time.Minute)

		// ResetAllJobsOnDevServer should reschedule the job.
		So(e.ResetAllJobsOnDevServer(c), ShouldBeNil)
		So(allJobs(c), ShouldResemble, []Job{
			{
				JobID:     "abc/1",
				ProjectID: "abc",
				Revision:  "rev1",
				Enabled:   true,
				Schedule:  "*/5 * * * * * *",
				State: JobState{
					State:     "SCHEDULED",
					TickNonce: 886585524575582446,
					// 65s = the 60s the clock advanced + 5s schedule period.
					TickTime: epoch.Add(65 * time.Second),
				},
			},
		})
	})
}
// TestFullFlow walks a cron job through a complete lifecycle: scheduling a
// tick, queuing an invocation, a transiently-failing launch attempt, a
// successful retry (which marks the first attempt as failed), and the
// return to the SCHEDULED state.
//
// All nonces and invocation IDs asserted below are deterministic because
// newTestContext seeds the pseudo-random source with a fixed value.
func TestFullFlow(t *testing.T) {
	Convey("full flow", t, func() {
		c := newTestContext(epoch)
		e, mgr := newTestEngine()
		taskBytes := noopTaskBytes()

		// expectedJobs returns the expected datastore state of the single
		// test job, given its expected JobState.
		expectedJobs := func(state JobState) []Job {
			return []Job{
				{
					JobID:     "abc/1",
					ProjectID: "abc",
					Revision:  "rev1",
					Enabled:   true,
					Schedule:  "*/5 * * * * * *",
					Task:      taskBytes,
					State:     state,
				},
			}
		}

		// Adding a new job (ticks every 5 sec).
		So(e.UpdateProjectJobs(c, "abc", []catalog.Definition{
			{
				JobID:    "abc/1",
				Revision: "rev1",
				Schedule: "*/5 * * * * * *",
				Task:     taskBytes,
			}}), ShouldBeNil)
		So(allJobs(c), ShouldResemble, expectedJobs(JobState{
			State:     "SCHEDULED",
			TickNonce: 6278013164014963328,
			TickTime:  epoch.Add(5 * time.Second),
		}))

		// Enqueued timer task to launch it.
		tsk := ensureOneTask(c, "timers-q")
		So(tsk.Path, ShouldEqual, "/timers")
		So(tsk.ETA, ShouldResemble, epoch.Add(5*time.Second))
		taskqueue.GetTestable(c).ResetTasks()

		// Tick time comes, the tick task is executed, job is added to queue.
		clock.Get(c).(testclock.TestClock).Add(5 * time.Second)
		So(e.ExecuteSerializedAction(c, tsk.Payload, 0), ShouldBeNil)

		// Job is in queued state now.
		So(allJobs(c), ShouldResemble, expectedJobs(JobState{
			State:           "QUEUED",
			TickNonce:       886585524575582446,
			TickTime:        epoch.Add(10 * time.Second),
			InvocationNonce: 928953616732700780,
			InvocationTime:  epoch.Add(5 * time.Second),
		}))

		// Next tick task is added.
		tickTask := ensureOneTask(c, "timers-q")
		So(tickTask.Path, ShouldEqual, "/timers")
		So(tickTask.ETA, ShouldResemble, epoch.Add(10*time.Second))

		// Invocation task (ETA is 1 sec in the future).
		invTask := ensureOneTask(c, "invs-q")
		So(invTask.Path, ShouldEqual, "/invs")
		So(invTask.ETA, ShouldResemble, epoch.Add(6*time.Second))
		taskqueue.GetTestable(c).ResetTasks()

		// Time to run the job and it fails to launch with a transient error.
		mgr.launchTask = func(ctx context.Context, ctl task.Controller, triggers []task.Trigger) error {
			// Check data provided via the controller.
			So(ctl.JobID(), ShouldEqual, "abc/1")
			So(ctl.InvocationID(), ShouldEqual, int64(9200093518582198800))
			So(ctl.InvocationNonce(), ShouldEqual, int64(928953616732700780))
			So(ctl.Task(), ShouldResemble, &messages.NoopTask{})
			ctl.DebugLog("oops, fail")
			return errors.New("oops", transient.Tag)
		}
		// The transient launch error propagates out of the action.
		So(transient.Tag.In(e.ExecuteSerializedAction(c, invTask.Payload, 0)), ShouldBeTrue)

		// Still in QUEUED state, but with InvocationID assigned.
		jobs := allJobs(c)
		So(jobs, ShouldResemble, expectedJobs(JobState{
			State:           "QUEUED",
			TickNonce:       886585524575582446,
			TickTime:        epoch.Add(10 * time.Second),
			InvocationNonce: 928953616732700780,
			InvocationTime:  epoch.Add(5 * time.Second),
			InvocationID:    9200093518582198800,
		}))
		jobKey := ds.KeyForObj(c, &jobs[0])

		// Check Invocation fields. It indicates that the attempt has failed and
		// will be retried.
		inv := Invocation{ID: 9200093518582198800, JobKey: jobKey}
		So(ds.Get(c, &inv), ShouldBeNil)
		inv.JobKey = nil // for easier ShouldResemble below
		debugLog := inv.DebugLog
		inv.DebugLog = ""
		So(inv, ShouldResemble, Invocation{
			ID:              9200093518582198800,
			InvocationNonce: 928953616732700780,
			Revision:        "rev1",
			Started:         epoch.Add(5 * time.Second),
			Task:            taskBytes,
			DebugLog:        "",
			Status:          task.StatusRetrying,
			MutationsCount:  1,
		})
		So(debugLog, ShouldContainSubstring, "[22:42:05.000] Invocation initiated (attempt 1)")
		So(debugLog, ShouldContainSubstring, "[22:42:05.000] oops, fail")
		So(debugLog, ShouldContainSubstring, "[22:42:05.000] The invocation will be retried")

		// The job is still in QUEUED state.
		So(allJobs(c), ShouldResemble, expectedJobs(JobState{
			State:           "QUEUED",
			TickNonce:       886585524575582446,
			TickTime:        epoch.Add(10 * time.Second),
			InvocationNonce: 928953616732700780,
			InvocationTime:  epoch.Add(5 * time.Second),
			InvocationID:    9200093518582198800,
		}))

		// Second attempt. Now starts, hangs midway, then finishes.
		mgr.launchTask = func(ctx context.Context, ctl task.Controller, triggers []task.Trigger) error {
			// Make sure Save() checkpoints the progress.
			ctl.DebugLog("Starting")
			ctl.State().Status = task.StatusRunning
			So(ctl.Save(ctx), ShouldBeNil)

			// After first Save the job and the invocation are in running state.
			So(allJobs(c), ShouldResemble, expectedJobs(JobState{
				State:                "RUNNING",
				TickNonce:            886585524575582446,
				TickTime:             epoch.Add(10 * time.Second),
				InvocationNonce:      928953616732700780,
				InvocationRetryCount: 1,
				InvocationTime:       epoch.Add(5 * time.Second),
				InvocationID:         9200093518582296192,
			}))
			inv := Invocation{ID: 9200093518582296192, JobKey: jobKey}
			So(ds.Get(c, &inv), ShouldBeNil)
			inv.JobKey = nil // for easier ShouldResemble below
			So(inv, ShouldResemble, Invocation{
				ID:              9200093518582296192,
				InvocationNonce: 928953616732700780,
				Revision:        "rev1",
				Started:         epoch.Add(5 * time.Second),
				Task:            taskBytes,
				DebugLog:        "[22:42:05.000] Invocation initiated (attempt 2)\n[22:42:05.000] Starting\n",
				RetryCount:      1,
				Status:          task.StatusRunning,
				MutationsCount:  1,
			})

			// Noop save, just for the code coverage.
			So(ctl.Save(ctx), ShouldBeNil)

			// Change state to the final one.
			ctl.State().Status = task.StatusSucceeded
			ctl.State().ViewURL = "http://view_url"
			ctl.State().TaskData = []byte("blah")
			return nil
		}
		So(e.ExecuteSerializedAction(c, invTask.Payload, 1), ShouldBeNil)

		// After final save.
		inv = Invocation{ID: 9200093518582296192, JobKey: jobKey}
		So(ds.Get(c, &inv), ShouldBeNil)
		inv.JobKey = nil // for easier ShouldResemble below
		debugLog = inv.DebugLog
		inv.DebugLog = ""
		So(inv, ShouldResemble, Invocation{
			ID:              9200093518582296192,
			InvocationNonce: 928953616732700780,
			Revision:        "rev1",
			Started:         epoch.Add(5 * time.Second),
			Finished:        epoch.Add(5 * time.Second),
			Task:            taskBytes,
			DebugLog:        "",
			RetryCount:      1,
			Status:          task.StatusSucceeded,
			ViewURL:         "http://view_url",
			TaskData:        []byte("blah"),
			MutationsCount:  2,
		})
		So(debugLog, ShouldContainSubstring, "[22:42:05.000] Invocation initiated (attempt 2)")
		So(debugLog, ShouldContainSubstring, "[22:42:05.000] Starting")
		So(debugLog, ShouldContainSubstring, "with status SUCCEEDED")

		// Previous invocation is aborted now (in Failed state).
		inv = Invocation{ID: 9200093518582198800, JobKey: jobKey}
		So(ds.Get(c, &inv), ShouldBeNil)
		inv.JobKey = nil // for easier ShouldResemble below
		debugLog = inv.DebugLog
		inv.DebugLog = ""
		So(inv, ShouldResemble, Invocation{
			ID:              9200093518582198800,
			InvocationNonce: 928953616732700780,
			Revision:        "rev1",
			Started:         epoch.Add(5 * time.Second),
			Finished:        epoch.Add(5 * time.Second),
			Task:            taskBytes,
			DebugLog:        "",
			Status:          task.StatusFailed,
			MutationsCount:  2,
		})
		So(debugLog, ShouldContainSubstring,
			"[22:42:05.000] New invocation is starting (9200093518582296192), marking this one as failed")

		// Job is in scheduled state again.
		So(allJobs(c), ShouldResemble, expectedJobs(JobState{
			State:     "SCHEDULED",
			TickNonce: 886585524575582446,
			TickTime:  epoch.Add(10 * time.Second),
			PrevTime:  epoch.Add(5 * time.Second),
		}))
	})
}
// TestForceInvocation checks ForceInvocation: non-owners are rejected, and
// the returned future resolves to the launched invocation ID only after the
// queued launch actually executes.
func TestForceInvocation(t *testing.T) {
	Convey("full flow", t, func() {
		c := newTestContext(epoch)
		e, mgr := newTestEngine()

		So(ds.Put(c, &Job{
			JobID:     "abc/1",
			ProjectID: "abc",
			Enabled:   true,
			Schedule:  "triggered",
			Task:      noopTaskBytes(),
			State:     JobState{State: JobStateSuspended},
			Acls:      acl.GrantsByRole{Owners: []string{"one@example.com"}},
		}), ShouldBeNil)

		ctxOne := auth.WithState(c, &authtest.FakeState{Identity: "user:one@example.com"})
		ctxTwo := auth.WithState(c, &authtest.FakeState{Identity: "user:two@example.com"})

		// Only owner can trigger. Note the non-owner gets ErrNoSuchJob
		// (presumably so the job's existence is not revealed — confirm).
		fut, err := e.ForceInvocation(ctxTwo, "abc/1")
		So(err, ShouldEqual, ErrNoSuchJob)

		// Triggers something.
		fut, err = e.ForceInvocation(ctxOne, "abc/1")
		So(err, ShouldBeNil)
		So(fut, ShouldNotBeNil)

		// No invocation yet.
		invID, err := fut.InvocationID(ctxOne)
		So(err, ShouldBeNil)
		So(invID, ShouldEqual, 0)

		// But the launch is queued.
		invTask := ensureOneTask(c, "invs-q")
		So(invTask.Path, ShouldEqual, "/invs")
		taskqueue.GetTestable(c).ResetTasks()

		// Launch it.
		var startedInvID int64
		mgr.launchTask = func(ctx context.Context, ctl task.Controller, _ []task.Trigger) error {
			startedInvID = ctl.InvocationID()
			ctl.State().Status = task.StatusRunning
			return nil
		}
		So(e.ExecuteSerializedAction(c, invTask.Payload, 0), ShouldBeNil)
		ds.GetTestable(c).CatchupIndexes()

		// The invocation ID is now available.
		invID, err = fut.InvocationID(ctxOne)
		So(err, ShouldBeNil)
		So(invID, ShouldEqual, startedInvID)
	})
}
// TestFullTriggeredFlow runs a triggering job that emits triggers, then
// verifies the triggers fan out to two triggered jobs and are delivered to
// their task launchers (one trigger per job, since both emits share the same
// trigger ID).
func TestFullTriggeredFlow(t *testing.T) {
	Convey("full triggered flow", t, func() {
		c := newTestContext(epoch)
		e, mgr := newTestEngine()
		taskBytes := noopTaskBytes()

		// Create a new triggering noop job (ticks every 5 sec).
		jobsDefinitions := []catalog.Definition{
			{
				JobID:           "abc/1",
				Revision:        "rev1",
				Schedule:        "*/5 * * * * * *",
				Task:            taskBytes,
				Flavor:          catalog.JobFlavorTrigger,
				TriggeredJobIDs: []string{"abc/2-triggered", "abc/3-triggered"},
			},
		}
		// And also jobs 2, 3 to be triggered by job 1.
		for i := 2; i <= 3; i++ {
			jobsDefinitions = append(jobsDefinitions, catalog.Definition{
				JobID:    fmt.Sprintf("abc/%d-triggered", i),
				Revision: "rev1",
				Schedule: "triggered",
				Task:     taskBytes,
				Flavor:   catalog.JobFlavorTriggered,
			})
		}
		So(e.UpdateProjectJobs(c, "abc", jobsDefinitions), ShouldBeNil)

		// Enqueued timer task to launch it.
		tsk := ensureOneTask(c, "timers-q")
		So(tsk.Path, ShouldEqual, "/timers")
		So(tsk.ETA, ShouldResemble, epoch.Add(5*time.Second))
		taskqueue.GetTestable(c).ResetTasks()

		// Tick time comes, the tick task is executed, job is added to queue.
		clock.Get(c).(testclock.TestClock).Add(5 * time.Second)
		So(e.ExecuteSerializedAction(c, tsk.Payload, 0), ShouldBeNil)

		// Job1 is in queued state now.
		job1 := getJob(c, "abc/1")
		So(job1.Flavor, ShouldEqual, catalog.JobFlavorTrigger)
		So(job1.TriggeredJobIDs, ShouldResemble, []string{"abc/2-triggered", "abc/3-triggered"})
		So(job1.State.State, ShouldEqual, JobStateQueued)

		// Next tick task is added.
		tickTask := ensureOneTask(c, "timers-q")
		So(tickTask.Path, ShouldEqual, "/timers")
		So(tickTask.ETA, ShouldResemble, epoch.Add(10*time.Second))

		// Invocation task (ETA is 1 sec in the future).
		invTask := ensureOneTask(c, "invs-q")
		So(invTask.Path, ShouldEqual, "/invs")
		So(invTask.ETA, ShouldResemble, epoch.Add(6*time.Second))
		taskqueue.GetTestable(c).ResetTasks()

		var invID int64 // set inside launchTask once invocation is known.
		mgr.launchTask = func(ctx context.Context, ctl task.Controller, _ []task.Trigger) error {
			// Make sure Save() checkpoints the progress.
			ctl.DebugLog("Starting")
			ctl.State().Status = task.StatusRunning
			So(ctl.Save(ctx), ShouldBeNil)

			// After first Save the job and the invocation are in running state.
			j1 := getJob(c, "abc/1")
			So(j1.State.State, ShouldEqual, JobStateRunning)
			invID = j1.State.InvocationID
			inv, err := e.getInvocation(c, "abc/1", invID)
			So(err, ShouldBeNil)
			So(inv.TriggeredJobIDs, ShouldResemble, []string{"abc/2-triggered", "abc/3-triggered"})
			So(inv.DebugLog, ShouldEqual, "[22:42:05.000] Invocation initiated (attempt 1)\n[22:42:05.000] Starting\n")
			So(inv.Status, ShouldEqual, task.StatusRunning)
			So(inv.MutationsCount, ShouldEqual, 1)

			// Two triggers with the same ID but different payloads.
			ctl.EmitTrigger(ctx, task.Trigger{ID: "trg", Payload: []byte("note the trigger id")})
			ctl.EmitTrigger(ctx, task.Trigger{ID: "trg", Payload: []byte("different payload")})

			// Change state to the final one.
			ctl.State().Status = task.StatusSucceeded
			ctl.State().ViewURL = "http://view_url"
			ctl.State().TaskData = []byte("blah")
			return nil
		}
		So(e.ExecuteSerializedAction(c, invTask.Payload, 0), ShouldBeNil)

		// After final save.
		inv, err := e.getInvocation(c, "abc/1", invID)
		So(err, ShouldBeNil)
		So(inv.Status, ShouldEqual, task.StatusSucceeded)
		So(inv.MutationsCount, ShouldEqual, 2)
		So(inv.DebugLog, ShouldContainSubstring, "[22:42:05.000] Emitting a trigger trg") // twice.

		// Deliver the emitted triggers to the triggered jobs.
		for _, triggerTask := range popAllTasks(c, "invs-q") {
			So(e.ExecuteSerializedAction(c, triggerTask.Payload, 0), ShouldBeNil)
		}

		// Triggers should result in new invocations for previously suspended jobs.
		So(getJob(c, "abc/2-triggered").State.State, ShouldEqual, JobStateQueued)
		So(getJob(c, "abc/3-triggered").State.State, ShouldEqual, JobStateQueued)

		// Prepare to track triggers passed to task launchers.
		deliveredTriggers := map[string][]string{}
		mgr.launchTask = func(ctx context.Context, ctl task.Controller, triggers []task.Trigger) error {
			So(deliveredTriggers, ShouldNotContainKey, ctl.JobID())
			ids := make([]string, 0, len(triggers))
			for _, t := range triggers {
				ids = append(ids, t.ID)
			}
			sort.Strings(ids) // For deterministic tests.
			deliveredTriggers[ctl.JobID()] = ids
			ctl.State().Status = task.StatusSucceeded
			return nil
		}

		// Actually execute task launching.
		for _, t := range popAllTasks(c, "invs-q") {
			So(e.ExecuteSerializedAction(c, t.Payload, 0), ShouldBeNil)
		}

		// Each triggered job saw "trg" exactly once, despite two emits.
		So(deliveredTriggers, ShouldResemble, map[string][]string{
			"abc/2-triggered": {"trg"}, "abc/3-triggered": {"trg"},
		})
	})
}
// TestGenerateInvocationID checks that generated invocation IDs do not
// collide at the same instant and are ordered so that newer invocations get
// smaller IDs (most recent sorts first).
func TestGenerateInvocationID(t *testing.T) {
	Convey("generateInvocationID does not collide", t, func() {
		c := newTestContext(epoch)
		k := ds.NewKey(c, "Job", "", 123, nil)

		// Bunch of ids generated at the exact same moment in time do not collide.
		ids := map[int64]struct{}{}
		for i := 0; i < 20; i++ {
			id, err := generateInvocationID(c, k)
			So(err, ShouldBeNil)
			ids[id] = struct{}{}
		}
		So(len(ids), ShouldEqual, 20)
	})

	Convey("generateInvocationID gen IDs with most recent first", t, func() {
		c := newTestContext(epoch)
		k := ds.NewKey(c, "Job", "", 123, nil)

		older, err := generateInvocationID(c, k)
		So(err, ShouldBeNil)
		clock.Get(c).(testclock.TestClock).Add(5 * time.Second)
		newer, err := generateInvocationID(c, k)
		So(err, ShouldBeNil)

		// Smaller ID == more recent invocation.
		So(newer, ShouldBeLessThan, older)
	})
}
// TestQueries covers the read-only query APIs of the engine (project listing,
// job listing, invocation listing and fetching) together with their ACL
// enforcement for anonymous users, readers, owners and administrators.
func TestQueries(t *testing.T) {
	Convey("with mock data", t, func() {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		// ACL presets attached to the jobs created below.
		aclPublic := acl.GrantsByRole{Readers: []string{"group:all"}, Owners: []string{"group:administrators"}}
		aclSome := acl.GrantsByRole{Readers: []string{"group:some"}}
		aclOne := acl.GrantsByRole{Owners: []string{"one@example.com"}}
		aclAdmin := acl.GrantsByRole{Readers: []string{"group:administrators"}, Owners: []string{"group:administrators"}}

		// Contexts representing callers with different group memberships.
		ctxAnon := auth.WithState(c, &authtest.FakeState{
			Identity:       "anonymous:anonymous",
			IdentityGroups: []string{"all"},
		})
		ctxOne := auth.WithState(c, &authtest.FakeState{
			Identity:       "user:one@example.com",
			IdentityGroups: []string{"all"},
		})
		ctxSome := auth.WithState(c, &authtest.FakeState{
			Identity:       "user:some@example.com",
			IdentityGroups: []string{"all", "some"},
		})
		ctxAdmin := auth.WithState(c, &authtest.FakeState{
			Identity:       "user:admin@example.com",
			IdentityGroups: []string{"administrators", "all"},
		})

		// Jobs across three projects with various ACLs and enabled states.
		So(ds.Put(c,
			&Job{JobID: "abc/1", ProjectID: "abc", Enabled: true, Acls: aclOne},
			&Job{JobID: "abc/2", ProjectID: "abc", Enabled: true, Acls: aclSome},
			&Job{JobID: "abc/3", ProjectID: "abc", Enabled: true, Acls: aclPublic},
			&Job{JobID: "def/1", ProjectID: "def", Enabled: true, Acls: aclPublic},
			&Job{JobID: "def/2", ProjectID: "def", Enabled: false, Acls: aclPublic},
			&Job{JobID: "secret/1", ProjectID: "secret", Enabled: true, Acls: aclAdmin},
		), ShouldBeNil)

		// Invocations attached to the "abc" jobs.
		job1 := ds.NewKey(c, "Job", "abc/1", 0, nil)
		job2 := ds.NewKey(c, "Job", "abc/2", 0, nil)
		job3 := ds.NewKey(c, "Job", "abc/3", 0, nil)
		So(ds.Put(c,
			&Invocation{ID: 1, JobKey: job1, InvocationNonce: 123},
			&Invocation{ID: 2, JobKey: job1, InvocationNonce: 123},
			&Invocation{ID: 3, JobKey: job1},
			&Invocation{ID: 1, JobKey: job2},
			&Invocation{ID: 2, JobKey: job2},
			&Invocation{ID: 3, JobKey: job2},
			&Invocation{ID: 1, JobKey: job3},
		), ShouldBeNil)

		ds.GetTestable(c).CatchupIndexes()

		Convey("GetAllProjects ignores ACLs and CurrentIdentity", func() {
			test := func(ctx context.Context) {
				r, err := e.GetAllProjects(c)
				So(err, ShouldBeNil)
				So(r, ShouldResemble, []string{"abc", "def", "secret"})
			}
			test(c)
			test(ctxAnon)
			test(ctxAdmin)
		})

		Convey("GetVisibleJobs works", func() {
			get := func(ctx context.Context) []string {
				jobs, err := e.GetVisibleJobs(ctx)
				So(err, ShouldBeNil)
				return sortedJobIds(jobs)
			}

			Convey("Anonymous users see only public jobs", func() {
				// Only 3 jobs with default ACLs granting READER access to everyone, but
				// def/2 is disabled and so shouldn't be returned.
				So(get(ctxAnon), ShouldResemble, []string{"abc/3", "def/1"})
			})

			Convey("Owners can see their own jobs + public jobs", func() {
				// abc/1 is owned by one@example.com.
				So(get(ctxOne), ShouldResemble, []string{"abc/1", "abc/3", "def/1"})
			})

			Convey("Explicit readers", func() {
				So(get(ctxSome), ShouldResemble, []string{"abc/2", "abc/3", "def/1"})
			})

			Convey("Admins have implicit READER access to all jobs", func() {
				So(get(ctxAdmin), ShouldResemble, []string{"abc/1", "abc/2", "abc/3", "def/1", "secret/1"})
			})
		})

		Convey("GetProjectJobsRA works", func() {
			get := func(ctx context.Context, project string) []string {
				jobs, err := e.GetVisibleProjectJobs(ctx, project)
				So(err, ShouldBeNil)
				return sortedJobIds(jobs)
			}

			Convey("Anonymous can still see public jobs", func() {
				So(get(ctxAnon, "def"), ShouldResemble, []string{"def/1"})
			})

			Convey("Admin have implicit READER access to all jobs", func() {
				So(get(ctxAdmin, "abc"), ShouldResemble, []string{"abc/1", "abc/2", "abc/3"})
			})

			Convey("Owners can still see their jobs", func() {
				So(get(ctxOne, "abc"), ShouldResemble, []string{"abc/1", "abc/3"})
			})

			Convey("Readers can see their jobs", func() {
				So(get(ctxSome, "abc"), ShouldResemble, []string{"abc/2", "abc/3"})
			})
		})

		Convey("GetVisibleJob works", func() {
			_, err := e.GetVisibleJob(ctxAdmin, "missing/job")
			So(err, ShouldEqual, ErrNoSuchJob)
			_, err = e.GetVisibleJob(ctxAnon, "abc/1") // no READER permission.
			So(err, ShouldEqual, ErrNoSuchJob)
			_, err = e.GetVisibleJob(ctxAnon, "def/2") // not enabled, hence not visible.
			So(err, ShouldEqual, ErrNoSuchJob)
			job, err := e.GetVisibleJob(ctxAnon, "def/1") // OK.
			So(job, ShouldNotBeNil)
			So(err, ShouldBeNil)
		})

		Convey("ListVisibleInvocations works", func() {
			Convey("Anonymous can't see non-public job invocations", func() {
				_, _, err := e.ListVisibleInvocations(ctxAnon, "abc/1", 2, "")
				So(err, ShouldResemble, ErrNoSuchJob)
			})

			Convey("With paging", func() {
				// First page of 2, then a final page of 1 with an empty cursor.
				invs, cursor, err := e.ListVisibleInvocations(ctxOne, "abc/1", 2, "")
				So(err, ShouldBeNil)
				So(len(invs), ShouldEqual, 2)
				So(invs[0].ID, ShouldEqual, 1)
				So(invs[1].ID, ShouldEqual, 2)
				So(cursor, ShouldNotEqual, "")
				invs, cursor, err = e.ListVisibleInvocations(ctxOne, "abc/1", 2, cursor)
				So(err, ShouldBeNil)
				So(len(invs), ShouldEqual, 1)
				So(invs[0].ID, ShouldEqual, 3)
				So(cursor, ShouldEqual, "")
			})
		})

		Convey("GetInvocation works", func() {
			Convey("Anonymous can't see non-public job invocation", func() {
				_, err := e.GetVisibleInvocation(ctxAnon, "abc/1", 1)
				So(err, ShouldResemble, ErrNoSuchInvocation)
			})

			Convey("NoSuchInvocation", func() {
				_, err := e.GetVisibleInvocation(ctxAdmin, "missing/job", 1)
				So(err, ShouldResemble, ErrNoSuchInvocation)
			})

			Convey("Reader sees", func() {
				inv, err := e.GetVisibleInvocation(ctxOne, "abc/1", 1)
				So(inv, ShouldNotBeNil)
				So(err, ShouldBeNil)
			})
		})
	})
}
// TestRecordOverrun verifies that recordOverrun creates a special "overrun"
// invocation entity describing a launch that was skipped because the previous
// invocation was still running.
func TestRecordOverrun(t *testing.T) {
	Convey("RecordOverrun works", t, func(ctx C) {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		job := &Job{JobID: "abc/1"}
		So(ds.Put(c, job), ShouldBeNil)
		So(e.recordOverrun(c, "abc/1", 1, 0), ShouldBeNil)
		ds.GetTestable(c).CatchupIndexes()

		// Exactly one invocation entity should exist under the job.
		q := ds.NewQuery("Invocation").Ancestor(ds.KeyForObj(c, job))
		var all []Invocation
		So(ds.GetAll(c, q, &all), ShouldEqual, nil)
		So(all, ShouldResemble, []Invocation{
			{
				// ID is deterministic given the mocked clock and RNG seed.
				ID:       9200093523825174512,
				JobKey:   ds.KeyForObj(c, job),
				Started:  epoch,
				Finished: epoch,
				Status:   task.StatusOverrun,
				DebugLog: "[22:42:00.000] New invocation should be starting now, but previous one is still starting\n" +
					"[22:42:00.000] Total overruns thus far: 1\n",
			}})
	})
}
// TestPrepareTopic verifies that PrepareTopic configures the PubSub topic and
// subscription exactly once and returns a non-empty auth token; subsequent
// calls skip the (expensive) PubSub configuration.
func TestPrepareTopic(t *testing.T) {
	Convey("PrepareTopic works", t, func(ctx C) {
		c := newTestContext(epoch)
		e, _ := newTestEngine()

		pubSubCalls := 0
		// Intercept topic configuration instead of talking to PubSub for real.
		e.configureTopic = func(c context.Context, topic, sub, pushURL, publisher string) error {
			pubSubCalls++
			ctx.So(topic, ShouldEqual, "projects/app/topics/dev-scheduler+noop+some~publisher.com")
			ctx.So(sub, ShouldEqual, "projects/app/subscriptions/dev-scheduler+noop+some~publisher.com")
			ctx.So(pushURL, ShouldEqual, "") // pull on dev server
			ctx.So(publisher, ShouldEqual, "some@publisher.com")
			return nil
		}

		ctl := &taskController{
			ctx:     c,
			eng:     e,
			manager: &noop.TaskManager{},
			saved: Invocation{
				ID:     123456,
				JobKey: ds.NewKey(c, "Job", "job_id", 0, nil),
			},
		}
		ctl.populateState()

		// Once.
		topic, token, err := ctl.PrepareTopic(c, "some@publisher.com")
		So(err, ShouldBeNil)
		So(topic, ShouldEqual, "projects/app/topics/dev-scheduler+noop+some~publisher.com")
		So(token, ShouldNotEqual, "")
		So(pubSubCalls, ShouldEqual, 1)

		// Again. 'configureTopic' should not be called anymore.
		_, _, err = ctl.PrepareTopic(c, "some@publisher.com")
		So(err, ShouldBeNil)
		So(pubSubCalls, ShouldEqual, 1)
	})
}
// TestProcessPubSubPush covers routing of PubSub push messages to the task
// manager's notification handler, rejection of bad auth tokens, and handling
// of messages referencing a deleted invocation.
func TestProcessPubSubPush(t *testing.T) {
	Convey("with mock invocation", t, func() {
		c := newTestContext(epoch)
		e, mgr := newTestEngine()

		So(ds.Put(c, &Job{
			JobID:     "abc/1",
			ProjectID: "abc",
			Enabled:   true,
		}), ShouldBeNil)

		// Renamed from 'task' to avoid shadowing the imported 'task' package.
		taskBlob, err := proto.Marshal(&messages.TaskDefWrapper{
			Noop: &messages.NoopTask{},
		})
		So(err, ShouldBeNil)

		inv := Invocation{
			ID:     1,
			JobKey: ds.NewKey(c, "Job", "abc/1", 0, nil),
			Task:   taskBlob,
		}
		So(ds.Put(c, &inv), ShouldBeNil)

		// Skip talking to PubSub for real.
		e.configureTopic = func(c context.Context, topic, sub, pushURL, publisher string) error {
			return nil
		}

		ctl, err := controllerForInvocation(c, e, &inv)
		So(err, ShouldBeNil)

		// Grab the working auth token.
		_, token, err := ctl.PrepareTopic(c, "some@publisher.com")
		So(err, ShouldBeNil)
		So(token, ShouldNotEqual, "")

		Convey("ProcessPubSubPush works", func() {
			msg := struct {
				Message pubsub.PubsubMessage `json:"message"`
			}{
				Message: pubsub.PubsubMessage{
					Attributes: map[string]string{"auth_token": token},
					Data:       "blah",
				},
			}
			blob, err := json.Marshal(&msg)
			So(err, ShouldBeNil)

			handled := false
			mgr.handleNotification = func(ctx context.Context, msg *pubsub.PubsubMessage) error {
				So(msg.Data, ShouldEqual, "blah")
				handled = true
				return nil
			}
			So(e.ProcessPubSubPush(c, blob), ShouldBeNil)
			So(handled, ShouldBeTrue)
		})

		Convey("ProcessPubSubPush handles bad token", func() {
			msg := struct {
				Message pubsub.PubsubMessage `json:"message"`
			}{
				Message: pubsub.PubsubMessage{
					Attributes: map[string]string{"auth_token": token + "blah"},
					Data:       "blah",
				},
			}
			blob, err := json.Marshal(&msg)
			So(err, ShouldBeNil)
			So(e.ProcessPubSubPush(c, blob), ShouldErrLike, "bad token")
		})

		Convey("ProcessPubSubPush handles missing invocation", func() {
			// Assert the delete succeeded (the error was previously silently
			// ignored, which could invalidate the test on datastore failure).
			So(ds.Delete(c, ds.KeyForObj(c, &inv)), ShouldBeNil)
			msg := pubsub.PubsubMessage{
				Attributes: map[string]string{"auth_token": token},
			}
			blob, err := json.Marshal(&msg)
			So(err, ShouldBeNil)
			// A message for a missing invocation is a permanent (non-retriable) error.
			So(transient.Tag.In(e.ProcessPubSubPush(c, blob)), ShouldBeFalse)
		})
	})
}
// TestAborts covers AbortInvocation and AbortJob: their ACL checks and the
// resulting invocation/job state transitions for both running and queued
// invocations.
func TestAborts(t *testing.T) {
	Convey("with mock invocation", t, func() {
		c := newTestContext(epoch)
		e, mgr := newTestEngine()

		// Callers with different permission levels (matching the "owners" and
		// "readers" groups set by prepareQueuedJob below).
		ctxAnon := auth.WithState(c, &authtest.FakeState{
			Identity: "anonymous:anonymous",
		})
		ctxReader := auth.WithState(c, &authtest.FakeState{
			Identity:       "user:reader@example.com",
			IdentityGroups: []string{"readers"},
		})
		ctxOwner := auth.WithState(c, &authtest.FakeState{
			Identity:       "user:owner@example.com",
			IdentityGroups: []string{"owners"},
		})

		// A job in "QUEUED" state (about to run an invocation).
		const jobID = "abc/1"
		const invNonce = int64(12345)
		prepareQueuedJob(c, jobID, invNonce)

		// launchInv starts the queued invocation, leaves it in RUNNING state
		// and returns its ID.
		launchInv := func() int64 {
			var invID int64
			mgr.launchTask = func(ctx context.Context, ctl task.Controller, triggers []task.Trigger) error {
				invID = ctl.InvocationID()
				ctl.State().Status = task.StatusRunning
				So(ctl.Save(ctx), ShouldBeNil)
				return nil
			}
			So(e.startInvocation(c, jobID, invNonce, "", nil, 0), ShouldBeNil)

			// It is alive and the job entity tracks it.
			inv, err := e.getInvocation(c, jobID, invID)
			So(err, ShouldBeNil)
			So(inv.Status, ShouldEqual, task.StatusRunning)
			job, err := e.getJob(c, jobID)
			So(err, ShouldBeNil)
			So(job.State.State, ShouldEqual, JobStateRunning)
			So(job.State.InvocationID, ShouldEqual, invID)

			return invID
		}

		Convey("AbortInvocation works", func() {
			// Actually launch the queued invocation.
			invID := launchInv()

			// Try to kill it w/o permission.
			So(e.AbortInvocation(c, jobID, invID), ShouldNotBeNil) // No current identity.
			So(e.AbortInvocation(ctxAnon, jobID, invID), ShouldResemble, ErrNoSuchJob)
			So(e.AbortInvocation(ctxReader, jobID, invID), ShouldResemble, ErrNoOwnerPermission)

			// Now kill it.
			So(e.AbortInvocation(ctxOwner, jobID, invID), ShouldBeNil)

			// It is dead.
			inv, err := e.getInvocation(c, jobID, invID)
			So(err, ShouldBeNil)
			So(inv.Status, ShouldEqual, task.StatusAborted)

			// The job moved on with its life.
			job, err := e.getJob(c, jobID)
			So(err, ShouldBeNil)
			So(job.State.State, ShouldEqual, JobStateSuspended)
			So(job.State.InvocationID, ShouldEqual, 0)
		})

		Convey("AbortJob kills running invocation", func() {
			// Actually launch the queued invocation.
			invID := launchInv()

			// Try to kill it w/o permission.
			So(e.AbortJob(c, jobID), ShouldNotBeNil) // No current identity.
			So(e.AbortJob(ctxAnon, jobID), ShouldResemble, ErrNoSuchJob)
			So(e.AbortJob(ctxReader, jobID), ShouldResemble, ErrNoOwnerPermission)

			// Kill it.
			So(e.AbortJob(ctxOwner, jobID), ShouldBeNil)

			// It is dead.
			inv, err := e.getInvocation(c, jobID, invID)
			So(err, ShouldBeNil)
			So(inv.Status, ShouldEqual, task.StatusAborted)

			// The job moved on with its life.
			job, err := e.getJob(c, jobID)
			So(err, ShouldBeNil)
			So(job.State.State, ShouldEqual, JobStateSuspended)
			So(job.State.InvocationID, ShouldEqual, 0)
		})

		Convey("AbortJob kills queued invocation", func() {
			So(e.AbortJob(ctxOwner, jobID), ShouldBeNil)

			// The job moved on with its life.
			job, err := e.getJob(c, jobID)
			So(err, ShouldBeNil)
			So(job.State.State, ShouldEqual, JobStateSuspended)
			So(job.State.InvocationID, ShouldEqual, 0)
		})

		Convey("AbortJob fails on non-existing job", func() {
			So(e.AbortJob(ctxOwner, "not/exists"), ShouldResemble, ErrNoSuchJob)
		})
	})
}
// TestAddTimer verifies Controller.AddTimer: a timer task is enqueued with
// the right ETA and payload, its handler runs when the task executes, and no
// new timers are scheduled once the invocation has finished.
func TestAddTimer(t *testing.T) {
	Convey("with mock job", t, func() {
		c := newTestContext(epoch)
		e, mgr := newTestEngine()

		// A job in "QUEUED" state (about to run an invocation).
		const jobID = "abc/1"
		const invNonce = int64(12345)
		prepareQueuedJob(c, jobID, invNonce)

		Convey("AddTimer works", func() {
			// Start an invocation that adds a timer.
			mgr.launchTask = func(ctx context.Context, ctl task.Controller, triggers []task.Trigger) error {
				ctl.AddTimer(ctx, time.Minute, "timer-name", []byte{1, 2, 3})
				ctl.State().Status = task.StatusRunning
				return nil
			}
			So(e.startInvocation(c, jobID, invNonce, "", nil, 0), ShouldBeNil)

			// The job is running.
			job, err := e.getJob(c, jobID)
			So(err, ShouldBeNil)
			So(job.State.State, ShouldEqual, JobStateRunning)

			// Added a task to the timers task queue.
			tasks := taskqueue.GetTestable(c).GetScheduledTasks()["timers-q"]
			So(len(tasks), ShouldEqual, 1)
			// Grab the single scheduled task out of the map.
			var tqt *taskqueue.Task
			for _, tqt = range tasks {
			}
			So(tqt.ETA, ShouldResemble, clock.Now(c).Add(time.Minute))

			// Verify task body.
			payload := actionTaskPayload{}
			So(json.Unmarshal(tqt.Payload, &payload), ShouldBeNil)
			So(payload, ShouldResemble, actionTaskPayload{
				JobID: "abc/1",
				InvID: 9200093523825174512,
				InvTimer: &invocationTimer{
					Delay:   time.Minute,
					Name:    "timer-name",
					Payload: []byte{1, 2, 3},
				},
			})

			// Clear the queue.
			taskqueue.GetTestable(c).ResetTasks()

			// Time comes to execute the task.
			mgr.handleTimer = func(ctx context.Context, ctl task.Controller, name string, payload []byte) error {
				So(name, ShouldEqual, "timer-name")
				So(payload, ShouldResemble, []byte{1, 2, 3})
				ctl.AddTimer(ctx, time.Minute, "ignored-timer", nil)
				ctl.State().Status = task.StatusSucceeded
				return nil
			}
			clock.Get(c).(testclock.TestClock).Add(time.Minute)
			So(e.ExecuteSerializedAction(c, tqt.Payload, 0), ShouldBeNil)

			// The job has finished (by timer handler). Moves back to SUSPENDED state.
			job, err = e.getJob(c, jobID)
			So(err, ShouldBeNil)
			So(job.State.State, ShouldEqual, JobStateSuspended)

			// No new timers added for finished job.
			tasks = taskqueue.GetTestable(c).GetScheduledTasks()["timers-q"]
			So(len(tasks), ShouldEqual, 0)
		})
	})
}
// TestTrimDebugLog covers Invocation.trimDebugLog: small logs are left
// untouched, oversized logs are cut in the middle (keeping the head and the
// tail), repeated trimming keeps a single cut marker, and a single huge line
// is truncated to the size limit.
func TestTrimDebugLog(t *testing.T) {
	ctx := clock.Set(context.Background(), testclock.New(epoch))
	junk := strings.Repeat("a", 1000)

	// genLines produces a debug log with lines numbered [start, end),
	// each roughly 1KB long.
	genLines := func(start, end int) string {
		inv := Invocation{}
		for i := start; i < end; i++ {
			inv.debugLog(ctx, "Line %d - %s", i, junk)
		}
		return inv.DebugLog
	}

	Convey("small log is not trimmed", t, func() {
		inv := Invocation{
			DebugLog: genLines(0, 100),
		}
		inv.trimDebugLog()
		So(inv.DebugLog, ShouldEqual, genLines(0, 100))
	})

	Convey("huge log is trimmed", t, func() {
		inv := Invocation{
			DebugLog: genLines(0, 500),
		}
		inv.trimDebugLog()
		// Head (first 94 lines) and tail (last 100 lines) survive the cut.
		So(inv.DebugLog, ShouldEqual,
			genLines(0, 94)+"--- the log has been cut here ---\n"+genLines(400, 500))
	})

	Convey("writing lines to huge log and trimming", t, func() {
		inv := Invocation{
			DebugLog: genLines(0, 500),
		}
		inv.trimDebugLog()
		for i := 0; i < 10; i++ {
			inv.debugLog(ctx, "Line %d - %s", i, junk)
			inv.trimDebugLog()
		}
		// Still single cut only. New 10 lines are at the end.
		So(inv.DebugLog, ShouldEqual,
			genLines(0, 94)+"--- the log has been cut here ---\n"+genLines(410, 500)+genLines(0, 10))
	})

	Convey("one huge line", t, func() {
		inv := Invocation{
			DebugLog: strings.Repeat("z", 300000),
		}
		inv.trimDebugLog()
		const msg = "\n--- the log has been cut here ---\n"
		So(inv.DebugLog, ShouldEqual, strings.Repeat("z", debugLogSizeLimit-len(msg))+msg)
	})
}
////
// newTestContext returns a testing context with a fake in-memory datastore,
// a frozen test clock set to `now`, a deterministic RNG, test secrets and the
// two task queues the engine uses.
func newTestContext(now time.Time) context.Context {
	ctx := memory.Use(context.Background())
	ctx = clock.Set(ctx, testclock.New(now))
	ctx = mathrand.Set(ctx, rand.New(rand.NewSource(1000)))
	ctx = testsecrets.Use(ctx)

	// Composite index used by job visibility queries.
	ds.GetTestable(ctx).AddIndexes(&ds.IndexDefinition{
		Kind: "Job",
		SortBy: []ds.IndexColumn{
			{Property: "Enabled"},
			{Property: "ProjectID"},
		},
	})
	ds.GetTestable(ctx).CatchupIndexes()

	for _, q := range []string{"timers-q", "invs-q"} {
		taskqueue.GetTestable(ctx).CreateQueue(q)
	}
	return ctx
}
// newTestEngine returns an engine wired to a fakeTaskManager, plus the fake
// manager itself so tests can install their callbacks.
func newTestEngine() (*engineImpl, *fakeTaskManager) {
	mgr := &fakeTaskManager{}
	cat := catalog.New("scheduler.cfg")
	cat.RegisterTaskManager(mgr)

	cfg := Config{
		Catalog:              cat,
		TimersQueuePath:      "/timers",
		TimersQueueName:      "timers-q",
		InvocationsQueuePath: "/invs",
		InvocationsQueueName: "invs-q",
		PubSubPushPath:       "/push-url",
	}
	return NewEngine(cfg).(*engineImpl), mgr
}
////
// fakeTaskManager implements the task.Manager interface for tests.
//
// Each hook is a settable callback, so individual test cases can inject their
// own behavior for task launches, PubSub notifications and timer ticks.
type fakeTaskManager struct {
	// launchTask is called by LaunchTask.
	launchTask func(ctx context.Context, ctl task.Controller, triggers []task.Trigger) error
	// handleNotification is called by HandleNotification.
	handleNotification func(ctx context.Context, msg *pubsub.PubsubMessage) error
	// handleTimer is called by HandleTimer.
	handleTimer func(ctx context.Context, ctl task.Controller, name string, payload []byte) error
}
// Name is part of the task.Manager interface.
func (m *fakeTaskManager) Name() string {
	return "fake"
}
// ProtoMessageType is part of the task.Manager interface. The fake manager
// reuses the NoopTask message as its task definition type.
func (m *fakeTaskManager) ProtoMessageType() proto.Message {
	return (*messages.NoopTask)(nil)
}
// Traits is part of the task.Manager interface. The fake uses default traits.
func (m *fakeTaskManager) Traits() task.Traits {
	return task.Traits{}
}
// ValidateProtoMessage is part of the task.Manager interface. The fake
// accepts any message.
func (m *fakeTaskManager) ValidateProtoMessage(msg proto.Message) error {
	return nil
}
// LaunchTask is part of the task.Manager interface. It delegates to the
// test-provided launchTask callback (panics if the test did not set one).
func (m *fakeTaskManager) LaunchTask(c context.Context, ctl task.Controller, triggers []task.Trigger) error {
	return m.launchTask(c, ctl, triggers)
}
// AbortTask is part of the task.Manager interface. The fake has nothing to
// clean up on abort.
func (m *fakeTaskManager) AbortTask(c context.Context, ctl task.Controller) error {
	return nil
}
// HandleNotification is part of the task.Manager interface. It delegates to
// the test-provided handleNotification callback (panics if unset).
func (m *fakeTaskManager) HandleNotification(c context.Context, ctl task.Controller, msg *pubsub.PubsubMessage) error {
	return m.handleNotification(c, msg)
}
// HandleTimer is part of the task.Manager interface. It delegates to the
// test-provided handleTimer callback (panics if unset).
//
// Uses a pointer receiver for consistency with all other fakeTaskManager
// methods (it previously used a value receiver, the only one on the type).
func (m *fakeTaskManager) HandleTimer(c context.Context, ctl task.Controller, name string, payload []byte) error {
	return m.handleTimer(c, ctl, name, payload)
}
////
// sortedJobIds returns a sorted list of unique job IDs from the given jobs.
func sortedJobIds(jobs []*Job) []string {
	seen := stringset.New(len(jobs))
	for _, job := range jobs {
		seen.Add(job.JobID)
	}
	out := seen.ToSlice()
	sort.Strings(out)
	return out
}
// prepareQueuedJob makes datastore entries for a job in QUEUED state.
//
// The job is triggered-only, owned by "group:owners" and readable by
// "group:readers". Panics on datastore or marshaling errors (test-only code).
func prepareQueuedJob(c context.Context, jobID string, invNonce int64) {
	taskBlob, err := proto.Marshal(&messages.TaskDefWrapper{
		Noop: &messages.NoopTask{},
	})
	if err != nil {
		panic(err)
	}

	// Project ID is the part of the job ID before the first "/".
	projectID := strings.Split(jobID, "/")[0]

	job := &Job{
		JobID:     jobID,
		ProjectID: projectID,
		Enabled:   true,
		Acls:      acl.GrantsByRole{Owners: []string{"group:owners"}, Readers: []string{"group:readers"}},
		Task:      taskBlob,
		Schedule:  "triggered",
		State: JobState{
			State:           JobStateQueued,
			InvocationNonce: invNonce,
		},
	}
	if err := ds.Put(c, job); err != nil {
		panic(err)
	}
}
// noopTaskBytes returns a serialized TaskDefWrapper holding a NoopTask.
func noopTaskBytes() []byte {
	buf, err := proto.Marshal(&messages.TaskDefWrapper{Noop: &messages.NoopTask{}})
	if err != nil {
		// Previously the error was silently discarded; panic instead (this is
		// test-only code), consistent with prepareQueuedJob.
		panic(err)
	}
	return buf
}
// allJobs fetches every Job entity from the datastore, normalizing zero
// timestamps so the results are directly comparable with ShouldResemble.
func allJobs(c context.Context) []Job {
	ds.GetTestable(c).CatchupIndexes()

	jobs := []Job{}
	if err := ds.GetAll(c, ds.NewQuery("Job"), &jobs); err != nil {
		panic(err)
	}

	// Strip UTC location pointers from zero time.Time{} so that ShouldResemble
	// can compare it to default time.Time{}. nil location is UTC too.
	for i := range jobs {
		state := &jobs[i].State
		if state.InvocationTime.IsZero() {
			state.InvocationTime = time.Time{}
		}
		if state.TickTime.IsZero() {
			state.TickTime = time.Time{}
		}
	}
	return jobs
}
// getJob returns the stored Job with the given ID, panicking if it is absent
// (test-only helper; absence indicates a broken test setup).
func getJob(c context.Context, jobID string) Job {
	for _, job := range allJobs(c) {
		if job.JobID == jobID {
			return job
		}
	}
	panic(fmt.Errorf("no such jobs %s", jobID))
}
// ensureZeroTasks asserts that the given task queue has no scheduled tasks.
func ensureZeroTasks(c context.Context, q string) {
	tasks := taskqueue.GetTestable(c).GetScheduledTasks()[q]
	// len() of a nil map is 0, so a single length assertion covers both the
	// "queue missing" and "queue empty" cases and yields a clearer failure
	// message than the previous boolean expression.
	So(len(tasks), ShouldEqual, 0)
}
// ensureOneTask asserts that the given queue holds exactly one scheduled task
// and returns it.
func ensureOneTask(c context.Context, q string) *taskqueue.Task {
	scheduled := taskqueue.GetTestable(c).GetScheduledTasks()[q]
	So(len(scheduled), ShouldEqual, 1)
	for _, tsk := range scheduled {
		return tsk
	}
	return nil
}
// popAllTasks returns all tasks scheduled on the given queue and clears all
// queues, so the tasks are only ever returned once.
func popAllTasks(c context.Context, q string) []*taskqueue.Task {
	tqt := taskqueue.GetTestable(c)
	// Fetch the scheduled-tasks map once instead of twice.
	scheduled := tqt.GetScheduledTasks()[q]
	tasks := make([]*taskqueue.Task, 0, len(scheduled))
	for _, t := range scheduled {
		tasks = append(tasks, t)
	}
	tqt.ResetTasks()
	return tasks
}
|
// Copyright 2017 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package frontend
import (
"context"
"net/http"
authServer "go.chromium.org/luci/appengine/gaeauth/server"
"go.chromium.org/luci/appengine/gaemiddleware"
"go.chromium.org/luci/appengine/gaemiddleware/standard"
"go.chromium.org/luci/appengine/tq"
"go.chromium.org/luci/common/logging"
"go.chromium.org/luci/common/retry/transient"
"go.chromium.org/luci/config/appengine/gaeconfig"
"go.chromium.org/luci/config/impl/remote"
"go.chromium.org/luci/server/auth"
"go.chromium.org/luci/server/router"
"go.chromium.org/luci/luci_notify/config"
"go.chromium.org/luci/luci_notify/notify"
)
// init wires up all HTTP routes of the service: standard GAE handlers, task
// queue dispatch routes, the config-update cron endpoint and the Buildbucket
// Pub/Sub push endpoint.
func init() {
	r := router.New()
	standard.InstallHandlers(r)

	// All routes require cookie auth and a luci-config client in the context.
	basemw := standard.Base().Extend(auth.Authenticate(authServer.CookieAuth), withRemoteConfigService)

	taskDispatcher := tq.Dispatcher{BaseURL: "/internal/tasks/"}
	notify.InitDispatcher(&taskDispatcher)
	taskDispatcher.InstallRoutes(r, basemw)

	// Cron endpoint.
	r.GET("/internal/cron/update-config", basemw.Extend(gaemiddleware.RequireCron), config.UpdateHandler)

	// Pub/Sub endpoint.
	r.POST("/_ah/push-handlers/buildbucket", basemw, func(c *router.Context) {
		if err := notify.BuildbucketPubSubHandler(c, &taskDispatcher); err != nil {
			logging.Errorf(c.Context, "%s", err)
			if transient.Tag.In(err) {
				// Retry transient errors: a 500 response makes Pub/Sub
				// redeliver the message. Permanent errors fall through to a
				// 200 so the message is acked and not retried.
				c.Writer.WriteHeader(http.StatusInternalServerError)
			}
		}
	})

	http.Handle("/", r)
}
// withRemoteConfigService is a middleware that installs a luci-config client
// (authenticated as the service itself) into the request context.
//
// Replies with HTTP 500 and stops the middleware chain if the config service
// settings cannot be fetched.
func withRemoteConfigService(c *router.Context, next router.Handler) {
	s, err := gaeconfig.FetchCachedSettings(c.Context)
	if err != nil {
		c.Writer.WriteHeader(http.StatusInternalServerError)
		logging.WithError(err).Errorf(c.Context, "failure retrieving cached settings")
		return
	}
	// The client authenticates to the config service as this service.
	rInterface := remote.New(s.ConfigServiceHost, false, func(c context.Context) (*http.Client, error) {
		t, err := auth.GetRPCTransport(c, auth.AsSelf)
		if err != nil {
			return nil, err
		}
		return &http.Client{Transport: t}, nil
	})
	// insert into context
	c.Context = config.WithConfigService(c.Context, rInterface)
	next(c)
}
[notify] Add PubSub message metric
Add a counter of buildbucket pubsub messages with a string "status" field
which can take values "success", "permanent-failure" and "transient-failure".
An alert for "permanent-failure" will be added.
Bug: 931518
Change-Id: I88e3fcd0092dd8266f24a911435e35c3fac9311b
Reviewed-on: https://chromium-review.googlesource.com/c/1470612
Reviewed-by: Vadim Shtayura <9f116ddb1b24f6fc1916a676eb17161b6c07dfc1@chromium.org>
Commit-Queue: Nodir Turakulov <ef4933a197ef7b4b3f55f1bec4942aead3637a2a@chromium.org>
// Copyright 2017 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package frontend
import (
"context"
"net/http"
authServer "go.chromium.org/luci/appengine/gaeauth/server"
"go.chromium.org/luci/appengine/gaemiddleware"
"go.chromium.org/luci/appengine/gaemiddleware/standard"
"go.chromium.org/luci/appengine/tq"
"go.chromium.org/luci/common/logging"
"go.chromium.org/luci/common/retry/transient"
"go.chromium.org/luci/common/tsmon/field"
"go.chromium.org/luci/common/tsmon/metric"
"go.chromium.org/luci/config/appengine/gaeconfig"
"go.chromium.org/luci/config/impl/remote"
"go.chromium.org/luci/server/auth"
"go.chromium.org/luci/server/router"
"go.chromium.org/luci/luci_notify/config"
"go.chromium.org/luci/luci_notify/notify"
)
// buildbucketPubSub counts Buildbucket Pub/Sub messages received by the push
// endpoint, bucketed by handling outcome. (Identifier typo fixed: was
// "bulidbucketPubSub".)
var buildbucketPubSub = metric.NewCounter(
	"luci/notify/buildbucket-pubsub",
	"Number of received Buildbucket PubSub messages",
	nil,
	// "success", "transient-failure" or "permanent-failure"
	field.String("status"),
)

// init wires up all HTTP routes of the service: standard GAE handlers, task
// queue dispatch routes, the config-update cron endpoint and the Buildbucket
// Pub/Sub push endpoint.
func init() {
	r := router.New()
	standard.InstallHandlers(r)

	// All routes require cookie auth and a luci-config client in the context.
	basemw := standard.Base().Extend(auth.Authenticate(authServer.CookieAuth), withRemoteConfigService)

	taskDispatcher := tq.Dispatcher{BaseURL: "/internal/tasks/"}
	notify.InitDispatcher(&taskDispatcher)
	taskDispatcher.InstallRoutes(r, basemw)

	// Cron endpoint.
	r.GET("/internal/cron/update-config", basemw.Extend(gaemiddleware.RequireCron), config.UpdateHandler)

	// Pub/Sub endpoint.
	r.POST("/_ah/push-handlers/buildbucket", basemw, func(c *router.Context) {
		ctx := c.Context
		status := ""
		switch err := notify.BuildbucketPubSubHandler(c, &taskDispatcher); {
		case transient.Tag.In(err):
			status = "transient-failure"
			logging.Errorf(ctx, "transient failure: %s", err)
			// Retry the message.
			c.Writer.WriteHeader(http.StatusInternalServerError)
		case err != nil:
			status = "permanent-failure"
			logging.Errorf(ctx, "permanent failure: %s", err)
		default:
			status = "success"
		}
		buildbucketPubSub.Add(ctx, 1, status)
	})

	http.Handle("/", r)
}
// withRemoteConfigService is a middleware that installs a luci-config client
// (authenticated as the service itself) into the request context.
//
// Replies with HTTP 500 and stops the middleware chain if the config service
// settings cannot be fetched.
func withRemoteConfigService(c *router.Context, next router.Handler) {
	settings, err := gaeconfig.FetchCachedSettings(c.Context)
	if err != nil {
		c.Writer.WriteHeader(http.StatusInternalServerError)
		logging.WithError(err).Errorf(c.Context, "failure retrieving cached settings")
		return
	}

	// HTTP client factory that authenticates to the config service as self.
	newClient := func(ctx context.Context) (*http.Client, error) {
		transport, err := auth.GetRPCTransport(ctx, auth.AsSelf)
		if err != nil {
			return nil, err
		}
		return &http.Client{Transport: transport}, nil
	}

	// insert into context
	service := remote.New(settings.ConfigServiceHost, false, newClient)
	c.Context = config.WithConfigService(c.Context, service)
	next(c)
}
|
package network
import (
"bufio"
"encoding/binary"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/apparmor"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/daemon"
"github.com/lxc/lxd/lxd/dnsmasq"
"github.com/lxc/lxd/lxd/node"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/subprocess"
"github.com/lxc/lxd/shared/version"
)
// ForkdnsServersListPath defines the path that contains the forkdns server candidate file.
const ForkdnsServersListPath = "forkdns.servers"

// ForkdnsServersListFile is the name of the file that contains the server candidates list.
const ForkdnsServersListFile = "servers.conf"

// forkdnsServersLock serializes updates to the forkdns servers list.
var forkdnsServersLock sync.Mutex
// bridge represents a LXD bridge network. It embeds common for the shared
// network behavior and adds bridge-specific config handling and lifecycle.
type bridge struct {
	common
}
// fillConfig fills requested config with any default values.
//
// In fan mode only the underlay subnet gets a default; in standard mode the
// IPv4/IPv6 addresses default to "auto" (IPv6 only if the kernel has IPv6
// enabled) and NAT is enabled for any "auto" address that has no explicit
// NAT setting.
func (n *bridge) fillConfig(req *api.NetworksPost) error {
	if req.Config["bridge.mode"] == "fan" {
		if req.Config["fan.underlay_subnet"] == "" {
			req.Config["fan.underlay_subnet"] = "auto"
		}
		return nil
	}

	// Standard mode defaults.
	if req.Config["ipv4.address"] == "" {
		req.Config["ipv4.address"] = "auto"
	}
	if req.Config["ipv4.address"] == "auto" && req.Config["ipv4.nat"] == "" {
		req.Config["ipv4.nat"] = "true"
	}

	if req.Config["ipv6.address"] == "" {
		// Default IPv6 to "auto" only when the kernel has IPv6 enabled.
		content, err := ioutil.ReadFile("/proc/sys/net/ipv6/conf/default/disable_ipv6")
		if err == nil && string(content) == "0\n" {
			req.Config["ipv6.address"] = "auto"
		}
	}
	if req.Config["ipv6.address"] == "auto" && req.Config["ipv6.nat"] == "" {
		req.Config["ipv6.nat"] = "true"
	}

	return nil
}
// Validate network config.
//
// Builds the static per-key validation rules, adds dynamic rules for
// "tunnel.<name>.<key>" entries, runs per-key validation, then performs
// composite (cross-key) checks such as fan-mode restrictions and MTU bounds.
func (n *bridge) Validate(config map[string]string) error {
	// Build driver specific rules dynamically.
	rules := map[string]func(value string) error{
		"bridge.driver": func(value string) error {
			return shared.IsOneOf(value, []string{"native", "openvswitch"})
		},
		"bridge.external_interfaces": func(value string) error {
			if value == "" {
				return nil
			}

			for _, entry := range strings.Split(value, ",") {
				entry = strings.TrimSpace(entry)
				if err := ValidNetworkName(entry); err != nil {
					return errors.Wrapf(err, "Invalid interface name %q", entry)
				}
			}

			return nil
		},
		"bridge.hwaddr": shared.IsAny,
		"bridge.mtu":    shared.IsInt64,
		"bridge.mode": func(value string) error {
			return shared.IsOneOf(value, []string{"standard", "fan"})
		},
		"fan.overlay_subnet": shared.IsNetworkV4,
		"fan.underlay_subnet": func(value string) error {
			if value == "auto" {
				return nil
			}

			return shared.IsNetworkV4(value)
		},
		"fan.type": func(value string) error {
			return shared.IsOneOf(value, []string{"vxlan", "ipip"})
		},
		"ipv4.address": func(value string) error {
			if shared.IsOneOf(value, []string{"none", "auto"}) == nil {
				return nil
			}

			return shared.IsNetworkAddressCIDRV4(value)
		},
		"ipv4.firewall": shared.IsBool,
		"ipv4.nat":      shared.IsBool,
		"ipv4.nat.order": func(value string) error {
			return shared.IsOneOf(value, []string{"before", "after"})
		},
		"ipv4.nat.address":  shared.IsNetworkAddressV4,
		"ipv4.dhcp":         shared.IsBool,
		"ipv4.dhcp.gateway": shared.IsNetworkAddressV4,
		"ipv4.dhcp.expiry":  shared.IsAny,
		"ipv4.dhcp.ranges":  shared.IsAny,
		"ipv4.routes":       shared.IsNetworkV4List,
		"ipv4.routing":      shared.IsBool,
		"ipv6.address": func(value string) error {
			if shared.IsOneOf(value, []string{"none", "auto"}) == nil {
				return nil
			}

			return shared.IsNetworkAddressCIDRV6(value)
		},
		"ipv6.firewall": shared.IsBool,
		"ipv6.nat":      shared.IsBool,
		"ipv6.nat.order": func(value string) error {
			return shared.IsOneOf(value, []string{"before", "after"})
		},
		"ipv6.nat.address":   shared.IsNetworkAddressV6,
		"ipv6.dhcp":          shared.IsBool,
		"ipv6.dhcp.expiry":   shared.IsAny,
		"ipv6.dhcp.stateful": shared.IsBool,
		"ipv6.dhcp.ranges":   shared.IsAny,
		"ipv6.routes":        shared.IsNetworkV6List,
		"ipv6.routing":       shared.IsBool,
		"dns.domain":         shared.IsAny,
		"dns.search":         shared.IsAny,
		"dns.mode": func(value string) error {
			return shared.IsOneOf(value, []string{"dynamic", "managed", "none"})
		},
		"raw.dnsmasq":      shared.IsAny,
		"maas.subnet.ipv4": shared.IsAny,
		"maas.subnet.ipv6": shared.IsAny,
	}

	// Add dynamic validation rules.
	for k := range config {
		// Tunnel keys have the remote name in their name, so extract the real key.
		if strings.HasPrefix(k, "tunnel.") {
			// Validate remote name in key.
			fields := strings.Split(k, ".")
			if len(fields) != 3 {
				return fmt.Errorf("Invalid network configuration key: %s", k)
			}

			// The tunnel interface is named "<network>-<remote>" (see the error
			// message below); the 14 character limit presumably keeps it within
			// the kernel interface name size limit — TODO confirm.
			if len(n.name)+len(fields[1]) > 14 {
				return fmt.Errorf("Network name too long for tunnel interface: %s-%s", n.name, fields[1])
			}

			tunnelKey := fields[2]

			// Add the correct validation rule for the dynamic field based on last part of key.
			switch tunnelKey {
			case "protocol":
				rules[k] = func(value string) error {
					return shared.IsOneOf(value, []string{"gre", "vxlan"})
				}
			case "local":
				rules[k] = shared.IsNetworkAddress
			case "remote":
				rules[k] = shared.IsNetworkAddress
			case "port":
				rules[k] = networkValidPort
			case "group":
				rules[k] = shared.IsNetworkAddress
			case "interface":
				// Fixed typo: was "inteface", which meant the documented
				// "tunnel.<name>.interface" key never got a validation rule.
				rules[k] = ValidNetworkName
			case "ttl":
				rules[k] = shared.IsUint8
			}
		}
	}

	err := n.validate(config, rules)
	if err != nil {
		return err
	}

	// Perform composite key checks after per-key validation.

	// Validate network name when used in fan mode.
	bridgeMode := config["bridge.mode"]
	if bridgeMode == "fan" && len(n.name) > 11 {
		return fmt.Errorf("Network name too long to use with the FAN (must be 11 characters or less)")
	}

	for key, v := range config {
		// Bridge mode checks.
		if bridgeMode == "fan" && strings.HasPrefix(key, "ipv4.") && !shared.StringInSlice(key, []string{"ipv4.dhcp.expiry", "ipv4.firewall", "ipv4.nat", "ipv4.nat.order"}) && v != "" {
			return fmt.Errorf("IPv4 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode == "fan" && strings.HasPrefix(key, "ipv6.") && v != "" {
			return fmt.Errorf("IPv6 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode != "fan" && strings.HasPrefix(key, "fan.") && v != "" {
			return fmt.Errorf("FAN configuration may only be set when in 'fan' mode")
		}

		// MTU checks.
		if key == "bridge.mtu" && v != "" {
			mtu, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				return fmt.Errorf("Invalid value for an integer: %s", v)
			}

			ipv6 := config["ipv6.address"]
			if ipv6 != "" && ipv6 != "none" && mtu < 1280 {
				return fmt.Errorf("The minimum MTU for an IPv6 network is 1280")
			}

			ipv4 := config["ipv4.address"]
			if ipv4 != "" && ipv4 != "none" && mtu < 68 {
				return fmt.Errorf("The minimum MTU for an IPv4 network is 68")
			}

			if config["bridge.mode"] == "fan" {
				if config["fan.type"] == "ipip" {
					if mtu > 1480 {
						return fmt.Errorf("Maximum MTU for an IPIP FAN bridge is 1480")
					}
				} else {
					if mtu > 1450 {
						return fmt.Errorf("Maximum MTU for a VXLAN FAN bridge is 1450")
					}
				}
			}
		}
	}

	return nil
}
// isRunning reports whether the bridge interface currently exists on the host,
// detected via its sysfs entry.
func (n *bridge) isRunning() bool {
	sysPath := fmt.Sprintf("/sys/class/net/%s", n.name)
	return shared.PathExists(sysPath)
}
// Delete deletes a network: stops it if running, removes its apparmor
// profiles, then performs the common delete steps. clusterNotification is
// forwarded to the common delete logic unchanged.
func (n *bridge) Delete(clusterNotification bool) error {
	n.logger.Debug("Delete", log.Ctx{"clusterNotification": clusterNotification})

	// Bring the network down first so no device remains behind.
	if n.isRunning() {
		err := n.Stop()
		if err != nil {
			return err
		}
	}

	// Delete apparmor profiles.
	err := apparmor.NetworkDelete(n.state, n)
	if err != nil {
		return err
	}

	// Remaining database/state cleanup is shared with other drivers.
	return n.common.delete(clusterNotification)
}
// Rename renames a network. The network must not be in use; it is stopped,
// its forkdns log file is moved to the new name, the common rename steps are
// applied and the network is started again under the new name.
func (n *bridge) Rename(newName string) error {
	n.logger.Debug("Rename", log.Ctx{"newName": newName})

	// Sanity checks.
	inUse, err := n.IsUsed()
	if err != nil {
		return err
	}

	if inUse {
		return fmt.Errorf("The network is currently in use")
	}

	// Bring the network down.
	if n.isRunning() {
		err := n.Stop()
		if err != nil {
			return err
		}
	}

	// Rename forkdns log file.
	forkDNSLogPath := fmt.Sprintf("forkdns.%s.log", n.name)
	if shared.PathExists(shared.LogPath(forkDNSLogPath)) {
		// Fix: the rename source must be the full log path. Previously the bare
		// file name was passed, which is relative to the daemon's working
		// directory rather than the log directory, so the rename failed.
		err := os.Rename(shared.LogPath(forkDNSLogPath), shared.LogPath(fmt.Sprintf("forkdns.%s.log", newName)))
		if err != nil {
			return err
		}
	}

	// Rename common steps (database records etc.).
	err = n.common.rename(newName)
	if err != nil {
		return err
	}

	// Bring the network up under its new name.
	err = n.Start()
	if err != nil {
		return err
	}

	return nil
}
// Start starts the network.
func (n *bridge) Start() error {
	// A nil oldConfig tells setup there is no previous configuration to clean up.
	return n.setup(nil)
}
// setup brings up (or reconfigures) the bridge: creates the interface, applies
// MTU/MAC/VLAN settings, configures IPv4/IPv6 addressing, firewalling and NAT,
// the fan overlay and any tunnels, and (re)starts dnsmasq and forkdns.
// oldConfig is the previous network config (may be nil) and is used only to
// clean up firewall rules the old configuration may have created.
func (n *bridge) setup(oldConfig map[string]string) error {
	// If we are in mock mode, just no-op.
	if n.state.OS.MockMode {
		return nil
	}

	n.logger.Debug("Setting up network")

	if n.status == api.NetworkStatusPending {
		return fmt.Errorf("Cannot start pending network")
	}

	// Create directory
	if !shared.PathExists(shared.VarPath("networks", n.name)) {
		err := os.MkdirAll(shared.VarPath("networks", n.name), 0711)
		if err != nil {
			return err
		}
	}

	// Create the bridge interface
	if !n.isRunning() {
		if n.config["bridge.driver"] == "openvswitch" {
			_, err := exec.LookPath("ovs-vsctl")
			if err != nil {
				return fmt.Errorf("Open vSwitch isn't installed on this system")
			}

			_, err = shared.RunCommand("ovs-vsctl", "add-br", n.name)
			if err != nil {
				return err
			}
		} else {
			_, err := shared.RunCommand("ip", "link", "add", "dev", n.name, "type", "bridge")
			if err != nil {
				return err
			}
		}
	}

	// Get a list of tunnels
	tunnels := n.getTunnels()

	// IPv6 bridge configuration
	if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
		if !shared.PathExists("/proc/sys/net/ipv6") {
			return fmt.Errorf("Network has ipv6.address but kernel IPv6 support is missing")
		}

		// Addressing is managed by LXD, so disable kernel autoconf/DAD.
		err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/autoconf", n.name), "0")
		if err != nil {
			return err
		}

		err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_dad", n.name), "0")
		if err != nil {
			return err
		}
	}

	// Get a list of interfaces
	ifaces, err := net.Interfaces()
	if err != nil {
		return err
	}

	// Cleanup any existing tunnel device (named "<bridge>-<suffix>").
	for _, iface := range ifaces {
		if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) {
			_, err = shared.RunCommand("ip", "link", "del", "dev", iface.Name)
			if err != nil {
				return err
			}
		}
	}

	// Set the MTU
	mtu := ""
	if n.config["bridge.mtu"] != "" {
		mtu = n.config["bridge.mtu"]
	} else if len(tunnels) > 0 {
		mtu = "1400"
	} else if n.config["bridge.mode"] == "fan" {
		if n.config["fan.type"] == "ipip" {
			mtu = "1480"
		} else {
			mtu = "1450"
		}
	}

	// Attempt to add a dummy device to the bridge to force the MTU.
	// This is best-effort; the explicit MTU set below is authoritative.
	if mtu != "" && n.config["bridge.driver"] != "openvswitch" {
		_, err = shared.RunCommand("ip", "link", "add", "dev", fmt.Sprintf("%s-mtu", n.name), "mtu", mtu, "type", "dummy")
		if err == nil {
			_, err = shared.RunCommand("ip", "link", "set", "dev", fmt.Sprintf("%s-mtu", n.name), "up")
			if err == nil {
				AttachInterface(n.name, fmt.Sprintf("%s-mtu", n.name))
			}
		}
	}

	// Now, set a default MTU
	if mtu == "" {
		mtu = "1500"
	}

	_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "mtu", mtu)
	if err != nil {
		return err
	}

	// Set the MAC address
	if n.config["bridge.hwaddr"] != "" {
		_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "address", n.config["bridge.hwaddr"])
		if err != nil {
			return err
		}
	}

	// Enable VLAN filtering for Linux bridges. Failures are non-fatal as older
	// kernels may lack support.
	if n.config["bridge.driver"] != "openvswitch" {
		err = BridgeVLANFilterSetStatus(n.name, "1")
		if err != nil {
			n.logger.Warn(fmt.Sprintf("%v", err))
		}

		// Set the default PVID for new ports to 1.
		err = BridgeVLANSetDefaultPVID(n.name, "1")
		if err != nil {
			n.logger.Warn(fmt.Sprintf("%v", err))
		}
	}

	// Bring it up
	_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
	if err != nil {
		return err
	}

	// Add any listed existing external interface
	if n.config["bridge.external_interfaces"] != "" {
		for _, entry := range strings.Split(n.config["bridge.external_interfaces"], ",") {
			entry = strings.TrimSpace(entry)
			iface, err := net.InterfaceByName(entry)
			if err != nil {
				n.logger.Warn("Skipping attaching missing external interface", log.Ctx{"interface": entry})
				continue
			}

			// Refuse to attach an interface that already carries a global address.
			unused := true
			addrs, err := iface.Addrs()
			if err == nil {
				for _, addr := range addrs {
					ip, _, err := net.ParseCIDR(addr.String())
					if ip != nil && err == nil && ip.IsGlobalUnicast() {
						unused = false
						break
					}
				}
			}

			if !unused {
				return fmt.Errorf("Only unconfigured network interfaces can be bridged")
			}

			err = AttachInterface(n.name, entry)
			if err != nil {
				return err
			}
		}
	}

	// Remove any existing IPv4 firewall rules.
	if usesIPv4Firewall(n.config) || usesIPv4Firewall(oldConfig) {
		err = n.state.Firewall.NetworkClear(n.name, 4)
		if err != nil {
			return err
		}
	}

	// Snapshot container specific IPv4 routes (added with boot proto) before removing IPv4 addresses.
	// This is because the kernel removes any static routes on an interface when all addresses removed.
	ctRoutes, err := n.bootRoutesV4()
	if err != nil {
		return err
	}

	// Flush all IPv4 addresses and routes
	_, err = shared.RunCommand("ip", "-4", "addr", "flush", "dev", n.name, "scope", "global")
	if err != nil {
		return err
	}

	_, err = shared.RunCommand("ip", "-4", "route", "flush", "dev", n.name, "proto", "static")
	if err != nil {
		return err
	}

	// Configure IPv4 firewall (includes fan)
	if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
		if n.HasDHCPv4() && n.hasIPv4Firewall() {
			// Setup basic iptables overrides for DHCP/DNS
			err = n.state.Firewall.NetworkSetupDHCPDNSAccess(n.name, 4)
			if err != nil {
				return err
			}
		}

		// Attempt a workaround for broken DHCP clients
		if n.hasIPv4Firewall() {
			err = n.state.Firewall.NetworkSetupDHCPv4Checksum(n.name)
			if err != nil {
				return err
			}
		}

		// Allow forwarding
		if n.config["bridge.mode"] == "fan" || n.config["ipv4.routing"] == "" || shared.IsTrue(n.config["ipv4.routing"]) {
			err = util.SysctlSet("net/ipv4/ip_forward", "1")
			if err != nil {
				return err
			}

			if n.hasIPv4Firewall() {
				err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 4, true)
				if err != nil {
					return err
				}
			}
		} else {
			if n.hasIPv4Firewall() {
				err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 4, false)
				if err != nil {
					return err
				}
			}
		}
	}

	// Start building process using subprocess package
	command := "dnsmasq"
	dnsmasqCmd := []string{"--keep-in-foreground", "--strict-order", "--bind-interfaces",
		"--except-interface=lo",
		"--pid-file=", // Disable attempt at writing a PID file.
		"--no-ping",   // --no-ping is very important to prevent delays to lease file updates.
		fmt.Sprintf("--interface=%s", n.name)}

	dnsmasqVersion, err := dnsmasq.GetVersion()
	if err != nil {
		return err
	}

	// --dhcp-rapid-commit option is only supported on >2.79
	minVer, _ := version.NewDottedVersion("2.79")
	if dnsmasqVersion.Compare(minVer) > 0 {
		dnsmasqCmd = append(dnsmasqCmd, "--dhcp-rapid-commit")
	}

	if !daemon.Debug {
		// --quiet options are only supported on >2.67
		minVer, _ := version.NewDottedVersion("2.67")
		if err == nil && dnsmasqVersion.Compare(minVer) > 0 {
			dnsmasqCmd = append(dnsmasqCmd, []string{"--quiet-dhcp", "--quiet-dhcp6", "--quiet-ra"}...)
		}
	}

	// Configure IPv4
	if !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
		// Parse the subnet
		ip, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
		if err != nil {
			return err
		}

		// Update the dnsmasq config
		dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--listen-address=%s", ip.String()))
		if n.HasDHCPv4() {
			if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
				dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
			}

			if n.config["ipv4.dhcp.gateway"] != "" {
				// DHCP option 3: default gateway.
				dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=3,%s", n.config["ipv4.dhcp.gateway"]))
			}

			if mtu != "1500" {
				// DHCP option 26: interface MTU.
				dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=26,%s", mtu))
			}

			dnsSearch := n.config["dns.search"]
			if dnsSearch != "" {
				// DHCP option 119: domain search list.
				dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=119,%s", strings.Trim(dnsSearch, " ")))
			}

			expiry := "1h"
			if n.config["ipv4.dhcp.expiry"] != "" {
				expiry = n.config["ipv4.dhcp.expiry"]
			}

			if n.config["ipv4.dhcp.ranges"] != "" {
				for _, dhcpRange := range strings.Split(n.config["ipv4.dhcp.ranges"], ",") {
					dhcpRange = strings.TrimSpace(dhcpRange)
					dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s", strings.Replace(dhcpRange, "-", ",", -1), expiry)}...)
				}
			} else {
				dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%s", GetIP(subnet, 2).String(), GetIP(subnet, -2).String(), expiry)}...)
			}
		}

		// Add the address
		_, err = shared.RunCommand("ip", "-4", "addr", "add", "dev", n.name, n.config["ipv4.address"])
		if err != nil {
			return err
		}

		// Configure NAT
		if shared.IsTrue(n.config["ipv4.nat"]) {
			// If a SNAT source address is specified, use that, otherwise default to using MASQUERADE mode.
			var srcIP net.IP
			if n.config["ipv4.nat.address"] != "" {
				srcIP = net.ParseIP(n.config["ipv4.nat.address"])
			}

			if n.config["ipv4.nat.order"] == "after" {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, true)
				if err != nil {
					return err
				}
			} else {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, false)
				if err != nil {
					return err
				}
			}
		}

		// Add additional routes
		if n.config["ipv4.routes"] != "" {
			for _, route := range strings.Split(n.config["ipv4.routes"], ",") {
				route = strings.TrimSpace(route)
				_, err = shared.RunCommand("ip", "-4", "route", "add", "dev", n.name, route, "proto", "static")
				if err != nil {
					return err
				}
			}
		}

		// Restore container specific IPv4 routes to interface.
		err = n.applyBootRoutesV4(ctRoutes)
		if err != nil {
			return err
		}
	}

	// Remove any existing IPv6 firewall rules.
	if usesIPv6Firewall(n.config) || usesIPv6Firewall(oldConfig) {
		err = n.state.Firewall.NetworkClear(n.name, 6)
		if err != nil {
			return err
		}
	}

	// Snapshot container specific IPv6 routes (added with boot proto) before removing IPv6 addresses.
	// This is because the kernel removes any static routes on an interface when all addresses removed.
	ctRoutes, err = n.bootRoutesV6()
	if err != nil {
		return err
	}

	// Flush all IPv6 addresses and routes
	_, err = shared.RunCommand("ip", "-6", "addr", "flush", "dev", n.name, "scope", "global")
	if err != nil {
		return err
	}

	_, err = shared.RunCommand("ip", "-6", "route", "flush", "dev", n.name, "proto", "static")
	if err != nil {
		return err
	}

	// Configure IPv6
	if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
		// Enable IPv6 for the subnet
		err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/disable_ipv6", n.name), "0")
		if err != nil {
			return err
		}

		// Parse the subnet
		ip, subnet, err := net.ParseCIDR(n.config["ipv6.address"])
		if err != nil {
			return err
		}
		subnetSize, _ := subnet.Mask.Size()

		if subnetSize > 64 {
			n.logger.Warn("IPv6 networks with a prefix larger than 64 aren't properly supported by dnsmasq")
		}

		// Update the dnsmasq config
		dnsmasqCmd = append(dnsmasqCmd, []string{fmt.Sprintf("--listen-address=%s", ip.String()), "--enable-ra"}...)
		if n.HasDHCPv6() {
			if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
				// Setup basic iptables overrides for DHCP/DNS
				err = n.state.Firewall.NetworkSetupDHCPDNSAccess(n.name, 6)
				if err != nil {
					return err
				}
			}

			// Build DHCP configuration
			if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
				dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
			}

			expiry := "1h"
			if n.config["ipv6.dhcp.expiry"] != "" {
				expiry = n.config["ipv6.dhcp.expiry"]
			}

			if shared.IsTrue(n.config["ipv6.dhcp.stateful"]) {
				if n.config["ipv6.dhcp.ranges"] != "" {
					for _, dhcpRange := range strings.Split(n.config["ipv6.dhcp.ranges"], ",") {
						dhcpRange = strings.TrimSpace(dhcpRange)
						dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%d,%s", strings.Replace(dhcpRange, "-", ",", -1), subnetSize, expiry)}...)
					}
				} else {
					dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%d,%s", GetIP(subnet, 2), GetIP(subnet, -1), subnetSize, expiry)}...)
				}
			} else {
				// Stateless DHCPv6 (SLAAC with RDNSS etc.).
				dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-stateless,ra-names", n.name)}...)
			}
		} else {
			// Router advertisements only, no DHCPv6.
			dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-only", n.name)}...)
		}

		// Allow forwarding
		if n.config["ipv6.routing"] == "" || shared.IsTrue(n.config["ipv6.routing"]) {
			// Get a list of proc entries
			entries, err := ioutil.ReadDir("/proc/sys/net/ipv6/conf/")
			if err != nil {
				return err
			}

			// First set accept_ra to 2 for everything
			for _, entry := range entries {
				content, err := ioutil.ReadFile(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/accept_ra", entry.Name()))
				if err == nil && string(content) != "1\n" {
					continue
				}

				err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", entry.Name()), "2")
				if err != nil && !os.IsNotExist(err) {
					return err
				}
			}

			// Then set forwarding for all of them
			for _, entry := range entries {
				err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/forwarding", entry.Name()), "1")
				if err != nil && !os.IsNotExist(err) {
					return err
				}
			}

			if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
				err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 6, true)
				if err != nil {
					return err
				}
			}
		} else {
			if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
				err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 6, false)
				if err != nil {
					return err
				}
			}
		}

		// Add the address
		_, err = shared.RunCommand("ip", "-6", "addr", "add", "dev", n.name, n.config["ipv6.address"])
		if err != nil {
			return err
		}

		// Configure NAT
		if shared.IsTrue(n.config["ipv6.nat"]) {
			var srcIP net.IP
			if n.config["ipv6.nat.address"] != "" {
				srcIP = net.ParseIP(n.config["ipv6.nat.address"])
			}

			if n.config["ipv6.nat.order"] == "after" {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, true)
				if err != nil {
					return err
				}
			} else {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, false)
				if err != nil {
					return err
				}
			}
		}

		// Add additional routes
		if n.config["ipv6.routes"] != "" {
			for _, route := range strings.Split(n.config["ipv6.routes"], ",") {
				route = strings.TrimSpace(route)
				_, err = shared.RunCommand("ip", "-6", "route", "add", "dev", n.name, route, "proto", "static")
				if err != nil {
					return err
				}
			}
		}

		// Restore container specific IPv6 routes to interface.
		err = n.applyBootRoutesV6(ctRoutes)
		if err != nil {
			return err
		}
	}

	// Configure the fan
	dnsClustered := false
	dnsClusteredAddress := ""
	var overlaySubnet *net.IPNet
	if n.config["bridge.mode"] == "fan" {
		tunName := fmt.Sprintf("%s-fan", n.name)

		// Parse the underlay
		underlay := n.config["fan.underlay_subnet"]
		_, underlaySubnet, err := net.ParseCIDR(underlay)
		if err != nil {
			// Fix: this previously returned nil, silently aborting setup on an
			// invalid fan.underlay_subnet instead of surfacing the parse error.
			return err
		}

		// Parse the overlay
		overlay := n.config["fan.overlay_subnet"]
		if overlay == "" {
			overlay = "240.0.0.0/8"
		}

		_, overlaySubnet, err = net.ParseCIDR(overlay)
		if err != nil {
			return err
		}

		// Get the address
		fanAddress, devName, devAddr, err := n.fanAddress(underlaySubnet, overlaySubnet)
		if err != nil {
			return err
		}

		addr := strings.Split(fanAddress, "/")
		if n.config["fan.type"] == "ipip" {
			fanAddress = fmt.Sprintf("%s/24", addr[0])
		}

		// Update the MTU based on overlay device (if available)
		fanMtuInt, err := GetDevMTU(devName)
		if err == nil {
			// Apply overhead
			if n.config["fan.type"] == "ipip" {
				fanMtuInt = fanMtuInt - 20
			} else {
				fanMtuInt = fanMtuInt - 50
			}

			// Apply changes
			fanMtu := fmt.Sprintf("%d", fanMtuInt)
			if fanMtu != mtu {
				mtu = fanMtu
				if n.config["bridge.driver"] != "openvswitch" {
					_, err = shared.RunCommand("ip", "link", "set", "dev", fmt.Sprintf("%s-mtu", n.name), "mtu", mtu)
					if err != nil {
						return err
					}
				}

				_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "mtu", mtu)
				if err != nil {
					return err
				}
			}
		}

		// Parse the host subnet
		_, hostSubnet, err := net.ParseCIDR(fmt.Sprintf("%s/24", addr[0]))
		if err != nil {
			return err
		}

		// Add the address
		_, err = shared.RunCommand("ip", "-4", "addr", "add", "dev", n.name, fanAddress)
		if err != nil {
			return err
		}

		// Update the dnsmasq config
		expiry := "1h"
		if n.config["ipv4.dhcp.expiry"] != "" {
			expiry = n.config["ipv4.dhcp.expiry"]
		}

		dnsmasqCmd = append(dnsmasqCmd, []string{
			fmt.Sprintf("--listen-address=%s", addr[0]),
			"--dhcp-no-override", "--dhcp-authoritative",
			fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")),
			fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts")),
			"--dhcp-range", fmt.Sprintf("%s,%s,%s", GetIP(hostSubnet, 2).String(), GetIP(hostSubnet, -2).String(), expiry)}...)

		// Setup the tunnel
		if n.config["fan.type"] == "ipip" {
			_, err = shared.RunCommand("ip", "-4", "route", "flush", "dev", "tunl0")
			if err != nil {
				return err
			}

			_, err = shared.RunCommand("ip", "link", "set", "dev", "tunl0", "up")
			if err != nil {
				return err
			}

			// Fails if the map is already set
			shared.RunCommand("ip", "link", "change", "dev", "tunl0", "type", "ipip", "fan-map", fmt.Sprintf("%s:%s", overlay, underlay))

			_, err = shared.RunCommand("ip", "route", "add", overlay, "dev", "tunl0", "src", addr[0])
			if err != nil {
				return err
			}
		} else {
			vxlanID := fmt.Sprintf("%d", binary.BigEndian.Uint32(overlaySubnet.IP.To4())>>8)

			_, err = shared.RunCommand("ip", "link", "add", tunName, "type", "vxlan", "id", vxlanID, "dev", devName, "dstport", "0", "local", devAddr, "fan-map", fmt.Sprintf("%s:%s", overlay, underlay))
			if err != nil {
				return err
			}

			err = AttachInterface(n.name, tunName)
			if err != nil {
				return err
			}

			_, err = shared.RunCommand("ip", "link", "set", "dev", tunName, "mtu", mtu, "up")
			if err != nil {
				return err
			}

			_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
			if err != nil {
				return err
			}
		}

		// Configure NAT
		if n.config["ipv4.nat"] == "" || shared.IsTrue(n.config["ipv4.nat"]) {
			if n.config["ipv4.nat.order"] == "after" {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, overlaySubnet, nil, true)
				if err != nil {
					return err
				}
			} else {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, overlaySubnet, nil, false)
				if err != nil {
					return err
				}
			}
		}

		// Setup clustered DNS
		clusterAddress, err := node.ClusterAddress(n.state.Node)
		if err != nil {
			return err
		}

		// If clusterAddress is non-empty, this indicates the intention for this node to be
		// part of a cluster and so we should ensure that dnsmasq and forkdns are started
		// in cluster mode. Note: During LXD initialisation the cluster may not actually be
		// setup yet, but we want the DNS processes to be ready for when it is.
		if clusterAddress != "" {
			dnsClustered = true
		}

		dnsClusteredAddress = strings.Split(fanAddress, "/")[0]
	}

	// Configure tunnels
	for _, tunnel := range tunnels {
		getConfig := func(key string) string {
			return n.config[fmt.Sprintf("tunnel.%s.%s", tunnel, key)]
		}

		tunProtocol := getConfig("protocol")
		tunLocal := getConfig("local")
		tunRemote := getConfig("remote")
		tunName := fmt.Sprintf("%s-%s", n.name, tunnel)

		// Configure the tunnel
		cmd := []string{"ip", "link", "add", "dev", tunName}
		if tunProtocol == "gre" {
			// Skip partial configs
			if tunProtocol == "" || tunLocal == "" || tunRemote == "" {
				continue
			}

			cmd = append(cmd, []string{"type", "gretap", "local", tunLocal, "remote", tunRemote}...)
		} else if tunProtocol == "vxlan" {
			tunGroup := getConfig("group")
			tunInterface := getConfig("interface")

			// Skip partial configs
			if tunProtocol == "" {
				continue
			}

			cmd = append(cmd, []string{"type", "vxlan"}...)

			if tunLocal != "" && tunRemote != "" {
				cmd = append(cmd, []string{"local", tunLocal, "remote", tunRemote}...)
			} else {
				if tunGroup == "" {
					tunGroup = "239.0.0.1"
				}

				devName := tunInterface
				if devName == "" {
					_, devName, err = DefaultGatewaySubnetV4()
					if err != nil {
						return err
					}
				}

				cmd = append(cmd, []string{"group", tunGroup, "dev", devName}...)
			}

			tunPort := getConfig("port")
			if tunPort == "" {
				tunPort = "0"
			}
			cmd = append(cmd, []string{"dstport", tunPort}...)

			tunID := getConfig("id")
			if tunID == "" {
				tunID = "1"
			}
			cmd = append(cmd, []string{"id", tunID}...)

			tunTTL := getConfig("ttl")
			if tunTTL == "" {
				tunTTL = "1"
			}
			cmd = append(cmd, []string{"ttl", tunTTL}...)
		}

		// Create the interface
		_, err = shared.RunCommand(cmd[0], cmd[1:]...)
		if err != nil {
			return err
		}

		// Bridge it and bring up
		err = AttachInterface(n.name, tunName)
		if err != nil {
			return err
		}

		_, err = shared.RunCommand("ip", "link", "set", "dev", tunName, "mtu", mtu, "up")
		if err != nil {
			return err
		}

		_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
		if err != nil {
			return err
		}
	}

	// Generate and load apparmor profiles.
	err = apparmor.NetworkLoad(n.state, n)
	if err != nil {
		return err
	}

	// Kill any existing dnsmasq and forkdns daemon for this network
	err = dnsmasq.Kill(n.name, false)
	if err != nil {
		return err
	}

	err = n.killForkDNS()
	if err != nil {
		return err
	}

	// Configure dnsmasq
	if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) || !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
		// Setup the dnsmasq domain
		dnsDomain := n.config["dns.domain"]
		if dnsDomain == "" {
			dnsDomain = "lxd"
		}

		if n.config["dns.mode"] != "none" {
			if dnsClustered {
				dnsmasqCmd = append(dnsmasqCmd, "-s", dnsDomain)
				dnsmasqCmd = append(dnsmasqCmd, "-S", fmt.Sprintf("/%s/%s#1053", dnsDomain, dnsClusteredAddress))
				dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--rev-server=%s,%s#1053", overlaySubnet, dnsClusteredAddress))
			} else {
				dnsmasqCmd = append(dnsmasqCmd, []string{"-s", dnsDomain, "-S", fmt.Sprintf("/%s/", dnsDomain)}...)
			}
		}

		// Create a config file to contain additional config (and to prevent dnsmasq from reading /etc/dnsmasq.conf)
		err = ioutil.WriteFile(shared.VarPath("networks", n.name, "dnsmasq.raw"), []byte(fmt.Sprintf("%s\n", n.config["raw.dnsmasq"])), 0644)
		if err != nil {
			return err
		}
		dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--conf-file=%s", shared.VarPath("networks", n.name, "dnsmasq.raw")))

		// Attempt to drop privileges
		if n.state.OS.UnprivUser != "" {
			dnsmasqCmd = append(dnsmasqCmd, []string{"-u", n.state.OS.UnprivUser}...)
		}

		// Create DHCP hosts directory
		if !shared.PathExists(shared.VarPath("networks", n.name, "dnsmasq.hosts")) {
			err = os.MkdirAll(shared.VarPath("networks", n.name, "dnsmasq.hosts"), 0755)
			if err != nil {
				return err
			}
		}

		// Check for dnsmasq
		_, err := exec.LookPath("dnsmasq")
		if err != nil {
			return fmt.Errorf("dnsmasq is required for LXD managed bridges")
		}

		// Update the static leases
		err = UpdateDNSMasqStatic(n.state, n.name)
		if err != nil {
			return err
		}

		// Create subprocess object dnsmasq.
		p, err := subprocess.NewProcess(command, dnsmasqCmd, "", "")
		if err != nil {
			return fmt.Errorf("Failed to create subprocess: %s", err)
		}

		// Apply AppArmor confinement.
		if n.config["raw.dnsmasq"] == "" {
			p.SetApparmor(apparmor.DnsmasqProfileName(n))
		} else {
			n.logger.Warn("Skipping AppArmor for dnsmasq due to raw.dnsmasq being set", log.Ctx{"name": n.name})
		}

		// Start dnsmasq.
		err = p.Start()
		if err != nil {
			return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(dnsmasqCmd, " "), err)
		}

		err = p.Save(shared.VarPath("networks", n.name, "dnsmasq.pid"))
		if err != nil {
			// Kill Process if started, but could not save the file.
			err2 := p.Stop()
			// Fix: this previously checked err (always non-nil here) instead of
			// err2, so the "Failed to save" return below was unreachable.
			if err2 != nil {
				return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
			}

			return fmt.Errorf("Failed to save subprocess details: %s", err)
		}

		// Spawn DNS forwarder if needed (backgrounded to avoid deadlocks during cluster boot)
		if dnsClustered {
			// Create forkdns servers directory
			if !shared.PathExists(shared.VarPath("networks", n.name, ForkdnsServersListPath)) {
				err = os.MkdirAll(shared.VarPath("networks", n.name, ForkdnsServersListPath), 0755)
				if err != nil {
					return err
				}
			}

			// Create forkdns servers.conf file if doesn't exist
			f, err := os.OpenFile(shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile), os.O_RDONLY|os.O_CREATE, 0666)
			if err != nil {
				return err
			}
			f.Close()

			err = n.spawnForkDNS(dnsClusteredAddress)
			if err != nil {
				return err
			}
		}
	} else {
		// Clean up old dnsmasq config if exists and we are not starting dnsmasq.
		leasesPath := shared.VarPath("networks", n.name, "dnsmasq.leases")
		if shared.PathExists(leasesPath) {
			err := os.Remove(leasesPath)
			if err != nil {
				return errors.Wrapf(err, "Failed to remove old dnsmasq leases file '%s'", leasesPath)
			}
		}

		// And same for our PID file.
		pidPath := shared.VarPath("networks", n.name, "dnsmasq.pid")
		if shared.PathExists(pidPath) {
			err := os.Remove(pidPath)
			if err != nil {
				return errors.Wrapf(err, "Failed to remove old dnsmasq pid file '%s'", pidPath)
			}
		}
	}

	return nil
}
// Stop stops the network: removes the bridge interface, clears firewall
// rules, kills the dnsmasq/forkdns daemons, deletes tunnel devices and
// unloads apparmor profiles. A no-op if the bridge doesn't exist.
func (n *bridge) Stop() error {
	if !n.isRunning() {
		return nil
	}

	// Destroy the bridge interface
	if n.config["bridge.driver"] == "openvswitch" {
		_, err := shared.RunCommand("ovs-vsctl", "del-br", n.name)
		if err != nil {
			return err
		}
	} else {
		_, err := shared.RunCommand("ip", "link", "del", "dev", n.name)
		if err != nil {
			return err
		}
	}

	// Cleanup firewall rules (only for the families the config used).
	if usesIPv4Firewall(n.config) {
		err := n.state.Firewall.NetworkClear(n.name, 4)
		if err != nil {
			return err
		}
	}

	if usesIPv6Firewall(n.config) {
		err := n.state.Firewall.NetworkClear(n.name, 6)
		if err != nil {
			return err
		}
	}

	// Kill any existing dnsmasq and forkdns daemon for this network
	err := dnsmasq.Kill(n.name, false)
	if err != nil {
		return err
	}

	err = n.killForkDNS()
	if err != nil {
		return err
	}

	// Get a list of interfaces
	ifaces, err := net.Interfaces()
	if err != nil {
		return err
	}

	// Cleanup any existing tunnel device (named with a "<bridge>-" prefix).
	for _, iface := range ifaces {
		if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) {
			_, err = shared.RunCommand("ip", "link", "del", "dev", iface.Name)
			if err != nil {
				return err
			}
		}
	}

	// Unload apparmor profiles.
	err = apparmor.NetworkUnload(n.state, n)
	if err != nil {
		return err
	}

	return nil
}
// Update updates the network. Accepts notification boolean indicating if this update request is coming from a
// cluster notification, in which case do not update the database, just apply local changes needed.
// On failure, a revert hook restores the old config across nodes/database and re-runs local setup.
func (n *bridge) Update(newNetwork api.NetworkPut, targetNode string, clusterNotification bool) error {
	n.logger.Debug("Update", log.Ctx{"clusterNotification": clusterNotification, "newNetwork": newNetwork})

	// When switching to a fan bridge, auto-detect the underlay if not specified.
	if newNetwork.Config["bridge.mode"] == "fan" {
		if newNetwork.Config["fan.underlay_subnet"] == "" {
			newNetwork.Config["fan.underlay_subnet"] = "auto"
		}
	}

	// Populate auto fields.
	err := fillAuto(newNetwork.Config)
	if err != nil {
		return err
	}

	// Renamed from the original misspelled "dbUpdateNeeeded".
	dbUpdateNeeded, changedKeys, oldNetwork, err := n.common.configChanged(newNetwork)
	if err != nil {
		return err
	}

	if !dbUpdateNeeded {
		return nil // Nothing changed.
	}

	revert := revert.New()
	defer revert.Fail()

	// Define a function which reverts everything.
	revert.Add(func() {
		// Reset changes to all nodes and database.
		n.common.update(oldNetwork, targetNode, clusterNotification)

		// Reset any change that was made to local bridge.
		n.setup(newNetwork.Config)
	})

	// Bring the bridge down entirely if the driver has changed.
	if shared.StringInSlice("bridge.driver", changedKeys) && n.isRunning() {
		err = n.Stop()
		if err != nil {
			return err
		}
	}

	// Detach any external interfaces should no longer be attached.
	if shared.StringInSlice("bridge.external_interfaces", changedKeys) && n.isRunning() {
		devices := []string{}
		for _, dev := range strings.Split(newNetwork.Config["bridge.external_interfaces"], ",") {
			dev = strings.TrimSpace(dev)
			devices = append(devices, dev)
		}

		for _, dev := range strings.Split(oldNetwork.Config["bridge.external_interfaces"], ",") {
			dev = strings.TrimSpace(dev)
			if dev == "" {
				continue
			}

			// Detach only interfaces dropped from the new config that still exist.
			if !shared.StringInSlice(dev, devices) && shared.PathExists(fmt.Sprintf("/sys/class/net/%s", dev)) {
				err = DetachInterface(n.name, dev)
				if err != nil {
					return err
				}
			}
		}
	}

	// Apply changes to database.
	err = n.common.update(newNetwork, targetNode, clusterNotification)
	if err != nil {
		return err
	}

	// Restart the network if needed.
	if len(changedKeys) > 0 {
		err = n.setup(oldNetwork.Config)
		if err != nil {
			return err
		}
	}

	revert.Success()
	return nil
}
// spawnForkDNS starts the forkdns DNS forwarder for this network, listening
// on listenAddress:1053, logging to the network's forkdns log file and saving
// its PID file so it can be stopped later.
func (n *bridge) spawnForkDNS(listenAddress string) error {
	// Setup the dnsmasq domain
	dnsDomain := n.config["dns.domain"]
	if dnsDomain == "" {
		dnsDomain = "lxd"
	}

	// Spawn the daemon using subprocess
	command := n.state.OS.ExecPath
	forkdnsargs := []string{"forkdns",
		fmt.Sprintf("%s:1053", listenAddress),
		dnsDomain,
		n.name}

	logPath := shared.LogPath(fmt.Sprintf("forkdns.%s.log", n.name))

	p, err := subprocess.NewProcess(command, forkdnsargs, logPath, logPath)
	if err != nil {
		return fmt.Errorf("Failed to create subprocess: %s", err)
	}

	err = p.Start()
	if err != nil {
		return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(forkdnsargs, " "), err)
	}

	err = p.Save(shared.VarPath("networks", n.name, "forkdns.pid"))
	if err != nil {
		// Kill Process if started, but could not save the file.
		err2 := p.Stop()
		// Fix: previously checked err (always non-nil here) instead of err2,
		// making the "Failed to save" return below unreachable.
		if err2 != nil {
			return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
		}

		return fmt.Errorf("Failed to save subprocess details: %s", err)
	}

	return nil
}
// HandleHeartbeat refreshes forkdns servers. Retrieves the IPv4 address of each cluster node (excluding ourselves)
// for this network. It then updates the forkdns server list file if there are changes.
func (n *bridge) HandleHeartbeat(heartbeatData *cluster.APIHeartbeat) error {
	addresses := []string{}
	localAddress, err := node.HTTPSAddress(n.state.Node)
	if err != nil {
		return err
	}

	n.logger.Info("Refreshing forkdns peers")

	cert := n.state.Endpoints.NetworkCert()
	// NOTE(review): the loop variable shadows the imported "node" package
	// within this loop body.
	for _, node := range heartbeatData.Members {
		if node.Address == localAddress {
			// No need to query ourselves.
			continue
		}

		client, err := cluster.Connect(node.Address, cert, true)
		if err != nil {
			return err
		}

		state, err := client.GetNetworkState(n.name)
		if err != nil {
			return err
		}

		for _, addr := range state.Addresses {
			// Only get IPv4 addresses of nodes on network.
			if addr.Family != "inet" || addr.Scope != "global" {
				continue
			}

			// Record only the first matching address per node.
			addresses = append(addresses, addr.Address)
			break
		}
	}

	// Compare current stored list to retrieved list and see if we need to update.
	curList, err := ForkdnsServersList(n.name)
	if err != nil {
		// Only warn here, but continue on to regenerate the servers list from cluster info.
		n.logger.Warn("Failed to load existing forkdns server list", log.Ctx{"err": err})
	}

	// If current list is same as cluster list, nothing to do.
	if err == nil && reflect.DeepEqual(curList, addresses) {
		return nil
	}

	err = n.updateForkdnsServersFile(addresses)
	if err != nil {
		return err
	}

	n.logger.Info("Updated forkdns server list", log.Ctx{"nodes": addresses})
	return nil
}
// getTunnels returns the unique tunnel names declared in the network config.
// Tunnel keys have the form "tunnel.<name>.<setting>".
func (n *bridge) getTunnels() []string {
	tunnels := []string{}
	for key := range n.config {
		if !strings.HasPrefix(key, "tunnel.") {
			continue
		}

		// The tunnel name is the second dot-separated element of the key.
		tunnelName := strings.Split(key, ".")[1]
		if !shared.StringInSlice(tunnelName, tunnels) {
			tunnels = append(tunnels, tunnelName)
		}
	}

	return tunnels
}
// bootRoutesV4 returns a list of IPv4 boot routes on the network's device.
func (n *bridge) bootRoutesV4() ([]string, error) {
	routes := []string{}
	cmd := exec.Command("ip", "-4", "route", "show", "dev", n.name, "proto", "boot")
	ipOut, err := cmd.StdoutPipe()
	if err != nil {
		return routes, err
	}

	// Bug fix: Start/Wait and scanner errors were previously ignored, which
	// could silently return an empty/partial route list on failure.
	err = cmd.Start()
	if err != nil {
		return routes, err
	}

	scanner := bufio.NewScanner(ipOut)
	for scanner.Scan() {
		// Strip the "linkdown" flag so the route can be re-applied later.
		route := strings.Replace(scanner.Text(), "linkdown", "", -1)
		routes = append(routes, route)
	}

	err = scanner.Err()
	if err != nil {
		return routes, err
	}

	err = cmd.Wait()
	if err != nil {
		return routes, err
	}

	return routes, nil
}
// bootRoutesV6 returns a list of IPv6 boot routes on the network's device.
func (n *bridge) bootRoutesV6() ([]string, error) {
	routes := []string{}
	cmd := exec.Command("ip", "-6", "route", "show", "dev", n.name, "proto", "boot")
	ipOut, err := cmd.StdoutPipe()
	if err != nil {
		return routes, err
	}

	// Bug fix: Start/Wait and scanner errors were previously ignored, which
	// could silently return an empty/partial route list on failure.
	err = cmd.Start()
	if err != nil {
		return routes, err
	}

	scanner := bufio.NewScanner(ipOut)
	for scanner.Scan() {
		// Strip the "linkdown" flag so the route can be re-applied later.
		route := strings.Replace(scanner.Text(), "linkdown", "", -1)
		routes = append(routes, route)
	}

	err = scanner.Err()
	if err != nil {
		return routes, err
	}

	err = cmd.Wait()
	if err != nil {
		return routes, err
	}

	return routes, nil
}
// applyBootRoutesV4 applies a list of IPv4 boot routes to the network's device.
func (n *bridge) applyBootRoutesV4(routes []string) error {
	for _, route := range routes {
		// Re-apply each saved route on the bridge device with the boot protocol.
		args := append([]string{"-4", "route", "replace", "dev", n.name, "proto", "boot"}, strings.Fields(route)...)

		_, err := shared.RunCommand("ip", args...)
		if err != nil {
			return err
		}
	}

	return nil
}
// applyBootRoutesV6 applies a list of IPv6 boot routes to the network's device.
func (n *bridge) applyBootRoutesV6(routes []string) error {
	for _, route := range routes {
		// Re-apply each saved route on the bridge device with the boot protocol.
		args := append([]string{"-6", "route", "replace", "dev", n.name, "proto", "boot"}, strings.Fields(route)...)

		_, err := shared.RunCommand("ip", args...)
		if err != nil {
			return err
		}
	}

	return nil
}
// fanAddress computes this host's FAN bridge address from the underlay and
// overlay subnets. It returns the address in CIDR notation, the name of the
// underlay interface, and the plain underlay IP used, in that order.
func (n *bridge) fanAddress(underlay *net.IPNet, overlay *net.IPNet) (string, string, string, error) {
	// Sanity checks
	underlaySize, _ := underlay.Mask.Size()
	if underlaySize != 16 && underlaySize != 24 {
		return "", "", "", fmt.Errorf("Only /16 or /24 underlays are supported at this time")
	}
	overlaySize, _ := overlay.Mask.Size()
	if overlaySize != 8 && overlaySize != 16 {
		return "", "", "", fmt.Errorf("Only /8 or /16 overlays are supported at this time")
	}
	// The overlay prefix, the underlay host bits and 8 subnet bits must all
	// fit within a 32-bit IPv4 address.
	if overlaySize+(32-underlaySize)+8 > 32 {
		return "", "", "", fmt.Errorf("Underlay or overlay networks too large to accommodate the FAN")
	}
	// Get the IP of a local interface inside the underlay subnet.
	ip, dev, err := n.addressForSubnet(underlay)
	if err != nil {
		return "", "", "", err
	}
	ipStr := ip.String()
	// Force into IPv4 format (4-byte representation).
	ipBytes := ip.To4()
	if ipBytes == nil {
		return "", "", "", fmt.Errorf("Invalid IPv4: %s", ip)
	}
	// Compute the IP: graft the underlay host octets onto the overlay prefix.
	ipBytes[0] = overlay.IP[0]
	if overlaySize == 16 {
		ipBytes[1] = overlay.IP[1]
		ipBytes[2] = ipBytes[3]
	} else if underlaySize == 24 {
		ipBytes[1] = ipBytes[3]
		ipBytes[2] = 0
	} else if underlaySize == 16 {
		ipBytes[1] = ipBytes[2]
		ipBytes[2] = ipBytes[3]
	}
	// The host part is always .1 (the bridge's own address in its subnet).
	ipBytes[3] = 1
	// err is guaranteed nil at this point.
	return fmt.Sprintf("%s/%d", ipBytes.String(), overlaySize), dev, ipStr, err
}
// addressForSubnet returns the first local IP address found inside subnet,
// together with the name of the interface carrying it.
func (n *bridge) addressForSubnet(subnet *net.IPNet) (net.IP, string, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return net.IP{}, "", err
	}

	for _, iface := range ifaces {
		addrs, err := iface.Addrs()
		if err != nil {
			// Skip interfaces whose addresses cannot be read.
			continue
		}

		for _, addr := range addrs {
			ip, _, parseErr := net.ParseCIDR(addr.String())
			if parseErr == nil && subnet.Contains(ip) {
				return ip, iface.Name, nil
			}
		}
	}

	return net.IP{}, "", fmt.Errorf("No address found in subnet")
}
// killForkDNS stops the forkdns process for this network, if one is running.
func (n *bridge) killForkDNS() error {
	// Check if we have a running forkdns at all.
	pidPath := shared.VarPath("networks", n.name, "forkdns.pid")

	// If the pid file doesn't exist, there is no process to kill.
	if !shared.PathExists(pidPath) {
		return nil
	}

	p, err := subprocess.ImportProcess(pidPath)
	if err != nil {
		return fmt.Errorf("Could not read pid file: %s", err)
	}

	err = p.Stop()
	if err != nil && err != subprocess.ErrNotRunning {
		// Bug fix: the message previously said "dnsmasq" but this function
		// kills forkdns (copy/paste error).
		return fmt.Errorf("Unable to kill forkdns: %s", err)
	}

	return nil
}
// updateForkdnsServersFile takes a list of node addresses and writes them atomically to
// the forkdns.servers file ready for forkdns to notice and re-apply its config.
func (n *bridge) updateForkdnsServersFile(addresses []string) error {
	// We don't want to race with ourselves here.
	forkdnsServersLock.Lock()
	defer forkdnsServersLock.Unlock()

	permName := shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile)
	tmpName := permName + ".tmp"

	// Open tmp file and truncate.
	tmpFile, err := os.Create(tmpName)
	if err != nil {
		return err
	}
	// Ensure the file is closed on early error returns; the duplicate Close on
	// the success path is harmless.
	defer tmpFile.Close()

	for _, address := range addresses {
		_, err := tmpFile.WriteString(address + "\n")
		if err != nil {
			return err
		}
	}

	// Bug fix: check the Close error so buffered write failures (e.g. disk
	// full) are detected before the file is moved into place.
	err = tmpFile.Close()
	if err != nil {
		return err
	}

	// Atomically rename finished file into permanent location so forkdns can pick it up.
	err = os.Rename(tmpName, permName)
	if err != nil {
		return err
	}

	return nil
}
// hasIPv4Firewall indicates whether the network has IPv4 firewall enabled.
// The firewall is on unless "ipv4.firewall" is explicitly set to a false value.
func (n *bridge) hasIPv4Firewall() bool {
	return n.config["ipv4.firewall"] == "" || shared.IsTrue(n.config["ipv4.firewall"])
}
// hasIPv6Firewall indicates whether the network has IPv6 firewall enabled.
// The firewall is on unless "ipv6.firewall" is explicitly set to a false value.
func (n *bridge) hasIPv6Firewall() bool {
	return n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"])
}
lxd/network/driver/bridge: Use openvswitch package for OVS usage
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package network
import (
"bufio"
"encoding/binary"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/apparmor"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/daemon"
"github.com/lxc/lxd/lxd/dnsmasq"
"github.com/lxc/lxd/lxd/network/openvswitch"
"github.com/lxc/lxd/lxd/node"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/subprocess"
"github.com/lxc/lxd/shared/version"
)
// ForkdnsServersListPath defines the path that contains the forkdns server candidate file.
const ForkdnsServersListPath = "forkdns.servers"

// ForkdnsServersListFile file that contains the server candidates list.
const ForkdnsServersListFile = "servers.conf"

// forkdnsServersLock serialises writes to the forkdns servers list file.
var forkdnsServersLock sync.Mutex
// bridge represents a LXD bridge network.
type bridge struct {
	common // Shared driver behaviour: provides n.name, n.config, n.state, n.logger, n.status.
}
// fillConfig fills requested config with any default values.
func (n *bridge) fillConfig(req *api.NetworksPost) error {
	// Robustness fix: writing to a nil map panics, so ensure one exists
	// before applying defaults.
	if req.Config == nil {
		req.Config = map[string]string{}
	}

	// Set some default values where needed.
	if req.Config["bridge.mode"] == "fan" {
		if req.Config["fan.underlay_subnet"] == "" {
			req.Config["fan.underlay_subnet"] = "auto"
		}
	} else {
		if req.Config["ipv4.address"] == "" {
			req.Config["ipv4.address"] = "auto"
		}

		if req.Config["ipv4.address"] == "auto" && req.Config["ipv4.nat"] == "" {
			req.Config["ipv4.nat"] = "true"
		}

		if req.Config["ipv6.address"] == "" {
			// Default IPv6 to auto only when the kernel hasn't globally disabled IPv6.
			content, err := ioutil.ReadFile("/proc/sys/net/ipv6/conf/default/disable_ipv6")
			if err == nil && string(content) == "0\n" {
				req.Config["ipv6.address"] = "auto"
			}
		}

		if req.Config["ipv6.address"] == "auto" && req.Config["ipv6.nat"] == "" {
			req.Config["ipv6.nat"] = "true"
		}
	}

	return nil
}
// Validate network config.
//
// Builds a per-key validation rule set (including dynamically generated rules
// for "tunnel.<name>.<key>" entries), runs per-key validation, then performs
// composite checks spanning multiple keys (bridge mode restrictions and MTU
// bounds). Returns an error describing the first invalid key found.
func (n *bridge) Validate(config map[string]string) error {
	// Build driver specific rules dynamically.
	rules := map[string]func(value string) error{
		"bridge.driver": func(value string) error {
			return shared.IsOneOf(value, []string{"native", "openvswitch"})
		},
		"bridge.external_interfaces": func(value string) error {
			if value == "" {
				return nil
			}
			// Value is a comma separated list of host interface names.
			for _, entry := range strings.Split(value, ",") {
				entry = strings.TrimSpace(entry)
				if err := ValidNetworkName(entry); err != nil {
					return errors.Wrapf(err, "Invalid interface name %q", entry)
				}
			}
			return nil
		},
		"bridge.hwaddr": shared.IsAny,
		"bridge.mtu": shared.IsInt64,
		"bridge.mode": func(value string) error {
			return shared.IsOneOf(value, []string{"standard", "fan"})
		},
		"fan.overlay_subnet": shared.IsNetworkV4,
		"fan.underlay_subnet": func(value string) error {
			// "auto" requests automatic underlay detection.
			if value == "auto" {
				return nil
			}
			return shared.IsNetworkV4(value)
		},
		"fan.type": func(value string) error {
			return shared.IsOneOf(value, []string{"vxlan", "ipip"})
		},
		"ipv4.address": func(value string) error {
			// "none" disables IPv4, "auto" picks a random subnet.
			if shared.IsOneOf(value, []string{"none", "auto"}) == nil {
				return nil
			}
			return shared.IsNetworkAddressCIDRV4(value)
		},
		"ipv4.firewall": shared.IsBool,
		"ipv4.nat": shared.IsBool,
		"ipv4.nat.order": func(value string) error {
			return shared.IsOneOf(value, []string{"before", "after"})
		},
		"ipv4.nat.address": shared.IsNetworkAddressV4,
		"ipv4.dhcp": shared.IsBool,
		"ipv4.dhcp.gateway": shared.IsNetworkAddressV4,
		"ipv4.dhcp.expiry": shared.IsAny,
		"ipv4.dhcp.ranges": shared.IsAny,
		"ipv4.routes": shared.IsNetworkV4List,
		"ipv4.routing": shared.IsBool,
		"ipv6.address": func(value string) error {
			// "none" disables IPv6, "auto" picks a random subnet.
			if shared.IsOneOf(value, []string{"none", "auto"}) == nil {
				return nil
			}
			return shared.IsNetworkAddressCIDRV6(value)
		},
		"ipv6.firewall": shared.IsBool,
		"ipv6.nat": shared.IsBool,
		"ipv6.nat.order": func(value string) error {
			return shared.IsOneOf(value, []string{"before", "after"})
		},
		"ipv6.nat.address": shared.IsNetworkAddressV6,
		"ipv6.dhcp": shared.IsBool,
		"ipv6.dhcp.expiry": shared.IsAny,
		"ipv6.dhcp.stateful": shared.IsBool,
		"ipv6.dhcp.ranges": shared.IsAny,
		"ipv6.routes": shared.IsNetworkV6List,
		"ipv6.routing": shared.IsBool,
		"dns.domain": shared.IsAny,
		"dns.search": shared.IsAny,
		"dns.mode": func(value string) error {
			return shared.IsOneOf(value, []string{"dynamic", "managed", "none"})
		},
		"raw.dnsmasq": shared.IsAny,
		"maas.subnet.ipv4": shared.IsAny,
		"maas.subnet.ipv6": shared.IsAny,
	}
	// Add dynamic validation rules.
	for k := range config {
		// Tunnel keys have the remote name in their name, so extract the real key
		if strings.HasPrefix(k, "tunnel.") {
			// Validate remote name in key.
			fields := strings.Split(k, ".")
			if len(fields) != 3 {
				return fmt.Errorf("Invalid network configuration key: %s", k)
			}
			// Tunnel interfaces are named "<network>-<tunnel>", which must fit
			// in the kernel's 15-character interface name limit.
			if len(n.name)+len(fields[1]) > 14 {
				return fmt.Errorf("Network name too long for tunnel interface: %s-%s", n.name, fields[1])
			}
			tunnelKey := fields[2]
			// Add the correct validation rule for the dynamic field based on last part of key.
			switch tunnelKey {
			case "protocol":
				rules[k] = func(value string) error {
					return shared.IsOneOf(value, []string{"gre", "vxlan"})
				}
			case "local":
				rules[k] = shared.IsNetworkAddress
			case "remote":
				rules[k] = shared.IsNetworkAddress
			case "port":
				rules[k] = networkValidPort
			case "group":
				rules[k] = shared.IsNetworkAddress
			case "id":
				rules[k] = shared.IsInt64
			// NOTE(review): "inteface" looks like a typo of "interface". Fixing it
			// would change which config keys are accepted, so confirm against the
			// documented key name before changing.
			case "inteface":
				rules[k] = ValidNetworkName
			case "ttl":
				rules[k] = shared.IsUint8
			}
		}
	}
	err := n.validate(config, rules)
	if err != nil {
		return err
	}
	// Perform composite key checks after per-key validation.
	// Validate network name when used in fan mode.
	bridgeMode := config["bridge.mode"]
	if bridgeMode == "fan" && len(n.name) > 11 {
		return fmt.Errorf("Network name too long to use with the FAN (must be 11 characters or less)")
	}
	for k, v := range config {
		key := k
		// Bridge mode checks: in fan mode most ipv4.* and all ipv6.* keys are
		// disallowed; outside fan mode no fan.* key may be set.
		if bridgeMode == "fan" && strings.HasPrefix(key, "ipv4.") && !shared.StringInSlice(key, []string{"ipv4.dhcp.expiry", "ipv4.firewall", "ipv4.nat", "ipv4.nat.order"}) && v != "" {
			return fmt.Errorf("IPv4 configuration may not be set when in 'fan' mode")
		}
		if bridgeMode == "fan" && strings.HasPrefix(key, "ipv6.") && v != "" {
			return fmt.Errorf("IPv6 configuration may not be set when in 'fan' mode")
		}
		if bridgeMode != "fan" && strings.HasPrefix(key, "fan.") && v != "" {
			return fmt.Errorf("FAN configuration may only be set when in 'fan' mode")
		}
		// MTU checks
		if key == "bridge.mtu" && v != "" {
			mtu, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				return fmt.Errorf("Invalid value for an integer: %s", v)
			}
			// 1280 is the minimum link MTU required by IPv6.
			ipv6 := config["ipv6.address"]
			if ipv6 != "" && ipv6 != "none" && mtu < 1280 {
				return fmt.Errorf("The minimum MTU for an IPv6 network is 1280")
			}
			// 68 is the minimum MTU required by IPv4.
			ipv4 := config["ipv4.address"]
			if ipv4 != "" && ipv4 != "none" && mtu < 68 {
				return fmt.Errorf("The minimum MTU for an IPv4 network is 68")
			}
			if config["bridge.mode"] == "fan" {
				// FAN encapsulation overhead: 20 bytes for IPIP, 50 for VXLAN.
				if config["fan.type"] == "ipip" {
					if mtu > 1480 {
						return fmt.Errorf("Maximum MTU for an IPIP FAN bridge is 1480")
					}
				} else {
					if mtu > 1450 {
						return fmt.Errorf("Maximum MTU for a VXLAN FAN bridge is 1450")
					}
				}
			}
		}
	}
	return nil
}
// isRunning returns whether the network is up.
func (n *bridge) isRunning() bool {
	// The interface appears in sysfs once the bridge device exists.
	sysPath := fmt.Sprintf("/sys/class/net/%s", n.name)
	return shared.PathExists(sysPath)
}
// Delete deletes a network.
func (n *bridge) Delete(clusterNotification bool) error {
	n.logger.Debug("Delete", log.Ctx{"clusterNotification": clusterNotification})

	// Bring the network down first if it is currently running.
	if n.isRunning() {
		if err := n.Stop(); err != nil {
			return err
		}
	}

	// Delete apparmor profiles.
	if err := apparmor.NetworkDelete(n.state, n); err != nil {
		return err
	}

	return n.common.delete(clusterNotification)
}
// Rename renames a network.
func (n *bridge) Rename(newName string) error {
	n.logger.Debug("Rename", log.Ctx{"newName": newName})

	// Sanity checks.
	inUse, err := n.IsUsed()
	if err != nil {
		return err
	}

	if inUse {
		return fmt.Errorf("The network is currently in use")
	}

	// Bring the network down.
	if n.isRunning() {
		err := n.Stop()
		if err != nil {
			return err
		}
	}

	// Rename forkdns log file.
	forkDNSLogPath := fmt.Sprintf("forkdns.%s.log", n.name)
	if shared.PathExists(shared.LogPath(forkDNSLogPath)) {
		// Bug fix: the rename source must also be resolved via shared.LogPath;
		// previously the bare relative filename was passed, so the rename
		// referred to a non-existent path and always failed.
		err := os.Rename(shared.LogPath(forkDNSLogPath), shared.LogPath(fmt.Sprintf("forkdns.%s.log", newName)))
		if err != nil {
			return err
		}
	}

	// Rename common steps.
	err = n.common.rename(newName)
	if err != nil {
		return err
	}

	// Bring the network up.
	err = n.Start()
	if err != nil {
		return err
	}

	return nil
}
// Start starts the network.
func (n *bridge) Start() error {
	// A nil oldConfig means a fresh start rather than a reconfiguration.
	return n.setup(nil)
}
// setup restarts the network.
func (n *bridge) setup(oldConfig map[string]string) error {
// If we are in mock mode, just no-op.
if n.state.OS.MockMode {
return nil
}
n.logger.Debug("Setting up network")
if n.status == api.NetworkStatusPending {
return fmt.Errorf("Cannot start pending network")
}
// Create directory
if !shared.PathExists(shared.VarPath("networks", n.name)) {
err := os.MkdirAll(shared.VarPath("networks", n.name), 0711)
if err != nil {
return err
}
}
// Create the bridge interface
if !n.isRunning() {
if n.config["bridge.driver"] == "openvswitch" {
ovs := openvswitch.NewOVS()
if !ovs.Installed() {
return fmt.Errorf("Open vSwitch isn't installed on this system")
}
err := ovs.BridgeAdd(n.name)
if err != nil {
return err
}
} else {
_, err := shared.RunCommand("ip", "link", "add", "dev", n.name, "type", "bridge")
if err != nil {
return err
}
}
}
// Get a list of tunnels
tunnels := n.getTunnels()
// IPv6 bridge configuration
if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
if !shared.PathExists("/proc/sys/net/ipv6") {
return fmt.Errorf("Network has ipv6.address but kernel IPv6 support is missing")
}
err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/autoconf", n.name), "0")
if err != nil {
return err
}
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_dad", n.name), "0")
if err != nil {
return err
}
}
// Get a list of interfaces
ifaces, err := net.Interfaces()
if err != nil {
return err
}
// Cleanup any existing tunnel device
for _, iface := range ifaces {
if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) {
_, err = shared.RunCommand("ip", "link", "del", "dev", iface.Name)
if err != nil {
return err
}
}
}
// Set the MTU
mtu := ""
if n.config["bridge.mtu"] != "" {
mtu = n.config["bridge.mtu"]
} else if len(tunnels) > 0 {
mtu = "1400"
} else if n.config["bridge.mode"] == "fan" {
if n.config["fan.type"] == "ipip" {
mtu = "1480"
} else {
mtu = "1450"
}
}
// Attempt to add a dummy device to the bridge to force the MTU
if mtu != "" && n.config["bridge.driver"] != "openvswitch" {
_, err = shared.RunCommand("ip", "link", "add", "dev", fmt.Sprintf("%s-mtu", n.name), "mtu", mtu, "type", "dummy")
if err == nil {
_, err = shared.RunCommand("ip", "link", "set", "dev", fmt.Sprintf("%s-mtu", n.name), "up")
if err == nil {
AttachInterface(n.name, fmt.Sprintf("%s-mtu", n.name))
}
}
}
// Now, set a default MTU
if mtu == "" {
mtu = "1500"
}
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "mtu", mtu)
if err != nil {
return err
}
// Set the MAC address
if n.config["bridge.hwaddr"] != "" {
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "address", n.config["bridge.hwaddr"])
if err != nil {
return err
}
}
// Enable VLAN filtering for Linux bridges.
if n.config["bridge.driver"] != "openvswitch" {
err = BridgeVLANFilterSetStatus(n.name, "1")
if err != nil {
n.logger.Warn(fmt.Sprintf("%v", err))
}
// Set the default PVID for new ports to 1.
err = BridgeVLANSetDefaultPVID(n.name, "1")
if err != nil {
n.logger.Warn(fmt.Sprintf("%v", err))
}
}
// Bring it up
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
if err != nil {
return err
}
// Add any listed existing external interface
if n.config["bridge.external_interfaces"] != "" {
for _, entry := range strings.Split(n.config["bridge.external_interfaces"], ",") {
entry = strings.TrimSpace(entry)
iface, err := net.InterfaceByName(entry)
if err != nil {
n.logger.Warn("Skipping attaching missing external interface", log.Ctx{"interface": entry})
continue
}
unused := true
addrs, err := iface.Addrs()
if err == nil {
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if ip != nil && err == nil && ip.IsGlobalUnicast() {
unused = false
break
}
}
}
if !unused {
return fmt.Errorf("Only unconfigured network interfaces can be bridged")
}
err = AttachInterface(n.name, entry)
if err != nil {
return err
}
}
}
// Remove any existing IPv4 firewall rules.
if usesIPv4Firewall(n.config) || usesIPv4Firewall(oldConfig) {
err = n.state.Firewall.NetworkClear(n.name, 4)
if err != nil {
return err
}
}
// Snapshot container specific IPv4 routes (added with boot proto) before removing IPv4 addresses.
// This is because the kernel removes any static routes on an interface when all addresses removed.
ctRoutes, err := n.bootRoutesV4()
if err != nil {
return err
}
// Flush all IPv4 addresses and routes
_, err = shared.RunCommand("ip", "-4", "addr", "flush", "dev", n.name, "scope", "global")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "-4", "route", "flush", "dev", n.name, "proto", "static")
if err != nil {
return err
}
// Configure IPv4 firewall (includes fan)
if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
if n.HasDHCPv4() && n.hasIPv4Firewall() {
// Setup basic iptables overrides for DHCP/DNS
err = n.state.Firewall.NetworkSetupDHCPDNSAccess(n.name, 4)
if err != nil {
return err
}
}
// Attempt a workaround for broken DHCP clients
if n.hasIPv4Firewall() {
err = n.state.Firewall.NetworkSetupDHCPv4Checksum(n.name)
if err != nil {
return err
}
}
// Allow forwarding
if n.config["bridge.mode"] == "fan" || n.config["ipv4.routing"] == "" || shared.IsTrue(n.config["ipv4.routing"]) {
err = util.SysctlSet("net/ipv4/ip_forward", "1")
if err != nil {
return err
}
if n.hasIPv4Firewall() {
err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 4, true)
if err != nil {
return err
}
}
} else {
if n.hasIPv4Firewall() {
err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 4, false)
if err != nil {
return err
}
}
}
}
// Start building process using subprocess package
command := "dnsmasq"
dnsmasqCmd := []string{"--keep-in-foreground", "--strict-order", "--bind-interfaces",
"--except-interface=lo",
"--pid-file=", // Disable attempt at writing a PID file.
"--no-ping", // --no-ping is very important to prevent delays to lease file updates.
fmt.Sprintf("--interface=%s", n.name)}
dnsmasqVersion, err := dnsmasq.GetVersion()
if err != nil {
return err
}
// --dhcp-rapid-commit option is only supported on >2.79
minVer, _ := version.NewDottedVersion("2.79")
if dnsmasqVersion.Compare(minVer) > 0 {
dnsmasqCmd = append(dnsmasqCmd, "--dhcp-rapid-commit")
}
if !daemon.Debug {
// --quiet options are only supported on >2.67
minVer, _ := version.NewDottedVersion("2.67")
if err == nil && dnsmasqVersion.Compare(minVer) > 0 {
dnsmasqCmd = append(dnsmasqCmd, []string{"--quiet-dhcp", "--quiet-dhcp6", "--quiet-ra"}...)
}
}
// Configure IPv4
if !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
// Parse the subnet
ip, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
if err != nil {
return err
}
// Update the dnsmasq config
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--listen-address=%s", ip.String()))
if n.HasDHCPv4() {
if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
}
if n.config["ipv4.dhcp.gateway"] != "" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=3,%s", n.config["ipv4.dhcp.gateway"]))
}
if mtu != "1500" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=26,%s", mtu))
}
dnsSearch := n.config["dns.search"]
if dnsSearch != "" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=119,%s", strings.Trim(dnsSearch, " ")))
}
expiry := "1h"
if n.config["ipv4.dhcp.expiry"] != "" {
expiry = n.config["ipv4.dhcp.expiry"]
}
if n.config["ipv4.dhcp.ranges"] != "" {
for _, dhcpRange := range strings.Split(n.config["ipv4.dhcp.ranges"], ",") {
dhcpRange = strings.TrimSpace(dhcpRange)
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s", strings.Replace(dhcpRange, "-", ",", -1), expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%s", GetIP(subnet, 2).String(), GetIP(subnet, -2).String(), expiry)}...)
}
}
// Add the address
_, err = shared.RunCommand("ip", "-4", "addr", "add", "dev", n.name, n.config["ipv4.address"])
if err != nil {
return err
}
// Configure NAT
if shared.IsTrue(n.config["ipv4.nat"]) {
//If a SNAT source address is specified, use that, otherwise default to using MASQUERADE mode.
var srcIP net.IP
if n.config["ipv4.nat.address"] != "" {
srcIP = net.ParseIP(n.config["ipv4.nat.address"])
}
if n.config["ipv4.nat.order"] == "after" {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, true)
if err != nil {
return err
}
} else {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, false)
if err != nil {
return err
}
}
}
// Add additional routes
if n.config["ipv4.routes"] != "" {
for _, route := range strings.Split(n.config["ipv4.routes"], ",") {
route = strings.TrimSpace(route)
_, err = shared.RunCommand("ip", "-4", "route", "add", "dev", n.name, route, "proto", "static")
if err != nil {
return err
}
}
}
// Restore container specific IPv4 routes to interface.
err = n.applyBootRoutesV4(ctRoutes)
if err != nil {
return err
}
}
// Remove any existing IPv6 firewall rules.
if usesIPv6Firewall(n.config) || usesIPv6Firewall(oldConfig) {
err = n.state.Firewall.NetworkClear(n.name, 6)
if err != nil {
return err
}
}
// Snapshot container specific IPv6 routes (added with boot proto) before removing IPv6 addresses.
// This is because the kernel removes any static routes on an interface when all addresses removed.
ctRoutes, err = n.bootRoutesV6()
if err != nil {
return err
}
// Flush all IPv6 addresses and routes
_, err = shared.RunCommand("ip", "-6", "addr", "flush", "dev", n.name, "scope", "global")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "-6", "route", "flush", "dev", n.name, "proto", "static")
if err != nil {
return err
}
// Configure IPv6
if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
// Enable IPv6 for the subnet
err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/disable_ipv6", n.name), "0")
if err != nil {
return err
}
// Parse the subnet
ip, subnet, err := net.ParseCIDR(n.config["ipv6.address"])
if err != nil {
return err
}
subnetSize, _ := subnet.Mask.Size()
if subnetSize > 64 {
n.logger.Warn("IPv6 networks with a prefix larger than 64 aren't properly supported by dnsmasq")
}
// Update the dnsmasq config
dnsmasqCmd = append(dnsmasqCmd, []string{fmt.Sprintf("--listen-address=%s", ip.String()), "--enable-ra"}...)
if n.HasDHCPv6() {
if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
// Setup basic iptables overrides for DHCP/DNS
err = n.state.Firewall.NetworkSetupDHCPDNSAccess(n.name, 6)
if err != nil {
return err
}
}
// Build DHCP configuration
if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
}
expiry := "1h"
if n.config["ipv6.dhcp.expiry"] != "" {
expiry = n.config["ipv6.dhcp.expiry"]
}
if shared.IsTrue(n.config["ipv6.dhcp.stateful"]) {
if n.config["ipv6.dhcp.ranges"] != "" {
for _, dhcpRange := range strings.Split(n.config["ipv6.dhcp.ranges"], ",") {
dhcpRange = strings.TrimSpace(dhcpRange)
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%d,%s", strings.Replace(dhcpRange, "-", ",", -1), subnetSize, expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%d,%s", GetIP(subnet, 2), GetIP(subnet, -1), subnetSize, expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-stateless,ra-names", n.name)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-only", n.name)}...)
}
// Allow forwarding
if n.config["ipv6.routing"] == "" || shared.IsTrue(n.config["ipv6.routing"]) {
// Get a list of proc entries
entries, err := ioutil.ReadDir("/proc/sys/net/ipv6/conf/")
if err != nil {
return err
}
// First set accept_ra to 2 for everything
for _, entry := range entries {
content, err := ioutil.ReadFile(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/accept_ra", entry.Name()))
if err == nil && string(content) != "1\n" {
continue
}
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", entry.Name()), "2")
if err != nil && !os.IsNotExist(err) {
return err
}
}
// Then set forwarding for all of them
for _, entry := range entries {
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/forwarding", entry.Name()), "1")
if err != nil && !os.IsNotExist(err) {
return err
}
}
if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 6, true)
if err != nil {
return err
}
}
} else {
if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 6, false)
if err != nil {
return err
}
}
}
// Add the address
_, err = shared.RunCommand("ip", "-6", "addr", "add", "dev", n.name, n.config["ipv6.address"])
if err != nil {
return err
}
// Configure NAT
if shared.IsTrue(n.config["ipv6.nat"]) {
var srcIP net.IP
if n.config["ipv6.nat.address"] != "" {
srcIP = net.ParseIP(n.config["ipv6.nat.address"])
}
if n.config["ipv6.nat.order"] == "after" {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, true)
if err != nil {
return err
}
} else {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, false)
if err != nil {
return err
}
}
}
// Add additional routes
if n.config["ipv6.routes"] != "" {
for _, route := range strings.Split(n.config["ipv6.routes"], ",") {
route = strings.TrimSpace(route)
_, err = shared.RunCommand("ip", "-6", "route", "add", "dev", n.name, route, "proto", "static")
if err != nil {
return err
}
}
}
// Restore container specific IPv6 routes to interface.
err = n.applyBootRoutesV6(ctRoutes)
if err != nil {
return err
}
}
// Configure the fan
dnsClustered := false
dnsClusteredAddress := ""
var overlaySubnet *net.IPNet
if n.config["bridge.mode"] == "fan" {
tunName := fmt.Sprintf("%s-fan", n.name)
// Parse the underlay
underlay := n.config["fan.underlay_subnet"]
_, underlaySubnet, err := net.ParseCIDR(underlay)
if err != nil {
return nil
}
// Parse the overlay
overlay := n.config["fan.overlay_subnet"]
if overlay == "" {
overlay = "240.0.0.0/8"
}
_, overlaySubnet, err = net.ParseCIDR(overlay)
if err != nil {
return err
}
// Get the address
fanAddress, devName, devAddr, err := n.fanAddress(underlaySubnet, overlaySubnet)
if err != nil {
return err
}
addr := strings.Split(fanAddress, "/")
if n.config["fan.type"] == "ipip" {
fanAddress = fmt.Sprintf("%s/24", addr[0])
}
// Update the MTU based on overlay device (if available)
fanMtuInt, err := GetDevMTU(devName)
if err == nil {
// Apply overhead
if n.config["fan.type"] == "ipip" {
fanMtuInt = fanMtuInt - 20
} else {
fanMtuInt = fanMtuInt - 50
}
// Apply changes
fanMtu := fmt.Sprintf("%d", fanMtuInt)
if fanMtu != mtu {
mtu = fanMtu
if n.config["bridge.driver"] != "openvswitch" {
_, err = shared.RunCommand("ip", "link", "set", "dev", fmt.Sprintf("%s-mtu", n.name), "mtu", mtu)
if err != nil {
return err
}
}
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "mtu", mtu)
if err != nil {
return err
}
}
}
// Parse the host subnet
_, hostSubnet, err := net.ParseCIDR(fmt.Sprintf("%s/24", addr[0]))
if err != nil {
return err
}
// Add the address
_, err = shared.RunCommand("ip", "-4", "addr", "add", "dev", n.name, fanAddress)
if err != nil {
return err
}
// Update the dnsmasq config
expiry := "1h"
if n.config["ipv4.dhcp.expiry"] != "" {
expiry = n.config["ipv4.dhcp.expiry"]
}
dnsmasqCmd = append(dnsmasqCmd, []string{
fmt.Sprintf("--listen-address=%s", addr[0]),
"--dhcp-no-override", "--dhcp-authoritative",
fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")),
fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts")),
"--dhcp-range", fmt.Sprintf("%s,%s,%s", GetIP(hostSubnet, 2).String(), GetIP(hostSubnet, -2).String(), expiry)}...)
// Setup the tunnel
if n.config["fan.type"] == "ipip" {
_, err = shared.RunCommand("ip", "-4", "route", "flush", "dev", "tunl0")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", "tunl0", "up")
if err != nil {
return err
}
// Fails if the map is already set
shared.RunCommand("ip", "link", "change", "dev", "tunl0", "type", "ipip", "fan-map", fmt.Sprintf("%s:%s", overlay, underlay))
_, err = shared.RunCommand("ip", "route", "add", overlay, "dev", "tunl0", "src", addr[0])
if err != nil {
return err
}
} else {
vxlanID := fmt.Sprintf("%d", binary.BigEndian.Uint32(overlaySubnet.IP.To4())>>8)
_, err = shared.RunCommand("ip", "link", "add", tunName, "type", "vxlan", "id", vxlanID, "dev", devName, "dstport", "0", "local", devAddr, "fan-map", fmt.Sprintf("%s:%s", overlay, underlay))
if err != nil {
return err
}
err = AttachInterface(n.name, tunName)
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", tunName, "mtu", mtu, "up")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
if err != nil {
return err
}
}
// Configure NAT
if n.config["ipv4.nat"] == "" || shared.IsTrue(n.config["ipv4.nat"]) {
if n.config["ipv4.nat.order"] == "after" {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, overlaySubnet, nil, true)
if err != nil {
return err
}
} else {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, overlaySubnet, nil, false)
if err != nil {
return err
}
}
}
// Setup clustered DNS
clusterAddress, err := node.ClusterAddress(n.state.Node)
if err != nil {
return err
}
// If clusterAddress is non-empty, this indicates the intention for this node to be
// part of a cluster and so we should ensure that dnsmasq and forkdns are started
// in cluster mode. Note: During LXD initialisation the cluster may not actually be
// setup yet, but we want the DNS processes to be ready for when it is.
if clusterAddress != "" {
dnsClustered = true
}
dnsClusteredAddress = strings.Split(fanAddress, "/")[0]
}
// Configure tunnels
for _, tunnel := range tunnels {
getConfig := func(key string) string {
return n.config[fmt.Sprintf("tunnel.%s.%s", tunnel, key)]
}
tunProtocol := getConfig("protocol")
tunLocal := getConfig("local")
tunRemote := getConfig("remote")
tunName := fmt.Sprintf("%s-%s", n.name, tunnel)
// Configure the tunnel
cmd := []string{"ip", "link", "add", "dev", tunName}
if tunProtocol == "gre" {
// Skip partial configs
if tunProtocol == "" || tunLocal == "" || tunRemote == "" {
continue
}
cmd = append(cmd, []string{"type", "gretap", "local", tunLocal, "remote", tunRemote}...)
} else if tunProtocol == "vxlan" {
tunGroup := getConfig("group")
tunInterface := getConfig("interface")
// Skip partial configs
if tunProtocol == "" {
continue
}
cmd = append(cmd, []string{"type", "vxlan"}...)
if tunLocal != "" && tunRemote != "" {
cmd = append(cmd, []string{"local", tunLocal, "remote", tunRemote}...)
} else {
if tunGroup == "" {
tunGroup = "239.0.0.1"
}
devName := tunInterface
if devName == "" {
_, devName, err = DefaultGatewaySubnetV4()
if err != nil {
return err
}
}
cmd = append(cmd, []string{"group", tunGroup, "dev", devName}...)
}
tunPort := getConfig("port")
if tunPort == "" {
tunPort = "0"
}
cmd = append(cmd, []string{"dstport", tunPort}...)
tunID := getConfig("id")
if tunID == "" {
tunID = "1"
}
cmd = append(cmd, []string{"id", tunID}...)
tunTTL := getConfig("ttl")
if tunTTL == "" {
tunTTL = "1"
}
cmd = append(cmd, []string{"ttl", tunTTL}...)
}
// Create the interface
_, err = shared.RunCommand(cmd[0], cmd[1:]...)
if err != nil {
return err
}
// Bridge it and bring up
err = AttachInterface(n.name, tunName)
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", tunName, "mtu", mtu, "up")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
if err != nil {
return err
}
}
// Generate and load apparmor profiles.
err = apparmor.NetworkLoad(n.state, n)
if err != nil {
return err
}
// Kill any existing dnsmasq and forkdns daemon for this network
err = dnsmasq.Kill(n.name, false)
if err != nil {
return err
}
err = n.killForkDNS()
if err != nil {
return err
}
// Configure dnsmasq
if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) || !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
// Setup the dnsmasq domain
dnsDomain := n.config["dns.domain"]
if dnsDomain == "" {
dnsDomain = "lxd"
}
if n.config["dns.mode"] != "none" {
if dnsClustered {
dnsmasqCmd = append(dnsmasqCmd, "-s", dnsDomain)
dnsmasqCmd = append(dnsmasqCmd, "-S", fmt.Sprintf("/%s/%s#1053", dnsDomain, dnsClusteredAddress))
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--rev-server=%s,%s#1053", overlaySubnet, dnsClusteredAddress))
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"-s", dnsDomain, "-S", fmt.Sprintf("/%s/", dnsDomain)}...)
}
}
// Create a config file to contain additional config (and to prevent dnsmasq from reading /etc/dnsmasq.conf)
err = ioutil.WriteFile(shared.VarPath("networks", n.name, "dnsmasq.raw"), []byte(fmt.Sprintf("%s\n", n.config["raw.dnsmasq"])), 0644)
if err != nil {
return err
}
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--conf-file=%s", shared.VarPath("networks", n.name, "dnsmasq.raw")))
// Attempt to drop privileges
if n.state.OS.UnprivUser != "" {
dnsmasqCmd = append(dnsmasqCmd, []string{"-u", n.state.OS.UnprivUser}...)
}
// Create DHCP hosts directory
if !shared.PathExists(shared.VarPath("networks", n.name, "dnsmasq.hosts")) {
err = os.MkdirAll(shared.VarPath("networks", n.name, "dnsmasq.hosts"), 0755)
if err != nil {
return err
}
}
// Check for dnsmasq
_, err := exec.LookPath("dnsmasq")
if err != nil {
return fmt.Errorf("dnsmasq is required for LXD managed bridges")
}
// Update the static leases
err = UpdateDNSMasqStatic(n.state, n.name)
if err != nil {
return err
}
// Create subprocess object dnsmasq.
p, err := subprocess.NewProcess(command, dnsmasqCmd, "", "")
if err != nil {
return fmt.Errorf("Failed to create subprocess: %s", err)
}
// Apply AppArmor confinement.
if n.config["raw.dnsmasq"] == "" {
p.SetApparmor(apparmor.DnsmasqProfileName(n))
} else {
n.logger.Warn("Skipping AppArmor for dnsmasq due to raw.dnsmasq being set", log.Ctx{"name": n.name})
}
// Start dnsmasq.
err = p.Start()
if err != nil {
return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(dnsmasqCmd, " "), err)
}
err = p.Save(shared.VarPath("networks", n.name, "dnsmasq.pid"))
if err != nil {
// Kill Process if started, but could not save the file
err2 := p.Stop()
if err != nil {
return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
}
return fmt.Errorf("Failed to save subprocess details: %s", err)
}
// Spawn DNS forwarder if needed (backgrounded to avoid deadlocks during cluster boot)
if dnsClustered {
// Create forkdns servers directory
if !shared.PathExists(shared.VarPath("networks", n.name, ForkdnsServersListPath)) {
err = os.MkdirAll(shared.VarPath("networks", n.name, ForkdnsServersListPath), 0755)
if err != nil {
return err
}
}
// Create forkdns servers.conf file if doesn't exist
f, err := os.OpenFile(shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile), os.O_RDONLY|os.O_CREATE, 0666)
if err != nil {
return err
}
f.Close()
err = n.spawnForkDNS(dnsClusteredAddress)
if err != nil {
return err
}
}
} else {
// Clean up old dnsmasq config if exists and we are not starting dnsmasq.
leasesPath := shared.VarPath("networks", n.name, "dnsmasq.leases")
if shared.PathExists(leasesPath) {
err := os.Remove(leasesPath)
if err != nil {
return errors.Wrapf(err, "Failed to remove old dnsmasq leases file '%s'", leasesPath)
}
}
// And same for our PID file.
pidPath := shared.VarPath("networks", n.name, "dnsmasq.pid")
if shared.PathExists(pidPath) {
err := os.Remove(pidPath)
if err != nil {
return errors.Wrapf(err, "Failed to remove old dnsmasq pid file '%s'", pidPath)
}
}
}
return nil
}
// Stop stops the network.
//
// It tears down the bridge interface (via OVS or iproute2 depending on
// bridge.driver), clears firewall rules, kills the dnsmasq/forkdns daemons,
// removes tunnel devices and unloads the AppArmor profiles.
func (n *bridge) Stop() error {
	// Nothing to do if the bridge isn't currently up.
	if !n.isRunning() {
		return nil
	}

	// Destroy the bridge interface using the configured driver.
	if n.config["bridge.driver"] == "openvswitch" {
		ovs := openvswitch.NewOVS()
		err := ovs.BridgeDelete(n.name)
		if err != nil {
			return err
		}
	} else {
		_, err := shared.RunCommand("ip", "link", "del", "dev", n.name)
		if err != nil {
			return err
		}
	}

	// Cleanup firewall rules.
	if usesIPv4Firewall(n.config) {
		err := n.state.Firewall.NetworkClear(n.name, 4)
		if err != nil {
			return err
		}
	}

	if usesIPv6Firewall(n.config) {
		err := n.state.Firewall.NetworkClear(n.name, 6)
		if err != nil {
			return err
		}
	}

	// Kill any existing dnsmasq and forkdns daemon for this network.
	err := dnsmasq.Kill(n.name, false)
	if err != nil {
		return err
	}

	err = n.killForkDNS()
	if err != nil {
		return err
	}

	// Get a list of interfaces.
	ifaces, err := net.Interfaces()
	if err != nil {
		return err
	}

	// Cleanup any existing tunnel device (tunnels are named "<bridge>-<tunnel>").
	for _, iface := range ifaces {
		if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) {
			_, err = shared.RunCommand("ip", "link", "del", "dev", iface.Name)
			if err != nil {
				return err
			}
		}
	}

	// Unload apparmor profiles.
	err = apparmor.NetworkUnload(n.state, n)
	if err != nil {
		return err
	}

	return nil
}
// Update updates the network. Accepts notification boolean indicating if this update request is coming from a
// cluster notification, in which case do not update the database, just apply local changes needed.
func (n *bridge) Update(newNetwork api.NetworkPut, targetNode string, clusterNotification bool) error {
	n.logger.Debug("Update", log.Ctx{"clusterNotification": clusterNotification, "newNetwork": newNetwork})

	// When switching to a fan bridge, auto-detect the underlay if not specified.
	if newNetwork.Config["bridge.mode"] == "fan" && newNetwork.Config["fan.underlay_subnet"] == "" {
		newNetwork.Config["fan.underlay_subnet"] = "auto"
	}

	// Populate auto fields.
	if err := fillAuto(newNetwork.Config); err != nil {
		return err
	}

	dbUpdateNeeded, changedKeys, oldNetwork, err := n.common.configChanged(newNetwork)
	if err != nil {
		return err
	}

	if !dbUpdateNeeded {
		return nil // Nothing changed.
	}

	revert := revert.New()
	defer revert.Fail()

	// Register a reverter which rolls back both the database and the local bridge.
	revert.Add(func() {
		// Reset changes to all nodes and database.
		n.common.update(oldNetwork, targetNode, clusterNotification)

		// Reset any change that was made to local bridge.
		n.setup(newNetwork.Config)
	})

	// Bring the bridge down entirely if the driver has changed.
	if n.isRunning() && shared.StringInSlice("bridge.driver", changedKeys) {
		if err := n.Stop(); err != nil {
			return err
		}
	}

	// Detach any external interfaces that should no longer be attached.
	if n.isRunning() && shared.StringInSlice("bridge.external_interfaces", changedKeys) {
		// Interfaces requested by the new config.
		wanted := []string{}
		for _, dev := range strings.Split(newNetwork.Config["bridge.external_interfaces"], ",") {
			wanted = append(wanted, strings.TrimSpace(dev))
		}

		// Detach previously-attached interfaces not in the new set.
		for _, dev := range strings.Split(oldNetwork.Config["bridge.external_interfaces"], ",") {
			dev = strings.TrimSpace(dev)
			if dev == "" || shared.StringInSlice(dev, wanted) {
				continue
			}

			if shared.PathExists(fmt.Sprintf("/sys/class/net/%s", dev)) {
				if err := DetachInterface(n.name, dev); err != nil {
					return err
				}
			}
		}
	}

	// Apply changes to database.
	if err := n.common.update(newNetwork, targetNode, clusterNotification); err != nil {
		return err
	}

	// Restart the network if needed.
	if len(changedKeys) > 0 {
		if err := n.setup(oldNetwork.Config); err != nil {
			return err
		}
	}

	revert.Success()
	return nil
}
// spawnForkDNS starts the forkdns daemon for this network, listening on port
// 1053 of listenAddress, and records its PID file under the network directory.
func (n *bridge) spawnForkDNS(listenAddress string) error {
	// Setup the dnsmasq domain.
	dnsDomain := n.config["dns.domain"]
	if dnsDomain == "" {
		dnsDomain = "lxd"
	}

	// Spawn the daemon using subprocess.
	command := n.state.OS.ExecPath
	forkdnsargs := []string{"forkdns",
		fmt.Sprintf("%s:1053", listenAddress),
		dnsDomain,
		n.name}

	logPath := shared.LogPath(fmt.Sprintf("forkdns.%s.log", n.name))

	p, err := subprocess.NewProcess(command, forkdnsargs, logPath, logPath)
	if err != nil {
		return fmt.Errorf("Failed to create subprocess: %s", err)
	}

	err = p.Start()
	if err != nil {
		return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(forkdnsargs, " "), err)
	}

	err = p.Save(shared.VarPath("networks", n.name, "forkdns.pid"))
	if err != nil {
		// Kill the started process since its PID file could not be saved.
		// Check err2 (the Stop error), not err, which is always non-nil here.
		err2 := p.Stop()
		if err2 != nil {
			return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
		}

		return fmt.Errorf("Failed to save subprocess details: %s", err)
	}

	return nil
}
// HandleHeartbeat refreshes forkdns servers. Retrieves the IPv4 address of each cluster node (excluding ourselves)
// for this network. It then updates the forkdns server list file if there are changes.
func (n *bridge) HandleHeartbeat(heartbeatData *cluster.APIHeartbeat) error {
	addresses := []string{}
	localAddress, err := node.HTTPSAddress(n.state.Node)
	if err != nil {
		return err
	}

	n.logger.Info("Refreshing forkdns peers")

	cert := n.state.Endpoints.NetworkCert()
	// NOTE: the loop variable shadows the imported "node" package inside the loop.
	for _, node := range heartbeatData.Members {
		if node.Address == localAddress {
			// No need to query ourselves.
			continue
		}

		client, err := cluster.Connect(node.Address, cert, true)
		if err != nil {
			return err
		}

		state, err := client.GetNetworkState(n.name)
		if err != nil {
			return err
		}

		for _, addr := range state.Addresses {
			// Only get IPv4 addresses of nodes on network.
			if addr.Family != "inet" || addr.Scope != "global" {
				continue
			}

			// Record only the first matching global IPv4 address per node.
			addresses = append(addresses, addr.Address)
			break
		}
	}

	// Compare current stored list to retrieved list and see if we need to update.
	curList, err := ForkdnsServersList(n.name)
	if err != nil {
		// Only warn here, but continue on to regenerate the servers list from cluster info.
		n.logger.Warn("Failed to load existing forkdns server list", log.Ctx{"err": err})
	}

	// If current list is same as cluster list, nothing to do.
	if err == nil && reflect.DeepEqual(curList, addresses) {
		return nil
	}

	err = n.updateForkdnsServersFile(addresses)
	if err != nil {
		return err
	}

	n.logger.Info("Updated forkdns server list", log.Ctx{"nodes": addresses})
	return nil
}
// getTunnels returns the deduplicated tunnel names found in the network
// config ("tunnel.<name>.<key>" entries).
func (n *bridge) getTunnels() []string {
	tunnels := []string{}

	for key := range n.config {
		if !strings.HasPrefix(key, "tunnel.") {
			continue
		}

		// Extract the "<name>" component of "tunnel.<name>.<key>".
		name := strings.SplitN(key, ".", 3)[1]
		if !shared.StringInSlice(name, tunnels) {
			tunnels = append(tunnels, name)
		}
	}

	return tunnels
}
// bootRoutesV4 returns a list of IPv4 boot routes on the network's device.
// Any "linkdown" flag is stripped so the routes can be re-applied verbatim.
func (n *bridge) bootRoutesV4() ([]string, error) {
	routes := []string{}

	cmd := exec.Command("ip", "-4", "route", "show", "dev", n.name, "proto", "boot")
	ipOut, err := cmd.StdoutPipe()
	if err != nil {
		return routes, err
	}

	// Check the Start error: the original ignored it, silently returning an
	// empty route list when "ip" could not be executed.
	err = cmd.Start()
	if err != nil {
		return routes, err
	}

	scanner := bufio.NewScanner(ipOut)
	for scanner.Scan() {
		route := strings.Replace(scanner.Text(), "linkdown", "", -1)
		routes = append(routes, route)
	}

	// Surface pipe read errors as well as a non-zero exit from "ip".
	err = scanner.Err()
	if err != nil {
		return routes, err
	}

	err = cmd.Wait()
	if err != nil {
		return routes, err
	}

	return routes, nil
}
// bootRoutesV6 returns a list of IPv6 boot routes on the network's device.
// Any "linkdown" flag is stripped so the routes can be re-applied verbatim.
func (n *bridge) bootRoutesV6() ([]string, error) {
	routes := []string{}

	cmd := exec.Command("ip", "-6", "route", "show", "dev", n.name, "proto", "boot")
	ipOut, err := cmd.StdoutPipe()
	if err != nil {
		return routes, err
	}

	// Check the Start error: the original ignored it, silently returning an
	// empty route list when "ip" could not be executed.
	err = cmd.Start()
	if err != nil {
		return routes, err
	}

	scanner := bufio.NewScanner(ipOut)
	for scanner.Scan() {
		route := strings.Replace(scanner.Text(), "linkdown", "", -1)
		routes = append(routes, route)
	}

	// Surface pipe read errors as well as a non-zero exit from "ip".
	err = scanner.Err()
	if err != nil {
		return routes, err
	}

	err = cmd.Wait()
	if err != nil {
		return routes, err
	}

	return routes, nil
}
// applyBootRoutesV4 applies a list of IPv4 boot routes to the network's device.
func (n *bridge) applyBootRoutesV4(routes []string) error {
	for _, route := range routes {
		args := append([]string{"-4", "route", "replace", "dev", n.name, "proto", "boot"}, strings.Fields(route)...)
		if _, err := shared.RunCommand("ip", args...); err != nil {
			return err
		}
	}

	return nil
}
// applyBootRoutesV6 applies a list of IPv6 boot routes to the network's device.
func (n *bridge) applyBootRoutesV6(routes []string) error {
	for _, route := range routes {
		args := append([]string{"-6", "route", "replace", "dev", n.name, "proto", "boot"}, strings.Fields(route)...)
		if _, err := shared.RunCommand("ip", args...); err != nil {
			return err
		}
	}

	return nil
}
// fanAddress computes this host's fan address from the underlay and overlay
// subnets. It returns the fan address in CIDR form, the underlay device name
// and the device's own (underlay) address.
func (n *bridge) fanAddress(underlay *net.IPNet, overlay *net.IPNet) (string, string, string, error) {
	// Sanity checks.
	underlaySize, _ := underlay.Mask.Size()
	if underlaySize != 16 && underlaySize != 24 {
		return "", "", "", fmt.Errorf("Only /16 or /24 underlays are supported at this time")
	}

	overlaySize, _ := overlay.Mask.Size()
	if overlaySize != 8 && overlaySize != 16 {
		return "", "", "", fmt.Errorf("Only /8 or /16 overlays are supported at this time")
	}

	if overlaySize+(32-underlaySize)+8 > 32 {
		return "", "", "", fmt.Errorf("Underlay or overlay networks too large to accommodate the FAN")
	}

	// Get the IP of this host on the underlay network.
	ip, dev, err := n.addressForSubnet(underlay)
	if err != nil {
		return "", "", "", err
	}

	// Capture the string form now: To4() below may return a slice aliasing
	// ip's backing array, so the byte writes further down would otherwise
	// also change what ip.String() prints.
	ipStr := ip.String()

	// Force into IPv4 format.
	ipBytes := ip.To4()
	if ipBytes == nil {
		return "", "", "", fmt.Errorf("Invalid IPv4: %s", ip)
	}

	// Compute the fan address by folding the underlay host bits into the
	// overlay prefix.
	ipBytes[0] = overlay.IP[0]
	if overlaySize == 16 {
		ipBytes[1] = overlay.IP[1]
		ipBytes[2] = ipBytes[3]
	} else if underlaySize == 24 {
		ipBytes[1] = ipBytes[3]
		ipBytes[2] = 0
	} else if underlaySize == 16 {
		ipBytes[1] = ipBytes[2]
		ipBytes[2] = ipBytes[3]
	}

	// The host part of the fan address is always .1.
	ipBytes[3] = 1

	// Return nil explicitly: the original returned the stale (always nil at
	// this point) err, which obscures the success path.
	return fmt.Sprintf("%s/%d", ipBytes.String(), overlaySize), dev, ipStr, nil
}
// addressForSubnet returns the first local address contained in subnet,
// together with the name of the interface carrying it.
func (n *bridge) addressForSubnet(subnet *net.IPNet) (net.IP, string, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return net.IP{}, "", err
	}

	for _, netIf := range ifaces {
		addrs, err := netIf.Addrs()
		if err != nil {
			// Skip interfaces we can't query.
			continue
		}

		for _, ifAddr := range addrs {
			ip, _, err := net.ParseCIDR(ifAddr.String())
			if err != nil {
				continue
			}

			if !subnet.Contains(ip) {
				continue
			}

			return ip, netIf.Name, nil
		}
	}

	return net.IP{}, "", fmt.Errorf("No address found in subnet")
}
// killForkDNS stops the forkdns daemon for this network, if one is recorded
// as running. A missing PID file or an already-stopped process is not an error.
func (n *bridge) killForkDNS() error {
	// Check if we have a running forkdns at all.
	pidPath := shared.VarPath("networks", n.name, "forkdns.pid")

	// If the pid file doesn't exist, there is no process to kill.
	if !shared.PathExists(pidPath) {
		return nil
	}

	p, err := subprocess.ImportProcess(pidPath)
	if err != nil {
		return fmt.Errorf("Could not read pid file: %s", err)
	}

	err = p.Stop()
	if err != nil && err != subprocess.ErrNotRunning {
		// Fixed message: this kills forkdns, not dnsmasq.
		return fmt.Errorf("Unable to kill forkdns: %s", err)
	}

	return nil
}
// updateForkdnsServersFile takes a list of node addresses and writes them atomically to
// the forkdns.servers file ready for forkdns to notice and re-apply its config.
func (n *bridge) updateForkdnsServersFile(addresses []string) error {
	// We don't want to race with ourselves here.
	forkdnsServersLock.Lock()
	defer forkdnsServersLock.Unlock()

	permName := shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile)
	tmpName := permName + ".tmp"

	// Open tmp file and truncate.
	tmpFile, err := os.Create(tmpName)
	if err != nil {
		return err
	}
	// No-op on the success path (already closed); cleanup on early return.
	defer tmpFile.Close()

	for _, address := range addresses {
		_, err := tmpFile.WriteString(address + "\n")
		if err != nil {
			return err
		}
	}

	// Close explicitly and check the error so a failed flush is caught before
	// the file is renamed into place (the original ignored this error and
	// could rename a truncated file).
	err = tmpFile.Close()
	if err != nil {
		return err
	}

	// Atomically rename finished file into permanent location so forkdns can pick it up.
	err = os.Rename(tmpName, permName)
	if err != nil {
		return err
	}

	return nil
}
// hasIPv4Firewall indicates whether the network has IPv4 firewall enabled.
// An unset "ipv4.firewall" key counts as enabled.
func (n *bridge) hasIPv4Firewall() bool {
	return n.config["ipv4.firewall"] == "" || shared.IsTrue(n.config["ipv4.firewall"])
}
// hasIPv6Firewall indicates whether the network has IPv6 firewall enabled.
// An unset "ipv6.firewall" key counts as enabled.
func (n *bridge) hasIPv6Firewall() bool {
	return n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"])
}
|
package drivers
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"golang.org/x/sys/unix"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
)
// wipeDirectory removes every entry inside path without removing the
// directory itself. A missing directory is not an error.
func wipeDirectory(path string) error {
	// List all entries.
	entries, err := ioutil.ReadDir(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}

		// Propagate real failures (permissions, not-a-directory, I/O errors).
		// The original swallowed these and returned nil.
		return err
	}

	// Individually wipe all entries.
	for _, entry := range entries {
		entryPath := filepath.Join(path, entry.Name())
		err := os.RemoveAll(entryPath)
		if err != nil {
			return err
		}
	}

	return nil
}
// forceUnmount unmounts path, falling back to a lazy (detached) unmount when
// a clean unmount fails. It loops until path is no longer a mount point
// (handles stacked mounts) and reports whether anything was unmounted.
func forceUnmount(path string) (bool, error) {
	unmounted := false

	for shared.IsMountPoint(path) {
		// Try a clean unmount first, then fall back to lazy unmounting.
		if err := unix.Unmount(path, 0); err != nil {
			if err := unix.Unmount(path, unix.MNT_DETACH); err != nil {
				return false, err
			}
		}

		unmounted = true
	}

	return unmounted, nil
}
// sameMount reports whether srcPath and dstPath resolve to the same file on
// the same filesystem (matching statfs identity and inode).
func sameMount(srcPath string, dstPath string) bool {
	// Compare the filesystems backing both paths.
	var srcFs, dstFs unix.Statfs_t
	if unix.Statfs(srcPath, &srcFs) != nil {
		return false
	}

	if unix.Statfs(dstPath, &dstFs) != nil {
		return false
	}

	if srcFs.Type != dstFs.Type || srcFs.Fsid != dstFs.Fsid {
		return false
	}

	// Compare the inodes of both paths.
	var srcSt, dstSt unix.Stat_t
	if unix.Stat(srcPath, &srcSt) != nil {
		return false
	}

	if unix.Stat(dstPath, &dstSt) != nil {
		return false
	}

	return srcSt.Ino == dstSt.Ino
}
// tryMount attempts the mount up to 20 times over roughly 10 seconds,
// returning nil on the first success or the last mount error on failure.
func tryMount(src string, dst string, fs string, flags uintptr, options string) error {
	var err error

	// Attempt 20 mounts over 10s.
	for i := 0; i < 20; i++ {
		err = unix.Mount(src, dst, fs, flags, options)
		if err == nil {
			return nil
		}

		// Don't sleep after the final failed attempt; the original added a
		// pointless 500ms delay before returning the error.
		if i < 19 {
			time.Sleep(500 * time.Millisecond)
		}
	}

	return err
}
// vfsResources returns space and inode usage for the filesystem backing path.
func vfsResources(path string) (*api.ResourcesStoragePool, error) {
	// Get the VFS information.
	st, err := shared.Statvfs(path)
	if err != nil {
		return nil, err
	}

	// Fill in the struct.
	blockSize := uint64(st.Bsize)
	res := &api.ResourcesStoragePool{}
	res.Space.Total = st.Blocks * blockSize
	res.Space.Used = (st.Blocks - st.Bfree) * blockSize

	// Some filesystems (e.g. btrfs) allocate inodes dynamically and report none.
	if st.Files > 0 {
		res.Inodes.Total = st.Files
		res.Inodes.Used = st.Files - st.Ffree
	}

	return res, nil
}
// GetPoolMountPoint returns the mountpoint of the given pool:
// {LXD_DIR}/storage-pools/<pool>
func GetPoolMountPoint(poolName string) string {
	return shared.VarPath("storage-pools", poolName)
}
// GetVolumeMountPoint returns the mount path for a specific volume based on its pool and type and
// whether it is a snapshot or not.
// For VolumeTypeImage the volName is the image fingerprint.
func GetVolumeMountPoint(poolName string, volType VolumeType, volName string) string {
	// Snapshots live in a "<type>-snapshots" sibling directory.
	typeDir := string(volType)
	if shared.IsSnapshot(volName) {
		typeDir = fmt.Sprintf("%s-snapshots", typeDir)
	}

	return shared.VarPath("storage-pools", poolName, typeDir, project.Prefix("default", volName))
}
// GetVolumeSnapshotDir gets the snapshot mount directory for the parent volume.
func GetVolumeSnapshotDir(poolName string, volType VolumeType, volName string) (string, error) {
	if shared.IsSnapshot(volName) {
		return "", fmt.Errorf("Volume cannot be a snapshot")
	}

	snapDir := fmt.Sprintf("%s-snapshots", string(volType))
	return shared.VarPath("storage-pools", poolName, snapDir, project.Prefix("default", volName)), nil
}
// GetSnapshotVolumeName returns the full volume name for a parent volume and snapshot name.
func GetSnapshotVolumeName(parentName, snapshotName string) string {
return fmt.Sprintf("%s%s%s", parentName, shared.SnapshotDelimiter, snapshotName)
}
// DeleteParentSnapshotDirIfEmpty removes the parent snapshot directory if it is empty.
// It accepts the volume name of a snapshot in the form "volume/snap" and the volume path of the
// snapshot. It will then remove the snapshots directory above "/snap" if it is empty.
func DeleteParentSnapshotDirIfEmpty(volName string, volPath string) error {
	_, snapName, isSnap := shared.ContainerGetParentAndSnapshotName(volName)
	if !isSnap {
		return fmt.Errorf("Volume is not a snapshot")
	}

	// Strip the snapshot name from the volume path to get the parent
	// snapshots directory.
	snapshotsPath := strings.TrimSuffix(volPath, snapName)

	isEmpty, err := shared.PathIsEmpty(snapshotsPath)
	if err != nil {
		return err
	}

	if !isEmpty {
		return nil
	}

	return os.Remove(snapshotsPath)
}
lxd/storage/drivers/utils: Rename GetPoolMountPoint/GetVolumeMountPoint to GetPoolMountPath/GetVolumeMountPath
Uses the "path" suffix to better indicate what is returned; "point" seemed rather vague.
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package drivers
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"golang.org/x/sys/unix"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
)
// wipeDirectory deletes all entries inside path, leaving the directory itself
// in place. A non-existent path is treated as already wiped.
func wipeDirectory(path string) error {
	// List all entries.
	entries, err := ioutil.ReadDir(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}

		// Propagate real failures (permissions, not-a-directory, I/O errors).
		// The original swallowed these and returned nil.
		return err
	}

	// Individually wipe all entries.
	for _, entry := range entries {
		entryPath := filepath.Join(path, entry.Name())
		err := os.RemoveAll(entryPath)
		if err != nil {
			return err
		}
	}

	return nil
}
// forceUnmount unmounts path, retrying with a lazy (detached) unmount when a
// clean unmount fails, and keeps going until path is no longer a mount point
// (stacked mounts). The boolean reports whether any unmount was performed.
func forceUnmount(path string) (bool, error) {
	unmounted := false

	for shared.IsMountPoint(path) {
		// Clean unmount first, lazy unmount as the fallback.
		if err := unix.Unmount(path, 0); err != nil {
			if err := unix.Unmount(path, unix.MNT_DETACH); err != nil {
				return false, err
			}
		}

		unmounted = true
	}

	return unmounted, nil
}
// sameMount reports whether both paths refer to the same file on the same
// filesystem, comparing statfs identity first and then the inode numbers.
func sameMount(srcPath string, dstPath string) bool {
	// Filesystem identity of both paths.
	var srcFs, dstFs unix.Statfs_t
	if unix.Statfs(srcPath, &srcFs) != nil {
		return false
	}

	if unix.Statfs(dstPath, &dstFs) != nil {
		return false
	}

	if srcFs.Type != dstFs.Type || srcFs.Fsid != dstFs.Fsid {
		return false
	}

	// Inode of both paths.
	var srcSt, dstSt unix.Stat_t
	if unix.Stat(srcPath, &srcSt) != nil {
		return false
	}

	if unix.Stat(dstPath, &dstSt) != nil {
		return false
	}

	return srcSt.Ino == dstSt.Ino
}
// tryMount retries the mount up to 20 times over roughly 10 seconds and
// returns nil on the first success, otherwise the last mount error.
func tryMount(src string, dst string, fs string, flags uintptr, options string) error {
	var err error

	// Attempt 20 mounts over 10s.
	for i := 0; i < 20; i++ {
		err = unix.Mount(src, dst, fs, flags, options)
		if err == nil {
			return nil
		}

		// Skip the sleep after the final failed attempt; the original added
		// a pointless 500ms delay before returning the error.
		if i < 19 {
			time.Sleep(500 * time.Millisecond)
		}
	}

	return err
}
// vfsResources reports space and inode usage of the filesystem backing path.
func vfsResources(path string) (*api.ResourcesStoragePool, error) {
	// Query the VFS.
	st, err := shared.Statvfs(path)
	if err != nil {
		return nil, err
	}

	// Populate the result.
	bs := uint64(st.Bsize)
	res := &api.ResourcesStoragePool{}
	res.Space.Total = st.Blocks * bs
	res.Space.Used = (st.Blocks - st.Bfree) * bs

	// Filesystems with dynamic inode allocation (e.g. btrfs) report 0 files.
	if st.Files > 0 {
		res.Inodes.Total = st.Files
		res.Inodes.Used = st.Files - st.Ffree
	}

	return res, nil
}
// GetPoolMountPath returns the mountpoint of the given pool:
// {LXD_DIR}/storage-pools/<pool>
func GetPoolMountPath(poolName string) string {
	return shared.VarPath("storage-pools", poolName)
}
// GetVolumeMountPath returns the mount path for a specific volume based on its pool and type and
// whether it is a snapshot or not.
// For VolumeTypeImage the volName is the image fingerprint.
func GetVolumeMountPath(poolName string, volType VolumeType, volName string) string {
	// Snapshots are stored in a "<type>-snapshots" sibling directory.
	typeDir := string(volType)
	if shared.IsSnapshot(volName) {
		typeDir = fmt.Sprintf("%s-snapshots", typeDir)
	}

	return shared.VarPath("storage-pools", poolName, typeDir, project.Prefix("default", volName))
}
// GetVolumeSnapshotDir gets the snapshot mount directory for the parent volume.
func GetVolumeSnapshotDir(poolName string, volType VolumeType, volName string) (string, error) {
	if shared.IsSnapshot(volName) {
		return "", fmt.Errorf("Volume cannot be a snapshot")
	}

	snapDir := fmt.Sprintf("%s-snapshots", string(volType))
	return shared.VarPath("storage-pools", poolName, snapDir, project.Prefix("default", volName)), nil
}
// GetSnapshotVolumeName returns the full volume name for a parent volume and snapshot name.
func GetSnapshotVolumeName(parentName, snapshotName string) string {
return fmt.Sprintf("%s%s%s", parentName, shared.SnapshotDelimiter, snapshotName)
}
// DeleteParentSnapshotDirIfEmpty removes the parent snapshot directory if it is empty.
// It accepts the volume name of a snapshot in the form "volume/snap" and the volume path of the
// snapshot. It will then remove the snapshots directory above "/snap" if it is empty.
func DeleteParentSnapshotDirIfEmpty(volName string, volPath string) error {
	_, snapName, isSnap := shared.ContainerGetParentAndSnapshotName(volName)
	if !isSnap {
		return fmt.Errorf("Volume is not a snapshot")
	}

	// Trimming the snapshot name off the volume path yields the parent
	// snapshots directory.
	snapshotsPath := strings.TrimSuffix(volPath, snapName)

	isEmpty, err := shared.PathIsEmpty(snapshotsPath)
	if err != nil {
		return err
	}

	if !isEmpty {
		return nil
	}

	return os.Remove(snapshotsPath)
}
|
package ticketmatic
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"reflect"
"strings"
"time"
)
// Server is the API server to use. It can be overridden via the
// TM_TEST_SERVER environment variable (see init).
var Server = "https://apps.ticketmatic.com"

// Version is the API version targeted by this library.
var Version = "1"

// Build is the library version.
const Build = "1.0.108"
// RateLimitError is returned when the API answers with HTTP 429 (rate limit
// exceeded). Status carries the queue status decoded from the response body.
type RateLimitError struct {
	Status *QueueStatus
}

// Error implements the error interface.
func (r *RateLimitError) Error() string {
	return "Rate Limit Exceeded"
}
// RequestError is returned for failed API requests (non-200, non-429). When
// the response body could be parsed as a structured error, Message and the
// Application* fields are populated; otherwise the raw body is kept in Body.
type RequestError struct {
	StatusCode      int         `json:"code,omitempty"`
	Body            []byte      `json:"-"`
	Message         string      `json:"message,omitempty"`
	ApplicationCode int         `json:"applicationcode,omitempty"`
	ApplicationData interface{} `json:"applicationdata,omitempty"`
}

// Error implements the error interface, preferring the parsed Message over
// the raw body.
func (r *RequestError) Error() string {
	// Early return instead of the original else-after-return.
	if r.Message != "" {
		return fmt.Sprintf("Failed (%d): %s", r.StatusCode, r.Message)
	}

	return fmt.Sprintf("Failed (%d): %s", r.StatusCode, string(r.Body))
}
// init allows redirecting the client at a test server via TM_TEST_SERVER.
func init() {
	if s := os.Getenv("TM_TEST_SERVER"); s != "" {
		Server = s
	}
}
// Client is an API client bound to a single Ticketmatic account.
type Client struct {
	AccountCode string // Account to operate on.
	AccessKey   string // API access key, sent with every request.
	SecretKey   string // API secret key, used to sign requests (see authHeader).
	Language    string // Optional; sent as Accept-Language when non-empty.
}
// Request represents a single API request being built and executed.
type Request struct {
	client            *Client
	method            string
	url               string
	resultContentType string                 // "json" results are unmarshalled by Run; others need a *bytes.Buffer.
	params            map[string]interface{} // URL parameters (set via UrlParameters).
	query             map[string]interface{} // Query string parameters (set via AddParameter).
	body              interface{}
	bodyContentType   string // "json" or "svg" (see Body and prepareRequest).
}
// NewClient creates an API client for the given account credentials.
func NewClient(accountcode, accesskey, secretkey string) *Client {
	return &Client{
		AccountCode: accountcode,
		AccessKey:   accesskey,
		SecretKey:   secretkey,
	}
}
// NewRequest creates a request for the given method and URL. An empty
// resultContentType defaults to "json".
func (c *Client) NewRequest(method, url, resultContentType string) *Request {
	contentType := resultContentType
	if contentType == "" {
		contentType = "json"
	}

	return &Request{
		client:            c,
		method:            method,
		url:               url,
		resultContentType: contentType,
		query:             make(map[string]interface{}),
	}
}
// AddParameter adds a query string parameter to the request, skipping values
// equal to the zero value of their type so defaults are not sent.
func (r *Request) AddParameter(key string, val interface{}) {
	// Try to omit empty parameters by not sending them when they're set to
	// their default values.
	// NOTE(review): the interface comparison below panics for uncomparable
	// kinds (slices, maps, funcs), and reflect.ValueOf(nil) has no Type() —
	// confirm callers only pass comparable, non-nil values.
	v := reflect.ValueOf(val)
	if v.Interface() != reflect.Zero(v.Type()).Interface() {
		r.query[key] = val
	}
}
// UrlParameters sets the URL parameters for the request; they are consumed
// when the final request URL is built (see prepareUrl).
func (r *Request) UrlParameters(params map[string]interface{}) {
	r.params = params
}
// Body sets the request body and its content type ("json" or "svg" — see
// prepareRequest for how each is encoded).
func (r *Request) Body(body interface{}, bodyContentType string) {
	r.body = body
	r.bodyContentType = bodyContentType
}
// Run executes the request. When obj is non-nil the response is decoded into
// it: JSON results are unmarshalled, while any other result content type
// requires obj to be a *bytes.Buffer that receives the raw body.
func (r *Request) Run(obj interface{}) error {
	resp, err := r.prepareRequest()
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if obj == nil {
		return nil
	}

	if r.resultContentType != "json" {
		buff, ok := obj.(*bytes.Buffer)
		if !ok {
			return errors.New("Given obj is not *bytes.Buffer")
		}

		_, err := buff.ReadFrom(resp.Body)
		return err
	}

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	err = json.Unmarshal(data, obj)
	if err != nil {
		return fmt.Errorf("Deserialization failed: %s in %s", err, string(data))
	}

	return nil
}
// Stream executes the request and returns a Stream wrapping the raw
// response. The response body is intentionally left open here.
// NOTE(review): presumably the returned Stream owns and closes the
// body — confirm in NewStream.
func (r *Request) Stream() (*Stream, error) {
	resp, err := r.prepareRequest()
	if err != nil {
		return nil, err
	}
	return NewStream(resp), nil
}
// prepareRequest serializes the body, builds the signed HTTP request,
// executes it and classifies the response:
//   - 200: response returned as-is; the CALLER must close resp.Body.
//   - 429: body decoded into QueueStatus, returned as *RateLimitError.
//   - other: body read and closed here; returned as *RequestError,
//     using the server's structured error when it parses.
func (r *Request) prepareRequest() (*http.Response, error) {
	var body io.Reader
	if r.body != nil {
		if r.bodyContentType == "json" {
			d, err := json.Marshal(r.body)
			if err != nil {
				return nil, err
			}
			body = bytes.NewReader(d)
		} else if r.bodyContentType == "svg" {
			// SVG payloads must already be serialized by the caller.
			sBody, ok := r.body.(string)
			if !ok {
				return nil, errors.New("Supplied body is not a string, which is needed for body content type svg")
			}
			body = strings.NewReader(sBody)
		}
	}
	u, err := r.prepareUrl()
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(r.method, u, body)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Authorization", r.authHeader())
	if r.bodyContentType == "json" {
		req.Header.Add("Content-Type", "application/json")
	} else if r.bodyContentType == "svg" {
		req.Header.Add("Content-Type", "image/svg+xml")
	}
	req.Header.Add("User-Agent", fmt.Sprintf("ticketmatic/go (%s)", Build))
	if r.client.Language != "" {
		req.Header.Add("Accept-Language", r.client.Language)
	}
	// Close the connection after this exchange instead of keeping it alive.
	req.Close = true
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	switch resp.StatusCode {
	case 200:
		return resp, nil
	case 429:
		status := &QueueStatus{}
		err = json.NewDecoder(resp.Body).Decode(status)
		defer resp.Body.Close()
		if err != nil {
			return nil, err
		}
		return nil, &RateLimitError{
			Status: status,
		}
	default:
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		// Try to unmarshal the error, pass it back
		r := &RequestError{}
		err := json.Unmarshal(body, r)
		if err == nil && r.StatusCode > 0 && r.Message != "" {
			return nil, r
		}
		return nil, &RequestError{
			StatusCode: resp.StatusCode,
			Body:       body,
		}
	}
}
// authHeader builds the TM-HMAC-SHA256 Authorization header value,
// signing the current UTC timestamp with the client's secret key.
func (r *Request) authHeader() string {
	now := time.Now().UTC().Format("2006-01-02T15:04:05")
	signature := Sign(r.client.AccessKey, r.client.SecretKey, r.client.AccountCode, now)
	return fmt.Sprintf("TM-HMAC-SHA256 key=%s ts=%s sign=%s", r.client.AccessKey, now, signature)
}
// prepareUrl expands the URL template and appends the encoded query
// string, returning the absolute request URL.
//
// Composite query values (maps, slices, structs, pointers, ...) are
// serialized as JSON; scalars use their default string formatting.
func (r *Request) prepareUrl() (string, error) {
	u := r.url
	for k, v := range r.params {
		u = strings.Replace(u, fmt.Sprintf("{%s}", k), fmt.Sprintf("%v", v), 1)
	}
	u = strings.Replace(u, "{accountname}", r.client.AccountCode, 1)

	result := fmt.Sprintf("%s/api/%s%s", Server, Version, u)
	if len(r.query) > 0 {
		query := url.Values{}
		for k, v := range r.query {
			switch reflect.ValueOf(v).Kind() {
			case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.Struct:
				d, err := json.Marshal(v)
				if err != nil {
					return "", err
				}
				// Was fmt.Sprintf("%s", string(d)): a redundant double
				// conversion. Add the JSON text directly.
				query.Add(k, string(d))
			default:
				query.Add(k, fmt.Sprintf("%v", v))
			}
		}
		result = fmt.Sprintf("%s?%s", result, query.Encode())
	}
	return result, nil
}
// Sign generates the request signature: the hex-encoded HMAC-SHA256 of
// accesskey+accountcode+ts, keyed with the secret key.
func Sign(accesskey, secretkey, accountcode, ts string) string {
	mac := hmac.New(sha256.New, []byte(secretkey))
	mac.Write([]byte(accesskey))
	mac.Write([]byte(accountcode))
	mac.Write([]byte(ts))
	return fmt.Sprintf("%x", mac.Sum(nil))
}
TMC-15690: Edit properties API example.
package ticketmatic
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"reflect"
"strings"
"time"
)
// API server to use. Overridden at startup by init when the
// TM_TEST_SERVER environment variable is set.
var Server = "https://apps.ticketmatic.com"

// API version, used as the path segment after /api/.
var Version = "1"

// Library Version, reported in the User-Agent header.
const Build = "1.0.109"
// Rate limit error
//
// Returned when the API answers with HTTP 429; Status carries the
// queue information decoded from the response body.
type RateLimitError struct {
	Status *QueueStatus
}

// Error implements the error interface with a fixed message; inspect
// Status for details.
func (r *RateLimitError) Error() string {
	return "Rate Limit Exceeded"
}
// Request error
//
// Returned for non-200, non-429 API responses. When the server sends a
// structured error it is unmarshalled into these fields; otherwise
// only StatusCode and the raw Body are set.
type RequestError struct {
	StatusCode      int         `json:"code,omitempty"`
	Body            []byte      `json:"-"` // raw response body, kept when unmarshalling fails
	Message         string      `json:"message,omitempty"`
	ApplicationCode int         `json:"applicationcode,omitempty"`
	ApplicationData interface{} `json:"applicationdata,omitempty"`
}
// Error formats the failure as "Failed (<code>): <detail>", preferring
// the structured message over the raw response body.
func (r *RequestError) Error() string {
	detail := r.Message
	if detail == "" {
		detail = string(r.Body)
	}
	return fmt.Sprintf("Failed (%d): %s", r.StatusCode, detail)
}
// init lets tests point the client at a different API server via the
// TM_TEST_SERVER environment variable.
func init() {
	if s := os.Getenv("TM_TEST_SERVER"); s != "" {
		Server = s
	}
}
// API Client
//
// Credentials for one Ticketmatic account; used to create and sign
// requests (see NewRequest and authHeader).
type Client struct {
	AccountCode string // account identifier, substituted for {accountname} in URLs
	AccessKey   string // public key, sent in the Authorization header
	SecretKey   string // secret key, used only to HMAC-sign requests
	Language    string // optional; sent as Accept-Language when non-empty
}
// API Request
//
// A single API call in the making: configured via AddParameter,
// UrlParameters and Body, then executed with Run or Stream.
type Request struct {
	client            *Client
	method            string // HTTP method
	url               string // URL template, may contain {placeholders}
	resultContentType string // "json" (default) or raw buffer output
	params            map[string]interface{} // URL template substitutions
	query             map[string]interface{} // query-string parameters
	body              interface{}            // request payload, serialized in prepareRequest
	bodyContentType   string                 // "json" or "svg"
}
// NewClient creates an API client for the given account and key pair.
func NewClient(accountcode, accesskey, secretkey string) *Client {
	return &Client{
		AccountCode: accountcode,
		AccessKey:   accesskey,
		SecretKey:   secretkey,
	}
}
// NewRequest creates a request for the given method and URL template.
// An empty resultContentType defaults to "json".
func (c *Client) NewRequest(method, url, resultContentType string) *Request {
	contentType := resultContentType
	if contentType == "" {
		contentType = "json"
	}
	return &Request{
		client:            c,
		method:            method,
		url:               url,
		resultContentType: contentType,
		query:             make(map[string]interface{}),
	}
}
// AddParameter registers a query-string parameter.
//
// Parameters left at their type's zero value are omitted, so optional
// parameters are not sent when unset.
//
// Fix: the previous implementation compared val against its zero value
// with !=, which panics at runtime for uncomparable kinds (slices,
// maps, funcs) and for an untyped nil. reflect.Value.IsZero covers
// every kind safely and is equivalent for comparable types.
func (r *Request) AddParameter(key string, val interface{}) {
	v := reflect.ValueOf(val)
	if !v.IsValid() {
		// Untyped nil: nothing meaningful to send.
		return
	}
	if !v.IsZero() {
		r.query[key] = val
	}
}
// UrlParameters sets the values substituted for {placeholders} in the
// request URL template (applied in prepareUrl).
func (r *Request) UrlParameters(params map[string]interface{}) {
	r.params = params
}
// Body sets the request payload and its content type ("json" or
// "svg"). Serialization happens later, when the request is executed.
func (r *Request) Body(body interface{}, bodyContentType string) {
	r.body = body
	r.bodyContentType = bodyContentType
}
// Run executes the request and decodes the response into obj.
//
// For a "json" result content type obj is filled via json.Unmarshal;
// otherwise obj must be a *bytes.Buffer receiving the raw body.
// A nil obj discards the response body.
func (r *Request) Run(obj interface{}) error {
	resp, err := r.prepareRequest()
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if obj == nil {
		return nil
	}

	if r.resultContentType != "json" {
		buff, ok := obj.(*bytes.Buffer)
		if !ok {
			return errors.New("Given obj is not *bytes.Buffer")
		}
		_, err := buff.ReadFrom(resp.Body)
		return err
	}

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if err := json.Unmarshal(data, obj); err != nil {
		return fmt.Errorf("Deserialization failed: %s in %s", err, string(data))
	}
	return nil
}
// Stream executes the request and returns a Stream wrapping the raw
// response. The response body is intentionally left open here.
// NOTE(review): presumably the returned Stream owns and closes the
// body — confirm in NewStream.
func (r *Request) Stream() (*Stream, error) {
	resp, err := r.prepareRequest()
	if err != nil {
		return nil, err
	}
	return NewStream(resp), nil
}
// prepareRequest serializes the body, builds the signed HTTP request,
// executes it and classifies the response:
//   - 200: response returned as-is; the CALLER must close resp.Body.
//   - 429: body decoded into QueueStatus, returned as *RateLimitError.
//   - other: body read and closed here; returned as *RequestError,
//     using the server's structured error when it parses.
func (r *Request) prepareRequest() (*http.Response, error) {
	var body io.Reader
	if r.body != nil {
		if r.bodyContentType == "json" {
			d, err := json.Marshal(r.body)
			if err != nil {
				return nil, err
			}
			body = bytes.NewReader(d)
		} else if r.bodyContentType == "svg" {
			// SVG payloads must already be serialized by the caller.
			sBody, ok := r.body.(string)
			if !ok {
				return nil, errors.New("Supplied body is not a string, which is needed for body content type svg")
			}
			body = strings.NewReader(sBody)
		}
	}
	u, err := r.prepareUrl()
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(r.method, u, body)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Authorization", r.authHeader())
	if r.bodyContentType == "json" {
		req.Header.Add("Content-Type", "application/json")
	} else if r.bodyContentType == "svg" {
		req.Header.Add("Content-Type", "image/svg+xml")
	}
	req.Header.Add("User-Agent", fmt.Sprintf("ticketmatic/go (%s)", Build))
	if r.client.Language != "" {
		req.Header.Add("Accept-Language", r.client.Language)
	}
	// Close the connection after this exchange instead of keeping it alive.
	req.Close = true
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	switch resp.StatusCode {
	case 200:
		return resp, nil
	case 429:
		status := &QueueStatus{}
		err = json.NewDecoder(resp.Body).Decode(status)
		defer resp.Body.Close()
		if err != nil {
			return nil, err
		}
		return nil, &RateLimitError{
			Status: status,
		}
	default:
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		// Try to unmarshal the error, pass it back
		r := &RequestError{}
		err := json.Unmarshal(body, r)
		if err == nil && r.StatusCode > 0 && r.Message != "" {
			return nil, r
		}
		return nil, &RequestError{
			StatusCode: resp.StatusCode,
			Body:       body,
		}
	}
}
// authHeader builds the TM-HMAC-SHA256 Authorization header value,
// signing the current UTC timestamp with the client's secret key.
func (r *Request) authHeader() string {
	now := time.Now().UTC().Format("2006-01-02T15:04:05")
	signature := Sign(r.client.AccessKey, r.client.SecretKey, r.client.AccountCode, now)
	return fmt.Sprintf("TM-HMAC-SHA256 key=%s ts=%s sign=%s", r.client.AccessKey, now, signature)
}
// prepareUrl expands the URL template and appends the encoded query
// string, returning the absolute request URL.
//
// Composite query values (maps, slices, structs, pointers, ...) are
// serialized as JSON; scalars use their default string formatting.
func (r *Request) prepareUrl() (string, error) {
	u := r.url
	for k, v := range r.params {
		u = strings.Replace(u, fmt.Sprintf("{%s}", k), fmt.Sprintf("%v", v), 1)
	}
	u = strings.Replace(u, "{accountname}", r.client.AccountCode, 1)

	result := fmt.Sprintf("%s/api/%s%s", Server, Version, u)
	if len(r.query) > 0 {
		query := url.Values{}
		for k, v := range r.query {
			switch reflect.ValueOf(v).Kind() {
			case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.Struct:
				d, err := json.Marshal(v)
				if err != nil {
					return "", err
				}
				// Was fmt.Sprintf("%s", string(d)): a redundant double
				// conversion. Add the JSON text directly.
				query.Add(k, string(d))
			default:
				query.Add(k, fmt.Sprintf("%v", v))
			}
		}
		result = fmt.Sprintf("%s?%s", result, query.Encode())
	}
	return result, nil
}
// Sign generates the request signature: the hex-encoded HMAC-SHA256 of
// accesskey+accountcode+ts, keyed with the secret key.
func Sign(accesskey, secretkey, accountcode, ts string) string {
	mac := hmac.New(sha256.New, []byte(secretkey))
	mac.Write([]byte(accesskey))
	mac.Write([]byte(accountcode))
	mac.Write([]byte(ts))
	return fmt.Sprintf("%x", mac.Sum(nil))
}
|
package handlers
import (
"fmt"
"github.com/alphagov/router/logger"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"syscall"
"time"
)
// NewBackendHandler returns a reverse proxy to backendUrl using the
// given dial and response-header timeouts. Backend responses and
// errors pass through backendTransport (see RoundTrip).
func NewBackendHandler(backendUrl *url.URL, connectTimeout, headerTimeout time.Duration, logger logger.Logger) http.Handler {
	proxy := httputil.NewSingleHostReverseProxy(backendUrl)
	proxy.Transport = newBackendTransport(connectTimeout, headerTimeout, logger)
	defaultDirector := proxy.Director
	proxy.Director = func(req *http.Request) {
		defaultDirector(req)
		// Set the Host header to match the backend hostname instead of the one from the incoming request.
		req.Host = backendUrl.Host
		// Setting a blank User-Agent causes the http lib not to output one, whereas if there
		// is no header, it will output a default one.
		// See: http://code.google.com/p/go/source/browse/src/pkg/net/http/request.go?name=go1.1.2#349
		if _, present := req.Header["User-Agent"]; !present {
			req.Header.Set("User-Agent", "")
		}
		populateViaHeader(req.Header, fmt.Sprintf("%d.%d", req.ProtoMajor, req.ProtoMinor))
	}
	return proxy
}
func populateViaHeader(header http.Header, httpVersion string) {
via := httpVersion + " router"
if prior, ok := header["Via"]; ok {
via = strings.Join(prior, ", ") + ", " + via
}
header.Set("Via", via)
}
// backendTransport wraps an http.Transport so backend responses and
// errors can be adjusted (Via header, synthetic error responses)
// before the proxy copies them to the client.
type backendTransport struct {
	wrapped *http.Transport
	logger  logger.Logger
}
// Construct a backendTransport that wraps an http.Transport and implements http.RoundTripper.
// This allows us to intercept the response from the backend and modify it before it's copied
// back to the client.
func newBackendTransport(connectTimeout, headerTimeout time.Duration, logger logger.Logger) (transport *backendTransport) {
	wrapped := &http.Transport{
		// Allow the proxy to keep more than the default (2) keepalive
		// connections per upstream.
		MaxIdleConnsPerHost:   20,
		ResponseHeaderTimeout: headerTimeout,
	}
	wrapped.Dial = func(network, address string) (net.Conn, error) {
		return net.DialTimeout(network, address, connectTimeout)
	}
	return &backendTransport{wrapped, logger}
}
// RoundTrip forwards the request to the backend. Successful responses
// get a Via header appended; errors are logged and translated into
// synthetic HTTP responses: 504 for timeouts, 502 for connection
// refused, 500 for everything else.
//
// Fix: previously only the recognized cases returned a synthetic
// response — all other errors escaped to the caller, duplicating
// generic error reporting between the JSON error log and stderr.
// Now every error is handled here with a 500 fallback.
func (bt *backendTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	resp, err = bt.wrapped.RoundTrip(req)
	if err == nil {
		populateViaHeader(resp.Header, fmt.Sprintf("%d.%d", resp.ProtoMajor, resp.ProtoMinor))
		return
	}
	// Log the error (deferred to allow special case error handling to add/change details)
	logDetails := map[string]interface{}{"error": err.Error(), "status": 500}
	defer bt.logger.LogFromBackendRequest(logDetails, req)
	// Intercept some specific errors and generate an appropriate HTTP error response
	if opErr, ok := err.(*net.OpError); ok {
		if opErr.Timeout() {
			logDetails["status"] = 504
			return newErrorResponse(504), nil
		} else if opErr.Err == syscall.ECONNREFUSED {
			logDetails["status"] = 502
			return newErrorResponse(502), nil
		}
	}
	// The response-header timeout surfaces as a plain error string
	// rather than a *net.OpError, so it is matched on its message.
	if err.Error() == "net/http: timeout awaiting response headers" {
		logDetails["status"] = 504
		return newErrorResponse(504), nil
	}
	// 500 for all other errors
	return newErrorResponse(500), nil
}
func newErrorResponse(status int) (resp *http.Response) {
resp = &http.Response{StatusCode: status}
resp.Body = ioutil.NopCloser(strings.NewReader(""))
return
}
Tidy up backend error handling.
This handles all errors here, which avoids duplicating generic
errors between the JSON error log and the STDERR output.
package handlers
import (
"fmt"
"github.com/alphagov/router/logger"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"syscall"
"time"
)
// NewBackendHandler returns a reverse proxy to backendUrl using the
// given dial and response-header timeouts. Backend responses and
// errors pass through backendTransport (see RoundTrip).
func NewBackendHandler(backendUrl *url.URL, connectTimeout, headerTimeout time.Duration, logger logger.Logger) http.Handler {
	proxy := httputil.NewSingleHostReverseProxy(backendUrl)
	proxy.Transport = newBackendTransport(connectTimeout, headerTimeout, logger)
	defaultDirector := proxy.Director
	proxy.Director = func(req *http.Request) {
		defaultDirector(req)
		// Set the Host header to match the backend hostname instead of the one from the incoming request.
		req.Host = backendUrl.Host
		// Setting a blank User-Agent causes the http lib not to output one, whereas if there
		// is no header, it will output a default one.
		// See: http://code.google.com/p/go/source/browse/src/pkg/net/http/request.go?name=go1.1.2#349
		if _, present := req.Header["User-Agent"]; !present {
			req.Header.Set("User-Agent", "")
		}
		populateViaHeader(req.Header, fmt.Sprintf("%d.%d", req.ProtoMajor, req.ProtoMinor))
	}
	return proxy
}
func populateViaHeader(header http.Header, httpVersion string) {
via := httpVersion + " router"
if prior, ok := header["Via"]; ok {
via = strings.Join(prior, ", ") + ", " + via
}
header.Set("Via", via)
}
// backendTransport wraps an http.Transport so backend responses and
// errors can be adjusted (Via header, synthetic error responses)
// before the proxy copies them to the client.
type backendTransport struct {
	wrapped *http.Transport
	logger  logger.Logger
}
// Construct a backendTransport that wraps an http.Transport and implements http.RoundTripper.
// This allows us to intercept the response from the backend and modify it before it's copied
// back to the client.
func newBackendTransport(connectTimeout, headerTimeout time.Duration, logger logger.Logger) (transport *backendTransport) {
	wrapped := &http.Transport{
		// Allow the proxy to keep more than the default (2) keepalive
		// connections per upstream.
		MaxIdleConnsPerHost:   20,
		ResponseHeaderTimeout: headerTimeout,
	}
	wrapped.Dial = func(network, address string) (net.Conn, error) {
		return net.DialTimeout(network, address, connectTimeout)
	}
	return &backendTransport{wrapped, logger}
}
// RoundTrip forwards the request to the backend. Successful responses
// get a Via header appended; failed requests are logged and translated
// into synthetic HTTP responses: 504 for timeouts, 502 for connection
// refused, 500 for everything else.
func (bt *backendTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	resp, err = bt.wrapped.RoundTrip(req)
	if err == nil {
		populateViaHeader(resp.Header, fmt.Sprintf("%d.%d", resp.ProtoMajor, resp.ProtoMinor))
	} else {
		// Log the error (deferred to allow special case error handling to add/change details)
		logDetails := map[string]interface{}{"error": err.Error(), "status": 500}
		defer bt.logger.LogFromBackendRequest(logDetails, req)
		// Intercept some specific errors and generate an appropriate HTTP error response
		if opErr, ok := err.(*net.OpError); ok {
			if opErr.Timeout() {
				logDetails["status"] = 504
				return newErrorResponse(504), nil
			} else if opErr.Err == syscall.ECONNREFUSED {
				logDetails["status"] = 502
				return newErrorResponse(502), nil
			}
		}
		// The response-header timeout surfaces as a plain error string
		// rather than a *net.OpError, so it is matched on its message.
		if err.Error() == "net/http: timeout awaiting response headers" {
			logDetails["status"] = 504
			return newErrorResponse(504), nil
		}
		// 500 for all other errors
		return newErrorResponse(500), nil
	}
	return
}
func newErrorResponse(status int) (resp *http.Response) {
resp = &http.Response{StatusCode: status}
resp.Body = ioutil.NopCloser(strings.NewReader(""))
return
}
|
// +build linux,cgo,!agent
package db
import (
"database/sql"
"fmt"
"time"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/db/query"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/osarch"
)
// Code generation directives.
//
//go:generate -command mapper lxd-generate db mapper -t images.mapper.go
//go:generate mapper reset
//
//go:generate mapper stmt -p db -e image objects
//go:generate mapper stmt -p db -e image objects-by-Project
//go:generate mapper stmt -p db -e image objects-by-Project-and-Public
//go:generate mapper stmt -p db -e image objects-by-Project-and-Fingerprint
//go:generate mapper stmt -p db -e image objects-by-Fingerprint
//go:generate mapper stmt -p db -e image objects-by-Cached
//
//go:generate mapper method -p db -e image List
//go:generate mapper method -p db -e image Get
// Image is a value object holding db-related details about an image.
// The db struct tags drive the generated mapper code (see the
// go:generate directives above).
type Image struct {
	ID           int
	Project      string `db:"primary=yes&join=projects.name"`
	Fingerprint  string `db:"primary=yes&comparison=like"`
	Type         int
	Filename     string
	Size         int64
	Public       bool
	Architecture int
	CreationDate time.Time
	ExpiryDate   time.Time
	UploadDate   time.Time
	Cached       bool
	LastUseDate  time.Time
	AutoUpdate   int
}
// ImageFilter can be used to filter results yielded by GetImages.
type ImageFilter struct {
	Project     string
	Fingerprint string // Matched with LIKE
	Public      bool
	Cached      bool
}
// ImageSourceProtocol maps image source protocol codes to human-readable names.
// The integer codes are what gets stored in images_source.protocol
// (see CreateImageSource / GetImageSource).
var ImageSourceProtocol = map[int]string{
	0: "lxd",
	1: "direct",
	2: "simplestreams",
}
// GetLocalImagesFingerprints returns the fingerprints of all local images.
//
// "Local" means images with an images_nodes row for this cluster
// member (c.nodeID).
func (c *ClusterTx) GetLocalImagesFingerprints() ([]string, error) {
	q := `
SELECT images.fingerprint
FROM images_nodes
JOIN images ON images.id = images_nodes.image_id
WHERE node_id = ?
`
	return query.SelectStrings(c.tx, q, c.nodeID)
}
// GetImagesFingerprints returns the fingerprints of all images in the
// given project, optionally restricted to public images.
//
// When the project does not have the images feature enabled, images of
// the "default" project are listed instead.
func (c *Cluster) GetImagesFingerprints(project string, public bool) ([]string, error) {
	q := `
SELECT fingerprint
FROM images
JOIN projects ON projects.id = images.project_id
WHERE projects.name = ?
`
	// Idiomatic boolean test (was "public == true").
	if public {
		q += " AND public=1"
	}
	var fingerprints []string
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		fingerprints, err = query.SelectStrings(tx.tx, q, project)
		return err
	})
	if err != nil {
		return nil, err
	}
	return fingerprints, nil
}
// ExpiredImage used to store expired image info.
type ExpiredImage struct {
	Fingerprint string // full image fingerprint
	ProjectName string // project the image belongs to
}
// GetExpiredImages returns the names and project name of all images that have expired since the given time.
//
// expiry is a number of days: a cached image expires that many days
// after its last use (falling back to its upload date when it has
// never been used).
func (c *Cluster) GetExpiredImages(expiry int64) ([]ExpiredImage, error) {
	q := `
SELECT
fingerprint,
last_use_date,
upload_date,
projects.name as projectName
FROM images
JOIN projects ON projects.id = images.project_id
WHERE images.cached = 1`
	// queryScan uses these zero values as type templates for the
	// four selected columns.
	var fpStr string
	var useStr string
	var uploadStr string
	var projectName string
	inargs := []interface{}{}
	outfmt := []interface{}{fpStr, useStr, uploadStr, projectName}
	dbResults, err := queryScan(c, q, inargs, outfmt)
	if err != nil {
		return []ExpiredImage{}, err
	}
	results := []ExpiredImage{}
	for _, r := range dbResults {
		// Figure out the expiry
		// r[1] = last_use_date, r[2] = upload_date; prefer the former
		// when it is set.
		timestamp := r[2]
		if r[1] != "" {
			timestamp = r[1]
		}
		var imageExpiry time.Time
		err = imageExpiry.UnmarshalText([]byte(timestamp.(string)))
		if err != nil {
			return []ExpiredImage{}, err
		}
		imageExpiry = imageExpiry.Add(time.Duration(expiry*24) * time.Hour)
		// Check if expired
		if imageExpiry.After(time.Now()) {
			continue
		}
		result := ExpiredImage{
			Fingerprint: r[0].(string),
			ProjectName: r[3].(string),
		}
		results = append(results, result)
	}
	return results, nil
}
// CreateImageSource inserts a new image source.
func (c *Cluster) CreateImageSource(id int, server string, protocol string, certificate string, alias string) error {
	stmt := `INSERT INTO images_source (image_id, server, protocol, certificate, alias) values (?, ?, ?, ?, ?)`

	// Translate the protocol name back to its numeric code.
	protocolInt := -1
	for code, name := range ImageSourceProtocol {
		if name == protocol {
			protocolInt = code
		}
	}
	if protocolInt == -1 {
		return fmt.Errorf("Invalid protocol: %s", protocol)
	}

	return exec(c, stmt, id, server, protocolInt, certificate, alias)
}
// GetImageSource returns the image source with the given ID.
//
// Returns ErrNoSuchObject when the image has no source row, and an
// error when the stored protocol code is not in ImageSourceProtocol.
func (c *Cluster) GetImageSource(imageID int) (int, api.ImageSource, error) {
	q := `SELECT id, server, protocol, certificate, alias FROM images_source WHERE image_id=?`
	id := 0
	protocolInt := -1
	result := api.ImageSource{}
	arg1 := []interface{}{imageID}
	arg2 := []interface{}{&id, &result.Server, &protocolInt, &result.Certificate, &result.Alias}
	err := dbQueryRowScan(c, q, arg1, arg2)
	if err != nil {
		if err == sql.ErrNoRows {
			return -1, api.ImageSource{}, ErrNoSuchObject
		}
		return -1, api.ImageSource{}, err
	}
	// Map the numeric protocol code back to its name.
	protocol, found := ImageSourceProtocol[protocolInt]
	if !found {
		return -1, api.ImageSource{}, fmt.Errorf("Invalid protocol: %d", protocolInt)
	}
	result.Protocol = protocol
	return id, result, nil
}
// ImageSourceGetCachedFingerprint tries to find a source entry of a locally
// cached image that matches the given remote details (server, protocol and
// alias). Return the fingerprint linked to the matching entry, if any.
func (c *Cluster) ImageSourceGetCachedFingerprint(server string, protocol string, alias string, typeName string, architecture int) (string, error) {
	// Resolve the optional instance type filter; empty means "any".
	imageType := instancetype.Any
	if typeName != "" {
		var err error
		imageType, err = instancetype.New(typeName)
		if err != nil {
			return "", err
		}
	}
	// Translate the protocol name to its numeric code.
	protocolInt := -1
	for protoInt, protoString := range ImageSourceProtocol {
		if protoString == protocol {
			protocolInt = protoInt
		}
	}
	if protocolInt == -1 {
		return "", fmt.Errorf("Invalid protocol: %s", protocol)
	}
	q := `SELECT images.fingerprint
FROM images_source
INNER JOIN images
ON images_source.image_id=images.id
WHERE server=? AND protocol=? AND alias=? AND auto_update=1 AND images.architecture=?
`
	arg1 := []interface{}{server, protocolInt, alias, architecture}
	if imageType != instancetype.Any {
		q += "AND images.type=?\n"
		arg1 = []interface{}{server, protocolInt, alias, architecture, imageType}
	}
	// Most recently created match wins.
	q += "ORDER BY creation_date DESC"
	fingerprint := ""
	arg2 := []interface{}{&fingerprint}
	err := dbQueryRowScan(c, q, arg1, arg2)
	if err != nil {
		if err == sql.ErrNoRows {
			return "", ErrNoSuchObject
		}
		return "", err
	}
	return fingerprint, nil
}
// ImageExists returns whether an image with the given fingerprint exists.
//
// When the project does not have the images feature enabled, the
// "default" project is checked instead.
func (c *Cluster) ImageExists(project string, fingerprint string) (bool, error) {
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return false, err
	}
	var exists bool
	query := `
SELECT COUNT(*) > 0
FROM images
JOIN projects ON projects.id = images.project_id
WHERE projects.name = ? AND fingerprint=?
`
	inargs := []interface{}{project, fingerprint}
	outargs := []interface{}{&exists}
	err = dbQueryRowScan(c, query, inargs, outargs)
	if err == sql.ErrNoRows {
		return exists, ErrNoSuchObject
	}
	return exists, err
}
// ImageIsReferencedByOtherProjects returns true if the image with the given
// fingerprint is referenced by projects other than the given one.
//
// When the project does not have the images feature enabled, the
// "default" project takes its place in the comparison.
func (c *Cluster) ImageIsReferencedByOtherProjects(project string, fingerprint string) (bool, error) {
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return false, err
	}
	var referenced bool
	query := `
SELECT COUNT(*) > 0
FROM images
JOIN projects ON projects.id = images.project_id
WHERE projects.name != ? AND fingerprint=?
`
	inargs := []interface{}{project, fingerprint}
	outargs := []interface{}{&referenced}
	err = dbQueryRowScan(c, query, inargs, outargs)
	if err == sql.ErrNoRows {
		return referenced, nil
	}
	return referenced, err
}
// GetImage gets an Image object from the database.
// If strictMatching is false, The fingerprint argument will be queried with a LIKE query, means you can
// pass a shortform and will get the full fingerprint.
// There can never be more than one image with a given fingerprint, as it is
// enforced by a UNIQUE constraint in the schema.
func (c *Cluster) GetImage(project, fingerprint string, public bool, strictMatching bool) (int, *api.Image, error) {
	// Remember the caller's project for profile lookup: profiles may
	// live in the original project even when images fall back to
	// "default".
	profileProject := project
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return -1, nil, err
	}
	var create, expire, used, upload *time.Time // These hold the db-returned times
	// The object we'll actually return
	image := api.Image{}
	id := -1
	arch := -1
	imageType := -1
	// These two humongous things will be filled by the call to DbQueryRowScan
	outfmt := []interface{}{&id, &image.Fingerprint, &image.Filename,
		&image.Size, &image.Cached, &image.Public, &image.AutoUpdate, &arch,
		&create, &expire, &used, &upload, &imageType}
	inargs := []interface{}{project}
	query := `
SELECT
images.id, fingerprint, filename, size, cached, public, auto_update, architecture,
creation_date, expiry_date, last_use_date, upload_date, type
FROM images
JOIN projects ON projects.id = images.project_id
WHERE projects.name = ?`
	if strictMatching {
		inargs = append(inargs, fingerprint)
		query += " AND fingerprint = ?"
	} else {
		inargs = append(inargs, fingerprint+"%")
		query += " AND fingerprint LIKE ?"
	}
	if public {
		query += " AND public=1"
	}
	err = dbQueryRowScan(c, query, inargs, outfmt)
	if err != nil {
		if err == sql.ErrNoRows {
			return -1, nil, ErrNoSuchObject
		}
		return -1, nil, err // Likely: there are no rows for this fingerprint
	}
	// Validate we only have a single match
	if !strictMatching {
		query = `
SELECT COUNT(images.id)
FROM images
JOIN projects ON projects.id = images.project_id
WHERE projects.name = ?
AND fingerprint LIKE ?
`
		count := 0
		outfmt := []interface{}{&count}
		err = dbQueryRowScan(c, query, inargs, outfmt)
		if err != nil {
			return -1, nil, err
		}
		if count > 1 {
			return -1, nil, fmt.Errorf("Partial fingerprint matches more than one image")
		}
	}
	// Populate the remaining fields (dates, properties, aliases, source).
	err = c.imageFill(id, &image, create, expire, used, upload, arch, imageType)
	if err != nil {
		return -1, nil, errors.Wrapf(err, "Fill image details")
	}
	err = c.imageFillProfiles(id, &image, profileProject)
	if err != nil {
		return -1, nil, errors.Wrapf(err, "Fill image profiles")
	}
	return id, &image, nil
}
// GetImageFromAnyProject returns an image matching the given fingerprint, if
// it exists in any project.
//
// Unlike GetImage this requires an exact fingerprint and does not fill
// in profiles.
func (c *Cluster) GetImageFromAnyProject(fingerprint string) (int, *api.Image, error) {
	var create, expire, used, upload *time.Time // These hold the db-returned times
	// The object we'll actually return
	image := api.Image{}
	id := -1
	arch := -1
	imageType := -1
	// These two humongous things will be filled by the call to DbQueryRowScan
	outfmt := []interface{}{&id, &image.Fingerprint, &image.Filename,
		&image.Size, &image.Cached, &image.Public, &image.AutoUpdate, &arch,
		&create, &expire, &used, &upload, &imageType}
	inargs := []interface{}{fingerprint}
	query := `
SELECT
images.id, fingerprint, filename, size, cached, public, auto_update, architecture,
creation_date, expiry_date, last_use_date, upload_date, type
FROM images
WHERE fingerprint = ?
LIMIT 1`
	err := dbQueryRowScan(c, query, inargs, outfmt)
	if err != nil {
		if err == sql.ErrNoRows {
			return -1, nil, ErrNoSuchObject
		}
		return -1, nil, err // Likely: there are no rows for this fingerprint
	}
	err = c.imageFill(id, &image, create, expire, used, upload, arch, imageType)
	if err != nil {
		return -1, nil, errors.Wrapf(err, "Fill image details")
	}
	return id, &image, nil
}
// Fill extra image fields such as properties and alias. This is called after
// fetching a single row from the images table.
//
// The nullable date columns are normalized to the zero time.Time, and
// the image's properties, aliases and (when present) update source are
// loaded from their respective tables.
func (c *Cluster) imageFill(id int, image *api.Image, create, expire, used, upload *time.Time, arch int, imageType int) error {
	// Some of the dates can be nil in the DB, let's process them.
	if create != nil {
		image.CreatedAt = *create
	} else {
		image.CreatedAt = time.Time{}
	}
	if expire != nil {
		image.ExpiresAt = *expire
	} else {
		image.ExpiresAt = time.Time{}
	}
	if used != nil {
		image.LastUsedAt = *used
	} else {
		image.LastUsedAt = time.Time{}
	}
	image.Architecture, _ = osarch.ArchitectureName(arch)
	image.Type = instancetype.Type(imageType).String()
	// The upload date is enforced by NOT NULL in the schema, so it can never be nil.
	image.UploadedAt = *upload
	// Get the properties
	q := "SELECT key, value FROM images_properties where image_id=?"
	var key, value, name, desc string
	inargs := []interface{}{id}
	// queryScan uses these zero values as column type templates.
	outfmt := []interface{}{key, value}
	results, err := queryScan(c, q, inargs, outfmt)
	if err != nil {
		return err
	}
	properties := map[string]string{}
	for _, r := range results {
		key = r[0].(string)
		value = r[1].(string)
		properties[key] = value
	}
	image.Properties = properties
	// Get the aliases
	q = "SELECT name, description FROM images_aliases WHERE image_id=?"
	inargs = []interface{}{id}
	outfmt = []interface{}{name, desc}
	results, err = queryScan(c, q, inargs, outfmt)
	if err != nil {
		return err
	}
	aliases := []api.ImageAlias{}
	for _, r := range results {
		name = r[0].(string)
		desc = r[1].(string)
		a := api.ImageAlias{Name: name, Description: desc}
		aliases = append(aliases, a)
	}
	image.Aliases = aliases
	// A missing source row is not an error: UpdateSource stays nil.
	_, source, err := c.GetImageSource(id)
	if err == nil {
		image.UpdateSource = &source
	}
	return nil
}
// imageFillProfiles loads the names of the profiles associated with
// the image in the given project into image.Profiles.
//
// When the project does not have the profiles feature enabled, the
// "default" project's profiles are used instead.
func (c *Cluster) imageFillProfiles(id int, image *api.Image, project string) error {
	// Check which project name to use
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasProfiles(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has profiles")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return err
	}
	// Get the profiles
	q := `
SELECT profiles.name FROM profiles
JOIN images_profiles ON images_profiles.profile_id = profiles.id
JOIN projects ON profiles.project_id = projects.id
WHERE images_profiles.image_id = ? AND projects.name = ?
`
	var name string
	inargs := []interface{}{id, project}
	// queryScan uses the zero value as a column type template.
	outfmt := []interface{}{name}
	results, err := queryScan(c, q, inargs, outfmt)
	if err != nil {
		return err
	}
	profiles := make([]string, 0)
	for _, r := range results {
		name = r[0].(string)
		profiles = append(profiles, name)
	}
	image.Profiles = profiles
	return nil
}
// LocateImage returns the address of an online node that has a local copy of
// the given image, or an empty string if the image is already available on this
// node.
//
// If the image is not available on any online node, an error is returned.
func (c *Cluster) LocateImage(fingerprint string) (string, error) {
	stmt := `
SELECT nodes.address FROM nodes
LEFT JOIN images_nodes ON images_nodes.node_id = nodes.id
LEFT JOIN images ON images_nodes.image_id = images.id
WHERE images.fingerprint = ?
`
	var localAddress string // Address of this node
	var addresses []string  // Addresses of online nodes with the image
	err := c.Transaction(func(tx *ClusterTx) error {
		offlineThreshold, err := tx.GetNodeOfflineThreshold()
		if err != nil {
			return err
		}
		localAddress, err = tx.GetLocalNodeAddress()
		if err != nil {
			return err
		}
		allAddresses, err := query.SelectStrings(tx.tx, stmt, fingerprint)
		if err != nil {
			return err
		}
		// Keep only nodes that are online (the local node is always
		// kept, regardless of its heartbeat state).
		for _, address := range allAddresses {
			node, err := tx.GetNodeByAddress(address)
			if err != nil {
				return err
			}
			if address != localAddress && node.IsOffline(offlineThreshold) {
				continue
			}
			addresses = append(addresses, address)
		}
		return err
	})
	if err != nil {
		return "", err
	}
	if len(addresses) == 0 {
		return "", fmt.Errorf("image not available on any online node")
	}
	// Prefer the local copy: signalled by returning an empty address.
	for _, address := range addresses {
		if address == localAddress {
			return "", nil
		}
	}
	return addresses[0], nil
}
// AddImageToLocalNode creates a new entry in the images_nodes table for
// tracking that the local node has the given image.
func (c *Cluster) AddImageToLocalNode(project, fingerprint string) error {
	// Resolve the image ID with an exact fingerprint match.
	imageID, _, err := c.GetImage(project, fingerprint, false, true)
	if err != nil {
		return err
	}
	return c.Transaction(func(tx *ClusterTx) error {
		_, err := tx.tx.Exec("INSERT INTO images_nodes(image_id, node_id) VALUES(?, ?)", imageID, c.nodeID)
		return err
	})
}
// DeleteImage deletes the image with the given ID.
//
// Simplified: the previous version checked exec's error only to
// return nil on success; the error can be returned directly.
func (c *Cluster) DeleteImage(id int) error {
	return exec(c, "DELETE FROM images WHERE id=?", id)
}
// GetImageAliases returns the names of the aliases of all images.
func (c *Cluster) GetImageAliases(project string) ([]string, error) {
	// Fall back to the default project if this one has no images of its own.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	q := `
SELECT images_aliases.name
FROM images_aliases
JOIN projects ON projects.id=images_aliases.project_id
WHERE projects.name=?
`
	var name string
	args := []interface{}{project}
	outs := []interface{}{name}

	rows, err := queryScan(c, q, args, outs)
	if err != nil {
		return nil, err
	}

	names := make([]string, 0, len(rows))
	for _, row := range rows {
		names = append(names, row[0].(string))
	}

	return names, nil
}
// GetImageAlias returns the alias with the given name in the given project.
//
// The returned entry carries the alias name, the target image fingerprint,
// the description and the image type. When isTrustedClient is false, only
// aliases pointing at public images are considered.
func (c *Cluster) GetImageAlias(project, name string, isTrustedClient bool) (int, api.ImageAliasesEntry, error) {
	id := -1
	entry := api.ImageAliasesEntry{}

	// Fall back to the default project if this one has no images of its own.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return id, entry, err
	}

	q := `SELECT images_aliases.id, images.fingerprint, images.type, images_aliases.description
FROM images_aliases
INNER JOIN images
ON images_aliases.image_id=images.id
INNER JOIN projects
ON images_aliases.project_id=projects.id
WHERE projects.name=? AND images_aliases.name=?`
	if !isTrustedClient {
		// Untrusted clients may only see aliases of public images.
		q = q + ` AND images.public=1`
	}

	var fingerprint, description string
	var imageType int

	arg1 := []interface{}{project, name}
	arg2 := []interface{}{&id, &fingerprint, &imageType, &description}
	err = dbQueryRowScan(c, q, arg1, arg2)
	if err != nil {
		if err == sql.ErrNoRows {
			return -1, entry, ErrNoSuchObject
		}

		return -1, entry, err
	}

	entry.Name = name
	entry.Target = fingerprint
	entry.Description = description
	entry.Type = instancetype.Type(imageType).String()

	return id, entry, nil
}
// RenameImageAlias renames the alias with the given ID.
func (c *Cluster) RenameImageAlias(id int, name string) error {
	return exec(c, "UPDATE images_aliases SET name=? WHERE id=?", name, id)
}
// DeleteImageAlias deletes the alias with the given name.
func (c *Cluster) DeleteImageAlias(project, name string) error {
	// Fall back to the default project if this one has no images of its own.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return err
	}

	return exec(c, `
DELETE
FROM images_aliases
WHERE project_id = (SELECT id FROM projects WHERE name = ?) AND name = ?
`, project, name)
}
// MoveImageAlias changes the image ID associated with an alias.
func (c *Cluster) MoveImageAlias(source int, destination int) error {
	return exec(c, "UPDATE images_aliases SET image_id=? WHERE image_id=?", destination, source)
}
// CreateImageAlias inserts an alias into the database.
// (Fixes the "ento" typo in the original comment.)
func (c *Cluster) CreateImageAlias(project, name string, imageID int, desc string) error {
	// Fall back to the default project if this one has no images of its own.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return err
	}

	stmt := `
INSERT INTO images_aliases (name, image_id, description, project_id)
VALUES (?, ?, ?, (SELECT id FROM projects WHERE name = ?))
`
	return exec(c, stmt, name, imageID, desc, project)
}
// UpdateImageAlias updates the alias with the given ID, replacing both its
// target image and its description.
func (c *Cluster) UpdateImageAlias(id int, imageID int, desc string) error {
	q := `UPDATE images_aliases SET image_id=?, description=? WHERE id=?`
	return exec(c, q, imageID, desc, id)
}
// CopyDefaultImageProfiles copies default profiles from id to new_id.
//
// The profile associations of newID are replaced wholesale with a copy of the
// associations of id, inside a single transaction.
func (c *Cluster) CopyDefaultImageProfiles(id int, newID int) error {
	// The original re-checked the Transaction error just to return it
	// unchanged; returning it directly is equivalent.
	return c.Transaction(func(tx *ClusterTx) error {
		// Delete all current associations.
		_, err := tx.tx.Exec("DELETE FROM images_profiles WHERE image_id=?", newID)
		if err != nil {
			return err
		}

		// Copy the entries over.
		_, err = tx.tx.Exec("INSERT INTO images_profiles (image_id, profile_id) SELECT ?, profile_id FROM images_profiles WHERE image_id=?", newID, id)
		if err != nil {
			return err
		}

		return nil
	})
}
// UpdateImageLastUseDate updates the last_use_date field of the image with the
// given fingerprint.
func (c *Cluster) UpdateImageLastUseDate(fingerprint string, date time.Time) error {
	return exec(c, `UPDATE images SET last_use_date=? WHERE fingerprint=?`, date, fingerprint)
}
// InitImageLastUseDate inits the last_use_date field of the image with the given fingerprint.
func (c *Cluster) InitImageLastUseDate(fingerprint string) error {
	// Mark the image as cached and stamp it with the current Unix time.
	return exec(c, `UPDATE images SET cached=1, last_use_date=strftime("%s") WHERE fingerprint=?`, fingerprint)
}
// UpdateImage updates the image with the given ID.
//
// The filename, size, visibility, auto-update flag, architecture and the
// creation/expiry dates are replaced, as are all image properties. When both
// a non-empty project and a non-nil profileIds slice are given, the image's
// profile associations within that project are replaced as well. Everything
// runs in a single transaction.
func (c *Cluster) UpdateImage(id int, fname string, sz int64, public bool, autoUpdate bool, architecture string, createdAt time.Time, expiresAt time.Time, properties map[string]string, project string, profileIds []int64) error {
	// Unknown architecture names are stored as 0 instead of failing.
	arch, err := osarch.ArchitectureId(architecture)
	if err != nil {
		arch = 0
	}

	err = c.Transaction(func(tx *ClusterTx) error {
		// SQLite stores booleans as integers.
		publicInt := 0
		if public {
			publicInt = 1
		}

		autoUpdateInt := 0
		if autoUpdate {
			autoUpdateInt = 1
		}

		stmt, err := tx.tx.Prepare(`UPDATE images SET filename=?, size=?, public=?, auto_update=?, architecture=?, creation_date=?, expiry_date=? WHERE id=?`)
		if err != nil {
			return err
		}
		defer stmt.Close()

		_, err = stmt.Exec(fname, sz, publicInt, autoUpdateInt, arch, createdAt, expiresAt, id)
		if err != nil {
			return err
		}

		// Replace all existing properties with the given set.
		_, err = tx.tx.Exec(`DELETE FROM images_properties WHERE image_id=?`, id)
		if err != nil {
			return err
		}

		stmt2, err := tx.tx.Prepare(`INSERT INTO images_properties (image_id, type, key, value) VALUES (?, ?, ?, ?)`)
		if err != nil {
			return err
		}
		defer stmt2.Close()

		for key, value := range properties {
			_, err = stmt2.Exec(id, 0, key, value)
			if err != nil {
				return err
			}
		}

		if project != "" && profileIds != nil {
			// Fall back to the default project if this one has no
			// profiles of its own.
			enabled, err := tx.ProjectHasProfiles(project)
			if err != nil {
				return err
			}
			if !enabled {
				project = "default"
			}

			// Drop only the associations that belong to this project,
			// leaving other projects' associations intact.
			q := `DELETE FROM images_profiles
WHERE image_id = ? AND profile_id IN (
SELECT profiles.id FROM profiles
JOIN projects ON profiles.project_id = projects.id
WHERE projects.name = ?
)`
			_, err = tx.tx.Exec(q, id, project)
			if err != nil {
				return err
			}

			stmt3, err := tx.tx.Prepare(`INSERT INTO images_profiles (image_id, profile_id) VALUES (?, ?)`)
			if err != nil {
				return err
			}
			defer stmt3.Close()

			for _, profileID := range profileIds {
				_, err = stmt3.Exec(id, profileID)
				if err != nil {
					return err
				}
			}
		}

		return nil
	})

	return err
}
// CreateImage creates a new image.
//
// The new image row is linked to the project, attached to the project's
// default profile, and recorded as present on this node. An empty typeName
// means instancetype.Any.
func (c *Cluster) CreateImage(project, fp string, fname string, sz int64, public bool, autoUpdate bool, architecture string, createdAt time.Time, expiresAt time.Time, properties map[string]string, typeName string) error {
	// Remember the original project: the default profile is looked up there
	// even when the image itself falls back to the default project.
	profileProject := project
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Unknown architecture names are stored as 0 instead of failing.
	arch, err := osarch.ArchitectureId(architecture)
	if err != nil {
		arch = 0
	}

	imageType := instancetype.Any
	if typeName != "" {
		var err error
		imageType, err = instancetype.New(typeName)
		if err != nil {
			return err
		}
	}

	if imageType == -1 {
		return fmt.Errorf("Invalid image type: %v", typeName)
	}

	defaultProfileID, _, err := c.GetProfile(profileProject, "default")
	if err != nil {
		return err
	}

	err = c.Transaction(func(tx *ClusterTx) error {
		// SQLite stores booleans as integers.
		publicInt := 0
		if public {
			publicInt = 1
		}

		autoUpdateInt := 0
		if autoUpdate {
			autoUpdateInt = 1
		}

		stmt, err := tx.tx.Prepare(`INSERT INTO images (project_id, fingerprint, filename, size, public, auto_update, architecture, creation_date, expiry_date, upload_date, type) VALUES ((SELECT id FROM projects WHERE name = ?), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`)
		if err != nil {
			return err
		}
		defer stmt.Close()

		result, err := stmt.Exec(project, fp, fname, sz, publicInt, autoUpdateInt, arch, createdAt, expiresAt, time.Now().UTC(), imageType)
		if err != nil {
			return err
		}

		id64, err := result.LastInsertId()
		if err != nil {
			return err
		}
		id := int(id64)

		if len(properties) > 0 {
			pstmt, err := tx.tx.Prepare(`INSERT INTO images_properties (image_id, type, key, value) VALUES (?, 0, ?, ?)`)
			if err != nil {
				return err
			}
			defer pstmt.Close()

			for k, v := range properties {
				// We can assume that there is just one value
				// per key.
				_, err = pstmt.Exec(id, k, v)
				if err != nil {
					return err
				}
			}
		}

		// Attach the project's default profile to the new image.
		_, err = tx.tx.Exec("INSERT INTO images_profiles(image_id, profile_id) VALUES(?, ?)", id, defaultProfileID)
		if err != nil {
			return err
		}

		// Record that this node has a copy of the image.
		_, err = tx.tx.Exec("INSERT INTO images_nodes(image_id, node_id) VALUES(?, ?)", id, c.nodeID)
		if err != nil {
			return err
		}

		return nil
	})

	return err
}
// GetPoolsWithImage get the IDs of all storage pools on which a given image
// exists as an image volume on this node.
// (Original comment wrongly said "names"; the function returns pool IDs.)
func (c *Cluster) GetPoolsWithImage(imageFingerprint string) ([]int64, error) {
	poolID := int64(-1)
	query := "SELECT storage_pool_id FROM storage_volumes WHERE node_id=? AND name=? AND type=?"
	inargs := []interface{}{c.nodeID, imageFingerprint, StoragePoolVolumeTypeImage}
	outargs := []interface{}{poolID}

	result, err := queryScan(c, query, inargs, outargs)
	if err != nil {
		return []int64{}, err
	}

	poolIDs := []int64{}
	for _, r := range result {
		poolIDs = append(poolIDs, r[0].(int64))
	}

	return poolIDs, nil
}
// GetPoolNamesFromIDs returns the names of the storage pools with the given IDs.
// (Original comment was copy-pasted from GetPoolsWithImage and was wrong.)
func (c *Cluster) GetPoolNamesFromIDs(poolIDs []int64) ([]string, error) {
	var poolName string
	query := "SELECT name FROM storage_pools WHERE id=?"

	poolNames := []string{}
	// One query per ID; IDs without a matching pool contribute no name.
	for _, poolID := range poolIDs {
		inargs := []interface{}{poolID}
		outargs := []interface{}{poolName}

		result, err := queryScan(c, query, inargs, outargs)
		if err != nil {
			return []string{}, err
		}

		for _, r := range result {
			poolNames = append(poolNames, r[0].(string))
		}
	}

	return poolNames, nil
}
// UpdateImageUploadDate updates the upload_date column of an image row.
func (c *Cluster) UpdateImageUploadDate(id int, uploadedAt time.Time) error {
	err := exec(c, "UPDATE images SET upload_date=? WHERE id=?", uploadedAt, id)
	return err
}
// GetImagesOnLocalNode returns all images that the local LXD node has.
// The map is keyed by image fingerprint; each value lists the projects the
// image belongs to (see GetImagesOnNode).
func (c *Cluster) GetImagesOnLocalNode() (map[string][]string, error) {
	return c.GetImagesOnNode(c.nodeID)
}
// GetImagesOnNode returns all images that the node with the given id has.
// The map is keyed by image fingerprint; each value lists the projects the
// image belongs to.
func (c *Cluster) GetImagesOnNode(id int64) (map[string][]string, error) {
	images := make(map[string][]string) // key is fingerprint, value is list of projects
	err := c.Transaction(func(tx *ClusterTx) error {
		stmt := `
SELECT images.fingerprint, projects.name FROM images
LEFT JOIN images_nodes ON images.id = images_nodes.image_id
LEFT JOIN nodes ON images_nodes.node_id = nodes.id
LEFT JOIN projects ON images.project_id = projects.id
WHERE nodes.id = ?
`
		rows, err := tx.tx.Query(stmt, id)
		if err != nil {
			return err
		}
		// Fix: the original never closed the result set, leaking it when
		// Scan failed mid-iteration.
		defer rows.Close()

		var fingerprint string
		var projectName string
		for rows.Next() {
			err := rows.Scan(&fingerprint, &projectName)
			if err != nil {
				return err
			}

			images[fingerprint] = append(images[fingerprint], projectName)
		}

		return rows.Err()
	})
	return images, err
}
// GetNodesWithImage returns the addresses of online nodes which already have the image.
func (c *Cluster) GetNodesWithImage(fingerprint string) ([]string, error) {
	q := `
SELECT DISTINCT nodes.address FROM nodes
LEFT JOIN images_nodes ON images_nodes.node_id = nodes.id
LEFT JOIN images ON images_nodes.image_id = images.id
WHERE images.fingerprint = ?
`
	// Offline nodes are filtered out by getNodesByImageFingerprint.
	return c.getNodesByImageFingerprint(q, fingerprint)
}
// GetNodesWithoutImage returns the addresses of online nodes which don't have the image.
func (c *Cluster) GetNodesWithoutImage(fingerprint string) ([]string, error) {
	// Select the complement of the node set used by GetNodesWithImage.
	q := `
SELECT DISTINCT nodes.address FROM nodes WHERE nodes.address NOT IN (
SELECT DISTINCT nodes.address FROM nodes
LEFT JOIN images_nodes ON images_nodes.node_id = nodes.id
LEFT JOIN images ON images_nodes.image_id = images.id
WHERE images.fingerprint = ?)
`
	// Offline nodes are filtered out by getNodesByImageFingerprint.
	return c.getNodesByImageFingerprint(q, fingerprint)
}
// getNodesByImageFingerprint runs the given address-selecting statement with
// the fingerprint as its argument and keeps only addresses of online nodes.
func (c *Cluster) getNodesByImageFingerprint(stmt, fingerprint string) ([]string, error) {
	var addresses []string // Addresses of online nodes with the image

	err := c.Transaction(func(tx *ClusterTx) error {
		threshold, err := tx.GetNodeOfflineThreshold()
		if err != nil {
			return err
		}

		candidates, err := query.SelectStrings(tx.tx, stmt, fingerprint)
		if err != nil {
			return err
		}

		for _, candidate := range candidates {
			node, err := tx.GetNodeByAddress(candidate)
			if err != nil {
				return err
			}

			if node.IsOffline(threshold) {
				continue
			}

			addresses = append(addresses, candidate)
		}

		return nil
	})

	return addresses, err
}
lxd/db: Use the generated GetImages code to implement GetExpiredImages
Signed-off-by: Free Ekanayaka <04111f73b2d444cf053b50d877d79556bf34f55a@canonical.com>
// +build linux,cgo,!agent
package db
import (
"database/sql"
"fmt"
"time"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/db/query"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/osarch"
)
// Code generation directives.
//
//go:generate -command mapper lxd-generate db mapper -t images.mapper.go
//go:generate mapper reset
//
//go:generate mapper stmt -p db -e image objects
//go:generate mapper stmt -p db -e image objects-by-Project
//go:generate mapper stmt -p db -e image objects-by-Project-and-Public
//go:generate mapper stmt -p db -e image objects-by-Project-and-Fingerprint
//go:generate mapper stmt -p db -e image objects-by-Fingerprint
//go:generate mapper stmt -p db -e image objects-by-Cached
//
//go:generate mapper method -p db -e image List
//go:generate mapper method -p db -e image Get
// Image is a value object holding db-related details about an image.
//
// The db struct tags drive the lxd-generate mapper (see the go:generate
// directives in this file): Project and Fingerprint form the primary key,
// and Fingerprint is compared with LIKE so partial fingerprints match.
type Image struct {
	ID           int
	Project      string `db:"primary=yes&join=projects.name"`
	Fingerprint  string `db:"primary=yes&comparison=like"`
	Type         int
	Filename     string
	Size         int64
	Public       bool
	Architecture int
	CreationDate time.Time
	ExpiryDate   time.Time
	UploadDate   time.Time
	Cached       bool
	LastUseDate  time.Time
	AutoUpdate   int
}
// ImageFilter can be used to filter results yielded by GetImages.
// Fields correspond to the objects-by-* statement variants declared in the
// go:generate mapper directives.
type ImageFilter struct {
	Project     string
	Fingerprint string // Matched with LIKE
	Public      bool
	Cached      bool
}
// ImageSourceProtocol maps image source protocol codes to human-readable names.
// The integer codes are what is stored in the images_source.protocol column.
var ImageSourceProtocol = map[int]string{
	0: "lxd",
	1: "direct",
	2: "simplestreams",
}
// GetLocalImagesFingerprints returns the fingerprints of all local images.
// "Local" means images linked to this cluster member through images_nodes.
func (c *ClusterTx) GetLocalImagesFingerprints() ([]string, error) {
	q := `
SELECT images.fingerprint
FROM images_nodes
JOIN images ON images.id = images_nodes.image_id
WHERE node_id = ?
`
	return query.SelectStrings(c.tx, q, c.nodeID)
}
// GetImagesFingerprints returns the fingerprints of all images (optionally
// only the public ones).
func (c *Cluster) GetImagesFingerprints(project string, public bool) ([]string, error) {
	q := `
SELECT fingerprint
FROM images
JOIN projects ON projects.id = images.project_id
WHERE projects.name = ?
`
	// Fix: "public == true" replaced with the idiomatic boolean test.
	if public {
		q += " AND public=1"
	}

	var fingerprints []string

	err := c.Transaction(func(tx *ClusterTx) error {
		// Fall back to the default project if this one has no images
		// of its own.
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}

		fingerprints, err = query.SelectStrings(tx.tx, q, project)
		return err
	})
	if err != nil {
		return nil, err
	}

	return fingerprints, nil
}
// ExpiredImage is used to store expired image info returned by GetExpiredImages.
type ExpiredImage struct {
	Fingerprint string
	ProjectName string
}
// GetExpiredImages returns the fingerprint and project name of all cached
// images whose expiry window (in days) has elapsed.
func (c *Cluster) GetExpiredImages(expiry int64) ([]ExpiredImage, error) {
	var images []Image
	err := c.Transaction(func(tx *ClusterTx) error {
		var err error
		images, err = tx.GetImages(ImageFilter{Cached: true})
		return err
	})
	if err != nil {
		return nil, err
	}

	results := []ExpiredImage{}
	for _, img := range images {
		// Expiry is measured from the last use when known, otherwise
		// from the upload time.
		reference := img.UploadDate
		if !img.LastUseDate.IsZero() {
			reference = img.LastUseDate
		}

		// The expiry argument is a number of days.
		expiresAt := reference.Add(time.Duration(expiry*24) * time.Hour)

		// Keep only images that have already expired.
		if expiresAt.After(time.Now()) {
			continue
		}

		results = append(results, ExpiredImage{
			Fingerprint: img.Fingerprint,
			ProjectName: img.Project,
		})
	}

	return results, nil
}
// CreateImageSource inserts a new image source.
func (c *Cluster) CreateImageSource(id int, server string, protocol string, certificate string, alias string) error {
	stmt := `INSERT INTO images_source (image_id, server, protocol, certificate, alias) values (?, ?, ?, ?, ?)`

	// Translate the protocol name into its numeric code.
	code := -1
	for candidate, name := range ImageSourceProtocol {
		if name == protocol {
			code = candidate
		}
	}

	if code == -1 {
		return fmt.Errorf("Invalid protocol: %s", protocol)
	}

	return exec(c, stmt, id, server, code, certificate, alias)
}
// GetImageSource returns the image source with the given ID.
// Returns ErrNoSuchObject when the image has no source entry.
func (c *Cluster) GetImageSource(imageID int) (int, api.ImageSource, error) {
	q := `SELECT id, server, protocol, certificate, alias FROM images_source WHERE image_id=?`
	id := 0
	protocolInt := -1
	result := api.ImageSource{}

	arg1 := []interface{}{imageID}
	arg2 := []interface{}{&id, &result.Server, &protocolInt, &result.Certificate, &result.Alias}
	err := dbQueryRowScan(c, q, arg1, arg2)
	if err != nil {
		if err == sql.ErrNoRows {
			return -1, api.ImageSource{}, ErrNoSuchObject
		}

		return -1, api.ImageSource{}, err
	}

	// Translate the numeric protocol code into its name.
	protocol, found := ImageSourceProtocol[protocolInt]
	if !found {
		return -1, api.ImageSource{}, fmt.Errorf("Invalid protocol: %d", protocolInt)
	}

	result.Protocol = protocol

	return id, result, nil
}
// ImageSourceGetCachedFingerprint tries to find a source entry of a locally
// cached image that matches the given remote details (server, protocol and
// alias). Return the fingerprint linked to the matching entry, if any.
func (c *Cluster) ImageSourceGetCachedFingerprint(server string, protocol string, alias string, typeName string, architecture int) (string, error) {
	// Resolve the image type name; an empty name means "any type".
	imageType := instancetype.Any
	if typeName != "" {
		var err error
		imageType, err = instancetype.New(typeName)
		if err != nil {
			return "", err
		}
	}

	// Translate the protocol name into its numeric code.
	protocolInt := -1
	for protoInt, protoString := range ImageSourceProtocol {
		if protoString == protocol {
			protocolInt = protoInt
		}
	}

	if protocolInt == -1 {
		return "", fmt.Errorf("Invalid protocol: %s", protocol)
	}

	// Only auto-update images qualify as cache entries.
	q := `SELECT images.fingerprint
FROM images_source
INNER JOIN images
ON images_source.image_id=images.id
WHERE server=? AND protocol=? AND alias=? AND auto_update=1 AND images.architecture=?
`
	arg1 := []interface{}{server, protocolInt, alias, architecture}
	// Narrow by type only when one was requested; the base query already
	// ends with a newline, so the appended clause stays well-formed.
	if imageType != instancetype.Any {
		q += "AND images.type=?\n"
		arg1 = []interface{}{server, protocolInt, alias, architecture, imageType}
	}

	// Prefer the most recently created image.
	q += "ORDER BY creation_date DESC"

	fingerprint := ""
	arg2 := []interface{}{&fingerprint}
	err := dbQueryRowScan(c, q, arg1, arg2)
	if err != nil {
		if err == sql.ErrNoRows {
			return "", ErrNoSuchObject
		}

		return "", err
	}

	return fingerprint, nil
}
// ImageExists returns whether an image with the given fingerprint exists.
func (c *Cluster) ImageExists(project string, fingerprint string) (bool, error) {
	// Fall back to the default project if this one has no images of its own.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return false, err
	}

	// Note: `q` avoids shadowing the imported `query` package.
	q := `
SELECT COUNT(*) > 0
FROM images
JOIN projects ON projects.id = images.project_id
WHERE projects.name = ? AND fingerprint=?
`
	var exists bool
	args := []interface{}{project, fingerprint}
	outs := []interface{}{&exists}
	err = dbQueryRowScan(c, q, args, outs)
	if err == sql.ErrNoRows {
		return exists, ErrNoSuchObject
	}

	return exists, err
}
// ImageIsReferencedByOtherProjects returns true if the image with the given
// fingerprint is referenced by projects other than the given one.
func (c *Cluster) ImageIsReferencedByOtherProjects(project string, fingerprint string) (bool, error) {
	// Fall back to the default project if this one has no images of its own.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return false, err
	}

	q := `
SELECT COUNT(*) > 0
FROM images
JOIN projects ON projects.id = images.project_id
WHERE projects.name != ? AND fingerprint=?
`
	var referenced bool
	args := []interface{}{project, fingerprint}
	outs := []interface{}{&referenced}
	err = dbQueryRowScan(c, q, args, outs)
	if err == sql.ErrNoRows {
		// No row at all means no other project references the image.
		return referenced, nil
	}

	return referenced, err
}
// GetImage gets an Image object from the database.
// If strictMatching is false, The fingerprint argument will be queried with a LIKE query, means you can
// pass a shortform and will get the full fingerprint.
// There can never be more than one image with a given fingerprint, as it is
// enforced by a UNIQUE constraint in the schema.
func (c *Cluster) GetImage(project, fingerprint string, public bool, strictMatching bool) (int, *api.Image, error) {
	// Remember the original project: profiles are filled from there even
	// when images fall back to the default project.
	profileProject := project
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return -1, nil, err
	}

	var create, expire, used, upload *time.Time // These hold the db-returned times

	// The object we'll actually return
	image := api.Image{}
	id := -1
	arch := -1
	imageType := -1

	// These two humongous things will be filled by the call to DbQueryRowScan
	outfmt := []interface{}{&id, &image.Fingerprint, &image.Filename,
		&image.Size, &image.Cached, &image.Public, &image.AutoUpdate, &arch,
		&create, &expire, &used, &upload, &imageType}

	inargs := []interface{}{project}
	query := `
SELECT
images.id, fingerprint, filename, size, cached, public, auto_update, architecture,
creation_date, expiry_date, last_use_date, upload_date, type
FROM images
JOIN projects ON projects.id = images.project_id
WHERE projects.name = ?`
	if strictMatching {
		inargs = append(inargs, fingerprint)
		query += " AND fingerprint = ?"
	} else {
		// Prefix match: callers may pass a shortform fingerprint.
		inargs = append(inargs, fingerprint+"%")
		query += " AND fingerprint LIKE ?"
	}

	if public {
		query += " AND public=1"
	}

	err = dbQueryRowScan(c, query, inargs, outfmt)
	if err != nil {
		if err == sql.ErrNoRows {
			return -1, nil, ErrNoSuchObject
		}

		return -1, nil, err // Likely: there are no rows for this fingerprint
	}

	// Validate we only have a single match
	if !strictMatching {
		query = `
SELECT COUNT(images.id)
FROM images
JOIN projects ON projects.id = images.project_id
WHERE projects.name = ?
AND fingerprint LIKE ?
`
		count := 0
		outfmt := []interface{}{&count}

		err = dbQueryRowScan(c, query, inargs, outfmt)
		if err != nil {
			return -1, nil, err
		}

		if count > 1 {
			return -1, nil, fmt.Errorf("Partial fingerprint matches more than one image")
		}
	}

	// Fill in properties, aliases, dates and source.
	err = c.imageFill(id, &image, create, expire, used, upload, arch, imageType)
	if err != nil {
		return -1, nil, errors.Wrapf(err, "Fill image details")
	}

	// Profiles are resolved against the original (pre-fallback) project.
	err = c.imageFillProfiles(id, &image, profileProject)
	if err != nil {
		return -1, nil, errors.Wrapf(err, "Fill image profiles")
	}

	return id, &image, nil
}
// GetImageFromAnyProject returns an image matching the given fingerprint, if
// it exists in any project. Unlike GetImage, the fingerprint must match
// exactly and profiles are not filled (no project context is available).
func (c *Cluster) GetImageFromAnyProject(fingerprint string) (int, *api.Image, error) {
	var create, expire, used, upload *time.Time // These hold the db-returned times

	// The object we'll actually return
	image := api.Image{}
	id := -1
	arch := -1
	imageType := -1

	// These two humongous things will be filled by the call to DbQueryRowScan
	outfmt := []interface{}{&id, &image.Fingerprint, &image.Filename,
		&image.Size, &image.Cached, &image.Public, &image.AutoUpdate, &arch,
		&create, &expire, &used, &upload, &imageType}
	inargs := []interface{}{fingerprint}
	query := `
SELECT
images.id, fingerprint, filename, size, cached, public, auto_update, architecture,
creation_date, expiry_date, last_use_date, upload_date, type
FROM images
WHERE fingerprint = ?
LIMIT 1`
	err := dbQueryRowScan(c, query, inargs, outfmt)
	if err != nil {
		if err == sql.ErrNoRows {
			return -1, nil, ErrNoSuchObject
		}

		return -1, nil, err // Likely: there are no rows for this fingerprint
	}

	err = c.imageFill(id, &image, create, expire, used, upload, arch, imageType)
	if err != nil {
		return -1, nil, errors.Wrapf(err, "Fill image details")
	}

	return id, &image, nil
}
// Fill extra image fields such as properties and alias. This is called after
// fetching a single row from the images table.
func (c *Cluster) imageFill(id int, image *api.Image, create, expire, used, upload *time.Time, arch int, imageType int) error {
	// Some of the dates can be nil in the DB, let's process them.
	if create != nil {
		image.CreatedAt = *create
	} else {
		image.CreatedAt = time.Time{}
	}

	if expire != nil {
		image.ExpiresAt = *expire
	} else {
		image.ExpiresAt = time.Time{}
	}

	if used != nil {
		image.LastUsedAt = *used
	} else {
		image.LastUsedAt = time.Time{}
	}

	image.Architecture, _ = osarch.ArchitectureName(arch)
	image.Type = instancetype.Type(imageType).String()

	// The upload date is enforced by NOT NULL in the schema, so it can never be nil.
	image.UploadedAt = *upload

	// Get the properties
	q := "SELECT key, value FROM images_properties where image_id=?"
	var key, value, name, desc string
	inargs := []interface{}{id}
	outfmt := []interface{}{key, value}
	results, err := queryScan(c, q, inargs, outfmt)
	if err != nil {
		return err
	}

	properties := map[string]string{}
	for _, r := range results {
		key = r[0].(string)
		value = r[1].(string)
		properties[key] = value
	}
	image.Properties = properties

	// Get the aliases
	q = "SELECT name, description FROM images_aliases WHERE image_id=?"
	inargs = []interface{}{id}
	outfmt = []interface{}{name, desc}
	results, err = queryScan(c, q, inargs, outfmt)
	if err != nil {
		return err
	}

	aliases := []api.ImageAlias{}
	for _, r := range results {
		name = r[0].(string)
		desc = r[1].(string)
		a := api.ImageAlias{Name: name, Description: desc}
		aliases = append(aliases, a)
	}
	image.Aliases = aliases

	// Attach the image source if one can be fetched; errors (including a
	// missing source entry) are deliberately ignored here and simply leave
	// UpdateSource unset.
	_, source, err := c.GetImageSource(id)
	if err == nil {
		image.UpdateSource = &source
	}

	return nil
}
// imageFillProfiles fills the Profiles field of the given image with the
// names of the profiles associated with it within the given project.
func (c *Cluster) imageFillProfiles(id int, image *api.Image, project string) error {
	// Check which project name to use
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasProfiles(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has profiles")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Get the profiles
	q := `
SELECT profiles.name FROM profiles
JOIN images_profiles ON images_profiles.profile_id = profiles.id
JOIN projects ON profiles.project_id = projects.id
WHERE images_profiles.image_id = ? AND projects.name = ?
`
	var name string
	inargs := []interface{}{id, project}
	outfmt := []interface{}{name}
	results, err := queryScan(c, q, inargs, outfmt)
	if err != nil {
		return err
	}

	profiles := make([]string, 0)
	for _, r := range results {
		name = r[0].(string)
		profiles = append(profiles, name)
	}
	image.Profiles = profiles

	return nil
}
// LocateImage returns the address of an online node that has a local copy of
// the given image, or an empty string if the image is already available on this
// node.
//
// If the image is not available on any online node, an error is returned.
func (c *Cluster) LocateImage(fingerprint string) (string, error) {
	stmt := `
SELECT nodes.address FROM nodes
LEFT JOIN images_nodes ON images_nodes.node_id = nodes.id
LEFT JOIN images ON images_nodes.image_id = images.id
WHERE images.fingerprint = ?
`
	var localAddress string // Address of this node
	var addresses []string  // Addresses of online nodes with the image

	err := c.Transaction(func(tx *ClusterTx) error {
		offlineThreshold, err := tx.GetNodeOfflineThreshold()
		if err != nil {
			return err
		}

		localAddress, err = tx.GetLocalNodeAddress()
		if err != nil {
			return err
		}

		allAddresses, err := query.SelectStrings(tx.tx, stmt, fingerprint)
		if err != nil {
			return err
		}

		for _, address := range allAddresses {
			node, err := tx.GetNodeByAddress(address)
			if err != nil {
				return err
			}

			// Skip offline nodes, unless the address is our own.
			if address != localAddress && node.IsOffline(offlineThreshold) {
				continue
			}

			addresses = append(addresses, address)
		}

		return err
	})
	if err != nil {
		return "", err
	}

	if len(addresses) == 0 {
		return "", fmt.Errorf("image not available on any online node")
	}

	// The image is already here: signal that with an empty address.
	for _, address := range addresses {
		if address == localAddress {
			return "", nil
		}
	}

	return addresses[0], nil
}
// AddImageToLocalNode creates a new entry in the images_nodes table for
// tracking that the local node has the given image.
func (c *Cluster) AddImageToLocalNode(project, fingerprint string) error {
	// Resolve the image ID (strict fingerprint match, public not required).
	imageID, _, err := c.GetImage(project, fingerprint, false, true)
	if err != nil {
		return err
	}

	err = c.Transaction(func(tx *ClusterTx) error {
		_, err := tx.tx.Exec("INSERT INTO images_nodes(image_id, node_id) VALUES(?, ?)", imageID, c.nodeID)
		return err
	})
	return err
}
// DeleteImage deletes the image with the given ID.
func (c *Cluster) DeleteImage(id int) error {
	// The original wrapped exec in a redundant nil-check; returning the
	// error directly is equivalent and idiomatic.
	return exec(c, "DELETE FROM images WHERE id=?", id)
}
// GetImageAliases returns the names of the aliases of all images.
func (c *Cluster) GetImageAliases(project string) ([]string, error) {
	// Fall back to the default project if this one has no images of its own.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	q := `
SELECT images_aliases.name
FROM images_aliases
JOIN projects ON projects.id=images_aliases.project_id
WHERE projects.name=?
`
	var name string
	inargs := []interface{}{project}
	outfmt := []interface{}{name}
	results, err := queryScan(c, q, inargs, outfmt)
	if err != nil {
		return nil, err
	}

	names := []string{}
	for _, res := range results {
		names = append(names, res[0].(string))
	}

	return names, nil
}
// GetImageAlias returns the alias with the given name in the given project.
//
// The returned entry carries the alias name, the target image fingerprint,
// the description and the image type. When isTrustedClient is false, only
// aliases pointing at public images are considered.
func (c *Cluster) GetImageAlias(project, name string, isTrustedClient bool) (int, api.ImageAliasesEntry, error) {
	id := -1
	entry := api.ImageAliasesEntry{}

	// Fall back to the default project if this one has no images of its own.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return id, entry, err
	}

	q := `SELECT images_aliases.id, images.fingerprint, images.type, images_aliases.description
FROM images_aliases
INNER JOIN images
ON images_aliases.image_id=images.id
INNER JOIN projects
ON images_aliases.project_id=projects.id
WHERE projects.name=? AND images_aliases.name=?`
	if !isTrustedClient {
		// Untrusted clients may only see aliases of public images.
		q = q + ` AND images.public=1`
	}

	var fingerprint, description string
	var imageType int

	arg1 := []interface{}{project, name}
	arg2 := []interface{}{&id, &fingerprint, &imageType, &description}
	err = dbQueryRowScan(c, q, arg1, arg2)
	if err != nil {
		if err == sql.ErrNoRows {
			return -1, entry, ErrNoSuchObject
		}

		return -1, entry, err
	}

	entry.Name = name
	entry.Target = fingerprint
	entry.Description = description
	entry.Type = instancetype.Type(imageType).String()

	return id, entry, nil
}
// RenameImageAlias renames the alias with the given ID.
// Only the name column changes; the target image and description are untouched.
func (c *Cluster) RenameImageAlias(id int, name string) error {
	err := exec(c, "UPDATE images_aliases SET name=? WHERE id=?", name, id)
	return err
}
// DeleteImageAlias deletes the alias with the given name.
func (c *Cluster) DeleteImageAlias(project, name string) error {
	// Projects without their own images store aliases in the default project.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return err
	}

	return exec(c, `
DELETE
FROM images_aliases
WHERE project_id = (SELECT id FROM projects WHERE name = ?) AND name = ?
`, project, name)
}
// MoveImageAlias changes the image ID associated with an alias.
func (c *Cluster) MoveImageAlias(source int, destination int) error {
	return exec(c, "UPDATE images_aliases SET image_id=? WHERE image_id=?", destination, source)
}
// CreateImageAlias inserts an alias into the database, associating it with
// the given image ID within the given project.
func (c *Cluster) CreateImageAlias(project, name string, imageID int, desc string) error {
	// Projects without their own images store aliases in the default project.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return err
	}
	stmt := `
INSERT INTO images_aliases (name, image_id, description, project_id)
VALUES (?, ?, ?, (SELECT id FROM projects WHERE name = ?))
`
	err = exec(c, stmt, name, imageID, desc, project)
	return err
}
// UpdateImageAlias updates the alias with the given ID, pointing it at the
// given image and replacing its description.
func (c *Cluster) UpdateImageAlias(id int, imageID int, desc string) error {
	return exec(c, `UPDATE images_aliases SET image_id=?, description=? WHERE id=?`, imageID, desc, id)
}
// CopyDefaultImageProfiles copies default profiles from id to new_id.
func (c *Cluster) CopyDefaultImageProfiles(id int, newID int) error {
	return c.Transaction(func(tx *ClusterTx) error {
		// Delete all current associations.
		if _, err := tx.tx.Exec("DELETE FROM images_profiles WHERE image_id=?", newID); err != nil {
			return err
		}

		// Copy the entries over.
		if _, err := tx.tx.Exec("INSERT INTO images_profiles (image_id, profile_id) SELECT ?, profile_id FROM images_profiles WHERE image_id=?", newID, id); err != nil {
			return err
		}

		return nil
	})
}
// UpdateImageLastUseDate updates the last_use_date field of the image with
// the given fingerprint.
func (c *Cluster) UpdateImageLastUseDate(fingerprint string, date time.Time) error {
	return exec(c, `UPDATE images SET last_use_date=? WHERE fingerprint=?`, date, fingerprint)
}
// InitImageLastUseDate inits the last_use_date field of the image with the given fingerprint.
// It also marks the image as cached; the timestamp is set DB-side via SQLite's strftime.
func (c *Cluster) InitImageLastUseDate(fingerprint string) error {
	stmt := `UPDATE images SET cached=1, last_use_date=strftime("%s") WHERE fingerprint=?`
	err := exec(c, stmt, fingerprint)
	return err
}
// UpdateImage updates the image with the given ID: core columns, the full
// property set (replaced wholesale), and — when both project and profileIds
// are supplied — the image's profile associations within that project.
func (c *Cluster) UpdateImage(id int, fname string, sz int64, public bool, autoUpdate bool, architecture string, createdAt time.Time, expiresAt time.Time, properties map[string]string, project string, profileIds []int64) error {
	// Unknown architectures are deliberately stored as 0 rather than failing.
	arch, err := osarch.ArchitectureId(architecture)
	if err != nil {
		arch = 0
	}
	err = c.Transaction(func(tx *ClusterTx) error {
		publicInt := 0
		if public {
			publicInt = 1
		}
		autoUpdateInt := 0
		if autoUpdate {
			autoUpdateInt = 1
		}
		stmt, err := tx.tx.Prepare(`UPDATE images SET filename=?, size=?, public=?, auto_update=?, architecture=?, creation_date=?, expiry_date=? WHERE id=?`)
		if err != nil {
			return err
		}
		defer stmt.Close()
		_, err = stmt.Exec(fname, sz, publicInt, autoUpdateInt, arch, createdAt, expiresAt, id)
		if err != nil {
			return err
		}
		// Replace the whole property set: delete everything, then re-insert.
		_, err = tx.tx.Exec(`DELETE FROM images_properties WHERE image_id=?`, id)
		if err != nil {
			return err
		}
		stmt2, err := tx.tx.Prepare(`INSERT INTO images_properties (image_id, type, key, value) VALUES (?, ?, ?, ?)`)
		if err != nil {
			return err
		}
		defer stmt2.Close()
		for key, value := range properties {
			_, err = stmt2.Exec(id, 0, key, value)
			if err != nil {
				return err
			}
		}
		// Only rewrite profile associations when the caller supplied both a
		// project and an explicit profile list.
		if project != "" && profileIds != nil {
			enabled, err := tx.ProjectHasProfiles(project)
			if err != nil {
				return err
			}
			if !enabled {
				project = "default"
			}
			// Remove only associations belonging to this project's profiles.
			q := `DELETE FROM images_profiles
WHERE image_id = ? AND profile_id IN (
SELECT profiles.id FROM profiles
JOIN projects ON profiles.project_id = projects.id
WHERE projects.name = ?
)`
			_, err = tx.tx.Exec(q, id, project)
			if err != nil {
				return err
			}
			stmt3, err := tx.tx.Prepare(`INSERT INTO images_profiles (image_id, profile_id) VALUES (?, ?)`)
			if err != nil {
				return err
			}
			defer stmt3.Close()
			for _, profileID := range profileIds {
				_, err = stmt3.Exec(id, profileID)
				if err != nil {
					return err
				}
			}
		}
		return nil
	})
	return err
}
// CreateImage creates a new image row plus its properties, attaches the
// project's default profile and records the image on the local node.
func (c *Cluster) CreateImage(project, fp string, fname string, sz int64, public bool, autoUpdate bool, architecture string, createdAt time.Time, expiresAt time.Time, properties map[string]string, typeName string) error {
	// Remember the caller's project: the default profile is looked up there
	// even if image storage falls back to the default project below.
	profileProject := project
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasImages(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has images")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return err
	}
	// Unknown architectures are deliberately stored as 0 rather than failing.
	arch, err := osarch.ArchitectureId(architecture)
	if err != nil {
		arch = 0
	}
	imageType := instancetype.Any
	if typeName != "" {
		var err error
		imageType, err = instancetype.New(typeName)
		if err != nil {
			return err
		}
	}
	if imageType == -1 {
		return fmt.Errorf("Invalid image type: %v", typeName)
	}
	defaultProfileID, _, err := c.GetProfile(profileProject, "default")
	if err != nil {
		return err
	}
	err = c.Transaction(func(tx *ClusterTx) error {
		publicInt := 0
		if public {
			publicInt = 1
		}
		autoUpdateInt := 0
		if autoUpdate {
			autoUpdateInt = 1
		}
		stmt, err := tx.tx.Prepare(`INSERT INTO images (project_id, fingerprint, filename, size, public, auto_update, architecture, creation_date, expiry_date, upload_date, type) VALUES ((SELECT id FROM projects WHERE name = ?), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`)
		if err != nil {
			return err
		}
		defer stmt.Close()
		result, err := stmt.Exec(project, fp, fname, sz, publicInt, autoUpdateInt, arch, createdAt, expiresAt, time.Now().UTC(), imageType)
		if err != nil {
			return err
		}
		id64, err := result.LastInsertId()
		if err != nil {
			return err
		}
		id := int(id64)
		if len(properties) > 0 {
			pstmt, err := tx.tx.Prepare(`INSERT INTO images_properties (image_id, type, key, value) VALUES (?, 0, ?, ?)`)
			if err != nil {
				return err
			}
			defer pstmt.Close()
			for k, v := range properties {
				// we can assume, that there is just one
				// value per key
				_, err = pstmt.Exec(id, k, v)
				if err != nil {
					return err
				}
			}
		}
		// Attach the default profile and record the image on this node.
		_, err = tx.tx.Exec("INSERT INTO images_profiles(image_id, profile_id) VALUES(?, ?)", id, defaultProfileID)
		if err != nil {
			return err
		}
		_, err = tx.tx.Exec("INSERT INTO images_nodes(image_id, node_id) VALUES(?, ?)", id, c.nodeID)
		if err != nil {
			return err
		}
		return nil
	})
	return err
}
// GetPoolsWithImage returns the IDs of all storage pools on which the given
// image exists as a volume on this node. (Note: it returns pool IDs, not
// names — use GetPoolNamesFromIDs to resolve them.)
func (c *Cluster) GetPoolsWithImage(imageFingerprint string) ([]int64, error) {
	poolID := int64(-1)
	query := "SELECT storage_pool_id FROM storage_volumes WHERE node_id=? AND name=? AND type=?"
	inargs := []interface{}{c.nodeID, imageFingerprint, StoragePoolVolumeTypeImage}
	outargs := []interface{}{poolID}
	result, err := queryScan(c, query, inargs, outargs)
	if err != nil {
		return []int64{}, err
	}
	poolIDs := []int64{}
	for _, r := range result {
		poolIDs = append(poolIDs, r[0].(int64))
	}
	return poolIDs, nil
}
// GetPoolNamesFromIDs returns the names of the storage pools with the given
// IDs, one query per ID.
func (c *Cluster) GetPoolNamesFromIDs(poolIDs []int64) ([]string, error) {
	var poolName string
	query := "SELECT name FROM storage_pools WHERE id=?"
	poolNames := []string{}
	for _, poolID := range poolIDs {
		inargs := []interface{}{poolID}
		outargs := []interface{}{poolName}
		result, err := queryScan(c, query, inargs, outargs)
		if err != nil {
			return []string{}, err
		}
		for _, r := range result {
			poolNames = append(poolNames, r[0].(string))
		}
	}
	return poolNames, nil
}
// UpdateImageUploadDate updates the upload_date column of the image row with
// the given ID.
func (c *Cluster) UpdateImageUploadDate(id int, uploadedAt time.Time) error {
	return exec(c, "UPDATE images SET upload_date=? WHERE id=?", uploadedAt, id)
}
// GetImagesOnLocalNode returns all images that the local LXD node has.
// Thin wrapper over GetImagesOnNode using this cluster member's node ID.
func (c *Cluster) GetImagesOnLocalNode() (map[string][]string, error) {
	return c.GetImagesOnNode(c.nodeID)
}
// GetImagesOnNode returns all images that the node with the given id has.
// The map is keyed by image fingerprint; each value lists the names of the
// projects the image belongs to.
func (c *Cluster) GetImagesOnNode(id int64) (map[string][]string, error) {
	images := make(map[string][]string) // key is fingerprint, value is list of projects
	err := c.Transaction(func(tx *ClusterTx) error {
		stmt := `
SELECT images.fingerprint, projects.name FROM images
LEFT JOIN images_nodes ON images.id = images_nodes.image_id
LEFT JOIN nodes ON images_nodes.node_id = nodes.id
LEFT JOIN projects ON images.project_id = projects.id
WHERE nodes.id = ?
`
		rows, err := tx.tx.Query(stmt, id)
		if err != nil {
			return err
		}
		// FIX: the original never closed rows; an early return on a Scan
		// error leaked the result set (and its connection) for the
		// lifetime of the transaction.
		defer rows.Close()

		var fingerprint string
		var projectName string
		for rows.Next() {
			err := rows.Scan(&fingerprint, &projectName)
			if err != nil {
				return err
			}
			images[fingerprint] = append(images[fingerprint], projectName)
		}
		return rows.Err()
	})
	return images, err
}
// GetNodesWithImage returns the addresses of online nodes which already have the image.
func (c *Cluster) GetNodesWithImage(fingerprint string) ([]string, error) {
	// Select every node that has a row in images_nodes for this fingerprint;
	// the shared helper then filters out offline members.
	const q = `
SELECT DISTINCT nodes.address FROM nodes
LEFT JOIN images_nodes ON images_nodes.node_id = nodes.id
LEFT JOIN images ON images_nodes.image_id = images.id
WHERE images.fingerprint = ?
`
	return c.getNodesByImageFingerprint(q, fingerprint)
}
// GetNodesWithoutImage returns the addresses of online nodes which don't have the image.
func (c *Cluster) GetNodesWithoutImage(fingerprint string) ([]string, error) {
	// Complement of GetNodesWithImage: every node whose address is NOT in
	// the set of nodes holding this fingerprint.
	const q = `
SELECT DISTINCT nodes.address FROM nodes WHERE nodes.address NOT IN (
SELECT DISTINCT nodes.address FROM nodes
LEFT JOIN images_nodes ON images_nodes.node_id = nodes.id
LEFT JOIN images ON images_nodes.image_id = images.id
WHERE images.fingerprint = ?)
`
	return c.getNodesByImageFingerprint(q, fingerprint)
}
// getNodesByImageFingerprint runs the given address-selecting statement with
// the fingerprint as its sole parameter and drops nodes that are offline.
func (c *Cluster) getNodesByImageFingerprint(stmt, fingerprint string) ([]string, error) {
	var addresses []string // Addresses of online nodes with the image
	err := c.Transaction(func(tx *ClusterTx) error {
		offlineThreshold, err := tx.GetNodeOfflineThreshold()
		if err != nil {
			return err
		}

		allAddresses, err := query.SelectStrings(tx.tx, stmt, fingerprint)
		if err != nil {
			return err
		}

		for _, address := range allAddresses {
			node, err := tx.GetNodeByAddress(address)
			if err != nil {
				return err
			}
			// Skip members that have not heartbeat recently enough.
			if node.IsOffline(offlineThreshold) {
				continue
			}
			addresses = append(addresses, address)
		}
		return nil
	})
	return addresses, err
}
|
package slackscot
// GENERATED and MANAGED by giddyup (https://github.com/alexandre-normand/giddyup)
const (
// VERSION represents the current slackscot version
VERSION = "1.46.0"
)
📈 Bump Version
package slackscot
// GENERATED and MANAGED by giddyup (https://github.com/alexandre-normand/giddyup)
const (
// VERSION represents the current slackscot version
VERSION = "1.47.0"
)
|
package main

// VERSION is the application's release version string.
const VERSION = "0.14"
Release version 0.15
package main

// VERSION is the application's release version string.
const VERSION = "0.15"
|
// Originally derived from: btcsuite/btcd/version.go
// Copyright (c) 2013-2015 The btcsuite developers
// Copyright (c) 2015 Monetas.
// Copyright 2016 Daniel Krawisz.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"fmt"
"strings"
)
// semanticAlphabet is the set of characters permitted by semantic versioning
// 2.0.0 in pre-release and build-metadata identifiers: digits, ASCII letters
// and the hyphen.
const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
// These constants define the application version and follow the semantic
// versioning 2.0.0 spec (http://semver.org/).
const (
	appMajor uint = 0
	appMinor uint = 1
	appPatch uint = 0
	// appPreRelease MUST only contain characters from semanticAlphabet
	// per the semantic versioning spec.
	appPreRelease = "alpha"
)
// appBuild is defined as a variable so it can be overridden during the build
// process with '-ldflags "-X main.appBuild foo' if needed. It MUST only
// contain characters from semanticAlphabet per the semantic versioning spec.
var appBuild string
// version returns the application version as a properly formed string per the
// semantic versioning 2.0.0 spec (http://semver.org/).
func version() string {
	// Base MAJOR.MINOR.PATCH component.
	v := fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch)

	// Attach the pre-release identifier after a hyphen. The identifier is
	// filtered through normalizeVerString and dropped entirely when empty.
	if pre := normalizeVerString(appPreRelease); pre != "" {
		v = fmt.Sprintf("%s-%s", v, pre)
	}

	// Attach build metadata after a plus sign, subject to the same
	// character filtering.
	if meta := normalizeVerString(appBuild); meta != "" {
		v = fmt.Sprintf("%s+%s", v, meta)
	}

	return v
}
// normalizeVerString returns the passed string stripped of every character
// that is not in semanticAlphabet, per the semantic versioning rules for
// pre-release and build-metadata strings.
func normalizeVerString(str string) string {
	var b bytes.Buffer
	for _, r := range str {
		if !strings.ContainsRune(semanticAlphabet, r) {
			continue // drop invalid characters
		}
		b.WriteRune(r)
	}
	return b.String()
}
Update version message.
// Originally derived from: btcsuite/btcd/version.go
// Copyright (c) 2013-2015 The btcsuite developers
// Copyright (c) 2015 Monetas.
// Copyright 2016 Daniel Krawisz.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"fmt"
"strings"
)
// semanticAlphabet is the set of characters permitted by semantic versioning
// 2.0.0 in pre-release and build-metadata identifiers: digits, ASCII letters
// and the hyphen.
const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
// These constants define the application version and follow the semantic
// versioning 2.0.0 spec (http://semver.org/).
const (
	appMajor uint = 1
	appMinor uint = 0
	appPatch uint = 0
	// appPreRelease MUST only contain characters from semanticAlphabet
	// per the semantic versioning spec.
	appPreRelease = ""
)
// appBuild is defined as a variable so it can be overridden during the build
// process with '-ldflags "-X main.appBuild foo' if needed. It MUST only
// contain characters from semanticAlphabet per the semantic versioning spec.
var appBuild string
// version returns the application version as a properly formed string per the
// semantic versioning 2.0.0 spec (http://semver.org/).
func version() string {
	// Start with the major, minor, and patch versions.
	version := fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch)
	// Append pre-release version if there is one. The hyphen called for
	// by the semantic versioning spec is automatically appended and should
	// not be contained in the pre-release string. The pre-release version
	// is not appended if it contains invalid characters.
	preRelease := normalizeVerString(appPreRelease)
	if preRelease != "" {
		version = fmt.Sprintf("%s-%s", version, preRelease)
	}
	// Append build metadata if there is any. The plus called for
	// by the semantic versioning spec is automatically appended and should
	// not be contained in the build metadata string. The build metadata
	// string is not appended if it contains invalid characters.
	build := normalizeVerString(appBuild)
	if build != "" {
		version = fmt.Sprintf("%s+%s", version, build)
	}
	return version
}
// normalizeVerString returns the passed string stripped of all characters which
// are not valid according to the semantic versioning guidelines for pre-release
// version and build metadata strings. In particular they MUST only contain
// characters in semanticAlphabet.
func normalizeVerString(str string) string {
	// Build the filtered copy rune-by-rune; invalid runes are dropped.
	var result bytes.Buffer
	for _, r := range str {
		if strings.ContainsRune(semanticAlphabet, r) {
			result.WriteRune(r)
		}
	}
	return result.String()
}
|
package main

/*
Given a version number MAJOR.MINOR.PATCH, increment the:
MAJOR version when you make incompatible API changes,
MINOR version when you add functionality in a backwards-compatible manner, and
PATCH version when you make backwards-compatible bug fixes.
*/

// VersionMajor is the MAJOR component of the semantic version.
const VersionMajor = 4

// VersionMinor is the MINOR component of the semantic version.
const VersionMinor = 9

// VersionPatch is the PATCH component of the semantic version.
const VersionPatch = 3
Version bump
package main

/*
Given a version number MAJOR.MINOR.PATCH, increment the:
MAJOR version when you make incompatible API changes,
MINOR version when you add functionality in a backwards-compatible manner, and
PATCH version when you make backwards-compatible bug fixes.
*/

// VersionMajor is the MAJOR component of the semantic version.
const VersionMajor = 4

// VersionMinor is the MINOR component of the semantic version.
const VersionMinor = 9

// VersionPatch is the PATCH component of the semantic version.
const VersionPatch = 4
|
package main

// VERSION is the application's release version string.
const VERSION = "0.11.0-alpha1"
:+1: Bump up the version 0.11.0-alpha2
package main

// VERSION is the application's release version string.
const VERSION = "0.11.0-alpha2"
|
package main
import (
"fmt"
"strconv"
"strings"
"time"
)
// GetVersion renders the build version, git revision and build time as a
// single human-readable string. The leading "v" of the version is dropped.
func GetVersion() string {
	// buildTimestamp may be injected as a unix epoch via ldflags; when it
	// parses as an integer, rewrite it (once) into RFC3339 form.
	if secs, err := strconv.ParseInt(buildTimestamp, 10, 64); err == nil {
		buildTimestamp = time.Unix(secs, 0).Format(time.RFC3339)
	}
	return fmt.Sprintf("%s built from %s at %s",
		strings.TrimPrefix(version, "v"), gitVersion,
		buildTimestamp)
}

// Build-time metadata, overridable via -ldflags "-X main.<name>=...".
var (
	version        = "v0.1.7"
	gitVersion     = "unknown-git-version"
	buildTimestamp = "unknown-time"
)
v0.1.8
package main
import (
"fmt"
"strconv"
"strings"
"time"
)
// GetVersion renders the build version, git revision and build time as a
// single human-readable string. The leading "v" of the version is dropped.
func GetVersion() string {
	// buildTimestamp may be injected as a unix epoch via ldflags; when it
	// parses as an integer, rewrite it (once) into RFC3339 form.
	if e, err := strconv.ParseInt(buildTimestamp, 10, 64); err == nil {
		buildTimestamp = time.Unix(e, 0).Format(time.RFC3339)
	}
	return fmt.Sprintf("%s built from %s at %s",
		strings.TrimPrefix(version, "v"), gitVersion,
		buildTimestamp)
}
// Build-time metadata, overridable via -ldflags "-X main.<name>=...".
var (
	version = "v0.1.8"
	gitVersion = "unknown-git-version"
	buildTimestamp = "unknown-time"
)
|
package sdk

// VERSION is the SDK's release version string.
const VERSION = "v1.14.1"
Incremented version.
package sdk

// VERSION is the SDK's release version string.
const VERSION = "v1.14.3"
|
package join
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/etcd/store"
"github.com/davecgh/go-spew/spew"
"github.com/golang/glog"
"github.com/sttts/elastic-etcd/node"
"golang.org/x/net/context/ctxhttp"
)
// Strategy describes the member add strategy.
type Strategy string
const (
	// Deadlines for the liveness probe, etcd API calls and discovery
	// service requests, respectively.
	livenessTimeout = time.Second * 5
	etcdTimeout = time.Second * 5
	discoveryTimeout = time.Second * 10
	// PreparedStrategy assumes that the admin prepares new member entries.
	PreparedStrategy = Strategy("prepared")
	// PruneStrategy aggressively removes dead members.
	PruneStrategy = Strategy("prune")
	// ReplaceStrategy defensively removes a dead member only when a cluster is full.
	ReplaceStrategy = Strategy("replace")
	// AddStrategy only adds a member until the cluster is full, never removes old members.
	AddStrategy = Strategy("add")
	// maxInt (the largest int value) stands for "unbounded cluster size"
	// when a cluster size of 0 is requested.
	maxUint = ^uint(0)
	maxInt = int(maxUint >> 1)
)
// EtcdConfig is the result of the join algorithm, turned into etcd flags or env vars.
type EtcdConfig struct {
	// InitialCluster holds name=peerURL pairs for the initial cluster list.
	InitialCluster []string
	// InitialClusterState is either "new" or "existing".
	InitialClusterState string
	// AdvertisePeerURLs is the comma-separated list of peer URLs to advertise.
	AdvertisePeerURLs string
	// Discovery is the discovery URL; only set when bootstrapping a new cluster.
	Discovery string
	// Name is this member's name.
	Name string
}
// alive reports whether member m answers the raft probing endpoint on any of
// its peer URLs within livenessTimeout.
func alive(ctx context.Context, m client.Member) bool {
	// FIX: capture and defer the CancelFunc; the original discarded it,
	// leaking the timeout context's resources until expiry.
	ctx, cancel := context.WithTimeout(ctx, livenessTimeout)
	defer cancel()

	glog.V(6).Infof("Testing liveness of %s=%v", m.Name, m.PeerURLs)
	for _, u := range m.PeerURLs {
		resp, err := ctxhttp.Get(ctx, http.DefaultClient, u+rafthttp.ProbingPrefix)
		if err != nil {
			continue
		}
		// FIX: close the response body (the original leaked it), so the
		// transport can reuse the connection.
		ok := resp.StatusCode == http.StatusOK
		_ = resp.Body.Close()
		if ok {
			return true
		}
	}
	return false
}
// active reports whether member m participates in a functioning cluster,
// i.e. whether its members API can name a leader.
func active(ctx context.Context, m client.Member) (bool, error) {
	// FIX: capture and defer the CancelFunc; the original discarded it,
	// leaking the timeout context's resources until expiry.
	ctx, cancel := context.WithTimeout(ctx, etcdTimeout)
	defer cancel()

	c, err := client.New(client.Config{
		Endpoints:               m.ClientURLs,
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: 5 * time.Second,
	})
	if err != nil {
		return false, err
	}

	mapi := client.NewMembersAPI(c)
	glog.V(6).Infof("Testing whether %s=%v knows the leader", m.Name, m.PeerURLs)
	leader, err := mapi.Leader(ctx)
	if err != nil {
		return false, err
	}
	return leader != nil, nil
}
// clusterExistingHeuristic decides whether the discovery entries describe an
// existing cluster. It returns the subset of nodes that look alive and
// active, or nil when a brand-new cluster should be assumed.
func clusterExistingHeuristic(
	ctx context.Context,
	size int, nodes []node.DiscoveryNode,
) ([]node.DiscoveryNode, error) {
	quorum := size/2 + 1
	if nodes == nil {
		glog.V(4).Infof("No nodes found in discovery service. Assuming new cluster.")
		return nil, nil
	}
	// Probe all discovered nodes concurrently; lock guards activeNodes.
	wg := sync.WaitGroup{}
	wg.Add(len(nodes))
	lock := sync.Mutex{}
	activeNodes := make([]node.DiscoveryNode, 0, len(nodes))
	for _, n := range nodes {
		go func(n node.DiscoveryNode) {
			defer wg.Done()
			if !alive(ctx, n.Member) {
				glog.Infof("Node %s looks dead", n.NamedPeerURLs())
				return
			}
			if ok, err := active(ctx, n.Member); !ok {
				if err != nil {
					glog.Error(err)
				}
				glog.Infof("Node %s is not in a healthy cluster.", n.NamedPeerURLs())
				return
			}
			glog.Infof("Node %s looks alive and active in a cluster", n.NamedPeerURLs())
			lock.Lock()
			defer lock.Unlock()
			activeNodes = append(activeNodes, n)
		}(n)
	}
	wg.Wait()
	// NOTE(review): the quorum test below uses the number of *discovered*
	// nodes, not the number of *active* ones — confirm this is intended.
	if len(nodes) < quorum {
		glog.V(4).Infof(
			"Only %d nodes found in discovery service, less than a quorum of %d. Assuming new cluster.",
			len(nodes),
			quorum,
		)
		return nil, nil
	}
	if len(nodes) == size {
		glog.V(4).Infof("Cluster is full. Assuming existing cluster.")
		return activeNodes, nil
	}
	if len(activeNodes) > 0 {
		return activeNodes, nil
	}
	return nil, nil
}
// discoveryValue fetches baseURL+key from the discovery service and decodes
// the response as a store.Event.
func discoveryValue(ctx context.Context, baseURL, key string) (*store.Event, error) {
	// FIX: capture and defer the CancelFunc; the original discarded it,
	// leaking the timeout context's resources until expiry.
	ctx, cancel := context.WithTimeout(ctx, discoveryTimeout)
	defer cancel()

	url := baseURL + key
	glog.V(6).Infof("Getting %s", url)
	resp, err := ctxhttp.Get(ctx, http.DefaultClient, url)
	if err != nil {
		return nil, err
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode != http.StatusOK {
		body, _ := ioutil.ReadAll(resp.Body)
		return nil, fmt.Errorf("status code %d from %q: %s", resp.StatusCode, url, body)
	}

	var res store.Event
	err = json.NewDecoder(resp.Body).Decode(&res)
	if err != nil {
		return nil, fmt.Errorf("invalid answer from %q: %v", url, err)
	}
	glog.V(9).Infof("Got: %s", spew.Sdump(res))
	return &res, nil
}
// deleteDiscoveryMachine removes the machine entry with the given id from the
// discovery service. It returns false (without error) when the entry does not
// exist.
func deleteDiscoveryMachine(ctx context.Context, baseURL, id string) (bool, error) {
	// FIX: capture and defer the CancelFunc; the original discarded it,
	// leaking the timeout context's resources until expiry.
	ctx, cancel := context.WithTimeout(ctx, discoveryTimeout)
	defer cancel()

	url := baseURL + "/" + strings.TrimLeft(id, "/")
	req, err := http.NewRequest("DELETE", url, strings.NewReader(""))
	if err != nil {
		return false, err
	}
	resp, err := ctxhttp.Do(ctx, http.DefaultClient, req)
	if err != nil {
		return false, err
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode == http.StatusNotFound {
		return false, nil
	}
	if resp.StatusCode != http.StatusOK {
		body, _ := ioutil.ReadAll(resp.Body)
		return false, fmt.Errorf("status code %d on DELETE for %q: %s", resp.StatusCode, url, body)
	}
	return true, nil
}
// Join adds a new member depending on the strategy and returns a matching etcd configuration.
func Join(
	discoveryURL, name, initialAdvertisePeerURLs string,
	fresh bool,
	clientPort, clusterSize int,
	strategy Strategy,
) (*EtcdConfig, error) {
	ctx := context.Background()

	// Fetch all machine entries registered with the discovery service.
	res, err := discoveryValue(ctx, discoveryURL, "/")
	if err != nil {
		return nil, err
	}
	nodes := make([]node.DiscoveryNode, 0, len(res.Node.Nodes))
	for _, nn := range res.Node.Nodes {
		if nn.Value == nil {
			glog.V(5).Infof("Skipping %q because no value exists", nn.Key)
			// FIX: the original fell through here and dereferenced the
			// nil Value below, panicking on valueless entries.
			continue
		}
		n, err := node.NewDiscoveryNode(*nn.Value, clientPort)
		if err != nil {
			glog.Warningf("invalid peer url %q in discovery service: %v", *nn.Value, err)
			continue
		}
		nodes = append(nodes, *n)
	}

	if clusterSize < 0 {
		// Negative means: ask the discovery service for the target size.
		res, err = discoveryValue(ctx, discoveryURL, "/_config/size")
		if err != nil {
			return nil, fmt.Errorf("cannot get discovery url cluster size: %v", err)
		}
		// FIX: the original ignored the parse error, silently producing
		// a cluster size of 0 on malformed data.
		size, err := strconv.ParseInt(*res.Node.Value, 10, 16)
		if err != nil {
			return nil, fmt.Errorf("invalid cluster size in discovery service: %v", err)
		}
		clusterSize = int(size)
		glog.V(2).Infof("Got a target cluster size of %d from the discovery url", clusterSize)
	} else if clusterSize == 0 {
		clusterSize = maxInt // zero means unbounded
	}

	activeNodes, err := clusterExistingHeuristic(ctx, clusterSize, nodes)
	if err != nil {
		return nil, err
	}

	if activeNodes != nil && len(activeNodes) == 0 {
		// cluster down. Restarting nodes with the same config.
		if fresh {
			return nil, errors.New("Cluster is down. A new node cannot join now.")
		}
		return &EtcdConfig{
			InitialClusterState: "existing",
			AdvertisePeerURLs:   initialAdvertisePeerURLs,
			Name:                name,
		}, nil
	} else if activeNodes != nil {
		// Existing, healthy cluster: assemble the initial-cluster list from
		// the active members plus this node's own URLs.
		activeNamedURLs := make([]string, 0, len(nodes))
		for _, n := range activeNodes {
			activeNamedURLs = append(activeNamedURLs, n.NamedPeerURLs()...)
		}
		advertisedURLs := strings.Split(initialAdvertisePeerURLs, ",")
		advertisedNamedURLs := make([]string, 0, len(initialAdvertisePeerURLs))
		for _, u := range advertisedURLs {
			advertisedNamedURLs = append(advertisedNamedURLs, fmt.Sprintf("%s=%s", name, u))
		}
		initialNamedURLs := []string{advertisedNamedURLs[0]}
		if strategy != PreparedStrategy && fresh {
			// Actively register this node as a member.
			adder, err := newMemberAdder(
				activeNodes,
				strategy,
				clientPort,
				clusterSize,
				discoveryURL,
			)
			if err != nil {
				return nil, err
			}
			initialURLs, err := adder.Add(ctx, name, advertisedURLs)
			if err != nil {
				return nil, fmt.Errorf("unable to add node %q with peer urls %q to the cluster: %v", name, initialAdvertisePeerURLs, err)
			}
			initialNamedURLs = []string{}
			for _, u := range initialURLs {
				initialNamedURLs = append(initialNamedURLs, fmt.Sprintf("%s=%s", name, u))
			}
		}
		return &EtcdConfig{
			InitialCluster:      append(initialNamedURLs, activeNamedURLs...),
			InitialClusterState: "existing",
			AdvertisePeerURLs:   initialAdvertisePeerURLs,
			Name:                name,
		}, nil
	} else {
		// No existing cluster detected: bootstrap via the discovery URL.
		return &EtcdConfig{
			InitialClusterState: "new",
			Discovery:           discoveryURL,
			AdvertisePeerURLs:   initialAdvertisePeerURLs,
			Name:                name,
		}, nil
	}
}
Be more verbose about the actual algorithm
package join
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/etcd/store"
"github.com/davecgh/go-spew/spew"
"github.com/golang/glog"
"github.com/sttts/elastic-etcd/node"
"golang.org/x/net/context/ctxhttp"
)
// Strategy describes the member add strategy.
type Strategy string
const (
	// Deadlines for the liveness probe, etcd API calls and discovery
	// service requests, respectively.
	livenessTimeout = time.Second * 5
	etcdTimeout = time.Second * 5
	discoveryTimeout = time.Second * 10
	// PreparedStrategy assumes that the admin prepares new member entries.
	PreparedStrategy = Strategy("prepared")
	// PruneStrategy aggressively removes dead members.
	PruneStrategy = Strategy("prune")
	// ReplaceStrategy defensively removes a dead member only when a cluster is full.
	ReplaceStrategy = Strategy("replace")
	// AddStrategy only adds a member until the cluster is full, never removes old members.
	AddStrategy = Strategy("add")
	// maxInt (the largest int value) stands for "unbounded cluster size"
	// when a cluster size of 0 is requested.
	maxUint = ^uint(0)
	maxInt = int(maxUint >> 1)
)
// EtcdConfig is the result of the join algorithm, turned into etcd flags or env vars.
type EtcdConfig struct {
	// InitialCluster holds name=peerURL pairs for the initial cluster list.
	InitialCluster []string
	// InitialClusterState is either "new" or "existing".
	InitialClusterState string
	// AdvertisePeerURLs is the comma-separated list of peer URLs to advertise.
	AdvertisePeerURLs string
	// Discovery is the discovery URL; only set when bootstrapping a new cluster.
	Discovery string
	// Name is this member's name.
	Name string
}
// alive reports whether member m answers the raft probing endpoint on any of
// its peer URLs within livenessTimeout.
// NOTE(review): the CancelFunc from WithTimeout is discarded (lostcancel) and
// the response body is never closed — both leak resources; worth fixing.
func alive(ctx context.Context, m client.Member) bool {
	ctx, _ = context.WithTimeout(ctx, livenessTimeout)
	glog.V(6).Infof("Testing liveness of %s=%v", m.Name, m.PeerURLs)
	for _, u := range m.PeerURLs {
		resp, err := ctxhttp.Get(ctx, http.DefaultClient, u+rafthttp.ProbingPrefix)
		if err == nil && resp.StatusCode == http.StatusOK {
			return true
		}
	}
	return false
}
// active reports whether member m participates in a functioning cluster,
// i.e. whether its members API can name a leader.
// NOTE(review): the CancelFunc from WithTimeout is discarded (lostcancel).
func active(ctx context.Context, m client.Member) (bool, error) {
	ctx, _ = context.WithTimeout(ctx, etcdTimeout)
	c, err := client.New(client.Config{
		Endpoints: m.ClientURLs,
		Transport: client.DefaultTransport,
		HeaderTimeoutPerRequest: 5 * time.Second,
	})
	if err != nil {
		return false, err
	}
	mapi := client.NewMembersAPI(c)
	glog.V(6).Infof("Testing whether %s=%v knows the leader", m.Name, m.PeerURLs)
	leader, err := mapi.Leader(ctx)
	if err != nil {
		return false, err
	}
	return leader != nil, nil
}
// clusterExistingHeuristic decides whether the discovery entries describe an
// existing cluster. It returns the subset of nodes that look alive and
// active, or nil when a brand-new cluster should be assumed.
func clusterExistingHeuristic(
	ctx context.Context,
	size int, nodes []node.DiscoveryNode,
) ([]node.DiscoveryNode, error) {
	quorum := size/2 + 1
	if nodes == nil {
		glog.V(4).Infof("No nodes found in discovery service. Assuming new cluster.")
		return nil, nil
	}
	// Probe all discovered nodes concurrently; lock guards activeNodes.
	wg := sync.WaitGroup{}
	wg.Add(len(nodes))
	lock := sync.Mutex{}
	activeNodes := make([]node.DiscoveryNode, 0, len(nodes))
	for _, n := range nodes {
		go func(n node.DiscoveryNode) {
			defer wg.Done()
			if !alive(ctx, n.Member) {
				glog.Infof("Node %s looks dead", n.NamedPeerURLs())
				return
			}
			if ok, err := active(ctx, n.Member); !ok {
				if err != nil {
					glog.Error(err)
				}
				glog.Infof("Node %s is not in a healthy cluster.", n.NamedPeerURLs())
				return
			}
			glog.Infof("Node %s looks alive and active in a cluster", n.NamedPeerURLs())
			lock.Lock()
			defer lock.Unlock()
			activeNodes = append(activeNodes, n)
		}(n)
	}
	wg.Wait()
	// NOTE(review): the quorum test below uses the number of *discovered*
	// nodes, not the number of *active* ones — confirm this is intended.
	if len(nodes) < quorum {
		glog.V(4).Infof(
			"Only %d nodes found in discovery service, less than a quorum of %d. Assuming new cluster.",
			len(nodes),
			quorum,
		)
		return nil, nil
	}
	if len(nodes) == size {
		glog.V(4).Infof("Cluster is full. Assuming existing cluster.")
		return activeNodes, nil
	}
	if len(activeNodes) > 0 {
		return activeNodes, nil
	}
	return nil, nil
}
// discoveryValue fetches baseURL+key from the discovery service and decodes
// the response as a store.Event.
// NOTE(review): the CancelFunc from WithTimeout is discarded (lostcancel).
func discoveryValue(ctx context.Context, baseURL, key string) (*store.Event, error) {
	ctx, _ = context.WithTimeout(ctx, discoveryTimeout)
	url := baseURL + key
	glog.V(6).Infof("Getting %s", url)
	resp, err := ctxhttp.Get(ctx, http.DefaultClient, url)
	if err != nil {
		return nil, err
	}
	defer func() { _ = resp.Body.Close() }()
	if resp.StatusCode != http.StatusOK {
		body, _ := ioutil.ReadAll(resp.Body)
		return nil, fmt.Errorf("status code %d from %q: %s", resp.StatusCode, url, body)
	}
	var res store.Event
	err = json.NewDecoder(resp.Body).Decode(&res)
	if err != nil {
		return nil, fmt.Errorf("invalid answer from %q: %v", url, err)
	}
	glog.V(9).Infof("Got: %s", spew.Sdump(res))
	return &res, nil
}
// deleteDiscoveryMachine removes the machine entry with the given id from the
// discovery service. It returns false (without error) when the entry does not
// exist.
// NOTE(review): the CancelFunc from WithTimeout is discarded (lostcancel).
func deleteDiscoveryMachine(ctx context.Context, baseURL, id string) (bool, error) {
	ctx, _ = context.WithTimeout(ctx, discoveryTimeout)
	url := baseURL + "/" + strings.TrimLeft(id, "/")
	req, err := http.NewRequest("DELETE", url, strings.NewReader(""))
	if err != nil {
		return false, err
	}
	resp, err := ctxhttp.Do(ctx, http.DefaultClient, req)
	if err != nil {
		return false, err
	}
	defer func() { _ = resp.Body.Close() }()
	if resp.StatusCode == http.StatusNotFound {
		return false, nil
	}
	if resp.StatusCode != http.StatusOK {
		body, _ := ioutil.ReadAll(resp.Body)
		return false, fmt.Errorf("status code %d on DELETE for %q: %s", resp.StatusCode, url, body)
	}
	return true, nil
}
// Join adds a new member depending on the strategy and returns a matching etcd configuration.
func Join(
	discoveryURL, name, initialAdvertisePeerURLs string,
	fresh bool,
	clientPort, clusterSize int,
	strategy Strategy,
) (*EtcdConfig, error) {
	ctx := context.Background()

	// Fetch all machine entries registered with the discovery service.
	res, err := discoveryValue(ctx, discoveryURL, "/")
	if err != nil {
		return nil, err
	}
	nodes := make([]node.DiscoveryNode, 0, len(res.Node.Nodes))
	for _, nn := range res.Node.Nodes {
		if nn.Value == nil {
			glog.V(5).Infof("Skipping %q because no value exists", nn.Key)
			// FIX: the original fell through here and dereferenced the
			// nil Value below, panicking on valueless entries.
			continue
		}
		n, err := node.NewDiscoveryNode(*nn.Value, clientPort)
		if err != nil {
			glog.Warningf("invalid peer url %q in discovery service: %v", *nn.Value, err)
			continue
		}
		nodes = append(nodes, *n)
	}

	if clusterSize < 0 {
		// Negative means: ask the discovery service for the target size.
		res, err = discoveryValue(ctx, discoveryURL, "/_config/size")
		if err != nil {
			return nil, fmt.Errorf("cannot get discovery url cluster size: %v", err)
		}
		// FIX: the original ignored the parse error, silently producing
		// a cluster size of 0 on malformed data.
		size, err := strconv.ParseInt(*res.Node.Value, 10, 16)
		if err != nil {
			return nil, fmt.Errorf("invalid cluster size in discovery service: %v", err)
		}
		clusterSize = int(size)
		glog.V(2).Infof("Got a target cluster size of %d from the discovery url", clusterSize)
	} else if clusterSize == 0 {
		clusterSize = maxInt // zero means unbounded
	}

	activeNodes, err := clusterExistingHeuristic(ctx, clusterSize, nodes)
	if err != nil {
		return nil, err
	}

	if activeNodes != nil && len(activeNodes) == 0 {
		// cluster down. Restarting nodes with the same config.
		if fresh {
			return nil, errors.New("Cluster is down. A new node cannot join now.")
		}
		glog.Infof("Existing cluster seems to be done. No healthy node found. Trying to resume cluster.")
		return &EtcdConfig{
			InitialClusterState: "existing",
			AdvertisePeerURLs:   initialAdvertisePeerURLs,
			Name:                name,
		}, nil
	} else if activeNodes != nil {
		// Existing, healthy cluster: assemble the initial-cluster list from
		// the active members plus this node's own URLs.
		activeNamedURLs := make([]string, 0, len(nodes))
		for _, n := range activeNodes {
			activeNamedURLs = append(activeNamedURLs, n.NamedPeerURLs()...)
		}
		advertisedURLs := strings.Split(initialAdvertisePeerURLs, ",")
		advertisedNamedURLs := make([]string, 0, len(initialAdvertisePeerURLs))
		for _, u := range advertisedURLs {
			advertisedNamedURLs = append(advertisedNamedURLs, fmt.Sprintf("%s=%s", name, u))
		}
		initialNamedURLs := []string{advertisedNamedURLs[0]}
		if strategy != PreparedStrategy && fresh {
			// FIX: the original format string had a %q verb but passed no
			// argument (go vet printf error); pass the strategy.
			glog.Infof("Existing cluster found. Trying to join with %q strategy.", strategy)
			adder, err := newMemberAdder(
				activeNodes,
				strategy,
				clientPort,
				clusterSize,
				discoveryURL,
			)
			if err != nil {
				return nil, err
			}
			initialURLs, err := adder.Add(ctx, name, advertisedURLs)
			if err != nil {
				return nil, fmt.Errorf("unable to add node %q with peer urls %q to the cluster: %v", name, initialAdvertisePeerURLs, err)
			}
			initialNamedURLs = []string{}
			for _, u := range initialURLs {
				initialNamedURLs = append(initialNamedURLs, fmt.Sprintf("%s=%s", name, u))
			}
		} else {
			glog.Infof("Existing cluster found. Trying to join without adding this instance as a member.")
		}
		return &EtcdConfig{
			InitialCluster:      append(initialNamedURLs, activeNamedURLs...),
			InitialClusterState: "existing",
			AdvertisePeerURLs:   initialAdvertisePeerURLs,
			Name:                name,
		}, nil
	} else {
		glog.Infof("Trying to launch new cluster.")
		return &EtcdConfig{
			InitialClusterState: "new",
			Discovery:           discoveryURL,
			AdvertisePeerURLs:   initialAdvertisePeerURLs,
			Name:                name,
		}, nil
	}
}
|
package proxy
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"regexp"
"strings"
"github.com/fsouza/go-dockerclient"
"github.com/weaveworks/weave/common"
)
var (
containerIDRegexp = regexp.MustCompile("^(/v[0-9\\.]*)?/containers/([^/]*)/.*")
weaveWaitEntrypoint = []string{"/w/w"}
weaveEntrypoint = "/home/weave/weaver"
weaveContainerName = "/weave"
Log = common.Log
)
// callWeave runs the local "./weave" script with the given arguments (always
// prepending --local) and returns its captured stdout, stderr and exit error.
func callWeave(args ...string) ([]byte, []byte, error) {
	args = append([]string{"--local"}, args...)
	cmd := exec.Command("./weave", args...)
	// Start from a minimal fixed PATH instead of inheriting the proxy's
	// whole environment; selected variables are propagated below.
	cmd.Env = []string{
		"PATH=/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
	}
	// propagateEnv copies a single variable into the child environment,
	// but only when it is actually set.
	propagateEnv := func(key string) {
		if val := os.Getenv(key); val != "" {
			cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, val))
		}
	}
	propagateEnv("DOCKER_HOST")
	// Propagate WEAVE_DEBUG, to make debugging easier.
	propagateEnv("WEAVE_DEBUG")
	// This prevents the code coverage contortions in our
	// integration test suite breaking things.
	propagateEnv("COVERAGE")
	// In case the router control endpoint address is non-standard.
	propagateEnv("WEAVE_HTTP_ADDR")
	Log.Debug("Calling weave args: ", args, "env: ", cmd.Env)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	err := cmd.Run()
	// IMPROVEMENT: debug-log any exec failure so it is visible even when
	// the caller swallows the returned error.
	if err != nil {
		Log.Debug("weave returned error: ", err)
	}
	return stdout.Bytes(), stderr.Bytes(), err
}
// unmarshalRequestBody decodes the JSON body of r into target, logging the
// raw payload and restoring r.Body so downstream handlers can re-read it.
func unmarshalRequestBody(r *http.Request, target interface{}) error {
	payload, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return err
	}
	Log.Debugf("->requestBody: %s", payload)
	if err := r.Body.Close(); err != nil {
		return err
	}
	// Put a fresh reader over the same bytes back on the request.
	r.Body = ioutil.NopCloser(bytes.NewReader(payload))
	dec := json.NewDecoder(bytes.NewReader(payload))
	dec.UseNumber() // don't want large numbers in scientific format
	return dec.Decode(&target)
}
// marshalRequestBody serializes body as JSON and installs it as the new
// request body, updating ContentLength to match.
func marshalRequestBody(r *http.Request, body interface{}) error {
	encoded, err := json.Marshal(body)
	if err != nil {
		return err
	}
	Log.Debugf("<-requestBody: %s", encoded)
	r.Body = ioutil.NopCloser(bytes.NewReader(encoded))
	r.ContentLength = int64(len(encoded))
	return nil
}
// unmarshalResponseBody decodes the JSON body of the response into target,
// logging the raw payload and restoring r.Body so it can be read again.
func unmarshalResponseBody(r *http.Response, target interface{}) error {
	payload, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return err
	}
	Log.Debugf("->responseBody: %s", payload)
	if err := r.Body.Close(); err != nil {
		return err
	}
	// Put a fresh reader over the same bytes back on the response.
	r.Body = ioutil.NopCloser(bytes.NewReader(payload))
	dec := json.NewDecoder(bytes.NewReader(payload))
	dec.UseNumber() // don't want large numbers in scientific format
	return dec.Decode(&target)
}
// marshalResponseBody serializes body as JSON and installs it as the new
// response body, fixing up ContentLength and TransferEncoding.
func marshalResponseBody(r *http.Response, body interface{}) error {
	encoded, err := json.Marshal(body)
	if err != nil {
		return err
	}
	Log.Debugf("<-responseBody: %s", encoded)
	r.Body = ioutil.NopCloser(bytes.NewReader(encoded))
	r.ContentLength = int64(len(encoded))
	// Stop it being chunked, because that hangs
	r.TransferEncoding = nil
	return nil
}
// inspectContainerInPath extracts the container id from a docker API request
// path and returns the inspected container. Failures are logged and returned.
func inspectContainerInPath(client *docker.Client, path string) (*docker.Container, error) {
	match := containerIDRegexp.FindStringSubmatch(path)
	if match == nil {
		err := fmt.Errorf("No container id found in request with path %s", path)
		Log.Warningln(err)
		return nil, err
	}
	id := match[2]
	container, err := client.InspectContainer(id)
	if err != nil {
		Log.Warningf("Error inspecting container %s: %v", id, err)
	}
	return container, err
}
// addVolume adds a source:target[:mode] bind mount to hostConfig's "Binds",
// replacing any existing bind mounted at the same target path.
func addVolume(hostConfig jsonObject, source, target, mode string) error {
	existing, err := hostConfig.StringArray("Binds")
	if err != nil {
		return err
	}
	var kept []string
	for _, b := range existing {
		parts := strings.Split(b, ":")
		if len(parts) >= 2 && parts[1] == target {
			// Drop any bind already mounted at the target path.
			continue
		}
		kept = append(kept, b)
	}
	newBind := source + ":" + target
	if mode != "" {
		newBind += ":" + mode
	}
	hostConfig["Binds"] = append(kept, newBind)
	return nil
}
Debug log any error returned by exec-ing weave
package proxy
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"regexp"
"strings"
"github.com/fsouza/go-dockerclient"
"github.com/weaveworks/weave/common"
)
var (
containerIDRegexp = regexp.MustCompile("^(/v[0-9\\.]*)?/containers/([^/]*)/.*")
weaveWaitEntrypoint = []string{"/w/w"}
weaveEntrypoint = "/home/weave/weaver"
weaveContainerName = "/weave"
Log = common.Log
)
// callWeave runs the local "./weave" script with the given arguments (always
// prepending --local) and returns its captured stdout, stderr and exit error.
func callWeave(args ...string) ([]byte, []byte, error) {
	args = append([]string{"--local"}, args...)
	cmd := exec.Command("./weave", args...)
	// Start from a minimal fixed PATH; selected variables are copied below.
	cmd.Env = []string{
		"PATH=/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
	}
	// propagateEnv copies one variable into the child environment, but only
	// when it is actually set.
	propagateEnv := func(key string) {
		if val := os.Getenv(key); val != "" {
			cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, val))
		}
	}
	propagateEnv("DOCKER_HOST")
	// Propagate WEAVE_DEBUG, to make debugging easier.
	propagateEnv("WEAVE_DEBUG")
	// This prevents the code coverage contortions in our
	// integration test suite breaking things.
	propagateEnv("COVERAGE")
	// In case the router control endpoint address is non-standard.
	propagateEnv("WEAVE_HTTP_ADDR")
	Log.Debug("Calling weave args: ", args, "env: ", cmd.Env)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	err := cmd.Run()
	// Log failures too, so they are visible even if the caller drops err.
	if err != nil {
		Log.Debug("weave returned error: ", err)
	}
	return stdout.Bytes(), stderr.Bytes(), err
}
// unmarshalRequestBody decodes the JSON request body into target while
// logging the raw payload; the body is replaced so it remains readable.
func unmarshalRequestBody(r *http.Request, target interface{}) error {
	raw, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return err
	}
	Log.Debugf("->requestBody: %s", raw)
	if err := r.Body.Close(); err != nil {
		return err
	}
	// Re-arm the request body with an identical byte stream.
	r.Body = ioutil.NopCloser(bytes.NewReader(raw))
	decoder := json.NewDecoder(bytes.NewReader(raw))
	decoder.UseNumber() // don't want large numbers in scientific format
	return decoder.Decode(&target)
}
// marshalRequestBody replaces the request body with the JSON encoding of
// body and keeps ContentLength consistent.
func marshalRequestBody(r *http.Request, body interface{}) error {
	raw, err := json.Marshal(body)
	if err != nil {
		return err
	}
	Log.Debugf("<-requestBody: %s", raw)
	r.Body = ioutil.NopCloser(bytes.NewReader(raw))
	r.ContentLength = int64(len(raw))
	return nil
}
// unmarshalResponseBody decodes the JSON response body into target while
// logging the raw payload; the body is replaced so it remains readable.
func unmarshalResponseBody(r *http.Response, target interface{}) error {
	raw, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return err
	}
	Log.Debugf("->responseBody: %s", raw)
	if err := r.Body.Close(); err != nil {
		return err
	}
	// Re-arm the response body with an identical byte stream.
	r.Body = ioutil.NopCloser(bytes.NewReader(raw))
	decoder := json.NewDecoder(bytes.NewReader(raw))
	decoder.UseNumber() // don't want large numbers in scientific format
	return decoder.Decode(&target)
}
// marshalResponseBody replaces the response body with the JSON encoding of
// body, updating ContentLength and clearing TransferEncoding.
func marshalResponseBody(r *http.Response, body interface{}) error {
	raw, err := json.Marshal(body)
	if err != nil {
		return err
	}
	Log.Debugf("<-responseBody: %s", raw)
	r.Body = ioutil.NopCloser(bytes.NewReader(raw))
	r.ContentLength = int64(len(raw))
	// Stop it being chunked, because that hangs
	r.TransferEncoding = nil
	return nil
}
// inspectContainerInPath pulls the container id out of a docker API request
// path and inspects that container, logging and returning any failure.
func inspectContainerInPath(client *docker.Client, path string) (*docker.Container, error) {
	groups := containerIDRegexp.FindStringSubmatch(path)
	if groups == nil {
		err := fmt.Errorf("No container id found in request with path %s", path)
		Log.Warningln(err)
		return nil, err
	}
	cid := groups[2]
	container, err := client.InspectContainer(cid)
	if err != nil {
		Log.Warningf("Error inspecting container %s: %v", cid, err)
	}
	return container, err
}
// addVolume appends a source:target[:mode] bind to hostConfig's "Binds",
// first removing any existing bind whose mount point is the same target.
func addVolume(hostConfig jsonObject, source, target, mode string) error {
	current, err := hostConfig.StringArray("Binds")
	if err != nil {
		return err
	}
	var filtered []string
	for _, entry := range current {
		fields := strings.Split(entry, ":")
		if len(fields) >= 2 && fields[1] == target {
			// Skip binds that would collide with the new mount point.
			continue
		}
		filtered = append(filtered, entry)
	}
	mount := source + ":" + target
	if mode != "" {
		mount += ":" + mode
	}
	hostConfig["Binds"] = append(filtered, mount)
	return nil
}
|
package rxgo
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/reactivex/rxgo/options"
)
// Specs for the connectable observable returned by Publish: subscribers must
// stay silent until Connect is called, after which each one receives every
// published item.
var _ = Describe("Connectable Observable", func() {
	Context("when creating two subscriptions to a connectable observable", func() {
		// NOTE(review): channels are created and fed during spec-tree
		// construction (not in BeforeEach), so the two specs below share
		// state and rely on their declared order — confirm this is intended.
		in := make(chan interface{}, 2)
		out1 := make(chan interface{}, 2)
		out2 := make(chan interface{}, 2)
		connectableObs := FromChannel(in).Publish()
		connectableObs.Subscribe(next(out1), options.WithBufferBackpressureStrategy(2))
		connectableObs.Subscribe(next(out2), options.WithBufferBackpressureStrategy(2))
		in <- 1
		in <- 2
		It("should not trigger the next handlers", func() {
			// Before Connect, nothing may reach the subscribers.
			Expect(get(out1, timeout)).Should(Equal(noData))
			Expect(get(out2, timeout)).Should(Equal(noData))
		})
		Context("when connect is called", func() {
			It("should trigger the next handlers", func() {
				connectableObs.Connect()
				// Both subscribers see both buffered items, in order.
				Expect(get(out1, timeout)).Should(Equal(1))
				Expect(get(out1, timeout)).Should(Equal(2))
				Expect(get(out2, timeout)).Should(Equal(1))
				Expect(get(out2, timeout)).Should(Equal(2))
			})
		})
	})
	Context("when creating a subscription to a connectable observable", func() {
		in := make(chan interface{}, 2)
		out := make(chan interface{}, 2)
		connectableObs := FromChannel(in).Publish()
		connectableObs.Subscribe(next(out), options.WithBufferBackpressureStrategy(2))
		Context("when connect is called", func() {
			It("should not be blocking", func() {
				// Connect must return immediately; items sent afterwards
				// still flow to the subscriber.
				connectableObs.Connect()
				in <- 1
				in <- 2
				Expect(get(out, timeout)).Should(Equal(1))
				Expect(get(out, timeout)).Should(Equal(2))
			})
		})
	})
})
Back pressure strategy test
package rxgo
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/reactivex/rxgo/handlers"
"github.com/reactivex/rxgo/options"
"time"
)
// Specs for the connectable observable returned by Publish, including the
// buffered-backpressure and no-backpressure (drop) behaviours.
var _ = Describe("Connectable Observable", func() {
	Context("when creating two subscriptions to a connectable observable", func() {
		// NOTE(review): channels are created and fed during spec-tree
		// construction (not in BeforeEach), so these specs share state and
		// rely on their declared order — confirm this is intended.
		in := make(chan interface{}, 2)
		out1 := make(chan interface{}, 2)
		out2 := make(chan interface{}, 2)
		connectableObs := FromChannel(in).Publish()
		connectableObs.Subscribe(next(out1), options.WithBufferBackpressureStrategy(2))
		connectableObs.Subscribe(next(out2), options.WithBufferBackpressureStrategy(2))
		in <- 1
		in <- 2
		It("should not trigger the next handlers", func() {
			// Before Connect, nothing may reach the subscribers.
			Expect(get(out1, timeout)).Should(Equal(noData))
			Expect(get(out2, timeout)).Should(Equal(noData))
		})
		Context("when connect is called", func() {
			It("should trigger the next handlers", func() {
				connectableObs.Connect()
				// Both subscribers see both buffered items, in order.
				Expect(get(out1, timeout)).Should(Equal(1))
				Expect(get(out1, timeout)).Should(Equal(2))
				Expect(get(out2, timeout)).Should(Equal(1))
				Expect(get(out2, timeout)).Should(Equal(2))
			})
		})
	})
	Context("when creating a subscription to a connectable observable", func() {
		in := make(chan interface{}, 2)
		out := make(chan interface{}, 2)
		connectableObs := FromChannel(in).Publish()
		connectableObs.Subscribe(next(out), options.WithBufferBackpressureStrategy(2))
		Context("when connect is called", func() {
			It("should not be blocking", func() {
				// Connect must return immediately; items sent afterwards
				// still flow to the subscriber.
				connectableObs.Connect()
				in <- 1
				in <- 2
				Expect(get(out, timeout)).Should(Equal(1))
				Expect(get(out, timeout)).Should(Equal(2))
			})
		})
	})
	Context("when creating a subscription to a connectable observable", func() {
		Context("with back pressure strategy", func() {
			in := make(chan interface{}, 2)
			out := make(chan interface{}, 2)
			connectableObs := FromChannel(in).Publish()
			connectableObs.Subscribe(next(out), options.WithBufferBackpressureStrategy(2))
			connectableObs.Connect()
			in <- 1
			in <- 2
			in <- 3
			It("should buffer items", func() {
				// With a buffer of 2, all three items are eventually seen.
				Expect(get(out, timeout)).Should(Equal(1))
				Expect(get(out, timeout)).Should(Equal(2))
				Expect(get(out, timeout)).Should(Equal(3))
				Expect(get(out, timeout)).Should(Equal(noData))
			})
		})
		Context("without back pressure strategy", func() {
			in := make(chan interface{}, 2)
			out := make(chan interface{}, 2)
			connectableObs := FromChannel(in).Publish()
			// Slow consumer: sleeping in the handler forces drops when no
			// backpressure strategy is configured.
			connectableObs.Subscribe(handlers.NextFunc(func(i interface{}) {
				out <- i
				time.Sleep(timeout)
			}))
			connectableObs.Connect()
			in <- 1
			in <- 2
			time.Sleep(timeout)
			in <- 3
			It("should drop items", func() {
				// Item 2 is dropped while the consumer is busy with item 1.
				Expect(get(out, timeout)).Should(Equal(1))
				Expect(get(out, timeout)).Should(Equal(3))
				Expect(get(out, timeout)).Should(Equal(noData))
			})
		})
	})
})
|
package viewLib
import (
"encoding/json"
"fmt"
"log"
"os"
"strconv"
"sync"
"time"
"github.com/boltdb/bolt"
)
//counter is an instance of a [pageName]pageView hash map. This is implemented with a
//mutex RW lock to stop goroutine data races
var counter = struct {
sync.RWMutex
m map[string]int
}{m: make(map[string]int)}
//ips is an instance of a [ipAdress]bool hash map. We don't care about the bool,
//using a hash map in this case just for the IP Key, as it offers a
//easy implementation on a set with quick insertion. This struct has a
//mutex RW lock to stop goroutine data races
var ips = struct {
sync.RWMutex
m map[string]bool
}{m: make(map[string]bool)}
//IPList struct is used to marshal/unmarshal IP visitor data into JSON
//to be sent to current storage
type IPList struct {
IPs map[string]bool
}
//SavePoint struct is used to marshal/unmarshal pageview data into JSON
//to be sent to current and historic storage
type SavePoint struct {
PageCounts map[string]int
UniqueViews int
}
//main checks checks for previos data, sets up multithreading and then
//initiates the HTTP server
//viewLibInit loads any previously persisted counters from disk and starts
//the background writer goroutine. It must be called once at startup, before
//viewInc is used.
func viewLibInit() {
	//checks for present DB storage and loads it into memory
	checkForRecords()
	//start goroutine to periodically write IP and page view sets to disk
	go periodicMemoryWriter()
}
//countHandler locks the counter and ip set mutexes, writes to both then unlocks
//viewInc records one view of page from ip: it increments the page's counter
//and adds the ip to the unique-visitor set, taking each mutex only for the
//duration of its own write.
func viewInc(ip string, page string) {
	log.Println(ip + " requests " + page)
	counter.Lock()
	counter.m[page]++
	counter.Unlock()
	ips.Lock()
	ips.m[ip] = true
	ips.Unlock()
}
//periodicMemoryWriter initiates a BoltDB client, sets up a ticker and
//then wrties the IP and pageView maps to on persistant memory via BoltDB.
//This means that in the highly unlikely ;) case that the program crashes,
//a restart will reload the data and your view count won't vanish.
//periodicMemoryWriter initiates a BoltDB client, sets up a ticker and then
//writes the IP and pageView maps to persistent storage via BoltDB every ten
//minutes. This means that in the highly unlikely ;) case that the program
//crashes, a restart will reload the data and your view count won't vanish.
//It never returns and is intended to run as a goroutine.
func periodicMemoryWriter() {
	//start the bolt client
	boltClient, err := bolt.Open("viewCounter.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer boltClient.Close()
	//check and create a bucket in bolt to store the data.
	//FIX: bucket-creation and transaction errors were silently discarded.
	err = boltClient.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("historicData"))
		return err
	})
	errLog(err)
	//start a ticker for auto uploading the ips and view count to bolt
	//that triggers every ten minutes
	ticker := time.NewTicker(time.Minute * 10)
	for {
		<-ticker.C
		log.Println("Tick")
		fmt.Println("start:", time.Now())
		//key encodes day-of-year and year, e.g. day 123 of 2017 -> "1232017"
		date := strconv.Itoa((time.Now().YearDay() * 10000) + time.Now().Year())
		fmt.Println(date)
		counter.RLock()
		ips.RLock()
		m1 := SavePoint{
			PageCounts:  counter.m,
			UniqueViews: len(ips.m),
		}
		m2 := IPList{
			IPs: ips.m,
		}
		//BUG FIX: marshal while still holding the read locks. The structs
		//above only alias the live maps, so encoding them after RUnlock
		//raced with concurrent viewInc writers.
		m1json, err := json.Marshal(m1)
		errLog(err)
		m2json, err := json.Marshal(m2)
		errLog(err)
		counter.RUnlock()
		ips.RUnlock()
		//FIX: the transaction error is now surfaced instead of dropped, and
		//a failing Put aborts the transaction.
		err = boltClient.Update(func(tx *bolt.Tx) error {
			b := tx.Bucket([]byte("historicData"))
			if err := b.Put([]byte(date), m1json); err != nil {
				return err
			}
			if err := b.Put([]byte("current"), m1json); err != nil {
				return err
			}
			return b.Put([]byte("IPs"), m2json)
		})
		errLog(err)
		fmt.Println("end:", time.Now())
	}
}
//checkForRecords is used to see if a BoltDB database is present in the file system,
//and if it is then to load the IP and pageview sets into program memory.
//checkForRecords is used to see if a BoltDB database is present in the file
//system, and if it is then to load the IP and pageview sets into program
//memory. Missing or partial data is logged and skipped rather than fatal.
func checkForRecords() {
	if _, err := os.Stat("viewCounter.db"); err == nil {
		//FIX: the log message referred to "viewCount.db" while the actual
		//file is viewCounter.db.
		log.Println("viewCounter.db database already exists; processing old entries")
		boltClient, err := bolt.Open("viewCounter.db", 0600, nil) //maybe change the 0600 to a read only value
		if err != nil {
			log.Fatal(err)
		}
		defer boltClient.Close()
		var b1, b2 []byte
		//load the latest savepoint and IP set.
		//FIX: the View error was ignored, and the old errLog(err) calls in
		//the closure logged the stale (always nil) bolt.Open error. Also
		//guard against a missing bucket, which previously panicked.
		err = boltClient.View(func(tx *bolt.Tx) error {
			bucket := tx.Bucket([]byte("historicData"))
			if bucket == nil {
				return nil // nothing persisted yet
			}
			b1 = bucket.Get([]byte("current"))
			b2 = bucket.Get([]byte("IPs"))
			return nil
		})
		errLog(err)
		if b1 != nil {
			var mjson1 SavePoint
			errLog(json.Unmarshal(b1, &mjson1))
			for k, v := range mjson1.PageCounts {
				counter.m[k] = v
			}
		}
		if b2 != nil {
			var mjson2 IPList
			errLog(json.Unmarshal(b2, &mjson2))
			for k := range mjson2.IPs {
				ips.m[k] = true
			}
		}
	} else {
		log.Println("viewCounter.db not present; creating database")
	}
}
//errLog reports a non-nil error to the standard logger without aborting.
func errLog(err error) {
	if err == nil {
		return
	}
	log.Print(err)
}
Updated with correct method names and docs
package viewLib
import (
"encoding/json"
"fmt"
"log"
"os"
"strconv"
"sync"
"time"
"github.com/boltdb/bolt"
)
//counter is an instance of a [pageName]pageView hash map. This is implemented with a
//mutex RW lock to stop goroutine data races
var counter = struct {
sync.RWMutex
m map[string]int
}{m: make(map[string]int)}
//ips is an instance of a [ipAdress]bool hash map. We don't care about the bool,
//using a hash map in this case just for the IP Key, as it offers a
//easy implementation on a set with quick insertion. This struct has a
//mutex RW lock to stop goroutine data races
var ips = struct {
sync.RWMutex
m map[string]bool
}{m: make(map[string]bool)}
//IPList struct is used to marshal/unmarshal IP visitor data into JSON
//to be sent to current storage
type IPList struct {
IPs map[string]bool
}
//SavePoint struct is used to marshal/unmarshal pageview data into JSON
//to be sent to current and historic storage
type SavePoint struct {
PageCounts map[string]int
UniqueViews int
}
//Init checks checks for previos data, sets up multithreading and then
//initiates the HTTP server
//Init loads any previously persisted counters from disk and starts the
//background writer goroutine. Call once at startup, before ViewInc is used.
func Init() {
	//checks for present DB storage and loads it into memory
	checkForRecords()
	//start goroutine to periodically write IP and page view sets to disk
	go periodicMemoryWriter()
}
//ViewInc locks the counter and ip set mutexes, writes to both then unlocks
//ViewInc records one view of page from ip: it increments the page's counter
//and adds the ip to the unique-visitor set, taking each mutex only for the
//duration of its own write.
func ViewInc(ip string, page string) {
	log.Println(ip + " requests " + page)
	counter.Lock()
	counter.m[page]++
	counter.Unlock()
	ips.Lock()
	ips.m[ip] = true
	ips.Unlock()
}
//periodicMemoryWriter initiates a BoltDB client, sets up a ticker and
//then wrties the IP and pageView maps to on persistant memory via BoltDB.
//This means that in the highly unlikely ;) case that the program crashes,
//a restart will reload the data and your view count won't vanish.
//periodicMemoryWriter initiates a BoltDB client, sets up a ticker and then
//writes the IP and pageView maps to persistent storage via BoltDB every ten
//minutes. This means that in the highly unlikely ;) case that the program
//crashes, a restart will reload the data and your view count won't vanish.
//It never returns and is intended to run as a goroutine.
func periodicMemoryWriter() {
	//start the bolt client
	boltClient, err := bolt.Open("viewCounter.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer boltClient.Close()
	//check and create a bucket in bolt to store the data
	//NOTE(review): the Update and CreateBucketIfNotExists errors are
	//discarded here — consider logging them.
	boltClient.Update(func(tx *bolt.Tx) error {
		tx.CreateBucketIfNotExists([]byte("historicData"))
		return nil
	})
	//start a ticker for auto uploading the ips and view count to bolt
	//that triggers every ten minutes
	ticker := time.NewTicker(time.Minute * 10)
	for {
		<-ticker.C
		log.Println("Tick")
		fmt.Println("start:", time.Now())
		//key encodes day-of-year and year, e.g. day 123 of 2017 -> "1232017"
		date := strconv.Itoa((time.Now().YearDay() * 10000) + time.Now().Year())
		fmt.Println(date)
		//snapshot the maps under read locks
		counter.RLock()
		ips.RLock()
		m1 := SavePoint{
			PageCounts: counter.m,
			UniqueViews: len(ips.m),
		}
		m2 := IPList{
			IPs: ips.m,
		}
		counter.RUnlock()
		ips.RUnlock()
		//NOTE(review): m1/m2 only alias the live maps, so marshaling after
		//RUnlock can race with concurrent ViewInc writers — verify.
		m1json, err := json.Marshal(m1)
		errLog(err)
		m2json, err := json.Marshal(m2)
		errLog(err)
		//write the day-keyed savepoint plus the "current" and "IPs" keys
		boltClient.Update(func(tx *bolt.Tx) error {
			err = tx.Bucket([]byte("historicData")).Put([]byte(date), []byte(m1json))
			errLog(err)
			err = tx.Bucket([]byte("historicData")).Put([]byte("current"), []byte(m1json))
			errLog(err)
			err = tx.Bucket([]byte("historicData")).Put([]byte("IPs"), []byte(m2json))
			errLog(err)
			return nil
		})
		fmt.Println("end:", time.Now())
	}
}
//checkForRecords is used to see if a BoltDB database is present in the file system,
//and if it is then to load the IP and pageview sets into program memory.
//checkForRecords is used to see if a BoltDB database is present in the file
//system, and if it is then to load the IP and pageview sets into program
//memory.
func checkForRecords() {
	if _, err := os.Stat("viewCounter.db"); err == nil {
		log.Println("viewCount.db database already exists; processing old entries")
		boltClient, err := bolt.Open("viewCounter.db", 0600, nil) //maybe change the 600 to a read only value
		if err != nil {
			log.Fatal(err)
		}
		defer boltClient.Close()
		var b1, b2 []byte
		//read the persisted savepoint and IP set.
		//NOTE(review): errLog(err) below logs the stale bolt.Open error
		//(always nil here), and the View return value is ignored — verify.
		boltClient.View(func(tx *bolt.Tx) error {
			b1 = tx.Bucket([]byte("historicData")).Get([]byte("current"))
			errLog(err)
			b2 = tx.Bucket([]byte("historicData")).Get([]byte("IPs"))
			errLog(err)
			return nil
		})
		//merge stored page counts back into the in-memory counter
		var mjson1 SavePoint
		err = json.Unmarshal(b1, &mjson1)
		errLog(err)
		for k, v := range mjson1.PageCounts {
			counter.m[k] = v
		}
		//merge stored IPs back into the in-memory visitor set
		var mjson2 IPList
		err = json.Unmarshal(b2, &mjson2)
		errLog(err)
		for k := range mjson2.IPs {
			ips.m[k] = true
		}
	} else {
		log.Println("viewCount.db not present; creating database")
	}
}
//errLog reports a non-nil error to the standard logger without aborting.
func errLog(err error) {
	if err != nil {
		log.Print(err)
	}
}
|
package discoverd
import (
"fmt"
"os"
"strings"
"sync"
"sync/atomic"
"time"
hh "github.com/flynn/flynn/pkg/httphelper"
)
// EnvInstanceMeta are environment variables which will be automatically added
// to instance metadata if present.
var EnvInstanceMeta = map[string]struct{}{
"FLYNN_APP_ID": {},
"FLYNN_RELEASE_ID": {},
"FLYNN_PROCESS_TYPE": {},
"FLYNN_JOB_ID": {},
}
type Heartbeater interface {
SetMeta(map[string]string) error
Close() error
Addr() string
SetClient(*Client)
}
// maybeAddService registers service with the discovery server, treating an
// "already exists" response as success.
func (c *Client) maybeAddService(service string) error {
	err := c.AddService(service, nil)
	if err == nil || hh.IsObjectExistsError(err) {
		return nil
	}
	return err
}
// AddServiceAndRegister ensures service exists (ignoring "already exists")
// and then registers addr with it, returning the resulting Heartbeater.
func (c *Client) AddServiceAndRegister(service, addr string) (Heartbeater, error) {
	if err := c.maybeAddService(service); err != nil {
		return nil, err
	}
	return c.Register(service, addr)
}
// AddServiceAndRegisterInstance ensures service exists (ignoring "already
// exists") and then registers inst with it.
func (c *Client) AddServiceAndRegisterInstance(service string, inst *Instance) (Heartbeater, error) {
	if err := c.maybeAddService(service); err != nil {
		return nil, err
	}
	return c.RegisterInstance(service, inst)
}
// Register registers addr as an instance of service; it is a convenience
// wrapper around RegisterInstance.
func (c *Client) Register(service, addr string) (Heartbeater, error) {
	return c.RegisterInstance(service, &Instance{Addr: addr})
}
// RegisterInstance registers inst with service and starts a background
// heartbeat loop that keeps the registration alive until Close is called.
//
// The instance address is expanded against EXTERNAL_IP, the protocol
// defaults to tcp, and any EnvInstanceMeta environment variables present are
// copied into the instance metadata.
func (c *Client) RegisterInstance(service string, inst *Instance) (Heartbeater, error) {
	inst.Addr = expandAddr(inst.Addr)
	if inst.Proto == "" {
		inst.Proto = "tcp"
	}
	inst.ID = inst.id()
	// add EnvInstanceMeta if present
	for _, env := range os.Environ() {
		kv := strings.SplitN(env, "=", 2)
		if _, ok := EnvInstanceMeta[kv[0]]; !ok {
			continue
		}
		if inst.Meta == nil {
			inst.Meta = make(map[string]string)
		}
		inst.Meta[kv[0]] = kv[1]
	}
	h := newHeartbeater(c, service, inst)
	firstErr := make(chan error)
	go h.run(firstErr)
	// BUG FIX: previously the heartbeater was returned even when the
	// initial registration failed, so callers who stored the result and
	// later nil-checked it held a broken Heartbeater.
	if err := <-firstErr; err != nil {
		return nil, err
	}
	return h, nil
}
// newHeartbeater builds a heartbeater for inst on the given service. The
// instance is cloned so later mutations by the caller do not race with the
// heartbeat loop, and the client is stored atomically for SetClient swaps.
func newHeartbeater(c *Client, service string, inst *Instance) *heartbeater {
	h := &heartbeater{
		service: service,
		stop:    make(chan struct{}),
		done:    make(chan struct{}),
		inst:    inst.Clone(),
	}
	h.c.Store(c)
	return h
}
// heartbeater keeps an instance registration alive by periodically
// re-PUTting it to the discovery server (see run). It implements the
// Heartbeater interface.
type heartbeater struct {
	c    atomic.Value  // *Client; swapped atomically by SetClient
	stop chan struct{} // closed by Close to request shutdown
	done chan struct{} // closed by run after deregistration completes
	// Mutex protects inst.Meta
	sync.Mutex
	inst      *Instance
	service   string
	closeOnce sync.Once
}

// Close stops the heartbeat loop, waits for deregistration to finish, and
// is safe to call multiple times. It always returns nil.
func (h *heartbeater) Close() error {
	h.closeOnce.Do(func() {
		close(h.stop)
		<-h.done
	})
	return nil
}

// SetMeta replaces the instance metadata and immediately re-registers the
// instance so the new metadata is visible to the discovery server.
func (h *heartbeater) SetMeta(meta map[string]string) error {
	h.Lock()
	defer h.Unlock()
	h.inst.Meta = meta
	return h.client().c.Put(fmt.Sprintf("/services/%s/instances/%s", h.service, h.inst.ID), h.inst, nil)
}

// Addr returns the advertised address of the registered instance.
func (h *heartbeater) Addr() string {
	return h.inst.Addr
}

// SetClient atomically swaps the client used for subsequent heartbeats.
func (h *heartbeater) SetClient(c *Client) {
	h.c.Store(c)
}

// client returns the currently stored *Client.
func (h *heartbeater) client() *Client {
	return h.c.Load().(*Client)
}
const (
heartbeatInterval = 5 * time.Second
heartbeatFailingInterval = 200 * time.Millisecond
)
// run is the heartbeat loop. It performs the initial registration, reports
// its result on firstErr exactly once, then re-registers every
// heartbeatInterval — retrying sooner (heartbeatFailingInterval) after a
// failure — until stop is closed, at which point it deregisters the
// instance and closes done.
func (h *heartbeater) run(firstErr chan<- error) {
	path := fmt.Sprintf("/services/%s/instances/%s", h.service, h.inst.ID)
	// register PUTs the instance while holding the lock protecting inst.Meta.
	register := func() error {
		h.Lock()
		defer h.Unlock()
		return h.client().c.Put(path, h.inst, nil)
	}
	err := register()
	firstErr <- err
	if err != nil {
		// Initial registration failed: the loop never starts.
		return
	}
	timer := time.NewTimer(heartbeatInterval)
	for {
		select {
		case <-timer.C:
			if err := register(); err != nil {
				h.client().Logger.Error("heartbeat failed", "service", h.service, "addr", h.inst.Addr, "err", err)
				timer.Reset(heartbeatFailingInterval)
				break // exits the select, not the for loop
			}
			timer.Reset(heartbeatInterval)
		case <-h.stop:
			// Best-effort deregistration; the error is intentionally ignored.
			h.client().c.Delete(path)
			close(h.done)
			return
		}
	}
}
// expandAddr turns an address of the form ":port" into "host:port" using the
// EXTERNAL_IP environment variable; complete addresses pass through as-is.
func expandAddr(addr string) string {
	// FIX: guard the index below — the original panicked on an empty addr.
	if addr == "" {
		return addr
	}
	if addr[0] == ':' {
		return os.Getenv("EXTERNAL_IP") + addr
	}
	return addr
}
discoverd/client: Don’t return heartbeater if error is encountered
This avoids mistakes where the heartbeater is assigned somewhere and
later checked if nil.
Signed-off-by: Jonathan Rudenberg <3692bfa45759a67d83aedf0045f6cb635a966abf@titanous.com>
package discoverd
import (
"fmt"
"os"
"strings"
"sync"
"sync/atomic"
"time"
hh "github.com/flynn/flynn/pkg/httphelper"
)
// EnvInstanceMeta are environment variables which will be automatically added
// to instance metadata if present.
var EnvInstanceMeta = map[string]struct{}{
"FLYNN_APP_ID": {},
"FLYNN_RELEASE_ID": {},
"FLYNN_PROCESS_TYPE": {},
"FLYNN_JOB_ID": {},
}
type Heartbeater interface {
SetMeta(map[string]string) error
Close() error
Addr() string
SetClient(*Client)
}
// maybeAddService registers service with the discovery server, treating an
// "already exists" response as success.
func (c *Client) maybeAddService(service string) error {
	if err := c.AddService(service, nil); err != nil {
		if !hh.IsObjectExistsError(err) {
			return err
		}
	}
	return nil
}
// AddServiceAndRegister ensures service exists (ignoring "already exists")
// and then registers addr with it, returning the resulting Heartbeater.
func (c *Client) AddServiceAndRegister(service, addr string) (Heartbeater, error) {
	if err := c.maybeAddService(service); err != nil {
		return nil, err
	}
	return c.Register(service, addr)
}
// AddServiceAndRegisterInstance ensures service exists (ignoring "already
// exists") and then registers inst with it.
func (c *Client) AddServiceAndRegisterInstance(service string, inst *Instance) (Heartbeater, error) {
	if err := c.maybeAddService(service); err != nil {
		return nil, err
	}
	return c.RegisterInstance(service, inst)
}
// Register registers addr as an instance of service; it is a convenience
// wrapper around RegisterInstance.
func (c *Client) Register(service, addr string) (Heartbeater, error) {
	return c.RegisterInstance(service, &Instance{Addr: addr})
}
// RegisterInstance registers inst with service and starts a background
// heartbeat loop that keeps the registration alive until Close is called.
// On a failed initial registration it returns (nil, err) — never a live
// heartbeater alongside an error.
func (c *Client) RegisterInstance(service string, inst *Instance) (Heartbeater, error) {
	// Expand ":port" addresses against EXTERNAL_IP and default the protocol.
	inst.Addr = expandAddr(inst.Addr)
	if inst.Proto == "" {
		inst.Proto = "tcp"
	}
	inst.ID = inst.id()
	// add EnvInstanceMeta if present
	for _, env := range os.Environ() {
		kv := strings.SplitN(env, "=", 2)
		if _, ok := EnvInstanceMeta[kv[0]]; !ok {
			continue
		}
		if inst.Meta == nil {
			inst.Meta = make(map[string]string)
		}
		inst.Meta[kv[0]] = kv[1]
	}
	h := newHeartbeater(c, service, inst)
	firstErr := make(chan error)
	go h.run(firstErr)
	// Wait for the initial registration; do not hand out the heartbeater
	// if it failed.
	if err := <-firstErr; err != nil {
		return nil, err
	}
	return h, nil
}
// newHeartbeater builds a heartbeater for inst on the given service. The
// instance is cloned so later mutations by the caller do not race with the
// heartbeat loop, and the client is stored atomically for SetClient swaps.
func newHeartbeater(c *Client, service string, inst *Instance) *heartbeater {
	h := &heartbeater{
		service: service,
		stop:    make(chan struct{}),
		done:    make(chan struct{}),
		inst:    inst.Clone(),
	}
	h.c.Store(c)
	return h
}
// heartbeater keeps an instance registration alive by periodically
// re-PUTting it to the discovery server (see run). It implements the
// Heartbeater interface.
type heartbeater struct {
	c    atomic.Value  // *Client; swapped atomically by SetClient
	stop chan struct{} // closed by Close to request shutdown
	done chan struct{} // closed by run after deregistration completes
	// Mutex protects inst.Meta
	sync.Mutex
	inst      *Instance
	service   string
	closeOnce sync.Once
}

// Close stops the heartbeat loop, waits for deregistration to finish, and
// is safe to call multiple times. It always returns nil.
func (h *heartbeater) Close() error {
	h.closeOnce.Do(func() {
		close(h.stop)
		<-h.done
	})
	return nil
}

// SetMeta replaces the instance metadata and immediately re-registers the
// instance so the new metadata is visible to the discovery server.
func (h *heartbeater) SetMeta(meta map[string]string) error {
	h.Lock()
	defer h.Unlock()
	h.inst.Meta = meta
	return h.client().c.Put(fmt.Sprintf("/services/%s/instances/%s", h.service, h.inst.ID), h.inst, nil)
}

// Addr returns the advertised address of the registered instance.
func (h *heartbeater) Addr() string {
	return h.inst.Addr
}

// SetClient atomically swaps the client used for subsequent heartbeats.
func (h *heartbeater) SetClient(c *Client) {
	h.c.Store(c)
}

// client returns the currently stored *Client.
func (h *heartbeater) client() *Client {
	return h.c.Load().(*Client)
}
const (
heartbeatInterval = 5 * time.Second
heartbeatFailingInterval = 200 * time.Millisecond
)
// run is the heartbeat loop. It performs the initial registration, reports
// its result on firstErr exactly once, then re-registers every
// heartbeatInterval — retrying sooner (heartbeatFailingInterval) after a
// failure — until stop is closed, at which point it deregisters the
// instance and closes done.
func (h *heartbeater) run(firstErr chan<- error) {
	path := fmt.Sprintf("/services/%s/instances/%s", h.service, h.inst.ID)
	// register PUTs the instance while holding the lock protecting inst.Meta.
	register := func() error {
		h.Lock()
		defer h.Unlock()
		return h.client().c.Put(path, h.inst, nil)
	}
	err := register()
	firstErr <- err
	if err != nil {
		// Initial registration failed: the loop never starts.
		return
	}
	timer := time.NewTimer(heartbeatInterval)
	for {
		select {
		case <-timer.C:
			if err := register(); err != nil {
				h.client().Logger.Error("heartbeat failed", "service", h.service, "addr", h.inst.Addr, "err", err)
				timer.Reset(heartbeatFailingInterval)
				break // exits the select, not the for loop
			}
			timer.Reset(heartbeatInterval)
		case <-h.stop:
			// Best-effort deregistration; the error is intentionally ignored.
			h.client().c.Delete(path)
			close(h.done)
			return
		}
	}
}
// expandAddr turns an address of the form ":port" into "host:port" using the
// EXTERNAL_IP environment variable; complete addresses pass through as-is.
func expandAddr(addr string) string {
	// FIX: guard the index below — the original panicked on an empty addr.
	if addr == "" {
		return addr
	}
	if addr[0] == ':' {
		return os.Getenv("EXTERNAL_IP") + addr
	}
	return addr
}
|
package zog
import (
"fmt"
"strings"
)
// Instruction is a single Z80 instruction that can render itself in
// assembler syntax and emit its opcode bytes.
type Instruction interface {
	String() string
	Encode() []byte
}
// LD8 is an 8-bit load instruction (LD dst, src).
type LD8 struct {
	InstBin8
}

// NewLD8 builds an 8-bit load from src into dst.
func NewLD8(dst Loc8, src Loc8) *LD8 {
	return &LD8{InstBin8{dst: dst, src: src}}
}

// String renders the instruction in assembler syntax.
func (l *LD8) String() string {
	return fmt.Sprintf("LD %s, %s", l.dst, l.src)
}

// Encode emits the opcode bytes for the load.
//
// The ED-prefixed I/R register transfers are handled first as literal
// special cases; the remaining forms are encoded from the inspected dst/src
// location info. FIX: removed a leftover debugging fmt.Printf from the
// register-to-register case (Encode no longer writes to stdout), and the
// LD (BC)/(DE), A case now tests dstInfo.isBC — it previously tested
// srcInfo.isBC, i.e. the A register, so LD (BC), A encoded as LD (DE), A.
func (l *LD8) Encode() []byte {
	// ED special cases
	switch true {
	case l.dst == I && l.src == A:
		return []byte{0xed, 0x47}
	case l.dst == A && l.src == I:
		return []byte{0xed, 0x57}
	case l.dst == R && l.src == A:
		return []byte{0xed, 0x4f}
	case l.dst == A && l.src == R:
		return []byte{0xed, 0x5f}
	}
	l.inspect()
	switch l.dstInfo.ltype {
	case BCDEContents:
		// LD (BC), A or LD (DE), A — the register pair is the destination.
		p := byte(1)
		if l.dstInfo.isBC {
			p = 0
		}
		buf := []byte{encodeXPQZ(0, p, 0, 2)}
		return buf
	case ImmediateContents:
		// LD (nn), A
		buf := []byte{encodeXPQZ(0, 3, 0, 2)}
		buf = append(buf, l.dstInfo.imm16...)
		return buf
	}
	if l.dstInfo.ltype != tableR {
		panic("Non-tableR dst in LD8")
	}
	switch l.srcInfo.ltype {
	case tableR:
		// LD r, r'
		b := encodeXYZ(1, l.dstInfo.idxTable, l.srcInfo.idxTable)
		return idxEncodeHelper([]byte{b}, l.idx)
	case Immediate:
		// LD r, n
		b := encodeXYZ(0, l.dstInfo.idxTable, 6)
		return idxEncodeHelper([]byte{b, l.srcInfo.imm8}, l.idx)
	case BCDEContents:
		// LD A, (BC) or LD A, (DE)
		p := byte(1)
		if l.srcInfo.isBC {
			p = 0
		}
		b := encodeXPQZ(0, p, 1, 2)
		return []byte{b}
	case ImmediateContents:
		// LD A, (nn)
		buf := []byte{encodeXPQZ(0, 3, 1, 2)}
		buf = append(buf, l.srcInfo.imm16...)
		return buf
	default:
		panic("Unknown src type in LD8")
	}
}
// INC8 is an 8-bit increment instruction (INC r).
type INC8 struct {
	InstU8
}

// NewINC8 builds an increment of the given 8-bit location.
func NewINC8(l Loc8) *INC8 {
	return &INC8{InstU8{l: l}}
}

// String renders the instruction in assembler syntax.
func (i *INC8) String() string {
	return fmt.Sprintf("INC %s", i.l)
}

// Encode emits the opcode (x=0, z=4, y selects the register).
func (i *INC8) Encode() []byte {
	i.inspect()
	if i.lInfo.ltype != tableR {
		panic("Non-tableR INC8")
	}
	opcode := encodeXYZ(0, i.lInfo.idxTable, 4)
	return idxEncodeHelper([]byte{opcode}, i.idx)
}
// DEC8 is an 8-bit decrement instruction (DEC r).
type DEC8 struct {
	InstU8
}

// NewDEC8 builds a decrement of the given 8-bit location.
func NewDEC8(l Loc8) *DEC8 {
	return &DEC8{InstU8{l: l}}
}

// String renders the instruction in assembler syntax.
func (d *DEC8) String() string {
	return fmt.Sprintf("DEC %s", d.l)
}

// Encode emits the opcode (x=0, z=5, y selects the register).
func (d *DEC8) Encode() []byte {
	d.inspect()
	if d.lInfo.ltype != tableR {
		panic("Non-tableR DEC8")
	}
	opcode := encodeXYZ(0, d.lInfo.idxTable, 5)
	return idxEncodeHelper([]byte{opcode}, d.idx)
}
// LD16 is a 16-bit load instruction (LD dst, src).
type LD16 struct {
	InstBin16
}

// NewLD16 builds a 16-bit load from src into dst.
func NewLD16(dst, src Loc16) *LD16 {
	return &LD16{InstBin16: InstBin16{dst: dst, src: src}}
}

// String renders the instruction in assembler syntax.
func (l *LD16) String() string {
	return fmt.Sprintf("LD %s, %s", l.dst, l.src)
}

// Encode emits the opcode bytes for the 16-bit load, choosing between the
// plain and ED-prefixed encodings where the instruction has both.
func (l *LD16) Encode() []byte {
	l.inspect()
	switch l.dstInfo.ltype {
	case ImmediateContents:
		// LD (nn), HL has multiple encodings, we choose the non-ED one
		if l.srcInfo.isHLLike() {
			buf := []byte{encodeXPQZ(0, 2, 0, 2)}
			buf = append(buf, l.dstInfo.imm16...)
			return idxEncodeHelper(buf, l.idx)
		} else {
			if l.srcInfo.ltype != tableRP {
				panic("Non-tableRP src in LD16 (NN), src")
			}
			// ED-prefixed LD (nn), rp
			buf := []byte{0xed, encodeXPQZ(1, l.srcInfo.idxTable, 0, 3)}
			buf = append(buf, l.dstInfo.imm16...)
			return buf
		}
	}
	if l.dstInfo.ltype != tableRP {
		panic("Non-tableRP dst in LD16")
	}
	switch l.srcInfo.ltype {
	case Immediate:
		// LD rp, nn
		buf := []byte{encodeXPQZ(0, l.dstInfo.idxTable, 0, 1)}
		buf = append(buf, l.srcInfo.imm16...)
		return idxEncodeHelper(buf, l.idx)
	case ImmediateContents:
		// LD HL, (nn) has multiple encodings
		if l.dstInfo.isHLLike() {
			buf := []byte{encodeXPQZ(0, 2, 1, 2)}
			buf = append(buf, l.srcInfo.imm16...)
			return idxEncodeHelper(buf, l.idx)
		} else {
			if l.dstInfo.ltype != tableRP {
				// NOTE(review): message says "src" but this is the dst
				// check — looks copy-pasted from the branch above.
				panic("Non-tableRP src in LD16 (NN), src")
			}
			// ED-prefixed LD rp, (nn)
			buf := []byte{0xed, encodeXPQZ(1, l.dstInfo.idxTable, 1, 3)}
			buf = append(buf, l.srcInfo.imm16...)
			return buf
		}
	case tableRP:
		if l.srcInfo.isHLLike() {
			// Only LD SP, HL/IX/IY exists for register-to-register.
			if l.dst != SP {
				panic("HL-like load to non-SP")
			}
			buf := []byte{encodeXPQZ(3, 3, 1, 1)}
			return idxEncodeHelper(buf, l.idx)
		} else {
			panic("Non-HL like load to something")
		}
	default:
		panic("Unknown src type in LD16")
	}
}
// ADD16 is the 16-bit add instruction: ADD HL/IX/IY, rp.
type ADD16 struct {
	InstBin16
}

// NewADD16 builds an ADD16 with the given destination and source.
func NewADD16(dst, src Loc16) *ADD16 {
	return &ADD16{InstBin16: InstBin16{dst: dst, src: src}}
}

// String renders the instruction in assembler syntax.
func (a *ADD16) String() string {
	return fmt.Sprintf("ADD %s, %s", a.dst, a.src)
}

// Encode emits the opcode (x=0, q=1, z=1, p from the pair table),
// with an index prefix when the destination is IX/IY.
func (a *ADD16) Encode() []byte {
	a.inspect()
	if a.dstInfo.ltype != tableRP {
		panic("Non-tableRP dst in ADD16")
	}
	if a.srcInfo.ltype != tableRP {
		panic("Non-tableRP src in ADD16")
	}
	// TODO: support other ADD16
	if !a.dstInfo.isHLLike() {
		panic("Non-HL dst in ADD16")
	}
	// src ltype is known to be tableRP here, so encode directly.
	opcode := encodeXPQZ(0, a.srcInfo.idxTable, 1, 1)
	return idxEncodeHelper([]byte{opcode}, a.idx)
}
// ADC16 is the 16-bit add-with-carry instruction: ADC HL, rp.
type ADC16 struct {
	InstBin16
}

// NewADC16 builds an ADC16 with the given destination and source.
func NewADC16(dst, src Loc16) *ADC16 {
	return &ADC16{InstBin16: InstBin16{dst: dst, src: src}}
}

// String renders the instruction in assembler syntax.
func (a *ADC16) String() string {
	return fmt.Sprintf("ADC %s, %s", a.dst, a.src)
}

// Encode emits the ED-prefixed opcode (x=1, q=1, z=2).
// NOTE(review): the destination is not validated — presumably callers
// only construct ADC16 with an HL destination; confirm at call sites.
func (a *ADC16) Encode() []byte {
	a.inspect()
	if a.srcInfo.ltype != tableRP {
		panic("Non-tableRP src in ADC16")
	}
	buf := []byte{0xed, encodeXPQZ(1, a.srcInfo.idxTable, 1, 2)}
	return idxEncodeHelper(buf, a.idx)
}
// SBC16 is the 16-bit subtract-with-carry instruction: SBC HL, rp.
type SBC16 struct {
	InstBin16
}

// NewSBC16 builds an SBC16 with the given destination and source.
func NewSBC16(dst, src Loc16) *SBC16 {
	return &SBC16{InstBin16: InstBin16{dst: dst, src: src}}
}

// String renders the instruction in assembler syntax.
func (s *SBC16) String() string {
	return fmt.Sprintf("SBC %s, %s", s.dst, s.src)
}

// Encode emits the ED-prefixed opcode (x=1, q=0, z=2).
func (s *SBC16) Encode() []byte {
	s.inspect()
	if s.srcInfo.ltype != tableRP {
		// Fixed copy-paste error: message previously said "ADC16".
		panic("Non-tableRP src in SBC16")
	}
	buf := []byte{0xed, encodeXPQZ(1, s.srcInfo.idxTable, 0, 2)}
	return idxEncodeHelper(buf, s.idx)
}
// INC16 is the 16-bit increment instruction: INC rp.
type INC16 struct {
	InstU16
}

// NewINC16 builds an INC16 targeting the given 16-bit location.
func NewINC16(l Loc16) *INC16 {
	return &INC16{InstU16{l: l}}
}

// String renders the instruction in assembler syntax.
func (i *INC16) String() string {
	return fmt.Sprintf("INC %s", i.l)
}

// Encode emits the opcode (x=0, q=0, z=3, p from the pair table).
func (i *INC16) Encode() []byte {
	i.inspect()
	if i.lInfo.ltype != tableRP {
		panic("Non-tableRP INC16")
	}
	opcode := encodeXPQZ(0, i.lInfo.idxTable, 0, 3)
	return idxEncodeHelper([]byte{opcode}, i.idx)
}
// DEC16 is the 16-bit decrement instruction: DEC rp.
type DEC16 struct {
	InstU16
}

// NewDEC16 builds a DEC16 targeting the given 16-bit location.
func NewDEC16(l Loc16) *DEC16 {
	return &DEC16{InstU16{l: l}}
}

// String renders the instruction in assembler syntax.
func (d *DEC16) String() string {
	return fmt.Sprintf("DEC %s", d.l)
}

// Encode emits the opcode (x=0, q=1, z=3, p from the pair table).
func (d *DEC16) Encode() []byte {
	d.inspect()
	if d.lInfo.ltype != tableRP {
		panic("Non-tableRP DEC16")
	}
	opcode := encodeXPQZ(0, d.lInfo.idxTable, 1, 3)
	return idxEncodeHelper([]byte{opcode}, d.idx)
}
// EX exchanges two 16-bit values.
type EX struct {
	InstBin16
}

// NewEX builds an EX instruction with the given destination and source.
func NewEX(dst, src Loc16) *EX {
	return &EX{InstBin16: InstBin16{dst: dst, src: src}}
}

// String renders the instruction in assembler syntax.
func (ex *EX) String() string {
	return fmt.Sprintf("EX %s, %s", ex.dst, ex.src)
}

// Encode emits one of the three EX forms: EX AF,AF' (0x08),
// EX (SP),HL-like (with index prefix as needed), or EX DE,HL.
// Any other operand combination panics.
func (ex *EX) Encode() []byte {
	if ex.dst == AF && ex.src == AF_PRIME {
		return []byte{0x08}
	} else if ex.dst.String() == (Contents{SP}).String() {
		// Inspect src only to pick up any IX/IY index prefix;
		// info itself is not otherwise used.
		var info loc16Info
		var idx idxInfo
		inspectLoc16(ex.src, &info, &idx, false)
		buf := []byte{encodeXYZ(3, 4, 3)}
		return idxEncodeHelper(buf, idx)
	} else if ex.dst == DE && ex.src == HL {
		// EX DE,HL is an exception to the IX/IY rule
		return []byte{encodeXYZ(3, 5, 3)}
	}
	panic("Unrecognised EX instruction")
}
// DJNZ decrements B and jumps by a signed displacement while B != 0.
type DJNZ struct {
	d Disp
}

// String renders the instruction in assembler syntax.
func (d *DJNZ) String() string {
	return fmt.Sprintf("DJNZ %s", d.d)
}

// Encode emits the opcode (x=0, y=2, z=0) followed by the displacement.
func (d *DJNZ) Encode() []byte {
	opcode := encodeXYZ(0, 2, 0)
	return []byte{opcode, byte(d.d)}
}
// JR is the relative jump instruction, optionally conditional.
type JR struct {
	c Conditional
	d Disp
}

// String renders the instruction, omitting an always-true condition.
func (j *JR) String() string {
	if j.c == True || j.c == nil {
		return fmt.Sprintf("JR %s", j.d)
	} else {
		return fmt.Sprintf("JR %s, %s", j.c, j.d)
	}
}

// Encode emits the opcode (x=0, z=0) followed by the displacement.
// Unconditional JR uses y=3; conditional forms use y = cc-index + 4.
func (j *JR) Encode() []byte {
	var y byte
	if j.c == True || j.c == nil {
		y = 3
	} else {
		y = findInTableCC(j.c)
		y += 4
	}
	b := encodeXYZ(0, y, 0)
	return []byte{b, byte(j.d)}
}
// JP is the absolute jump instruction, optionally conditional.
type JP struct {
	InstU16
	c Conditional
}

// NewJP builds a JP with condition c (True or nil means unconditional)
// and target l.
func NewJP(c Conditional, l Loc16) *JP {
	return &JP{InstU16: InstU16{l: l}, c: c}
}

// String renders the instruction, omitting an always-true condition.
func (jp *JP) String() string {
	if jp.c == True || jp.c == nil {
		return fmt.Sprintf("JP %s", jp.l)
	} else {
		return fmt.Sprintf("JP %s, %s", jp.c, jp.l)
	}
}

// Encode emits JP (HL)-like (unconditional only, with index prefix),
// unconditional JP nn (x=3, y=0, z=3), or conditional JP cc, nn
// (x=3, z=2). A non-immediate target in the fall-through path panics.
func (jp *JP) Encode() []byte {
	jp.inspect()
	if jp.c == True || jp.c == nil {
		if jp.lInfo.isHLLike() {
			buf := []byte{encodeXPQZ(3, 2, 1, 1)}
			return idxEncodeHelper(buf, jp.idx)
		}
	}
	if jp.lInfo.ltype != Immediate {
		panic("Non-immediate (or direct HL-like) JP")
	}
	var buf []byte
	if jp.c == True || jp.c == nil {
		buf = []byte{encodeXYZ(3, 0, 3)}
	} else {
		y := findInTableCC(jp.c)
		buf = []byte{encodeXYZ(3, y, 2)}
	}
	buf = append(buf, jp.lInfo.imm16...)
	return buf
}
// CALL is the subroutine call instruction, optionally conditional.
type CALL struct {
	InstU16
	c Conditional
}

// NewCALL builds a CALL with condition c (True or nil means
// unconditional) and target l.
func NewCALL(c Conditional, l Loc16) *CALL {
	return &CALL{InstU16: InstU16{l: l}, c: c}
}

// String renders the instruction, omitting an always-true condition.
func (c *CALL) String() string {
	if c.c == True || c.c == nil {
		return fmt.Sprintf("CALL %s", c.l)
	} else {
		return fmt.Sprintf("CALL %s, %s", c.c, c.l)
	}
}

// Encode emits unconditional CALL nn (x=3, q=1, z=5) or conditional
// CALL cc, nn (x=3, z=4), followed by the 16-bit target address.
func (c *CALL) Encode() []byte {
	c.inspect()
	var buf []byte
	if c.c == nil || c.c == True {
		buf = []byte{encodeXPQZ(3, 0, 1, 5)}
	} else {
		y := findInTableCC(c.c)
		buf = []byte{encodeXYZ(3, y, 4)}
	}
	buf = append(buf, c.lInfo.imm16...)
	return buf
}
// OUT writes an 8-bit value to an I/O port: OUT (C), r or OUT (n), A.
type OUT struct {
	port  Loc8
	value Loc8
}

// String renders the instruction in assembler syntax.
func (o *OUT) String() string {
	return fmt.Sprintf("OUT (%s), %s", o.port, o.value)
}

// Encode emits ED-prefixed OUT (C), r when the port is register C,
// otherwise OUT (n), A with the immediate port byte.
// The immediate branch type-asserts the port to Imm8 and will panic
// on any other port type.
func (o *OUT) Encode() []byte {
	if o.port == C {
		var info loc8Info
		var idx idxInfo
		inspectLoc8(o.value, &info, &idx)
		if info.ltype != tableR {
			panic("Non-tableR value in OUT")
		}
		// (HL)? IX?
		return []byte{0xed, encodeXYZ(1, info.idxTable, 1)}
	} else {
		imm8 := o.port.(Imm8)
		return []byte{encodeXYZ(3, 2, 3), byte(imm8)}
	}
}
// IN reads an 8-bit value from an I/O port: IN r, (C) or IN A, (n).
type IN struct {
	dst  Loc8
	port Loc8
}

// String renders the instruction in assembler syntax.
func (i *IN) String() string {
	return fmt.Sprintf("IN %s, (%s)", i.dst, i.port)
}

// Encode emits ED-prefixed IN r, (C) when the port is register C,
// otherwise IN A, (n) with the immediate port byte.
// IN F,(C) is the special "flags only" form encoded with y == 6;
// previously F fell through to inspectLoc8 and panicked.
func (i *IN) Encode() []byte {
	if i.port == C {
		var y byte
		if i.dst == F {
			// IN F,(C): result is discarded, only flags are updated.
			y = 6
		} else {
			var info loc8Info
			var idx idxInfo
			inspectLoc8(i.dst, &info, &idx)
			if info.ltype != tableR {
				panic("Non-tableR dst in IN")
			}
			y = info.idxTable
		}
		return []byte{0xed, encodeXYZ(1, y, 0)}
	} else {
		imm8 := i.port.(Imm8)
		return []byte{encodeXYZ(3, 3, 3), byte(imm8)}
	}
}
// PUSH pushes a 16-bit register pair (rp2 table: BC/DE/HL/AF) onto
// the stack.
type PUSH struct {
	InstU16
}

// NewPUSH builds a PUSH targeting the given 16-bit location.
func NewPUSH(l Loc16) *PUSH {
	return &PUSH{InstU16{l: l}}
}

// String renders the instruction in assembler syntax.
func (p *PUSH) String() string {
	return fmt.Sprintf("PUSH %s", p.l)
}

// Encode emits the opcode (x=3, q=0, z=5, p from the rp2 table),
// with an index prefix for IX/IY.
func (p *PUSH) Encode() []byte {
	p.inspectRP2()
	if p.lInfo.ltype != tableRP2 {
		// Fixed message: the check is against tableRP2, not tableRP.
		panic("Non-tableRP2 PUSH")
	}
	buf := []byte{encodeXPQZ(3, p.lInfo.idxTable, 0, 5)}
	return idxEncodeHelper(buf, p.idx)
}
// POP pops a 16-bit register pair (rp2 table: BC/DE/HL/AF) off the
// stack.
type POP struct {
	InstU16
}

// NewPOP builds a POP targeting the given 16-bit location.
func NewPOP(l Loc16) *POP {
	return &POP{InstU16{l: l}}
}

// String renders the instruction in assembler syntax.
func (p *POP) String() string {
	return fmt.Sprintf("POP %s", p.l)
}

// Encode emits the opcode (x=3, q=0, z=1, p from the rp2 table),
// with an index prefix for IX/IY.
func (p *POP) Encode() []byte {
	p.inspectRP2()
	if p.lInfo.ltype != tableRP2 {
		// Fixed copy-paste error: message previously said "PUSH".
		panic("Non-tableRP2 POP")
	}
	buf := []byte{encodeXPQZ(3, p.lInfo.idxTable, 0, 1)}
	return idxEncodeHelper(buf, p.idx)
}
// RST is the restart instruction: a one-byte call to a fixed address.
type RST struct {
	addr byte
}

// String renders the instruction in assembler syntax (decimal address).
func (r *RST) String() string {
	return fmt.Sprintf("RST %d", r.addr)
}

// Encode emits the opcode (x=3, z=7, y = addr/8).
// NOTE(review): addr is assumed to be a multiple of 8 (0x00..0x38);
// other values silently encode the next-lower multiple — confirm callers.
func (r *RST) Encode() []byte {
	y := r.addr / 8
	return []byte{encodeXYZ(3, y, 7)}
}
// RET is the subroutine return instruction, optionally conditional.
type RET struct {
	c Conditional
}

// String renders the instruction, omitting an always-true condition.
func (r *RET) String() string {
	if r.c != True && r.c != nil {
		return fmt.Sprintf("RET %s", r.c)
	}
	return "RET"
}

// Encode emits conditional RET cc (x=3, z=0) or the plain RET
// opcode (x=3, q=1, z=1).
func (r *RET) Encode() []byte {
	if r.c != True && r.c != nil {
		return []byte{encodeXYZ(3, findInTableCC(r.c), 0)}
	}
	return []byte{encodeXPQZ(3, 0, 1, 1)}
}
// NewAccum builds an accumulator ALU instruction (ADD/ADC/SUB/SBC/
// AND/XOR/OR/CP) identified by its mnemonic, operating on A and l.
func NewAccum(name string, l Loc8) *accum {
	// TODO: lookup func by name, panic on unknown
	return &accum{name: name, InstU8: InstU8{l: l}}
}

// accumFunc is the signature of an ALU operation on two bytes.
// NOTE(review): currently unused — presumably reserved for execution
// support; confirm before removing.
type accumFunc func(a, b byte) byte

// accum is a generic accumulator ALU instruction; the opcode's y
// field is looked up from the mnemonic name.
type accum struct {
	// f AccumFunc
	InstU8
	name string
}

// String renders the instruction; ADD/ADC/SBC spell out the implicit
// A operand, the remaining mnemonics conventionally omit it.
func (a accum) String() string {
	switch a.name {
	case "ADD", "ADC", "SBC":
		return fmt.Sprintf("%s A, %s", a.name, a.l)
	default:
		return fmt.Sprintf("%s %s", a.name, a.l)
	}
}

// Encode emits the register form (x=2) or the immediate form
// (x=3, z=6 followed by the immediate byte), with index prefix
// handling via idxEncodeHelper.
func (a accum) Encode() []byte {
	a.inspect()
	y := findInTableALU(a.name)
	var buf []byte
	switch a.lInfo.ltype {
	case tableR:
		buf = []byte{encodeXYZ(2, y, a.lInfo.idxTable)}
	case Immediate:
		buf = []byte{encodeXYZ(3, y, 6)}
		buf = append(buf, a.lInfo.imm8)
	default:
		panic("Unknown accum location type")
	}
	return idxEncodeHelper(buf, a.idx)
}
// rot is a CB-prefixed rotate/shift instruction (RLC/RRC/RL/RR/SLA/
// SRA/SLL/SRL) identified by its mnemonic name.
type rot struct {
	InstU8
	name string
}

// NewRot builds a rotate/shift instruction operating on l.
func NewRot(name string, l Loc8) *rot {
	return &rot{InstU8: InstU8{l: l}, name: name}
}

// String renders the instruction in assembler syntax.
func (r *rot) String() string {
	return fmt.Sprintf("%s %s", r.name, r.l)
}

// Encode emits 0xCB followed by the opcode (x=0, y from the rot
// table, z from the register table).
func (r *rot) Encode() []byte {
	r.inspect()
	if r.lInfo.ltype != tableR {
		// Fixed copy-paste error: message previously said "BIT".
		panic(fmt.Sprintf("Non-tableR src in %s", r.name))
	}
	y := findInTableROT(r.name)
	buf := []byte{0xcb, encodeXYZ(0, y, r.lInfo.idxTable)}
	return idxEncodeHelper(buf, r.idx)
}
// BIT tests bit num of an 8-bit location.
type BIT struct {
	InstU8
	num byte
}

// NewBIT builds a BIT instruction testing bit num of l.
func NewBIT(num byte, l Loc8) *BIT {
	return &BIT{InstU8: InstU8{l: l}, num: num}
}

// String renders the instruction in assembler syntax.
func (b *BIT) String() string {
	return fmt.Sprintf("BIT %d, %s", b.num, b.l)
}

// Encode emits 0xCB followed by the opcode (x=1, y=bit number,
// z from the register table).
func (b *BIT) Encode() []byte {
	b.inspect()
	if b.lInfo.ltype != tableR {
		panic("Non-tableR src in BIT")
	}
	opcode := encodeXYZ(1, b.num, b.lInfo.idxTable)
	return idxEncodeHelper([]byte{0xcb, opcode}, b.idx)
}
// RES resets (clears) bit num of an 8-bit location.
type RES struct {
	InstU8
	num byte
}

// NewRES builds a RES instruction clearing bit num of l.
func NewRES(num byte, l Loc8) *RES {
	return &RES{InstU8: InstU8{l: l}, num: num}
}

// String renders the instruction in assembler syntax.
func (r *RES) String() string {
	return fmt.Sprintf("RES %d, %s", r.num, r.l)
}

// Encode emits 0xCB followed by the opcode (x=2, y=bit number,
// z from the register table).
func (r *RES) Encode() []byte {
	r.inspect()
	if r.lInfo.ltype != tableR {
		// Fixed copy-paste error: message previously said "BIT".
		panic("Non-tableR src in RES")
	}
	enc := encodeXYZ(2, r.num, r.lInfo.idxTable)
	return idxEncodeHelper([]byte{0xcb, enc}, r.idx)
}
// SET sets bit num of an 8-bit location.
type SET struct {
	InstU8
	num byte
}

// NewSET builds a SET instruction setting bit num of l.
func NewSET(num byte, l Loc8) *SET {
	return &SET{InstU8: InstU8{l: l}, num: num}
}

// String renders the instruction in assembler syntax.
func (s *SET) String() string {
	return fmt.Sprintf("SET %d, %s", s.num, s.l)
}

// Encode emits 0xCB followed by the opcode (x=3, y=bit number,
// z from the register table).
func (s *SET) Encode() []byte {
	s.inspect()
	if s.lInfo.ltype != tableR {
		// Fixed copy-paste error: message previously said "BIT".
		panic("Non-tableR src in SET")
	}
	enc := encodeXYZ(3, s.num, s.lInfo.idxTable)
	return idxEncodeHelper([]byte{0xcb, enc}, s.idx)
}
// Simple is a single-byte instruction with no operands; the value is
// the opcode itself.
type Simple byte

// Opcode values for the un-prefixed no-operand instructions.
const (
	NOP  Simple = 0x00
	HALT Simple = 0x76
	RLCA Simple = 0x07
	RRCA Simple = 0x0f
	RLA  Simple = 0x17
	RRA  Simple = 0x1f
	DAA  Simple = 0x27
	CPL  Simple = 0x2f
	SCF  Simple = 0x37
	CCF  Simple = 0x3f
	EXX  Simple = 0xd9
	DI   Simple = 0xf3
	EI   Simple = 0xfb
)

// simpleName pairs a Simple opcode with its mnemonic.
type simpleName struct {
	inst Simple
	name string
}

// simpleNames maps every Simple opcode to its mnemonic.
var simpleNames []simpleName = []simpleName{
	{NOP, "NOP"},
	{HALT, "HALT"},
	{RLCA, "RLCA"},
	{RRCA, "RRCA"},
	{RLA, "RLA"},
	{RRA, "RRA"},
	{DAA, "DAA"},
	{CPL, "CPL"},
	{SCF, "SCF"},
	{CCF, "CCF"},
	{EXX, "EXX"},
	{DI, "DI"},
	{EI, "EI"},
}

// String returns the mnemonic; unknown opcodes panic.
func (s Simple) String() string {
	for _, entry := range simpleNames {
		if entry.inst == s {
			return entry.name
		}
	}
	panic(fmt.Sprintf("Unknown simple instruction: %02X", byte(s)))
}

// Encode emits the single opcode byte.
func (s Simple) Encode() []byte {
	return []byte{byte(s)}
}

// LookupSimpleName returns the Simple for a (case-insensitive)
// mnemonic; unknown names panic.
func LookupSimpleName(name string) Simple {
	upper := strings.ToUpper(name)
	for _, entry := range simpleNames {
		if entry.name == upper {
			return entry.inst
		}
	}
	panic(fmt.Errorf("Unrecognised Simple instruction name : [%s]", name))
}
// EDSimple is a two-byte, ED-prefixed instruction with no operands;
// the value is the second opcode byte.
type EDSimple byte

// Second-byte opcode values for the ED-prefixed no-operand instructions.
const (
	NEG  EDSimple = 0x44
	RETN EDSimple = 0x45
	RETI EDSimple = 0x4d
	RRD  EDSimple = 0x67
	RLD  EDSimple = 0x6f
	IM0  EDSimple = 0x46
	IM1  EDSimple = 0x56
	IM2  EDSimple = 0x5e
	LDI  EDSimple = 0xa0
	CPI  EDSimple = 0xa1
	LDD  EDSimple = 0xa8
	CPD  EDSimple = 0xa9
	LDIR EDSimple = 0xb0
	CPIR EDSimple = 0xb1
	LDDR EDSimple = 0xb8
	CPDR EDSimple = 0xb9
	INI  EDSimple = 0xa2
	OUTI EDSimple = 0xa3
	IND  EDSimple = 0xaa
	OUTD EDSimple = 0xab
	INIR EDSimple = 0xb2
	OTIR EDSimple = 0xb3
	INDR EDSimple = 0xba
	OTDR EDSimple = 0xbb
)

// edSimpleName pairs an EDSimple opcode with its mnemonic.
type edSimpleName struct {
	inst EDSimple
	name string
}

// EDSimpleNames maps every EDSimple opcode to its mnemonic.
var EDSimpleNames []edSimpleName = []edSimpleName{
	{NEG, "NEG"},
	{RETN, "RETN"},
	{RETI, "RETI"},
	{RRD, "RRD"},
	{RLD, "RLD"},
	{IM0, "IM 0"},
	{IM1, "IM 1"},
	{IM2, "IM 2"},
	{LDI, "LDI"},
	{CPI, "CPI"},
	{LDD, "LDD"},
	{CPD, "CPD"},
	{LDIR, "LDIR"},
	{CPIR, "CPIR"},
	{LDDR, "LDDR"},
	{CPDR, "CPDR"},
	{INI, "INI"},
	{OUTI, "OUTI"},
	{IND, "IND"},
	{OUTD, "OUTD"},
	{INIR, "INIR"},
	{OTIR, "OTIR"},
	{INDR, "INDR"},
	{OTDR, "OTDR"},
}

// String returns the mnemonic; unknown opcodes panic.
func (s EDSimple) String() string {
	for _, entry := range EDSimpleNames {
		if entry.inst == s {
			return entry.name
		}
	}
	panic(fmt.Sprintf("Unknown EDSimple instruction: %02X", byte(s)))
}

// Encode emits the ED prefix followed by the opcode byte.
func (s EDSimple) Encode() []byte {
	return []byte{0xed, byte(s)}
}

// LookupEDSimpleName returns the EDSimple for a (case-insensitive)
// mnemonic; unknown names panic.
func LookupEDSimpleName(name string) EDSimple {
	upper := strings.ToUpper(name)
	for _, entry := range EDSimpleNames {
		if entry.name == upper {
			return entry.inst
		}
	}
	panic(fmt.Errorf("Unrecognised EDSimple instruction name : [%s]", name))
}
// handle IN F,(c) exception
package zog
import (
"fmt"
"strings"
)
// Instruction is any assemblable Z80 instruction: it can render its
// assembler syntax and emit its machine-code bytes.
type Instruction interface {
	String() string
	Encode() []byte
}
// LD8 is the 8-bit load instruction: LD dst, src.
type LD8 struct {
	InstBin8
}

// NewLD8 builds an LD8 with the given destination and source.
func NewLD8(dst Loc8, src Loc8) *LD8 {
	return &LD8{InstBin8{dst: dst, src: src}}
}

// String renders the instruction in assembler syntax.
func (l *LD8) String() string {
	return fmt.Sprintf("LD %s, %s", l.dst, l.src)
}

// Encode emits the machine code for an 8-bit load. The I/R register
// transfers are ED-prefixed special cases; the remaining forms follow
// the x/y/z and x/p/q/z opcode decomposition.
// Removed a leftover debug fmt.Printf from the register-to-register
// path that wrote to stdout on every encode.
func (l *LD8) Encode() []byte {
	// ED special cases
	switch true {
	case l.dst == I && l.src == A:
		return []byte{0xed, 0x47}
	case l.dst == A && l.src == I:
		return []byte{0xed, 0x57}
	case l.dst == R && l.src == A:
		return []byte{0xed, 0x4f}
	case l.dst == A && l.src == R:
		return []byte{0xed, 0x5f}
	}
	l.inspect()
	switch l.dstInfo.ltype {
	case BCDEContents:
		// LD (BC), A or LD (DE), A
		p := byte(1)
		if l.srcInfo.isBC {
			p = 0
		}
		buf := []byte{encodeXPQZ(0, p, 0, 2)}
		return buf
	case ImmediateContents:
		// LD (nn), A
		buf := []byte{encodeXPQZ(0, 3, 0, 2)}
		buf = append(buf, l.dstInfo.imm16...)
		return buf
	}
	if l.dstInfo.ltype != tableR {
		panic("Non-tableR dst in LD8")
	}
	switch l.srcInfo.ltype {
	case tableR:
		// LD r, r'
		b := encodeXYZ(1, l.dstInfo.idxTable, l.srcInfo.idxTable)
		return idxEncodeHelper([]byte{b}, l.idx)
	case Immediate:
		// LD r, n
		b := encodeXYZ(0, l.dstInfo.idxTable, 6)
		return idxEncodeHelper([]byte{b, l.srcInfo.imm8}, l.idx)
	case BCDEContents:
		// LD A, (BC) or LD A, (DE)
		p := byte(1)
		if l.srcInfo.isBC {
			p = 0
		}
		b := encodeXPQZ(0, p, 1, 2)
		return []byte{b}
	case ImmediateContents:
		// LD A, (nn)
		buf := []byte{encodeXPQZ(0, 3, 1, 2)}
		buf = append(buf, l.srcInfo.imm16...)
		return buf
	default:
		panic("Unknown src type in LD8")
	}
}
// INC8 is the 8-bit increment instruction: INC r.
type INC8 struct {
	InstU8
}

// NewINC8 builds an INC8 targeting the given 8-bit location.
func NewINC8(l Loc8) *INC8 {
	return &INC8{InstU8{l: l}}
}

// String renders the instruction in assembler syntax.
func (i *INC8) String() string {
	return fmt.Sprintf("INC %s", i.l)
}

// Encode emits the opcode (x=0, z=4, y from the register table),
// with an index prefix/displacement added when needed.
func (i *INC8) Encode() []byte {
	i.inspect()
	if i.lInfo.ltype != tableR {
		panic("Non-tableR INC8")
	}
	opcode := encodeXYZ(0, i.lInfo.idxTable, 4)
	return idxEncodeHelper([]byte{opcode}, i.idx)
}
// DEC8 is the 8-bit decrement instruction: DEC r.
type DEC8 struct {
	InstU8
}

// NewDEC8 builds a DEC8 targeting the given 8-bit location.
func NewDEC8(l Loc8) *DEC8 {
	return &DEC8{InstU8{l: l}}
}

// String renders the instruction in assembler syntax.
func (d *DEC8) String() string {
	return fmt.Sprintf("DEC %s", d.l)
}

// Encode emits the opcode (x=0, z=5, y from the register table),
// with an index prefix/displacement added when needed.
func (d *DEC8) Encode() []byte {
	d.inspect()
	if d.lInfo.ltype != tableR {
		panic("Non-tableR DEC8")
	}
	opcode := encodeXYZ(0, d.lInfo.idxTable, 5)
	return idxEncodeHelper([]byte{opcode}, d.idx)
}
// LD16 is the 16-bit load instruction: LD dst, src.
type LD16 struct {
	InstBin16
}

// NewLD16 builds an LD16 with the given destination and source.
func NewLD16(dst, src Loc16) *LD16 {
	return &LD16{InstBin16: InstBin16{dst: dst, src: src}}
}

// String renders the instruction in assembler syntax.
func (l *LD16) String() string {
	return fmt.Sprintf("LD %s, %s", l.dst, l.src)
}

// Encode emits the machine code for a 16-bit load. Cases, in order:
// LD (nn),HL-like (non-ED form); LD (nn),rp (ED-prefixed);
// LD rp,nn; LD HL-like,(nn) (non-ED form); LD rp,(nn) (ED-prefixed);
// LD SP,HL-like. Unsupported combinations panic.
func (l *LD16) Encode() []byte {
	l.inspect()
	switch l.dstInfo.ltype {
	case ImmediateContents:
		// LD (nn), HL has multiple encodings, we choose the non-ED one
		if l.srcInfo.isHLLike() {
			buf := []byte{encodeXPQZ(0, 2, 0, 2)}
			buf = append(buf, l.dstInfo.imm16...)
			return idxEncodeHelper(buf, l.idx)
		} else {
			if l.srcInfo.ltype != tableRP {
				panic("Non-tableRP src in LD16 (NN), src")
			}
			// ED-prefixed LD (nn), rp
			buf := []byte{0xed, encodeXPQZ(1, l.srcInfo.idxTable, 0, 3)}
			buf = append(buf, l.dstInfo.imm16...)
			return buf
		}
	}
	if l.dstInfo.ltype != tableRP {
		panic("Non-tableRP dst in LD16")
	}
	switch l.srcInfo.ltype {
	case Immediate:
		// LD rp, nn
		buf := []byte{encodeXPQZ(0, l.dstInfo.idxTable, 0, 1)}
		buf = append(buf, l.srcInfo.imm16...)
		return idxEncodeHelper(buf, l.idx)
	case ImmediateContents:
		// LD HL, (nn) has multiple encodings
		if l.dstInfo.isHLLike() {
			buf := []byte{encodeXPQZ(0, 2, 1, 2)}
			buf = append(buf, l.srcInfo.imm16...)
			return idxEncodeHelper(buf, l.idx)
		} else {
			// NOTE(review): this check can never fire — dst ltype was
			// verified to be tableRP just above.
			if l.dstInfo.ltype != tableRP {
				panic("Non-tableRP src in LD16 (NN), src")
			}
			// ED-prefixed LD rp, (nn)
			buf := []byte{0xed, encodeXPQZ(1, l.dstInfo.idxTable, 1, 3)}
			buf = append(buf, l.srcInfo.imm16...)
			return buf
		}
	case tableRP:
		// Only LD SP, HL/IX/IY is a valid rp-to-rp load.
		if l.srcInfo.isHLLike() {
			if l.dst != SP {
				panic("HL-like load to non-SP")
			}
			buf := []byte{encodeXPQZ(3, 3, 1, 1)}
			return idxEncodeHelper(buf, l.idx)
		} else {
			panic("Non-HL like load to something")
		}
	default:
		panic("Unknown src type in LD16")
	}
}
// ADD16 is the 16-bit add instruction: ADD HL/IX/IY, rp.
type ADD16 struct {
	InstBin16
}

// NewADD16 builds an ADD16 with the given destination and source.
func NewADD16(dst, src Loc16) *ADD16 {
	return &ADD16{InstBin16: InstBin16{dst: dst, src: src}}
}

// String renders the instruction in assembler syntax.
func (a *ADD16) String() string {
	return fmt.Sprintf("ADD %s, %s", a.dst, a.src)
}

// Encode emits the opcode (x=0, q=1, z=1, p from the pair table),
// with an index prefix when the destination is IX/IY.
func (a *ADD16) Encode() []byte {
	a.inspect()
	if a.dstInfo.ltype != tableRP {
		panic("Non-tableRP dst in ADD16")
	}
	if a.srcInfo.ltype != tableRP {
		panic("Non-tableRP src in ADD16")
	}
	// TODO: support other ADD16
	if !a.dstInfo.isHLLike() {
		panic("Non-HL dst in ADD16")
	}
	// src ltype is known to be tableRP here, so encode directly.
	opcode := encodeXPQZ(0, a.srcInfo.idxTable, 1, 1)
	return idxEncodeHelper([]byte{opcode}, a.idx)
}
// ADC16 is the 16-bit add-with-carry instruction: ADC HL, rp.
type ADC16 struct {
	InstBin16
}

// NewADC16 builds an ADC16 with the given destination and source.
func NewADC16(dst, src Loc16) *ADC16 {
	return &ADC16{InstBin16: InstBin16{dst: dst, src: src}}
}

// String renders the instruction in assembler syntax.
func (a *ADC16) String() string {
	return fmt.Sprintf("ADC %s, %s", a.dst, a.src)
}

// Encode emits the ED-prefixed opcode (x=1, q=1, z=2).
// NOTE(review): the destination is not validated — presumably callers
// only construct ADC16 with an HL destination; confirm at call sites.
func (a *ADC16) Encode() []byte {
	a.inspect()
	if a.srcInfo.ltype != tableRP {
		panic("Non-tableRP src in ADC16")
	}
	buf := []byte{0xed, encodeXPQZ(1, a.srcInfo.idxTable, 1, 2)}
	return idxEncodeHelper(buf, a.idx)
}
// SBC16 is the 16-bit subtract-with-carry instruction: SBC HL, rp.
type SBC16 struct {
	InstBin16
}

// NewSBC16 builds an SBC16 with the given destination and source.
func NewSBC16(dst, src Loc16) *SBC16 {
	return &SBC16{InstBin16: InstBin16{dst: dst, src: src}}
}

// String renders the instruction in assembler syntax.
func (s *SBC16) String() string {
	return fmt.Sprintf("SBC %s, %s", s.dst, s.src)
}

// Encode emits the ED-prefixed opcode (x=1, q=0, z=2).
func (s *SBC16) Encode() []byte {
	s.inspect()
	if s.srcInfo.ltype != tableRP {
		// Fixed copy-paste error: message previously said "ADC16".
		panic("Non-tableRP src in SBC16")
	}
	buf := []byte{0xed, encodeXPQZ(1, s.srcInfo.idxTable, 0, 2)}
	return idxEncodeHelper(buf, s.idx)
}
// INC16 is the 16-bit increment instruction: INC rp.
type INC16 struct {
	InstU16
}

// NewINC16 builds an INC16 targeting the given 16-bit location.
func NewINC16(l Loc16) *INC16 {
	return &INC16{InstU16{l: l}}
}

// String renders the instruction in assembler syntax.
func (i *INC16) String() string {
	return fmt.Sprintf("INC %s", i.l)
}

// Encode emits the opcode (x=0, q=0, z=3, p from the pair table).
func (i *INC16) Encode() []byte {
	i.inspect()
	if i.lInfo.ltype != tableRP {
		panic("Non-tableRP INC16")
	}
	opcode := encodeXPQZ(0, i.lInfo.idxTable, 0, 3)
	return idxEncodeHelper([]byte{opcode}, i.idx)
}
// DEC16 is the 16-bit decrement instruction: DEC rp.
type DEC16 struct {
	InstU16
}

// NewDEC16 builds a DEC16 targeting the given 16-bit location.
func NewDEC16(l Loc16) *DEC16 {
	return &DEC16{InstU16{l: l}}
}

// String renders the instruction in assembler syntax.
func (d *DEC16) String() string {
	return fmt.Sprintf("DEC %s", d.l)
}

// Encode emits the opcode (x=0, q=1, z=3, p from the pair table).
func (d *DEC16) Encode() []byte {
	d.inspect()
	if d.lInfo.ltype != tableRP {
		panic("Non-tableRP DEC16")
	}
	opcode := encodeXPQZ(0, d.lInfo.idxTable, 1, 3)
	return idxEncodeHelper([]byte{opcode}, d.idx)
}
// EX exchanges two 16-bit values.
type EX struct {
	InstBin16
}

// NewEX builds an EX instruction with the given destination and source.
func NewEX(dst, src Loc16) *EX {
	return &EX{InstBin16: InstBin16{dst: dst, src: src}}
}

// String renders the instruction in assembler syntax.
func (ex *EX) String() string {
	return fmt.Sprintf("EX %s, %s", ex.dst, ex.src)
}

// Encode emits one of the three EX forms: EX AF,AF' (0x08),
// EX (SP),HL-like (with index prefix as needed), or EX DE,HL.
// Any other operand combination panics.
func (ex *EX) Encode() []byte {
	if ex.dst == AF && ex.src == AF_PRIME {
		return []byte{0x08}
	} else if ex.dst.String() == (Contents{SP}).String() {
		// Inspect src only to pick up any IX/IY index prefix;
		// info itself is not otherwise used.
		var info loc16Info
		var idx idxInfo
		inspectLoc16(ex.src, &info, &idx, false)
		buf := []byte{encodeXYZ(3, 4, 3)}
		return idxEncodeHelper(buf, idx)
	} else if ex.dst == DE && ex.src == HL {
		// EX DE,HL is an exception to the IX/IY rule
		return []byte{encodeXYZ(3, 5, 3)}
	}
	panic("Unrecognised EX instruction")
}
// DJNZ decrements B and jumps by a signed displacement while B != 0.
type DJNZ struct {
	d Disp
}

// String renders the instruction in assembler syntax.
func (d *DJNZ) String() string {
	return fmt.Sprintf("DJNZ %s", d.d)
}

// Encode emits the opcode (x=0, y=2, z=0) followed by the displacement.
func (d *DJNZ) Encode() []byte {
	opcode := encodeXYZ(0, 2, 0)
	return []byte{opcode, byte(d.d)}
}
// JR is the relative jump instruction, optionally conditional.
type JR struct {
	c Conditional
	d Disp
}

// String renders the instruction, omitting an always-true condition.
func (j *JR) String() string {
	if j.c == True || j.c == nil {
		return fmt.Sprintf("JR %s", j.d)
	} else {
		return fmt.Sprintf("JR %s, %s", j.c, j.d)
	}
}

// Encode emits the opcode (x=0, z=0) followed by the displacement.
// Unconditional JR uses y=3; conditional forms use y = cc-index + 4.
func (j *JR) Encode() []byte {
	var y byte
	if j.c == True || j.c == nil {
		y = 3
	} else {
		y = findInTableCC(j.c)
		y += 4
	}
	b := encodeXYZ(0, y, 0)
	return []byte{b, byte(j.d)}
}
// JP is the absolute jump instruction, optionally conditional.
type JP struct {
	InstU16
	c Conditional
}

// NewJP builds a JP with condition c (True or nil means unconditional)
// and target l.
func NewJP(c Conditional, l Loc16) *JP {
	return &JP{InstU16: InstU16{l: l}, c: c}
}

// String renders the instruction, omitting an always-true condition.
func (jp *JP) String() string {
	if jp.c == True || jp.c == nil {
		return fmt.Sprintf("JP %s", jp.l)
	} else {
		return fmt.Sprintf("JP %s, %s", jp.c, jp.l)
	}
}

// Encode emits JP (HL)-like (unconditional only, with index prefix),
// unconditional JP nn (x=3, y=0, z=3), or conditional JP cc, nn
// (x=3, z=2). A non-immediate target in the fall-through path panics.
func (jp *JP) Encode() []byte {
	jp.inspect()
	if jp.c == True || jp.c == nil {
		if jp.lInfo.isHLLike() {
			buf := []byte{encodeXPQZ(3, 2, 1, 1)}
			return idxEncodeHelper(buf, jp.idx)
		}
	}
	if jp.lInfo.ltype != Immediate {
		panic("Non-immediate (or direct HL-like) JP")
	}
	var buf []byte
	if jp.c == True || jp.c == nil {
		buf = []byte{encodeXYZ(3, 0, 3)}
	} else {
		y := findInTableCC(jp.c)
		buf = []byte{encodeXYZ(3, y, 2)}
	}
	buf = append(buf, jp.lInfo.imm16...)
	return buf
}
// CALL is the subroutine call instruction, optionally conditional.
type CALL struct {
	InstU16
	c Conditional
}

// NewCALL builds a CALL with condition c (True or nil means
// unconditional) and target l.
func NewCALL(c Conditional, l Loc16) *CALL {
	return &CALL{InstU16: InstU16{l: l}, c: c}
}

// String renders the instruction, omitting an always-true condition.
func (c *CALL) String() string {
	if c.c == True || c.c == nil {
		return fmt.Sprintf("CALL %s", c.l)
	} else {
		return fmt.Sprintf("CALL %s, %s", c.c, c.l)
	}
}

// Encode emits unconditional CALL nn (x=3, q=1, z=5) or conditional
// CALL cc, nn (x=3, z=4), followed by the 16-bit target address.
func (c *CALL) Encode() []byte {
	c.inspect()
	var buf []byte
	if c.c == nil || c.c == True {
		buf = []byte{encodeXPQZ(3, 0, 1, 5)}
	} else {
		y := findInTableCC(c.c)
		buf = []byte{encodeXYZ(3, y, 4)}
	}
	buf = append(buf, c.lInfo.imm16...)
	return buf
}
// OUT writes an 8-bit value to an I/O port: OUT (C), r or OUT (n), A.
type OUT struct {
	port  Loc8
	value Loc8
}

// String renders the instruction in assembler syntax.
func (o *OUT) String() string {
	return fmt.Sprintf("OUT (%s), %s", o.port, o.value)
}

// Encode emits ED-prefixed OUT (C), r when the port is register C,
// otherwise OUT (n), A with the immediate port byte.
// The immediate branch type-asserts the port to Imm8 and will panic
// on any other port type.
func (o *OUT) Encode() []byte {
	if o.port == C {
		var info loc8Info
		var idx idxInfo
		inspectLoc8(o.value, &info, &idx)
		if info.ltype != tableR {
			panic("Non-tableR value in OUT")
		}
		// (HL)? IX?
		return []byte{0xed, encodeXYZ(1, info.idxTable, 1)}
	} else {
		imm8 := o.port.(Imm8)
		return []byte{encodeXYZ(3, 2, 3), byte(imm8)}
	}
}
// IN reads an 8-bit value from an I/O port: IN r, (C) or IN A, (n).
type IN struct {
	dst  Loc8
	port Loc8
}

// String renders the instruction in assembler syntax.
func (i *IN) String() string {
	return fmt.Sprintf("IN %s, (%s)", i.dst, i.port)
}

// Encode emits ED-prefixed IN r, (C) when the port is register C
// (with IN F,(C) as the y==6 "flags only" special case), otherwise
// IN A, (n) with the immediate port byte. The immediate branch
// type-asserts the port to Imm8 and will panic on any other port type.
func (i *IN) Encode() []byte {
	if i.port == C {
		var y byte
		if i.dst == F {
			// IN F,(C): result is discarded, only flags are updated.
			y = 6
		} else {
			var info loc8Info
			var idx idxInfo
			inspectLoc8(i.dst, &info, &idx)
			if info.ltype != tableR {
				panic("Non-tableR dst in IN")
			}
			y = info.idxTable
		}
		return []byte{0xed, encodeXYZ(1, y, 0)}
	} else {
		imm8 := i.port.(Imm8)
		return []byte{encodeXYZ(3, 3, 3), byte(imm8)}
	}
}
// PUSH pushes a 16-bit register pair (rp2 table: BC/DE/HL/AF) onto
// the stack.
type PUSH struct {
	InstU16
}

// NewPUSH builds a PUSH targeting the given 16-bit location.
func NewPUSH(l Loc16) *PUSH {
	return &PUSH{InstU16{l: l}}
}

// String renders the instruction in assembler syntax.
func (p *PUSH) String() string {
	return fmt.Sprintf("PUSH %s", p.l)
}

// Encode emits the opcode (x=3, q=0, z=5, p from the rp2 table),
// with an index prefix for IX/IY.
func (p *PUSH) Encode() []byte {
	p.inspectRP2()
	if p.lInfo.ltype != tableRP2 {
		// Fixed message: the check is against tableRP2, not tableRP.
		panic("Non-tableRP2 PUSH")
	}
	buf := []byte{encodeXPQZ(3, p.lInfo.idxTable, 0, 5)}
	return idxEncodeHelper(buf, p.idx)
}
// POP pops a 16-bit register pair (rp2 table: BC/DE/HL/AF) off the
// stack.
type POP struct {
	InstU16
}

// NewPOP builds a POP targeting the given 16-bit location.
func NewPOP(l Loc16) *POP {
	return &POP{InstU16{l: l}}
}

// String renders the instruction in assembler syntax.
func (p *POP) String() string {
	return fmt.Sprintf("POP %s", p.l)
}

// Encode emits the opcode (x=3, q=0, z=1, p from the rp2 table),
// with an index prefix for IX/IY.
func (p *POP) Encode() []byte {
	p.inspectRP2()
	if p.lInfo.ltype != tableRP2 {
		// Fixed copy-paste error: message previously said "PUSH".
		panic("Non-tableRP2 POP")
	}
	buf := []byte{encodeXPQZ(3, p.lInfo.idxTable, 0, 1)}
	return idxEncodeHelper(buf, p.idx)
}
// RST is the restart instruction: a one-byte call to a fixed address.
type RST struct {
	addr byte
}

// String renders the instruction in assembler syntax (decimal address).
func (r *RST) String() string {
	return fmt.Sprintf("RST %d", r.addr)
}

// Encode emits the opcode (x=3, z=7, y = addr/8).
// NOTE(review): addr is assumed to be a multiple of 8 (0x00..0x38);
// other values silently encode the next-lower multiple — confirm callers.
func (r *RST) Encode() []byte {
	y := r.addr / 8
	return []byte{encodeXYZ(3, y, 7)}
}
// RET is the subroutine return instruction, optionally conditional.
type RET struct {
	c Conditional
}

// String renders the instruction, omitting an always-true condition.
func (r *RET) String() string {
	if r.c != True && r.c != nil {
		return fmt.Sprintf("RET %s", r.c)
	}
	return "RET"
}

// Encode emits conditional RET cc (x=3, z=0) or the plain RET
// opcode (x=3, q=1, z=1).
func (r *RET) Encode() []byte {
	if r.c != True && r.c != nil {
		return []byte{encodeXYZ(3, findInTableCC(r.c), 0)}
	}
	return []byte{encodeXPQZ(3, 0, 1, 1)}
}
// NewAccum builds an accumulator ALU instruction (ADD/ADC/SUB/SBC/
// AND/XOR/OR/CP) identified by its mnemonic, operating on A and l.
func NewAccum(name string, l Loc8) *accum {
	// TODO: lookup func by name, panic on unknown
	return &accum{name: name, InstU8: InstU8{l: l}}
}

// accumFunc is the signature of an ALU operation on two bytes.
// NOTE(review): currently unused — presumably reserved for execution
// support; confirm before removing.
type accumFunc func(a, b byte) byte

// accum is a generic accumulator ALU instruction; the opcode's y
// field is looked up from the mnemonic name.
type accum struct {
	// f AccumFunc
	InstU8
	name string
}

// String renders the instruction; ADD/ADC/SBC spell out the implicit
// A operand, the remaining mnemonics conventionally omit it.
func (a accum) String() string {
	switch a.name {
	case "ADD", "ADC", "SBC":
		return fmt.Sprintf("%s A, %s", a.name, a.l)
	default:
		return fmt.Sprintf("%s %s", a.name, a.l)
	}
}

// Encode emits the register form (x=2) or the immediate form
// (x=3, z=6 followed by the immediate byte), with index prefix
// handling via idxEncodeHelper.
func (a accum) Encode() []byte {
	a.inspect()
	y := findInTableALU(a.name)
	var buf []byte
	switch a.lInfo.ltype {
	case tableR:
		buf = []byte{encodeXYZ(2, y, a.lInfo.idxTable)}
	case Immediate:
		buf = []byte{encodeXYZ(3, y, 6)}
		buf = append(buf, a.lInfo.imm8)
	default:
		panic("Unknown accum location type")
	}
	return idxEncodeHelper(buf, a.idx)
}
// rot is a CB-prefixed rotate/shift instruction (RLC/RRC/RL/RR/SLA/
// SRA/SLL/SRL) identified by its mnemonic name.
type rot struct {
	InstU8
	name string
}

// NewRot builds a rotate/shift instruction operating on l.
func NewRot(name string, l Loc8) *rot {
	return &rot{InstU8: InstU8{l: l}, name: name}
}

// String renders the instruction in assembler syntax.
func (r *rot) String() string {
	return fmt.Sprintf("%s %s", r.name, r.l)
}

// Encode emits 0xCB followed by the opcode (x=0, y from the rot
// table, z from the register table).
func (r *rot) Encode() []byte {
	r.inspect()
	if r.lInfo.ltype != tableR {
		// Fixed copy-paste error: message previously said "BIT".
		panic(fmt.Sprintf("Non-tableR src in %s", r.name))
	}
	y := findInTableROT(r.name)
	buf := []byte{0xcb, encodeXYZ(0, y, r.lInfo.idxTable)}
	return idxEncodeHelper(buf, r.idx)
}
// BIT tests bit num of an 8-bit location.
type BIT struct {
	InstU8
	num byte
}

// NewBIT builds a BIT instruction testing bit num of l.
func NewBIT(num byte, l Loc8) *BIT {
	return &BIT{InstU8: InstU8{l: l}, num: num}
}

// String renders the instruction in assembler syntax.
func (b *BIT) String() string {
	return fmt.Sprintf("BIT %d, %s", b.num, b.l)
}

// Encode emits 0xCB followed by the opcode (x=1, y=bit number,
// z from the register table).
func (b *BIT) Encode() []byte {
	b.inspect()
	if b.lInfo.ltype != tableR {
		panic("Non-tableR src in BIT")
	}
	opcode := encodeXYZ(1, b.num, b.lInfo.idxTable)
	return idxEncodeHelper([]byte{0xcb, opcode}, b.idx)
}
// RES resets (clears) bit num of an 8-bit location.
type RES struct {
	InstU8
	num byte
}

// NewRES builds a RES instruction clearing bit num of l.
func NewRES(num byte, l Loc8) *RES {
	return &RES{InstU8: InstU8{l: l}, num: num}
}

// String renders the instruction in assembler syntax.
func (r *RES) String() string {
	return fmt.Sprintf("RES %d, %s", r.num, r.l)
}

// Encode emits 0xCB followed by the opcode (x=2, y=bit number,
// z from the register table).
func (r *RES) Encode() []byte {
	r.inspect()
	if r.lInfo.ltype != tableR {
		// Fixed copy-paste error: message previously said "BIT".
		panic("Non-tableR src in RES")
	}
	enc := encodeXYZ(2, r.num, r.lInfo.idxTable)
	return idxEncodeHelper([]byte{0xcb, enc}, r.idx)
}
// SET sets bit num of an 8-bit location.
type SET struct {
	InstU8
	num byte
}

// NewSET builds a SET instruction setting bit num of l.
func NewSET(num byte, l Loc8) *SET {
	return &SET{InstU8: InstU8{l: l}, num: num}
}

// String renders the instruction in assembler syntax.
func (s *SET) String() string {
	return fmt.Sprintf("SET %d, %s", s.num, s.l)
}

// Encode emits 0xCB followed by the opcode (x=3, y=bit number,
// z from the register table).
func (s *SET) Encode() []byte {
	s.inspect()
	if s.lInfo.ltype != tableR {
		// Fixed copy-paste error: message previously said "BIT".
		panic("Non-tableR src in SET")
	}
	enc := encodeXYZ(3, s.num, s.lInfo.idxTable)
	return idxEncodeHelper([]byte{0xcb, enc}, s.idx)
}
// Simple is a single-byte instruction with no operands; the value is
// the opcode itself.
type Simple byte

// Opcode values for the un-prefixed no-operand instructions.
const (
	NOP  Simple = 0x00
	HALT Simple = 0x76
	RLCA Simple = 0x07
	RRCA Simple = 0x0f
	RLA  Simple = 0x17
	RRA  Simple = 0x1f
	DAA  Simple = 0x27
	CPL  Simple = 0x2f
	SCF  Simple = 0x37
	CCF  Simple = 0x3f
	EXX  Simple = 0xd9
	DI   Simple = 0xf3
	EI   Simple = 0xfb
)

// simpleName pairs a Simple opcode with its mnemonic.
type simpleName struct {
	inst Simple
	name string
}

// simpleNames maps every Simple opcode to its mnemonic.
var simpleNames []simpleName = []simpleName{
	{NOP, "NOP"},
	{HALT, "HALT"},
	{RLCA, "RLCA"},
	{RRCA, "RRCA"},
	{RLA, "RLA"},
	{RRA, "RRA"},
	{DAA, "DAA"},
	{CPL, "CPL"},
	{SCF, "SCF"},
	{CCF, "CCF"},
	{EXX, "EXX"},
	{DI, "DI"},
	{EI, "EI"},
}

// String returns the mnemonic; unknown opcodes panic.
func (s Simple) String() string {
	for _, entry := range simpleNames {
		if entry.inst == s {
			return entry.name
		}
	}
	panic(fmt.Sprintf("Unknown simple instruction: %02X", byte(s)))
}

// Encode emits the single opcode byte.
func (s Simple) Encode() []byte {
	return []byte{byte(s)}
}

// LookupSimpleName returns the Simple for a (case-insensitive)
// mnemonic; unknown names panic.
func LookupSimpleName(name string) Simple {
	upper := strings.ToUpper(name)
	for _, entry := range simpleNames {
		if entry.name == upper {
			return entry.inst
		}
	}
	panic(fmt.Errorf("Unrecognised Simple instruction name : [%s]", name))
}
// EDSimple is a two-byte, ED-prefixed instruction with no operands;
// the value is the second opcode byte.
type EDSimple byte

// Second-byte opcode values for the ED-prefixed no-operand instructions.
const (
	NEG  EDSimple = 0x44
	RETN EDSimple = 0x45
	RETI EDSimple = 0x4d
	RRD  EDSimple = 0x67
	RLD  EDSimple = 0x6f
	IM0  EDSimple = 0x46
	IM1  EDSimple = 0x56
	IM2  EDSimple = 0x5e
	LDI  EDSimple = 0xa0
	CPI  EDSimple = 0xa1
	LDD  EDSimple = 0xa8
	CPD  EDSimple = 0xa9
	LDIR EDSimple = 0xb0
	CPIR EDSimple = 0xb1
	LDDR EDSimple = 0xb8
	CPDR EDSimple = 0xb9
	INI  EDSimple = 0xa2
	OUTI EDSimple = 0xa3
	IND  EDSimple = 0xaa
	OUTD EDSimple = 0xab
	INIR EDSimple = 0xb2
	OTIR EDSimple = 0xb3
	INDR EDSimple = 0xba
	OTDR EDSimple = 0xbb
)

// edSimpleName pairs an EDSimple opcode with its mnemonic.
type edSimpleName struct {
	inst EDSimple
	name string
}

// EDSimpleNames maps every EDSimple opcode to its mnemonic.
var EDSimpleNames []edSimpleName = []edSimpleName{
	{NEG, "NEG"},
	{RETN, "RETN"},
	{RETI, "RETI"},
	{RRD, "RRD"},
	{RLD, "RLD"},
	{IM0, "IM 0"},
	{IM1, "IM 1"},
	{IM2, "IM 2"},
	{LDI, "LDI"},
	{CPI, "CPI"},
	{LDD, "LDD"},
	{CPD, "CPD"},
	{LDIR, "LDIR"},
	{CPIR, "CPIR"},
	{LDDR, "LDDR"},
	{CPDR, "CPDR"},
	{INI, "INI"},
	{OUTI, "OUTI"},
	{IND, "IND"},
	{OUTD, "OUTD"},
	{INIR, "INIR"},
	{OTIR, "OTIR"},
	{INDR, "INDR"},
	{OTDR, "OTDR"},
}

// String returns the mnemonic; unknown opcodes panic.
func (s EDSimple) String() string {
	for _, entry := range EDSimpleNames {
		if entry.inst == s {
			return entry.name
		}
	}
	panic(fmt.Sprintf("Unknown EDSimple instruction: %02X", byte(s)))
}

// Encode emits the ED prefix followed by the opcode byte.
func (s EDSimple) Encode() []byte {
	return []byte{0xed, byte(s)}
}

// LookupEDSimpleName returns the EDSimple for a (case-insensitive)
// mnemonic; unknown names panic.
func LookupEDSimpleName(name string) EDSimple {
	upper := strings.ToUpper(name)
	for _, entry := range EDSimpleNames {
		if entry.name == upper {
			return entry.inst
		}
	}
	panic(fmt.Errorf("Unrecognised EDSimple instruction name : [%s]", name))
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"errors"
"fmt"
"io"
"net"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"gopkg.in/gcfg.v1"
"github.com/golang/glog"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
"k8s.io/kubernetes/pkg/controller"
)
// VSphere Cloud Provider constants
const (
	ProviderName             = "vsphere"
	VolDir                   = "kubevols"   // datastore folder holding kube-created vmdks
	RoundTripperDefaultCount = 3            // default SOAP round-trip count (retries = count - 1)
	DummyVMPrefixName        = "vsphere-k8s"
	VSANDatastoreType        = "vsan"
	MacOuiVC                 = "00:50:56"   // MAC OUI assigned by vCenter
	MacOuiEsx                = "00:0c:29"   // MAC OUI assigned by ESX
	CleanUpDummyVMRoutineInterval = 5       // interval between dummy-VM cleanup runs — units not shown here; presumably minutes, confirm at use site
	UUIDPath                 = "/sys/class/dmi/id/product_serial" // sysfs file carrying the VM serial (requires root)
	UUIDPrefix               = "VMware-"    // prefix stripped from the sysfs serial to get the UUID
)
// cleanUpRoutineInitialized records whether the dummy-VM cleanup
// goroutine has been started (guarded by cleanUpRoutineInitLock).
var cleanUpRoutineInitialized = false

// datastoreFolderIDMap caches folder IDs per datastore.
var datastoreFolderIDMap = make(map[string]map[string]string)

var clientLock sync.Mutex            // serializes (re)connection of the vCenter client
var cleanUpRoutineInitLock sync.Mutex // guards cleanUpRoutineInitialized
var cleanUpDummyVMLock sync.RWMutex   // guards dummy-VM create/cleanup
// VSphere is an implementation of cloud provider Interface for VSphere.
type VSphere struct {
	conn *vclib.VSphereConnection // connection to vCenter
	cfg  *VSphereConfig           // parsed cloud-provider configuration
	// InstanceID of the server where this VSphere object is instantiated.
	localInstanceID string
}
// VSphereConfig information that is used by vSphere Cloud Provider to connect to VC
type VSphereConfig struct {
Global struct {
// vCenter username.
User string `gcfg:"user"`
// vCenter password in clear text.
Password string `gcfg:"password"`
// vCenter IP.
VCenterIP string `gcfg:"server"`
// vCenter port.
VCenterPort string `gcfg:"port"`
// True if vCenter uses self-signed cert.
InsecureFlag bool `gcfg:"insecure-flag"`
// Datacenter in which VMs are located.
Datacenter string `gcfg:"datacenter"`
// Datastore in which vmdks are stored.
Datastore string `gcfg:"datastore"`
// WorkingDir is path where VMs can be found.
WorkingDir string `gcfg:"working-dir"`
// Soap round tripper count (retries = RoundTripper - 1)
RoundTripperCount uint `gcfg:"soap-roundtrip-count"`
// VMUUID is the VM Instance UUID of virtual machine which can be retrieved from instanceUuid
// property in VmConfigInfo, or also set as vc.uuid in VMX file.
// If not set, will be fetched from the machine via sysfs (requires root)
VMUUID string `gcfg:"vm-uuid"`
// VMName is the VM name of virtual machine
// Combining the WorkingDir and VMName can form a unique InstanceID.
// When vm-name is set, no username/password is required on worker nodes.
VMName string `gcfg:"vm-name"`
}
Network struct {
// PublicNetwork is name of the network the VMs are joined to.
PublicNetwork string `gcfg:"public-network"`
}
Disk struct {
// SCSIControllerType defines SCSI controller to be used.
SCSIControllerType string `dcfg:"scsicontrollertype"`
}
}
// Volumes is the interface the vSphere provider exposes for managing
// vmdk-backed volumes and their attachment to nodes.
type Volumes interface {
	// AttachDisk attaches given disk to given node. Current node
	// is used when nodeName is empty string.
	AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskUUID string, err error)

	// DetachDisk detaches given disk to given node. Current node
	// is used when nodeName is empty string.
	// Assumption: If node doesn't exist, disk is already detached from node.
	DetachDisk(volPath string, nodeName k8stypes.NodeName) error

	// DiskIsAttached checks if a disk is attached to the given node.
	// Assumption: If node doesn't exist, disk is not attached to the node.
	DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error)

	// DisksAreAttached checks if a list disks are attached to the given node.
	// Assumption: If node doesn't exist, disks are not attached to the node.
	DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error)

	// CreateVolume creates a new vmdk with specified parameters.
	CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath string, err error)

	// DeleteVolume deletes vmdk.
	DeleteVolume(vmDiskPath string) error
}
// readConfig parses the vSphere cloud config file from the given reader and
// returns the populated VSphereConfig. A nil reader is an error, since the
// provider cannot run without configuration.
func readConfig(config io.Reader) (VSphereConfig, error) {
	var cfg VSphereConfig
	if config == nil {
		return cfg, fmt.Errorf("no vSphere cloud provider config file given")
	}
	if err := gcfg.ReadInto(&cfg, config); err != nil {
		return cfg, err
	}
	return cfg, nil
}
// init registers vSphere metrics and the cloud provider factory under
// ProviderName so the cloud provider framework can instantiate it.
func init() {
	vclib.RegisterMetrics()
	factory := func(config io.Reader) (cloudprovider.Interface, error) {
		cfg, err := readConfig(config)
		if err != nil {
			return nil, err
		}
		return newVSphere(cfg)
	}
	cloudprovider.RegisterCloudProvider(ProviderName, factory)
}
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider.
// The vSphere provider does not use the client builder, so this is a no-op.
func (vs *VSphere) Initialize(clientBuilder controller.ControllerClientBuilder) {}
// newVSphere builds a VSphere provider instance from cfg. It applies
// defaults (SCSI controller type, working dir cleanup, round-tripper count,
// vCenter port), resolves the local VM UUID/name, and prepares (but does not
// keep open) the vCenter connection. A finalizer logs out the session.
func newVSphere(cfg VSphereConfig) (*VSphere, error) {
	var err error
	if cfg.Disk.SCSIControllerType == "" {
		cfg.Disk.SCSIControllerType = vclib.PVSCSIControllerType
	} else if !vclib.CheckControllerSupported(cfg.Disk.SCSIControllerType) {
		glog.Errorf("%v is not a supported SCSI Controller type. Please configure 'lsilogic-sas' OR 'pvscsi'", cfg.Disk.SCSIControllerType)
		return nil, errors.New("Controller type not supported. Please configure 'lsilogic-sas' OR 'pvscsi'")
	}
	if cfg.Global.WorkingDir != "" {
		cfg.Global.WorkingDir = path.Clean(cfg.Global.WorkingDir)
	}
	if cfg.Global.RoundTripperCount == 0 {
		cfg.Global.RoundTripperCount = RoundTripperDefaultCount
	}
	if cfg.Global.VCenterPort == "" {
		cfg.Global.VCenterPort = "443"
	}
	if cfg.Global.VMUUID == "" {
		// This needs root privileges on the host, and will fail otherwise.
		cfg.Global.VMUUID, err = getvmUUID()
		if err != nil {
			glog.Errorf("Failed to get VM UUID. err: %+v", err)
			return nil, err
		}
	}
	vSphereConn := vclib.VSphereConnection{
		Username:          cfg.Global.User,
		Password:          cfg.Global.Password,
		Hostname:          cfg.Global.VCenterIP,
		Insecure:          cfg.Global.InsecureFlag,
		RoundTripperCount: cfg.Global.RoundTripperCount,
		Port:              cfg.Global.VCenterPort,
	}
	var instanceID string
	if cfg.Global.VMName == "" {
		// if VMName is not set in the cloud config file, each nodes (including worker nodes) need credentials to obtain VMName from vCenter
		glog.V(4).Infof("Cannot find VMName from cloud config file, start obtaining it from vCenter")
		// Create context
		ctx, cancel := context.WithCancel(context.TODO())
		defer cancel()
		err = vSphereConn.Connect(ctx)
		if err != nil {
			glog.Errorf("Failed to connect to vSphere")
			return nil, err
		}
		// Look up this VM by its instance UUID, then use its vCenter object
		// name as the instance ID.
		dc, err := vclib.GetDatacenter(ctx, &vSphereConn, cfg.Global.Datacenter)
		if err != nil {
			return nil, err
		}
		vm, err := dc.GetVMByUUID(ctx, cfg.Global.VMUUID)
		if err != nil {
			return nil, err
		}
		vmName, err := vm.ObjectName(ctx)
		if err != nil {
			return nil, err
		}
		instanceID = vmName
	} else {
		instanceID = cfg.Global.VMName
	}
	vs := VSphere{
		conn:            &vSphereConn,
		cfg:             &cfg,
		localInstanceID: instanceID,
	}
	// Best-effort session logout when the provider object is collected.
	runtime.SetFinalizer(&vs, logout)
	return &vs, nil
}
// logout terminates the vCenter session held by the connection, if one was
// ever established. Registered as a finalizer for *VSphere in newVSphere.
func logout(vs *VSphere) {
	if vs.conn.GoVmomiClient == nil {
		return
	}
	vs.conn.GoVmomiClient.Logout(context.TODO())
}
// Instances returns an implementation of Instances for vSphere.
// The VSphere object itself implements the Instances interface, so it is
// returned directly, with true indicating the feature is supported.
func (vs *VSphere) Instances() (cloudprovider.Instances, bool) {
	return vs, true
}
// getLocalIP returns the non-loopback IPv4 addresses of the local host.
// Addresses whose interface MAC OUI identifies a VMware virtual NIC
// (vCenter or ESX prefixes) are reported as both ExternalIP and InternalIP.
func getLocalIP() ([]v1.NodeAddress, error) {
	addrs := []v1.NodeAddress{}
	ifaces, err := net.Interfaces()
	if err != nil {
		glog.Errorf("net.Interfaces() failed for NodeAddresses - %v", err)
		return nil, err
	}
	for _, i := range ifaces {
		localAddrs, err := i.Addrs()
		if err != nil {
			glog.Warningf("Failed to extract addresses for NodeAddresses - %v", err)
			continue
		}
		for _, addr := range localAddrs {
			ipnet, ok := addr.(*net.IPNet)
			if !ok || ipnet.IP.IsLoopback() || ipnet.IP.To4() == nil {
				continue
			}
			// Filter external IP by MAC address OUIs from vCenter and from ESX
			if strings.HasPrefix(i.HardwareAddr.String(), MacOuiVC) ||
				strings.HasPrefix(i.HardwareAddr.String(), MacOuiEsx) {
				v1helper.AddToNodeAddresses(&addrs,
					v1.NodeAddress{
						Type:    v1.NodeExternalIP,
						Address: ipnet.IP.String(),
					},
					v1.NodeAddress{
						Type:    v1.NodeInternalIP,
						Address: ipnet.IP.String(),
					},
				)
			}
			// Fix: the previous code logged an addressType variable that was
			// declared but never assigned, so it always printed the zero
			// value; log only the discovered IP.
			glog.V(4).Infof("Find local IP address %v", ipnet.IP.String())
		}
	}
	return addrs, nil
}
// getVMByName resolves nodeName to a VirtualMachine managed object by
// looking it up under the configured working directory in the configured
// datacenter.
func (vs *VSphere) getVMByName(ctx context.Context, nodeName k8stypes.NodeName) (*vclib.VirtualMachine, error) {
	datacenter, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter)
	if err != nil {
		return nil, err
	}
	inventoryPath := vs.cfg.Global.WorkingDir + "/" + nodeNameToVMName(nodeName)
	return datacenter.GetVMByPath(ctx, inventoryPath)
}
// NodeAddresses is an implementation of Instances.NodeAddresses.
// For the local node it inspects local interfaces; for any other node it
// queries vCenter for the VM's guest.net property and reports the IPv4
// addresses on the configured public network as External and Internal IPs.
func (vs *VSphere) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) {
	// Get local IP addresses if node is local node
	if vs.localInstanceID == nodeNameToVMName(nodeName) {
		return getLocalIP()
	}
	addrs := []v1.NodeAddress{}
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Ensure client is logged in and session is valid
	err := vs.conn.Connect(ctx)
	if err != nil {
		return nil, err
	}
	vm, err := vs.getVMByName(ctx, nodeName)
	if err != nil {
		glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err)
		return nil, err
	}
	// Fetch only the guest.net property to keep the vCenter query cheap.
	vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*vclib.VirtualMachine{vm}, []string{"guest.net"})
	if err != nil {
		glog.Errorf("Failed to get VM Managed object with property guest.net for node: %q. err: +%v", nodeNameToVMName(nodeName), err)
		return nil, err
	}
	// retrieve VM's ip(s)
	for _, v := range vmMoList[0].Guest.Net {
		if vs.cfg.Network.PublicNetwork == v.Network {
			for _, ip := range v.IpAddress {
				// IPv4 addresses only.
				if net.ParseIP(ip).To4() != nil {
					v1helper.AddToNodeAddresses(&addrs,
						v1.NodeAddress{
							Type:    v1.NodeExternalIP,
							Address: ip,
						}, v1.NodeAddress{
							Type:    v1.NodeInternalIP,
							Address: ip,
						},
					)
				}
			}
		}
	}
	return addrs, nil
}
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (vs *VSphere) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
	// The VM name is the final element of the provider ID path.
	return vs.NodeAddresses(vmNameToNodeName(path.Base(providerID)))
}
// AddSSHKeyToAllInstances adds an SSH public key to all instances.
// Not supported by the vSphere provider.
func (vs *VSphere) AddSSHKeyToAllInstances(user string, keyData []byte) error {
	// Return the shared sentinel instead of an ad-hoc errors.New so callers
	// can detect "not implemented" by identity, consistent with the rest of
	// the cloudprovider package.
	return cloudprovider.NotImplemented
}
// CurrentNodeName gives the current node name.
// The hostname argument is ignored; the locally resolved instance ID is
// authoritative for this provider.
func (vs *VSphere) CurrentNodeName(hostname string) (k8stypes.NodeName, error) {
	nodeName := vmNameToNodeName(vs.localInstanceID)
	return nodeName, nil
}
// nodeNameToVMName maps a NodeName to the vmware infrastructure name.
// The mapping is the identity conversion to string.
func nodeNameToVMName(nodeName k8stypes.NodeName) string {
	vmName := string(nodeName)
	return vmName
}
// vmNameToNodeName maps a vmware infrastructure name to a NodeName.
// (The original comment named the wrong function; this is the inverse of
// nodeNameToVMName.)
func vmNameToNodeName(vmName string) k8stypes.NodeName {
	return k8stypes.NodeName(vmName)
}
// ExternalID returns the cloud provider ID of the node with the specified Name (deprecated).
// It delegates to InstanceID, so the two always agree.
func (vs *VSphere) ExternalID(nodeName k8stypes.NodeName) (string, error) {
	return vs.InstanceID(nodeName)
}
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
// Not supported by the vSphere provider.
func (vs *VSphere) InstanceExistsByProviderID(providerID string) (bool, error) {
	// Use the shared sentinel instead of an ad-hoc errors.New so callers can
	// detect "not implemented" by identity.
	return false, cloudprovider.NotImplemented
}
// InstanceID returns the cloud provider ID of the node with the specified Name.
// For the local node this is WorkingDir/instanceID; for remote nodes the VM
// is looked up in vCenter and its inventory path returned (only if powered
// on). Returns cloudprovider.InstanceNotFound when the VM does not exist.
func (vs *VSphere) InstanceID(nodeName k8stypes.NodeName) (string, error) {
	if vs.localInstanceID == nodeNameToVMName(nodeName) {
		return vs.cfg.Global.WorkingDir + "/" + vs.localInstanceID, nil
	}
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Ensure client is logged in and session is valid
	err := vs.conn.Connect(ctx)
	if err != nil {
		return "", err
	}
	vm, err := vs.getVMByName(ctx, nodeName)
	if err != nil {
		if vclib.IsNotFound(err) {
			return "", cloudprovider.InstanceNotFound
		}
		glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err)
		return "", err
	}
	isActive, err := vm.IsActive(ctx)
	if err != nil {
		glog.Errorf("Failed to check whether node %q is active. err: %+v.", nodeNameToVMName(nodeName), err)
		return "", err
	}
	if isActive {
		return "/" + vm.InventoryPath, nil
	}
	// Powered-off VMs are reported as an error, not as a valid instance.
	return "", fmt.Errorf("The node %q is not active", nodeNameToVMName(nodeName))
}
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
// vSphere has no instance-type concept, so the empty string is returned.
func (vs *VSphere) InstanceTypeByProviderID(providerID string) (string, error) {
	return "", nil
}
// InstanceType returns the type of the specified instance.
// vSphere has no instance-type concept, so the empty string is returned.
func (vs *VSphere) InstanceType(name k8stypes.NodeName) (string, error) {
	return "", nil
}
// Clusters returns a clusters interface for vSphere.
// NOTE(review): this returns (nil, true), i.e. "supported" with a nil
// implementation — callers must tolerate the nil; confirm intent upstream.
func (vs *VSphere) Clusters() (cloudprovider.Clusters, bool) {
	return nil, true
}
// ProviderName returns the cloud provider ID.
// Always the constant "vsphere" (ProviderName).
func (vs *VSphere) ProviderName() string {
	return ProviderName
}
// LoadBalancer returns an implementation of LoadBalancer for vSphere.
// Load balancing is not supported, hence (nil, false).
func (vs *VSphere) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	return nil, false
}
// Zones returns an implementation of Zones for vSphere.
// (Comment previously said "Google vSphere" — a leftover from the GCE
// provider it was copied from.) Zones are not supported, hence (nil, false).
func (vs *VSphere) Zones() (cloudprovider.Zones, bool) {
	glog.V(1).Info("The vSphere cloud provider does not support zones")
	return nil, false
}
// Routes returns a false since the interface is not supported for vSphere.
func (vs *VSphere) Routes() (cloudprovider.Routes, bool) {
	return nil, false
}
// ScrubDNS filters DNS settings for pods.
// vSphere performs no filtering: the inputs are returned unchanged.
func (vs *VSphere) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
	return nameservers, searches
}
// AttachDisk attaches given virtual disk volume to the compute running kubelet.
// The work is wrapped in a closure so the duration and outcome can be
// recorded under OperationAttachVolume regardless of the exit path.
// An empty nodeName means the local node.
func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskUUID string, err error) {
	attachDiskInternal := func(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskUUID string, err error) {
		if nodeName == "" {
			nodeName = vmNameToNodeName(vs.localInstanceID)
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Ensure client is logged in and session is valid
		err = vs.conn.Connect(ctx)
		if err != nil {
			return "", err
		}
		vm, err := vs.getVMByName(ctx, nodeName)
		if err != nil {
			glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err)
			return "", err
		}
		// NOTE(review): the controller type is hard-coded to PVSCSI here
		// rather than taken from cfg.Disk.SCSIControllerType — confirm
		// whether that is intentional.
		diskUUID, err = vm.AttachDisk(ctx, vmDiskPath, &vclib.VolumeOptions{SCSIControllerType: vclib.PVSCSIControllerType, StoragePolicyID: storagePolicyID})
		if err != nil {
			glog.Errorf("Failed to attach disk: %s for node: %s. err: +%v", vmDiskPath, nodeNameToVMName(nodeName), err)
			return "", err
		}
		return diskUUID, nil
	}
	requestTime := time.Now()
	diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyID, nodeName)
	vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err)
	return diskUUID, err
}
// DetachDisk detaches given virtual disk volume from the compute running kubelet.
// An empty nodeName means the local node. A missing node is treated as
// "already detached" and succeeds. Duration and outcome are recorded under
// OperationDetachVolume.
func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error {
	detachDiskInternal := func(volPath string, nodeName k8stypes.NodeName) error {
		if nodeName == "" {
			nodeName = vmNameToNodeName(vs.localInstanceID)
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Ensure client is logged in and session is valid
		err := vs.conn.Connect(ctx)
		if err != nil {
			return err
		}
		vm, err := vs.getVMByName(ctx, nodeName)
		if err != nil {
			// If node doesn't exist, disk is already detached from node.
			if vclib.IsNotFound(err) {
				glog.Infof("Node %q does not exist, disk %s is already detached from node.", nodeNameToVMName(nodeName), volPath)
				return nil
			}
			glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err)
			return err
		}
		err = vm.DetachDisk(ctx, volPath)
		if err != nil {
			glog.Errorf("Failed to detach disk: %s for node: %s. err: +%v", volPath, nodeNameToVMName(nodeName), err)
			return err
		}
		return nil
	}
	requestTime := time.Now()
	err := detachDiskInternal(volPath, nodeName)
	// BUG fix: the error was previously recorded as a literal nil, so the
	// detach-volume metric never reflected failures. Record the real err,
	// matching every other volume operation in this file.
	vclib.RecordvSphereMetric(vclib.OperationDetachVolume, requestTime, err)
	return err
}
// DiskIsAttached returns if disk is attached to the VM using controllers supported by the plugin.
// An empty nodeName means the local node. A missing node is treated as
// "not attached" without error. Duration and outcome are recorded under
// OperationDiskIsAttached.
func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error) {
	diskIsAttachedInternal := func(volPath string, nodeName k8stypes.NodeName) (bool, error) {
		var vSphereInstance string
		if nodeName == "" {
			vSphereInstance = vs.localInstanceID
			nodeName = vmNameToNodeName(vSphereInstance)
		} else {
			vSphereInstance = nodeNameToVMName(nodeName)
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Ensure client is logged in and session is valid
		err := vs.conn.Connect(ctx)
		if err != nil {
			return false, err
		}
		vm, err := vs.getVMByName(ctx, nodeName)
		if err != nil {
			if vclib.IsNotFound(err) {
				glog.Warningf("Node %q does not exist, vsphere CP will assume disk %v is not attached to it.", nodeName, volPath)
				// make the disk as detached and return false without error.
				return false, nil
			}
			glog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err)
			return false, err
		}
		attached, err := vm.IsDiskAttached(ctx, volPath)
		if err != nil {
			glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached on node %q",
				volPath,
				vSphereInstance)
		}
		// On error, attached holds whatever IsDiskAttached returned; both
		// values are propagated to the caller.
		return attached, err
	}
	requestTime := time.Now()
	isAttached, err := diskIsAttachedInternal(volPath, nodeName)
	vclib.RecordvSphereMetric(vclib.OperationDiskIsAttached, requestTime, err)
	return isAttached, err
}
// DisksAreAttached returns if disks are attached to the VM using controllers supported by the plugin.
// Volume paths are first canonicalized (and given a .vmdk extension if
// missing), then attachment is checked in one batched vCenter call.
// Duration and outcome are recorded under OperationDisksAreAttached.
func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
	disksAreAttachedInternal := func(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
		attached := make(map[k8stypes.NodeName]map[string]bool)
		if len(nodeVolumes) == 0 {
			return attached, nil
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Ensure client is logged in and session is valid
		err := vs.conn.Connect(ctx)
		if err != nil {
			return nil, err
		}
		dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter)
		if err != nil {
			return nil, err
		}
		vmVolumes := make(map[string][]string)
		for nodeName, volPaths := range nodeVolumes {
			for i, volPath := range volPaths {
				// Get the canonical volume path for volPath.
				canonicalVolumePath, err := getcanonicalVolumePath(ctx, dc, volPath)
				if err != nil {
					glog.Errorf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
					return nil, err
				}
				// Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map
				if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" {
					canonicalVolumePath += ".vmdk"
				}
				// Note: this writes back into the caller's slice.
				volPaths[i] = canonicalVolumePath
			}
			vmVolumes[nodeNameToVMName(nodeName)] = volPaths
		}
		// Check if the disks are attached to their respective nodes
		disksAttachedList, err := dc.CheckDisksAttached(ctx, vmVolumes)
		if err != nil {
			return nil, err
		}
		// Re-key the result from VM names back to NodeNames.
		for vmName, volPaths := range disksAttachedList {
			attached[vmNameToNodeName(vmName)] = volPaths
		}
		return attached, nil
	}
	requestTime := time.Now()
	attached, err := disksAreAttachedInternal(nodeVolumes)
	vclib.RecordvSphereMetric(vclib.OperationDisksAreAttached, requestTime, err)
	return attached, err
}
// CreateVolume creates a volume of given size (in KiB) and return the volume path.
// If the volumeOptions.Datastore is part of datastore cluster for example - [DatastoreCluster/sharedVmfs-0] then
// return value will be [DatastoreCluster/sharedVmfs-0] kubevols/<volume-name>.vmdk
// else return value will be [sharedVmfs-0] kubevols/<volume-name>.vmdk
// When a storage policy or VSAN profile is requested, a one-time background
// goroutine is started to clean up stale dummy VMs, and dummy-VM options are
// prepared under a read lock so concurrent PVC requests can proceed.
// Duration and outcome are recorded via RecordCreateVolumeMetric.
func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) {
	glog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions)
	createVolumeInternal := func(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) {
		var datastore string
		// Default datastore is the datastore in the vSphere config file that is used to initialize vSphere cloud provider.
		if volumeOptions.Datastore == "" {
			datastore = vs.cfg.Global.Datastore
		} else {
			datastore = volumeOptions.Datastore
		}
		datastore = strings.TrimSpace(datastore)
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Ensure client is logged in and session is valid
		err = vs.conn.Connect(ctx)
		if err != nil {
			return "", err
		}
		dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter)
		if err != nil {
			return "", err
		}
		var vmOptions *vclib.VMOptions
		if volumeOptions.VSANStorageProfileData != "" || volumeOptions.StoragePolicyName != "" {
			// Acquire a read lock to ensure multiple PVC requests can be processed simultaneously.
			cleanUpDummyVMLock.RLock()
			defer cleanUpDummyVMLock.RUnlock()
			// Create a new background routine that will delete any dummy VM's that are left stale.
			// This routine will get executed for every 5 minutes and gets initiated only once in its entire lifetime.
			cleanUpRoutineInitLock.Lock()
			if !cleanUpRoutineInitialized {
				glog.V(1).Infof("Starting a clean up routine to remove stale dummy VM's")
				go vs.cleanUpDummyVMs(DummyVMPrefixName)
				cleanUpRoutineInitialized = true
			}
			cleanUpRoutineInitLock.Unlock()
			vmOptions, err = vs.setVMOptions(ctx, dc)
			if err != nil {
				glog.Errorf("Failed to set VM options requires to create a vsphere volume. err: %+v", err)
				return "", err
			}
		}
		// When only a policy name is given, pick a compatible datastore via PBM.
		if volumeOptions.StoragePolicyName != "" && volumeOptions.Datastore == "" {
			datastore, err = getPbmCompatibleDatastore(ctx, dc.Client(), volumeOptions.StoragePolicyName, vmOptions.VMFolder)
			if err != nil {
				glog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err)
				return "", err
			}
		}
		ds, err := dc.GetDatastoreByName(ctx, datastore)
		if err != nil {
			return "", err
		}
		volumeOptions.Datastore = datastore
		// Ensure the kubevols directory exists; an already-existing dir is fine.
		kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/"
		err = ds.CreateDirectory(ctx, kubeVolsPath, false)
		if err != nil && err != vclib.ErrFileAlreadyExist {
			glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err)
			return "", err
		}
		volumePath := kubeVolsPath + volumeOptions.Name + ".vmdk"
		disk := diskmanagers.VirtualDisk{
			DiskPath:      volumePath,
			VolumeOptions: volumeOptions,
			VMOptions:     vmOptions,
		}
		volumePath, err = disk.Create(ctx, ds)
		if err != nil {
			glog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, datastore, err)
			return "", err
		}
		// Get the canonical path for the volume path.
		canonicalVolumePath, err = getcanonicalVolumePath(ctx, dc, volumePath)
		if err != nil {
			glog.Errorf("Failed to get canonical vsphere volume path for volume: %s with volumeOptions: %+v on datastore: %s. err: %+v", volumePath, volumeOptions, datastore, err)
			return "", err
		}
		if filepath.Base(datastore) != datastore {
			// If datastore is within cluster, add cluster path to the volumePath
			canonicalVolumePath = strings.Replace(canonicalVolumePath, filepath.Base(datastore), datastore, 1)
		}
		return canonicalVolumePath, nil
	}
	requestTime := time.Now()
	canonicalVolumePath, err = createVolumeInternal(volumeOptions)
	vclib.RecordCreateVolumeMetric(volumeOptions, requestTime, err)
	glog.V(1).Infof("The canonical volume path for the newly created vSphere volume is %q", canonicalVolumePath)
	return canonicalVolumePath, err
}
// DeleteVolume deletes the vmdk at vmDiskPath from the datastore configured
// in the cloud config. Duration and outcome are recorded under
// OperationDeleteVolume.
func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
	glog.V(1).Infof("Starting to delete vSphere volume with vmDiskPath: %s", vmDiskPath)
	del := func(vmDiskPath string) error {
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Ensure client is logged in and session is valid
		if err := vs.conn.Connect(ctx); err != nil {
			return err
		}
		dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter)
		if err != nil {
			return err
		}
		ds, err := dc.GetDatastoreByName(ctx, vs.cfg.Global.Datastore)
		if err != nil {
			return err
		}
		vDisk := diskmanagers.VirtualDisk{
			DiskPath:      vmDiskPath,
			VolumeOptions: &vclib.VolumeOptions{},
			VMOptions:     &vclib.VMOptions{},
		}
		if err := vDisk.Delete(ctx, ds); err != nil {
			glog.Errorf("Failed to delete vsphere volume with vmDiskPath: %s. err: %+v", vmDiskPath, err)
			return err
		}
		return nil
	}
	start := time.Now()
	err := del(vmDiskPath)
	vclib.RecordvSphereMetric(vclib.OperationDeleteVolume, start, err)
	return err
}
// HasClusterID returns true if the cluster has a clusterID.
// Always true for vSphere: no cluster-ID check is performed.
func (vs *VSphere) HasClusterID() bool {
	return true
}
Use the custom cloudprovider.NotImplemented sentinel error for "unimplemented" methods
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"errors"
"fmt"
"io"
"net"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"gopkg.in/gcfg.v1"
"github.com/golang/glog"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
"k8s.io/kubernetes/pkg/controller"
)
// VSphere Cloud Provider constants
const (
	// ProviderName is the registered name of this cloud provider.
	ProviderName = "vsphere"
	// VolDir is the datastore folder in which kubernetes vmdks are created.
	VolDir = "kubevols"
	// RoundTripperDefaultCount is the default SOAP round-tripper count
	// (retries = count - 1).
	RoundTripperDefaultCount = 3
	// DummyVMPrefixName prefixes the throwaway VMs created for
	// policy-based provisioning (see CreateVolume / cleanUpDummyVMs).
	DummyVMPrefixName = "vsphere-k8s"
	// VSANDatastoreType identifies a VSAN datastore.
	VSANDatastoreType = "vsan"
	// MacOuiVC and MacOuiEsx are MAC OUIs of VMware virtual NICs, used by
	// getLocalIP to recognize vCenter- and ESX-assigned interfaces.
	MacOuiVC  = "00:50:56"
	MacOuiEsx = "00:0c:29"
	// CleanUpDummyVMRoutineInterval is the dummy-VM cleanup period in minutes.
	CleanUpDummyVMRoutineInterval = 5
	// UUIDPath is the sysfs file holding the product serial (root only).
	UUIDPath = "/sys/class/dmi/id/product_serial"
	// UUIDPrefix is stripped from the serial to obtain the VM UUID.
	UUIDPrefix = "VMware-"
)
// cleanUpRoutineInitialized records whether the stale-dummy-VM cleanup
// goroutine has been started; guarded by cleanUpRoutineInitLock.
var cleanUpRoutineInitialized = false

// datastoreFolderIDMap caches per-datastore folder information
// (presumably folder path -> ID; used outside this chunk — verify at callers).
var datastoreFolderIDMap = make(map[string]map[string]string)

// clientLock serializes client setup (not used in this chunk — verify at callers).
var clientLock sync.Mutex

// cleanUpRoutineInitLock protects cleanUpRoutineInitialized.
var cleanUpRoutineInitLock sync.Mutex

// cleanUpDummyVMLock is held for reading by volume creation and (presumably)
// for writing by the dummy-VM cleanup routine.
var cleanUpDummyVMLock sync.RWMutex
// VSphere is an implementation of cloud provider Interface for VSphere.
type VSphere struct {
	// conn is the (lazily connected) vCenter connection.
	conn *vclib.VSphereConnection
	// cfg holds the parsed cloud config.
	cfg *VSphereConfig
	// InstanceID of the server where this VSphere object is instantiated.
	localInstanceID string
}
// VSphereConfig information that is used by vSphere Cloud Provider to connect to VC
type VSphereConfig struct {
	Global struct {
		// vCenter username.
		User string `gcfg:"user"`
		// vCenter password in clear text.
		Password string `gcfg:"password"`
		// vCenter IP.
		VCenterIP string `gcfg:"server"`
		// vCenter port.
		VCenterPort string `gcfg:"port"`
		// True if vCenter uses self-signed cert.
		InsecureFlag bool `gcfg:"insecure-flag"`
		// Datacenter in which VMs are located.
		Datacenter string `gcfg:"datacenter"`
		// Datastore in which vmdks are stored.
		Datastore string `gcfg:"datastore"`
		// WorkingDir is path where VMs can be found.
		WorkingDir string `gcfg:"working-dir"`
		// Soap round tripper count (retries = RoundTripper - 1)
		RoundTripperCount uint `gcfg:"soap-roundtrip-count"`
		// VMUUID is the VM Instance UUID of virtual machine which can be retrieved from instanceUuid
		// property in VmConfigInfo, or also set as vc.uuid in VMX file.
		// If not set, will be fetched from the machine via sysfs (requires root)
		VMUUID string `gcfg:"vm-uuid"`
		// VMName is the VM name of virtual machine
		// Combining the WorkingDir and VMName can form a unique InstanceID.
		// When vm-name is set, no username/password is required on worker nodes.
		VMName string `gcfg:"vm-name"`
	}
	Network struct {
		// PublicNetwork is name of the network the VMs are joined to.
		PublicNetwork string `gcfg:"public-network"`
	}
	Disk struct {
		// SCSIControllerType defines SCSI controller to be used.
		// Fix: tag was misspelled `dcfg:"..."`, so gcfg did not bind the
		// "scsicontrollertype" key via the tag; corrected to `gcfg:`.
		SCSIControllerType string `gcfg:"scsicontrollertype"`
	}
}
// Volumes is the vmdk-backed volume management contract implemented by the
// vSphere cloud provider.
type Volumes interface {
	// AttachDisk attaches given disk to given node. Current node
	// is used when nodeName is empty string.
	AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskUUID string, err error)
	// DetachDisk detaches given disk to given node. Current node
	// is used when nodeName is empty string.
	// Assumption: If node doesn't exist, disk is already detached from node.
	DetachDisk(volPath string, nodeName k8stypes.NodeName) error
	// DiskIsAttached checks if a disk is attached to the given node.
	// Assumption: If node doesn't exist, disk is not attached to the node.
	DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error)
	// DisksAreAttached checks if a list disks are attached to the given node.
	// Assumption: If node doesn't exist, disks are not attached to the node.
	DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error)
	// CreateVolume creates a new vmdk with specified parameters.
	CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath string, err error)
	// DeleteVolume deletes vmdk.
	DeleteVolume(vmDiskPath string) error
}
// readConfig parses the vSphere cloud config file from the given reader and
// returns the populated VSphereConfig. A nil reader is an error, since the
// provider cannot run without configuration.
func readConfig(config io.Reader) (VSphereConfig, error) {
	var cfg VSphereConfig
	if config == nil {
		return cfg, fmt.Errorf("no vSphere cloud provider config file given")
	}
	if err := gcfg.ReadInto(&cfg, config); err != nil {
		return cfg, err
	}
	return cfg, nil
}
// init registers vSphere metrics and the provider factory under ProviderName.
func init() {
	vclib.RegisterMetrics()
	cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
		cfg, err := readConfig(config)
		if err != nil {
			return nil, err
		}
		return newVSphere(cfg)
	})
}
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider.
// The vSphere provider does not use the client builder, so this is a no-op.
func (vs *VSphere) Initialize(clientBuilder controller.ControllerClientBuilder) {}
// newVSphere builds a VSphere provider instance from cfg: applies defaults
// (SCSI controller, round-tripper count, port), resolves the local VM
// UUID/name, prepares the vCenter connection, and registers a logout
// finalizer.
func newVSphere(cfg VSphereConfig) (*VSphere, error) {
	var err error
	if cfg.Disk.SCSIControllerType == "" {
		cfg.Disk.SCSIControllerType = vclib.PVSCSIControllerType
	} else if !vclib.CheckControllerSupported(cfg.Disk.SCSIControllerType) {
		glog.Errorf("%v is not a supported SCSI Controller type. Please configure 'lsilogic-sas' OR 'pvscsi'", cfg.Disk.SCSIControllerType)
		return nil, errors.New("Controller type not supported. Please configure 'lsilogic-sas' OR 'pvscsi'")
	}
	if cfg.Global.WorkingDir != "" {
		cfg.Global.WorkingDir = path.Clean(cfg.Global.WorkingDir)
	}
	if cfg.Global.RoundTripperCount == 0 {
		cfg.Global.RoundTripperCount = RoundTripperDefaultCount
	}
	if cfg.Global.VCenterPort == "" {
		cfg.Global.VCenterPort = "443"
	}
	if cfg.Global.VMUUID == "" {
		// This needs root privileges on the host, and will fail otherwise.
		cfg.Global.VMUUID, err = getvmUUID()
		if err != nil {
			glog.Errorf("Failed to get VM UUID. err: %+v", err)
			return nil, err
		}
	}
	vSphereConn := vclib.VSphereConnection{
		Username:          cfg.Global.User,
		Password:          cfg.Global.Password,
		Hostname:          cfg.Global.VCenterIP,
		Insecure:          cfg.Global.InsecureFlag,
		RoundTripperCount: cfg.Global.RoundTripperCount,
		Port:              cfg.Global.VCenterPort,
	}
	var instanceID string
	if cfg.Global.VMName == "" {
		// if VMName is not set in the cloud config file, each nodes (including worker nodes) need credentials to obtain VMName from vCenter
		glog.V(4).Infof("Cannot find VMName from cloud config file, start obtaining it from vCenter")
		// Create context
		ctx, cancel := context.WithCancel(context.TODO())
		defer cancel()
		err = vSphereConn.Connect(ctx)
		if err != nil {
			glog.Errorf("Failed to connect to vSphere")
			return nil, err
		}
		// Look this VM up by instance UUID and use its object name as the ID.
		dc, err := vclib.GetDatacenter(ctx, &vSphereConn, cfg.Global.Datacenter)
		if err != nil {
			return nil, err
		}
		vm, err := dc.GetVMByUUID(ctx, cfg.Global.VMUUID)
		if err != nil {
			return nil, err
		}
		vmName, err := vm.ObjectName(ctx)
		if err != nil {
			return nil, err
		}
		instanceID = vmName
	} else {
		instanceID = cfg.Global.VMName
	}
	vs := VSphere{
		conn:            &vSphereConn,
		cfg:             &cfg,
		localInstanceID: instanceID,
	}
	// Best-effort session logout when the provider object is collected.
	runtime.SetFinalizer(&vs, logout)
	return &vs, nil
}
// logout terminates the vCenter session held by the connection, if one was
// ever established. Registered as a finalizer for *VSphere in newVSphere.
func logout(vs *VSphere) {
	if vs.conn.GoVmomiClient == nil {
		return
	}
	vs.conn.GoVmomiClient.Logout(context.TODO())
}
// Instances returns an implementation of Instances for vSphere.
// The VSphere object itself implements the interface; true marks support.
func (vs *VSphere) Instances() (cloudprovider.Instances, bool) {
	return vs, true
}
// getLocalIP returns the non-loopback IPv4 addresses of the local host.
// Addresses whose interface MAC OUI identifies a VMware virtual NIC
// (vCenter or ESX prefixes) are reported as both ExternalIP and InternalIP.
func getLocalIP() ([]v1.NodeAddress, error) {
	addrs := []v1.NodeAddress{}
	ifaces, err := net.Interfaces()
	if err != nil {
		glog.Errorf("net.Interfaces() failed for NodeAddresses - %v", err)
		return nil, err
	}
	for _, i := range ifaces {
		localAddrs, err := i.Addrs()
		if err != nil {
			glog.Warningf("Failed to extract addresses for NodeAddresses - %v", err)
			continue
		}
		for _, addr := range localAddrs {
			ipnet, ok := addr.(*net.IPNet)
			if !ok || ipnet.IP.IsLoopback() || ipnet.IP.To4() == nil {
				continue
			}
			// Filter external IP by MAC address OUIs from vCenter and from ESX
			if strings.HasPrefix(i.HardwareAddr.String(), MacOuiVC) ||
				strings.HasPrefix(i.HardwareAddr.String(), MacOuiEsx) {
				v1helper.AddToNodeAddresses(&addrs,
					v1.NodeAddress{
						Type:    v1.NodeExternalIP,
						Address: ipnet.IP.String(),
					},
					v1.NodeAddress{
						Type:    v1.NodeInternalIP,
						Address: ipnet.IP.String(),
					},
				)
			}
			// Fix: the previous code logged an addressType variable that was
			// declared but never assigned, so it always printed the zero
			// value; log only the discovered IP.
			glog.V(4).Infof("Find local IP address %v", ipnet.IP.String())
		}
	}
	return addrs, nil
}
// getVMByName resolves nodeName to a VirtualMachine managed object by
// looking it up under the configured working directory in the configured
// datacenter.
func (vs *VSphere) getVMByName(ctx context.Context, nodeName k8stypes.NodeName) (*vclib.VirtualMachine, error) {
	datacenter, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter)
	if err != nil {
		return nil, err
	}
	inventoryPath := vs.cfg.Global.WorkingDir + "/" + nodeNameToVMName(nodeName)
	return datacenter.GetVMByPath(ctx, inventoryPath)
}
// NodeAddresses is an implementation of Instances.NodeAddresses.
// For the local node it inspects local interfaces; for any other node it
// queries vCenter for the VM's guest.net property and reports the IPv4
// addresses on the configured public network as External and Internal IPs.
func (vs *VSphere) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) {
	// Get local IP addresses if node is local node
	if vs.localInstanceID == nodeNameToVMName(nodeName) {
		return getLocalIP()
	}
	addrs := []v1.NodeAddress{}
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Ensure client is logged in and session is valid
	err := vs.conn.Connect(ctx)
	if err != nil {
		return nil, err
	}
	vm, err := vs.getVMByName(ctx, nodeName)
	if err != nil {
		glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err)
		return nil, err
	}
	// Fetch only the guest.net property to keep the vCenter query cheap.
	vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*vclib.VirtualMachine{vm}, []string{"guest.net"})
	if err != nil {
		glog.Errorf("Failed to get VM Managed object with property guest.net for node: %q. err: +%v", nodeNameToVMName(nodeName), err)
		return nil, err
	}
	// retrieve VM's ip(s)
	for _, v := range vmMoList[0].Guest.Net {
		if vs.cfg.Network.PublicNetwork == v.Network {
			for _, ip := range v.IpAddress {
				// IPv4 addresses only.
				if net.ParseIP(ip).To4() != nil {
					v1helper.AddToNodeAddresses(&addrs,
						v1.NodeAddress{
							Type:    v1.NodeExternalIP,
							Address: ip,
						}, v1.NodeAddress{
							Type:    v1.NodeInternalIP,
							Address: ip,
						},
					)
				}
			}
		}
	}
	return addrs, nil
}
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (vs *VSphere) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
	// The VM name is the final element of the provider ID path.
	return vs.NodeAddresses(vmNameToNodeName(path.Base(providerID)))
}
// AddSSHKeyToAllInstances add SSH key to all instances.
// Not supported by the vSphere provider; always returns cloudprovider.NotImplemented.
func (vs *VSphere) AddSSHKeyToAllInstances(user string, keyData []byte) error {
	return cloudprovider.NotImplemented
}
// CurrentNodeName gives the current node name.
// The hostname argument is ignored; the name is derived from the local VM instance ID.
func (vs *VSphere) CurrentNodeName(hostname string) (k8stypes.NodeName, error) {
	return vmNameToNodeName(vs.localInstanceID), nil
}
// nodeNameToVMName maps a NodeName to the vmware infrastructure name.
// The mapping is a plain string conversion: node names and VM names are identical.
func nodeNameToVMName(nodeName k8stypes.NodeName) string {
	return string(nodeName)
}
// vmNameToNodeName maps a vmware infrastructure name to a NodeName.
// Inverse of nodeNameToVMName; a plain string conversion.
func vmNameToNodeName(vmName string) k8stypes.NodeName {
	return k8stypes.NodeName(vmName)
}
// ExternalID returns the cloud provider ID of the node with the specified Name (deprecated).
// Delegates to InstanceID.
func (vs *VSphere) ExternalID(nodeName k8stypes.NodeName) (string, error) {
	return vs.InstanceID(nodeName)
}
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
// Not implemented for this provider version; always returns (false, NotImplemented).
func (vs *VSphere) InstanceExistsByProviderID(providerID string) (bool, error) {
	return false, cloudprovider.NotImplemented
}
// InstanceID returns the cloud provider ID of the node with the specified Name.
// For the local node the ID is WorkingDir/localInstanceID; otherwise the VM is
// looked up in vCenter and its inventory path (prefixed with "/") is returned,
// but only when the VM is powered on. Returns cloudprovider.InstanceNotFound
// when the VM does not exist.
func (vs *VSphere) InstanceID(nodeName k8stypes.NodeName) (string, error) {
	if vs.localInstanceID == nodeNameToVMName(nodeName) {
		return vs.cfg.Global.WorkingDir + "/" + vs.localInstanceID, nil
	}
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Ensure client is logged in and session is valid
	err := vs.conn.Connect(ctx)
	if err != nil {
		return "", err
	}
	vm, err := vs.getVMByName(ctx, nodeName)
	if err != nil {
		if vclib.IsNotFound(err) {
			return "", cloudprovider.InstanceNotFound
		}
		glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err)
		return "", err
	}
	// Only an active (powered-on) VM yields a valid instance ID.
	isActive, err := vm.IsActive(ctx)
	if err != nil {
		glog.Errorf("Failed to check whether node %q is active. err: %+v.", nodeNameToVMName(nodeName), err)
		return "", err
	}
	if isActive {
		return "/" + vm.InventoryPath, nil
	}
	return "", fmt.Errorf("The node %q is not active", nodeNameToVMName(nodeName))
}
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
// vSphere has no instance-type concept; always returns the empty string.
func (vs *VSphere) InstanceTypeByProviderID(providerID string) (string, error) {
	return "", nil
}
// InstanceType returns the type of the specified node.
// vSphere has no instance-type concept; always returns the empty string.
func (vs *VSphere) InstanceType(name k8stypes.NodeName) (string, error) {
	return "", nil
}
// Clusters returns the Clusters interface for vSphere.
// NOTE(review): returns (nil, true) — the "supported" flag is true while the
// implementation is nil; callers must tolerate a nil Clusters. Confirm this is
// intentional before relying on it.
func (vs *VSphere) Clusters() (cloudprovider.Clusters, bool) {
	return nil, true
}
// ProviderName returns the cloud provider ID.
func (vs *VSphere) ProviderName() string {
	return ProviderName
}
// LoadBalancer returns an implementation of LoadBalancer for vSphere.
// Load balancers are not supported; always returns (nil, false).
func (vs *VSphere) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	return nil, false
}
// Zones returns an implementation of Zones for Google vSphere.
// Zones are not supported; always returns (nil, false).
func (vs *VSphere) Zones() (cloudprovider.Zones, bool) {
	glog.V(1).Info("The vSphere cloud provider does not support zones")
	return nil, false
}
// Routes returns a false since the interface is not supported for vSphere.
func (vs *VSphere) Routes() (cloudprovider.Routes, bool) {
	return nil, false
}
// ScrubDNS filters DNS settings for pods.
// vSphere performs no filtering; the inputs are returned unchanged.
func (vs *VSphere) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
	return nameservers, searches
}
// AttachDisk attaches given virtual disk volume to the compute running kubelet.
// An empty nodeName means the local node. The elapsed time and outcome are
// recorded via vclib.RecordvSphereMetric. Returns the UUID of the attached disk.
func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskUUID string, err error) {
	attachDiskInternal := func(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskUUID string, err error) {
		if nodeName == "" {
			nodeName = vmNameToNodeName(vs.localInstanceID)
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Ensure client is logged in and session is valid
		err = vs.conn.Connect(ctx)
		if err != nil {
			return "", err
		}
		vm, err := vs.getVMByName(ctx, nodeName)
		if err != nil {
			glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err)
			return "", err
		}
		diskUUID, err = vm.AttachDisk(ctx, vmDiskPath, &vclib.VolumeOptions{SCSIControllerType: vclib.PVSCSIControllerType, StoragePolicyID: storagePolicyID})
		if err != nil {
			glog.Errorf("Failed to attach disk: %s for node: %s. err: +%v", vmDiskPath, nodeNameToVMName(nodeName), err)
			return "", err
		}
		return diskUUID, nil
	}
	requestTime := time.Now()
	diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyID, nodeName)
	vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err)
	return diskUUID, err
}
// DetachDisk detaches given virtual disk volume from the compute running kubelet.
// An empty nodeName means the local node. A node that no longer exists is
// treated as success (the disk is necessarily detached). The elapsed time and
// outcome are recorded via vclib.RecordvSphereMetric.
func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error {
	detachDiskInternal := func(volPath string, nodeName k8stypes.NodeName) error {
		if nodeName == "" {
			nodeName = vmNameToNodeName(vs.localInstanceID)
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Ensure client is logged in and session is valid
		err := vs.conn.Connect(ctx)
		if err != nil {
			return err
		}
		vm, err := vs.getVMByName(ctx, nodeName)
		if err != nil {
			// If node doesn't exist, disk is already detached from node.
			if vclib.IsNotFound(err) {
				glog.Infof("Node %q does not exist, disk %s is already detached from node.", nodeNameToVMName(nodeName), volPath)
				return nil
			}
			glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err)
			return err
		}
		err = vm.DetachDisk(ctx, volPath)
		if err != nil {
			glog.Errorf("Failed to detach disk: %s for node: %s. err: +%v", volPath, nodeNameToVMName(nodeName), err)
			return err
		}
		return nil
	}
	requestTime := time.Now()
	err := detachDiskInternal(volPath, nodeName)
	// Fix: record the actual operation outcome. Previously nil was passed
	// unconditionally, so detach failures were never reflected in metrics
	// (AttachDisk, DiskIsAttached etc. all record err).
	vclib.RecordvSphereMetric(vclib.OperationDetachVolume, requestTime, err)
	return err
}
// DiskIsAttached returns if disk is attached to the VM using controllers supported by the plugin.
// An empty nodeName means the local node. A node that no longer exists is
// reported as "not attached" without error. The elapsed time and outcome are
// recorded via vclib.RecordvSphereMetric.
func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error) {
	diskIsAttachedInternal := func(volPath string, nodeName k8stypes.NodeName) (bool, error) {
		var vSphereInstance string
		if nodeName == "" {
			vSphereInstance = vs.localInstanceID
			nodeName = vmNameToNodeName(vSphereInstance)
		} else {
			vSphereInstance = nodeNameToVMName(nodeName)
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Ensure client is logged in and session is valid
		err := vs.conn.Connect(ctx)
		if err != nil {
			return false, err
		}
		vm, err := vs.getVMByName(ctx, nodeName)
		if err != nil {
			if vclib.IsNotFound(err) {
				glog.Warningf("Node %q does not exist, vsphere CP will assume disk %v is not attached to it.", nodeName, volPath)
				// make the disk as detached and return false without error.
				return false, nil
			}
			glog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err)
			return false, err
		}
		attached, err := vm.IsDiskAttached(ctx, volPath)
		if err != nil {
			glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached on node %q",
				volPath,
				vSphereInstance)
		}
		// On error, attached is returned alongside err so the caller decides.
		return attached, err
	}
	requestTime := time.Now()
	isAttached, err := diskIsAttachedInternal(volPath, nodeName)
	vclib.RecordvSphereMetric(vclib.OperationDiskIsAttached, requestTime, err)
	return isAttached, err
}
// DisksAreAttached returns if disks are attached to the VM using controllers supported by the plugin.
// Each requested volume path is first canonicalized (and given a .vmdk
// extension when missing) before the batched attachment check is issued.
// The result maps each node to a per-volume attachment flag.
func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
	disksAreAttachedInternal := func(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
		attached := make(map[k8stypes.NodeName]map[string]bool)
		// Nothing to check: return an empty (non-nil) result.
		if len(nodeVolumes) == 0 {
			return attached, nil
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Ensure client is logged in and session is valid
		err := vs.conn.Connect(ctx)
		if err != nil {
			return nil, err
		}
		dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter)
		if err != nil {
			return nil, err
		}
		vmVolumes := make(map[string][]string)
		for nodeName, volPaths := range nodeVolumes {
			for i, volPath := range volPaths {
				// Get the canonical volume path for volPath.
				canonicalVolumePath, err := getcanonicalVolumePath(ctx, dc, volPath)
				if err != nil {
					glog.Errorf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
					return nil, err
				}
				// Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map
				if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" {
					canonicalVolumePath += ".vmdk"
				}
				// Note: this mutates the caller's slice in place.
				volPaths[i] = canonicalVolumePath
			}
			vmVolumes[nodeNameToVMName(nodeName)] = volPaths
		}
		// Check if the disks are attached to their respective nodes
		disksAttachedList, err := dc.CheckDisksAttached(ctx, vmVolumes)
		if err != nil {
			return nil, err
		}
		// Re-key the result from VM names back to node names.
		for vmName, volPaths := range disksAttachedList {
			attached[vmNameToNodeName(vmName)] = volPaths
		}
		return attached, nil
	}
	requestTime := time.Now()
	attached, err := disksAreAttachedInternal(nodeVolumes)
	vclib.RecordvSphereMetric(vclib.OperationDisksAreAttached, requestTime, err)
	return attached, err
}
// CreateVolume creates a volume of given size (in KiB) and return the volume path.
// If the volumeOptions.Datastore is part of datastore cluster for example - [DatastoreCluster/sharedVmfs-0] then
// return value will be [DatastoreCluster/sharedVmfs-0] kubevols/<volume-name>.vmdk
// else return value will be [sharedVmfs-0] kubevols/<volume-name>.vmdk
func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) {
	glog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions)
	createVolumeInternal := func(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) {
		var datastore string
		// Default datastore is the datastore in the vSphere config file that is used to initialize vSphere cloud provider.
		if volumeOptions.Datastore == "" {
			datastore = vs.cfg.Global.Datastore
		} else {
			datastore = volumeOptions.Datastore
		}
		datastore = strings.TrimSpace(datastore)
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Ensure client is logged in and session is valid
		err = vs.conn.Connect(ctx)
		if err != nil {
			return "", err
		}
		dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter)
		if err != nil {
			return "", err
		}
		// vmOptions stays nil unless a storage policy / VSAN profile is given;
		// that branch also (once per process) starts the dummy-VM cleanup loop.
		var vmOptions *vclib.VMOptions
		if volumeOptions.VSANStorageProfileData != "" || volumeOptions.StoragePolicyName != "" {
			// Acquire a read lock to ensure multiple PVC requests can be processed simultaneously.
			cleanUpDummyVMLock.RLock()
			defer cleanUpDummyVMLock.RUnlock()
			// Create a new background routine that will delete any dummy VM's that are left stale.
			// This routine will get executed for every 5 minutes and gets initiated only once in its entire lifetime.
			cleanUpRoutineInitLock.Lock()
			if !cleanUpRoutineInitialized {
				glog.V(1).Infof("Starting a clean up routine to remove stale dummy VM's")
				go vs.cleanUpDummyVMs(DummyVMPrefixName)
				cleanUpRoutineInitialized = true
			}
			cleanUpRoutineInitLock.Unlock()
			vmOptions, err = vs.setVMOptions(ctx, dc)
			if err != nil {
				glog.Errorf("Failed to set VM options requires to create a vsphere volume. err: %+v", err)
				return "", err
			}
		}
		// With a storage policy but no explicit datastore, pick a
		// policy-compatible datastore instead of the configured default.
		if volumeOptions.StoragePolicyName != "" && volumeOptions.Datastore == "" {
			datastore, err = getPbmCompatibleDatastore(ctx, dc.Client(), volumeOptions.StoragePolicyName, vmOptions.VMFolder)
			if err != nil {
				glog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err)
				return "", err
			}
		}
		ds, err := dc.GetDatastoreByName(ctx, datastore)
		if err != nil {
			return "", err
		}
		volumeOptions.Datastore = datastore
		// All volumes live under the shared "kubevols" directory on the datastore.
		kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/"
		err = ds.CreateDirectory(ctx, kubeVolsPath, false)
		if err != nil && err != vclib.ErrFileAlreadyExist {
			glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err)
			return "", err
		}
		volumePath := kubeVolsPath + volumeOptions.Name + ".vmdk"
		disk := diskmanagers.VirtualDisk{
			DiskPath:      volumePath,
			VolumeOptions: volumeOptions,
			VMOptions:     vmOptions,
		}
		volumePath, err = disk.Create(ctx, ds)
		if err != nil {
			glog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, datastore, err)
			return "", err
		}
		// Get the canonical path for the volume path.
		canonicalVolumePath, err = getcanonicalVolumePath(ctx, dc, volumePath)
		if err != nil {
			glog.Errorf("Failed to get canonical vsphere volume path for volume: %s with volumeOptions: %+v on datastore: %s. err: %+v", volumePath, volumeOptions, datastore, err)
			return "", err
		}
		if filepath.Base(datastore) != datastore {
			// If datastore is within cluster, add cluster path to the volumePath
			canonicalVolumePath = strings.Replace(canonicalVolumePath, filepath.Base(datastore), datastore, 1)
		}
		return canonicalVolumePath, nil
	}
	requestTime := time.Now()
	canonicalVolumePath, err = createVolumeInternal(volumeOptions)
	vclib.RecordCreateVolumeMetric(volumeOptions, requestTime, err)
	glog.V(1).Infof("The canonical volume path for the newly created vSphere volume is %q", canonicalVolumePath)
	return canonicalVolumePath, err
}
// DeleteVolume deletes a volume given volume name.
// The disk is removed from the globally configured datastore; elapsed time
// and outcome are recorded via vclib.RecordvSphereMetric.
func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
	glog.V(1).Infof("Starting to delete vSphere volume with vmDiskPath: %s", vmDiskPath)
	deleteVolumeInternal := func(vmDiskPath string) error {
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Ensure client is logged in and session is valid
		err := vs.conn.Connect(ctx)
		if err != nil {
			return err
		}
		dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter)
		if err != nil {
			return err
		}
		ds, err := dc.GetDatastoreByName(ctx, vs.cfg.Global.Datastore)
		if err != nil {
			return err
		}
		// Zero-value options suffice for deletion; only the path matters.
		disk := diskmanagers.VirtualDisk{
			DiskPath:      vmDiskPath,
			VolumeOptions: &vclib.VolumeOptions{},
			VMOptions:     &vclib.VMOptions{},
		}
		err = disk.Delete(ctx, ds)
		if err != nil {
			glog.Errorf("Failed to delete vsphere volume with vmDiskPath: %s. err: %+v", vmDiskPath, err)
		}
		return err
	}
	requestTime := time.Now()
	err := deleteVolumeInternal(vmDiskPath)
	vclib.RecordvSphereMetric(vclib.OperationDeleteVolume, requestTime, err)
	return err
}
// HasClusterID returns true if the cluster has a clusterID.
// Always true for vSphere.
func (vs *VSphere) HasClusterID() bool {
	return true
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"errors"
"fmt"
"io"
"net"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"gopkg.in/gcfg.v1"
"github.com/golang/glog"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
"k8s.io/kubernetes/pkg/controller"
)
// VSphere Cloud Provider constants
const (
	// ProviderName is the registered name of this cloud provider.
	ProviderName = "vsphere"
	// VolDir is the datastore directory under which volumes are created.
	VolDir = "kubevols"
	// RoundTripperDefaultCount is the default SOAP round-trip count (retries = count - 1).
	RoundTripperDefaultCount = 3
	// DummyVMPrefixName prefixes the dummy VMs used during policy-based volume creation.
	DummyVMPrefixName = "vsphere-k8s"
	// MacOuiVC / MacOuiEsx are the MAC OUIs assigned by vCenter and ESX,
	// used to recognize vSphere-managed NICs on the local host.
	MacOuiVC  = "00:50:56"
	MacOuiEsx = "00:0c:29"
	// CleanUpDummyVMRoutineInterval is the dummy-VM cleanup period (minutes).
	CleanUpDummyVMRoutineInterval = 5
)

// cleanUpRoutineInitialized guards one-time start of the dummy-VM cleanup goroutine.
var cleanUpRoutineInitialized = false

// datastoreFolderIDMap caches folder IDs per datastore.
var datastoreFolderIDMap = make(map[string]map[string]string)

// cleanUpRoutineInitLock serializes initialization of the cleanup goroutine.
var cleanUpRoutineInitLock sync.Mutex

// cleanUpDummyVMLock lets volume creation proceed concurrently (read lock)
// while the cleanup routine takes the write lock.
var cleanUpDummyVMLock sync.RWMutex
// VSphere is an implementation of cloud provider Interface for VSphere.
type VSphere struct {
	cfg *VSphereConfig
	// hostName is the local machine's hostname, captured at construction.
	hostName string
	// Maps the VSphere IP address to VSphereInstance
	vsphereInstanceMap map[string]*VSphereInstance
	// Responsible for managing discovery of k8s node, their location etc.
	nodeManager *NodeManager
	// vmUUID is the instance UUID of the VM this process runs on.
	vmUUID string
}
// VSphereInstance represents a vSphere instance where one or more kubernetes nodes are running.
type VSphereInstance struct {
	// conn is the live connection to this vCenter.
	conn *vclib.VSphereConnection
	// cfg holds the per-vCenter configuration.
	cfg *VirtualCenterConfig
}
// VirtualCenterConfig is a structure that represents Virtual Center configuration.
type VirtualCenterConfig struct {
	// vCenter username.
	User string `gcfg:"user"`
	// vCenter password in clear text.
	Password string `gcfg:"password"`
	// vCenter port.
	VCenterPort string `gcfg:"port"`
	// Datacenter in which VMs are located.
	Datacenters string `gcfg:"datacenters"`
	// Soap round tripper count (retries = RoundTripper - 1)
	RoundTripperCount uint `gcfg:"soap-roundtrip-count"`
}
// VSphereConfig is a structure that represents the content of vsphere.conf file.
// Users specify the configuration of one or more Virtual Centers in vsphere.conf where
// the Kubernetes master and worker nodes are running.
type VSphereConfig struct {
	Global struct {
		// vCenter username.
		User string `gcfg:"user"`
		// vCenter password in clear text.
		Password string `gcfg:"password"`
		// Deprecated. Use VirtualCenter to specify multiple vCenter Servers.
		// vCenter IP.
		VCenterIP string `gcfg:"server"`
		// vCenter port.
		VCenterPort string `gcfg:"port"`
		// True if vCenter uses self-signed cert.
		InsecureFlag bool `gcfg:"insecure-flag"`
		// Datacenter in which VMs are located.
		// Deprecated. Use "datacenters" instead.
		Datacenter string `gcfg:"datacenter"`
		// Datacenter in which VMs are located.
		Datacenters string `gcfg:"datacenters"`
		// Datastore in which vmdks are stored.
		// Deprecated. See Workspace.DefaultDatastore
		DefaultDatastore string `gcfg:"datastore"`
		// WorkingDir is path where VMs can be found. Also used to create dummy VMs.
		// Deprecated.
		WorkingDir string `gcfg:"working-dir"`
		// Soap round tripper count (retries = RoundTripper - 1)
		RoundTripperCount uint `gcfg:"soap-roundtrip-count"`
		// Deprecated as the virtual machines will be automatically discovered.
		// VMUUID is the VM Instance UUID of virtual machine which can be retrieved from instanceUuid
		// property in VmConfigInfo, or also set as vc.uuid in VMX file.
		// If not set, will be fetched from the machine via sysfs (requires root)
		VMUUID string `gcfg:"vm-uuid"`
		// Deprecated as virtual machine will be automatically discovered.
		// VMName is the VM name of virtual machine
		// Combining the WorkingDir and VMName can form a unique InstanceID.
		// When vm-name is set, no username/password is required on worker nodes.
		VMName string `gcfg:"vm-name"`
	}
	// VirtualCenter maps each vCenter server address to its configuration.
	VirtualCenter map[string]*VirtualCenterConfig
	Network struct {
		// PublicNetwork is name of the network the VMs are joined to.
		PublicNetwork string `gcfg:"public-network"`
	}
	Disk struct {
		// SCSIControllerType defines SCSI controller to be used.
		// Fix: tag was misspelled `dcfg:` which gcfg ignores; parsing still
		// worked only via gcfg's case-insensitive field-name fallback.
		SCSIControllerType string `gcfg:"scsicontrollertype"`
	}
	// Endpoint used to create volumes
	Workspace struct {
		VCenterIP        string `gcfg:"server"`
		Datacenter       string `gcfg:"datacenter"`
		Folder           string `gcfg:"folder"`
		DefaultDatastore string `gcfg:"default-datastore"`
		ResourcePoolPath string `gcfg:"resourcepool-path"`
	}
}
// Volumes is the volume-management contract implemented by the vSphere provider.
type Volumes interface {
	// AttachDisk attaches given disk to given node. Current node
	// is used when nodeName is empty string.
	AttachDisk(vmDiskPath string, storagePolicyName string, nodeName k8stypes.NodeName) (diskUUID string, err error)
	// DetachDisk detaches given disk to given node. Current node
	// is used when nodeName is empty string.
	// Assumption: If node doesn't exist, disk is already detached from node.
	DetachDisk(volPath string, nodeName k8stypes.NodeName) error
	// DiskIsAttached checks if a disk is attached to the given node.
	// Assumption: If node doesn't exist, disk is not attached to the node.
	DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error)
	// DisksAreAttached checks if a list disks are attached to the given node.
	// Assumption: If node doesn't exist, disks are not attached to the node.
	DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error)
	// CreateVolume creates a new vmdk with specified parameters.
	CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath string, err error)
	// DeleteVolume deletes vmdk.
	DeleteVolume(vmDiskPath string) error
}
// readConfig parses the vSphere cloud config file and stores it into VSphereConfig.
// A nil reader is rejected with an error and a zero-value config.
func readConfig(config io.Reader) (VSphereConfig, error) {
	var cfg VSphereConfig
	if config == nil {
		return cfg, fmt.Errorf("no vSphere cloud provider config file given")
	}
	return cfg, gcfg.ReadInto(&cfg, config)
}
// init registers vSphere metrics and the provider factory with the
// cloud-provider registry.
func init() {
	vclib.RegisterMetrics()
	cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
		// If vSphere.conf file is not present then it is worker node.
		if config == nil {
			return newWorkerNode()
		}
		cfg, err := readConfig(config)
		if err != nil {
			return nil, err
		}
		return newControllerNode(cfg)
	})
}
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider.
// vSphere needs no client, so this is a no-op.
func (vs *VSphere) Initialize(clientBuilder controller.ControllerClientBuilder) {
}
// SetInformers initializes Node informers. On worker nodes (vs.cfg == nil)
// this is a no-op; on the controller it registers add/delete callbacks so the
// node manager can track node lifecycle.
func (vs *VSphere) SetInformers(informerFactory informers.SharedInformerFactory) {
	if vs.cfg == nil {
		return
	}
	// Only on controller node it is required to register listeners.
	// Register callbacks for node updates
	glog.V(4).Infof("Setting up node informers for vSphere Cloud Provider")
	nodeInformer := informerFactory.Core().V1().Nodes().Informer()
	nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    vs.NodeAdded,
		DeleteFunc: vs.NodeDeleted,
	})
	glog.V(4).Infof("Node informers in vSphere cloud provider initialized")
}
// newWorkerNode creates a new worker node interface and returns it.
// A worker node has no vsphere.conf, so only the hostname and the local VM
// UUID are captured.
func newWorkerNode() (*VSphere, error) {
	var err error
	vs := VSphere{}
	vs.hostName, err = os.Hostname()
	if err != nil {
		glog.Errorf("Failed to get hostname. err: %+v", err)
		return nil, err
	}
	// Consistency fix: use the getVMUUID seam (like newControllerNode does)
	// so tests can override UUID discovery here as well.
	vs.vmUUID, err = getVMUUID()
	if err != nil {
		glog.Errorf("Failed to get uuid. err: %+v", err)
		return nil, err
	}
	return &vs, nil
}
// populateVsphereInstanceMap builds the vCenter-server -> VSphereInstance map
// from the parsed config. It supports both the legacy single-vCenter format
// (Global section only) and the newer multi-vCenter format (VirtualCenter
// sections), validating required fields in each case.
func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance, error) {
	vsphereInstanceMap := make(map[string]*VSphereInstance)
	// Check if the vsphere.conf is in old format. In this
	// format the cfg.VirtualCenter will be nil or empty.
	if cfg.VirtualCenter == nil || len(cfg.VirtualCenter) == 0 {
		glog.V(4).Infof("Config is not per virtual center and is in old format.")
		if cfg.Global.User == "" {
			glog.Error("Global.User is empty!")
			return nil, errors.New("Global.User is empty!")
		}
		if cfg.Global.Password == "" {
			glog.Error("Global.Password is empty!")
			return nil, errors.New("Global.Password is empty!")
		}
		if cfg.Global.WorkingDir == "" {
			glog.Error("Global.WorkingDir is empty!")
			return nil, errors.New("Global.WorkingDir is empty!")
		}
		if cfg.Global.VCenterIP == "" {
			glog.Error("Global.VCenterIP is empty!")
			return nil, errors.New("Global.VCenterIP is empty!")
		}
		if cfg.Global.Datacenter == "" {
			glog.Error("Global.Datacenter is empty!")
			return nil, errors.New("Global.Datacenter is empty!")
		}
		// Mirror the legacy Global fields into the Workspace section so the
		// rest of the provider only needs to read Workspace.
		cfg.Workspace.VCenterIP = cfg.Global.VCenterIP
		cfg.Workspace.Datacenter = cfg.Global.Datacenter
		cfg.Workspace.Folder = cfg.Global.WorkingDir
		cfg.Workspace.DefaultDatastore = cfg.Global.DefaultDatastore
		vcConfig := VirtualCenterConfig{
			User:              cfg.Global.User,
			Password:          cfg.Global.Password,
			VCenterPort:       cfg.Global.VCenterPort,
			Datacenters:       cfg.Global.Datacenter,
			RoundTripperCount: cfg.Global.RoundTripperCount,
		}
		vSphereConn := vclib.VSphereConnection{
			Username:          vcConfig.User,
			Password:          vcConfig.Password,
			Hostname:          cfg.Global.VCenterIP,
			Insecure:          cfg.Global.InsecureFlag,
			RoundTripperCount: vcConfig.RoundTripperCount,
			Port:              vcConfig.VCenterPort,
		}
		vsphereIns := VSphereInstance{
			conn: &vSphereConn,
			cfg:  &vcConfig,
		}
		vsphereInstanceMap[cfg.Global.VCenterIP] = &vsphereIns
	} else {
		if cfg.Workspace.VCenterIP == "" || cfg.Workspace.Folder == "" || cfg.Workspace.Datacenter == "" {
			msg := fmt.Sprintf("All fields in workspace are mandatory."+
				" vsphere.conf does not have the workspace specified correctly. cfg.Workspace: %+v", cfg.Workspace)
			glog.Error(msg)
			return nil, errors.New(msg)
		}
		for vcServer, vcConfig := range cfg.VirtualCenter {
			glog.V(4).Infof("Initializing vc server %s", vcServer)
			if vcServer == "" {
				glog.Error("vsphere.conf does not have the VirtualCenter IP address specified")
				return nil, errors.New("vsphere.conf does not have the VirtualCenter IP address specified")
			}
			// Per-vCenter credentials fall back to the Global section.
			if vcConfig.User == "" {
				vcConfig.User = cfg.Global.User
			}
			if vcConfig.Password == "" {
				vcConfig.Password = cfg.Global.Password
			}
			if vcConfig.User == "" {
				msg := fmt.Sprintf("vcConfig.User is empty for vc %s!", vcServer)
				glog.Error(msg)
				return nil, errors.New(msg)
			}
			if vcConfig.Password == "" {
				msg := fmt.Sprintf("vcConfig.Password is empty for vc %s!", vcServer)
				glog.Error(msg)
				return nil, errors.New(msg)
			}
			if vcConfig.VCenterPort == "" {
				vcConfig.VCenterPort = cfg.Global.VCenterPort
			}
			if vcConfig.Datacenters == "" {
				if cfg.Global.Datacenters != "" {
					vcConfig.Datacenters = cfg.Global.Datacenters
				} else {
					// cfg.Global.Datacenter is deprecated, so giving it the last preference.
					vcConfig.Datacenters = cfg.Global.Datacenter
				}
			}
			if vcConfig.RoundTripperCount == 0 {
				vcConfig.RoundTripperCount = cfg.Global.RoundTripperCount
			}
			vSphereConn := vclib.VSphereConnection{
				Username:          vcConfig.User,
				Password:          vcConfig.Password,
				Hostname:          vcServer,
				Insecure:          cfg.Global.InsecureFlag,
				RoundTripperCount: vcConfig.RoundTripperCount,
				Port:              vcConfig.VCenterPort,
			}
			vsphereIns := VSphereInstance{
				conn: &vSphereConn,
				cfg:  vcConfig,
			}
			vsphereInstanceMap[vcServer] = &vsphereIns
		}
	}
	return vsphereInstanceMap, nil
}
// getVMUUID allows tests to override GetVMUUID.
var getVMUUID = GetVMUUID
// newControllerNode creates a new Controller node interface and returns it.
// Validates/defaults the SCSI controller type, working dir, round-trip count
// and vCenter port, builds the per-vCenter instance map and node manager,
// and attaches a finalizer that logs out all vCenter sessions.
func newControllerNode(cfg VSphereConfig) (*VSphere, error) {
	var err error
	if cfg.Disk.SCSIControllerType == "" {
		cfg.Disk.SCSIControllerType = vclib.PVSCSIControllerType
	} else if !vclib.CheckControllerSupported(cfg.Disk.SCSIControllerType) {
		glog.Errorf("%v is not a supported SCSI Controller type. Please configure 'lsilogic-sas' OR 'pvscsi'", cfg.Disk.SCSIControllerType)
		return nil, errors.New("Controller type not supported. Please configure 'lsilogic-sas' OR 'pvscsi'")
	}
	if cfg.Global.WorkingDir != "" {
		cfg.Global.WorkingDir = path.Clean(cfg.Global.WorkingDir)
	}
	if cfg.Global.RoundTripperCount == 0 {
		cfg.Global.RoundTripperCount = RoundTripperDefaultCount
	}
	if cfg.Global.VCenterPort == "" {
		cfg.Global.VCenterPort = "443"
	}
	vsphereInstanceMap, err := populateVsphereInstanceMap(&cfg)
	if err != nil {
		return nil, err
	}
	vs := VSphere{
		vsphereInstanceMap: vsphereInstanceMap,
		nodeManager: &NodeManager{
			vsphereInstanceMap: vsphereInstanceMap,
			nodeInfoMap:        make(map[string]*NodeInfo),
			registeredNodes:    make(map[string]*v1.Node),
		},
		cfg: &cfg,
	}
	vs.hostName, err = os.Hostname()
	if err != nil {
		glog.Errorf("Failed to get hostname. err: %+v", err)
		return nil, err
	}
	vs.vmUUID, err = getVMUUID()
	if err != nil {
		glog.Errorf("Failed to get uuid. err: %+v", err)
		return nil, err
	}
	// Best-effort session cleanup when the VSphere object is garbage-collected.
	runtime.SetFinalizer(&vs, logout)
	return &vs, nil
}
// logout terminates every active vCenter session held in the instance map.
// Instances that never established a client connection are skipped.
func logout(vs *VSphere) {
	for _, instance := range vs.vsphereInstanceMap {
		if instance.conn.Client == nil {
			continue
		}
		instance.conn.Logout(context.TODO())
	}
}
// Instances returns an implementation of Instances for vSphere.
// The VSphere object itself implements the interface.
func (vs *VSphere) Instances() (cloudprovider.Instances, bool) {
	return vs, true
}
// getLocalIP collects the node addresses of the local machine. A non-loopback
// IPv4 address is reported (as both ExternalIP and InternalIP) only when its
// interface's MAC OUI identifies a vCenter- or ESX-managed NIC.
func getLocalIP() ([]v1.NodeAddress, error) {
	addrs := []v1.NodeAddress{}
	ifaces, err := net.Interfaces()
	if err != nil {
		glog.Errorf("net.Interfaces() failed for NodeAddresses - %v", err)
		return nil, err
	}
	for _, i := range ifaces {
		localAddrs, err := i.Addrs()
		if err != nil {
			glog.Warningf("Failed to extract addresses for NodeAddresses - %v", err)
			continue
		}
		for _, addr := range localAddrs {
			ipnet, ok := addr.(*net.IPNet)
			if !ok || ipnet.IP.IsLoopback() || ipnet.IP.To4() == nil {
				continue
			}
			// Filter external IP by MAC address OUIs from vCenter and from ESX.
			if strings.HasPrefix(i.HardwareAddr.String(), MacOuiVC) ||
				strings.HasPrefix(i.HardwareAddr.String(), MacOuiEsx) {
				v1helper.AddToNodeAddresses(&addrs,
					v1.NodeAddress{
						Type:    v1.NodeExternalIP,
						Address: ipnet.IP.String(),
					},
					v1.NodeAddress{
						Type:    v1.NodeInternalIP,
						Address: ipnet.IP.String(),
					},
				)
				// Fix: the old log referenced an addressType variable that was
				// declared but never assigned, so it always printed the zero
				// value; log the accepted address instead.
				glog.V(4).Infof("Detected local IP address %s on interface %s", ipnet.IP.String(), i.Name)
			}
		}
	}
	return addrs, nil
}
// getVSphereInstance resolves the VSphereInstance responsible for the given
// node via the node manager's cache.
func (vs *VSphere) getVSphereInstance(nodeName k8stypes.NodeName) (*VSphereInstance, error) {
	instance, err := vs.nodeManager.GetVSphereInstance(nodeName)
	if err != nil {
		glog.Errorf("Cannot find node %q in cache. Node not found!!!", nodeName)
		return nil, err
	}
	return &instance, nil
}
// getVSphereInstanceForServer looks up the VSphereInstance for the given
// vCenter server address and ensures its session is connected before
// returning it.
func (vs *VSphere) getVSphereInstanceForServer(vcServer string, ctx context.Context) (*VSphereInstance, error) {
	vsphereIns, ok := vs.vsphereInstanceMap[vcServer]
	if !ok {
		glog.Errorf("cannot find vcServer %q in cache. VC not found!!!", vcServer)
		// Idiom fix: fmt.Errorf instead of errors.New(fmt.Sprintf(...)).
		return nil, fmt.Errorf("Cannot find node %q in vsphere configuration map", vcServer)
	}
	// Ensure client is logged in and session is valid
	err := vsphereIns.conn.Connect(ctx)
	if err != nil {
		glog.Errorf("failed connecting to vcServer %q with error %+v", vcServer, err)
		return nil, err
	}
	return vsphereIns, nil
}
// getVMFromNodeName fetches the cached VirtualMachine managed object for the
// given node from the node manager.
func (vs *VSphere) getVMFromNodeName(ctx context.Context, nodeName k8stypes.NodeName) (*vclib.VirtualMachine, error) {
	info, err := vs.nodeManager.GetNodeInfo(nodeName)
	if err != nil {
		return nil, err
	}
	return info.vm, nil
}
// NodeAddresses is an implementation of Instances.NodeAddresses.
// For the local node it reads addresses from the local interfaces; on worker
// nodes without config it returns InstanceNotFound. Otherwise the VM's
// guest.net is queried via vCenter and each IPv4 address on the configured
// public network is reported as both an external and internal address.
func (vs *VSphere) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) {
	// Get local IP addresses if node is local node
	if vs.hostName == convertToString(nodeName) {
		return getLocalIP()
	}
	if vs.cfg == nil {
		return nil, cloudprovider.InstanceNotFound
	}
	// Below logic can be executed only on master as VC details are present.
	addrs := []v1.NodeAddress{}
	// Create context
	// NOTE(review): this shadows the ctx parameter with a fresh Background
	// context, so caller cancellation is not propagated — confirm intent.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	vsi, err := vs.getVSphereInstance(nodeName)
	if err != nil {
		return nil, err
	}
	// Ensure client is logged in and session is valid
	err = vsi.conn.Connect(ctx)
	if err != nil {
		return nil, err
	}
	vm, err := vs.getVMFromNodeName(ctx, nodeName)
	if err != nil {
		glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
		return nil, err
	}
	vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*vclib.VirtualMachine{vm}, []string{"guest.net"})
	if err != nil {
		glog.Errorf("Failed to get VM Managed object with property guest.net for node: %q. err: +%v", convertToString(nodeName), err)
		return nil, err
	}
	// retrieve VM's ip(s)
	for _, v := range vmMoList[0].Guest.Net {
		if vs.cfg.Network.PublicNetwork == v.Network {
			for _, ip := range v.IpAddress {
				// Only IPv4 addresses are reported.
				if net.ParseIP(ip).To4() != nil {
					v1helper.AddToNodeAddresses(&addrs,
						v1.NodeAddress{
							Type:    v1.NodeExternalIP,
							Address: ip,
						}, v1.NodeAddress{
							Type:    v1.NodeInternalIP,
							Address: ip,
						},
					)
				}
			}
		}
	}
	return addrs, nil
}
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (vs *VSphere) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
	return vs.NodeAddresses(ctx, convertToK8sType(providerID))
}
// AddSSHKeyToAllInstances add SSH key to all instances.
// Not supported by the vSphere cloud provider; always returns
// cloudprovider.NotImplemented.
func (vs *VSphere) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
	return cloudprovider.NotImplemented
}
// CurrentNodeName gives the current node name.
// The hostname argument is ignored; the name cached at provider
// initialization (vs.hostName) is returned.
func (vs *VSphere) CurrentNodeName(ctx context.Context, hostname string) (k8stypes.NodeName, error) {
	return convertToK8sType(vs.hostName), nil
}
// convertToString converts a Kubernetes NodeName to its plain string form.
func convertToString(nodeName k8stypes.NodeName) string {
	return string(nodeName)
}
// convertToK8sType converts a VM name string to the Kubernetes NodeName type.
func convertToK8sType(vmName string) k8stypes.NodeName {
	return k8stypes.NodeName(vmName)
}
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
func (vs *VSphere) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
	var nodeName string
	nodes, err := vs.nodeManager.GetNodeDetails()
	if err != nil {
		glog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err)
		return false, err
	}
	// Map the providerID back to a registered node, matching either by VM UUID
	// (ProviderID is UUID for nodes v1.9.3+) or by raw node name.
	for _, node := range nodes {
		// ProviderID is UUID for nodes v1.9.3+
		if node.VMUUID == GetUUIDFromProviderID(providerID) || node.NodeName == providerID {
			nodeName = node.NodeName
			break
		}
	}
	if nodeName == "" {
		msg := fmt.Sprintf("Error while obtaining Kubernetes nodename for providerID %s.", providerID)
		return false, errors.New(msg)
	}
	// Existence is probed via InstanceID: success means the VM exists and is
	// powered on.
	// NOTE(review): when InstanceID returns cloudprovider.InstanceNotFound the
	// error is propagated as (false, err) rather than (false, nil); confirm
	// whether the controller manager expects (false, nil) for deleted VMs.
	_, err = vs.InstanceID(ctx, convertToK8sType(nodeName))
	if err == nil {
		return true, nil
	}
	return false, err
}
// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes.
// Not implemented for vSphere; always returns cloudprovider.NotImplemented.
func (vs *VSphere) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
	return false, cloudprovider.NotImplemented
}
// InstanceID returns the cloud provider ID of the node with the specified Name.
// On a ManagedObjectNotFound error the node is rediscovered through the node
// manager (vs.retry) and the lookup is attempted exactly once more.
func (vs *VSphere) InstanceID(ctx context.Context, nodeName k8stypes.NodeName) (string, error) {
	instanceIDInternal := func() (string, error) {
		// Local node: answer from the locally cached VM UUID.
		if vs.hostName == convertToString(nodeName) {
			return vs.vmUUID, nil
		}
		// Below logic can be performed only on master node where VC details are preset.
		if vs.cfg == nil {
			return "", fmt.Errorf("The current node can't detremine InstanceID for %q", convertToString(nodeName))
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		vsi, err := vs.getVSphereInstance(nodeName)
		if err != nil {
			return "", err
		}
		// Ensure client is logged in and session is valid
		err = vsi.conn.Connect(ctx)
		if err != nil {
			return "", err
		}
		vm, err := vs.getVMFromNodeName(ctx, nodeName)
		if err != nil {
			if err == vclib.ErrNoVMFound {
				return "", cloudprovider.InstanceNotFound
			}
			glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
			return "", err
		}
		isActive, err := vm.IsActive(ctx)
		if err != nil {
			glog.Errorf("Failed to check whether node %q is active. err: %+v.", convertToString(nodeName), err)
			return "", err
		}
		if isActive {
			// NOTE(review): this returns the local machine's vmUUID even when
			// nodeName refers to a different (remote) node — confirm whether
			// the remote node's own UUID should be returned here instead.
			return vs.vmUUID, nil
		}
		// Powered-off VMs are reported as not found.
		glog.Warningf("The VM: %s is not in %s state", convertToString(nodeName), vclib.ActivePowerState)
		return "", cloudprovider.InstanceNotFound
	}
	instanceID, err := instanceIDInternal()
	if err != nil {
		// Retry path: rediscover the node when the VM reference went stale.
		var isManagedObjectNotFoundError bool
		isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
		if isManagedObjectNotFoundError {
			if err == nil {
				glog.V(4).Infof("InstanceID: Found node %q", convertToString(nodeName))
				instanceID, err = instanceIDInternal()
			} else if err == vclib.ErrNoVMFound {
				return "", cloudprovider.InstanceNotFound
			}
		}
	}
	return instanceID, err
}
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
// vSphere has no instance-type concept; always returns the empty string.
func (vs *VSphere) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
	return "", nil
}
// InstanceType returns the type of the specified node.
// vSphere has no instance-type concept; always returns the empty string.
func (vs *VSphere) InstanceType(ctx context.Context, name k8stypes.NodeName) (string, error) {
	return "", nil
}
// Clusters returns the Clusters interface for vSphere.
// Note: the interface value returned is nil even though the boolean is true.
func (vs *VSphere) Clusters() (cloudprovider.Clusters, bool) {
	return nil, true
}
// ProviderName returns the cloud provider ID ("vsphere").
func (vs *VSphere) ProviderName() string {
	return ProviderName
}
// LoadBalancer returns an implementation of LoadBalancer for vSphere.
// Load balancing is not supported; always returns (nil, false).
func (vs *VSphere) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	return nil, false
}
// Zones returns an implementation of Zones for vSphere.
// Zones are not supported; logs at V(1) and returns (nil, false).
func (vs *VSphere) Zones() (cloudprovider.Zones, bool) {
	glog.V(1).Info("The vSphere cloud provider does not support zones")
	return nil, false
}
// Routes returns a false since the interface is not supported for vSphere.
func (vs *VSphere) Routes() (cloudprovider.Routes, bool) {
	return nil, false
}
// AttachDisk attaches given virtual disk volume to the compute running kubelet.
// An empty nodeName means the local node. On a stale VM reference
// (ManagedObjectNotFound) the node is rediscovered and the attach is retried
// once. The attach latency and result are recorded as vSphere metrics.
func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeName k8stypes.NodeName) (diskUUID string, err error) {
	attachDiskInternal := func(vmDiskPath string, storagePolicyName string, nodeName k8stypes.NodeName) (diskUUID string, err error) {
		if nodeName == "" {
			nodeName = convertToK8sType(vs.hostName)
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		vsi, err := vs.getVSphereInstance(nodeName)
		if err != nil {
			return "", err
		}
		// Ensure client is logged in and session is valid
		err = vsi.conn.Connect(ctx)
		if err != nil {
			return "", err
		}
		vm, err := vs.getVMFromNodeName(ctx, nodeName)
		if err != nil {
			glog.Errorf("Failed to get VM object for node: %q. err: %+v", convertToString(nodeName), err)
			return "", err
		}
		diskUUID, err = vm.AttachDisk(ctx, vmDiskPath, &vclib.VolumeOptions{SCSIControllerType: vclib.PVSCSIControllerType, StoragePolicyName: storagePolicyName})
		if err != nil {
			glog.Errorf("Failed to attach disk: %s for node: %s. err: %+v", vmDiskPath, convertToString(nodeName), err)
			return "", err
		}
		return diskUUID, nil
	}
	requestTime := time.Now()
	diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName)
	if err != nil {
		var isManagedObjectNotFoundError bool
		isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
		if isManagedObjectNotFoundError {
			if err == nil {
				glog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName))
				diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName)
				// Fixed: the format string previously had two verbs for three
				// arguments; include the node verb so every argument is consumed.
				glog.V(4).Infof("AttachDisk: Retry: node %q, diskUUID %s, err %+v", convertToString(nodeName), diskUUID, err)
			}
		}
	}
	// %v rather than %s so a nil error prints "<nil>" instead of %!s(<nil>).
	glog.V(4).Infof("AttachDisk executed for node %s and volume %s with diskUUID %s. Err: %v", convertToString(nodeName), vmDiskPath, diskUUID, err)
	vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err)
	return diskUUID, err
}
// retry inspects err: when it is a vSphere ManagedObjectNotFound error the
// node's cached VM reference is stale, so the node is rediscovered via the
// node manager. It reports whether the error was ManagedObjectNotFound,
// together with the (possibly replaced) error from rediscovery.
func (vs *VSphere) retry(nodeName k8stypes.NodeName, err error) (bool, error) {
	if err == nil || !vclib.IsManagedObjectNotFoundError(err) {
		return false, err
	}
	glog.V(4).Infof("error %q ManagedObjectNotFound for node %q", err, convertToString(nodeName))
	return true, vs.nodeManager.RediscoverNode(nodeName)
}
// DetachDisk detaches given virtual disk volume from the compute running kubelet.
// An empty nodeName means the local node. If the node or its VM no longer
// exists the disk is considered already detached and nil is returned. On a
// stale VM reference the node is rediscovered and the detach retried once.
// Latency and result are recorded as vSphere metrics.
func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error {
	detachDiskInternal := func(volPath string, nodeName k8stypes.NodeName) error {
		if nodeName == "" {
			nodeName = convertToK8sType(vs.hostName)
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		vsi, err := vs.getVSphereInstance(nodeName)
		if err != nil {
			// If node doesn't exist, disk is already detached from node.
			if err == vclib.ErrNoVMFound {
				glog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath)
				return nil
			}
			return err
		}
		// Ensure client is logged in and session is valid
		err = vsi.conn.Connect(ctx)
		if err != nil {
			return err
		}
		vm, err := vs.getVMFromNodeName(ctx, nodeName)
		if err != nil {
			// If node doesn't exist, disk is already detached from node.
			if err == vclib.ErrNoVMFound {
				glog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath)
				return nil
			}
			glog.Errorf("Failed to get VM object for node: %q. err: %+v", convertToString(nodeName), err)
			return err
		}
		err = vm.DetachDisk(ctx, volPath)
		if err != nil {
			glog.Errorf("Failed to detach disk: %s for node: %s. err: %+v", volPath, convertToString(nodeName), err)
			return err
		}
		return nil
	}
	requestTime := time.Now()
	err := detachDiskInternal(volPath, nodeName)
	if err != nil {
		var isManagedObjectNotFoundError bool
		isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
		if isManagedObjectNotFoundError {
			if err == nil {
				err = detachDiskInternal(volPath, nodeName)
			}
		}
	}
	vclib.RecordvSphereMetric(vclib.OperationDetachVolume, requestTime, err)
	return err
}
// DiskIsAttached returns if disk is attached to the VM using controllers supported by the plugin.
// An empty nodeName means the local node. A missing VM is treated as
// "not attached" without error. On a stale VM reference the node is
// rediscovered and the check retried once. Latency and result are recorded
// as vSphere metrics.
func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error) {
	diskIsAttachedInternal := func(volPath string, nodeName k8stypes.NodeName) (bool, error) {
		var vSphereInstance string
		if nodeName == "" {
			vSphereInstance = vs.hostName
			nodeName = convertToK8sType(vSphereInstance)
		} else {
			vSphereInstance = convertToString(nodeName)
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		vsi, err := vs.getVSphereInstance(nodeName)
		if err != nil {
			return false, err
		}
		// Ensure client is logged in and session is valid
		err = vsi.conn.Connect(ctx)
		if err != nil {
			return false, err
		}
		vm, err := vs.getVMFromNodeName(ctx, nodeName)
		if err != nil {
			if err == vclib.ErrNoVMFound {
				glog.Warningf("Node %q does not exist, vsphere CP will assume disk %v is not attached to it.", nodeName, volPath)
				// make the disk as detached and return false without error.
				return false, nil
			}
			glog.Errorf("Failed to get VM object for node: %q. err: %+v", vSphereInstance, err)
			return false, err
		}
		volPath = vclib.RemoveStorageClusterORFolderNameFromVDiskPath(volPath)
		attached, err := vm.IsDiskAttached(ctx, volPath)
		if err != nil {
			glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached on node %q",
				volPath,
				vSphereInstance)
		}
		// %v (not %q) for the bool and error: %q on non-string values emits
		// %!q(bool=...) noise in the log.
		glog.V(4).Infof("DiskIsAttached result: %v and error: %v, for volume: %q", attached, err, volPath)
		return attached, err
	}
	requestTime := time.Now()
	isAttached, err := diskIsAttachedInternal(volPath, nodeName)
	if err != nil {
		var isManagedObjectNotFoundError bool
		isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
		if isManagedObjectNotFoundError {
			if err == vclib.ErrNoVMFound {
				isAttached, err = false, nil
			} else if err == nil {
				isAttached, err = diskIsAttachedInternal(volPath, nodeName)
			}
		}
	}
	vclib.RecordvSphereMetric(vclib.OperationDiskIsAttached, requestTime, err)
	return isAttached, err
}
// DisksAreAttached returns if disks are attached to the VM using controllers supported by the plugin.
// 1. Converts volPaths into canonical form so that it can be compared with the VM device path.
// 2. Segregates nodes by vCenter and Datacenter they are present in. This reduces calls to VC.
// 3. Creates go routines per VC-DC to find whether disks are attached to the nodes.
// 4. If the some of the VMs are not found or migrated then they are added to a list.
// 5. After successful execution of goroutines,
// 5a. If there are any VMs which needs to be retried, they are rediscovered and the whole operation is initiated again for only rediscovered VMs.
// 5b. If VMs are removed from vSphere inventory they are ignored.
func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
	disksAreAttachedInternal := func(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
		// disksAreAttach checks whether disks are attached to the nodes.
		// Returns nodes that need to be retried if retry is true
		// Segregates nodes per VC and DC
		// Creates go routines per VC-DC to find whether disks are attached to the nodes.
		disksAreAttach := func(ctx context.Context, nodeVolumes map[k8stypes.NodeName][]string, attached map[string]map[string]bool, retry bool) ([]k8stypes.NodeName, error) {
			var wg sync.WaitGroup
			var localAttachedMaps []map[string]map[string]bool
			var nodesToRetry []k8stypes.NodeName
			var globalErr error
			globalErr = nil
			globalErrMutex := &sync.Mutex{}
			nodesToRetryMutex := &sync.Mutex{}
			// Segregate nodes according to VC-DC
			dcNodes := make(map[string][]k8stypes.NodeName)
			for nodeName := range nodeVolumes {
				nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName)
				if err != nil {
					// Log nodeName rather than nodeInfo.vm: nodeInfo may be
					// nil when an error is returned, which would panic here.
					glog.Errorf("Failed to get node info: %+v. err: %+v", nodeName, err)
					return nodesToRetry, err
				}
				VC_DC := nodeInfo.vcServer + nodeInfo.dataCenter.String()
				dcNodes[VC_DC] = append(dcNodes[VC_DC], nodeName)
			}
			for _, nodes := range dcNodes {
				localAttachedMap := make(map[string]map[string]bool)
				localAttachedMaps = append(localAttachedMaps, localAttachedMap)
				// Shadow the loop variable: the goroutine below must see this
				// iteration's node list, not whatever the loop variable holds
				// when the goroutine runs.
				nodes := nodes
				// Add BEFORE starting the goroutine so wg.Wait cannot observe
				// the counter at zero (or Done fire) before Add happens.
				wg.Add(1)
				// Start go routines per VC-DC to check disks are attached
				go func() {
					nodesToRetryLocal, err := vs.checkDiskAttached(ctx, nodes, nodeVolumes, localAttachedMap, retry)
					if err != nil {
						if !vclib.IsManagedObjectNotFoundError(err) {
							globalErrMutex.Lock()
							globalErr = err
							globalErrMutex.Unlock()
							glog.Errorf("Failed to check disk attached for nodes: %+v. err: %+v", nodes, err)
						}
					}
					nodesToRetryMutex.Lock()
					nodesToRetry = append(nodesToRetry, nodesToRetryLocal...)
					nodesToRetryMutex.Unlock()
					wg.Done()
				}()
			}
			wg.Wait()
			if globalErr != nil {
				return nodesToRetry, globalErr
			}
			// Merge the per-goroutine results into the shared map.
			for _, localAttachedMap := range localAttachedMaps {
				for key, value := range localAttachedMap {
					attached[key] = value
				}
			}
			return nodesToRetry, nil
		}
		// Infof (not Info): the message contains a format verb.
		glog.V(4).Infof("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes)
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		disksAttached := make(map[k8stypes.NodeName]map[string]bool)
		if len(nodeVolumes) == 0 {
			return disksAttached, nil
		}
		// Convert VolPaths into canonical form so that it can be compared with the VM device path.
		vmVolumes, err := vs.convertVolPathsToDevicePaths(ctx, nodeVolumes)
		if err != nil {
			glog.Errorf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
			return nil, err
		}
		attached := make(map[string]map[string]bool)
		nodesToRetry, err := disksAreAttach(ctx, vmVolumes, attached, false)
		if err != nil {
			return nil, err
		}
		if len(nodesToRetry) != 0 {
			// Rediscover nodes which are need to be retried
			remainingNodesVolumes := make(map[k8stypes.NodeName][]string)
			for _, nodeName := range nodesToRetry {
				err = vs.nodeManager.RediscoverNode(nodeName)
				if err != nil {
					if err == vclib.ErrNoVMFound {
						// VM removed from inventory: ignore it (step 5b).
						glog.V(4).Infof("node %s not found. err: %+v", nodeName, err)
						continue
					}
					glog.Errorf("Failed to rediscover node %s. err: %+v", nodeName, err)
					return nil, err
				}
				remainingNodesVolumes[nodeName] = nodeVolumes[nodeName]
			}
			// If some remaining nodes are still registered
			if len(remainingNodesVolumes) != 0 {
				nodesToRetry, err = disksAreAttach(ctx, remainingNodesVolumes, attached, true)
				if err != nil || len(nodesToRetry) != 0 {
					glog.Errorf("Failed to retry disksAreAttach for nodes %+v. err: %+v", remainingNodesVolumes, err)
					return nil, err
				}
			}
		}
		// Copy results for ALL nodes. Previously this loop was nested inside
		// the retry branch above, so the common no-retry path returned an
		// empty map even when disks were attached.
		for nodeName, volPaths := range attached {
			disksAttached[convertToK8sType(nodeName)] = volPaths
		}
		glog.V(4).Infof("DisksAreAttach successfully executed. result: %+v", attached)
		return disksAttached, nil
	}
	requestTime := time.Now()
	attached, err := disksAreAttachedInternal(nodeVolumes)
	vclib.RecordvSphereMetric(vclib.OperationDisksAreAttached, requestTime, err)
	return attached, err
}
// CreateVolume creates a volume of given size (in KiB) and return the volume path.
// If the volumeOptions.Datastore is part of datastore cluster for example - [DatastoreCluster/sharedVmfs-0] then
// return value will be [DatastoreCluster/sharedVmfs-0] kubevols/<volume-name>.vmdk
// else return value will be [sharedVmfs-0] kubevols/<volume-name>.vmdk
// Latency and result are recorded as vSphere metrics.
func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) {
	glog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions)
	createVolumeInternal := func(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) {
		var datastore string
		// If datastore not specified, then use default datastore
		if volumeOptions.Datastore == "" {
			datastore = vs.cfg.Workspace.DefaultDatastore
		} else {
			datastore = volumeOptions.Datastore
		}
		datastore = strings.TrimSpace(datastore)
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Volumes are always created through the workspace vCenter/datacenter.
		vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
		if err != nil {
			return "", err
		}
		dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
		if err != nil {
			return "", err
		}
		var vmOptions *vclib.VMOptions
		// Storage-policy based provisioning requires a dummy VM, so prepare
		// VM options and make sure the stale-dummy-VM cleanup routine runs.
		if volumeOptions.VSANStorageProfileData != "" || volumeOptions.StoragePolicyName != "" {
			// Acquire a read lock to ensure multiple PVC requests can be processed simultaneously.
			cleanUpDummyVMLock.RLock()
			defer cleanUpDummyVMLock.RUnlock()
			// Create a new background routine that will delete any dummy VM's that are left stale.
			// This routine will get executed for every 5 minutes and gets initiated only once in its entire lifetime.
			cleanUpRoutineInitLock.Lock()
			if !cleanUpRoutineInitialized {
				glog.V(1).Infof("Starting a clean up routine to remove stale dummy VM's")
				go vs.cleanUpDummyVMs(DummyVMPrefixName)
				cleanUpRoutineInitialized = true
			}
			cleanUpRoutineInitLock.Unlock()
			vmOptions, err = vs.setVMOptions(ctx, dc, vs.cfg.Workspace.ResourcePoolPath)
			if err != nil {
				glog.Errorf("Failed to set VM options requires to create a vsphere volume. err: %+v", err)
				return "", err
			}
		}
		// With a storage policy and no explicit datastore, pick a policy
		// compatible datastore; otherwise verify the chosen datastore is
		// shared across all node VMs.
		if volumeOptions.StoragePolicyName != "" && volumeOptions.Datastore == "" {
			datastore, err = getPbmCompatibleDatastore(ctx, dc, volumeOptions.StoragePolicyName, vs.nodeManager)
			if err != nil {
				glog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err)
				return "", err
			}
		} else {
			// Since no storage policy is specified but datastore is specified, check
			// if the given datastore is a shared datastore across all node VMs.
			sharedDsList, err := getSharedDatastoresInK8SCluster(ctx, dc, vs.nodeManager)
			if err != nil {
				glog.Errorf("Failed to get shared datastore: %+v", err)
				return "", err
			}
			found := false
			for _, sharedDs := range sharedDsList {
				if datastore == sharedDs.Info.Name {
					found = true
					break
				}
			}
			if !found {
				msg := fmt.Sprintf("The specified datastore %s is not a shared datastore across node VMs", datastore)
				return "", errors.New(msg)
			}
		}
		ds, err := dc.GetDatastoreByName(ctx, datastore)
		if err != nil {
			return "", err
		}
		volumeOptions.Datastore = datastore
		// Ensure the kubevols directory exists; an already-existing directory
		// is not an error.
		kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/"
		err = ds.CreateDirectory(ctx, kubeVolsPath, false)
		if err != nil && err != vclib.ErrFileAlreadyExist {
			glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err)
			return "", err
		}
		volumePath := kubeVolsPath + volumeOptions.Name + ".vmdk"
		disk := diskmanagers.VirtualDisk{
			DiskPath:      volumePath,
			VolumeOptions: volumeOptions,
			VMOptions:     vmOptions,
		}
		volumePath, err = disk.Create(ctx, ds)
		if err != nil {
			glog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, datastore, err)
			return "", err
		}
		// Get the canonical path for the volume path.
		canonicalVolumePath, err = getcanonicalVolumePath(ctx, dc, volumePath)
		if err != nil {
			glog.Errorf("Failed to get canonical vsphere volume path for volume: %s with volumeOptions: %+v on datastore: %s. err: %+v", volumePath, volumeOptions, datastore, err)
			return "", err
		}
		if filepath.Base(datastore) != datastore {
			// If datastore is within cluster, add cluster path to the volumePath
			canonicalVolumePath = strings.Replace(canonicalVolumePath, filepath.Base(datastore), datastore, 1)
		}
		return canonicalVolumePath, nil
	}
	requestTime := time.Now()
	canonicalVolumePath, err = createVolumeInternal(volumeOptions)
	vclib.RecordCreateVolumeMetric(volumeOptions, requestTime, err)
	glog.V(4).Infof("The canonical volume path for the newly created vSphere volume is %q", canonicalVolumePath)
	return canonicalVolumePath, err
}
// DeleteVolume deletes a volume given volume name.
// The disk is removed via the workspace vCenter/datacenter; latency and
// result are recorded as vSphere metrics.
func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
	glog.V(1).Infof("Starting to delete vSphere volume with vmDiskPath: %s", vmDiskPath)
	deleteVolumeInternal := func(vmDiskPath string) error {
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
		if err != nil {
			return err
		}
		dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
		if err != nil {
			return err
		}
		virtualDisk := diskmanagers.VirtualDisk{
			DiskPath:      vmDiskPath,
			VolumeOptions: &vclib.VolumeOptions{},
			VMOptions:     &vclib.VMOptions{},
		}
		if err := virtualDisk.Delete(ctx, dc); err != nil {
			glog.Errorf("Failed to delete vsphere volume with vmDiskPath: %s. err: %+v", vmDiskPath, err)
			return err
		}
		return nil
	}
	startedAt := time.Now()
	err := deleteVolumeInternal(vmDiskPath)
	vclib.RecordvSphereMetric(vclib.OperationDeleteVolume, startedAt, err)
	return err
}
// HasClusterID returns true if the cluster has a clusterID.
// Always true for vSphere.
func (vs *VSphere) HasClusterID() bool {
	return true
}
// NodeAdded is the notification handler invoked when a node is added into the
// k8s cluster; it registers the node with the node manager.
func (vs *VSphere) NodeAdded(obj interface{}) {
	node, ok := obj.(*v1.Node)
	if !ok || node == nil {
		glog.Warningf("NodeAdded: unrecognized object %+v", obj)
		return
	}
	glog.V(4).Infof("Node added: %+v", node)
	vs.nodeManager.RegisterNode(node)
}
// NodeDeleted is the notification handler invoked when a node is removed from
// the k8s cluster; it unregisters the node from the node manager.
func (vs *VSphere) NodeDeleted(obj interface{}) {
	node, ok := obj.(*v1.Node)
	if !ok || node == nil {
		glog.Warningf("NodeDeleted: unrecognized object %+v", obj)
		return
	}
	glog.V(4).Infof("Node deleted: %+v", node)
	vs.nodeManager.UnRegisterNode(node)
}
// NodeManager returns the node manager backing this provider instance, or
// nil when the receiver itself is nil.
func (vs *VSphere) NodeManager() (nodeManager *NodeManager) {
	if vs != nil {
		nodeManager = vs.nodeManager
	}
	return
}
Add secrets flag in VCP config and modify VCP to use the node manager connect method
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"errors"
"fmt"
"io"
"net"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"gopkg.in/gcfg.v1"
"github.com/golang/glog"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
"k8s.io/kubernetes/pkg/controller"
)
// VSphere Cloud Provider constants
const (
	// ProviderName is the registered name of this cloud provider.
	ProviderName = "vsphere"
	// VolDir is the datastore directory under which volume vmdks are created.
	VolDir = "kubevols"
	// RoundTripperDefaultCount is the default SOAP round-tripper count
	// (retries = RoundTripper - 1).
	RoundTripperDefaultCount = 3
	// DummyVMPrefixName prefixes the dummy VMs used for policy-based
	// provisioning so the cleanup routine can find them.
	DummyVMPrefixName = "vsphere-k8s"
	// MacOuiVC / MacOuiEsx are the MAC OUIs assigned by vCenter and ESX.
	MacOuiVC  = "00:50:56"
	MacOuiEsx = "00:0c:29"
	// CleanUpDummyVMRoutineInterval is the dummy-VM cleanup period
	// (presumably in minutes — confirm against cleanUpDummyVMs).
	CleanUpDummyVMRoutineInterval = 5
)

// cleanUpRoutineInitialized tracks whether the stale-dummy-VM cleanup
// goroutine has been started (guarded by cleanUpRoutineInitLock).
var cleanUpRoutineInitialized = false

// datastoreFolderIDMap caches folder IDs per datastore.
var datastoreFolderIDMap = make(map[string]map[string]string)

var cleanUpRoutineInitLock sync.Mutex
var cleanUpDummyVMLock sync.RWMutex

// Error Messages
const (
	MissingUsernameErrMsg = "Username is missing"
	MissingPasswordErrMsg = "Password is missing"
)

// Error constants
var (
	ErrUsernameMissing = errors.New(MissingUsernameErrMsg)
	ErrPasswordMissing = errors.New(MissingPasswordErrMsg)
)
// VSphere is an implementation of cloud provider Interface for VSphere.
type VSphere struct {
	// cfg is the parsed vsphere.conf; nil on worker nodes.
	cfg *VSphereConfig
	// hostName is the local machine's hostname, captured at construction.
	hostName string
	// Maps the VSphere IP address to VSphereInstance
	vsphereInstanceMap map[string]*VSphereInstance
	// Responsible for managing discovery of k8s node, their location etc.
	nodeManager *NodeManager
	// vmUUID is the local VM's instance UUID.
	vmUUID string
	// isSecretInfoProvided is true when vCenter credentials come from a
	// Kubernetes secret rather than the config file.
	isSecretInfoProvided bool
}
// VSphereInstance represents a vSphere instance where one or more kubernetes
// nodes are running: a live connection plus its configuration.
type VSphereInstance struct {
	conn *vclib.VSphereConnection
	cfg  *VirtualCenterConfig
}
// VirtualCenterConfig is the per-vCenter configuration parsed from
// vsphere.conf (one section per Virtual Center).
type VirtualCenterConfig struct {
	// vCenter username.
	User string `gcfg:"user"`
	// vCenter password in clear text.
	Password string `gcfg:"password"`
	// vCenter port.
	VCenterPort string `gcfg:"port"`
	// Datacenter in which VMs are located.
	Datacenters string `gcfg:"datacenters"`
	// Soap round tripper count (retries = RoundTripper - 1)
	RoundTripperCount uint `gcfg:"soap-roundtrip-count"`
}
// VSphereConfig represents the content of vsphere.conf file.
// Users specify the configuration of one or more Virtual Centers in vsphere.conf where
// the Kubernetes master and worker nodes are running.
type VSphereConfig struct {
	Global struct {
		// vCenter username.
		User string `gcfg:"user"`
		// vCenter password in clear text.
		Password string `gcfg:"password"`
		// Deprecated. Use VirtualCenter to specify multiple vCenter Servers.
		// vCenter IP.
		VCenterIP string `gcfg:"server"`
		// vCenter port.
		VCenterPort string `gcfg:"port"`
		// True if vCenter uses self-signed cert.
		InsecureFlag bool `gcfg:"insecure-flag"`
		// Datacenter in which VMs are located.
		// Deprecated. Use "datacenters" instead.
		Datacenter string `gcfg:"datacenter"`
		// Datacenter in which VMs are located.
		Datacenters string `gcfg:"datacenters"`
		// Datastore in which vmdks are stored.
		// Deprecated. See Workspace.DefaultDatastore
		DefaultDatastore string `gcfg:"datastore"`
		// WorkingDir is path where VMs can be found. Also used to create dummy VMs.
		// Deprecated.
		WorkingDir string `gcfg:"working-dir"`
		// Soap round tripper count (retries = RoundTripper - 1)
		RoundTripperCount uint `gcfg:"soap-roundtrip-count"`
		// Deprecated as the virtual machines will be automatically discovered.
		// VMUUID is the VM Instance UUID of virtual machine which can be retrieved from instanceUuid
		// property in VmConfigInfo, or also set as vc.uuid in VMX file.
		// If not set, will be fetched from the machine via sysfs (requires root)
		VMUUID string `gcfg:"vm-uuid"`
		// Deprecated as virtual machine will be automatically discovered.
		// VMName is the VM name of virtual machine
		// Combining the WorkingDir and VMName can form a unique InstanceID.
		// When vm-name is set, no username/password is required on worker nodes.
		VMName string `gcfg:"vm-name"`
		// Name of the secret were vCenter credentials are present.
		SecretName string `gcfg:"secret-name"`
		// Secret Namespace where secret will be present that has vCenter credentials.
		SecretNamespace string `gcfg:"secret-namespace"`
	}
	// VirtualCenter maps vCenter server address to its per-VC configuration.
	VirtualCenter map[string]*VirtualCenterConfig
	Network struct {
		// PublicNetwork is name of the network the VMs are joined to.
		PublicNetwork string `gcfg:"public-network"`
	}
	Disk struct {
		// SCSIControllerType defines SCSI controller to be used.
		// NOTE(review): the struct tag reads `dcfg:` instead of `gcfg:` —
		// gcfg will not bind "scsicontrollertype" through this tag. Confirm
		// whether this is intentional before changing it, since correcting
		// the tag would alter config parsing behavior.
		SCSIControllerType string `dcfg:"scsicontrollertype"`
	}
	// Endpoint used to create volumes
	Workspace struct {
		VCenterIP        string `gcfg:"server"`
		Datacenter       string `gcfg:"datacenter"`
		Folder           string `gcfg:"folder"`
		DefaultDatastore string `gcfg:"default-datastore"`
		ResourcePoolPath string `gcfg:"resourcepool-path"`
	}
}
// Volumes is the volume-management interface implemented by the vSphere
// cloud provider (attach/detach, attachment queries, create/delete).
type Volumes interface {
	// AttachDisk attaches given disk to given node. Current node
	// is used when nodeName is empty string.
	AttachDisk(vmDiskPath string, storagePolicyName string, nodeName k8stypes.NodeName) (diskUUID string, err error)

	// DetachDisk detaches given disk to given node. Current node
	// is used when nodeName is empty string.
	// Assumption: If node doesn't exist, disk is already detached from node.
	DetachDisk(volPath string, nodeName k8stypes.NodeName) error

	// DiskIsAttached checks if a disk is attached to the given node.
	// Assumption: If node doesn't exist, disk is not attached to the node.
	DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error)

	// DisksAreAttached checks if a list disks are attached to the given node.
	// Assumption: If node doesn't exist, disks are not attached to the node.
	DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error)

	// CreateVolume creates a new vmdk with specified parameters.
	CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath string, err error)

	// DeleteVolume deletes vmdk.
	DeleteVolume(vmDiskPath string) error
}
// readConfig parses the vSphere cloud config file and stores it into
// VSphereConfig. A nil reader is an error (no config file given).
func readConfig(config io.Reader) (VSphereConfig, error) {
	var cfg VSphereConfig
	if config == nil {
		return cfg, fmt.Errorf("no vSphere cloud provider config file given")
	}
	if err := gcfg.ReadInto(&cfg, config); err != nil {
		return cfg, err
	}
	return cfg, nil
}
// init registers vSphere metrics and the cloud provider factory.
// The factory builds a worker-node provider when no config is supplied and a
// controller-node provider otherwise.
func init() {
	vclib.RegisterMetrics()
	cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
		// If vSphere.conf file is not present then it is worker node.
		if config == nil {
			return newWorkerNode()
		}
		cfg, err := readConfig(config)
		if err != nil {
			return nil, err
		}
		return newControllerNode(cfg)
	})
}
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider.
// No-op for vSphere.
func (vs *VSphere) Initialize(clientBuilder controller.ControllerClientBuilder) {
}
// SetInformers initializes node informers for the vSphere cloud provider.
// On controller nodes (vs.cfg != nil) it wires the secret-based credential
// manager (when secret info is configured) and registers add/delete node
// callbacks so the node manager tracks cluster membership.
func (vs *VSphere) SetInformers(informerFactory informers.SharedInformerFactory) {
	// Worker nodes have no config and register nothing.
	if vs.cfg == nil {
		return
	}

	if vs.isSecretInfoProvided {
		secretCredentialManager := &SecretCredentialManager{
			SecretName:      vs.cfg.Global.SecretName,
			SecretNamespace: vs.cfg.Global.SecretNamespace,
			SecretLister:    informerFactory.Core().V1().Secrets().Lister(),
			Cache: &SecretCache{
				VirtualCenter: make(map[string]*Credential),
			},
		}
		vs.nodeManager.UpdateCredentialManager(secretCredentialManager)
	}

	// Only on controller node it is required to register listeners.
	// Register callbacks for node updates
	glog.V(4).Infof("Setting up node informers for vSphere Cloud Provider")
	nodeInformer := informerFactory.Core().V1().Nodes().Informer()
	nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    vs.NodeAdded,
		DeleteFunc: vs.NodeDeleted,
	})
	glog.V(4).Infof("Node informers in vSphere cloud provider initialized")
}
// newWorkerNode creates the VSphere instance used on worker nodes, where no
// vsphere.conf is available: only the hostname and local VM UUID are set.
func newWorkerNode() (*VSphere, error) {
	hostName, err := os.Hostname()
	if err != nil {
		glog.Errorf("Failed to get hostname. err: %+v", err)
		return nil, err
	}
	vmUUID, err := GetVMUUID()
	if err != nil {
		glog.Errorf("Failed to get uuid. err: %+v", err)
		return nil, err
	}
	return &VSphere{hostName: hostName, vmUUID: vmUUID}, nil
}
// populateVsphereInstanceMap builds the vcServer -> *VSphereInstance map from
// the parsed vsphere.conf. It supports both the legacy single-VC layout
// (Global.* fields only) and the newer per-VirtualCenter layout, validating
// required fields and filling per-VC defaults from the Global section.
// When secret credential info is present, any inline user/password values are
// cleared; connections are populated with credentials once the secret arrives.
func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance, error) {
	vsphereInstanceMap := make(map[string]*VSphereInstance)
	isSecretInfoProvided := true
	if cfg.Global.SecretName == "" || cfg.Global.SecretNamespace == "" {
		glog.Warningf("SecretName and/or SecretNamespace is not provided. " +
			"VCP will use username and password from config file")
		isSecretInfoProvided = false
	}
	if isSecretInfoProvided {
		// Secret wins over inline credentials; drop the latter so they are
		// never used accidentally.
		if cfg.Global.User != "" {
			glog.Warning("Global.User and Secret info provided. VCP will use secret to get credentials")
			cfg.Global.User = ""
		}
		if cfg.Global.Password != "" {
			glog.Warning("Global.Password and Secret info provided. VCP will use secret to get credentials")
			cfg.Global.Password = ""
		}
	}
	// Check if the vsphere.conf is in old format. In this
	// format the cfg.VirtualCenter will be nil or empty.
	// (len of a nil map is 0, so a single len check covers both cases.)
	if len(cfg.VirtualCenter) == 0 {
		glog.V(4).Infof("Config is not per virtual center and is in old format.")
		if !isSecretInfoProvided {
			if cfg.Global.User == "" {
				glog.Error("Global.User is empty!")
				return nil, ErrUsernameMissing
			}
			if cfg.Global.Password == "" {
				glog.Error("Global.Password is empty!")
				return nil, ErrPasswordMissing
			}
		}
		if cfg.Global.WorkingDir == "" {
			glog.Error("Global.WorkingDir is empty!")
			return nil, errors.New("Global.WorkingDir is empty!")
		}
		if cfg.Global.VCenterIP == "" {
			glog.Error("Global.VCenterIP is empty!")
			return nil, errors.New("Global.VCenterIP is empty!")
		}
		if cfg.Global.Datacenter == "" {
			glog.Error("Global.Datacenter is empty!")
			return nil, errors.New("Global.Datacenter is empty!")
		}
		// Mirror the Global fields into the Workspace section so the rest of
		// the provider can rely on Workspace alone.
		cfg.Workspace.VCenterIP = cfg.Global.VCenterIP
		cfg.Workspace.Datacenter = cfg.Global.Datacenter
		cfg.Workspace.Folder = cfg.Global.WorkingDir
		cfg.Workspace.DefaultDatastore = cfg.Global.DefaultDatastore
		vcConfig := VirtualCenterConfig{
			User:              cfg.Global.User,
			Password:          cfg.Global.Password,
			VCenterPort:       cfg.Global.VCenterPort,
			Datacenters:       cfg.Global.Datacenter,
			RoundTripperCount: cfg.Global.RoundTripperCount,
		}
		// Note: If secrets info is provided username and password will be populated
		// once secret is created.
		vSphereConn := vclib.VSphereConnection{
			Username:          vcConfig.User,
			Password:          vcConfig.Password,
			Hostname:          cfg.Global.VCenterIP,
			Insecure:          cfg.Global.InsecureFlag,
			RoundTripperCount: vcConfig.RoundTripperCount,
			Port:              vcConfig.VCenterPort,
		}
		vsphereIns := VSphereInstance{
			conn: &vSphereConn,
			cfg:  &vcConfig,
		}
		vsphereInstanceMap[cfg.Global.VCenterIP] = &vsphereIns
	} else {
		if cfg.Workspace.VCenterIP == "" || cfg.Workspace.Folder == "" || cfg.Workspace.Datacenter == "" {
			msg := fmt.Sprintf("All fields in workspace are mandatory."+
				" vsphere.conf does not have the workspace specified correctly. cfg.Workspace: %+v", cfg.Workspace)
			glog.Error(msg)
			return nil, errors.New(msg)
		}
		for vcServer, vcConfig := range cfg.VirtualCenter {
			glog.V(4).Infof("Initializing vc server %s", vcServer)
			if vcServer == "" {
				glog.Error("vsphere.conf does not have the VirtualCenter IP address specified")
				return nil, errors.New("vsphere.conf does not have the VirtualCenter IP address specified")
			}
			if !isSecretInfoProvided {
				// Per-VC credentials fall back to the Global section.
				if vcConfig.User == "" {
					vcConfig.User = cfg.Global.User
					if vcConfig.User == "" {
						glog.Errorf("vcConfig.User is empty for vc %s!", vcServer)
						return nil, ErrUsernameMissing
					}
				}
				if vcConfig.Password == "" {
					vcConfig.Password = cfg.Global.Password
					if vcConfig.Password == "" {
						glog.Errorf("vcConfig.Password is empty for vc %s!", vcServer)
						return nil, ErrPasswordMissing
					}
				}
			} else {
				if vcConfig.User != "" {
					glog.Warningf("vcConfig.User for server %s and Secret info provided. VCP will use secret to get credentials", vcServer)
					vcConfig.User = ""
				}
				if vcConfig.Password != "" {
					glog.Warningf("vcConfig.Password for server %s and Secret info provided. VCP will use secret to get credentials", vcServer)
					vcConfig.Password = ""
				}
			}
			if vcConfig.VCenterPort == "" {
				vcConfig.VCenterPort = cfg.Global.VCenterPort
			}
			if vcConfig.Datacenters == "" {
				if cfg.Global.Datacenters != "" {
					vcConfig.Datacenters = cfg.Global.Datacenters
				} else {
					// cfg.Global.Datacenter is deprecated, so giving it the last preference.
					vcConfig.Datacenters = cfg.Global.Datacenter
				}
			}
			if vcConfig.RoundTripperCount == 0 {
				vcConfig.RoundTripperCount = cfg.Global.RoundTripperCount
			}
			// Note: If secrets info is provided username and password will be populated
			// once secret is created.
			vSphereConn := vclib.VSphereConnection{
				Username:          vcConfig.User,
				Password:          vcConfig.Password,
				Hostname:          vcServer,
				Insecure:          cfg.Global.InsecureFlag,
				RoundTripperCount: vcConfig.RoundTripperCount,
				Port:              vcConfig.VCenterPort,
			}
			vsphereIns := VSphereInstance{
				conn: &vSphereConn,
				cfg:  vcConfig,
			}
			vsphereInstanceMap[vcServer] = &vsphereIns
		}
	}
	return vsphereInstanceMap, nil
}
// getVMUUID allows tests to override GetVMUUID
// (indirection consumed by newControllerNode).
var getVMUUID = GetVMUUID
// Creates new Controller node interface and returns
// newControllerNode builds a fully VC-aware provider from cfg, then fills in
// the identity (hostname, VM UUID) of the host it runs on.
func newControllerNode(cfg VSphereConfig) (*VSphere, error) {
	vs, err := buildVSphereFromConfig(cfg)
	if err != nil {
		return nil, err
	}
	vs.hostName, err = os.Hostname()
	if err != nil {
		glog.Errorf("Failed to get hostname. err: %+v", err)
		return nil, err
	}
	vs.vmUUID, err = getVMUUID()
	if err != nil {
		glog.Errorf("Failed to get uuid. err: %+v", err)
		return nil, err
	}
	// Log out of all vCenter sessions when this object is garbage collected.
	runtime.SetFinalizer(vs, logout)
	return vs, nil
}
// Initializes vSphere from vSphere CloudProvider Configuration
// buildVSphereFromConfig validates/normalizes cfg, builds the per-vCenter
// instance map, and assembles the VSphere provider with its NodeManager.
func buildVSphereFromConfig(cfg VSphereConfig) (*VSphere, error) {
	// Secret-based credentials count as configured only when both the name
	// and the namespace are present.
	isSecretInfoProvided := cfg.Global.SecretName != "" && cfg.Global.SecretNamespace != ""

	// Normalize the SCSI controller type, rejecting unsupported values.
	switch {
	case cfg.Disk.SCSIControllerType == "":
		cfg.Disk.SCSIControllerType = vclib.PVSCSIControllerType
	case !vclib.CheckControllerSupported(cfg.Disk.SCSIControllerType):
		glog.Errorf("%v is not a supported SCSI Controller type. Please configure 'lsilogic-sas' OR 'pvscsi'", cfg.Disk.SCSIControllerType)
		return nil, errors.New("Controller type not supported. Please configure 'lsilogic-sas' OR 'pvscsi'")
	}

	// Fill remaining defaults.
	if cfg.Global.WorkingDir != "" {
		cfg.Global.WorkingDir = path.Clean(cfg.Global.WorkingDir)
	}
	if cfg.Global.RoundTripperCount == 0 {
		cfg.Global.RoundTripperCount = RoundTripperDefaultCount
	}
	if cfg.Global.VCenterPort == "" {
		cfg.Global.VCenterPort = "443"
	}

	instanceMap, err := populateVsphereInstanceMap(&cfg)
	if err != nil {
		return nil, err
	}

	provider := VSphere{
		vsphereInstanceMap: instanceMap,
		nodeManager: &NodeManager{
			vsphereInstanceMap: instanceMap,
			nodeInfoMap:        make(map[string]*NodeInfo),
			registeredNodes:    make(map[string]*v1.Node),
		},
		isSecretInfoProvided: isSecretInfoProvided,
		cfg:                  &cfg,
	}
	return &provider, nil
}
// logout closes every established vCenter session. Registered as a
// finalizer for controller-node instances (see newControllerNode).
func logout(vs *VSphere) {
	for _, vsphereIns := range vs.vsphereInstanceMap {
		// Only connections with a live client have a session to terminate.
		if vsphereIns.conn.Client != nil {
			vsphereIns.conn.Logout(context.TODO())
		}
	}
}
// Instances returns an implementation of Instances for vSphere.
// VSphere itself satisfies the cloudprovider.Instances interface.
func (vs *VSphere) Instances() (cloudprovider.Instances, bool) {
	return vs, true
}
// getLocalIP collects the non-loopback IPv4 addresses of this host's network
// interfaces. Addresses on interfaces whose MAC OUI matches vCenter (MacOuiVC)
// or ESX (MacOuiEsx) are reported as both ExternalIP and InternalIP.
// Fix: the original declared an addressType variable that was never assigned
// yet was logged, so the V(4) log always printed an empty type; the dead
// variable is removed and the log reports only what is actually known.
func getLocalIP() ([]v1.NodeAddress, error) {
	addrs := []v1.NodeAddress{}
	ifaces, err := net.Interfaces()
	if err != nil {
		glog.Errorf("net.Interfaces() failed for NodeAddresses - %v", err)
		return nil, err
	}
	for _, i := range ifaces {
		localAddrs, err := i.Addrs()
		if err != nil {
			// Best effort: skip this interface but keep scanning the rest.
			glog.Warningf("Failed to extract addresses for NodeAddresses - %v", err)
			continue
		}
		for _, addr := range localAddrs {
			ipnet, ok := addr.(*net.IPNet)
			if !ok || ipnet.IP.IsLoopback() {
				continue
			}
			if ipnet.IP.To4() == nil {
				// Only IPv4 addresses are reported.
				continue
			}
			// Filter external IP by MAC address OUIs from vCenter and from ESX
			if strings.HasPrefix(i.HardwareAddr.String(), MacOuiVC) ||
				strings.HasPrefix(i.HardwareAddr.String(), MacOuiEsx) {
				v1helper.AddToNodeAddresses(&addrs,
					v1.NodeAddress{
						Type:    v1.NodeExternalIP,
						Address: ipnet.IP.String(),
					},
					v1.NodeAddress{
						Type:    v1.NodeInternalIP,
						Address: ipnet.IP.String(),
					},
				)
			}
			glog.V(4).Infof("Find local IP address %v", ipnet.IP.String())
		}
	}
	return addrs, nil
}
// getVSphereInstance looks up the VSphereInstance serving nodeName in the
// node manager's cache.
// NOTE(review): GetVSphereInstance appears to return a value, so the returned
// pointer refers to a local copy rather than the cached entry — confirm that
// callers do not rely on mutating the cache through it.
func (vs *VSphere) getVSphereInstance(nodeName k8stypes.NodeName) (*VSphereInstance, error) {
	vsphereIns, err := vs.nodeManager.GetVSphereInstance(nodeName)
	if err != nil {
		glog.Errorf("Cannot find node %q in cache. Node not found!!!", nodeName)
		return nil, err
	}
	return &vsphereIns, nil
}
// getVSphereInstanceForServer returns the VSphereInstance registered for the
// given vCenter server, ensuring its client session is connected and valid.
func (vs *VSphere) getVSphereInstanceForServer(vcServer string, ctx context.Context) (*VSphereInstance, error) {
	vsphereIns, ok := vs.vsphereInstanceMap[vcServer]
	if !ok {
		glog.Errorf("cannot find vcServer %q in cache. VC not found!!!", vcServer)
		// fmt.Errorf replaces the redundant errors.New(fmt.Sprintf(...)) form.
		return nil, fmt.Errorf("Cannot find node %q in vsphere configuration map", vcServer)
	}
	// Ensure client is logged in and session is valid
	err := vs.nodeManager.vcConnect(ctx, vsphereIns)
	if err != nil {
		glog.Errorf("failed connecting to vcServer %q with error %+v", vcServer, err)
		return nil, err
	}
	return vsphereIns, nil
}
// Get the VM Managed Object instance by from the node
// getVMFromNodeName returns the cached vclib.VirtualMachine for nodeName.
// The lookup is served from the node manager cache; ctx is currently unused.
func (vs *VSphere) getVMFromNodeName(ctx context.Context, nodeName k8stypes.NodeName) (*vclib.VirtualMachine, error) {
	nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName)
	if err != nil {
		return nil, err
	}
	return nodeInfo.vm, nil
}
// NodeAddresses is an implementation of Instances.NodeAddresses.
// For the local node it reads interface addresses directly; for remote nodes
// it queries vCenter for guest.net info on the node's VM (controller only).
func (vs *VSphere) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) {
	// Get local IP addresses if node is local node
	if vs.hostName == convertToString(nodeName) {
		return getLocalIP()
	}
	// No VC configuration (worker node): remote lookup is impossible.
	if vs.cfg == nil {
		return nil, cloudprovider.InstanceNotFound
	}
	// Below logic can be executed only on master as VC details are present.
	addrs := []v1.NodeAddress{}
	// Create context
	// (deliberately shadows the incoming ctx with a cancellable background one)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	vsi, err := vs.getVSphereInstance(nodeName)
	if err != nil {
		return nil, err
	}
	// Ensure client is logged in and session is valid
	err = vs.nodeManager.vcConnect(ctx, vsi)
	if err != nil {
		return nil, err
	}
	vm, err := vs.getVMFromNodeName(ctx, nodeName)
	if err != nil {
		glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
		return nil, err
	}
	vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*vclib.VirtualMachine{vm}, []string{"guest.net"})
	if err != nil {
		glog.Errorf("Failed to get VM Managed object with property guest.net for node: %q. err: +%v", convertToString(nodeName), err)
		return nil, err
	}
	// retrieve VM's ip(s)
	// assumes vmMoList has at least one entry for the requested VM — TODO confirm
	for _, v := range vmMoList[0].Guest.Net {
		if vs.cfg.Network.PublicNetwork == v.Network {
			for _, ip := range v.IpAddress {
				// Only IPv4 addresses are reported; each is added as both
				// ExternalIP and InternalIP.
				if net.ParseIP(ip).To4() != nil {
					v1helper.AddToNodeAddresses(&addrs,
						v1.NodeAddress{
							Type:    v1.NodeExternalIP,
							Address: ip,
						}, v1.NodeAddress{
							Type:    v1.NodeInternalIP,
							Address: ip,
						},
					)
				}
			}
		}
	}
	return addrs, nil
}
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
// Delegates to NodeAddresses after converting the providerID to a NodeName.
func (vs *VSphere) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
	return vs.NodeAddresses(ctx, convertToK8sType(providerID))
}
// AddSSHKeyToAllInstances add SSH key to all instances
// Not supported by the vSphere provider.
func (vs *VSphere) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
	return cloudprovider.NotImplemented
}
// CurrentNodeName gives the current node name
// The hostname argument is ignored; the cached local hostname is returned.
func (vs *VSphere) CurrentNodeName(ctx context.Context, hostname string) (k8stypes.NodeName, error) {
	return convertToK8sType(vs.hostName), nil
}
// convertToString converts a k8s NodeName to its underlying string.
func convertToString(nodeName k8stypes.NodeName) string {
	return string(nodeName)
}
// convertToK8sType converts a plain VM name string to a k8s NodeName.
func convertToK8sType(vmName string) k8stypes.NodeName {
	return k8stypes.NodeName(vmName)
}
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
func (vs *VSphere) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
	var nodeName string
	nodes, err := vs.nodeManager.GetNodeDetails()
	if err != nil {
		glog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err)
		return false, err
	}
	// Resolve the providerID to a node: match by VM UUID (v1.9.3+ providerIDs)
	// or, for older nodes, by the literal node name.
	for _, node := range nodes {
		// ProviderID is UUID for nodes v1.9.3+
		if node.VMUUID == GetUUIDFromProviderID(providerID) || node.NodeName == providerID {
			nodeName = node.NodeName
			break
		}
	}
	if nodeName == "" {
		msg := fmt.Sprintf("Error while obtaining Kubernetes nodename for providerID %s.", providerID)
		return false, errors.New(msg)
	}
	// Existence check is delegated to InstanceID: success means the VM exists
	// and is powered on.
	_, err = vs.InstanceID(ctx, convertToK8sType(nodeName))
	if err == nil {
		return true, nil
	}
	return false, err
}
// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
// Not supported by the vSphere provider.
func (vs *VSphere) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
	return false, cloudprovider.NotImplemented
}
// InstanceID returns the cloud provider ID of the node with the specified Name.
// The internal lookup is retried once after node rediscovery when vCenter
// reports ManagedObjectNotFound (e.g. after a VM migration).
func (vs *VSphere) InstanceID(ctx context.Context, nodeName k8stypes.NodeName) (string, error) {
	instanceIDInternal := func() (string, error) {
		if vs.hostName == convertToString(nodeName) {
			return vs.vmUUID, nil
		}
		// Below logic can be performed only on master node where VC details are preset.
		if vs.cfg == nil {
			return "", fmt.Errorf("The current node can't detremine InstanceID for %q", convertToString(nodeName))
		}
		// Create context
		// (deliberately shadows the incoming ctx with a cancellable background one)
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		vsi, err := vs.getVSphereInstance(nodeName)
		if err != nil {
			return "", err
		}
		// Ensure client is logged in and session is valid
		err = vs.nodeManager.vcConnect(ctx, vsi)
		if err != nil {
			return "", err
		}
		vm, err := vs.getVMFromNodeName(ctx, nodeName)
		if err != nil {
			if err == vclib.ErrNoVMFound {
				return "", cloudprovider.InstanceNotFound
			}
			glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
			return "", err
		}
		isActive, err := vm.IsActive(ctx)
		if err != nil {
			glog.Errorf("Failed to check whether node %q is active. err: %+v.", convertToString(nodeName), err)
			return "", err
		}
		if isActive {
			// NOTE(review): this returns vs.vmUUID — the UUID of the host this
			// code runs on — even when nodeName is a different node. Confirm
			// this is intended and not meant to be the target node's UUID.
			return vs.vmUUID, nil
		}
		glog.Warningf("The VM: %s is not in %s state", convertToString(nodeName), vclib.ActivePowerState)
		return "", cloudprovider.InstanceNotFound
	}
	instanceID, err := instanceIDInternal()
	if err != nil {
		// On ManagedObjectNotFound, rediscover the node and retry once.
		var isManagedObjectNotFoundError bool
		isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
		if isManagedObjectNotFoundError {
			if err == nil {
				glog.V(4).Infof("InstanceID: Found node %q", convertToString(nodeName))
				instanceID, err = instanceIDInternal()
			} else if err == vclib.ErrNoVMFound {
				return "", cloudprovider.InstanceNotFound
			}
		}
	}
	return instanceID, err
}
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
// vSphere reports no instance types, so an empty string is always returned.
func (vs *VSphere) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
	return "", nil
}
// InstanceType returns the type of the specified node; vSphere reports no
// instance types, so an empty string is always returned.
func (vs *VSphere) InstanceType(ctx context.Context, name k8stypes.NodeName) (string, error) {
	return "", nil
}
// Clusters returns nil with true; the vSphere provider exposes no
// cloudprovider.Clusters implementation.
func (vs *VSphere) Clusters() (cloudprovider.Clusters, bool) {
	return nil, true
}
// ProviderName returns the cloud provider ID.
func (vs *VSphere) ProviderName() string {
	return ProviderName
}
// LoadBalancer returns an implementation of LoadBalancer for vSphere.
// Load balancers are not supported, so false is returned.
func (vs *VSphere) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	return nil, false
}
// Zones returns an implementation of Zones for Google vSphere.
// Zones are not supported, so false is returned.
func (vs *VSphere) Zones() (cloudprovider.Zones, bool) {
	glog.V(1).Info("The vSphere cloud provider does not support zones")
	return nil, false
}
// Routes returns a false since the interface is not supported for vSphere.
func (vs *VSphere) Routes() (cloudprovider.Routes, bool) {
	return nil, false
}
// AttachDisk attaches given virtual disk volume to the compute running kubelet.
// Returns the disk UUID assigned by vCenter. On ManagedObjectNotFound the
// node is rediscovered and the attach is retried once. Latency and outcome
// are recorded as vSphere metrics.
func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeName k8stypes.NodeName) (diskUUID string, err error) {
	attachDiskInternal := func(vmDiskPath string, storagePolicyName string, nodeName k8stypes.NodeName) (diskUUID string, err error) {
		// An empty nodeName means the local node.
		if nodeName == "" {
			nodeName = convertToK8sType(vs.hostName)
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		vsi, err := vs.getVSphereInstance(nodeName)
		if err != nil {
			return "", err
		}
		// Ensure client is logged in and session is valid
		err = vs.nodeManager.vcConnect(ctx, vsi)
		if err != nil {
			return "", err
		}
		vm, err := vs.getVMFromNodeName(ctx, nodeName)
		if err != nil {
			glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
			return "", err
		}
		diskUUID, err = vm.AttachDisk(ctx, vmDiskPath, &vclib.VolumeOptions{SCSIControllerType: vclib.PVSCSIControllerType, StoragePolicyName: storagePolicyName})
		if err != nil {
			glog.Errorf("Failed to attach disk: %s for node: %s. err: +%v", vmDiskPath, convertToString(nodeName), err)
			return "", err
		}
		return diskUUID, nil
	}
	requestTime := time.Now()
	diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName)
	if err != nil {
		var isManagedObjectNotFoundError bool
		isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
		if isManagedObjectNotFoundError {
			if err == nil {
				glog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName))
				diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName)
				// Fix: the original format string had two verbs for three
				// arguments (go vet printf failure); the node verb was missing.
				glog.V(4).Infof("AttachDisk: Retry: node %q, diskUUID %s, err +%v", convertToString(nodeName), diskUUID, err)
			}
		}
	}
	// %v (not %s) so a nil error prints "<nil>" instead of %!s(<nil>).
	glog.V(4).Infof("AttachDisk executed for node %s and volume %s with diskUUID %s. Err: %v", convertToString(nodeName), vmDiskPath, diskUUID, err)
	vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err)
	return diskUUID, err
}
// retry classifies err: when it is a vCenter ManagedObjectNotFound error, the
// node is rediscovered so the caller can re-run the failed operation.
// Returns (true, rediscoveryErr) for ManagedObjectNotFound errors, otherwise
// (false, err) unchanged.
func (vs *VSphere) retry(nodeName k8stypes.NodeName, err error) (bool, error) {
	isManagedObjectNotFoundError := false
	if err != nil {
		if vclib.IsManagedObjectNotFoundError(err) {
			isManagedObjectNotFoundError = true
			glog.V(4).Infof("error %q ManagedObjectNotFound for node %q", err, convertToString(nodeName))
			// Refresh the node's cached VM info; the result replaces err.
			err = vs.nodeManager.RediscoverNode(nodeName)
		}
	}
	return isManagedObjectNotFoundError, err
}
// DetachDisk detaches given virtual disk volume from the compute running kubelet.
// A missing node or VM is treated as already-detached (nil error). On
// ManagedObjectNotFound the node is rediscovered and the detach retried once.
func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error {
	detachDiskInternal := func(volPath string, nodeName k8stypes.NodeName) error {
		// An empty nodeName means the local node.
		if nodeName == "" {
			nodeName = convertToK8sType(vs.hostName)
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		vsi, err := vs.getVSphereInstance(nodeName)
		if err != nil {
			// If node doesn't exist, disk is already detached from node.
			if err == vclib.ErrNoVMFound {
				glog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath)
				return nil
			}
			return err
		}
		// Ensure client is logged in and session is valid
		err = vs.nodeManager.vcConnect(ctx, vsi)
		if err != nil {
			return err
		}
		vm, err := vs.getVMFromNodeName(ctx, nodeName)
		if err != nil {
			// If node doesn't exist, disk is already detached from node.
			if err == vclib.ErrNoVMFound {
				glog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath)
				return nil
			}
			glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
			return err
		}
		err = vm.DetachDisk(ctx, volPath)
		if err != nil {
			glog.Errorf("Failed to detach disk: %s for node: %s. err: +%v", volPath, convertToString(nodeName), err)
			return err
		}
		return nil
	}
	requestTime := time.Now()
	err := detachDiskInternal(volPath, nodeName)
	if err != nil {
		// On ManagedObjectNotFound, rediscover the node and retry once.
		var isManagedObjectNotFoundError bool
		isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
		if isManagedObjectNotFoundError {
			if err == nil {
				err = detachDiskInternal(volPath, nodeName)
			}
		}
	}
	vclib.RecordvSphereMetric(vclib.OperationDetachVolume, requestTime, err)
	return err
}
// DiskIsAttached returns if disk is attached to the VM using controllers supported by the plugin.
// A missing VM is reported as not-attached (false, nil). On
// ManagedObjectNotFound the node is rediscovered and the check retried once.
func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error) {
	diskIsAttachedInternal := func(volPath string, nodeName k8stypes.NodeName) (bool, error) {
		var vSphereInstance string
		// An empty nodeName means the local node.
		if nodeName == "" {
			vSphereInstance = vs.hostName
			nodeName = convertToK8sType(vSphereInstance)
		} else {
			vSphereInstance = convertToString(nodeName)
		}
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		vsi, err := vs.getVSphereInstance(nodeName)
		if err != nil {
			return false, err
		}
		// Ensure client is logged in and session is valid
		err = vs.nodeManager.vcConnect(ctx, vsi)
		if err != nil {
			return false, err
		}
		vm, err := vs.getVMFromNodeName(ctx, nodeName)
		if err != nil {
			if err == vclib.ErrNoVMFound {
				glog.Warningf("Node %q does not exist, vsphere CP will assume disk %v is not attached to it.", nodeName, volPath)
				// make the disk as detached and return false without error.
				return false, nil
			}
			glog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err)
			return false, err
		}
		volPath = vclib.RemoveStorageClusterORFolderNameFromVDiskPath(volPath)
		attached, err := vm.IsDiskAttached(ctx, volPath)
		if err != nil {
			glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached on node %q",
				volPath,
				vSphereInstance)
		}
		// Fix: %q is a string verb; use %v for the bool and the error so
		// go vet's printf check passes and the log is readable.
		glog.V(4).Infof("DiskIsAttached result: %v and error: %v, for volume: %q", attached, err, volPath)
		return attached, err
	}
	requestTime := time.Now()
	isAttached, err := diskIsAttachedInternal(volPath, nodeName)
	if err != nil {
		// On ManagedObjectNotFound, rediscover the node and retry once; a VM
		// that vanished entirely counts as not-attached.
		var isManagedObjectNotFoundError bool
		isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
		if isManagedObjectNotFoundError {
			if err == vclib.ErrNoVMFound {
				isAttached, err = false, nil
			} else if err == nil {
				isAttached, err = diskIsAttachedInternal(volPath, nodeName)
			}
		}
	}
	vclib.RecordvSphereMetric(vclib.OperationDiskIsAttached, requestTime, err)
	return isAttached, err
}
// DisksAreAttached returns if disks are attached to the VM using controllers supported by the plugin.
// 1. Converts volPaths into canonical form so that it can be compared with the VM device path.
// 2. Segregates nodes by vCenter and Datacenter they are present in. This reduces calls to VC.
// 3. Creates go routines per VC-DC to find whether disks are attached to the nodes.
// 4. If the some of the VMs are not found or migrated then they are added to a list.
// 5. After successful execution of goroutines,
// 5a. If there are any VMs which needs to be retried, they are rediscovered and the whole operation is initiated again for only rediscovered VMs.
// 5b. If VMs are removed from vSphere inventory they are ignored.
func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
	disksAreAttachedInternal := func(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
		// disksAreAttach checks whether disks are attached to the nodes.
		// Returns nodes that need to be retried if retry is true
		// Segregates nodes per VC and DC
		// Creates go routines per VC-DC to find whether disks are attached to the nodes.
		disksAreAttach := func(ctx context.Context, nodeVolumes map[k8stypes.NodeName][]string, attached map[string]map[string]bool, retry bool) ([]k8stypes.NodeName, error) {
			var wg sync.WaitGroup
			var localAttachedMaps []map[string]map[string]bool
			var nodesToRetry []k8stypes.NodeName
			var globalErr error
			globalErr = nil
			globalErrMutex := &sync.Mutex{}
			nodesToRetryMutex := &sync.Mutex{}
			// Segregate nodes according to VC-DC
			dcNodes := make(map[string][]k8stypes.NodeName)
			for nodeName := range nodeVolumes {
				nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName)
				if err != nil {
					glog.Errorf("Failed to get node info: %+v. err: %+v", nodeInfo.vm, err)
					return nodesToRetry, err
				}
				VC_DC := nodeInfo.vcServer + nodeInfo.dataCenter.String()
				dcNodes[VC_DC] = append(dcNodes[VC_DC], nodeName)
			}
			for _, nodes := range dcNodes {
				localAttachedMap := make(map[string]map[string]bool)
				localAttachedMaps = append(localAttachedMaps, localAttachedMap)
				// Start go routines per VC-DC to check disks are attached.
				// Fix: Add must precede the goroutine start (the original
				// called wg.Add after `go`, racing with wg.Wait), and `nodes`
				// is passed as an argument so each goroutine sees its own
				// value rather than the shared loop variable.
				wg.Add(1)
				go func(nodes []k8stypes.NodeName, localAttachedMap map[string]map[string]bool) {
					defer wg.Done()
					nodesToRetryLocal, err := vs.checkDiskAttached(ctx, nodes, nodeVolumes, localAttachedMap, retry)
					if err != nil {
						if !vclib.IsManagedObjectNotFoundError(err) {
							globalErrMutex.Lock()
							globalErr = err
							globalErrMutex.Unlock()
							glog.Errorf("Failed to check disk attached for nodes: %+v. err: %+v", nodes, err)
						}
					}
					nodesToRetryMutex.Lock()
					nodesToRetry = append(nodesToRetry, nodesToRetryLocal...)
					nodesToRetryMutex.Unlock()
				}(nodes, localAttachedMap)
			}
			wg.Wait()
			if globalErr != nil {
				return nodesToRetry, globalErr
			}
			// Merge the per-goroutine results into the shared map.
			for _, localAttachedMap := range localAttachedMaps {
				for key, value := range localAttachedMap {
					attached[key] = value
				}
			}
			return nodesToRetry, nil
		}
		// Fix: Infof (the original passed a format string to Info).
		glog.V(4).Infof("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes)
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		disksAttached := make(map[k8stypes.NodeName]map[string]bool)
		if len(nodeVolumes) == 0 {
			return disksAttached, nil
		}
		// Convert VolPaths into canonical form so that it can be compared with the VM device path.
		vmVolumes, err := vs.convertVolPathsToDevicePaths(ctx, nodeVolumes)
		if err != nil {
			glog.Errorf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
			return nil, err
		}
		attached := make(map[string]map[string]bool)
		nodesToRetry, err := disksAreAttach(ctx, vmVolumes, attached, false)
		if err != nil {
			return nil, err
		}
		if len(nodesToRetry) != 0 {
			// Rediscover nodes which are need to be retried
			remainingNodesVolumes := make(map[k8stypes.NodeName][]string)
			for _, nodeName := range nodesToRetry {
				err = vs.nodeManager.RediscoverNode(nodeName)
				if err != nil {
					if err == vclib.ErrNoVMFound {
						glog.V(4).Infof("node %s not found. err: %+v", nodeName, err)
						continue
					}
					glog.Errorf("Failed to rediscover node %s. err: %+v", nodeName, err)
					return nil, err
				}
				remainingNodesVolumes[nodeName] = nodeVolumes[nodeName]
			}
			// If some remaining nodes are still registered
			if len(remainingNodesVolumes) != 0 {
				nodesToRetry, err = disksAreAttach(ctx, remainingNodesVolumes, attached, true)
				if err != nil || len(nodesToRetry) != 0 {
					glog.Errorf("Failed to retry disksAreAttach for nodes %+v. err: %+v", remainingNodesVolumes, err)
					return nil, err
				}
			}
		}
		// Fix: copy the results unconditionally. The original performed this
		// copy inside `if len(nodesToRetry) != 0`, so the common no-retry path
		// returned an empty map even though disks were attached.
		for nodeName, volPaths := range attached {
			disksAttached[convertToK8sType(nodeName)] = volPaths
		}
		glog.V(4).Infof("DisksAreAttach successfully executed. result: %+v", attached)
		return disksAttached, nil
	}
	requestTime := time.Now()
	attached, err := disksAreAttachedInternal(nodeVolumes)
	vclib.RecordvSphereMetric(vclib.OperationDisksAreAttached, requestTime, err)
	return attached, err
}
// CreateVolume creates a volume of given size (in KiB) and return the volume path.
// If the volumeOptions.Datastore is part of datastore cluster for example - [DatastoreCluster/sharedVmfs-0] then
// return value will be [DatastoreCluster/sharedVmfs-0] kubevols/<volume-name>.vmdk
// else return value will be [sharedVmfs-0] kubevols/<volume-name>.vmdk
func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) {
	glog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions)
	createVolumeInternal := func(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) {
		var datastore string
		// If datastore not specified, then use default datastore
		if volumeOptions.Datastore == "" {
			datastore = vs.cfg.Workspace.DefaultDatastore
		} else {
			datastore = volumeOptions.Datastore
		}
		datastore = strings.TrimSpace(datastore)
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
		if err != nil {
			return "", err
		}
		dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
		if err != nil {
			return "", err
		}
		var vmOptions *vclib.VMOptions
		// Storage-policy-based provisioning needs a dummy VM; set that up here.
		if volumeOptions.VSANStorageProfileData != "" || volumeOptions.StoragePolicyName != "" {
			// Acquire a read lock to ensure multiple PVC requests can be processed simultaneously.
			cleanUpDummyVMLock.RLock()
			defer cleanUpDummyVMLock.RUnlock()
			// Create a new background routine that will delete any dummy VM's that are left stale.
			// This routine will get executed for every 5 minutes and gets initiated only once in its entire lifetime.
			cleanUpRoutineInitLock.Lock()
			if !cleanUpRoutineInitialized {
				glog.V(1).Infof("Starting a clean up routine to remove stale dummy VM's")
				go vs.cleanUpDummyVMs(DummyVMPrefixName)
				cleanUpRoutineInitialized = true
			}
			cleanUpRoutineInitLock.Unlock()
			vmOptions, err = vs.setVMOptions(ctx, dc, vs.cfg.Workspace.ResourcePoolPath)
			if err != nil {
				glog.Errorf("Failed to set VM options requires to create a vsphere volume. err: %+v", err)
				return "", err
			}
		}
		// Datastore selection: policy-compatible lookup when only a policy is
		// given; otherwise the named datastore must be shared by all nodes.
		if volumeOptions.StoragePolicyName != "" && volumeOptions.Datastore == "" {
			datastore, err = getPbmCompatibleDatastore(ctx, dc, volumeOptions.StoragePolicyName, vs.nodeManager)
			if err != nil {
				glog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err)
				return "", err
			}
		} else {
			// Since no storage policy is specified but datastore is specified, check
			// if the given datastore is a shared datastore across all node VMs.
			sharedDsList, err := getSharedDatastoresInK8SCluster(ctx, dc, vs.nodeManager)
			if err != nil {
				glog.Errorf("Failed to get shared datastore: %+v", err)
				return "", err
			}
			found := false
			for _, sharedDs := range sharedDsList {
				if datastore == sharedDs.Info.Name {
					found = true
					break
				}
			}
			if !found {
				msg := fmt.Sprintf("The specified datastore %s is not a shared datastore across node VMs", datastore)
				return "", errors.New(msg)
			}
		}
		ds, err := dc.GetDatastoreByName(ctx, datastore)
		if err != nil {
			return "", err
		}
		volumeOptions.Datastore = datastore
		// Ensure the kubevols directory exists; an already-existing directory
		// is not an error.
		kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/"
		err = ds.CreateDirectory(ctx, kubeVolsPath, false)
		if err != nil && err != vclib.ErrFileAlreadyExist {
			glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err)
			return "", err
		}
		volumePath := kubeVolsPath + volumeOptions.Name + ".vmdk"
		disk := diskmanagers.VirtualDisk{
			DiskPath:      volumePath,
			VolumeOptions: volumeOptions,
			VMOptions:     vmOptions,
		}
		volumePath, err = disk.Create(ctx, ds)
		if err != nil {
			glog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, datastore, err)
			return "", err
		}
		// Get the canonical path for the volume path.
		canonicalVolumePath, err = getcanonicalVolumePath(ctx, dc, volumePath)
		if err != nil {
			glog.Errorf("Failed to get canonical vsphere volume path for volume: %s with volumeOptions: %+v on datastore: %s. err: %+v", volumePath, volumeOptions, datastore, err)
			return "", err
		}
		if filepath.Base(datastore) != datastore {
			// If datastore is within cluster, add cluster path to the volumePath
			canonicalVolumePath = strings.Replace(canonicalVolumePath, filepath.Base(datastore), datastore, 1)
		}
		return canonicalVolumePath, nil
	}
	requestTime := time.Now()
	canonicalVolumePath, err = createVolumeInternal(volumeOptions)
	vclib.RecordCreateVolumeMetric(volumeOptions, requestTime, err)
	glog.V(4).Infof("The canonical volume path for the newly created vSphere volume is %q", canonicalVolumePath)
	return canonicalVolumePath, err
}
// DeleteVolume deletes a volume given volume name.
// The disk is removed from the workspace datacenter; latency and outcome are
// recorded as vSphere metrics.
func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
	glog.V(1).Infof("Starting to delete vSphere volume with vmDiskPath: %s", vmDiskPath)
	deleteVolumeInternal := func(vmDiskPath string) error {
		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
		if err != nil {
			return err
		}
		dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
		if err != nil {
			return err
		}
		disk := diskmanagers.VirtualDisk{
			DiskPath:      vmDiskPath,
			VolumeOptions: &vclib.VolumeOptions{},
			VMOptions:     &vclib.VMOptions{},
		}
		err = disk.Delete(ctx, dc)
		if err != nil {
			glog.Errorf("Failed to delete vsphere volume with vmDiskPath: %s. err: %+v", vmDiskPath, err)
		}
		return err
	}
	requestTime := time.Now()
	err := deleteVolumeInternal(vmDiskPath)
	vclib.RecordvSphereMetric(vclib.OperationDeleteVolume, requestTime, err)
	return err
}
// HasClusterID returns true if the cluster has a clusterID
func (vs *VSphere) HasClusterID() bool {
	return true
}
// Notification handler when node is added into k8s cluster.
// NodeAdded registers the new node with the node manager so its VM info can
// be cached. Any error from RegisterNode is ignored here — presumably it logs
// internally; verify against the NodeManager implementation.
func (vs *VSphere) NodeAdded(obj interface{}) {
	node, ok := obj.(*v1.Node)
	if node == nil || !ok {
		glog.Warningf("NodeAdded: unrecognized object %+v", obj)
		return
	}
	glog.V(4).Infof("Node added: %+v", node)
	vs.nodeManager.RegisterNode(node)
}
// Notification handler when node is removed from k8s cluster.
func (vs *VSphere) NodeDeleted(obj interface{}) {
	node, ok := obj.(*v1.Node)
	if !ok || node == nil {
		// Not a *v1.Node (or a typed nil): log and ignore.
		glog.Warningf("NodeDeleted: unrecognized object %+v", obj)
		return
	}
	glog.V(4).Infof("Node deleted: %+v", node)
	vs.nodeManager.UnRegisterNode(node)
}
// NodeManager returns the node manager backing this provider, or nil
// when called on a nil receiver.
func (vs *VSphere) NodeManager() (nodeManager *NodeManager) {
	if vs != nil {
		return vs.nodeManager
	}
	return nil
}
|
/*
Copyright 2014 CoreOS Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wal
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
"os"
"github.com/coreos/etcd/raft"
)
var (
infoType = int64(1)
entryType = int64(2)
stateType = int64(3)
)
// WAL is a simple append-only write-ahead log backed by a single file.
type WAL struct {
	f   *os.File      // underlying log file
	bw  *bufio.Writer // buffered writer over f; flushed by Sync
	buf *bytes.Buffer // scratch buffer reused when encoding records
}
// newWAL wraps an already-opened file in a WAL, attaching a buffered
// writer and a reusable encoding buffer.
func newWAL(f *os.File) *WAL {
	return &WAL{
		f:   f,
		bw:  bufio.NewWriter(f),
		buf: new(bytes.Buffer),
	}
}
// New creates a fresh WAL file at path. It returns os.ErrExist when a
// file is already present there.
func New(path string) (*WAL, error) {
	// O_EXCL makes the existence check and the creation one atomic
	// operation; the original probed with os.Open and then called
	// os.Create, leaving a window in which another process could create
	// (or the probe could miss) the file.
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		if os.IsExist(err) {
			// Preserve the exact sentinel the original API returned.
			return nil, os.ErrExist
		}
		return nil, err
	}
	return newWAL(f), nil
}
// Open opens an existing WAL file at path for reading and appending.
func Open(path string) (*WAL, error) {
	// os.Open opens read-only, so every later Save* would fail when the
	// buffered writer flushes; open read-write instead. (This is the fix
	// the follow-up commit "wal: fix O_RDONLY attr" applies.)
	f, err := os.OpenFile(path, os.O_RDWR, 0)
	if err != nil {
		return nil, err
	}
	return newWAL(f), nil
}
// Sync flushes buffered records into the file and forces the file to
// stable storage.
func (w *WAL) Sync() error {
	err := w.bw.Flush()
	if err != nil {
		return err
	}
	return w.f.Sync()
}
// Close flushes pending records and closes the underlying file, if any.
// Errors from Sync and Close are deliberately discarded.
func (w *WAL) Close() {
	if w.f == nil {
		return
	}
	w.Sync()
	w.f.Close()
}
// SaveInfo writes the node id as the very first record of the log; it
// fails if anything has been written already.
func (w *WAL) SaveInfo(id int64) error {
	if err := w.checkAtHead(); err != nil {
		return err
	}
	w.buf.Reset()
	if err := binary.Write(w.buf, binary.LittleEndian, id); err != nil {
		// Encoding a fixed-size integer into a bytes.Buffer cannot
		// fail; anything else is a programmer error.
		panic(err)
	}
	return writeBlock(w.bw, infoType, w.buf.Bytes())
}
// SaveEntry appends a raft entry record to the log.
func (w *WAL) SaveEntry(e *raft.Entry) error {
	b, err := e.Marshal()
	if err != nil {
		// Propagate instead of panicking: the method already declares an
		// error result, and a marshal failure should not crash the process.
		return err
	}
	return writeBlock(w.bw, entryType, b)
}
// SaveState appends a raft state record to the log using fixed-size
// little-endian encoding (mirrored by loadState).
func (w *WAL) SaveState(s *raft.State) error {
	w.buf.Reset()
	if err := binary.Write(w.buf, binary.LittleEndian, s); err != nil {
		// Propagate instead of panicking: the method already declares an
		// error result.
		return err
	}
	return writeBlock(w.bw, stateType, w.buf.Bytes())
}
// checkAtHead returns an error unless nothing has been written to the
// file or buffered in the writer yet.
func (w *WAL) checkAtHead() error {
	// io.SeekCurrent replaces the deprecated os.SEEK_CUR constant.
	o, err := w.f.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	if o != 0 || w.bw.Buffered() != 0 {
		return fmt.Errorf("cannot write info at %d, expect 0", max(o, int64(w.bw.Buffered())))
	}
	return nil
}
// Node is the in-memory image of a WAL: the node id, its entry
// sequence, and the last recorded raft state.
type Node struct {
	Id    int64
	Ents  []raft.Entry
	State raft.State
}
// LoadNode reads the whole log back and reconstructs the node it
// describes. It must be called before anything has been written through
// this WAL (the file offset must still be at the head).
func (w *WAL) LoadNode() (*Node, error) {
	if err := w.checkAtHead(); err != nil {
		return nil, err
	}
	br := bufio.NewReader(w.f)
	b := &block{}
	// The first record must be the info record carrying the node id.
	err := readBlock(br, b)
	if err != nil {
		return nil, err
	}
	if b.t != infoType {
		return nil, fmt.Errorf("the first block of wal is not infoType but %d", b.t)
	}
	id, err := loadInfo(b.d)
	if err != nil {
		return nil, err
	}
	ents := make([]raft.Entry, 0)
	var state raft.State
	// Replay the remaining records in order until EOF.
	for err = readBlock(br, b); err == nil; err = readBlock(br, b) {
		switch b.t {
		case entryType:
			e, err := loadEntry(b.d)
			if err != nil {
				return nil, err
			}
			// Entries are keyed by 1-based Index: a rewritten entry
			// truncates any stale tail before being appended.
			// NOTE(review): this panics if e.Index is 0 or more than
			// len(ents)+1 — confirm writers keep indexes dense.
			ents = append(ents[:e.Index-1], e)
		case stateType:
			s, err := loadState(b.d)
			if err != nil {
				return nil, err
			}
			// Later state records supersede earlier ones.
			state = s
		default:
			return nil, fmt.Errorf("unexpected block type %d", b.t)
		}
	}
	// EOF is the normal loop exit; anything else is a real read error.
	if err != io.EOF {
		return nil, err
	}
	return &Node{id, ents, state}, nil
}
// loadInfo decodes the 8-byte little-endian node id of an info record.
func loadInfo(d []byte) (int64, error) {
	if len(d) != 8 {
		return 0, fmt.Errorf("len = %d, want 8", len(d))
	}
	return int64(binary.LittleEndian.Uint64(d)), nil
}
// loadEntry decodes a raft entry from d.
func loadEntry(d []byte) (raft.Entry, error) {
	var e raft.Entry
	// Return the decode error instead of panicking: the original panicked
	// on failure, so the error it returned was always nil and the
	// caller's error handling was dead code.
	err := e.Unmarshal(d)
	return e, err
}
// loadState decodes a raft state record using fixed-size little-endian
// binary encoding, mirroring SaveState.
func loadState(d []byte) (raft.State, error) {
	var s raft.State
	buf := bytes.NewBuffer(d)
	err := binary.Read(buf, binary.LittleEndian, &s)
	return s, err
}
func writeInt64(w io.Writer, n int64) error {
return binary.Write(w, binary.LittleEndian, n)
}
func readInt64(r io.Reader) (int64, error) {
var n int64
err := binary.Read(r, binary.LittleEndian, &n)
return n, err
}
// max returns the larger of a and b.
func max(a, b int64) int64 {
	if a < b {
		return b
	}
	return a
}
wal: fix O_RDONLY attr when opening old file
/*
Copyright 2014 CoreOS Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wal
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
"os"
"github.com/coreos/etcd/raft"
)
var (
infoType = int64(1)
entryType = int64(2)
stateType = int64(3)
)
// WAL is a simple append-only write-ahead log backed by a single file.
type WAL struct {
	f   *os.File      // underlying log file
	bw  *bufio.Writer // buffered writer over f; flushed by Sync
	buf *bytes.Buffer // scratch buffer reused when encoding records
}
// newWAL wraps an already-opened file in a WAL, attaching a buffered
// writer and a reusable encoding buffer.
func newWAL(f *os.File) *WAL {
	return &WAL{
		f:   f,
		bw:  bufio.NewWriter(f),
		buf: new(bytes.Buffer),
	}
}
// New creates a fresh WAL file at path. It returns os.ErrExist when a
// file is already present there.
func New(path string) (*WAL, error) {
	// O_EXCL makes the existence check and the creation one atomic
	// operation; the original probed with os.Open and then called
	// os.Create, leaving a window in which another process could create
	// (or the probe could miss) the file.
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		if os.IsExist(err) {
			// Preserve the exact sentinel the original API returned.
			return nil, os.ErrExist
		}
		return nil, err
	}
	return newWAL(f), nil
}
// Open opens an existing WAL file at path for reading and appending.
// O_RDWR (rather than read-only os.Open) is required so that later
// Save* calls can append through the buffered writer.
func Open(path string) (*WAL, error) {
	f, err := os.OpenFile(path, os.O_RDWR, 0)
	if err != nil {
		return nil, err
	}
	return newWAL(f), nil
}
// Sync flushes buffered records into the file and forces the file to
// stable storage.
func (w *WAL) Sync() error {
	err := w.bw.Flush()
	if err != nil {
		return err
	}
	return w.f.Sync()
}
// Close flushes pending records and closes the underlying file, if any.
// Errors from Sync and Close are deliberately discarded.
func (w *WAL) Close() {
	if w.f == nil {
		return
	}
	w.Sync()
	w.f.Close()
}
// SaveInfo writes the node id as the very first record of the log; it
// fails if anything has been written already.
func (w *WAL) SaveInfo(id int64) error {
	if err := w.checkAtHead(); err != nil {
		return err
	}
	w.buf.Reset()
	if err := binary.Write(w.buf, binary.LittleEndian, id); err != nil {
		// Encoding a fixed-size integer into a bytes.Buffer cannot
		// fail; anything else is a programmer error.
		panic(err)
	}
	return writeBlock(w.bw, infoType, w.buf.Bytes())
}
// SaveEntry appends a raft entry record to the log.
func (w *WAL) SaveEntry(e *raft.Entry) error {
	b, err := e.Marshal()
	if err != nil {
		// Propagate instead of panicking: the method already declares an
		// error result, and a marshal failure should not crash the process.
		return err
	}
	return writeBlock(w.bw, entryType, b)
}
// SaveState appends a raft state record to the log using fixed-size
// little-endian encoding (mirrored by loadState).
func (w *WAL) SaveState(s *raft.State) error {
	w.buf.Reset()
	if err := binary.Write(w.buf, binary.LittleEndian, s); err != nil {
		// Propagate instead of panicking: the method already declares an
		// error result.
		return err
	}
	return writeBlock(w.bw, stateType, w.buf.Bytes())
}
// checkAtHead returns an error unless nothing has been written to the
// file or buffered in the writer yet.
func (w *WAL) checkAtHead() error {
	// io.SeekCurrent replaces the deprecated os.SEEK_CUR constant.
	o, err := w.f.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	if o != 0 || w.bw.Buffered() != 0 {
		return fmt.Errorf("cannot write info at %d, expect 0", max(o, int64(w.bw.Buffered())))
	}
	return nil
}
// Node is the in-memory image of a WAL: the node id, its entry
// sequence, and the last recorded raft state.
type Node struct {
	Id    int64
	Ents  []raft.Entry
	State raft.State
}
// LoadNode reads the whole log back and reconstructs the node it
// describes. It must be called before anything has been written through
// this WAL (the file offset must still be at the head).
func (w *WAL) LoadNode() (*Node, error) {
	if err := w.checkAtHead(); err != nil {
		return nil, err
	}
	br := bufio.NewReader(w.f)
	b := &block{}
	// The first record must be the info record carrying the node id.
	err := readBlock(br, b)
	if err != nil {
		return nil, err
	}
	if b.t != infoType {
		return nil, fmt.Errorf("the first block of wal is not infoType but %d", b.t)
	}
	id, err := loadInfo(b.d)
	if err != nil {
		return nil, err
	}
	ents := make([]raft.Entry, 0)
	var state raft.State
	// Replay the remaining records in order until EOF.
	for err = readBlock(br, b); err == nil; err = readBlock(br, b) {
		switch b.t {
		case entryType:
			e, err := loadEntry(b.d)
			if err != nil {
				return nil, err
			}
			// Entries are keyed by 1-based Index: a rewritten entry
			// truncates any stale tail before being appended.
			// NOTE(review): this panics if e.Index is 0 or more than
			// len(ents)+1 — confirm writers keep indexes dense.
			ents = append(ents[:e.Index-1], e)
		case stateType:
			s, err := loadState(b.d)
			if err != nil {
				return nil, err
			}
			// Later state records supersede earlier ones.
			state = s
		default:
			return nil, fmt.Errorf("unexpected block type %d", b.t)
		}
	}
	// EOF is the normal loop exit; anything else is a real read error.
	if err != io.EOF {
		return nil, err
	}
	return &Node{id, ents, state}, nil
}
// loadInfo decodes the 8-byte little-endian node id of an info record.
func loadInfo(d []byte) (int64, error) {
	if len(d) != 8 {
		return 0, fmt.Errorf("len = %d, want 8", len(d))
	}
	return int64(binary.LittleEndian.Uint64(d)), nil
}
// loadEntry decodes a raft entry from d.
func loadEntry(d []byte) (raft.Entry, error) {
	var e raft.Entry
	// Return the decode error instead of panicking: the original panicked
	// on failure, so the error it returned was always nil and the
	// caller's error handling was dead code.
	err := e.Unmarshal(d)
	return e, err
}
// loadState decodes a raft state record using fixed-size little-endian
// binary encoding, mirroring SaveState.
func loadState(d []byte) (raft.State, error) {
	var s raft.State
	buf := bytes.NewBuffer(d)
	err := binary.Read(buf, binary.LittleEndian, &s)
	return s, err
}
func writeInt64(w io.Writer, n int64) error {
return binary.Write(w, binary.LittleEndian, n)
}
func readInt64(r io.Reader) (int64, error) {
var n int64
err := binary.Read(r, binary.LittleEndian, &n)
return n, err
}
// max returns the larger of a and b.
func max(a, b int64) int64 {
	if a < b {
		return b
	}
	return a
}
|
package beertasting
import (
"appengine"
"appengine/datastore"
"appengine/urlfetch"
"appengine/user"
"encoding/json"
"fmt"
"github.com/ant0ine/go-json-rest/rest"
"html/template"
"io/ioutil"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
)
var (
endpoint = url.URL{Scheme: "http", Host: "api.untappd.com", Path: "v4"}
)
type AppengineMiddleware struct{}
// isAuthorized reports whether the current App Engine user may use the
// API: a user must be signed in and, once a Config exists, listed on its
// whitelist. Returns nil when access is allowed.
func isAuthorized(r *http.Request) error {
	c := appengine.NewContext(r)
	u := user.Current(c)
	if u == nil {
		return fmt.Errorf("Not Authorized")
	}
	// allow an initial configuration
	if r.Method == "POST" && r.URL.Path == "/api/admin/config" {
		return nil
	}
	config, err := getConfig(c)
	if err != nil {
		// No config stored yet: only the test account may proceed.
		if u.Email == "test@example.com" {
			return nil
		}
		return err
	}
	// BUG FIX: the whitelist was previously consulted only when getConfig
	// FAILED (and then against a zero-value Config), while a successfully
	// loaded config authorized everyone. Enforce it on the loaded config.
	return config.Whitelist.contains(u.Email)
}
// MiddlewareFunc wraps handler so unauthorized requests are rejected
// with 401 before the handler runs.
func (AppengineMiddleware) MiddlewareFunc(handler rest.HandlerFunc) rest.HandlerFunc {
	return func(w rest.ResponseWriter, r *rest.Request) {
		err := isAuthorized(r.Request)
		if err != nil {
			rest.Error(w, err.Error(), http.StatusUnauthorized)
			return
		}
		handler(w, r)
	}
}
// AppengineAdminMiddleware rejects requests whose current user is not an
// App Engine administrator.
type AppengineAdminMiddleware struct{}

// MiddlewareFunc wraps handler so non-admin requests get a 401.
func (AppengineAdminMiddleware) MiddlewareFunc(handler rest.HandlerFunc) rest.HandlerFunc {
	return func(w rest.ResponseWriter, r *rest.Request) {
		c := appengine.NewContext(r.Request)
		if user.IsAdmin(c) {
			handler(w, r)
			return
		}
		rest.Error(w, "Not Authorized", http.StatusUnauthorized)
	}
}
// init wires up every HTTP route: plain handlers for login/OAuth/feed
// pages, and three go-json-rest handlers with different middleware
// stacks (none / whitelist / whitelist+admin).
func init() {
	http.HandleFunc("/feed", feedHandler)
	http.HandleFunc("/login", loginHandler)
	http.HandleFunc("/logout", logoutHandler)
	http.HandleFunc("/displayFeed", displayFeedHandler)
	http.HandleFunc("/oauth/untappd", oauthUntappdHandler)
	http.HandleFunc("/api/untappd/noauth/", untappdNoAuth)
	// No middleware: /api/user/me must work before the user is whitelisted.
	restNoAuthHandler := rest.ResourceHandler{}
	restNoAuthHandler.SetRoutes(
		&rest.Route{"GET", "/api/user/me", getUserMe},
	)
	// Admin routes require a whitelisted user who is also an admin.
	restAdminHandler := rest.ResourceHandler{
		PreRoutingMiddlewares: []rest.Middleware{
			&AppengineMiddleware{},
			&AppengineAdminMiddleware{},
		},
	}
	restAdminHandler.SetRoutes(
		&rest.Route{"GET", "/api/admin/config", getAdminConfig},
		&rest.Route{"PUT", "/api/admin/config", putAdminConfig},
	)
	// Regular API routes require only a whitelisted user.
	restHandler := rest.ResourceHandler{
		PreRoutingMiddlewares: []rest.Middleware{
			&AppengineMiddleware{},
		},
	}
	restHandler.SetRoutes(
		&rest.Route{"GET", "/api/users", getAllUsers},
		&rest.Route{"POST", "/api/users", postUser},
		&rest.Route{"GET", "/api/users/:id", getUser},
		&rest.Route{"DELETE", "/api/users/:id", deleteUser},
		&rest.Route{"GET", "/api/users/:id/cellars", getAllCellars},
		&rest.Route{"POST", "/api/users/:id/cellars", postCellar},
		&rest.Route{"GET", "/api/users/:id/cellars/:cellar_id", getCellar},
		&rest.Route{"DELETE", "/api/users/:id/cellars/:cellar_id", deleteCellar},
		&rest.Route{"GET", "/api/users/:id/cellars/:cellar_id/beers", getAllBeers},
		&rest.Route{"POST", "/api/users/:id/cellars/:cellar_id/beers", postBeer},
		&rest.Route{"GET", "/api/users/:id/cellars/:cellar_id/beers/:beer_id", getBeer},
		&rest.Route{"DELETE", "/api/users/:id/cellars/:cellar_id/beers/:beer_id", deleteBeer},
	)
	http.Handle("/api/admin/config", &restAdminHandler)
	http.Handle("/api/user/me", &restNoAuthHandler)
	http.Handle("/api/users", &restHandler)
	http.Handle("/api/users/", &restHandler)
}
// stringSlice is a list of strings supporting set-style membership tests.
type stringSlice []string

// contains returns nil when target is present, or an error naming the
// missing value otherwise.
func (ss stringSlice) contains(target string) error {
	for i := range ss {
		if ss[i] == target {
			return nil
		}
	}
	return fmt.Errorf("%s not found", target)
}
type Config struct {
ClientId string `json:"client_id"`
ClientSecret string `json:"client_secret"`
Whitelist stringSlice
}
type IDKeyer interface {
PathParamID() string
Kind() string
}
// datastoreKey builds a datastore key for keyer from the numeric path
// parameter named by keyer.PathParamID(), under the optional parent key.
func datastoreKey(r *rest.Request, keyer IDKeyer, parent *datastore.Key) (*datastore.Key, error) {
	id, err := strconv.Atoi(r.PathParam(keyer.PathParamID()))
	if err != nil {
		return nil, err
	}
	c := appengine.NewContext(r.Request)
	return datastore.NewKey(c, keyer.Kind(), "", int64(id), parent), nil
}
type User struct {
ID int64 `datastore:"-"`
Name string
Email string
}
func (user *User) DecodeJsonPayload(r *rest.Request) error {
if err := r.DecodeJsonPayload(user); err != nil {
return err
}
if user.Name == "" {
return fmt.Errorf("name required")
}
if user.Email == "" {
return fmt.Errorf("email required")
}
return nil
}
func (User) PathParamID() string {
return "id"
}
func (User) Kind() string {
return "User"
}
func (user User) DatastoreKey(r *rest.Request) (*datastore.Key, error) {
return datastoreKey(r, user, nil)
}
func (user *User) DatastoreGet(r *rest.Request) (int, error) {
key, err := user.DatastoreKey(r)
if err != nil {
return http.StatusBadRequest, err
}
c := appengine.NewContext(r.Request)
if err := datastore.Get(c, key, user); err != nil {
return http.StatusInternalServerError, err
}
user.ID = key.IntID()
return http.StatusOK, nil
}
type Users []User
func (users *Users) DatastoreGet(r *rest.Request) (int, error) {
c := appengine.NewContext(r.Request)
*users = Users{}
q := datastore.NewQuery("User")
for t := q.Run(c); ; {
var u User
key, err := t.Next(&u)
if err == datastore.Done {
break
}
if err != nil {
return http.StatusInternalServerError, err
}
u.ID = key.IntID()
*users = append(*users, u)
}
return http.StatusOK, nil
}
type Cellar struct {
ID int64 `datastore:"-"`
Name string
}
func (cellar *Cellar) DecodeJsonPayload(r *rest.Request) error {
if err := r.DecodeJsonPayload(cellar); err != nil {
return err
}
if cellar.Name == "" {
return fmt.Errorf("name required")
}
return nil
}
func (Cellar) PathParamID() string {
return "cellar_id"
}
func (Cellar) Kind() string {
return "Cellar"
}
func (cellar Cellar) DatastoreKey(r *rest.Request) (*datastore.Key, error) {
userKey, err := User{}.DatastoreKey(r)
if err != nil {
return nil, err
}
return datastoreKey(r, cellar, userKey)
}
func (cellar *Cellar) DatastoreGet(r *rest.Request) (int, error) {
key, err := cellar.DatastoreKey(r)
if err != nil {
return http.StatusBadRequest, err
}
c := appengine.NewContext(r.Request)
if err := datastore.Get(c, key, cellar); err != nil {
return http.StatusInternalServerError, err
}
cellar.ID = key.IntID()
return http.StatusOK, nil
}
type Cellars []Cellar
func (cellars *Cellars) DatastoreGet(r *rest.Request) (int, error) {
var user User
c := appengine.NewContext(r.Request)
userKey, err := user.DatastoreKey(r)
if err != nil {
return http.StatusInternalServerError, err
}
*cellars = Cellars{}
q := datastore.NewQuery("Cellar").Ancestor(userKey)
for t := q.Run(c); ; {
var cl Cellar
key, err := t.Next(&cl)
if err == datastore.Done {
break
}
if err != nil {
return http.StatusInternalServerError, err
}
cl.ID = key.IntID()
*cellars = append(*cellars, cl)
}
return http.StatusOK, nil
}
type Beer struct {
ID int64
Name string
}
func (beer *Beer) DecodeJsonPayload(r *rest.Request) error {
if err := r.DecodeJsonPayload(beer); err != nil {
return err
}
if beer.Name == "" {
return fmt.Errorf("name required")
}
return nil
}
func (Beer) PathParamID() string {
return "beer_id"
}
func (Beer) Kind() string {
return "Beer"
}
func (beer Beer) DatastoreKey(r *rest.Request) (*datastore.Key, error) {
cellarKey, err := Cellar{}.DatastoreKey(r)
if err != nil {
return nil, err
}
return datastoreKey(r, beer, cellarKey)
}
func (beer *Beer) DatastoreGet(r *rest.Request) (int, error) {
key, err := beer.DatastoreKey(r)
if err != nil {
return http.StatusBadRequest, err
}
c := appengine.NewContext(r.Request)
if err := datastore.Get(c, key, beer); err != nil {
return http.StatusInternalServerError, err
}
beer.ID = key.IntID()
return http.StatusOK, nil
}
type Beers []Beer
func (beers *Beers) DatastoreGet(r *rest.Request) (int, error) {
c := appengine.NewContext(r.Request)
cellarKey, err := Cellar{}.DatastoreKey(r)
if err != nil {
return http.StatusInternalServerError, err
}
*beers = Beers{}
q := datastore.NewQuery("Beer").Ancestor(cellarKey)
for t := q.Run(c); ; {
var b Beer
key, err := t.Next(&b)
if err == datastore.Done {
break
}
if err != nil {
return http.StatusInternalServerError, err
}
b.ID = key.IntID()
*beers = append(*beers, b)
}
return http.StatusOK, nil
}
// configKey returns the fixed key under which the singleton Config
// entity is stored in the datastore.
func configKey(c appengine.Context) *datastore.Key {
	return datastore.NewKey(c, "Config", "default", 0, nil)
}
// getConfig loads the singleton Config entity from the datastore.
func getConfig(c appengine.Context) (Config, error) {
	var cfg Config
	err := datastore.Get(c, configKey(c), &cfg)
	if err != nil {
		return Config{}, fmt.Errorf("getConfig(): %v", err)
	}
	return cfg, nil
}
// httpCallback builds an http URL on this app's default hostname with
// the given path.
func httpCallback(c appengine.Context, path string) *url.URL {
	return &url.URL{
		Scheme: "http",
		Host:   appengine.DefaultVersionHostname(c),
		Path:   path,
	}
}
// oauthCallback returns the OAuth redirect URL for the named service,
// e.g. "oauth/untappd".
func oauthCallback(c appengine.Context, svc string) *url.URL {
	return httpCallback(c, fmt.Sprintf("oauth/%s", svc))
}
// userLoggedIn returns the signed-in user, or redirects the client to
// the login page and returns (nil, false) when nobody is signed in.
func userLoggedIn(r *http.Request, w http.ResponseWriter) (*user.User, bool) {
	c := appengine.NewContext(r)
	if u := user.Current(c); u != nil {
		return u, true
	}
	loginURL, err := user.LoginURL(c, r.URL.String())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return nil, false
	}
	http.Redirect(w, r, loginURL, http.StatusFound)
	return nil, false
}
// loginHandler redirects the client to the App Engine login page, with
// "/" as the post-login destination.
func loginHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	// Copy the URL by value: r.URL is a pointer, and the original
	// assignment aliased it, so setting Path mutated the incoming request.
	newURL := *r.URL
	newURL.Path = "/"
	u, err := user.LoginURL(c, newURL.String())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	http.Redirect(w, r, u, http.StatusFound)
}
// logoutHandler redirects the client to the App Engine logout page, with
// "/" as the post-logout destination.
func logoutHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	// Copy the URL by value: r.URL is a pointer, and the original
	// assignment aliased it, so setting Path mutated the incoming request.
	newURL := *r.URL
	newURL.Path = "/"
	u, err := user.LogoutURL(c, newURL.String())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	http.Redirect(w, r, u, http.StatusFound)
}
// oauthUntappdHandler completes the Untappd OAuth flow: it exchanges the
// "code" query parameter for an access token, stores the token in a
// cookie, and redirects to the feed page.
func oauthUntappdHandler(w http.ResponseWriter, r *http.Request) {
	_, ok := userLoggedIn(r, w)
	if !ok {
		return
	}
	if len(r.FormValue("code")) == 0 {
		http.Error(w, "missing code parameter", http.StatusInternalServerError)
		return
	}
	var config Config
	var err error
	c := appengine.NewContext(r)
	if config, err = getConfig(c); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Build the token-exchange request against Untappd.
	u := url.URL{Scheme: "https", Host: "untappd.com", Path: "oauth/authorize/"}
	q := u.Query()
	q.Add("client_id", config.ClientId)
	q.Add("client_secret", config.ClientSecret)
	q.Add("response_type", "code")
	q.Add("code", r.FormValue("code"))
	q.Add("redirect_url", oauthCallback(c, "untappd").String())
	u.RawQuery = q.Encode()
	c.Infof("authorize URL: %s", u.String())
	resp, err := urlfetch.Client(c).Get(u.String())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// BUG FIX: the response body was never closed, leaking the connection.
	defer resp.Body.Close()
	buf, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	oauthResponse := struct {
		Response struct {
			AccessToken string `json:"access_token"`
		}
	}{}
	err = json.Unmarshal(buf, &oauthResponse)
	if err != nil {
		err = fmt.Errorf("%s: %s", err.Error(), string(buf))
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Keep the token client-side in a cookie valid for one day.
	expire := time.Now().AddDate(0, 0, 1)
	hostname := appengine.DefaultVersionHostname(c)
	raw := fmt.Sprintf("access_token=%s", oauthResponse.Response.AccessToken)
	cookie := http.Cookie{
		Name:       "access_token",
		Value:      oauthResponse.Response.AccessToken,
		Path:       "/",
		Domain:     hostname,
		Expires:    expire,
		RawExpires: expire.Format(time.UnixDate),
		MaxAge:     86400,
		Secure:     false,
		HttpOnly:   false,
		Raw:        raw,
		Unparsed:   []string{raw},
	}
	http.SetCookie(w, &cookie)
	http.Redirect(w, r, "/displayFeed", http.StatusFound)
}
// displayFeedHandler renders the feed template for the signed-in user,
// pointing it at the Untappd recent-checkins endpoint.
func displayFeedHandler(w http.ResponseWriter, r *http.Request) {
	user, ok := userLoggedIn(r, w)
	if !ok {
		return
	}
	t, err := template.ParseFiles("templates/feed.html")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// BUG FIX: build the feed URL on a local copy. The original mutated
	// the package-level `endpoint`, appending "checkin/recent" again on
	// every request ("v4/checkin/recent/checkin/recent/...").
	feedURL := endpoint
	feedURL.Path = path.Join(feedURL.Path, "checkin/recent")
	s := struct{ Name, FeedRequest string }{user.String(), feedURL.String()}
	if err := t.Execute(w, s); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// feedHandler starts the Untappd OAuth dance by redirecting the signed-in
// user to the Untappd authentication page.
func feedHandler(w http.ResponseWriter, r *http.Request) {
	if _, ok := userLoggedIn(r, w); !ok {
		return
	}
	c := appengine.NewContext(r)
	config, err := getConfig(c)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	untappdOath := url.URL{
		Scheme: "https",
		Host:   "untappd.com",
		Path:   "oauth/authenticate/",
	}
	q := untappdOath.Query()
	q.Add("client_id", config.ClientId)
	q.Add("response_type", "code")
	q.Add("redirect_url", oauthCallback(c, "untappd").String())
	untappdOath.RawQuery = q.Encode()
	http.Redirect(w, r, untappdOath.String(), http.StatusFound)
}
// writeJson serializes v to the response, reporting a 500 on failure.
func writeJson(w rest.ResponseWriter, v interface{}) {
	err := w.WriteJson(v)
	if err != nil {
		rest.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
func putAdminConfig(w rest.ResponseWriter, r *rest.Request) {
var config Config
c := appengine.NewContext(r.Request)
err := r.DecodeJsonPayload(&config)
if err != nil {
err = fmt.Errorf("DecodeJsonPayload(): %s", err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if _, err := datastore.Put(c, configKey(c), &config); err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
writeJson(w, config)
}
func getAdminConfig(w rest.ResponseWriter, r *rest.Request) {
var config Config
c := appengine.NewContext(r.Request)
if err := datastore.Get(c, configKey(c), &config); err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
writeJson(w, config)
}
func getUserMe(w rest.ResponseWriter, r *rest.Request) {
c := appengine.NewContext(r.Request)
u := user.Current(c)
if u == nil {
http.Error(w.(http.ResponseWriter), "not signed in", http.StatusNotFound)
return
}
logoutURL, err := user.LogoutURL(c, r.URL.String())
if err != nil {
http.Error(w.(http.ResponseWriter), err.Error(), http.StatusNotFound)
return
}
writeJson(w, struct {
Name string `json:"name"`
IsAdmin bool `json:"is_admin"`
LogoutURL string `json:"logout_url"`
}{u.String(), user.IsAdmin(c), logoutURL})
}
func getUser(w rest.ResponseWriter, r *rest.Request) {
var user User
if status, err := user.DatastoreGet(r); err != nil {
rest.Error(w, err.Error(), status)
return
}
writeJson(w, user)
}
func getAllUsers(w rest.ResponseWriter, r *rest.Request) {
var users Users
if status, err := users.DatastoreGet(r); err != nil {
rest.Error(w, err.Error(), status)
return
}
w.WriteJson(users)
}
func postUser(w rest.ResponseWriter, r *rest.Request) {
user := User{}
err := user.DecodeJsonPayload(r)
if err != nil {
rest.Error(w, err.Error(), http.StatusBadRequest)
return
}
c := appengine.NewContext(r.Request)
key := datastore.NewIncompleteKey(c, "User", nil)
newKey, err := datastore.Put(c, key, &user)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
user.ID = newKey.IntID()
writeJson(w, user)
}
func deleteUser(w rest.ResponseWriter, r *rest.Request) {
key, err := User{}.DatastoreKey(r)
if err != nil {
rest.Error(w, err.Error(), http.StatusBadRequest)
return
}
c := appengine.NewContext(r.Request)
err = datastore.Delete(c, key)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func getCellar(w rest.ResponseWriter, r *rest.Request) {
var cellar Cellar
if status, err := cellar.DatastoreGet(r); err != nil {
rest.Error(w, err.Error(), status)
return
}
writeJson(w, cellar)
}
func getAllCellars(w rest.ResponseWriter, r *rest.Request) {
var cellars Cellars
if status, err := cellars.DatastoreGet(r); err != nil {
rest.Error(w, err.Error(), status)
return
}
w.WriteJson(cellars)
}
func postCellar(w rest.ResponseWriter, r *rest.Request) {
userKey, err := User{}.DatastoreKey(r)
if err != nil {
rest.Error(w, err.Error(), http.StatusBadRequest)
return
}
cellar := Cellar{}
err = cellar.DecodeJsonPayload(r)
if err != nil {
rest.Error(w, err.Error(), http.StatusBadRequest)
return
}
c := appengine.NewContext(r.Request)
key := datastore.NewIncompleteKey(c, "Cellar", userKey)
newKey, err := datastore.Put(c, key, &cellar)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
cellar.ID = newKey.IntID()
writeJson(w, cellar)
}
func deleteCellar(w rest.ResponseWriter, r *rest.Request) {
key, err := Cellar{}.DatastoreKey(r)
if err != nil {
rest.Error(w, err.Error(), http.StatusBadRequest)
return
}
c := appengine.NewContext(r.Request)
err = datastore.Delete(c, key)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func getBeer(w rest.ResponseWriter, r *rest.Request) {
var beer Beer
if status, err := beer.DatastoreGet(r); err != nil {
rest.Error(w, err.Error(), status)
return
}
writeJson(w, beer)
}
func getAllBeers(w rest.ResponseWriter, r *rest.Request) {
var beers Beers
if status, err := beers.DatastoreGet(r); err != nil {
rest.Error(w, err.Error(), status)
return
}
w.WriteJson(beers)
}
func postBeer(w rest.ResponseWriter, r *rest.Request) {
cellarKey, err := Cellar{}.DatastoreKey(r)
if err != nil {
rest.Error(w, err.Error(), http.StatusBadRequest)
return
}
beer := Beer{}
if err := beer.DecodeJsonPayload(r); err != nil {
rest.Error(w, err.Error(), http.StatusBadRequest)
return
}
c := appengine.NewContext(r.Request)
key := datastore.NewIncompleteKey(c, "Beer", cellarKey)
newKey, err := datastore.Put(c, key, &beer)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
beer.ID = newKey.IntID()
writeJson(w, beer)
}
func deleteBeer(w rest.ResponseWriter, r *rest.Request) {
key, err := Beer{}.DatastoreKey(r)
if err != nil {
rest.Error(w, err.Error(), http.StatusBadRequest)
return
}
c := appengine.NewContext(r.Request)
err = datastore.Delete(c, key)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
// noAuthUntappdURL builds an Untappd API URL for path, carrying over the
// caller's query string and adding the stored client credentials, so the
// request can be made without a user OAuth token.
func noAuthUntappdURL(r *http.Request, path string) (url.URL, error) {
	c := appengine.NewContext(r)
	config, err := getConfig(c)
	if err != nil {
		return url.URL{}, err
	}
	// Work on a copy of the package-level endpoint.
	res := endpoint
	res.RawQuery = r.URL.RawQuery
	q := res.Query()
	q.Add("client_id", config.ClientId)
	q.Add("client_secret", config.ClientSecret)
	res.RawQuery = q.Encode()
	res.Path += path
	return res, nil
}
// untappdNoAuth proxies a small whitelist of Untappd API endpoints
// (currently only GET /search/beer) for authorized users, signing the
// upstream request with the app's client credentials instead of a user
// token, then relaying status, headers and body back to the client.
func untappdNoAuth(w http.ResponseWriter, r *http.Request) {
	if err := isAuthorized(r); err != nil {
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	relPath := strings.TrimPrefix(r.URL.Path, "/api/untappd/noauth")
	var reqURL url.URL
	c := appengine.NewContext(r)
	if r.Method == "GET" {
		switch relPath {
		case "/search/beer":
			var err error
			if reqURL, err = noAuthUntappdURL(r, relPath); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		default:
			http.NotFound(w, r)
			return
		}
	} else {
		// Only GET is supported.
		http.Error(w, fmt.Sprintf("method %s not found", r.Method), http.StatusInternalServerError)
		return
	}
	client := urlfetch.Client(c)
	resp, err := client.Get(reqURL.String())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Relay upstream headers before the status code and body.
	for k, v := range resp.Header {
		w.Header()[k] = v
	}
	w.WriteHeader(resp.StatusCode)
	w.Write(body)
}
use an interface to promote code reuse
package beertasting
import (
"appengine"
"appengine/datastore"
"appengine/urlfetch"
"appengine/user"
"encoding/json"
"fmt"
"github.com/ant0ine/go-json-rest/rest"
"html/template"
"io/ioutil"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
)
var (
endpoint = url.URL{Scheme: "http", Host: "api.untappd.com", Path: "v4"}
)
type AppengineMiddleware struct{}
// isAuthorized reports whether the current App Engine user may use the
// API: a user must be signed in and, once a Config exists, listed on its
// whitelist. Returns nil when access is allowed.
func isAuthorized(r *http.Request) error {
	c := appengine.NewContext(r)
	u := user.Current(c)
	if u == nil {
		return fmt.Errorf("Not Authorized")
	}
	// allow an initial configuration
	if r.Method == "POST" && r.URL.Path == "/api/admin/config" {
		return nil
	}
	config, err := getConfig(c)
	if err != nil {
		// No config stored yet: only the test account may proceed.
		if u.Email == "test@example.com" {
			return nil
		}
		return err
	}
	// BUG FIX: the whitelist was previously consulted only when getConfig
	// FAILED (and then against a zero-value Config), while a successfully
	// loaded config authorized everyone. Enforce it on the loaded config.
	return config.Whitelist.contains(u.Email)
}
func (AppengineMiddleware) MiddlewareFunc(handler rest.HandlerFunc) rest.HandlerFunc {
return func(w rest.ResponseWriter, r *rest.Request) {
if err := isAuthorized(r.Request); err != nil {
rest.Error(w, err.Error(), http.StatusUnauthorized)
return
}
handler(w, r)
}
}
type AppengineAdminMiddleware struct{}
func (AppengineAdminMiddleware) MiddlewareFunc(handler rest.HandlerFunc) rest.HandlerFunc {
return func(w rest.ResponseWriter, r *rest.Request) {
c := appengine.NewContext(r.Request)
if !user.IsAdmin(c) {
rest.Error(w, "Not Authorized", http.StatusUnauthorized)
return
}
handler(w, r)
}
}
func init() {
http.HandleFunc("/feed", feedHandler)
http.HandleFunc("/login", loginHandler)
http.HandleFunc("/logout", logoutHandler)
http.HandleFunc("/displayFeed", displayFeedHandler)
http.HandleFunc("/oauth/untappd", oauthUntappdHandler)
http.HandleFunc("/api/untappd/noauth/", untappdNoAuth)
restNoAuthHandler := rest.ResourceHandler{}
restNoAuthHandler.SetRoutes(
&rest.Route{"GET", "/api/user/me", getUserMe},
)
restAdminHandler := rest.ResourceHandler{
PreRoutingMiddlewares: []rest.Middleware{
&AppengineMiddleware{},
&AppengineAdminMiddleware{},
},
}
restAdminHandler.SetRoutes(
&rest.Route{"GET", "/api/admin/config", getAdminConfig},
&rest.Route{"PUT", "/api/admin/config", putAdminConfig},
)
restHandler := rest.ResourceHandler{
PreRoutingMiddlewares: []rest.Middleware{
&AppengineMiddleware{},
},
}
restHandler.SetRoutes(
&rest.Route{"GET", "/api/users", getAllUsers},
&rest.Route{"POST", "/api/users", postUser},
&rest.Route{"GET", "/api/users/:id", getUser},
&rest.Route{"DELETE", "/api/users/:id", deleteUser},
&rest.Route{"GET", "/api/users/:id/cellars", getAllCellars},
&rest.Route{"POST", "/api/users/:id/cellars", postCellar},
&rest.Route{"GET", "/api/users/:id/cellars/:cellar_id", getCellar},
&rest.Route{"DELETE", "/api/users/:id/cellars/:cellar_id", deleteCellar},
&rest.Route{"GET", "/api/users/:id/cellars/:cellar_id/beers", getAllBeers},
&rest.Route{"POST", "/api/users/:id/cellars/:cellar_id/beers", postBeer},
&rest.Route{"GET", "/api/users/:id/cellars/:cellar_id/beers/:beer_id", getBeer},
&rest.Route{"DELETE", "/api/users/:id/cellars/:cellar_id/beers/:beer_id", deleteBeer},
)
http.Handle("/api/admin/config", &restAdminHandler)
http.Handle("/api/user/me", &restNoAuthHandler)
http.Handle("/api/users", &restHandler)
http.Handle("/api/users/", &restHandler)
}
// stringSlice is a list of strings supporting set-style membership tests.
type stringSlice []string

// contains returns nil when target is present, or an error naming the
// missing value otherwise.
func (ss stringSlice) contains(target string) error {
	for i := range ss {
		if ss[i] == target {
			return nil
		}
	}
	return fmt.Errorf("%s not found", target)
}
type Config struct {
ClientId string `json:"client_id"`
ClientSecret string `json:"client_secret"`
Whitelist stringSlice
}
type IDKeyer interface {
PathParamID() string
Kind() string
}
func datastoreKey(r *rest.Request, keyer IDKeyer, parent *datastore.Key) (*datastore.Key, error) {
id, err := strconv.Atoi(r.PathParam(keyer.PathParamID()))
if err != nil {
return nil, err
}
c := appengine.NewContext(r.Request)
return datastore.NewKey(c, keyer.Kind(), "", int64(id), parent), nil
}
type User struct {
ID int64 `datastore:"-"`
Name string
Email string
}
func (user *User) DecodeJsonPayload(r *rest.Request) error {
if err := r.DecodeJsonPayload(user); err != nil {
return err
}
if user.Name == "" {
return fmt.Errorf("name required")
}
if user.Email == "" {
return fmt.Errorf("email required")
}
return nil
}
func (User) PathParamID() string {
return "id"
}
func (User) Kind() string {
return "User"
}
func (user User) DatastoreKey(r *rest.Request) (*datastore.Key, error) {
return datastoreKey(r, user, nil)
}
func (user *User) DatastoreGet(r *rest.Request) (int, error) {
key, err := user.DatastoreKey(r)
if err != nil {
return http.StatusBadRequest, err
}
c := appengine.NewContext(r.Request)
if err := datastore.Get(c, key, user); err != nil {
return http.StatusInternalServerError, err
}
user.ID = key.IntID()
return http.StatusOK, nil
}
// Users is the collection form of User.
type Users []User

// DatastoreGet loads every User entity into users, returning an HTTP
// status code alongside any error.
func (users *Users) DatastoreGet(r *rest.Request) (int, error) {
	c := appengine.NewContext(r.Request)
	// Reset so repeated calls do not accumulate.
	*users = Users{}
	q := datastore.NewQuery("User")
	for t := q.Run(c); ; {
		var u User
		key, err := t.Next(&u)
		if err == datastore.Done {
			break
		}
		if err != nil {
			return http.StatusInternalServerError, err
		}
		// The ID property is not stored; recover it from the key.
		u.ID = key.IntID()
		*users = append(*users, u)
	}
	return http.StatusOK, nil
}
// Cellar is a datastore entity representing a named cellar owned by a
// User (the user key is its datastore ancestor). ID mirrors the key's
// integer ID and is excluded from the stored properties.
type Cellar struct {
	ID   int64 `datastore:"-"`
	Name string
}

// DecodeJsonPayload decodes the request body into cellar and validates
// that the required Name field is non-empty.
func (cellar *Cellar) DecodeJsonPayload(r *rest.Request) error {
	if err := r.DecodeJsonPayload(cellar); err != nil {
		return err
	}
	if cellar.Name == "" {
		return fmt.Errorf("name required")
	}
	return nil
}

// PathParamID names the request path parameter carrying the cellar ID.
func (Cellar) PathParamID() string {
	return "cellar_id"
}

// Kind returns the datastore kind for cellars.
func (Cellar) Kind() string {
	return "Cellar"
}

// DatastoreKey derives the cellar's key from the request path, using
// the owning user's key (also taken from the path) as parent.
func (cellar Cellar) DatastoreKey(r *rest.Request) (*datastore.Key, error) {
	userKey, err := User{}.DatastoreKey(r)
	if err != nil {
		return nil, err
	}
	return datastoreKey(r, cellar, userKey)
}

// DatastoreGet loads the cellar addressed by the request path into
// cellar, returning an HTTP status code alongside any error.
func (cellar *Cellar) DatastoreGet(r *rest.Request) (int, error) {
	key, err := cellar.DatastoreKey(r)
	if err != nil {
		return http.StatusBadRequest, err
	}
	c := appengine.NewContext(r.Request)
	if err := datastore.Get(c, key, cellar); err != nil {
		return http.StatusInternalServerError, err
	}
	// The ID property is not stored; recover it from the key.
	cellar.ID = key.IntID()
	return http.StatusOK, nil
}
// Cellars is the collection form of Cellar.
type Cellars []Cellar

// DatastoreGet loads all Cellar entities belonging to the user named in
// the request path into cellars, returning an HTTP status code
// alongside any error.
func (cellars *Cellars) DatastoreGet(r *rest.Request) (int, error) {
	c := appengine.NewContext(r.Request)
	userKey, err := User{}.DatastoreKey(r)
	if err != nil {
		// A malformed user ID in the path is a client error (400),
		// matching Cellar.DatastoreGet, not a server error.
		return http.StatusBadRequest, err
	}
	// Reset so repeated calls do not accumulate.
	*cellars = Cellars{}
	q := datastore.NewQuery("Cellar").Ancestor(userKey)
	for t := q.Run(c); ; {
		var cl Cellar
		key, err := t.Next(&cl)
		if err == datastore.Done {
			break
		}
		if err != nil {
			return http.StatusInternalServerError, err
		}
		// The ID property is not stored; recover it from the key.
		cl.ID = key.IntID()
		*cellars = append(*cellars, cl)
	}
	return http.StatusOK, nil
}
// Beer is a datastore entity representing a beer within a Cellar (the
// cellar key is its datastore ancestor). ID mirrors the key's integer
// ID and — like User.ID and Cellar.ID — is excluded from the stored
// properties so a stale copy is never persisted next to the key.
type Beer struct {
	ID   int64 `datastore:"-"`
	Name string
}

// DecodeJsonPayload decodes the request body into beer and validates
// that the required Name field is non-empty.
func (beer *Beer) DecodeJsonPayload(r *rest.Request) error {
	if err := r.DecodeJsonPayload(beer); err != nil {
		return err
	}
	if beer.Name == "" {
		return fmt.Errorf("name required")
	}
	return nil
}

// PathParamID names the request path parameter carrying the beer ID.
func (Beer) PathParamID() string {
	return "beer_id"
}

// Kind returns the datastore kind for beers.
func (Beer) Kind() string {
	return "Beer"
}

// DatastoreKey derives the beer's key from the request path, using the
// owning cellar's key (also taken from the path) as parent.
func (beer Beer) DatastoreKey(r *rest.Request) (*datastore.Key, error) {
	cellarKey, err := Cellar{}.DatastoreKey(r)
	if err != nil {
		return nil, err
	}
	return datastoreKey(r, beer, cellarKey)
}

// DatastoreGet loads the beer addressed by the request path into beer,
// returning an HTTP status code alongside any error.
func (beer *Beer) DatastoreGet(r *rest.Request) (int, error) {
	key, err := beer.DatastoreKey(r)
	if err != nil {
		return http.StatusBadRequest, err
	}
	c := appengine.NewContext(r.Request)
	if err := datastore.Get(c, key, beer); err != nil {
		return http.StatusInternalServerError, err
	}
	beer.ID = key.IntID()
	return http.StatusOK, nil
}
// Beers is the collection form of Beer.
type Beers []Beer

// DatastoreGet loads all Beer entities belonging to the cellar named in
// the request path into beers, returning an HTTP status code alongside
// any error.
func (beers *Beers) DatastoreGet(r *rest.Request) (int, error) {
	c := appengine.NewContext(r.Request)
	cellarKey, err := Cellar{}.DatastoreKey(r)
	if err != nil {
		// A malformed ID in the path is a client error (400),
		// matching Beer.DatastoreGet, not a server error.
		return http.StatusBadRequest, err
	}
	// Reset so repeated calls do not accumulate.
	*beers = Beers{}
	q := datastore.NewQuery("Beer").Ancestor(cellarKey)
	for t := q.Run(c); ; {
		var b Beer
		key, err := t.Next(&b)
		if err == datastore.Done {
			break
		}
		if err != nil {
			return http.StatusInternalServerError, err
		}
		// The ID property is not stored; recover it from the key.
		b.ID = key.IntID()
		*beers = append(*beers, b)
	}
	return http.StatusOK, nil
}
// configKey returns the fixed key of the singleton Config entity.
func configKey(c appengine.Context) *datastore.Key {
	return datastore.NewKey(c, "Config", "default", 0, nil)
}

// getConfig fetches the singleton Config entity from the datastore.
func getConfig(c appengine.Context) (Config, error) {
	var cfg Config
	if err := datastore.Get(c, configKey(c), &cfg); err != nil {
		return Config{}, fmt.Errorf("getConfig(): %v", err)
	}
	return cfg, nil
}
// httpCallback builds an absolute http URL for path on this app's
// default hostname. url.URL.String inserts the missing leading slash
// before a host-relative path, so a path like "oauth/x" renders as
// "http://host/oauth/x".
func httpCallback(c appengine.Context, path string) *url.URL {
	return &url.URL{
		Scheme: "http",
		Host:   appengine.DefaultVersionHostname(c),
		Path:   path,
	}
}

// oauthCallback returns the OAuth redirect URL for the named service,
// e.g. svc="untappd" -> ".../oauth/untappd".
func oauthCallback(c appengine.Context, svc string) *url.URL {
	return httpCallback(c, fmt.Sprintf("oauth/%s", svc))
}
// userLoggedIn returns the current App Engine user and true when signed
// in. Otherwise it redirects the browser to the login page (or writes
// an error) and returns false; callers must return immediately on
// false since the response has already been written.
func userLoggedIn(r *http.Request, w http.ResponseWriter) (*user.User, bool) {
	c := appengine.NewContext(r)
	u := user.Current(c)
	if u != nil {
		return u, true
	}
	// Not signed in: send the user to login, returning here afterwards.
	ur, err := user.LoginURL(c, r.URL.String())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return nil, false
	}
	http.Redirect(w, r, ur, http.StatusFound)
	return nil, false
}
// loginHandler redirects the browser to the App Engine login page,
// returning to the site root ("/") after a successful login.
func loginHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	// Copy the URL value: r.URL is a pointer, so assigning it directly
	// and setting Path would mutate the incoming request's URL.
	newURL := *r.URL
	newURL.Path = "/"
	u, err := user.LoginURL(c, newURL.String())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	http.Redirect(w, r, u, http.StatusFound)
}
// logoutHandler redirects the browser to the App Engine logout page,
// returning to the site root ("/") afterwards.
func logoutHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	// Copy the URL value: r.URL is a pointer, so assigning it directly
	// and setting Path would mutate the incoming request's URL.
	newURL := *r.URL
	newURL.Path = "/"
	u, err := user.LogoutURL(c, newURL.String())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	http.Redirect(w, r, u, http.StatusFound)
}
// oauthUntappdHandler completes the Untappd OAuth flow: it exchanges
// the "code" query parameter for an access token, stores the token in
// a one-day cookie, and redirects to the feed page.
func oauthUntappdHandler(w http.ResponseWriter, r *http.Request) {
	_, ok := userLoggedIn(r, w)
	if !ok {
		return
	}
	if len(r.FormValue("code")) == 0 {
		http.Error(w, "missing code parameter", http.StatusInternalServerError)
		return
	}
	var config Config
	var err error
	c := appengine.NewContext(r)
	if config, err = getConfig(c); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Build the token-exchange URL with the app's client credentials.
	u := url.URL{Scheme: "https", Host: "untappd.com", Path: "oauth/authorize/"}
	q := u.Query()
	q.Add("client_id", config.ClientId)
	q.Add("client_secret", config.ClientSecret)
	q.Add("response_type", "code")
	q.Add("code", r.FormValue("code"))
	q.Add("redirect_url", oauthCallback(c, "untappd").String())
	u.RawQuery = q.Encode()
	// NOTE(review): this logs the URL including client_secret —
	// consider redacting before shipping.
	c.Infof("authorize URL: %s", u.String())
	resp, err := urlfetch.Client(c).Get(u.String())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Close the body so the connection can be reused; the original
	// leaked it.
	defer resp.Body.Close()
	buf, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	oauthResponse := struct {
		Response struct {
			AccessToken string `json:"access_token"`
		}
	}{}
	err = json.Unmarshal(buf, &oauthResponse)
	if err != nil {
		// Include the raw body to aid debugging a malformed response.
		err = fmt.Errorf("%s: %s", err.Error(), string(buf))
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Hand the token to the browser as a cookie valid for one day.
	expire := time.Now().AddDate(0, 0, 1)
	hostname := appengine.DefaultVersionHostname(c)
	raw := fmt.Sprintf("access_token=%s", oauthResponse.Response.AccessToken)
	cookie := http.Cookie{
		Name:       "access_token",
		Value:      oauthResponse.Response.AccessToken,
		Path:       "/",
		Domain:     hostname,
		Expires:    expire,
		RawExpires: expire.Format(time.UnixDate),
		MaxAge:     86400,
		Secure:     false,
		HttpOnly:   false,
		Raw:        raw,
		Unparsed:   []string{raw},
	}
	http.SetCookie(w, &cookie)
	http.Redirect(w, r, "/displayFeed", http.StatusFound)
}
// displayFeedHandler renders the feed template with the signed-in
// user's name and the Untappd recent-checkins URL.
func displayFeedHandler(w http.ResponseWriter, r *http.Request) {
	user, ok := userLoggedIn(r, w)
	if !ok {
		return
	}
	t, err := template.ParseFiles("templates/feed.html")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Work on a copy: appending to the shared package-level endpoint
	// would grow its Path again on every request
	// ("…/checkin/recent/checkin/recent/…").
	feedURL := endpoint
	feedURL.Path = path.Join(feedURL.Path, "checkin/recent")
	s := struct{ Name, FeedRequest string }{user.String(), feedURL.String()}
	if err := t.Execute(w, s); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// feedHandler starts the Untappd OAuth dance by redirecting the
// browser to the authenticate endpoint with the app's client ID and
// the oauth callback as redirect target.
func feedHandler(w http.ResponseWriter, r *http.Request) {
	_, ok := userLoggedIn(r, w)
	if !ok {
		return
	}
	var config Config
	var err error
	c := appengine.NewContext(r)
	if config, err = getConfig(c); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	var untappdOath url.URL
	untappdOath.Scheme = "https"
	untappdOath.Host = "untappd.com"
	untappdOath.Path = "oauth/authenticate/"
	q := untappdOath.Query()
	q.Add("client_id", config.ClientId)
	q.Add("response_type", "code")
	q.Add("redirect_url", oauthCallback(c, "untappd").String())
	untappdOath.RawQuery = q.Encode()
	http.Redirect(w, r, untappdOath.String(), http.StatusFound)
	return
}
// writeJson serializes v to the response, reporting any serialization
// failure to the client as a 500.
func writeJson(w rest.ResponseWriter, v interface{}) {
	err := w.WriteJson(v)
	if err != nil {
		rest.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// putAdminConfig replaces the singleton Config entity with the JSON
// body of the request and echoes the stored config back.
func putAdminConfig(w rest.ResponseWriter, r *rest.Request) {
	var config Config
	c := appengine.NewContext(r.Request)
	err := r.DecodeJsonPayload(&config)
	if err != nil {
		err = fmt.Errorf("DecodeJsonPayload(): %s", err)
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if _, err := datastore.Put(c, configKey(c), &config); err != nil {
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	writeJson(w, config)
}
// getAdminConfig returns the singleton Config entity as JSON.
// Note this includes the client secret; the route is admin-only.
func getAdminConfig(w rest.ResponseWriter, r *rest.Request) {
	var config Config
	c := appengine.NewContext(r.Request)
	if err := datastore.Get(c, configKey(c), &config); err != nil {
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	writeJson(w, config)
}
// getUserMe reports the signed-in App Engine user: display name, admin
// flag, and a logout URL. Responds 404 when nobody is signed in.
func getUserMe(w rest.ResponseWriter, r *rest.Request) {
	c := appengine.NewContext(r.Request)
	u := user.Current(c)
	if u == nil {
		// rest.ResponseWriter wraps an http.ResponseWriter; unwrap to
		// write a plain (non-JSON) error.
		http.Error(w.(http.ResponseWriter), "not signed in", http.StatusNotFound)
		return
	}
	logoutURL, err := user.LogoutURL(c, r.URL.String())
	if err != nil {
		http.Error(w.(http.ResponseWriter), err.Error(), http.StatusNotFound)
		return
	}
	writeJson(w, struct {
		Name      string `json:"name"`
		IsAdmin   bool   `json:"is_admin"`
		LogoutURL string `json:"logout_url"`
	}{u.String(), user.IsAdmin(c), logoutURL})
}
// getUser returns the user addressed by the :id path parameter as JSON.
func getUser(w rest.ResponseWriter, r *rest.Request) {
	var user User
	if status, err := user.DatastoreGet(r); err != nil {
		rest.Error(w, err.Error(), status)
		return
	}
	writeJson(w, user)
}
// getAllUsers returns every stored user as JSON.
func getAllUsers(w rest.ResponseWriter, r *rest.Request) {
	var users Users
	if status, err := users.DatastoreGet(r); err != nil {
		rest.Error(w, err.Error(), status)
		return
	}
	// Use writeJson so encoding failures are reported, consistent with
	// the other handlers, instead of silently ignoring the error.
	writeJson(w, users)
}
// postUser creates a new User from the JSON body, letting the
// datastore allocate its ID, and echoes the stored entity back.
func postUser(w rest.ResponseWriter, r *rest.Request) {
	user := User{}
	err := user.DecodeJsonPayload(r)
	if err != nil {
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	c := appengine.NewContext(r.Request)
	// Incomplete key: the datastore assigns the integer ID on Put.
	key := datastore.NewIncompleteKey(c, "User", nil)
	newKey, err := datastore.Put(c, key, &user)
	if err != nil {
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	user.ID = newKey.IntID()
	writeJson(w, user)
}
// RestKeyer is implemented by entity types that can derive their own
// datastore key from a REST request.
type RestKeyer interface {
	DatastoreKey(r *rest.Request) (*datastore.Key, error)
}

// restDelete removes the entity addressed by the request path.
// A successful delete writes no body (implicit 200).
func restDelete(w rest.ResponseWriter, r *rest.Request, keyer RestKeyer) {
	key, err := keyer.DatastoreKey(r)
	if err != nil {
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	c := appengine.NewContext(r.Request)
	err = datastore.Delete(c, key)
	if err != nil {
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}

// deleteUser deletes the user addressed by the :id path parameter.
func deleteUser(w rest.ResponseWriter, r *rest.Request) {
	restDelete(w, r, User{})
}
// getCellar returns the cellar addressed by the path parameters as JSON.
func getCellar(w rest.ResponseWriter, r *rest.Request) {
	var cellar Cellar
	if status, err := cellar.DatastoreGet(r); err != nil {
		rest.Error(w, err.Error(), status)
		return
	}
	writeJson(w, cellar)
}
// getAllCellars returns all cellars of the addressed user as JSON.
func getAllCellars(w rest.ResponseWriter, r *rest.Request) {
	var cellars Cellars
	if status, err := cellars.DatastoreGet(r); err != nil {
		rest.Error(w, err.Error(), status)
		return
	}
	// Use writeJson so encoding failures are reported, consistent with
	// the other handlers, instead of silently ignoring the error.
	writeJson(w, cellars)
}
// postCellar creates a new Cellar under the addressed user from the
// JSON body and echoes the stored entity back.
func postCellar(w rest.ResponseWriter, r *rest.Request) {
	userKey, err := User{}.DatastoreKey(r)
	if err != nil {
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	cellar := Cellar{}
	err = cellar.DecodeJsonPayload(r)
	if err != nil {
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	c := appengine.NewContext(r.Request)
	// Incomplete key with the user as ancestor; the datastore assigns
	// the integer ID on Put.
	key := datastore.NewIncompleteKey(c, "Cellar", userKey)
	newKey, err := datastore.Put(c, key, &cellar)
	if err != nil {
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	cellar.ID = newKey.IntID()
	writeJson(w, cellar)
}

// deleteCellar deletes the cellar addressed by the path parameters.
func deleteCellar(w rest.ResponseWriter, r *rest.Request) {
	restDelete(w, r, Cellar{})
}
// getBeer returns the beer addressed by the path parameters as JSON.
func getBeer(w rest.ResponseWriter, r *rest.Request) {
	var beer Beer
	if status, err := beer.DatastoreGet(r); err != nil {
		rest.Error(w, err.Error(), status)
		return
	}
	writeJson(w, beer)
}
// getAllBeers returns all beers of the addressed cellar as JSON.
func getAllBeers(w rest.ResponseWriter, r *rest.Request) {
	var beers Beers
	if status, err := beers.DatastoreGet(r); err != nil {
		rest.Error(w, err.Error(), status)
		return
	}
	// Use writeJson so encoding failures are reported, consistent with
	// the other handlers, instead of silently ignoring the error.
	writeJson(w, beers)
}
// postBeer creates a new Beer under the addressed cellar from the JSON
// body and echoes the stored entity back.
func postBeer(w rest.ResponseWriter, r *rest.Request) {
	cellarKey, err := Cellar{}.DatastoreKey(r)
	if err != nil {
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	beer := Beer{}
	if err := beer.DecodeJsonPayload(r); err != nil {
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	c := appengine.NewContext(r.Request)
	// Incomplete key with the cellar as ancestor; the datastore assigns
	// the integer ID on Put.
	key := datastore.NewIncompleteKey(c, "Beer", cellarKey)
	newKey, err := datastore.Put(c, key, &beer)
	if err != nil {
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	beer.ID = newKey.IntID()
	writeJson(w, beer)
}

// deleteBeer deletes the beer addressed by the path parameters.
func deleteBeer(w rest.ResponseWriter, r *rest.Request) {
	restDelete(w, r, Beer{})
}
// noAuthUntappdURL builds an Untappd API URL for path, carrying over
// the caller's query string and adding the app's client credentials.
// It works on a copy of the package-level endpoint value, leaving the
// shared variable untouched.
func noAuthUntappdURL(r *http.Request, path string) (url.URL, error) {
	c := appengine.NewContext(r)
	config, err := getConfig(c)
	if err != nil {
		return url.URL{}, err
	}
	res := endpoint
	res.RawQuery = r.URL.RawQuery
	q := res.Query()
	q.Add("client_id", config.ClientId)
	q.Add("client_secret", config.ClientSecret)
	res.RawQuery = q.Encode()
	res.Path += path
	return res, nil
}
// untappdNoAuth proxies a small whitelist of read-only Untappd API
// calls, authenticating with the app's client credentials instead of a
// user token. Only GET /search/beer is currently allowed; the upstream
// status, headers, and body are mirrored to the client verbatim.
func untappdNoAuth(w http.ResponseWriter, r *http.Request) {
	if err := isAuthorized(r); err != nil {
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	relPath := strings.TrimPrefix(r.URL.Path, "/api/untappd/noauth")
	var reqURL url.URL
	c := appengine.NewContext(r)
	if r.Method == "GET" {
		switch relPath {
		case "/search/beer":
			var err error
			if reqURL, err = noAuthUntappdURL(r, relPath); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		default:
			http.NotFound(w, r)
			return
		}
	} else {
		// NOTE(review): 500 for an unsupported method; 405 Method Not
		// Allowed would be more conventional — confirm before changing.
		http.Error(w, fmt.Sprintf("method %s not found", r.Method), http.StatusInternalServerError)
		return
	}
	client := urlfetch.Client(c)
	resp, err := client.Get(reqURL.String())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Mirror upstream headers and status code before the body.
	for k, v := range resp.Header {
		w.Header()[k] = v
	}
	w.WriteHeader(resp.StatusCode)
	w.Write(body)
}
|
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import (
"bufio"
"encoding/binary"
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/tsdb/fileutil"
)
const (
	// DefaultSegmentSize is the segment size used by New.
	DefaultSegmentSize = 128 * 1024 * 1024 // 128 MB
	// pageSize is the unit in which pages are written to segments.
	pageSize = 32 * 1024 // 32KB
	// recordHeaderSize is type (1B) + length (2B) + CRC32 (4B),
	// matching the layout written in (*WAL).log.
	recordHeaderSize = 7
)

// The table gets initialized with sync.Once but may still cause a race
// with any other use of the crc32 package anywhere. Thus we initialize it
// before.
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
// page is an in memory buffer used to batch disk writes.
// Records bigger than the page size are split and flushed separately.
// A flush is triggered when a single record doesn't fit the page size or
// when the next record can't fit in the remaining free page space.
type page struct {
	alloc   int            // Bytes allocated (pending or flushed) in buf.
	flushed int            // Bytes already written out to the segment.
	buf     [pageSize]byte // Backing storage for the page.
}

// remaining reports how many free bytes are left in the page.
func (p *page) remaining() int {
	return pageSize - p.alloc
}

// full reports whether the free space cannot hold even a record header.
func (p *page) full() bool {
	return p.remaining() < recordHeaderSize
}
// Segment represents a segment file.
type Segment struct {
	*os.File
	dir string
	i   int
}

// Index returns the index of the segment.
func (s *Segment) Index() int { return s.i }

// Dir returns the directory of the segment.
func (s *Segment) Dir() string { return s.dir }
// CorruptionErr is an error that's returned when corruption is encountered.
type CorruptionErr struct {
	Dir     string
	Segment int
	Offset  int64
	Err     error
}

// Error renders the corruption location. A negative Segment means the
// corrupted segment is unknown and only a byte offset is reported.
func (e *CorruptionErr) Error() string {
	if e.Segment >= 0 {
		return fmt.Sprintf("corruption in segment %s at %d: %s", SegmentName(e.Dir, e.Segment), e.Offset, e.Err)
	}
	return fmt.Sprintf("corruption after %d bytes: %s", e.Offset, e.Err)
}
// OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends.
// The file is opened in append mode; a torn trailing page is padded
// with zeros so subsequent writes start on a page boundary.
func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) {
	segName := SegmentName(dir, k)
	f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		return nil, err
	}
	stat, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, err
	}
	// If the last page is torn, fill it with zeros.
	// In case it was torn after all records were written successfully, this
	// will just pad the page and everything will be fine.
	// If it was torn mid-record, a full read (which the caller should do anyway
	// to ensure integrity) will detect it as a corruption by the end.
	if d := stat.Size() % pageSize; d != 0 {
		level.Warn(logger).Log("msg", "last page of the wal is torn, filling it with zeros", "segment", segName)
		if _, err := f.Write(make([]byte, pageSize-d)); err != nil {
			f.Close()
			return nil, errors.Wrap(err, "zero-pad torn page")
		}
	}
	return &Segment{File: f, i: k, dir: dir}, nil
}
// CreateSegment creates a new segment k in dir.
func CreateSegment(dir string, k int) (*Segment, error) {
	name := SegmentName(dir, k)
	f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		return nil, err
	}
	return &Segment{File: f, i: k, dir: dir}, nil
}
// OpenReadSegment opens the segment with the given filename.
// The base name must be the numeric segment index.
func OpenReadSegment(fn string) (*Segment, error) {
	base := filepath.Base(fn)
	k, err := strconv.Atoi(base)
	if err != nil {
		return nil, errors.New("not a valid filename")
	}
	f, err := os.Open(fn)
	if err != nil {
		return nil, err
	}
	return &Segment{File: f, i: k, dir: filepath.Dir(fn)}, nil
}
// WAL is a write ahead log that stores records in segment files.
// It must be read from start to end once before logging new data.
// If an error occurs during read, the repair procedure must be called
// before it's safe to do further writes.
//
// Segments are written to in pages of 32KB, with records possibly split
// across page boundaries.
// Records are never split across segments to allow full segments to be
// safely truncated. It also ensures that torn writes never corrupt records
// beyond the most recent segment.
type WAL struct {
	dir         string
	logger      log.Logger
	segmentSize int
	mtx         sync.RWMutex
	segment     *Segment           // Active segment.
	donePages   int                // Pages written to the segment.
	page        *page              // Active page.
	stopc       chan chan struct{} // Carries the done channel used for shutdown.
	actorc      chan func()        // Queued background work (e.g. segment fsyncs).
	closed      bool               // To allow calling Close() more than once without blocking.
	// Metrics; registered in NewSize when a Registerer is supplied.
	fsyncDuration   prometheus.Summary
	pageFlushes     prometheus.Counter
	pageCompletions prometheus.Counter
	truncateFail    prometheus.Counter
	truncateTotal   prometheus.Counter
}
// New returns a new WAL over the given directory,
// using the default segment size.
func New(logger log.Logger, reg prometheus.Registerer, dir string) (*WAL, error) {
	return NewSize(logger, reg, dir, DefaultSegmentSize)
}

// NewSize returns a new WAL over the given directory.
// New segments are created with the specified size, which must be a
// multiple of pageSize. logger may be nil (no-op) and reg may be nil
// (metrics not registered). A background actor goroutine is started;
// stop it via Close.
func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int) (*WAL, error) {
	if segmentSize%pageSize != 0 {
		return nil, errors.New("invalid segment size")
	}
	if err := os.MkdirAll(dir, 0777); err != nil {
		return nil, errors.Wrap(err, "create dir")
	}
	if logger == nil {
		logger = log.NewNopLogger()
	}
	w := &WAL{
		dir:         dir,
		logger:      logger,
		segmentSize: segmentSize,
		page:        &page{},
		actorc:      make(chan func(), 100),
		stopc:       make(chan chan struct{}),
	}
	w.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "prometheus_tsdb_wal_fsync_duration_seconds",
		Help: "Duration of WAL fsync.",
	})
	w.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_wal_page_flushes_total",
		Help: "Total number of page flushes.",
	})
	w.pageCompletions = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_wal_completed_pages_total",
		Help: "Total number of completed pages.",
	})
	w.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_wal_truncations_failed_total",
		Help: "Total number of WAL truncations that failed.",
	})
	w.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_wal_truncations_total",
		Help: "Total number of WAL truncations attempted.",
	})
	if reg != nil {
		reg.MustRegister(w.fsyncDuration, w.pageFlushes, w.pageCompletions, w.truncateFail, w.truncateTotal)
	}
	// Resume on the last existing segment, or create the first one.
	_, j, err := w.Segments()
	if err != nil {
		return nil, errors.Wrap(err, "get segment range")
	}
	// Fresh dir, no segments yet.
	if j == -1 {
		segment, err := CreateSegment(w.dir, 0)
		if err != nil {
			return nil, err
		}
		if err := w.setSegment(segment); err != nil {
			return nil, err
		}
	} else {
		segment, err := OpenWriteSegment(logger, w.dir, j)
		if err != nil {
			return nil, err
		}
		if err := w.setSegment(segment); err != nil {
			return nil, err
		}
	}
	go w.run()
	return w, nil
}
// Dir returns the directory of the WAL.
func (w *WAL) Dir() string {
	return w.dir
}

// run is the WAL's background actor goroutine. It executes functions
// queued on actorc (segment fsync/close work) until a stop request
// arrives on stopc, then drains remaining work before signaling done.
func (w *WAL) run() {
Loop:
	for {
		select {
		case f := <-w.actorc:
			f()
		case donec := <-w.stopc:
			// Closing actorc terminates the drain loop below; donec is
			// closed last (deferred) so Close observes a full drain.
			close(w.actorc)
			defer close(donec)
			break Loop
		}
	}
	// Drain and process any remaining functions.
	for f := range w.actorc {
		f()
	}
}
// Repair attempts to repair the WAL based on the error.
// It discards all data after the corruption.
// origErr must wrap a *CorruptionErr carrying a valid segment index;
// anything else is rejected.
func (w *WAL) Repair(origErr error) error {
	// We could probably have a mode that only discards torn records right around
	// the corruption to preserve as data much as possible.
	// But that's not generally applicable if the records have any kind of causality.
	// Maybe as an extra mode in the future if mid-WAL corruptions become
	// a frequent concern.
	err := errors.Cause(origErr) // So that we can pick up errors even if wrapped.
	cerr, ok := err.(*CorruptionErr)
	if !ok {
		return errors.Wrap(origErr, "cannot handle error")
	}
	if cerr.Segment < 0 {
		return errors.New("corruption error does not specify position")
	}
	level.Warn(w.logger).Log("msg", "starting corruption repair",
		"segment", cerr.Segment, "offset", cerr.Offset)
	// All segments behind the corruption can no longer be used.
	segs, err := listSegments(w.dir)
	if err != nil {
		return errors.Wrap(err, "list segments")
	}
	level.Warn(w.logger).Log("msg", "deleting all segments newer than corrupted segment", "segment", cerr.Segment)
	for _, s := range segs {
		if w.segment.i == s.index {
			// The active segment needs to be removed,
			// close it first (Windows!). Can be closed safely
			// as we set the current segment to repaired file
			// below.
			if err := w.segment.Close(); err != nil {
				return errors.Wrap(err, "close active segment")
			}
		}
		if s.index <= cerr.Segment {
			continue
		}
		if err := os.Remove(filepath.Join(w.dir, s.name)); err != nil {
			return errors.Wrapf(err, "delete segment:%v", s.index)
		}
	}
	// Regardless of the corruption offset, no record reaches into the previous segment.
	// So we can safely repair the WAL by removing the segment and re-inserting all
	// its records up to the corruption.
	level.Warn(w.logger).Log("msg", "rewrite corrupted segment", "segment", cerr.Segment)
	fn := SegmentName(w.dir, cerr.Segment)
	tmpfn := fn + ".repair"
	if err := fileutil.Rename(fn, tmpfn); err != nil {
		return err
	}
	// Create a clean segment and make it the active one.
	s, err := CreateSegment(w.dir, cerr.Segment)
	if err != nil {
		return err
	}
	if err := w.setSegment(s); err != nil {
		return err
	}
	// Re-read the renamed original and replay its intact records into
	// the fresh segment via the normal Log path.
	f, err := os.Open(tmpfn)
	if err != nil {
		return errors.Wrap(err, "open segment")
	}
	defer f.Close()
	r := NewReader(bufio.NewReader(f))
	for r.Next() {
		// Add records only up to the where the error was.
		if r.Offset() >= cerr.Offset {
			break
		}
		if err := w.Log(r.Record()); err != nil {
			return errors.Wrap(err, "insert record")
		}
	}
	// We expect an error here from r.Err(), so nothing to handle.
	// We explicitly close even when there is a defer for Windows to be
	// able to delete it. The defer is in place to close it in-case there
	// are errors above.
	if err := f.Close(); err != nil {
		return errors.Wrap(err, "close corrupted file")
	}
	if err := os.Remove(tmpfn); err != nil {
		return errors.Wrap(err, "delete corrupted segment")
	}
	return nil
}
// SegmentName builds a segment name for the directory.
func SegmentName(dir string, i int) string {
return filepath.Join(dir, fmt.Sprintf("%08d", i))
}
// nextSegment creates the next segment and closes the previous one.
// Syncing and closing of the previous segment is handed off to the
// actor goroutine so writes are not blocked on fsync.
func (w *WAL) nextSegment() error {
	// Only flush the current page if it actually holds data.
	if w.page.alloc > 0 {
		if err := w.flushPage(true); err != nil {
			return err
		}
	}
	next, err := CreateSegment(w.dir, w.segment.Index()+1)
	if err != nil {
		return errors.Wrap(err, "create new segment file")
	}
	prev := w.segment
	if err := w.setSegment(next); err != nil {
		return err
	}
	// Don't block further writes by fsyncing the last segment.
	w.actorc <- func() {
		if err := w.fsync(prev); err != nil {
			level.Error(w.logger).Log("msg", "sync previous segment", "err", err)
		}
		if err := prev.Close(); err != nil {
			level.Error(w.logger).Log("msg", "close previous segment", "err", err)
		}
	}
	return nil
}
// setSegment installs segment as the active segment and derives the
// completed-page count from the segment's current on-disk size.
func (w *WAL) setSegment(segment *Segment) error {
	w.segment = segment
	// Correctly initialize donePages.
	stat, err := segment.Stat()
	if err != nil {
		return err
	}
	w.donePages = int(stat.Size() / pageSize)
	return nil
}
// flushPage writes the new contents of the page to disk. If no more records will fit into
// the page, the remaining bytes will be set to zero and a new page will be started.
// If clear is true, this is enforced regardless of how many bytes are left in the page.
func (w *WAL) flushPage(clear bool) error {
	w.pageFlushes.Inc()
	p := w.page
	clear = clear || p.full()
	// No more data will fit into the page. Enqueue and clear it.
	if clear {
		p.alloc = pageSize // Write till end of page.
	}
	n, err := w.segment.Write(p.buf[p.flushed:p.alloc])
	if err != nil {
		// Track any bytes that did make it out so a retry does not
		// rewrite them; the page stays dirty and must be retried.
		p.flushed += n
		return err
	}
	p.flushed += n
	// We flushed an entire page, prepare a new one.
	// Only count the completion after the write succeeded — the
	// original incremented the counter before writing, overcounting
	// completed pages on write failure.
	if clear {
		for i := range p.buf {
			p.buf[i] = 0
		}
		p.alloc = 0
		p.flushed = 0
		w.donePages++
		w.pageCompletions.Inc()
	}
	return nil
}
// recType identifies what part of a record a fragment holds.
type recType uint8

const (
	recPageTerm recType = 0 // Rest of page is empty.
	recFull     recType = 1 // Full record.
	recFirst    recType = 2 // First fragment of a record.
	recMiddle   recType = 3 // Middle fragments of a record.
	recLast     recType = 4 // Final fragment of a record.
)

// String returns a human-readable name for the record type.
func (t recType) String() string {
	names := [...]string{
		recPageTerm: "zero",
		recFull:     "full",
		recFirst:    "first",
		recMiddle:   "middle",
		recLast:     "last",
	}
	if int(t) < len(names) {
		return names[t]
	}
	return "<invalid>"
}
// pagesPerSegment returns how many fixed-size pages fit in one segment.
func (w *WAL) pagesPerSegment() int {
	return w.segmentSize / pageSize
}

// Log writes the records into the log.
// Multiple records can be passed at once to reduce writes and increase throughput.
func (w *WAL) Log(recs ...[]byte) error {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	// Callers could just implement their own list record format but adding
	// a bit of extra logic here frees them from that overhead.
	for i, r := range recs {
		// Only the final record of the batch forces a page flush.
		if err := w.log(r, i == len(recs)-1); err != nil {
			return err
		}
	}
	return nil
}
// log writes rec to the log and forces a flush of the current page if its
// the final record of a batch, the record is bigger than the page size or
// the current page is full.
func (w *WAL) log(rec []byte, final bool) error {
	// When the last page flush failed the page will remain full.
	// When the page is full, need to flush it before trying to add more
	// records to it — otherwise the free-space computation below goes
	// negative and slicing rec panics (see tsdb #582).
	if w.page.full() {
		if err := w.flushPage(true); err != nil {
			return err
		}
	}
	// If the record is too big to fit within the active page in the current
	// segment, terminate the active segment and advance to the next one.
	// This ensures that records do not cross segment boundaries.
	left := w.page.remaining() - recordHeaderSize                                // Free space in the active page.
	left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment.
	if len(rec) > left {
		if err := w.nextSegment(); err != nil {
			return err
		}
	}
	// Populate as many pages as necessary to fit the record.
	// Be careful to always do one pass to ensure we write zero-length records.
	for i := 0; i == 0 || len(rec) > 0; i++ {
		p := w.page
		// Find how much of the record we can fit into the page.
		var (
			l    = min(len(rec), (pageSize-p.alloc)-recordHeaderSize)
			part = rec[:l]
			buf  = p.buf[p.alloc:]
			typ  recType
		)
		switch {
		case i == 0 && len(part) == len(rec):
			typ = recFull
		case len(part) == len(rec):
			typ = recLast
		case i == 0:
			typ = recFirst
		default:
			typ = recMiddle
		}
		// Header layout: type (1B), length (2B), CRC32 (4B).
		buf[0] = byte(typ)
		crc := crc32.Checksum(part, castagnoliTable)
		binary.BigEndian.PutUint16(buf[1:], uint16(len(part)))
		binary.BigEndian.PutUint32(buf[3:], crc)
		copy(buf[recordHeaderSize:], part)
		p.alloc += len(part) + recordHeaderSize
		// By definition when a record is split it means its size is bigger than
		// the page boundary so the current page would be full and needs to be flushed.
		// On contrary if we wrote a full record, we can fit more records of the batch
		// into the page before flushing it.
		if final || typ != recFull || w.page.full() {
			if err := w.flushPage(false); err != nil {
				return err
			}
		}
		rec = rec[l:]
	}
	return nil
}
// Segments returns the range [first, last] of currently existing segments.
// If no segments are found, both first and last are -1.
func (w *WAL) Segments() (first, last int, err error) {
	refs, err := listSegments(w.dir)
	if err != nil {
		return 0, 0, err
	}
	if len(refs) == 0 {
		return -1, -1, nil
	}
	first = refs[0].index
	last = refs[len(refs)-1].index
	return first, last, nil
}
// Truncate drops all segments before i. Segments with index >= i are
// kept. Failures are counted in the truncateFail metric.
func (w *WAL) Truncate(i int) (err error) {
	w.truncateTotal.Inc()
	defer func() {
		if err != nil {
			w.truncateFail.Inc()
		}
	}()
	refs, err := listSegments(w.dir)
	if err != nil {
		return err
	}
	for _, r := range refs {
		// refs is sorted by index, so the first kept segment ends the loop.
		if r.index >= i {
			break
		}
		if err = os.Remove(filepath.Join(w.dir, r.name)); err != nil {
			return err
		}
	}
	return nil
}
// fsync flushes the segment file to durable storage and records the
// call duration in the fsync summary metric.
func (w *WAL) fsync(f *Segment) error {
	began := time.Now()
	err := f.File.Sync()
	w.fsyncDuration.Observe(time.Since(began).Seconds())
	return err
}
// Close flushes all writes and closes active segment.
// It stops the background actor goroutine and waits for queued work
// (segment fsyncs) to drain before syncing the active segment.
func (w *WAL) Close() (err error) {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	if w.closed {
		return errors.New("wal already closed")
	}
	// Flush the last page and zero out all its remaining size.
	// We must not flush an empty page as it would falsely signal
	// the segment is done if we start writing to it again after opening.
	if w.page.alloc > 0 {
		if err := w.flushPage(true); err != nil {
			return err
		}
	}
	donec := make(chan struct{})
	w.stopc <- donec
	<-donec
	// NOTE(review): sync/close failures below are logged but Close
	// still returns nil — confirm this best-effort behavior is intended.
	if err = w.fsync(w.segment); err != nil {
		level.Error(w.logger).Log("msg", "sync previous segment", "err", err)
	}
	if err := w.segment.Close(); err != nil {
		level.Error(w.logger).Log("msg", "close previous segment", "err", err)
	}
	w.closed = true
	return nil
}
// segmentRef pairs a segment file name with its parsed numeric index.
type segmentRef struct {
	name  string
	index int
}

// listSegments returns all segment files in dir ordered by index.
// Non-numeric file names are skipped; a gap in the numbering is an error.
func listSegments(dir string) (refs []segmentRef, err error) {
	files, err := fileutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var last int
	for _, fn := range files {
		k, err := strconv.Atoi(fn)
		if err != nil {
			continue
		}
		// NOTE(review): the gap check assumes ReadDir yields names in
		// ascending order — confirm fileutil guarantees sorted output.
		if len(refs) > 0 && k > last+1 {
			return nil, errors.New("segments are not sequential")
		}
		refs = append(refs, segmentRef{name: fn, index: k})
		last = k
	}
	sort.Slice(refs, func(i, j int) bool {
		return refs[i].index < refs[j].index
	})
	return refs, nil
}
// SegmentRange groups segments by the directory and the first and last index it includes.
// A First or Last of -1 leaves that end of the range open.
type SegmentRange struct {
	Dir         string
	First, Last int
}

// NewSegmentsReader returns a new reader over all segments in the directory.
func NewSegmentsReader(dir string) (io.ReadCloser, error) {
	return NewSegmentsRangeReader(SegmentRange{dir, -1, -1})
}
// NewSegmentsRangeReader returns a new reader over the given WAL segment ranges.
// If first or last are -1, the range is open on the respective end.
// Segments from all ranges are concatenated in the order given.
func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) {
	var segs []*Segment
	for _, sgmRange := range sr {
		refs, err := listSegments(sgmRange.Dir)
		if err != nil {
			return nil, errors.Wrapf(err, "list segment in dir:%v", sgmRange.Dir)
		}
		for _, r := range refs {
			if sgmRange.First >= 0 && r.index < sgmRange.First {
				continue
			}
			// refs is sorted by index, so we can stop at the upper bound.
			if sgmRange.Last >= 0 && r.index > sgmRange.Last {
				break
			}
			s, err := OpenReadSegment(filepath.Join(sgmRange.Dir, r.name))
			if err != nil {
				return nil, errors.Wrapf(err, "open segment:%v in dir:%v", r.name, sgmRange.Dir)
			}
			segs = append(segs, s)
		}
	}
	return newSegmentBufReader(segs...), nil
}
// segmentBufReader is a buffered reader that reads in multiples of pages.
// The main purpose is that we are able to track segment and offset for
// corruption reporting. We have to be careful not to increment curr too
// early, as it is used by Reader.Err() to tell Repair which segment is corrupt.
// As such we pad the end of non-page align segments with zeros.
type segmentBufReader struct {
    buf  *bufio.Reader // Buffered view over the current segment.
    segs []*Segment    // All segments, read in order.
    cur  int           // Index into segs.
    off  int           // Offset of read data into current segment.
}
// newSegmentBufReader returns a reader over the given segments, buffered in
// multiples of the page size.
// NOTE(review): segs[0] is indexed unconditionally, so this panics if called
// with an empty segment list (e.g. a range reader over an empty dir) — verify
// callers never pass zero segments.
func newSegmentBufReader(segs ...*Segment) *segmentBufReader {
    return &segmentBufReader{
        buf:  bufio.NewReaderSize(segs[0], 16*pageSize),
        segs: segs,
    }
}
// Close closes every underlying segment and reports the last error seen,
// attempting all closes even when earlier ones fail.
func (r *segmentBufReader) Close() (err error) {
    for i := range r.segs {
        if cerr := r.segs[i].Close(); cerr != nil {
            err = cerr
        }
    }
    return err
}
// Read implements io.Reader.
func (r *segmentBufReader) Read(b []byte) (n int, err error) {
    n, err = r.buf.Read(b)
    r.off += n
    // If we succeeded, or hit a non-EOF, we can stop.
    if err == nil || err != io.EOF {
        return n, err
    }
    // We hit EOF; fake out zero padding at the end of short segments, so we
    // don't increment curr too early and report the wrong segment as corrupt.
    if r.off%pageSize != 0 {
        i := 0
        for ; n+i < len(b) && (r.off+i)%pageSize != 0; i++ {
            b[n+i] = 0
        }
        // Return early, even if we didn't fill b.
        r.off += i
        return n + i, nil
    }
    // There is no more data left in the curr segment and there are no more
    // segments left. Return EOF.
    if r.cur+1 >= len(r.segs) {
        return n, io.EOF
    }
    // Move to next segment.
    r.cur++
    r.off = 0
    r.buf.Reset(r.segs[r.cur])
    return n, nil
}
fix wal panic when page flush fails. (#582)
* fix wal panic when page flush fails.
New records should be added to the page only when the last flush
succeeded. Otherwise the page would remain full and adding a new record
would panic.
Signed-off-by: Krasi Georgiev <92e9bca555ae3f6bf713b708c97f3f6dcff65542@redhat.com>
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import (
"bufio"
"encoding/binary"
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/tsdb/fileutil"
)
const (
    // DefaultSegmentSize is the segment file size used when none is specified.
    DefaultSegmentSize = 128 * 1024 * 1024 // 128 MB
    // pageSize is the unit in which segments are written to disk.
    pageSize = 32 * 1024 // 32KB
    // recordHeaderSize is the per-record framing overhead:
    // 1 byte type, 2 bytes length, 4 bytes CRC32.
    recordHeaderSize = 7
)
// castagnoliTable is the CRC-32 (Castagnoli polynomial) table used to
// checksum records.
// The table gets initialized with sync.Once but may still cause a race
// with any other use of the crc32 package anywhere. Thus we initialize it
// before.
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
// page is an in memory buffer used to batch disk writes.
// Records bigger than the page size are split and flushed separately.
// A flush is triggered when a single records doesn't fit the page size or
// when the next record can't fit in the remaining free page space.
type page struct {
    alloc   int            // Bytes of buf claimed by records so far.
    flushed int            // Bytes of buf already written to disk.
    buf     [pageSize]byte // Page contents; buf[flushed:alloc] is pending.
}
// remaining returns how many unclaimed bytes are left in the page buffer.
func (p *page) remaining() int {
    free := pageSize - p.alloc
    return free
}
// full reports whether the page lacks room for even a record header,
// meaning no further record fragment can be placed in it.
func (p *page) full() bool {
    return p.remaining() < recordHeaderSize
}
// Segment represents a segment file.
type Segment struct {
    *os.File        // Embedded so the segment can be read/written/synced directly.
    dir      string // Directory the segment file lives in.
    i        int    // Numeric index of the segment within the WAL.
}
// Index returns the index of the segment.
func (s *Segment) Index() int {
    return s.i
}
// Dir returns the directory of the segment.
func (s *Segment) Dir() string {
    return s.dir
}
// CorruptionErr is an error that's returned when corruption is encountered.
type CorruptionErr struct {
    Dir     string // WAL directory the corruption was found in.
    Segment int    // Index of the corrupt segment; negative when unknown.
    Offset  int64  // Byte offset of the corruption within the segment.
    Err     error  // Underlying cause.
}
// Error formats the corruption location; the segment name is omitted when
// the segment index is unknown (negative).
func (e *CorruptionErr) Error() string {
    if e.Segment >= 0 {
        return fmt.Sprintf("corruption in segment %s at %d: %s", SegmentName(e.Dir, e.Segment), e.Offset, e.Err)
    }
    return fmt.Sprintf("corruption after %d bytes: %s", e.Offset, e.Err)
}
// OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends.
func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) {
    segName := SegmentName(dir, k)
    // O_APPEND: new records always go to the end of the existing file.
    f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0666)
    if err != nil {
        return nil, err
    }
    stat, err := f.Stat()
    if err != nil {
        f.Close()
        return nil, err
    }
    // If the last page is torn, fill it with zeros.
    // In case it was torn after all records were written successfully, this
    // will just pad the page and everything will be fine.
    // If it was torn mid-record, a full read (which the caller should do anyway
    // to ensure integrity) will detect it as a corruption by the end.
    if d := stat.Size() % pageSize; d != 0 {
        level.Warn(logger).Log("msg", "last page of the wal is torn, filling it with zeros", "segment", segName)
        if _, err := f.Write(make([]byte, pageSize-d)); err != nil {
            f.Close()
            return nil, errors.Wrap(err, "zero-pad torn page")
        }
    }
    return &Segment{File: f, i: k, dir: dir}, nil
}
// CreateSegment creates a new segment k in dir.
func CreateSegment(dir string, k int) (*Segment, error) {
    name := SegmentName(dir, k)
    f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
    if err != nil {
        return nil, err
    }
    seg := &Segment{File: f, i: k, dir: dir}
    return seg, nil
}
// OpenReadSegment opens the segment with the given filename.
func OpenReadSegment(fn string) (*Segment, error) {
    // The base name must be the segment's numeric index.
    k, err := strconv.Atoi(filepath.Base(fn))
    if err != nil {
        return nil, errors.New("not a valid filename")
    }
    f, err := os.Open(fn)
    if err != nil {
        return nil, err
    }
    return &Segment{File: f, i: k, dir: filepath.Dir(fn)}, nil
}
// WAL is a write ahead log that stores records in segment files.
// It must be read from start to end once before logging new data.
// If an error occurs during read, the repair procedure must be called
// before it's safe to do further writes.
//
// Segments are written to in pages of 32KB, with records possibly split
// across page boundaries.
// Records are never split across segments to allow full segments to be
// safely truncated. It also ensures that torn writes never corrupt records
// beyond the most recent segment.
type WAL struct {
    dir         string
    logger      log.Logger
    segmentSize int
    mtx         sync.RWMutex       // Guards all writer state below.
    segment     *Segment           // Active segment.
    donePages   int                // Pages written to the segment.
    page        *page              // Active page.
    stopc       chan chan struct{} // Close handshake with run(); the inner chan is closed when run exits.
    actorc      chan func()        // Serialized background work (e.g. fsync of finished segments).
    closed      bool               // To allow calling Close() more than once without blocking.
    fsyncDuration   prometheus.Summary
    pageFlushes     prometheus.Counter
    pageCompletions prometheus.Counter
    truncateFail    prometheus.Counter
    truncateTotal   prometheus.Counter
}
// New returns a new WAL over the given directory, using the default
// segment size.
func New(logger log.Logger, reg prometheus.Registerer, dir string) (*WAL, error) {
    return NewSize(logger, reg, dir, DefaultSegmentSize)
}
// NewSize returns a new WAL over the given directory.
// New segments are created with the specified size.
func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int) (*WAL, error) {
    // Segments are written in whole pages, so the size must be page-aligned.
    if segmentSize%pageSize != 0 {
        return nil, errors.New("invalid segment size")
    }
    if err := os.MkdirAll(dir, 0777); err != nil {
        return nil, errors.Wrap(err, "create dir")
    }
    if logger == nil {
        logger = log.NewNopLogger()
    }
    w := &WAL{
        dir:         dir,
        logger:      logger,
        segmentSize: segmentSize,
        page:        &page{},
        actorc:      make(chan func(), 100),
        stopc:       make(chan chan struct{}),
    }
    w.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{
        Name: "prometheus_tsdb_wal_fsync_duration_seconds",
        Help: "Duration of WAL fsync.",
    })
    w.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{
        Name: "prometheus_tsdb_wal_page_flushes_total",
        Help: "Total number of page flushes.",
    })
    w.pageCompletions = prometheus.NewCounter(prometheus.CounterOpts{
        Name: "prometheus_tsdb_wal_completed_pages_total",
        Help: "Total number of completed pages.",
    })
    w.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{
        Name: "prometheus_tsdb_wal_truncations_failed_total",
        Help: "Total number of WAL truncations that failed.",
    })
    w.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{
        Name: "prometheus_tsdb_wal_truncations_total",
        Help: "Total number of WAL truncations attempted.",
    })
    if reg != nil {
        reg.MustRegister(w.fsyncDuration, w.pageFlushes, w.pageCompletions, w.truncateFail, w.truncateTotal)
    }
    _, last, err := w.Segments()
    if err != nil {
        return nil, errors.Wrap(err, "get segment range")
    }
    // Open the most recent segment for appending, or create the first one
    // on a fresh directory. Both branches previously duplicated the
    // setSegment call; they are unified here.
    var segment *Segment
    if last == -1 {
        // Fresh dir, no segments yet.
        segment, err = CreateSegment(w.dir, 0)
    } else {
        segment, err = OpenWriteSegment(logger, w.dir, last)
    }
    if err != nil {
        return nil, err
    }
    if err := w.setSegment(segment); err != nil {
        return nil, err
    }
    go w.run()
    return w, nil
}
// Dir returns the directory of the WAL.
func (w *WAL) Dir() string {
    return w.dir
}
// run is the WAL's background actor loop. It executes queued functions from
// actorc until a stop request arrives on stopc, then drains the remaining
// queue and signals completion by closing the channel received on stopc.
func (w *WAL) run() {
Loop:
    for {
        select {
        case f := <-w.actorc:
            f()
        case donec := <-w.stopc:
            // Closing actorc ends the drain loop below; closing donec
            // (deferred) unblocks the Close() caller.
            close(w.actorc)
            defer close(donec)
            break Loop
        }
    }
    // Drain and process any remaining functions.
    for f := range w.actorc {
        f()
    }
}
// Repair attempts to repair the WAL based on the error.
// It discards all data after the corruption.
func (w *WAL) Repair(origErr error) error {
    // We could probably have a mode that only discards torn records right around
    // the corruption to preserve as data much as possible.
    // But that's not generally applicable if the records have any kind of causality.
    // Maybe as an extra mode in the future if mid-WAL corruptions become
    // a frequent concern.
    err := errors.Cause(origErr) // So that we can pick up errors even if wrapped.
    cerr, ok := err.(*CorruptionErr)
    if !ok {
        return errors.Wrap(origErr, "cannot handle error")
    }
    if cerr.Segment < 0 {
        return errors.New("corruption error does not specify position")
    }
    level.Warn(w.logger).Log("msg", "starting corruption repair",
        "segment", cerr.Segment, "offset", cerr.Offset)
    // All segments behind the corruption can no longer be used.
    segs, err := listSegments(w.dir)
    if err != nil {
        return errors.Wrap(err, "list segments")
    }
    level.Warn(w.logger).Log("msg", "deleting all segments newer than corrupted segment", "segment", cerr.Segment)
    for _, s := range segs {
        if w.segment.i == s.index {
            // The active segment needs to be removed,
            // close it first (Windows!). Can be closed safely
            // as we set the current segment to repaired file
            // below.
            if err := w.segment.Close(); err != nil {
                return errors.Wrap(err, "close active segment")
            }
        }
        // Segments at or before the corruption are kept (the corrupt one is
        // rewritten below rather than deleted).
        if s.index <= cerr.Segment {
            continue
        }
        if err := os.Remove(filepath.Join(w.dir, s.name)); err != nil {
            return errors.Wrapf(err, "delete segment:%v", s.index)
        }
    }
    // Regardless of the corruption offset, no record reaches into the previous segment.
    // So we can safely repair the WAL by removing the segment and re-inserting all
    // its records up to the corruption.
    level.Warn(w.logger).Log("msg", "rewrite corrupted segment", "segment", cerr.Segment)
    fn := SegmentName(w.dir, cerr.Segment)
    tmpfn := fn + ".repair"
    if err := fileutil.Rename(fn, tmpfn); err != nil {
        return err
    }
    // Create a clean segment and make it the active one.
    s, err := CreateSegment(w.dir, cerr.Segment)
    if err != nil {
        return err
    }
    if err := w.setSegment(s); err != nil {
        return err
    }
    f, err := os.Open(tmpfn)
    if err != nil {
        return errors.Wrap(err, "open segment")
    }
    defer f.Close()
    r := NewReader(bufio.NewReader(f))
    for r.Next() {
        // Add records only up to the where the error was.
        if r.Offset() >= cerr.Offset {
            break
        }
        if err := w.Log(r.Record()); err != nil {
            return errors.Wrap(err, "insert record")
        }
    }
    // We expect an error here from r.Err(), so nothing to handle.
    // We explicitly close even when there is a defer for Windows to be
    // able to delete it. The defer is in place to close it in-case there
    // are errors above.
    if err := f.Close(); err != nil {
        return errors.Wrap(err, "close corrupted file")
    }
    if err := os.Remove(tmpfn); err != nil {
        return errors.Wrap(err, "delete corrupted segment")
    }
    return nil
}
// SegmentName builds a segment name for the directory. Indexes are rendered
// as fixed-width, zero-padded decimals so lexical and numeric ordering of
// the file names agree.
func SegmentName(dir string, i int) string {
    name := fmt.Sprintf("%08d", i)
    return filepath.Join(dir, name)
}
// nextSegment creates the next segment and closes the previous one.
func (w *WAL) nextSegment() error {
    // Only flush the current page if it actually holds data.
    if w.page.alloc > 0 {
        if err := w.flushPage(true); err != nil {
            return err
        }
    }
    next, err := CreateSegment(w.dir, w.segment.Index()+1)
    if err != nil {
        return errors.Wrap(err, "create new segment file")
    }
    prev := w.segment
    if err := w.setSegment(next); err != nil {
        return err
    }
    // Don't block further writes by fsyncing the last segment.
    // The actor goroutine serializes this with other background work.
    w.actorc <- func() {
        if err := w.fsync(prev); err != nil {
            level.Error(w.logger).Log("msg", "sync previous segment", "err", err)
        }
        if err := prev.Close(); err != nil {
            level.Error(w.logger).Log("msg", "close previous segment", "err", err)
        }
    }
    return nil
}
// setSegment makes segment the active one and recomputes donePages from the
// file's current size so page accounting stays correct after reopening.
func (w *WAL) setSegment(segment *Segment) error {
    w.segment = segment
    // Correctly initialize donePages.
    stat, err := segment.Stat()
    if err != nil {
        return err
    }
    w.donePages = int(stat.Size() / pageSize)
    return nil
}
// flushPage writes the new contents of the page to disk. If no more records will fit into
// the page, the remaining bytes will be set to zero and a new page will be started.
// If clear is true, this is enforced regardless of how many bytes are left in the page.
func (w *WAL) flushPage(clear bool) error {
    w.pageFlushes.Inc()
    p := w.page
    clear = clear || p.full()
    // No more data will fit into the page or an implicit clear.
    // Enqueue and clear it.
    if clear {
        p.alloc = pageSize // Write till end of page.
    }
    // Only the delta since the last flush is written; on error the page is
    // left as-is so the caller can retry (log() re-flushes a full page).
    n, err := w.segment.Write(p.buf[p.flushed:p.alloc])
    if err != nil {
        return err
    }
    p.flushed += n
    // We flushed an entire page, prepare a new one.
    if clear {
        for i := range p.buf {
            p.buf[i] = 0
        }
        p.alloc = 0
        p.flushed = 0
        w.donePages++
        w.pageCompletions.Inc()
    }
    return nil
}
// recType describes how a record fragment is framed within a page.
type recType uint8

const (
    recPageTerm recType = 0 // Rest of page is empty.
    recFull     recType = 1 // Full record.
    recFirst    recType = 2 // First fragment of a record.
    recMiddle   recType = 3 // Middle fragments of a record.
    recLast     recType = 4 // Final fragment of a record.
)

// String returns a human-readable name for the record type.
func (t recType) String() string {
    names := [...]string{"zero", "full", "first", "middle", "last"}
    if int(t) < len(names) {
        return names[t]
    }
    return "<invalid>"
}
// pagesPerSegment returns how many whole pages fit into one segment file.
// segmentSize is validated to be page-aligned in NewSize.
func (w *WAL) pagesPerSegment() int {
    return w.segmentSize / pageSize
}
// Log writes the records into the log.
// Multiple records can be passed at once to reduce writes and increase throughput.
func (w *WAL) Log(recs ...[]byte) error {
    w.mtx.Lock()
    defer w.mtx.Unlock()
    // Callers could just implement their own list record format but adding
    // a bit of extra logic here frees them from that overhead.
    for i, r := range recs {
        // Only the final record of the batch forces a page flush.
        if err := w.log(r, i == len(recs)-1); err != nil {
            return err
        }
    }
    return nil
}
// log writes rec to the log and forces a flush of the current page if:
// - the final record of a batch
// - the record is bigger than the page size
// - the current page is full.
func (w *WAL) log(rec []byte, final bool) error {
    // When the last page flush failed the page will remain full.
    // When the page is full, need to flush it before trying to add more records to it.
    if w.page.full() {
        if err := w.flushPage(true); err != nil {
            return err
        }
    }
    // If the record is too big to fit within the active page in the current
    // segment, terminate the active segment and advance to the next one.
    // This ensures that records do not cross segment boundaries.
    left := w.page.remaining() - recordHeaderSize // Free space in the active page.
    left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment.
    if len(rec) > left {
        if err := w.nextSegment(); err != nil {
            return err
        }
    }
    // Populate as many pages as necessary to fit the record.
    // Be careful to always do one pass to ensure we write zero-length records.
    for i := 0; i == 0 || len(rec) > 0; i++ {
        p := w.page
        // Find how much of the record we can fit into the page.
        var (
            l    = min(len(rec), (pageSize-p.alloc)-recordHeaderSize)
            part = rec[:l]
            buf  = p.buf[p.alloc:]
            typ  recType
        )
        // Classify the fragment relative to the whole record.
        switch {
        case i == 0 && len(part) == len(rec):
            typ = recFull
        case len(part) == len(rec):
            typ = recLast
        case i == 0:
            typ = recFirst
        default:
            typ = recMiddle
        }
        // Header layout: 1 byte type, 2 bytes big-endian length, 4 bytes CRC32.
        buf[0] = byte(typ)
        crc := crc32.Checksum(part, castagnoliTable)
        binary.BigEndian.PutUint16(buf[1:], uint16(len(part)))
        binary.BigEndian.PutUint32(buf[3:], crc)
        copy(buf[recordHeaderSize:], part)
        p.alloc += len(part) + recordHeaderSize
        // By definition when a record is split it means its size is bigger than
        // the page boundary so the current page would be full and needs to be flushed.
        // On contrary if we wrote a full record, we can fit more records of the batch
        // into the page before flushing it.
        if final || typ != recFull || w.page.full() {
            if err := w.flushPage(false); err != nil {
                return err
            }
        }
        rec = rec[l:]
    }
    return nil
}
// Segments returns the range [first, n] of currently existing segments.
// If no segments are found, first and n are -1.
func (w *WAL) Segments() (first, last int, err error) {
    refs, err := listSegments(w.dir)
    if err != nil {
        return 0, 0, err
    }
    if len(refs) == 0 {
        return -1, -1, nil
    }
    // refs is sorted by index, so the ends of the slice are the bounds.
    return refs[0].index, refs[len(refs)-1].index, nil
}
// Truncate drops all segments before i.
func (w *WAL) Truncate(i int) (err error) {
    w.truncateTotal.Inc()
    // Count failures via the named return in a deferred check.
    defer func() {
        if err != nil {
            w.truncateFail.Inc()
        }
    }()
    refs, err := listSegments(w.dir)
    if err != nil {
        return err
    }
    for _, r := range refs {
        // refs is sorted, so the first index >= i ends the deletion.
        if r.index >= i {
            break
        }
        if err = os.Remove(filepath.Join(w.dir, r.name)); err != nil {
            return err
        }
    }
    return nil
}
// fsync syncs the segment file to stable storage and records the duration
// in the fsync summary metric.
func (w *WAL) fsync(f *Segment) error {
    start := time.Now()
    err := f.File.Sync()
    w.fsyncDuration.Observe(time.Since(start).Seconds())
    return err
}
// Close flushes all writes and closes active segment.
func (w *WAL) Close() (err error) {
    w.mtx.Lock()
    defer w.mtx.Unlock()
    if w.closed {
        return errors.New("wal already closed")
    }
    // Flush the last page and zero out all its remaining size.
    // We must not flush an empty page as it would falsely signal
    // the segment is done if we start writing to it again after opening.
    if w.page.alloc > 0 {
        if err := w.flushPage(true); err != nil {
            return err
        }
    }
    // Stop the background actor and wait for it to drain its queue.
    donec := make(chan struct{})
    w.stopc <- donec
    <-donec
    // NOTE(review): fsync/close failures are logged but swallowed — the
    // function still returns nil (the named err is shadowed/overwritten).
    // Confirm whether callers should see these errors instead.
    if err = w.fsync(w.segment); err != nil {
        level.Error(w.logger).Log("msg", "sync previous segment", "err", err)
    }
    if err := w.segment.Close(); err != nil {
        level.Error(w.logger).Log("msg", "close previous segment", "err", err)
    }
    w.closed = true
    return nil
}
// segmentRef pairs a WAL segment file name with its parsed numeric index.
type segmentRef struct {
    name  string // File name within the WAL directory (zero-padded decimal).
    index int    // Numeric index parsed from name.
}
// listSegments returns the segment files in dir ordered by their numeric
// index. Directory entries whose names are not plain integers are skipped.
// An error is returned when the numeric indices contain a gap.
func listSegments(dir string) (refs []segmentRef, err error) {
    files, err := fileutil.ReadDir(dir)
    if err != nil {
        return nil, err
    }
    var prev int
    for _, name := range files {
        idx, convErr := strconv.Atoi(name)
        if convErr != nil {
            // Not a segment file; ignore it.
            continue
        }
        if len(refs) > 0 && idx > prev+1 {
            return nil, errors.New("segments are not sequential")
        }
        refs = append(refs, segmentRef{name: name, index: idx})
        prev = idx
    }
    sort.Slice(refs, func(a, b int) bool { return refs[a].index < refs[b].index })
    return refs, nil
}
// SegmentRange groups segments by the directory and the first and last index it includes.
type SegmentRange struct {
    Dir         string // Directory holding the segment files.
    First, Last int    // Inclusive index bounds; -1 leaves the respective end open.
}
// NewSegmentsReader returns a new reader over all segments in the directory.
func NewSegmentsReader(dir string) (io.ReadCloser, error) {
    // An open range (-1, -1) selects every segment present in dir.
    return NewSegmentsRangeReader(SegmentRange{dir, -1, -1})
}
// NewSegmentsRangeReader returns a new reader over the given WAL segment ranges.
// If first or last are -1, the range is open on the respective end.
func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) {
    var segs []*Segment

    // closeAll releases segments opened so far when a later step fails, so
    // file descriptors are not leaked on the error path (previously they
    // were left open).
    closeAll := func() {
        for _, s := range segs {
            s.Close()
        }
    }

    for _, sgmRange := range sr {
        refs, err := listSegments(sgmRange.Dir)
        if err != nil {
            closeAll()
            return nil, errors.Wrapf(err, "list segment in dir:%v", sgmRange.Dir)
        }
        for _, r := range refs {
            // Skip segments below the requested range start.
            if sgmRange.First >= 0 && r.index < sgmRange.First {
                continue
            }
            // refs is sorted, so the first index past Last ends this dir.
            if sgmRange.Last >= 0 && r.index > sgmRange.Last {
                break
            }
            s, err := OpenReadSegment(filepath.Join(sgmRange.Dir, r.name))
            if err != nil {
                closeAll()
                return nil, errors.Wrapf(err, "open segment:%v in dir:%v", r.name, sgmRange.Dir)
            }
            segs = append(segs, s)
        }
    }
    return newSegmentBufReader(segs...), nil
}
// segmentBufReader is a buffered reader that reads in multiples of pages.
// The main purpose is that we are able to track segment and offset for
// corruption reporting. We have to be careful not to increment curr too
// early, as it is used by Reader.Err() to tell Repair which segment is corrupt.
// As such we pad the end of non-page align segments with zeros.
type segmentBufReader struct {
    buf  *bufio.Reader // Buffered view over the current segment.
    segs []*Segment    // All segments, read in order.
    cur  int           // Index into segs.
    off  int           // Offset of read data into current segment.
}
// newSegmentBufReader returns a reader over the given segments, buffered in
// multiples of the page size.
func newSegmentBufReader(segs ...*Segment) *segmentBufReader {
    // Guard against an empty segment list: previously segs[0] was indexed
    // unconditionally, which panics when a caller (e.g. a range reader over
    // an empty dir) supplies no segments. io.MultiReader() with no sources
    // yields io.EOF immediately, and Read then reports EOF since
    // cur+1 >= len(segs).
    var src io.Reader = io.MultiReader()
    if len(segs) > 0 {
        src = segs[0]
    }
    return &segmentBufReader{
        buf:  bufio.NewReaderSize(src, 16*pageSize),
        segs: segs,
    }
}
// Close closes every underlying segment and reports the last error seen,
// attempting all closes even when earlier ones fail.
func (r *segmentBufReader) Close() (err error) {
    for i := range r.segs {
        if cerr := r.segs[i].Close(); cerr != nil {
            err = cerr
        }
    }
    return err
}
// Read implements io.Reader.
func (r *segmentBufReader) Read(b []byte) (n int, err error) {
    n, err = r.buf.Read(b)
    r.off += n
    // If we succeeded, or hit a non-EOF, we can stop.
    if err == nil || err != io.EOF {
        return n, err
    }
    // We hit EOF; fake out zero padding at the end of short segments, so we
    // don't increment curr too early and report the wrong segment as corrupt.
    if r.off%pageSize != 0 {
        i := 0
        for ; n+i < len(b) && (r.off+i)%pageSize != 0; i++ {
            b[n+i] = 0
        }
        // Return early, even if we didn't fill b.
        r.off += i
        return n + i, nil
    }
    // There is no more data left in the curr segment and there are no more
    // segments left. Return EOF.
    if r.cur+1 >= len(r.segs) {
        return n, io.EOF
    }
    // Move to next segment.
    r.cur++
    r.off = 0
    r.buf.Reset(r.segs[r.cur])
    return n, nil
}
|
package gpio
import (
"container/heap"
"fmt"
"io"
"os"
"syscall"
"time"
)
// watcherAction enumerates the commands the watch goroutine understands.
type watcherAction int

const (
    watcherAdd watcherAction = iota // Start watching a pin.
    watcherRemove                   // Stop watching a pin.
    watcherClose                    // Shut the watch goroutine down.
)
// watcherCmd is a single command sent to the watch goroutine over cmdChan.
type watcherCmd struct {
    pin    Pin           // Pin the action applies to (zero value for close).
    action watcherAction // What to do with the pin.
}
// WatcherNotification represents a single pin change
// The new value of the pin numbered by Pin is Value
type WatcherNotification struct {
    Pin   uint // Kernel pin number that changed.
    Value uint // New value read from the pin.
}
// fdHeap is a max-heap of file descriptors implementing heap.Interface.
// Keeping the maximum at the root lets fds[0]+1 serve as the nfds argument
// to select(2).
type fdHeap []uintptr

func (h fdHeap) Len() int { return len(h) }

// Less is actually greater (we want a max heap)
func (h fdHeap) Less(i, j int) bool { return h[i] > h[j] }

func (h fdHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }

// Push appends a descriptor; call via heap.Push, not directly.
func (h *fdHeap) Push(x interface{}) {
    *h = append(*h, x.(uintptr))
}

// Pop removes and returns the last element; call via heap.Pop, not directly.
func (h *fdHeap) Pop() interface{} {
    old := *h
    n := len(old)
    x := old[n-1]
    *h = old[0 : n-1]
    return x
}

// FdSet builds a syscall.FdSet with one bit set per descriptor in the heap.
func (h fdHeap) FdSet() *syscall.FdSet {
    fdset := &syscall.FdSet{}
    for _, val := range h {
        // BUG FIX: the modulo must be parenthesized. "1 << uint(val) % 64"
        // parses as "(1 << uint(val)) % 64" because << and % share
        // precedence in Go, which sets the wrong bit for any fd >= 6 and
        // no bit at all for fd >= 64.
        fdset.Bits[val/64] |= 1 << (uint(val) % 64)
    }
    return fdset
}
// Buffer sizes for the command and notification channels; sends beyond these
// either queue (commands) or are dropped (notifications, see notify).
const watcherCmdChanLen = 32
const notificationLen = 32
// Watcher provides asynchronous notifications on input changes
// The user should supply it pins to watch with AddPin and then wait for changes with Watch
// Alternately, users may receive directly from the Notification channel
type Watcher struct {
    pins         map[uintptr]Pin          // Watched pins keyed by their value-file fd.
    fds          fdHeap                   // Max-heap of watched fds (root feeds select's nfds).
    cmdChan      chan watcherCmd          // Commands from the public API into the watch goroutine.
    Notification chan WatcherNotification // Buffered stream of pin changes.
}
// NewWatcher creates a new Watcher instance for asynchronous inputs
// and starts its background watch goroutine.
func NewWatcher() *Watcher {
    w := &Watcher{
        pins:         make(map[uintptr]Pin),
        fds:          fdHeap{},
        cmdChan:      make(chan watcherCmd, watcherCmdChanLen),
        Notification: make(chan WatcherNotification, notificationLen),
    }
    heap.Init(&w.fds)
    go w.watch()
    return w
}
// notify reads every pin whose bit is set in fdset and publishes the new
// value on the Notification channel, dropping the event when the channel
// buffer is full. A pin whose read reports io.EOF is removed from the watch.
func (w *Watcher) notify(fdset *syscall.FdSet) {
    for _, fd := range w.fds {
        // BUG FIX: parenthesize the modulo. "1 << uint(fd) % 64" parses as
        // "(1 << uint(fd)) % 64" (<< and % share precedence in Go), so the
        // wrong bit was tested for any fd >= 6.
        if (fdset.Bits[fd/64] & (1 << (uint(fd) % 64))) != 0 {
            pin := w.pins[fd]
            val, err := pin.Read()
            if err != nil {
                if err == io.EOF {
                    // The pin's value file is gone; stop watching it.
                    w.removeFd(fd)
                    continue
                }
                fmt.Printf("failed to read pinfile, %s", err)
                os.Exit(1)
            }
            msg := WatcherNotification{
                Pin:   pin.Number,
                Value: val,
            }
            // Non-blocking send: drop the notification rather than stall
            // the watch loop when the buffer is full.
            select {
            case w.Notification <- msg:
            default:
            }
        }
    }
}
// fdSelect polls the watched descriptors with select(2) using a one-second
// timeout, then dispatches notifications for any fds flagged readable.
func (w *Watcher) fdSelect() {
    timeval := &syscall.Timeval{
        Sec:  1,
        Usec: 0,
    }
    fdset := w.fds.FdSet()
    // nfds must be the highest watched fd plus one; w.fds is a max-heap so
    // w.fds[0] is the largest. Convert to int before adding 1 so the
    // increment happens in the signed domain select expects, rather than in
    // unsigned uintptr arithmetic.
    changed, err := doSelect(int(w.fds[0])+1, nil, nil, fdset, timeval)
    if err != nil {
        fmt.Printf("failed to call syscall.Select, %s", err)
        os.Exit(1)
    }
    if changed {
        w.notify(fdset)
    }
}
// addPin registers the pin's value-file fd in both the lookup map and the
// fd heap. Runs only on the watch goroutine.
func (w *Watcher) addPin(p Pin) {
    fd := p.f.Fd()
    w.pins[fd] = p
    heap.Push(&w.fds, fd)
}
// removeFd drops fd from the heap, closes the pin's file, and deletes the
// map entry. Runs only on the watch goroutine.
func (w *Watcher) removeFd(fd uintptr) {
    // heap operates on an array index, so search heap for fd
    for index, v := range w.fds {
        if v == fd {
            heap.Remove(&w.fds, index)
            break
        }
    }
    pin := w.pins[fd]
    pin.f.Close()
    delete(w.pins, fd)
}
// removePin is only a wrapper around removeFd
// it finds fd given pin and then calls removeFd
func (w *Watcher) removePin(p Pin) {
    // we don't index by pin, so go looking
    for fd, pin := range w.pins {
        if pin.Number == p.Number {
            // found pin
            w.removeFd(fd)
            return
        }
    }
}
// doCmd applies a single watcher command and reports whether the watch
// loop should keep running (false only after a close command).
func (w *Watcher) doCmd(cmd watcherCmd) (shouldContinue bool) {
    switch cmd.action {
    case watcherClose:
        return false
    case watcherAdd:
        w.addPin(cmd.pin)
    case watcherRemove:
        w.removePin(cmd.pin)
    }
    return true
}
// recv drains all currently queued commands without blocking. It returns
// false as soon as a close command is processed, true once the queue is empty.
func (w *Watcher) recv() (shouldContinue bool) {
    for {
        select {
        case cmd := <-w.cmdChan:
            if !w.doCmd(cmd) {
                return false
            }
        default:
            return true
        }
    }
}
// watch is the Watcher's event loop: poll the watched fds with a timed
// select(2), then process queued commands, until a close command arrives.
func (w *Watcher) watch() {
    for {
        if len(w.fds) == 0 {
            // Nothing to poll; sleep for the period a timed-out select
            // call would have taken so the loop doesn't spin.
            time.Sleep(1 * time.Second)
        } else {
            w.fdSelect()
        }
        if !w.recv() {
            return
        }
    }
}
// AddPin adds a new pin to be watched for changes
// The pin provided should be the pin known by the kernel
func (w *Watcher) AddPin(p uint) {
    // Export the pin as an input triggering on both edges, then hand it
    // to the watch goroutine.
    pin := NewInput(p)
    setEdgeTrigger(pin, edgeBoth)
    w.cmdChan <- watcherCmd{
        pin:    pin,
        action: watcherAdd,
    }
}
// RemovePin stops the watcher from watching the specified pin
func (w *Watcher) RemovePin(p uint) {
    // Only the number is needed; the watch goroutine resolves the fd.
    pin := Pin{
        Number: p,
    }
    w.cmdChan <- watcherCmd{
        pin:    pin,
        action: watcherRemove,
    }
}
// Watch blocks until one change occurs on one of the watched pins
// It returns the pin which changed and its new value
// Because the Watcher is not perfectly realtime it may miss very high frequency changes
// If that happens, it's possible to see consecutive changes with the same value
// Also, if the input is connected to a mechanical switch, the user of this library must deal with debouncing
// Users can either use Watch() or receive from Watcher.Notification directly
func (w *Watcher) Watch() (p uint, v uint) {
    notification := <-w.Notification
    return notification.Pin, notification.Value
}
// Close stops the watcher and releases all resources
func (w *Watcher) Close() {
    // The watch goroutine exits after processing this command.
    w.cmdChan <- watcherCmd{
        pin:    Pin{},
        action: watcherClose,
    }
}
Fix watcher bug (incorrect fdset when watching with select)
package gpio
import (
"container/heap"
"fmt"
"io"
"os"
"syscall"
"time"
)
// watcherAction enumerates the commands the watch goroutine understands.
type watcherAction int

const (
    watcherAdd watcherAction = iota // Start watching a pin.
    watcherRemove                   // Stop watching a pin.
    watcherClose                    // Shut the watch goroutine down.
)
// watcherCmd is a single command sent to the watch goroutine over cmdChan.
type watcherCmd struct {
    pin    Pin           // Pin the action applies to (zero value for close).
    action watcherAction // What to do with the pin.
}
// WatcherNotification represents a single pin change
// The new value of the pin numbered by Pin is Value
type WatcherNotification struct {
    Pin   uint // Kernel pin number that changed.
    Value uint // New value read from the pin.
}
// fdHeap is a max-heap of file descriptors implementing heap.Interface.
// Keeping the maximum at the root lets fds[0]+1 serve as the nfds
// argument to select(2).
type fdHeap []uintptr

func (h fdHeap) Len() int { return len(h) }

// Less is actually greater (we want a max heap)
func (h fdHeap) Less(i, j int) bool { return h[i] > h[j] }

func (h fdHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }

// Push appends a descriptor; call via heap.Push, not directly.
func (h *fdHeap) Push(x interface{}) {
    *h = append(*h, x.(uintptr))
}

// Pop removes and returns the last element; call via heap.Pop, not directly.
func (h *fdHeap) Pop() interface{} {
    old := *h
    n := len(old)
    x := old[n-1]
    *h = old[0 : n-1]
    return x
}

// FdSet builds a syscall.FdSet with one bit set per descriptor in the heap.
// The modulo is parenthesized deliberately: << and % share precedence in Go.
func (h fdHeap) FdSet() *syscall.FdSet {
    fdset := &syscall.FdSet{}
    for _, val := range h {
        fdset.Bits[val/64] |= 1 << (uint(val) % 64)
    }
    return fdset
}
// Buffer sizes for the command and notification channels; sends beyond these
// either queue (commands) or are dropped (notifications, see notify).
const watcherCmdChanLen = 32
const notificationLen = 32
// Watcher provides asynchronous notifications on input changes
// The user should supply it pins to watch with AddPin and then wait for changes with Watch
// Alternately, users may receive directly from the Notification channel
type Watcher struct {
    pins         map[uintptr]Pin          // Watched pins keyed by their value-file fd.
    fds          fdHeap                   // Max-heap of watched fds (root feeds select's nfds).
    cmdChan      chan watcherCmd          // Commands from the public API into the watch goroutine.
    Notification chan WatcherNotification // Buffered stream of pin changes.
}
// NewWatcher creates a new Watcher instance for asynchronous inputs
// and starts its background watch goroutine.
func NewWatcher() *Watcher {
    w := &Watcher{
        pins:         make(map[uintptr]Pin),
        fds:          fdHeap{},
        cmdChan:      make(chan watcherCmd, watcherCmdChanLen),
        Notification: make(chan WatcherNotification, notificationLen),
    }
    heap.Init(&w.fds)
    go w.watch()
    return w
}
// notify reads every pin whose bit is set in fdset and publishes the new
// value on the Notification channel, dropping the event when the channel
// buffer is full. A pin whose read reports io.EOF is removed from the watch.
func (w *Watcher) notify(fdset *syscall.FdSet) {
    for _, fd := range w.fds {
        // Modulo parenthesized deliberately: << and % share precedence.
        if (fdset.Bits[fd/64] & (1 << (uint(fd) % 64))) != 0 {
            pin := w.pins[fd]
            val, err := pin.Read()
            if err != nil {
                if err == io.EOF {
                    // The pin's value file is gone; stop watching it.
                    w.removeFd(fd)
                    continue
                }
                fmt.Printf("failed to read pinfile, %s", err)
                os.Exit(1)
            }
            msg := WatcherNotification{
                Pin:   pin.Number,
                Value: val,
            }
            // Non-blocking send: drop the notification rather than stall
            // the watch loop when the buffer is full.
            select {
            case w.Notification <- msg:
            default:
            }
        }
    }
}
// fdSelect polls the watched descriptors with select(2) using a one-second
// timeout, then dispatches notifications for any fds flagged readable.
func (w *Watcher) fdSelect() {
    timeval := &syscall.Timeval{
        Sec:  1,
        Usec: 0,
    }
    fdset := w.fds.FdSet()
    // nfds is the highest watched fd plus one; w.fds is a max-heap so
    // w.fds[0] is the largest descriptor.
    changed, err := doSelect(int(w.fds[0])+1, nil, nil, fdset, timeval)
    if err != nil {
        fmt.Printf("failed to call syscall.Select, %s", err)
        os.Exit(1)
    }
    if changed {
        w.notify(fdset)
    }
}
// addPin registers the pin's value-file fd in both the lookup map and the
// fd heap. Runs only on the watch goroutine.
func (w *Watcher) addPin(p Pin) {
    fd := p.f.Fd()
    w.pins[fd] = p
    heap.Push(&w.fds, fd)
}
// removeFd drops fd from the heap, closes the pin's file, and deletes the
// map entry. Runs only on the watch goroutine.
func (w *Watcher) removeFd(fd uintptr) {
    // heap operates on an array index, so search heap for fd
    for index, v := range w.fds {
        if v == fd {
            heap.Remove(&w.fds, index)
            break
        }
    }
    pin := w.pins[fd]
    pin.f.Close()
    delete(w.pins, fd)
}
// removePin is only a wrapper around removeFd
// it finds fd given pin and then calls removeFd
func (w *Watcher) removePin(p Pin) {
    // we don't index by pin, so go looking
    for fd, pin := range w.pins {
        if pin.Number == p.Number {
            // found pin
            w.removeFd(fd)
            return
        }
    }
}
// doCmd applies a single watcher command and reports whether the watch
// loop should keep running (false only after a close command).
func (w *Watcher) doCmd(cmd watcherCmd) (shouldContinue bool) {
    switch cmd.action {
    case watcherClose:
        return false
    case watcherAdd:
        w.addPin(cmd.pin)
    case watcherRemove:
        w.removePin(cmd.pin)
    }
    return true
}
// recv drains all currently queued commands without blocking. It returns
// false as soon as a close command is processed, true once the queue is empty.
func (w *Watcher) recv() (shouldContinue bool) {
    for {
        select {
        case cmd := <-w.cmdChan:
            if !w.doCmd(cmd) {
                return false
            }
        default:
            return true
        }
    }
}
// watch is the Watcher's event loop: poll the watched fds with a timed
// select(2), then process queued commands, until a close command arrives.
func (w *Watcher) watch() {
    for {
        if len(w.fds) == 0 {
            // Nothing to poll; sleep for the period a timed-out select
            // call would have taken so the loop doesn't spin.
            time.Sleep(1 * time.Second)
        } else {
            w.fdSelect()
        }
        if !w.recv() {
            return
        }
    }
}
// AddPin adds a new pin to be watched for changes
// The pin provided should be the pin known by the kernel
func (w *Watcher) AddPin(p uint) {
    // Export the pin as an input triggering on both edges, then hand it
    // to the watch goroutine.
    pin := NewInput(p)
    setEdgeTrigger(pin, edgeBoth)
    w.cmdChan <- watcherCmd{
        pin:    pin,
        action: watcherAdd,
    }
}
// RemovePin stops the watcher from watching the specified pin
func (w *Watcher) RemovePin(p uint) {
    // Only the number is needed; the watch goroutine resolves the fd.
    pin := Pin{
        Number: p,
    }
    w.cmdChan <- watcherCmd{
        pin:    pin,
        action: watcherRemove,
    }
}
// Watch blocks until one change occurs on one of the watched pins
// It returns the pin which changed and its new value
// Because the Watcher is not perfectly realtime it may miss very high frequency changes
// If that happens, it's possible to see consecutive changes with the same value
// Also, if the input is connected to a mechanical switch, the user of this library must deal with debouncing
// Users can either use Watch() or receive from Watcher.Notification directly
func (w *Watcher) Watch() (p uint, v uint) {
    notification := <-w.Notification
    return notification.Pin, notification.Value
}
// Close stops the watcher and releases all resources
func (w *Watcher) Close() {
    // The watch goroutine exits after processing this command.
    w.cmdChan <- watcherCmd{
        pin:    Pin{},
        action: watcherClose,
    }
}
|
package main
import (
"fmt"
"github.com/longears/sortpixels/myimage"
"github.com/longears/sortpixels/utils"
"os"
"runtime"
"runtime/debug"
"strings"
)
// How many times to repeat the vertical & horizontal sort step
const N_SORTS = 6

// How many threads to run in parallel
var THREADPOOL_SIZE int

// init sizes the worker pool and the Go scheduler to the machine's CPU count.
func init() {
	THREADPOOL_SIZE = runtime.NumCPU()
	runtime.GOMAXPROCS(runtime.NumCPU())
}

//================================================================================
// IMAGE MODIFICATION ALGORITHMS

// Read the image from the path inFn,
// sort the pixels,
// and save the result to the path outFn.
// NOTE(review): despite the historical comment claiming an error return,
// this function returns nothing; decode failures presumably surface inside
// myimage.MakeMyImageFromPath — confirm.
func sortPixels(inFn, outFn string) {
	myImage := myimage.MakeMyImageFromPath(inFn)
	fmt.Println(" sorting")
	// Alternate vertical and horizontal passes N_SORTS times.
	for ii := 0; ii < N_SORTS; ii++ {
		myImage.SortColumns("v", THREADPOOL_SIZE)
		myImage.SortRows("h2", THREADPOOL_SIZE)
	}
	// Finish with one more vertical pass so the last operation is a column sort.
	myImage.SortColumns("v", THREADPOOL_SIZE)
	myImage.SaveAs(outFn)
}

// congregatePixels repeatedly applies the Congregate transform to the image
// at inFn, saving a numbered JPEG frame after each of the 100 iterations and
// the final result at outFn.
func congregatePixels(inFn, outFn string) {
	myImage := myimage.MakeMyImageFromPath(inFn)
	fmt.Println(" congregating")
	for ii := 0; ii < 100; ii++ {
		myImage.Congregate(10, 1.0)
		// Per-iteration snapshot, e.g. out.sorted.png.000.jpg
		tempFn := outFn + "." + fmt.Sprintf("%03d", ii) + ".jpg"
		fmt.Println(tempFn)
		myImage.SaveAs(tempFn)
	}
	myImage.SaveAs(outFn)
}

//================================================================================
// MAIN

func main() {
	fmt.Println("------------------------------------------------------------\\")
	defer fmt.Println("------------------------------------------------------------/")
	// handle command line
	if len(os.Args) < 2 {
		fmt.Println()
		fmt.Println(" usage: sort input.png [input2.jpg input3.png ...]")
		fmt.Println()
		fmt.Println(" Sort the pixels in the image(s) and save to the ./output/ folder.")
		fmt.Println()
		return
	}
	// make output directory if needed
	if !utils.PathExists("output") {
		err := os.Mkdir("output", 0755)
		if err != nil {
			panic(fmt.Sprintf("%v", err))
		}
	}
	// open, sort, and save input images
	for inputII := 1; inputII < len(os.Args); inputII++ {
		inFn := os.Args[inputII]
		// build outFn from inFn: insert ".sorted" before the extension,
		// strip any directory prefix, and place the result under ./output/
		outFn := inFn
		if strings.Contains(outFn, ".") {
			dotii := strings.LastIndex(outFn, ".")
			outFn = outFn[:dotii] + ".sorted." + outFn[dotii+1:]
		} else {
			outFn += ".sorted"
		}
		if strings.Contains(outFn, "/") {
			outFn = outFn[strings.LastIndex(outFn, "/")+1:]
		}
		outFn = "output/" + outFn
		// read, sort, and save (unless file has already been sorted)
		fmt.Println(inFn)
		if utils.PathExists(outFn) {
			fmt.Println(" SKIPPING: already exists")
		} else {
			//sortPixels(inFn, outFn)
			congregatePixels(inFn, outFn)
		}
		// attempt to give memory back to the OS
		debug.FreeOSMemory()
		fmt.Println()
	}
}
Scramble image before congregating
package main
import (
"fmt"
"github.com/longears/sortpixels/myimage"
"github.com/longears/sortpixels/utils"
"os"
"runtime"
"runtime/debug"
"strings"
)
// How many times to repeat the vertical & horizontal sort step
const N_SORTS = 6

// How many threads to run in parallel
var THREADPOOL_SIZE int

// init sizes the worker pool and the Go scheduler to the machine's CPU count.
func init() {
	THREADPOOL_SIZE = runtime.NumCPU()
	runtime.GOMAXPROCS(runtime.NumCPU())
}

//================================================================================
// IMAGE MODIFICATION ALGORITHMS

// Read the image from the path inFn,
// sort the pixels,
// and save the result to the path outFn.
// NOTE(review): despite the historical comment claiming an error return,
// this function returns nothing — confirm error handling lives in myimage.
func sortPixels(inFn, outFn string) {
	myImage := myimage.MakeMyImageFromPath(inFn)
	fmt.Println(" sorting")
	for ii := 0; ii < N_SORTS; ii++ {
		myImage.SortColumns("v", THREADPOOL_SIZE)
		myImage.SortRows("h2", THREADPOOL_SIZE)
	}
	myImage.SortColumns("v", THREADPOOL_SIZE)
	myImage.SaveAs(outFn)
}

// congregatePixels scrambles the image with random column/row sorts, then
// runs 100 Congregate iterations, saving a numbered JPEG frame per iteration
// and the final result at outFn.
func congregatePixels(inFn, outFn string) {
	myImage := myimage.MakeMyImageFromPath(inFn)
	fmt.Println(" scrambling")
	// Randomize pixel order first so congregation starts from noise.
	myImage.SortColumns("random", THREADPOOL_SIZE)
	myImage.SortRows("random", THREADPOOL_SIZE)
	fmt.Println(" congregating")
	for ii := 0; ii < 100; ii++ {
		myImage.Congregate(10, 1.0)
		tempFn := outFn + "." + fmt.Sprintf("%03d", ii) + ".jpg"
		fmt.Println(tempFn)
		myImage.SaveAs(tempFn)
	}
	myImage.SaveAs(outFn)
}

//================================================================================
// MAIN

func main() {
	fmt.Println("------------------------------------------------------------\\")
	defer fmt.Println("------------------------------------------------------------/")
	// handle command line
	if len(os.Args) < 2 {
		fmt.Println()
		fmt.Println(" usage: sort input.png [input2.jpg input3.png ...]")
		fmt.Println()
		fmt.Println(" Sort the pixels in the image(s) and save to the ./output/ folder.")
		fmt.Println()
		return
	}
	// make output directory if needed
	if !utils.PathExists("output") {
		err := os.Mkdir("output", 0755)
		if err != nil {
			panic(fmt.Sprintf("%v", err))
		}
	}
	// open, sort, and save input images
	for inputII := 1; inputII < len(os.Args); inputII++ {
		inFn := os.Args[inputII]
		// build outFn from inFn: insert ".sorted" before the extension,
		// strip any directory prefix, and place the result under ./output/
		outFn := inFn
		if strings.Contains(outFn, ".") {
			dotii := strings.LastIndex(outFn, ".")
			outFn = outFn[:dotii] + ".sorted." + outFn[dotii+1:]
		} else {
			outFn += ".sorted"
		}
		if strings.Contains(outFn, "/") {
			outFn = outFn[strings.LastIndex(outFn, "/")+1:]
		}
		outFn = "output/" + outFn
		// read, sort, and save (unless file has already been sorted)
		fmt.Println(inFn)
		if utils.PathExists(outFn) {
			fmt.Println(" SKIPPING: already exists")
		} else {
			//sortPixels(inFn, outFn)
			congregatePixels(inFn, outFn)
		}
		// attempt to give memory back to the OS
		debug.FreeOSMemory()
		fmt.Println()
	}
}
|
package main
import (
"fmt"
"github.com/longears/sortpixels/myimage"
"github.com/longears/sortpixels/utils"
"os"
"runtime"
"runtime/debug"
"strings"
)
// How many times to repeat the vertical & horizontal sort step
const N_SORTS = 6

// How many threads to run in parallel
var THREADPOOL_SIZE int

// init sizes the worker pool and the Go scheduler to the machine's CPU count.
func init() {
	THREADPOOL_SIZE = runtime.NumCPU()
	runtime.GOMAXPROCS(runtime.NumCPU())
}

//================================================================================
// IMAGE MODIFICATION ALGORITHMS

// Read the image from the path inFn,
// sort the pixels,
// and save the result to the path outFn.
// NOTE(review): despite the historical comment claiming an error return,
// this function returns nothing — confirm error handling lives in myimage.
func sortPixels(inFn, outFn string) {
	myImage := myimage.MakeMyImageFromPath(inFn)
	fmt.Println(" sorting")
	for ii := 0; ii < N_SORTS; ii++ {
		myImage.SortColumns("v", THREADPOOL_SIZE)
		myImage.SortRows("h2", THREADPOOL_SIZE)
	}
	myImage.SortColumns("v", THREADPOOL_SIZE)
	myImage.SaveAs(outFn)
}

// congregatePixels runs 5 Congregate iterations on the image at inFn and
// saves the final result at outFn; the scramble step and per-iteration
// snapshots are currently disabled.
func congregatePixels(inFn, outFn string) {
	myImage := myimage.MakeMyImageFromPath(inFn)
	////fmt.Println(" scrambling")
	////myImage.SortColumns("random", THREADPOOL_SIZE)
	////myImage.SortRows("random", THREADPOOL_SIZE)
	fmt.Println(" congregating")
	for ii := 0; ii < 5; ii++ {
		myImage.Congregate(7, 50) // thumb size in pixels, percent of image visited per iteration
		tempFn := outFn + "." + fmt.Sprintf("%03d", ii) + ".png"
		fmt.Println(tempFn)
		//myImage.SaveAs(tempFn)
	}
	//fmt.Println(" showing thumb")
	//myImage.ShowThumb(0.015)
	myImage.SaveAs(outFn)
}

//================================================================================
// MAIN

func main() {
	fmt.Println("------------------------------------------------------------\\")
	defer fmt.Println("------------------------------------------------------------/")
	// handle command line
	if len(os.Args) < 2 {
		fmt.Println()
		fmt.Println(" usage: sort input.png [input2.jpg input3.png ...]")
		fmt.Println()
		fmt.Println(" Sort the pixels in the image(s) and save to the ./output/ folder.")
		fmt.Println()
		return
	}
	// make output directory if needed
	if !utils.PathExists("output") {
		err := os.Mkdir("output", 0755)
		if err != nil {
			panic(fmt.Sprintf("%v", err))
		}
	}
	// open, sort, and save input images
	for inputII := 1; inputII < len(os.Args); inputII++ {
		inFn := os.Args[inputII]
		// build outFn from inFn: replace the extension with ".sorted.png",
		// strip any directory prefix, and place the result under ./output/
		outFn := inFn
		if strings.Contains(outFn, ".") {
			dotii := strings.LastIndex(outFn, ".")
			outFn = outFn[:dotii] + ".sorted.png"
		} else {
			outFn += ".sorted"
		}
		if strings.Contains(outFn, "/") {
			outFn = outFn[strings.LastIndex(outFn, "/")+1:]
		}
		outFn = "output/" + outFn
		// read, sort, and save (unless file has already been sorted)
		fmt.Println(inFn)
		if utils.PathExists(outFn) {
			fmt.Println(" SKIPPING: already exists")
		} else {
			//sortPixels(inFn, outFn)
			congregatePixels(inFn, outFn)
		}
		// attempt to give memory back to the OS
		debug.FreeOSMemory()
		fmt.Println()
	}
}
Parameter tweak
package main
import (
"fmt"
"github.com/longears/sortpixels/myimage"
"github.com/longears/sortpixels/utils"
"os"
"runtime"
"runtime/debug"
"strings"
)
// How many times to repeat the vertical & horizontal sort step
const N_SORTS = 6

// How many threads to run in parallel
var THREADPOOL_SIZE int

// init sizes the worker pool and the Go scheduler to the machine's CPU count.
func init() {
	THREADPOOL_SIZE = runtime.NumCPU()
	runtime.GOMAXPROCS(runtime.NumCPU())
}

//================================================================================
// IMAGE MODIFICATION ALGORITHMS

// Read the image from the path inFn,
// sort the pixels,
// and save the result to the path outFn.
// NOTE(review): despite the historical comment claiming an error return,
// this function returns nothing — confirm error handling lives in myimage.
func sortPixels(inFn, outFn string) {
	myImage := myimage.MakeMyImageFromPath(inFn)
	fmt.Println(" sorting")
	for ii := 0; ii < N_SORTS; ii++ {
		myImage.SortColumns("v", THREADPOOL_SIZE)
		myImage.SortRows("h2", THREADPOOL_SIZE)
	}
	myImage.SortColumns("v", THREADPOOL_SIZE)
	myImage.SaveAs(outFn)
}

// congregatePixels runs 10 Congregate iterations on the image at inFn and
// saves the final result at outFn; the scramble step and per-iteration
// snapshots are currently disabled.
func congregatePixels(inFn, outFn string) {
	myImage := myimage.MakeMyImageFromPath(inFn)
	////fmt.Println(" scrambling")
	////myImage.SortColumns("random", THREADPOOL_SIZE)
	////myImage.SortRows("random", THREADPOOL_SIZE)
	fmt.Println(" congregating")
	for ii := 0; ii < 10; ii++ {
		myImage.Congregate(5, 25) // thumb size in pixels, percent of image visited per iteration
		tempFn := outFn + "." + fmt.Sprintf("%03d", ii) + ".png"
		fmt.Println(tempFn)
		//myImage.SaveAs(tempFn)
	}
	//fmt.Println(" showing thumb")
	//myImage.ShowThumb(0.015)
	myImage.SaveAs(outFn)
}

//================================================================================
// MAIN

func main() {
	fmt.Println("------------------------------------------------------------\\")
	defer fmt.Println("------------------------------------------------------------/")
	// handle command line
	if len(os.Args) < 2 {
		fmt.Println()
		fmt.Println(" usage: sort input.png [input2.jpg input3.png ...]")
		fmt.Println()
		fmt.Println(" Sort the pixels in the image(s) and save to the ./output/ folder.")
		fmt.Println()
		return
	}
	// make output directory if needed
	if !utils.PathExists("output") {
		err := os.Mkdir("output", 0755)
		if err != nil {
			panic(fmt.Sprintf("%v", err))
		}
	}
	// open, sort, and save input images
	for inputII := 1; inputII < len(os.Args); inputII++ {
		inFn := os.Args[inputII]
		// build outFn from inFn: replace the extension with ".sorted.png",
		// strip any directory prefix, and place the result under ./output/
		outFn := inFn
		if strings.Contains(outFn, ".") {
			dotii := strings.LastIndex(outFn, ".")
			outFn = outFn[:dotii] + ".sorted.png"
		} else {
			outFn += ".sorted"
		}
		if strings.Contains(outFn, "/") {
			outFn = outFn[strings.LastIndex(outFn, "/")+1:]
		}
		outFn = "output/" + outFn
		// read, sort, and save (unless file has already been sorted)
		fmt.Println(inFn)
		if utils.PathExists(outFn) {
			fmt.Println(" SKIPPING: already exists")
		} else {
			//sortPixels(inFn, outFn)
			congregatePixels(inFn, outFn)
		}
		// attempt to give memory back to the OS
		debug.FreeOSMemory()
		fmt.Println()
	}
}
|
/*
* Minio Client (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"encoding/gob"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/minio/mc/pkg/client"
"github.com/minio/minio/pkg/probe"
)
// sortedList is an on-disk, gob-encoded list of client.Content entries,
// written once by Create and then read back sequentially by List/Match.
type sortedList struct {
	name    string          // full path of the backing file on disk
	file    *os.File        // backing file, opened read-write
	dec     *gob.Decoder    // reader over file for List/Match
	enc     *gob.Encoder    // writer over file for Create
	current client.Content  // Match's read cursor into the list
}
// getSortedListDir returns the directory that holds on-disk sorted lists,
// rooted under the mc config directory.
func getSortedListDir() (string, *probe.Error) {
	configDir, err := getMcConfigDir()
	if err != nil {
		return "", err.Trace()
	}
	return filepath.Join(configDir, golbalSortedListDir), nil
}
// createSortedListDir makes sure the sorted-list directory exists (mode 0700).
func createSortedListDir() *probe.Error {
	dir, err := getSortedListDir()
	if err != nil {
		return err.Trace()
	}
	if _, statErr := os.Stat(dir); statErr == nil {
		// Directory already present; nothing to do.
		return nil
	}
	if mkErr := os.MkdirAll(dir, 0700); mkErr != nil {
		return probe.NewError(mkErr)
	}
	return nil
}
// Create create an on disk sorted file from clnt
// It writes every listed content entry, gob-encoded, to
// <sortedListDir>/<id>, then rewinds the file for subsequent List/Match.
// Broken symlinks, missing files, and permission errors are logged and
// skipped; any other listing error aborts.
func (sl *sortedList) Create(clnt client.Client, id string) *probe.Error {
	var e error
	if err := createSortedListDir(); err != nil {
		return err.Trace()
	}
	sortedListDir, err := getSortedListDir()
	if err != nil {
		return err.Trace()
	}
	sl.name = filepath.Join(sortedListDir, id)
	// O_EXCL: fail rather than clobber a stale file with the same id.
	sl.file, e = os.OpenFile(sl.name, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
	if e != nil {
		return probe.NewError(e)
	}
	sl.enc = gob.NewEncoder(sl.file)
	sl.dec = gob.NewDecoder(sl.file)
	for content := range clnt.List(true) {
		if content.Err != nil {
			switch err := content.Err.ToGoError().(type) {
			case client.ISBrokenSymlink:
				// FIXME: send the error to caller using channel
				errorIf(content.Err.Trace(), fmt.Sprintf("Skipping broken Symlink ‘%s’.", err.Path))
				continue
			}
			if os.IsNotExist(content.Err.ToGoError()) || os.IsPermission(content.Err.ToGoError()) {
				// FIXME: abstract this at fs.go layer
				// content.Content is not guaranteed non-nil on error; the old
				// code dereferenced it unconditionally below and could panic.
				if content.Content != nil {
					if content.Content.Type.IsDir() && (content.Content.Type&os.ModeSymlink == os.ModeSymlink) {
						continue
					}
					errorIf(content.Err.Trace(), fmt.Sprintf("Skipping ‘%s’.", content.Content.Name))
					continue
				}
				errorIf(content.Err.Trace(), "Skipping unknown file.")
				continue
			}
			return content.Err.Trace()
		}
		// A failed encode would silently truncate the sorted list; report it.
		if e := sl.enc.Encode(*content.Content); e != nil {
			return probe.NewError(e)
		}
	}
	// io.SeekStart replaces the deprecated os.SEEK_SET constant.
	if _, err := sl.file.Seek(0, io.SeekStart); err != nil {
		return probe.NewError(err)
	}
	return nil
}
// List list the entries from the sorted file
// Entries are streamed on a freshly created channel, which is closed on
// EOF or on the first decode error.
func (sl sortedList) List(recursive bool) <-chan client.ContentOnChannel {
	out := make(chan client.ContentOnChannel)
	go func() {
		defer close(out)
		for {
			var content client.Content
			switch err := sl.dec.Decode(&content); {
			case err == io.EOF:
				return
			case err != nil:
				out <- client.ContentOnChannel{Content: nil, Err: probe.NewError(err)}
				return
			}
			out <- client.ContentOnChannel{Content: &content, Err: nil}
		}
	}()
	return out
}
// Match reports whether source has a same-named regular-file entry of equal
// size in the sorted list, advancing the list's read cursor as it scans.
// Both the on-disk list and the sequence of sources passed in must be sorted
// by name for this single-pass comparison to be valid.
func (sl *sortedList) Match(source *client.Content) (bool, *probe.Error) {
	if len(sl.current.Name) == 0 {
		// for the first time read
		if err := sl.dec.Decode(&sl.current); err != nil {
			if err != io.EOF {
				return false, probe.NewError(err)
			}
			// Empty list: nothing can ever match.
			return false, nil
		}
	}
	for {
		compare := strings.Compare(source.Name, sl.current.Name)
		if compare == 0 {
			// Same name: match only if both sides are regular files of equal size.
			if source.Type.IsRegular() && sl.current.Type.IsRegular() && source.Size == sl.current.Size {
				return true, nil
			}
			return false, nil
		}
		if compare < 0 {
			// source sorts before the cursor, so it cannot appear later in the list.
			return false, nil
		}
		// assign zero values to fields because if s.current's previous decode had non zero value
		// fields it will not be over written if this loop's decode does not contain those fields
		sl.current = client.Content{}
		if err := sl.dec.Decode(&sl.current); err != nil {
			// NOTE(review): io.EOF here is returned wrapped as an error, unlike
			// the first-read path above which treats EOF as "no match" —
			// confirm this asymmetry is intentional.
			return false, probe.NewError(err)
		}
	}
}
// Delete close and delete the ondisk file
// Removal is attempted even when Close fails so the temporary file is never
// leaked; the first error encountered is the one returned.
func (sl sortedList) Delete() *probe.Error {
	closeErr := sl.file.Close()
	removeErr := os.Remove(sl.name)
	if closeErr != nil {
		return probe.NewError(closeErr)
	}
	if removeErr != nil {
		return probe.NewError(removeErr)
	}
	return nil
}
In case of errors not all the cases have content.Content as non nil - fixes #1166
/*
* Minio Client (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"encoding/gob"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/minio/mc/pkg/client"
"github.com/minio/minio/pkg/probe"
)
// sortedList is an on-disk, gob-encoded list of client.Content entries,
// written once by Create and then read back sequentially by List/Match.
type sortedList struct {
	name    string          // full path of the backing file on disk
	file    *os.File        // backing file, opened read-write
	dec     *gob.Decoder    // reader over file for List/Match
	enc     *gob.Encoder    // writer over file for Create
	current client.Content  // Match's read cursor into the list
}

// getSortedListDir returns the directory that holds on-disk sorted lists,
// rooted under the mc config directory.
// NOTE(review): "golbal" in golbalSortedListDir looks like a typo of
// "global"; the identifier is declared elsewhere so it is left as-is.
func getSortedListDir() (string, *probe.Error) {
	configDir, err := getMcConfigDir()
	if err != nil {
		return "", err.Trace()
	}
	sortedListDir := filepath.Join(configDir, golbalSortedListDir)
	return sortedListDir, nil
}

// createSortedListDir makes sure the sorted-list directory exists (mode 0700).
func createSortedListDir() *probe.Error {
	sortedListDir, err := getSortedListDir()
	if err != nil {
		return err.Trace()
	}
	// Fast path: directory already exists.
	if _, err := os.Stat(sortedListDir); err == nil {
		return nil
	}
	if err := os.MkdirAll(sortedListDir, 0700); err != nil {
		return probe.NewError(err)
	}
	return nil
}
// Create create an on disk sorted file from clnt
// It writes every listed content entry, gob-encoded, to
// <sortedListDir>/<id>, then rewinds the file for subsequent List/Match.
// Broken symlinks, missing files, and permission errors are logged and
// skipped; any other listing error aborts.
func (sl *sortedList) Create(clnt client.Client, id string) *probe.Error {
	var e error
	if err := createSortedListDir(); err != nil {
		return err.Trace()
	}
	sortedListDir, err := getSortedListDir()
	if err != nil {
		return err.Trace()
	}
	sl.name = filepath.Join(sortedListDir, id)
	// O_EXCL: fail rather than clobber a stale file with the same id.
	sl.file, e = os.OpenFile(sl.name, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
	if e != nil {
		return probe.NewError(e)
	}
	sl.enc = gob.NewEncoder(sl.file)
	sl.dec = gob.NewDecoder(sl.file)
	for content := range clnt.List(true) {
		if content.Err != nil {
			switch err := content.Err.ToGoError().(type) {
			case client.ISBrokenSymlink:
				// FIXME: send the error to caller using channel
				errorIf(content.Err.Trace(), fmt.Sprintf("Skipping broken Symlink ‘%s’.", err.Path))
				continue
			}
			if os.IsNotExist(content.Err.ToGoError()) || os.IsPermission(content.Err.ToGoError()) {
				// FIXME: abstract this at fs.go layer
				if content.Content != nil {
					if content.Content.Type.IsDir() && (content.Content.Type&os.ModeSymlink == os.ModeSymlink) {
						continue
					}
					errorIf(content.Err.Trace(), fmt.Sprintf("Skipping ‘%s’.", content.Content.Name))
					continue
				}
				// content.Content can be nil on errors (issue #1166).
				errorIf(content.Err.Trace(), "Skipping unknown file.")
				continue
			}
			return content.Err.Trace()
		}
		// A failed encode would silently truncate the sorted list; report it.
		if e := sl.enc.Encode(*content.Content); e != nil {
			return probe.NewError(e)
		}
	}
	// io.SeekStart replaces the deprecated os.SEEK_SET constant.
	if _, err := sl.file.Seek(0, io.SeekStart); err != nil {
		return probe.NewError(err)
	}
	return nil
}
// List list the entries from the sorted file
// Entries are streamed on a freshly created channel, which is closed on EOF
// or on the first decode error.
func (sl sortedList) List(recursive bool) <-chan client.ContentOnChannel {
	ch := make(chan client.ContentOnChannel)
	go func() {
		defer close(ch)
		for {
			var c client.Content
			err := sl.dec.Decode(&c)
			if err == io.EOF {
				// End of the on-disk list.
				break
			}
			if err != nil {
				ch <- client.ContentOnChannel{Content: nil, Err: probe.NewError(err)}
				break
			}
			ch <- client.ContentOnChannel{Content: &c, Err: nil}
		}
	}()
	return ch
}

// Match reports whether source has a same-named regular-file entry of equal
// size in the sorted list, advancing the list's read cursor as it scans.
// Both the on-disk list and the sequence of sources passed in must be sorted
// by name for this single-pass comparison to be valid.
func (sl *sortedList) Match(source *client.Content) (bool, *probe.Error) {
	if len(sl.current.Name) == 0 {
		// for the first time read
		if err := sl.dec.Decode(&sl.current); err != nil {
			if err != io.EOF {
				return false, probe.NewError(err)
			}
			// Empty list: nothing can ever match.
			return false, nil
		}
	}
	for {
		compare := strings.Compare(source.Name, sl.current.Name)
		if compare == 0 {
			// Same name: match only if both sides are regular files of equal size.
			if source.Type.IsRegular() && sl.current.Type.IsRegular() && source.Size == sl.current.Size {
				return true, nil
			}
			return false, nil
		}
		if compare < 0 {
			// source sorts before the cursor, so it cannot appear later in the list.
			return false, nil
		}
		// assign zero values to fields because if s.current's previous decode had non zero value
		// fields it will not be over written if this loop's decode does not contain those fields
		sl.current = client.Content{}
		if err := sl.dec.Decode(&sl.current); err != nil {
			// NOTE(review): io.EOF here is returned wrapped as an error, unlike
			// the first-read path above — confirm this asymmetry is intentional.
			return false, probe.NewError(err)
		}
	}
}
// Delete close and delete the ondisk file
// Removal is attempted even when Close fails so the temporary file is never
// leaked; the first error encountered is the one returned.
func (sl sortedList) Delete() *probe.Error {
	closeErr := sl.file.Close()
	removeErr := os.Remove(sl.name)
	if closeErr != nil {
		return probe.NewError(closeErr)
	}
	if removeErr != nil {
		return probe.NewError(removeErr)
	}
	return nil
}
|
package inputsocket
import (
"bufio"
"context"
"io"
"net"
"os"
reuse "github.com/libp2p/go-reuseport"
"github.com/tsaikd/KDGoLib/errutil"
codecjson "github.com/tsaikd/gogstash/codec/json"
"github.com/tsaikd/gogstash/config"
"github.com/tsaikd/gogstash/config/goglog"
"github.com/tsaikd/gogstash/config/logevent"
"golang.org/x/sync/errgroup"
)
// ModuleName is the name used in config file
const ModuleName = "socket"

// ErrorTag tag added to event when process module failed
const ErrorTag = "gogstash_input_socket_error"

// InputConfig holds the configuration json fields and internal objects
type InputConfig struct {
	config.InputConfig
	Socket string `json:"socket"` // Type of socket, must be one of ["tcp", "udp", "unix", "unixpacket"].
	// For TCP or UDP, address must have the form `host:port`.
	// For Unix networks, the address must be a file system path.
	Address string `json:"address"`
	// ReusePort enables SO_REUSEPORT-style listening via go-reuseport.
	ReusePort bool `json:"reuseport"`
}
// DefaultInputConfig returns an InputConfig struct with default values
func DefaultInputConfig() InputConfig {
	var conf InputConfig
	conf.InputConfig = config.InputConfig{
		CommonConfig: config.CommonConfig{Type: ModuleName},
	}
	return conf
}
// errors
var (
	// ErrorUnknownSocketType1 is returned when the configured socket type is
	// not one of tcp/udp/unix/unixpacket.
	ErrorUnknownSocketType1 = errutil.NewFactory("%q is not a valid socket type")
	// ErrorSocketAccept wraps failures from Listener.Accept.
	ErrorSocketAccept       = errutil.NewFactory("socket accept error")
)
// InitHandler initialize the input plugin
func InitHandler(ctx context.Context, raw *config.ConfigRaw) (config.TypeInputConfig, error) {
	conf := DefaultInputConfig()
	if err := config.ReflectConfig(raw, &conf); err != nil {
		return nil, err
	}
	codec, err := config.GetCodecDefault(ctx, *raw, codecjson.ModuleName)
	if err != nil {
		return nil, err
	}
	conf.Codec = codec
	return &conf, nil
}
// Start wraps the actual function starting the plugin
// It listens on the configured socket type/address and feeds decoded events
// into msgChan until ctx is cancelled or an accept error occurs.
func (i *InputConfig) Start(ctx context.Context, msgChan chan<- logevent.LogEvent) error {
	logger := goglog.Logger
	var l net.Listener
	switch i.Socket {
	case "unix", "unixpacket":
		// Remove existing unix socket
		os.Remove(i.Address)
		// Listen to socket
		address, err := net.ResolveUnixAddr(i.Socket, i.Address)
		if err != nil {
			return err
		}
		logger.Debugf("listen %q on %q", i.Socket, i.Address)
		l, err = net.ListenUnix(i.Socket, address)
		if err != nil {
			return err
		}
		defer l.Close()
		// Set socket permissions.
		if err = os.Chmod(i.Address, 0777); err != nil {
			return err
		}
	case "tcp":
		address, err := net.ResolveTCPAddr(i.Socket, i.Address)
		if err != nil {
			return err
		}
		logger.Debugf("listen %q on %q", i.Socket, address.String())
		if i.ReusePort {
			l, err = reuse.Listen(i.Socket, address.String())
		} else {
			l, err = net.ListenTCP(i.Socket, address)
		}
		if err != nil {
			return err
		}
		defer l.Close()
	case "udp":
		address, err := net.ResolveUDPAddr(i.Socket, i.Address)
		if err != nil {
			// BUG FIX: the resolve error was previously unchecked before
			// calling address.String(), which panics on a nil address.
			return err
		}
		logger.Debugf("listen %q on %q", i.Socket, address.String())
		var conn net.PacketConn
		if i.ReusePort {
			conn, err = reuse.ListenPacket(i.Socket, i.Address)
		} else {
			conn, err = net.ListenPacket(i.Socket, i.Address)
		}
		if err != nil {
			return err
		}
		return i.handleUDP(ctx, conn, msgChan)
	default:
		return ErrorUnknownSocketType1.New(nil, i.Socket)
	}
	eg, ctx := errgroup.WithContext(ctx)
	eg.Go(func() error {
		// Close the listener on cancellation so the Accept loop unblocks.
		<-ctx.Done()
		return l.Close()
	})
	eg.Go(func() error {
		for {
			conn, err := l.Accept()
			if err != nil {
				return ErrorSocketAccept.New(err)
			}
			// Capture conn per iteration before handing it to a goroutine.
			func(conn net.Conn) {
				eg.Go(func() error {
					defer conn.Close()
					i.parse(ctx, conn, msgChan)
					return nil
				})
			}(conn)
		}
	})
	return eg.Wait()
}
// handleUDP pumps datagrams from conn into an in-process pipe and parses the
// resulting byte stream line-by-line into msgChan until ctx is cancelled.
func (i *InputConfig) handleUDP(ctx context.Context, conn net.PacketConn, msgChan chan<- logevent.LogEvent) error {
	eg, ctx := errgroup.WithContext(ctx)
	b := make([]byte, 1500) // read buf, sized to a typical ethernet MTU
	pr, pw := io.Pipe()
	defer pw.Close()
	eg.Go(func() error {
		// Tear down the pipe and socket on cancellation so blocked readers exit.
		<-ctx.Done()
		pr.Close()
		conn.Close()
		return nil
	})
	eg.Go(func() error {
		for {
			select {
			case <-ctx.Done():
				return nil
			default:
			}
			n, _, err := conn.ReadFrom(b)
			if err == io.EOF {
				return nil
			} else if err != nil {
				return err
			}
			// A write error means the pipe reader is gone; stop instead of
			// silently dropping every subsequent datagram (error was ignored).
			if _, err := pw.Write(b[:n]); err != nil {
				return err
			}
		}
	})
	eg.Go(func() error {
		i.parse(ctx, pr, msgChan)
		return nil
	})
	return eg.Wait()
}
// parse reads newline-delimited records from r and hands each one to the
// configured codec until ctx is cancelled or the reader is exhausted.
func (i *InputConfig) parse(ctx context.Context, r io.Reader, msgChan chan<- logevent.LogEvent) {
	reader := bufio.NewReader(r)
	for ctx.Err() == nil {
		line, err := reader.ReadBytes('\n')
		if err != nil {
			// EOF (or closed pipe): stop parsing.
			return
		}
		i.Codec.Decode(ctx, line, nil, msgChan)
	}
}
Add input socket udp buffer size config
package inputsocket
import (
"bufio"
"context"
"io"
"net"
"os"
reuse "github.com/libp2p/go-reuseport"
"github.com/tsaikd/KDGoLib/errutil"
codecjson "github.com/tsaikd/gogstash/codec/json"
"github.com/tsaikd/gogstash/config"
"github.com/tsaikd/gogstash/config/goglog"
"github.com/tsaikd/gogstash/config/logevent"
"golang.org/x/sync/errgroup"
)
// ModuleName is the name used in config file
const ModuleName = "socket"

// ErrorTag tag added to event when process module failed
const ErrorTag = "gogstash_input_socket_error"

// InputConfig holds the configuration json fields and internal objects
type InputConfig struct {
	config.InputConfig
	Socket string `json:"socket"` // Type of socket, must be one of ["tcp", "udp", "unix", "unixpacket"].
	// For TCP or UDP, address must have the form `host:port`.
	// For Unix networks, the address must be a file system path.
	Address string `json:"address"`
	// ReusePort enables SO_REUSEPORT-style listening via go-reuseport.
	ReusePort bool `json:"reuseport"`
	// BufferSize is the UDP read buffer size in bytes (default 4096).
	BufferSize int `json:"buffer_size"`
}

// DefaultInputConfig returns an InputConfig struct with default values
func DefaultInputConfig() InputConfig {
	return InputConfig{
		InputConfig: config.InputConfig{
			CommonConfig: config.CommonConfig{
				Type: ModuleName,
			},
		},
		BufferSize: 4096,
	}
}

// errors
var (
	// ErrorUnknownSocketType1 is returned for unsupported socket types.
	ErrorUnknownSocketType1 = errutil.NewFactory("%q is not a valid socket type")
	// ErrorSocketAccept wraps failures from Listener.Accept.
	ErrorSocketAccept       = errutil.NewFactory("socket accept error")
)

// InitHandler initialize the input plugin
func InitHandler(ctx context.Context, raw *config.ConfigRaw) (config.TypeInputConfig, error) {
	conf := DefaultInputConfig()
	err := config.ReflectConfig(raw, &conf)
	if err != nil {
		return nil, err
	}
	// Fall back to the JSON codec when none is configured.
	conf.Codec, err = config.GetCodecDefault(ctx, *raw, codecjson.ModuleName)
	if err != nil {
		return nil, err
	}
	return &conf, nil
}
// Start wraps the actual function starting the plugin
// It listens on the configured socket type/address and feeds decoded events
// into msgChan until ctx is cancelled or an accept error occurs.
func (i *InputConfig) Start(ctx context.Context, msgChan chan<- logevent.LogEvent) error {
	logger := goglog.Logger
	var l net.Listener
	switch i.Socket {
	case "unix", "unixpacket":
		// Remove existing unix socket
		os.Remove(i.Address)
		// Listen to socket
		address, err := net.ResolveUnixAddr(i.Socket, i.Address)
		if err != nil {
			return err
		}
		logger.Debugf("listen %q on %q", i.Socket, i.Address)
		l, err = net.ListenUnix(i.Socket, address)
		if err != nil {
			return err
		}
		defer l.Close()
		// Set socket permissions.
		if err = os.Chmod(i.Address, 0777); err != nil {
			return err
		}
	case "tcp":
		address, err := net.ResolveTCPAddr(i.Socket, i.Address)
		if err != nil {
			return err
		}
		logger.Debugf("listen %q on %q", i.Socket, address.String())
		if i.ReusePort {
			l, err = reuse.Listen(i.Socket, address.String())
		} else {
			l, err = net.ListenTCP(i.Socket, address)
		}
		if err != nil {
			return err
		}
		defer l.Close()
	case "udp":
		address, err := net.ResolveUDPAddr(i.Socket, i.Address)
		if err != nil {
			// BUG FIX: the resolve error was previously unchecked before
			// calling address.String(), which panics on a nil address.
			return err
		}
		logger.Debugf("listen %q on %q", i.Socket, address.String())
		var conn net.PacketConn
		if i.ReusePort {
			conn, err = reuse.ListenPacket(i.Socket, i.Address)
		} else {
			conn, err = net.ListenPacket(i.Socket, i.Address)
		}
		if err != nil {
			return err
		}
		return i.handleUDP(ctx, conn, msgChan)
	default:
		return ErrorUnknownSocketType1.New(nil, i.Socket)
	}
	eg, ctx := errgroup.WithContext(ctx)
	eg.Go(func() error {
		// Close the listener on cancellation so the Accept loop unblocks.
		<-ctx.Done()
		return l.Close()
	})
	eg.Go(func() error {
		for {
			conn, err := l.Accept()
			if err != nil {
				return ErrorSocketAccept.New(err)
			}
			// Capture conn per iteration before handing it to a goroutine.
			func(conn net.Conn) {
				eg.Go(func() error {
					defer conn.Close()
					i.parse(ctx, conn, msgChan)
					return nil
				})
			}(conn)
		}
	})
	return eg.Wait()
}
// handleUDP pumps datagrams from conn into an in-process pipe and parses the
// resulting byte stream line-by-line into msgChan until ctx is cancelled.
func (i *InputConfig) handleUDP(ctx context.Context, conn net.PacketConn, msgChan chan<- logevent.LogEvent) error {
	eg, ctx := errgroup.WithContext(ctx)
	b := make([]byte, i.BufferSize) // read buf; longer datagrams are truncated
	pr, pw := io.Pipe()
	defer pw.Close()
	eg.Go(func() error {
		// Tear down the pipe and socket on cancellation so blocked readers exit.
		<-ctx.Done()
		pr.Close()
		conn.Close()
		return nil
	})
	eg.Go(func() error {
		for {
			select {
			case <-ctx.Done():
				return nil
			default:
			}
			n, _, err := conn.ReadFrom(b)
			if err == io.EOF {
				return nil
			} else if err != nil {
				return err
			}
			// A write error means the pipe reader is gone; stop instead of
			// silently dropping every subsequent datagram (error was ignored).
			if _, err := pw.Write(b[:n]); err != nil {
				return err
			}
		}
	})
	eg.Go(func() error {
		i.parse(ctx, pr, msgChan)
		return nil
	})
	return eg.Wait()
}
// parse reads newline-delimited records from r and hands each one to the
// configured codec until ctx is cancelled or the reader is exhausted.
func (i *InputConfig) parse(ctx context.Context, r io.Reader, msgChan chan<- logevent.LogEvent) {
	b := bufio.NewReader(r)
	for {
		// Non-blocking cancellation check before each read.
		select {
		case <-ctx.Done():
			return
		default:
		}
		line, err := b.ReadBytes('\n')
		if err != nil {
			// EOF
			return
		}
		i.Codec.Decode(ctx, line, nil, msgChan)
	}
}
|
/*
Copyright IBM Corp All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package e2e
import (
"encoding/json"
"io/ioutil"
"os"
"syscall"
"time"
docker "github.com/fsouza/go-dockerclient"
"github.com/hyperledger/fabric/integration/nwo"
"github.com/hyperledger/fabric/integration/nwo/commands"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
"github.com/tedsuo/ifrit"
)
// SBE_E2E spins up a basic solo Fabric network with two orgs and exercises
// state-based endorsement (SBE) via the keylevelep chaincode, for both
// public ("pub") and private ("priv") state.
var _ = Describe("SBE_E2E", func() {
	var (
		testDir   string         // scratch dir for generated network artifacts
		client    *docker.Client // docker client for container management
		network   *nwo.Network
		chaincode nwo.Chaincode
		process   ifrit.Process  // the running network process group
	)
	BeforeEach(func() {
		var err error
		testDir, err = ioutil.TempDir("", "e2e_sbe")
		Expect(err).NotTo(HaveOccurred())
		client, err = docker.NewClientFromEnv()
		Expect(err).NotTo(HaveOccurred())
		chaincode = nwo.Chaincode{
			Name:              "mycc",
			Version:           "0.0",
			Path:              "github.com/hyperledger/fabric/integration/chaincode/keylevelep/cmd",
			Ctor:              `{"Args":["init"]}`,
			CollectionsConfig: "testdata/collection_config.json",
		}
	})
	AfterEach(func() {
		// Stop the network process first, then tear down artifacts.
		if process != nil {
			process.Signal(syscall.SIGTERM)
			Eventually(process.Wait(), network.EventuallyTimeout).Should(Receive())
		}
		if network != nil {
			network.Cleanup()
		}
		os.RemoveAll(testDir)
	})
	Describe("basic solo network with 2 orgs", func() {
		BeforeEach(func() {
			network = nwo.New(nwo.BasicSolo(), testDir, client, 30000, components)
			network.GenerateConfigTree()
			network.Bootstrap()
			networkRunner := network.NetworkGroupRunner()
			process = ifrit.Invoke(networkRunner)
			Eventually(process.Ready()).Should(BeClosed())
		})
		It("executes a basic solo network with 2 orgs and SBE checks", func() {
			By("getting the orderer by name")
			orderer := network.Orderer("orderer")
			By("setting up the channel")
			network.CreateAndJoinChannel(orderer, "testchannel")
			By("updating the anchor peers")
			network.UpdateChannelAnchors(orderer, "testchannel")
			By("deploying the chaincode")
			nwo.DeployChaincode(network, "testchannel", orderer, chaincode)
			By("deploying a second instance of the chaincode")
			chaincode.Name = "mycc2"
			nwo.DeployChaincode(network, "testchannel", orderer, chaincode)
			// Exercise the SBE scenario against public and private state.
			RunSBE(network, orderer, "pub")
			RunSBE(network, orderer, "priv")
		})
	})
})
// RunSBE drives the state-based endorsement (SBE) scenarios against the
// "mycc" chaincode on "testchannel". mode selects which state the
// keylevelep chaincode targets ("pub" or "priv").
//
// The scenario modifies a key's state-level endorsement policy
// (addorgs/delorgs) and verifies that writes to the key only commit when
// the current policy is satisfied, including when invoked via
// chaincode-to-chaincode ("cc2cc") calls through "mycc2".
//
// NOTE(review): each invoke waits for the commit event on the endorsing
// peer(s) only; several subsequent reads run on the *other* org's peer and
// assume the block has already been delivered there too — this may flake
// without explicit ledger-height synchronization between the peers.
func RunSBE(n *nwo.Network, orderer *nwo.Orderer, mode string) {
	peerOrg1 := n.Peer("Org1", "peer0")
	peerOrg2 := n.Peer("Org2", "peer0")

	By("org1 initializes the key")
	sess, err := n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["setval", "` + mode + `", "foo"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org2 checks that setting the value was successful by reading it")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("foo"))

	By("org1 adds org1 to the state-based ep of a key")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["addorgs", "` + mode + `", "Org1MSP"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("checking that the modification succeeded through listing the orgs in the ep")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["listorgs", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("Org1MSP"))

	By("org1 sets the value of the key")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["setval", "` + mode + `", "val1"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org2 checks that setting the value was successful by reading it")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("val1"))

	// The key's ep now requires Org1, so an endorsement by org2 alone must
	// not satisfy it: this write is expected not to commit.
	By("org2 sets the value of the key")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["setval", "` + mode + `", "val2"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg2, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org2 checks that setting the value was not succesful by reading it")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("val1"))

	By("org1 adds org2 to the ep of the key")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["addorgs", "` + mode + `", "Org2MSP"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org1 lists the orgs of the ep to check that both org1 and org2 are there")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["listorgs", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	// The chaincode returns the org list as a JSON array.
	orgs := [2]string{"Org1MSP", "Org2MSP"}
	orgsList, err := json.Marshal(orgs)
	Expect(err).NotTo(HaveOccurred())
	Expect(sess).To(gbytes.Say(string(orgsList)))

	// Both orgs are now required; a single endorsement still cannot commit.
	By("org2 sets the value of the key")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["setval", "` + mode + `", "val3"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg2, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org2 checks that seting the value was not successful by reading it")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("val1"))

	// Endorsed by both orgs, this write satisfies the two-org policy.
	By("org1 and org2 set the value of the key")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["setval", "` + mode + `", "val4"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
			n.PeerAddress(peerOrg2, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org1 checks that setting the value was successful by reading it")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("val4"))

	// Modifying the ep is itself governed by the ep: org2 alone cannot
	// remove org1 from the policy.
	By("org2 deletes org1 from the ep of the key")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["delorgs", "` + mode + `", "Org1MSP"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg2, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org2 lists the orgs of the key to check that deleting org1 did not succeed")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["listorgs", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say(string(orgsList)))

	By("org1 and org2 delete org1 from the ep of the key")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["delorgs", "` + mode + `", "Org1MSP"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
			n.PeerAddress(peerOrg2, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org2 lists the orgs of the key's ep to check that removing org1 from the ep was successful")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["listorgs", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("Org2MSP"))

	// Only Org2 remains in the ep: a cc2cc write endorsed by org2 commits.
	By("org2 uses cc2cc invocation to set the value of the key")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc2",
		Ctor:      `{"Args":["cc2cc", "testchannel", "mycc", "setval", "` + mode + `", "cc2cc_org2"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg2, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org2 reads the value of the key to check that setting it was successful")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, n.EventuallyTimeout).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("cc2cc_org2"))

	// ...but a cc2cc write endorsed only by org1 must not commit.
	By("org1 uses cc2cc to set the value of the key")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc2",
		Ctor:      `{"Args":["cc2cc", "testchannel", "mycc", "setval", "` + mode + `", "cc2cc_org1"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, n.EventuallyTimeout).Should(gexec.Exit(0))

	By("org1 reads the value of the key to check that setting it was not successful")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, n.EventuallyTimeout).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("cc2cc_org2"))
}
[FAB-13593] Ledger synchronization in SBE tests
The SBE integration tests include cases where the ledger is updated,
waiting for the event on one peer and then checking the result on
another peer. This patch adds synchronization that waits until both
peers have the same ledger height, i.e. the transaction has been
delivered to both peers.
The same synchronization approach is already used in the private data integration tests.
Change-Id: I6b68d3cf9cfeebf6cddd75003b3a946295fb4392
Signed-off-by: Matthias Neugschwandtner <e570e5dc4777cc7f93f0f6ac0d7f23388ed99ae3@zurich.ibm.com>
/*
Copyright IBM Corp All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package e2e
import (
"encoding/json"
"io/ioutil"
"os"
"strings"
"syscall"
"time"
docker "github.com/fsouza/go-dockerclient"
"github.com/hyperledger/fabric/integration/nwo"
"github.com/hyperledger/fabric/integration/nwo/commands"
"github.com/hyperledger/fabric/protos/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
"github.com/tedsuo/ifrit"
)
// SBE_E2E brings up a basic solo network with two organizations, deploys the
// key-level endorsement ("keylevelep") chaincode twice, and runs the
// state-based endorsement scenarios against both public and private state.
var _ = Describe("SBE_E2E", func() {
	var (
		testDir   string         // scratch directory for generated network artifacts
		client    *docker.Client // docker client handed to nwo.New
		network   *nwo.Network
		chaincode nwo.Chaincode
		process   ifrit.Process // running network process group
	)

	BeforeEach(func() {
		var err error
		testDir, err = ioutil.TempDir("", "e2e_sbe")
		Expect(err).NotTo(HaveOccurred())

		client, err = docker.NewClientFromEnv()
		Expect(err).NotTo(HaveOccurred())

		// The keylevelep chaincode manages a key whose endorsement policy is
		// modified at the state level (addorgs/delorgs) by the tests below.
		chaincode = nwo.Chaincode{
			Name:              "mycc",
			Version:           "0.0",
			Path:              "github.com/hyperledger/fabric/integration/chaincode/keylevelep/cmd",
			Ctor:              `{"Args":["init"]}`,
			CollectionsConfig: "testdata/collection_config.json",
		}
	})

	AfterEach(func() {
		// Stop the network process group and wait for it to exit before
		// cleaning up on-disk artifacts.
		if process != nil {
			process.Signal(syscall.SIGTERM)
			Eventually(process.Wait(), network.EventuallyTimeout).Should(Receive())
		}
		if network != nil {
			network.Cleanup()
		}
		os.RemoveAll(testDir)
	})

	Describe("basic solo network with 2 orgs", func() {
		BeforeEach(func() {
			network = nwo.New(nwo.BasicSolo(), testDir, client, 30000, components)
			network.GenerateConfigTree()
			network.Bootstrap()

			networkRunner := network.NetworkGroupRunner()
			process = ifrit.Invoke(networkRunner)
			Eventually(process.Ready()).Should(BeClosed())
		})

		It("executes a basic solo network with 2 orgs and SBE checks", func() {
			By("getting the orderer by name")
			orderer := network.Orderer("orderer")

			By("setting up the channel")
			network.CreateAndJoinChannel(orderer, "testchannel")

			By("updating the anchor peers")
			network.UpdateChannelAnchors(orderer, "testchannel")

			By("deploying the chaincode")
			nwo.DeployChaincode(network, "testchannel", orderer, chaincode)

			By("deploying a second instance of the chaincode")
			chaincode.Name = "mycc2"
			nwo.DeployChaincode(network, "testchannel", orderer, chaincode)

			// Run the scenarios against public state and a private collection.
			RunSBE(network, orderer, "pub")
			RunSBE(network, orderer, "priv")
		})
	})
})
// RunSBE drives the state-based endorsement (SBE) scenarios against the
// "mycc" chaincode on "testchannel". mode selects which state the
// keylevelep chaincode targets ("pub" or "priv").
//
// The scenario modifies a key's state-level endorsement policy
// (addorgs/delorgs) and verifies that writes to the key only commit when
// the current policy is satisfied, including when invoked via
// chaincode-to-chaincode ("cc2cc") calls through "mycc2". Because each
// invoke waits for the commit event on the endorsing peer(s) only,
// syncLedgerHeights is called before reading on the other org's peer to
// make sure the block has been delivered there as well.
func RunSBE(n *nwo.Network, orderer *nwo.Orderer, mode string) {
	peerOrg1 := n.Peer("Org1", "peer0")
	peerOrg2 := n.Peer("Org2", "peer0")

	By("org1 initializes the key")
	sess, err := n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["setval", "` + mode + `", "foo"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	syncLedgerHeights(n, peerOrg1, peerOrg2)

	By("org2 checks that setting the value was successful by reading it")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("foo"))

	By("org1 adds org1 to the state-based ep of a key")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["addorgs", "` + mode + `", "Org1MSP"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("checking that the modification succeeded through listing the orgs in the ep")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["listorgs", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("Org1MSP"))

	By("org1 sets the value of the key")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["setval", "` + mode + `", "val1"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	syncLedgerHeights(n, peerOrg1, peerOrg2)

	By("org2 checks that setting the value was successful by reading it")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("val1"))

	// The key's ep now requires Org1, so an endorsement by org2 alone must
	// not satisfy it: this write is expected not to commit.
	By("org2 sets the value of the key")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["setval", "` + mode + `", "val2"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg2, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org2 checks that setting the value was not successful by reading it")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("val1"))
	syncLedgerHeights(n, peerOrg2, peerOrg1)

	By("org1 adds org2 to the ep of the key")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["addorgs", "` + mode + `", "Org2MSP"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org1 lists the orgs of the ep to check that both org1 and org2 are there")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["listorgs", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	// The chaincode returns the org list as a JSON array.
	orgs := [2]string{"Org1MSP", "Org2MSP"}
	orgsList, err := json.Marshal(orgs)
	Expect(err).NotTo(HaveOccurred())
	Expect(sess).To(gbytes.Say(string(orgsList)))
	syncLedgerHeights(n, peerOrg1, peerOrg2)

	// Both orgs are now required; a single endorsement still cannot commit.
	By("org2 sets the value of the key")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["setval", "` + mode + `", "val3"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg2, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org2 checks that setting the value was not successful by reading it")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("val1"))
	syncLedgerHeights(n, peerOrg2, peerOrg1)

	// Endorsed by both orgs, this write satisfies the two-org policy.
	By("org1 and org2 set the value of the key")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["setval", "` + mode + `", "val4"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
			n.PeerAddress(peerOrg2, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org1 checks that setting the value was successful by reading it")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("val4"))
	syncLedgerHeights(n, peerOrg1, peerOrg2)

	// Modifying the ep is itself governed by the ep: org2 alone cannot
	// remove org1 from the policy.
	By("org2 deletes org1 from the ep of the key")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["delorgs", "` + mode + `", "Org1MSP"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg2, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org2 lists the orgs of the key to check that deleting org1 did not succeed")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["listorgs", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say(string(orgsList)))
	syncLedgerHeights(n, peerOrg2, peerOrg1)

	By("org1 and org2 delete org1 from the ep of the key")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc",
		Ctor:      `{"Args":["delorgs", "` + mode + `", "Org1MSP"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
			n.PeerAddress(peerOrg2, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org2 lists the orgs of the key's ep to check that removing org1 from the ep was successful")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["listorgs", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("Org2MSP"))

	// Only Org2 remains in the ep: a cc2cc write endorsed by org2 commits.
	By("org2 uses cc2cc invocation to set the value of the key")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc2",
		Ctor:      `{"Args":["cc2cc", "testchannel", "mycc", "setval", "` + mode + `", "cc2cc_org2"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg2, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, time.Minute).Should(gexec.Exit(0))

	By("org2 reads the value of the key to check that setting it was successful")
	sess, err = n.PeerUserSession(peerOrg2, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, n.EventuallyTimeout).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("cc2cc_org2"))
	syncLedgerHeights(n, peerOrg2, peerOrg1)

	// ...but a cc2cc write endorsed only by org1 must not commit.
	By("org1 uses cc2cc to set the value of the key")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeInvoke{
		ChannelID: "testchannel",
		Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
		Name:      "mycc2",
		Ctor:      `{"Args":["cc2cc", "testchannel", "mycc", "setval", "` + mode + `", "cc2cc_org1"]}`,
		PeerAddresses: []string{
			n.PeerAddress(peerOrg1, nwo.ListenPort),
		},
		WaitForEvent: true,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, n.EventuallyTimeout).Should(gexec.Exit(0))

	By("org1 reads the value of the key to check that setting it was not successful")
	sess, err = n.PeerUserSession(peerOrg1, "User1", commands.ChaincodeQuery{
		ChannelID: "testchannel",
		Name:      "mycc",
		Ctor:      `{"Args":["getval", "` + mode + `"]}`,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, n.EventuallyTimeout).Should(gexec.Exit(0))
	Expect(sess).To(gbytes.Say("cc2cc_org2"))
}
// getLedgerHeight queries peer (as User1) for the blockchain info of
// channelName and returns the peer's current ledger height.
func getLedgerHeight(n *nwo.Network, peer *nwo.Peer, channelName string) int {
	sess, err := n.PeerUserSession(peer, "User1", commands.ChannelInfo{
		ChannelID: channelName,
	})
	Expect(err).NotTo(HaveOccurred())
	Eventually(sess, n.EventuallyTimeout).Should(gexec.Exit(0))

	// The CLI prints "Blockchain info:" followed by a JSON document; strip
	// the prefix before decoding. Fail loudly on malformed output instead
	// of silently returning height 0.
	channelInfoStr := strings.TrimPrefix(string(sess.Buffer().Contents()), "Blockchain info:")
	var channelInfo = common.BlockchainInfo{}
	err = json.Unmarshal([]byte(channelInfoStr), &channelInfo)
	Expect(err).NotTo(HaveOccurred())
	return int(channelInfo.Height)
}
// syncLedgerHeights blocks until peer2 has caught up to peer1's current
// ledger height on "testchannel", i.e. every block committed on peer1 has
// also been delivered to peer2.
func syncLedgerHeights(n *nwo.Network, peer1 *nwo.Peer, peer2 *nwo.Peer) {
	// Snapshot the height on the reference peer, then poll the other peer
	// until it reports the same height.
	target := getLedgerHeight(n, peer1, "testchannel")
	probe := func() int {
		return getLedgerHeight(n, peer2, "testchannel")
	}
	Eventually(probe, n.EventuallyTimeout).Should(Equal(target))
}
|
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"reflect"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/pkg/transport"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
// TestV3PutOverwrite puts a key with the v3 api to a random cluster member,
// overwrites it, then checks that the change was applied.
func TestV3PutOverwrite(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kvClient := toGRPC(clus.RandClient()).KV
	key := []byte("foo")

	// First write; PrevKv asks the server to report the prior key-value so
	// the overwrite below can be validated against it.
	putReq := &pb.PutRequest{Key: key, Value: []byte("bar"), PrevKv: true}
	firstPut, err := kvClient.Put(context.TODO(), putReq)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	// Overwrite the same key: the revision must advance and the previous
	// value must be reported.
	putReq.Value = []byte("baz")
	secondPut, err := kvClient.Put(context.TODO(), putReq)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if secondPut.Header.Revision <= firstPut.Header.Revision {
		t.Fatalf("expected newer revision on overwrite, got %v <= %v",
			secondPut.Header.Revision, firstPut.Header.Revision)
	}
	if pkv := secondPut.PrevKv; pkv == nil || string(pkv.Value) != "bar" {
		t.Fatalf("expected PrevKv=bar, got response %+v", secondPut)
	}

	// Read the key back and sanity-check the stored key-value pair.
	rangeResp, err := kvClient.Range(context.TODO(), &pb.RangeRequest{Key: key})
	if err != nil {
		t.Fatalf("couldn't get key (%v)", err)
	}
	if len(rangeResp.Kvs) != 1 {
		t.Fatalf("expected 1 key, got %v", len(rangeResp.Kvs))
	}
	stored := rangeResp.Kvs[0]
	if stored.ModRevision <= stored.CreateRevision {
		t.Errorf("expected modRev > createRev, got %d <= %d",
			stored.ModRevision, stored.CreateRevision)
	}
	if !reflect.DeepEqual(putReq.Value, stored.Value) {
		t.Errorf("expected value %v, got %v", putReq.Value, stored.Value)
	}
}
// TestV3PutRestart checks if a put after an unrelated member restart succeeds.
func TestV3PutRestart(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// Pick one member to serve the put...
	kvIdx := rand.Intn(3)
	kvc := toGRPC(clus.Client(kvIdx)).KV

	// ...and a *different* member to restart.
	restartIdx := kvIdx
	for restartIdx == kvIdx {
		restartIdx = rand.Intn(3)
	}
	clus.clients[restartIdx].Close()
	clus.Members[restartIdx].Stop(t)
	clus.Members[restartIdx].Restart(t)

	// Replace the closed client so cluster teardown stays healthy.
	newClient, cerr := NewClientV3(clus.Members[restartIdx])
	if cerr != nil {
		t.Fatalf("cannot create client: %v", cerr)
	}
	clus.clients[restartIdx] = newClient

	// The put goes to the untouched member; it must not time out locally.
	ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
	defer cancel()
	if _, err := kvc.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil && err == ctx.Err() {
		t.Fatalf("expected grpc error, got local ctx error (%v)", err)
	}
}
// TestV3CompactCurrentRev ensures keys are present when compacting on current revision.
func TestV3CompactCurrentRev(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV

	// Three puts to the same key bring the store to revision 4.
	putReq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	for i := 0; i < 3; i++ {
		if _, err := kvc.Put(context.Background(), putReq); err != nil {
			t.Fatalf("couldn't put key (%v)", err)
		}
	}

	// get key to add to proxy cache, if any
	if _, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}); err != nil {
		t.Fatal(err)
	}

	// compact on current revision
	if _, err := kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 4}); err != nil {
		t.Fatalf("couldn't compact kv space (%v)", err)
	}

	// key still exists when linearized?
	if _, err := kvc.Range(context.Background(), &pb.RangeRequest{Key: []byte("foo")}); err != nil {
		t.Fatalf("couldn't get key after compaction (%v)", err)
	}

	// key still exists when serialized?
	if _, err := kvc.Range(context.Background(), &pb.RangeRequest{Key: []byte("foo"), Serializable: true}); err != nil {
		t.Fatalf("couldn't get serialized key after compaction (%v)", err)
	}
}
// TestV3HashKV ensures that multiple calls of HashKV on same node return same hash and compact rev.
func TestV3HashKV(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	mvc := toGRPC(clus.RandClient()).Maintenance

	for i := 0; i < 10; i++ {
		resp, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte(fmt.Sprintf("bar%d", i))})
		if err != nil {
			t.Fatal(err)
		}

		rev := resp.Header.Revision
		// Revision 0 hashes at the server's current revision, which must
		// match the revision of the put we just issued.
		hresp, err := mvc.HashKV(context.Background(), &pb.HashKVRequest{Revision: 0})
		if err != nil {
			t.Fatal(err)
		}
		if rev != hresp.Header.Revision {
			t.Fatalf("Put rev %v != HashKV rev %v", rev, hresp.Header.Revision)
		}

		prevHash := hresp.Hash
		prevCompactRev := hresp.CompactRevision
		// Repeated HashKV calls without intervening writes must be stable.
		// (Inner loop variable renamed to j to avoid shadowing the outer i.)
		for j := 0; j < 10; j++ {
			hresp, err := mvc.HashKV(context.Background(), &pb.HashKVRequest{Revision: 0})
			if err != nil {
				t.Fatal(err)
			}
			if rev != hresp.Header.Revision {
				t.Fatalf("Put rev %v != HashKV rev %v", rev, hresp.Header.Revision)
			}
			if prevHash != hresp.Hash {
				t.Fatalf("prevHash %v != Hash %v", prevHash, hresp.Hash)
			}
			if prevCompactRev != hresp.CompactRevision {
				// Bug fix: the failure message previously printed the hashes
				// instead of the compact revisions being compared.
				t.Fatalf("prevCompactRev %v != CompactRevision %v", prevCompactRev, hresp.CompactRevision)
			}
			prevHash = hresp.Hash
			prevCompactRev = hresp.CompactRevision
		}
	}
}
// TestV3TxnTooManyOps verifies that a transaction exceeding the server's
// MaxTxnOps limit in any of its sections (compares, success ops, failure
// ops, or nested txns) is rejected with ErrGRPCTooManyOps.
func TestV3TxnTooManyOps(t *testing.T) {
	defer testutil.AfterTest(t)
	maxTxnOps := uint(128)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3, MaxTxnOps: maxTxnOps})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV

	// unique keys
	i := new(int)
	keyf := func() []byte {
		*i++
		// Bug fix: format the counter value (*i), not the pointer. %d on a
		// *int prints the (constant) address, which made every "unique" key
		// identical.
		return []byte(fmt.Sprintf("key-%d", *i))
	}

	// Each helper appends one op of the given flavor to the txn.
	addCompareOps := func(txn *pb.TxnRequest) {
		txn.Compare = append(txn.Compare,
			&pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    keyf(),
			})
	}
	addSuccessOps := func(txn *pb.TxnRequest) {
		txn.Success = append(txn.Success,
			&pb.RequestOp{
				Request: &pb.RequestOp_RequestPut{
					RequestPut: &pb.PutRequest{
						Key:   keyf(),
						Value: []byte("bar"),
					},
				},
			})
	}
	addFailureOps := func(txn *pb.TxnRequest) {
		txn.Failure = append(txn.Failure,
			&pb.RequestOp{
				Request: &pb.RequestOp_RequestPut{
					RequestPut: &pb.PutRequest{
						Key:   keyf(),
						Value: []byte("bar"),
					},
				},
			})
	}
	addTxnOps := func(txn *pb.TxnRequest) {
		// Nested txns must count toward the limit as well.
		newTxn := &pb.TxnRequest{}
		addSuccessOps(newTxn)
		txn.Success = append(txn.Success,
			&pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
				RequestTxn: newTxn,
			},
			},
		)
	}

	tests := []func(txn *pb.TxnRequest){
		addCompareOps,
		addSuccessOps,
		addFailureOps,
		addTxnOps,
	}

	for i, tt := range tests {
		// Build a txn with one op more than the configured maximum.
		txn := &pb.TxnRequest{}
		for j := 0; j < int(maxTxnOps+1); j++ {
			tt(txn)
		}
		_, err := kvc.Txn(context.Background(), txn)
		if !eqErrGRPC(err, rpctypes.ErrGRPCTooManyOps) {
			t.Errorf("#%d: err = %v, want %v", i, err, rpctypes.ErrGRPCTooManyOps)
		}
	}
}
// TestV3TxnDuplicateKeys verifies the server's duplicate-key check for
// transactions: combining a put with another put or delete that covers the
// same key — directly or through a nested txn on either branch — must be
// rejected with ErrGRPCDuplicateKey, while delete-only overlaps and
// non-overlapping combinations are accepted.
func TestV3TxnDuplicateKeys(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// Put on key "abc".
	putreq := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")}}}
	// Delete of exactly "abc".
	delKeyReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{
		RequestDeleteRange: &pb.DeleteRangeRequest{
			Key: []byte("abc"),
		},
	},
	}
	// Range delete ["a", "b"), which contains "abc".
	delInRangeReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{
		RequestDeleteRange: &pb.DeleteRangeRequest{
			Key: []byte("a"), RangeEnd: []byte("b"),
		},
	},
	}
	// Range delete ["abb", "abc"), which does NOT contain "abc"
	// (the range end is exclusive).
	delOutOfRangeReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{
		RequestDeleteRange: &pb.DeleteRangeRequest{
			Key: []byte("abb"), RangeEnd: []byte("abc"),
		},
	},
	}
	// Nested txn containing the in-range delete on its success branch.
	txnDelReq := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
		RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{delInRangeReq}},
	},
	}
	// Nested txn with the in-range delete on both branches.
	txnDelReqTwoSide := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
		RequestTxn: &pb.TxnRequest{
			Success: []*pb.RequestOp{delInRangeReq},
			Failure: []*pb.RequestOp{delInRangeReq}},
	},
	}
	// Nested txn containing the put on its success branch.
	txnPutReq := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
		RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{putreq}},
	},
	}
	// Nested txn with the put on both branches.
	txnPutReqTwoSide := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
		RequestTxn: &pb.TxnRequest{
			Success: []*pb.RequestOp{putreq},
			Failure: []*pb.RequestOp{putreq}},
	},
	}

	kvc := toGRPC(clus.RandClient()).KV
	tests := []struct {
		txnSuccess []*pb.RequestOp

		werr error
	}{
		{
			txnSuccess: []*pb.RequestOp{putreq, putreq},

			werr: rpctypes.ErrGRPCDuplicateKey,
		},
		{
			txnSuccess: []*pb.RequestOp{putreq, delKeyReq},

			werr: rpctypes.ErrGRPCDuplicateKey,
		},
		{
			txnSuccess: []*pb.RequestOp{putreq, delInRangeReq},

			werr: rpctypes.ErrGRPCDuplicateKey,
		},
		// Then(Put(a), Then(Del(a)))
		{
			txnSuccess: []*pb.RequestOp{putreq, txnDelReq},

			werr: rpctypes.ErrGRPCDuplicateKey,
		},
		// Then(Del(a), Then(Put(a)))
		{
			txnSuccess: []*pb.RequestOp{delInRangeReq, txnPutReq},

			werr: rpctypes.ErrGRPCDuplicateKey,
		},
		// Then((Then(Put(a)), Else(Put(a))), (Then(Put(a)), Else(Put(a)))
		{
			txnSuccess: []*pb.RequestOp{txnPutReqTwoSide, txnPutReqTwoSide},

			werr: rpctypes.ErrGRPCDuplicateKey,
		},
		// Then(Del(x), (Then(Put(a)), Else(Put(a))))
		{
			txnSuccess: []*pb.RequestOp{delOutOfRangeReq, txnPutReqTwoSide},

			werr: nil,
		},
		// Then(Then(Del(a)), (Then(Del(a)), Else(Del(a))))
		{
			txnSuccess: []*pb.RequestOp{txnDelReq, txnDelReqTwoSide},

			werr: nil,
		},
		{
			// Multiple deletes over the same key are allowed.
			txnSuccess: []*pb.RequestOp{delKeyReq, delInRangeReq, delKeyReq, delInRangeReq},

			werr: nil,
		},
		{
			// Put and a delete range that excludes the put key are allowed.
			txnSuccess: []*pb.RequestOp{putreq, delOutOfRangeReq},

			werr: nil,
		},
	}
	for i, tt := range tests {
		txn := &pb.TxnRequest{Success: tt.txnSuccess}
		_, err := kvc.Txn(context.Background(), txn)
		if !eqErrGRPC(err, tt.werr) {
			t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
		}
	}
}
// TestV3TxnRevision tests that the transaction header revision is set as expected.
func TestV3TxnRevision(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	// Seed a key so subsequent read-only txns have a known baseline revision.
	pr := &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")}
	presp, err := kvc.Put(context.TODO(), pr)
	if err != nil {
		t.Fatal(err)
	}

	// A read-only txn (Range only) must not bump the store revision.
	txnget := &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: &pb.RangeRequest{Key: []byte("abc")}}}
	txn := &pb.TxnRequest{Success: []*pb.RequestOp{txnget}}
	tresp, err := kvc.Txn(context.TODO(), txn)
	if err != nil {
		t.Fatal(err)
	}

	// did not update revision
	if presp.Header.Revision != tresp.Header.Revision {
		t.Fatalf("got rev %d, wanted rev %d", tresp.Header.Revision, presp.Header.Revision)
	}

	// Deleting a key that does not exist ("def") performs no write,
	// so the revision must again stay unchanged.
	txndr := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: &pb.DeleteRangeRequest{Key: []byte("def")}}}
	txn = &pb.TxnRequest{Success: []*pb.RequestOp{txndr}}
	tresp, err = kvc.Txn(context.TODO(), txn)
	if err != nil {
		t.Fatal(err)
	}

	// did not update revision
	if presp.Header.Revision != tresp.Header.Revision {
		t.Fatalf("got rev %d, wanted rev %d", tresp.Header.Revision, presp.Header.Revision)
	}

	// A txn with a real write (Put) must advance the revision by exactly one.
	txnput := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("123")}}}
	txn = &pb.TxnRequest{Success: []*pb.RequestOp{txnput}}
	tresp, err = kvc.Txn(context.TODO(), txn)
	if err != nil {
		t.Fatal(err)
	}

	// updated revision
	if tresp.Header.Revision != presp.Header.Revision+1 {
		t.Fatalf("got rev %d, wanted rev %d", tresp.Header.Revision, presp.Header.Revision+1)
	}
}
// TestV3TxnCmpHeaderRev tests that the txn header revision is set as expected
// when compared to the Succeeded field in the txn response.
func TestV3TxnCmpHeaderRev(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	for i := 0; i < 10; i++ {
		// Concurrently put a key with a txn comparing on it.
		revc := make(chan int64, 1)
		go func() {
			defer close(revc)
			pr := &pb.PutRequest{Key: []byte("k"), Value: []byte("v")}
			presp, err := kvc.Put(context.TODO(), pr)
			if err != nil {
				// t.Fatal must only be called from the test goroutine
				// (it runs runtime.Goexit); report the failure with
				// t.Error and signal via the closed channel instead.
				t.Error(err)
				return
			}
			revc <- presp.Header.Revision
		}()

		// The read-only txn uses the optimized readindex server path.
		txnget := &pb.RequestOp{Request: &pb.RequestOp_RequestRange{
			RequestRange: &pb.RangeRequest{Key: []byte("k")}}}
		txn := &pb.TxnRequest{Success: []*pb.RequestOp{txnget}}
		// i = 0 /\ Succeeded => put followed txn
		cmp := &pb.Compare{
			Result:      pb.Compare_EQUAL,
			Target:      pb.Compare_VERSION,
			Key:         []byte("k"),
			TargetUnion: &pb.Compare_Version{Version: int64(i)},
		}
		txn.Compare = append(txn.Compare, cmp)

		tresp, err := kvc.Txn(context.TODO(), txn)
		if err != nil {
			t.Fatal(err)
		}

		prev, ok := <-revc
		if !ok {
			// concurrent put failed; its error was already reported.
			return
		}
		// put followed txn; should eval to false
		if prev > tresp.Header.Revision && !tresp.Succeeded {
			t.Errorf("#%d: got else but put rev %d followed txn rev (%+v)", i, prev, tresp)
		}
		// txn follows put; should eval to true
		if tresp.Header.Revision >= prev && tresp.Succeeded {
			t.Errorf("#%d: got then but put rev %d preceded txn (%+v)", i, prev, tresp)
		}
	}
}
// TestV3TxnRangeCompare tests range comparisons in txns.
//
// Each case applies a single range-wide Compare and checks whether the txn
// reports Succeeded as expected. Composite literals use keyed fields so the
// code is go vet-clean and robust to protobuf field reordering.
func TestV3TxnRangeCompare(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// put keys, named by expected revision
	for _, k := range []string{"/a/2", "/a/3", "/a/4", "/f/5"} {
		if _, err := clus.Client(0).Put(context.TODO(), k, "x"); err != nil {
			t.Fatal(err)
		}
	}

	tests := []struct {
		cmp      pb.Compare
		wSuccess bool
	}{
		{
			// >= /a/; all create revs fit
			pb.Compare{
				Key:         []byte("/a/"),
				RangeEnd:    []byte{0},
				Target:      pb.Compare_CREATE,
				Result:      pb.Compare_LESS,
				TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 6},
			},
			true,
		},
		{
			// >= /a/; one create rev doesn't fit
			pb.Compare{
				Key:         []byte("/a/"),
				RangeEnd:    []byte{0},
				Target:      pb.Compare_CREATE,
				Result:      pb.Compare_LESS,
				TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 5},
			},
			false,
		},
		{
			// prefix /a/*; all create revs fit
			pb.Compare{
				Key:         []byte("/a/"),
				RangeEnd:    []byte("/a0"),
				Target:      pb.Compare_CREATE,
				Result:      pb.Compare_LESS,
				TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 5},
			},
			true,
		},
		{
			// prefix /a/*; one create rev doesn't fit
			pb.Compare{
				Key:         []byte("/a/"),
				RangeEnd:    []byte("/a0"),
				Target:      pb.Compare_CREATE,
				Result:      pb.Compare_LESS,
				TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 4},
			},
			false,
		},
		{
			// does not exist, does not succeed
			pb.Compare{
				Key:         []byte("/b/"),
				RangeEnd:    []byte("/b0"),
				Target:      pb.Compare_VALUE,
				Result:      pb.Compare_EQUAL,
				TargetUnion: &pb.Compare_Value{Value: []byte("x")},
			},
			false,
		},
		{
			// all keys are leased
			pb.Compare{
				Key:         []byte("/a/"),
				RangeEnd:    []byte("/a0"),
				Target:      pb.Compare_LEASE,
				Result:      pb.Compare_GREATER,
				TargetUnion: &pb.Compare_Lease{Lease: 0},
			},
			false,
		},
		{
			// no keys are leased
			pb.Compare{
				Key:         []byte("/a/"),
				RangeEnd:    []byte("/a0"),
				Target:      pb.Compare_LEASE,
				Result:      pb.Compare_EQUAL,
				TargetUnion: &pb.Compare_Lease{Lease: 0},
			},
			true,
		},
	}

	kvc := toGRPC(clus.Client(0)).KV
	for i, tt := range tests {
		txn := &pb.TxnRequest{}
		txn.Compare = append(txn.Compare, &tt.cmp)
		tresp, err := kvc.Txn(context.TODO(), txn)
		if err != nil {
			t.Fatal(err)
		}
		if tt.wSuccess != tresp.Succeeded {
			t.Errorf("#%d: expected %v, got %v", i, tt.wSuccess, tresp.Succeeded)
		}
	}
}
// TestV3TxnNestedPath tests nested txns follow paths as expected.
func TestV3TxnNestedPath(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV

	// "k" is never written, so its version is 0: comparing version==0 always
	// succeeds and version==1 always fails.
	cmpTrue := &pb.Compare{
		Result:      pb.Compare_EQUAL,
		Target:      pb.Compare_VERSION,
		Key:         []byte("k"),
		TargetUnion: &pb.Compare_Version{Version: int64(0)},
	}
	cmpFalse := &pb.Compare{
		Result:      pb.Compare_EQUAL,
		Target:      pb.Compare_VERSION,
		Key:         []byte("k"),
		TargetUnion: &pb.Compare_Version{Version: int64(1)},
	}

	// generate random path to eval txns
	// Build a 10-deep chain of nested txns; at each level, randomly decide
	// whether the level should take its Success or Failure branch, and attach
	// the child txn to that branch with the matching compare.
	topTxn := &pb.TxnRequest{}
	txn := topTxn
	txnPath := make([]bool, 10)
	for i := range txnPath {
		nextTxn := &pb.TxnRequest{}
		op := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: nextTxn}}
		txnPath[i] = rand.Intn(2) == 0
		if txnPath[i] {
			txn.Compare = append(txn.Compare, cmpTrue)
			txn.Success = append(txn.Success, op)
		} else {
			txn.Compare = append(txn.Compare, cmpFalse)
			txn.Failure = append(txn.Failure, op)
		}
		txn = nextTxn
	}

	tresp, err := kvc.Txn(context.TODO(), topTxn)
	if err != nil {
		t.Fatal(err)
	}

	// Walk the response chain and verify each level's Succeeded flag matches
	// the branch chosen when the request was built.
	curTxnResp := tresp
	for i := range txnPath {
		if curTxnResp.Succeeded != txnPath[i] {
			t.Fatalf("expected path %+v, got response %+v", txnPath, *tresp)
		}
		curTxnResp = curTxnResp.Responses[0].Response.(*pb.ResponseOp_ResponseTxn).ResponseTxn
	}
}
// TestV3PutIgnoreValue ensures that writes with ignore_value overwrite with
// the previous key-value pair.
//
// The cases run in order and depend on each other: the key must not exist at
// first (expect ErrGRPCKeyNotFound), then it is created, re-put with
// ignore_value (keeping and later dropping an attached lease), and finally
// the lease is revoked to confirm a detached key survives revocation.
func TestV3PutIgnoreValue(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	key, val := []byte("foo"), []byte("bar")
	putReq := pb.PutRequest{Key: key, Value: val}

	// create lease
	lc := toGRPC(clus.RandClient()).Lease
	lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
	if err != nil {
		t.Fatal(err)
	}
	if lresp.Error != "" {
		t.Fatal(lresp.Error)
	}

	tests := []struct {
		putFunc  func() error // operation under test; run in order
		putErr   error        // expected error from putFunc
		wleaseID int64        // expected lease on the key after a successful case
	}{
		{ // put failure for non-existent key
			func() error {
				preq := putReq
				preq.IgnoreValue = true
				_, err := kvc.Put(context.TODO(), &preq)
				return err
			},
			rpctypes.ErrGRPCKeyNotFound,
			0,
		},
		{ // txn failure for non-existent key
			func() error {
				preq := putReq
				preq.Value = nil
				preq.IgnoreValue = true
				txn := &pb.TxnRequest{}
				txn.Success = append(txn.Success, &pb.RequestOp{
					Request: &pb.RequestOp_RequestPut{RequestPut: &preq}})
				_, err := kvc.Txn(context.TODO(), txn)
				return err
			},
			rpctypes.ErrGRPCKeyNotFound,
			0,
		},
		{ // put success
			func() error {
				_, err := kvc.Put(context.TODO(), &putReq)
				return err
			},
			nil,
			0,
		},
		{ // txn success, attach lease
			func() error {
				preq := putReq
				preq.Value = nil
				preq.Lease = lresp.ID
				preq.IgnoreValue = true
				txn := &pb.TxnRequest{}
				txn.Success = append(txn.Success, &pb.RequestOp{
					Request: &pb.RequestOp_RequestPut{RequestPut: &preq}})
				_, err := kvc.Txn(context.TODO(), txn)
				return err
			},
			nil,
			lresp.ID,
		},
		{ // non-empty value with ignore_value should error
			func() error {
				preq := putReq
				preq.IgnoreValue = true
				_, err := kvc.Put(context.TODO(), &preq)
				return err
			},
			rpctypes.ErrGRPCValueProvided,
			0,
		},
		{ // overwrite with previous value, ensure no prev-kv is returned and lease is detached
			func() error {
				preq := putReq
				preq.Value = nil
				preq.IgnoreValue = true
				presp, err := kvc.Put(context.TODO(), &preq)
				if err != nil {
					return err
				}
				if presp.PrevKv != nil && len(presp.PrevKv.Key) != 0 {
					return fmt.Errorf("unexpected previous key-value %v", presp.PrevKv)
				}
				return nil
			},
			nil,
			0,
		},
		{ // revoke lease, ensure detached key doesn't get deleted
			func() error {
				_, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID})
				return err
			},
			nil,
			0,
		},
	}

	for i, tt := range tests {
		if err := tt.putFunc(); !eqErrGRPC(err, tt.putErr) {
			t.Fatalf("#%d: err expected %v, got %v", i, tt.putErr, err)
		}
		if tt.putErr != nil {
			continue
		}
		// After each successful case the key must still hold the original
		// value and carry the expected lease.
		rr, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: key})
		if err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		if len(rr.Kvs) != 1 {
			t.Fatalf("#%d: len(rr.KVs) expected 1, got %d", i, len(rr.Kvs))
		}
		if !bytes.Equal(rr.Kvs[0].Value, val) {
			t.Fatalf("#%d: value expected %q, got %q", i, val, rr.Kvs[0].Value)
		}
		if rr.Kvs[0].Lease != tt.wleaseID {
			t.Fatalf("#%d: lease ID expected %d, got %d", i, tt.wleaseID, rr.Kvs[0].Lease)
		}
	}
}
// TestV3PutIgnoreLease ensures that writes with ignore_lease use the previous
// lease for the key overwrites.
//
// The cases run in order and depend on each other: ignore_lease on a missing
// key fails, the key is then created with a lease, updated with ignore_lease
// (lease must survive), overwritten plainly (lease must detach), and finally
// the lease is revoked to confirm the detached key is not deleted.
func TestV3PutIgnoreLease(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV

	// create lease
	lc := toGRPC(clus.RandClient()).Lease
	lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
	if err != nil {
		t.Fatal(err)
	}
	if lresp.Error != "" {
		t.Fatal(lresp.Error)
	}

	key, val, val1 := []byte("zoo"), []byte("bar"), []byte("bar1")
	putReq := pb.PutRequest{Key: key, Value: val}

	tests := []struct {
		putFunc  func() error // operation under test; run in order
		putErr   error        // expected error from putFunc
		wleaseID int64        // expected lease on the key afterwards
		wvalue   []byte       // expected value of the key afterwards
	}{
		{ // put failure for non-existent key
			func() error {
				preq := putReq
				preq.IgnoreLease = true
				_, err := kvc.Put(context.TODO(), &preq)
				return err
			},
			rpctypes.ErrGRPCKeyNotFound,
			0,
			nil,
		},
		{ // txn failure for non-existent key
			func() error {
				preq := putReq
				preq.IgnoreLease = true
				txn := &pb.TxnRequest{}
				txn.Success = append(txn.Success, &pb.RequestOp{
					Request: &pb.RequestOp_RequestPut{RequestPut: &preq}})
				_, err := kvc.Txn(context.TODO(), txn)
				return err
			},
			rpctypes.ErrGRPCKeyNotFound,
			0,
			nil,
		},
		{ // put success
			func() error {
				preq := putReq
				preq.Lease = lresp.ID
				_, err := kvc.Put(context.TODO(), &preq)
				return err
			},
			nil,
			lresp.ID,
			val,
		},
		{ // txn success, modify value using 'ignore_lease' and ensure lease is not detached
			func() error {
				preq := putReq
				preq.Value = val1
				preq.IgnoreLease = true
				txn := &pb.TxnRequest{}
				txn.Success = append(txn.Success, &pb.RequestOp{
					Request: &pb.RequestOp_RequestPut{RequestPut: &preq}})
				_, err := kvc.Txn(context.TODO(), txn)
				return err
			},
			nil,
			lresp.ID,
			val1,
		},
		{ // non-empty lease with ignore_lease should error
			func() error {
				preq := putReq
				preq.Lease = lresp.ID
				preq.IgnoreLease = true
				_, err := kvc.Put(context.TODO(), &preq)
				return err
			},
			rpctypes.ErrGRPCLeaseProvided,
			0,
			nil,
		},
		{ // overwrite with previous value, ensure no prev-kv is returned and lease is detached
			func() error {
				presp, err := kvc.Put(context.TODO(), &putReq)
				if err != nil {
					return err
				}
				if presp.PrevKv != nil && len(presp.PrevKv.Key) != 0 {
					return fmt.Errorf("unexpected previous key-value %v", presp.PrevKv)
				}
				return nil
			},
			nil,
			0,
			val,
		},
		{ // revoke lease, ensure detached key doesn't get deleted
			func() error {
				_, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID})
				return err
			},
			nil,
			0,
			val,
		},
	}

	for i, tt := range tests {
		if err := tt.putFunc(); !eqErrGRPC(err, tt.putErr) {
			t.Fatalf("#%d: err expected %v, got %v", i, tt.putErr, err)
		}
		if tt.putErr != nil {
			continue
		}
		rr, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: key})
		if err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		if len(rr.Kvs) != 1 {
			t.Fatalf("#%d: len(rr.KVs) expected 1, got %d", i, len(rr.Kvs))
		}
		if !bytes.Equal(rr.Kvs[0].Value, tt.wvalue) {
			// report the per-case expected value, not the initial one
			t.Fatalf("#%d: value expected %q, got %q", i, tt.wvalue, rr.Kvs[0].Value)
		}
		if rr.Kvs[0].Lease != tt.wleaseID {
			t.Fatalf("#%d: lease ID expected %d, got %d", i, tt.wleaseID, rr.Kvs[0].Lease)
		}
	}
}
// TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails.
func TestV3PutMissingLease(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	key := []byte("foo")
	// Lease 123456 was never granted, so every write using it must fail.
	preq := &pb.PutRequest{Key: key, Lease: 123456}
	tests := []func(){
		// put case
		func() {
			if presp, err := kvc.Put(context.TODO(), preq); err == nil {
				t.Errorf("succeeded put key. req: %v. resp: %v", preq, presp)
			}
		},
		// txn success case
		func() {
			txn := &pb.TxnRequest{}
			txn.Success = append(txn.Success, &pb.RequestOp{
				Request: &pb.RequestOp_RequestPut{
					RequestPut: preq}})
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn success. req: %v. resp: %v", txn, tresp)
			}
		},
		// txn failure case
		func() {
			txn := &pb.TxnRequest{}
			txn.Failure = append(txn.Failure, &pb.RequestOp{
				Request: &pb.RequestOp_RequestPut{
					RequestPut: preq}})
			// "bar" does not exist, so CREATE > 0 fails and the Failure
			// branch (the bad put) is the one that executes.
			cmp := &pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    []byte("bar"),
			}
			txn.Compare = append(txn.Compare, cmp)
			if tresp, err := kvc.Txn(context.TODO(), txn); err != nil {
				t.Errorf("succeeded txn failure. req: %v. resp: %v", txn, tresp)
			}
		},
		// ignore bad lease in failure on success txn
		func() {
			// The txn takes the Success branch (a harmless Range); the bad
			// put sits on the untaken Failure branch, so the txn succeeds.
			txn := &pb.TxnRequest{}
			rreq := &pb.RangeRequest{Key: []byte("bar")}
			txn.Success = append(txn.Success, &pb.RequestOp{
				Request: &pb.RequestOp_RequestRange{
					RequestRange: rreq}})
			txn.Failure = append(txn.Failure, &pb.RequestOp{
				Request: &pb.RequestOp_RequestPut{
					RequestPut: preq}})
			if tresp, err := kvc.Txn(context.TODO(), txn); err != nil {
				t.Errorf("failed good txn. req: %v. resp: %v", txn, tresp)
			}
		},
	}

	for i, f := range tests {
		f()
		// key shouldn't have been stored
		rreq := &pb.RangeRequest{Key: key}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("#%d. could not rangereq (%v)", i, err)
		} else if len(rresp.Kvs) != 0 {
			t.Errorf("#%d. expected no keys, got %v", i, rresp)
		}
	}
}
// TestV3DeleteRange tests various edge cases in the DeleteRange API.
func TestV3DeleteRange(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		keySet  []string // keys to seed before deleting
		begin   string   // DeleteRange key
		end     string   // DeleteRange range_end ("" = single-key delete)
		prevKV  bool     // request previous key-values in the response
		wantSet [][]byte // keys expected to remain, in range order
		deleted int64    // expected delete count
	}{
		// delete middle
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fop", false,
			[][]byte{[]byte("foo"), []byte("fop")}, 1,
		},
		// no delete
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "foo/", false,
			[][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")}, 0,
		},
		// delete first
		{
			[]string{"foo", "foo/abc", "fop"},
			"fo", "fop", false,
			[][]byte{[]byte("fop")}, 2,
		},
		// delete tail
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fos", false,
			[][]byte{[]byte("foo")}, 2,
		},
		// delete exact
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/abc", "", false,
			[][]byte{[]byte("foo"), []byte("fop")}, 1,
		},
		// delete none, [x,x)
		{
			[]string{"foo"},
			"foo", "foo", false,
			[][]byte{[]byte("foo")}, 0,
		},
		// delete middle with preserveKVs set
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fop", true,
			[][]byte{[]byte("foo"), []byte("fop")}, 1,
		},
	}

	for i, tt := range tests {
		// Each case gets a fresh cluster so deletes don't leak across cases.
		clus := NewClusterV3(t, &ClusterConfig{Size: 3})
		kvc := toGRPC(clus.RandClient()).KV

		ks := tt.keySet
		for j := range ks {
			reqput := &pb.PutRequest{Key: []byte(ks[j]), Value: []byte{}}
			_, err := kvc.Put(context.TODO(), reqput)
			if err != nil {
				t.Fatalf("couldn't put key (%v)", err)
			}
		}

		dreq := &pb.DeleteRangeRequest{
			Key:      []byte(tt.begin),
			RangeEnd: []byte(tt.end),
			PrevKv:   tt.prevKV,
		}
		dresp, err := kvc.DeleteRange(context.TODO(), dreq)
		if err != nil {
			t.Fatalf("couldn't delete range on test %d (%v)", i, err)
		}
		if tt.deleted != dresp.Deleted {
			t.Errorf("expected %d on test %v, got %d", tt.deleted, i, dresp.Deleted)
		}
		if tt.prevKV {
			// every deleted key must have its previous KV echoed back
			if len(dresp.PrevKvs) != int(dresp.Deleted) {
				t.Errorf("preserve %d keys, want %d", len(dresp.PrevKvs), dresp.Deleted)
			}
		}

		// Range over the whole keyspace to verify the surviving keys.
		rreq := &pb.RangeRequest{Key: []byte{0x0}, RangeEnd: []byte{0xff}}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("couldn't get range on test %v (%v)", i, err)
		}
		if dresp.Header.Revision != rresp.Header.Revision {
			t.Errorf("expected revision %v, got %v",
				dresp.Header.Revision, rresp.Header.Revision)
		}

		keys := [][]byte{}
		for j := range rresp.Kvs {
			keys = append(keys, rresp.Kvs[j].Key)
		}
		if !reflect.DeepEqual(tt.wantSet, keys) {
			t.Errorf("expected %v on test %v, got %v", tt.wantSet, i, keys)
		}
		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}
// TestV3TxnInvalidRange tests that invalid ranges are rejected in txns.
func TestV3TxnInvalidRange(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}

	// Write three revisions so there is history to compact away.
	for i := 0; i < 3; i++ {
		_, err := kvc.Put(context.Background(), preq)
		if err != nil {
			t.Fatalf("couldn't put key (%v)", err)
		}
	}

	_, err := kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 2})
	if err != nil {
		t.Fatalf("couldn't compact kv space (%v)", err)
	}

	// future rev
	txn := &pb.TxnRequest{}
	txn.Success = append(txn.Success, &pb.RequestOp{
		Request: &pb.RequestOp_RequestPut{
			RequestPut: preq}})

	// Reading at revision 100 (beyond the current store revision) must fail
	// the whole txn with ErrGRPCFutureRev.
	rreq := &pb.RangeRequest{Key: []byte("foo"), Revision: 100}
	txn.Success = append(txn.Success, &pb.RequestOp{
		Request: &pb.RequestOp_RequestRange{
			RequestRange: rreq}})

	if _, err := kvc.Txn(context.TODO(), txn); !eqErrGRPC(err, rpctypes.ErrGRPCFutureRev) {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCFutureRev)
	}

	// compacted rev
	// Revision 1 was compacted above, so the same txn must now fail with
	// ErrGRPCCompacted.
	tv, _ := txn.Success[1].Request.(*pb.RequestOp_RequestRange)
	tv.RequestRange.Revision = 1
	if _, err := kvc.Txn(context.TODO(), txn); !eqErrGRPC(err, rpctypes.ErrGRPCCompacted) {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCCompacted)
	}
}
// TestV3TooLargeRequest ensures the server rejects a request whose payload
// exceeds the maximum request size with ErrGRPCRequestTooLarge.
func TestV3TooLargeRequest(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV

	// A 2 MB value is over the server's request limit.
	req := &pb.PutRequest{Key: []byte("foo"), Value: make([]byte, 2*1024*1024)}
	_, err := kvc.Put(context.Background(), req)
	if !eqErrGRPC(err, rpctypes.ErrGRPCRequestTooLarge) {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCRequestTooLarge)
	}
}
// TestV3Hash tests that the maintenance Hash RPC returns a non-zero hash
// over a non-empty keyspace.
func TestV3Hash(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	client := clus.RandClient()
	kv := toGRPC(client).KV
	maint := toGRPC(client).Maintenance

	// Write a few revisions so the hash covers non-trivial state.
	put := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	for i := 0; i < 3; i++ {
		if _, err := kv.Put(context.Background(), put); err != nil {
			t.Fatalf("couldn't put key (%v)", err)
		}
	}

	resp, err := maint.Hash(context.Background(), &pb.HashRequest{})
	if err != nil || resp.Hash == 0 {
		t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash)
	}
}
// TestV3HashRestart ensures that the KV hash stays the same after a member
// stop/restart cycle, i.e. the persisted state is restored intact.
func TestV3HashRestart(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// Hash before restarting.
	before, err := toGRPC(clus.RandClient()).Maintenance.Hash(context.Background(), &pb.HashRequest{})
	if err != nil || before.Hash == 0 {
		t.Fatalf("couldn't hash (%v, hash %d)", err, before.Hash)
	}

	// Bounce the only member and wait until it is serving again.
	clus.Members[0].Stop(t)
	clus.Members[0].Restart(t)
	clus.waitLeader(t, clus.Members)
	waitForRestart(t, toGRPC(clus.Client(0)).KV)

	// Hash after restarting; it must match.
	after, err := toGRPC(clus.RandClient()).Maintenance.Hash(context.Background(), &pb.HashRequest{})
	if err != nil || after.Hash == 0 {
		t.Fatalf("couldn't hash (%v, hash %d)", err, after.Hash)
	}
	if before.Hash != after.Hash {
		t.Fatalf("hash expected %d, got %d", before.Hash, after.Hash)
	}
}
// TestV3StorageQuotaAPI tests the V3 server respects quotas at the API layer.
func TestV3StorageQuotaAPI(t *testing.T) {
	defer testutil.AfterTest(t)
	quotasize := int64(16 * os.Getpagesize())

	clus := NewClusterV3(t, &ClusterConfig{Size: 3})

	// Set a quota on one node
	clus.Members[0].QuotaBackendBytes = quotasize
	clus.Members[0].Stop(t)
	clus.Members[0].Restart(t)

	defer clus.Terminate(t)
	kvc := toGRPC(clus.Client(0)).KV
	waitForRestart(t, kvc)

	key := []byte("abc")

	// test small put that fits in quota
	smallbuf := make([]byte, 512)
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err != nil {
		t.Fatal(err)
	}

	// test big put
	bigbuf := make([]byte, quotasize)
	_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf})
	if !eqErrGRPC(err, rpctypes.ErrGRPCNoSpace) {
		t.Fatalf("big put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace)
	}

	// test big txn
	puttxn := &pb.RequestOp{
		Request: &pb.RequestOp_RequestPut{
			RequestPut: &pb.PutRequest{
				Key:   key,
				Value: bigbuf,
			},
		},
	}
	txnreq := &pb.TxnRequest{}
	txnreq.Success = append(txnreq.Success, puttxn)
	_, txnerr := kvc.Txn(context.TODO(), txnreq)
	if !eqErrGRPC(txnerr, rpctypes.ErrGRPCNoSpace) {
		// report the txn's own error, not the earlier put's
		t.Fatalf("big txn got %v, expected %v", txnerr, rpctypes.ErrGRPCNoSpace)
	}
}
// TestV3RangeRequest exercises the Range RPC across single keys, key ranges,
// historical revisions, limits, sort orders, and min/max mod- and
// create-revision filters. Each case seeds its own fresh cluster with
// putKeys, then issues reqs[j] and checks the returned keys (wresps[j]) and
// the More flag (wmores[j]).
func TestV3RangeRequest(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		putKeys []string          // keys written before querying, in order
		reqs    []pb.RangeRequest // range requests to issue
		wresps  [][]string        // expected keys per request
		wmores  []bool            // expected More flag per request
	}{
		// single key
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// exists
				{Key: []byte("foo")},
				// doesn't exist
				{Key: []byte("baz")},
			},

			[][]string{
				{"foo"},
				{},
			},
			[]bool{false, false},
		},
		// multi-key
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				// all in range
				{Key: []byte("a"), RangeEnd: []byte("z")},
				// [b, d)
				{Key: []byte("b"), RangeEnd: []byte("d")},
				// out of range
				{Key: []byte("f"), RangeEnd: []byte("z")},
				// [c,c) = empty
				{Key: []byte("c"), RangeEnd: []byte("c")},
				// [d, b) = empty
				{Key: []byte("d"), RangeEnd: []byte("b")},
				// ["\0", "\0") => all in range
				{Key: []byte{0}, RangeEnd: []byte{0}},
			},

			[][]string{
				{"a", "b", "c", "d", "e"},
				{"b", "c"},
				{},
				{},
				{},
				{"a", "b", "c", "d", "e"},
			},
			[]bool{false, false, false, false, false, false},
		},
		// revision
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				// revision 0 means "latest"; revision 1 is the empty store
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 0},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 1},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 2},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 3},
			},

			[][]string{
				{"a", "b", "c", "d", "e"},
				{},
				{"a"},
				{"a", "b"},
			},
			[]bool{false, false, false, false},
		},
		// limit
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 1},
				// no more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 2},
			},

			[][]string{
				{"bar"},
				{"bar", "foo"},
			},
			[]bool{true, false},
		},
		// sort
		{
			[]string{"b", "a", "c", "d", "c"},
			[]pb.RangeRequest{
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_MOD,
				},
				{
					Key: []byte("z"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
				{ // sort ASCEND by default
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      10,
					SortOrder:  pb.RangeRequest_NONE,
					SortTarget: pb.RangeRequest_CREATE,
				},
			},

			[][]string{
				{"a"},
				{"d"},
				{"b"},
				{"c"},
				{},
				{"b", "a", "c", "d"},
			},
			[]bool{true, true, true, true, false, false},
		},
		// min/max mod rev
		{
			// key names record the revision at which each was written
			[]string{"rev2", "rev3", "rev4", "rev5", "rev6"},
			[]pb.RangeRequest{
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MinModRevision: 3,
				},
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MaxModRevision: 3,
				},
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MinModRevision: 3,
					MaxModRevision: 5,
				},
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MaxModRevision: 10,
				},
			},

			[][]string{
				{"rev3", "rev4", "rev5", "rev6"},
				{"rev2", "rev3"},
				{"rev3", "rev4", "rev5"},
				{"rev2", "rev3", "rev4", "rev5", "rev6"},
			},
			[]bool{false, false, false, false},
		},
		// min/max create rev
		{
			// duplicates re-put existing keys: mod rev changes, create rev doesn't
			[]string{"rev2", "rev3", "rev2", "rev2", "rev6", "rev3"},
			[]pb.RangeRequest{
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MinCreateRevision: 3,
				},
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MaxCreateRevision: 3,
				},
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MinCreateRevision: 3,
					MaxCreateRevision: 5,
				},
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MaxCreateRevision: 10,
				},
			},

			[][]string{
				{"rev3", "rev6"},
				{"rev2", "rev3"},
				{"rev3"},
				{"rev2", "rev3", "rev6"},
			},
			[]bool{false, false, false, false},
		},
	}

	for i, tt := range tests {
		clus := NewClusterV3(t, &ClusterConfig{Size: 3})
		for _, k := range tt.putKeys {
			kvc := toGRPC(clus.RandClient()).KV
			req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
			if _, err := kvc.Put(context.TODO(), req); err != nil {
				t.Fatalf("#%d: couldn't put key (%v)", i, err)
			}
		}

		for j, req := range tt.reqs {
			kvc := toGRPC(clus.RandClient()).KV
			resp, err := kvc.Range(context.TODO(), &req)
			if err != nil {
				t.Errorf("#%d.%d: Range error: %v", i, j, err)
				continue
			}
			if len(resp.Kvs) != len(tt.wresps[j]) {
				t.Errorf("#%d.%d: bad len(resp.Kvs). got = %d, want = %d, ", i, j, len(resp.Kvs), len(tt.wresps[j]))
				continue
			}
			for k, wKey := range tt.wresps[j] {
				respKey := string(resp.Kvs[k].Key)
				if respKey != wKey {
					t.Errorf("#%d.%d: key[%d]. got = %v, want = %v, ", i, j, k, respKey, wKey)
				}
			}
			if resp.More != tt.wmores[j] {
				t.Errorf("#%d.%d: bad more. got = %v, want = %v, ", i, j, resp.More, tt.wmores[j])
			}
			// header revision = one per seeded put, plus the initial revision
			wrev := int64(len(tt.putKeys) + 1)
			if resp.Header.Revision != wrev {
				t.Errorf("#%d.%d: bad header revision. got = %d. want = %d", i, j, resp.Header.Revision, wrev)
			}
		}
		clus.Terminate(t)
	}
}
// newClusterV3NoClients launches a gRPC-enabled cluster without creating any
// pre-connected clients; callers dial members themselves (e.g. TLS tests).
func newClusterV3NoClients(t *testing.T, cfg *ClusterConfig) *ClusterV3 {
	cfg.UseGRPC = true
	ret := &ClusterV3{cluster: NewClusterByConfig(t, cfg)}
	ret.Launch(t)
	return ret
}
// TestTLSGRPCRejectInsecureClient checks that connection is rejected if server is TLS but not client.
func TestTLSGRPCRejectInsecureClient(t *testing.T) {
	defer testutil.AfterTest(t)

	cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo}
	clus := newClusterV3NoClients(t, &cfg)
	defer clus.Terminate(t)

	// nil out TLS field so client will use an insecure connection
	clus.Members[0].ClientTLSInfo = nil
	client, err := NewClientV3(clus.Members[0])
	if err != nil && err != context.DeadlineExceeded {
		t.Fatalf("unexpected error (%v)", err)
	} else if client == nil {
		// Ideally, no client would be returned. However, grpc will
		// return a connection without trying to handshake first so
		// the connection appears OK.
		return
	}
	defer client.Close()

	// An actual RPC forces the TLS handshake, which must fail; run it in a
	// goroutine with its own timeout so a hang cannot block the test.
	donec := make(chan error, 1)
	go func() {
		ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
		reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
		_, perr := toGRPC(client).KV.Put(ctx, reqput)
		cancel()
		donec <- perr
	}()
	if perr := <-donec; perr == nil {
		t.Fatalf("expected client error on put")
	}
}
// TestTLSGRPCRejectSecureClient checks that connection is rejected if client is TLS but not server.
func TestTLSGRPCRejectSecureClient(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := newClusterV3NoClients(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// Force the client to dial with TLS against a plaintext server.
	clus.Members[0].ClientTLSInfo = &testTLSInfo
	client, err := NewClientV3(clus.Members[0])
	switch {
	case client != nil || err == nil:
		t.Fatalf("expected no client")
	case err != context.DeadlineExceeded:
		t.Fatalf("unexpected error (%v)", err)
	}
}
// TestTLSGRPCAcceptSecureAll checks that connection is accepted if both client and server are TLS.
func TestTLSGRPCAcceptSecureAll(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := newClusterV3NoClients(t, &ClusterConfig{Size: 3, ClientTLS: &testTLSInfo})
	defer clus.Terminate(t)

	client, err := NewClientV3(clus.Members[0])
	if err != nil {
		t.Fatalf("expected tls client (%v)", err)
	}
	defer client.Close()

	// A successful Put proves the TLS handshake and the RPC path both work.
	put := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	if _, perr := toGRPC(client).KV.Put(context.TODO(), put); perr != nil {
		t.Fatalf("unexpected error on put over tls (%v)", perr)
	}
}
// TestTLSReloadAtomicReplace ensures server reloads expired/valid certs
// when all certs are atomically replaced by directory renaming.
// And expects server to reject client requests, and vice versa.
func TestTLSReloadAtomicReplace(t *testing.T) {
	// tmpDir is created only to reserve a unique path; it is removed right
	// away so os.Rename can later move a directory onto that path.
	tmpDir, err := ioutil.TempDir(os.TempDir(), "fixtures-tmp")
	if err != nil {
		t.Fatal(err)
	}
	os.RemoveAll(tmpDir)
	defer os.RemoveAll(tmpDir)

	// certsDir: directory the server loads certs from.
	certsDir, err := ioutil.TempDir(os.TempDir(), "fixtures-to-load")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(certsDir)

	// certsDirExp: staging directory holding the expired certs.
	certsDirExp, err := ioutil.TempDir(os.TempDir(), "fixtures-expired")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(certsDirExp)

	// cloneFunc stages valid certs in certsDir and expired certs in
	// certsDirExp, returning the TLSInfo pointing at certsDir.
	cloneFunc := func() transport.TLSInfo {
		tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir)
		if terr != nil {
			t.Fatal(terr)
		}
		if _, err = copyTLSFiles(testTLSInfoExpired, certsDirExp); err != nil {
			t.Fatal(err)
		}
		return tlsInfo
	}
	// replaceFunc atomically swaps in the expired certs via two renames.
	replaceFunc := func() {
		if err = os.Rename(certsDir, tmpDir); err != nil {
			t.Fatal(err)
		}
		if err = os.Rename(certsDirExp, certsDir); err != nil {
			t.Fatal(err)
		}
		// after rename,
		// 'certsDir' contains expired certs
		// 'tmpDir' contains valid certs
		// 'certsDirExp' does not exist
	}
	// revertFunc rotates the three paths back so certsDir again holds the
	// valid certs (rename order matters; each step frees the next target).
	revertFunc := func() {
		if err = os.Rename(tmpDir, certsDirExp); err != nil {
			t.Fatal(err)
		}
		if err = os.Rename(certsDir, tmpDir); err != nil {
			t.Fatal(err)
		}
		if err = os.Rename(certsDirExp, certsDir); err != nil {
			t.Fatal(err)
		}
	}
	testTLSReload(t, cloneFunc, replaceFunc, revertFunc)
}
// TestTLSReloadCopy ensures server reloads expired/valid certs
// when new certs are copied over, one by one. And expects server
// to reject client requests, and vice versa.
func TestTLSReloadCopy(t *testing.T) {
	certsDir, err := ioutil.TempDir(os.TempDir(), "fixtures-to-load")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(certsDir)

	// cloneFunc seeds certsDir with the valid certs.
	cloneFunc := func() transport.TLSInfo {
		tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir)
		if terr != nil {
			t.Fatal(terr)
		}
		return tlsInfo
	}
	// replaceFunc overwrites certsDir file-by-file with expired certs
	// (non-atomic, unlike the rename-based variant above).
	replaceFunc := func() {
		if _, err = copyTLSFiles(testTLSInfoExpired, certsDir); err != nil {
			t.Fatal(err)
		}
	}
	// revertFunc copies the valid certs back into place.
	revertFunc := func() {
		if _, err = copyTLSFiles(testTLSInfo, certsDir); err != nil {
			t.Fatal(err)
		}
	}
	testTLSReload(t, cloneFunc, replaceFunc, revertFunc)
}
// testTLSReload drives a cert-reload scenario: cloneFunc stages the initial
// valid certs and returns the TLSInfo to serve with, replaceFunc swaps in
// expired certs, and revertFunc restores the valid ones. The server is
// expected to reject dials while the certs are expired and accept them again
// after revert.
func testTLSReload(t *testing.T, cloneFunc func() transport.TLSInfo, replaceFunc func(), revertFunc func()) {
	defer testutil.AfterTest(t)

	// 1. separate copies for TLS assets modification
	tlsInfo := cloneFunc()

	// 2. start cluster with valid certs
	clus := NewClusterV3(t, &ClusterConfig{Size: 1, PeerTLS: &tlsInfo, ClientTLS: &tlsInfo})
	defer clus.Terminate(t)

	// 3. concurrent client dialing while certs become expired
	// NOTE(review): this goroutine has no stop signal; once the timeout error
	// is received it keeps dialing until the test process moves on — confirm
	// this leak is acceptable for the test harness.
	errc := make(chan error, 1)
	go func() {
		for {
			cc, err := tlsInfo.ClientConfig()
			if err != nil {
				// errors in 'go/src/crypto/tls/tls.go'
				// tls: private key does not match public key
				// tls: failed to find any PEM data in key input
				// tls: failed to find any PEM data in certificate input
				// Or 'does not exist', 'not found', etc
				t.Log(err)
				continue
			}
			cli, cerr := clientv3.New(clientv3.Config{
				Endpoints:   []string{clus.Members[0].GRPCAddr()},
				DialTimeout: time.Second,
				TLS:         cc,
			})
			if cerr != nil {
				errc <- cerr
				return
			}
			cli.Close()
		}
	}()

	// 4. replace certs with expired ones
	replaceFunc()

	// 5. expect dial time-out when loading expired certs
	select {
	case gerr := <-errc:
		if gerr != context.DeadlineExceeded {
			t.Fatalf("expected %v, got %v", context.DeadlineExceeded, gerr)
		}
	case <-time.After(5 * time.Second):
		t.Fatal("failed to receive dial timeout error")
	}

	// 6. replace expired certs back with valid ones
	revertFunc()

	// 7. new requests should trigger listener to reload valid certs
	tls, terr := tlsInfo.ClientConfig()
	if terr != nil {
		t.Fatal(terr)
	}
	cl, cerr := clientv3.New(clientv3.Config{
		Endpoints:   []string{clus.Members[0].GRPCAddr()},
		DialTimeout: 5 * time.Second,
		TLS:         tls,
	})
	if cerr != nil {
		t.Fatalf("expected no error, got %v", cerr)
	}
	cl.Close()
}
// TestGRPCRequireLeader verifies that a gRPC request carrying the
// require-leader metadata fails with ErrNoLeader once quorum is lost.
func TestGRPCRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	cfg := ClusterConfig{Size: 3}
	clus := newClusterV3NoClients(t, &cfg)
	defer clus.Terminate(t)

	// stop two of three members so member 0 loses its leader
	for _, idx := range []int{1, 2} {
		clus.Members[idx].Stop(t)
	}

	cli, cerr := NewClientV3(clus.Members[0])
	if cerr != nil {
		t.Fatalf("cannot create client: %v", cerr)
	}
	defer cli.Close()

	// wait for election timeout, then member[0] will not have a leader.
	time.Sleep(time.Duration(3*electionTicks) * tickDuration)

	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	ctx := metadata.NewOutgoingContext(context.Background(), md)
	preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	if _, err := toGRPC(cli).KV.Put(ctx, preq); rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader)
	}
}
// TestGRPCStreamRequireLeader verifies that watch streams opened with the
// require-leader metadata are rejected while the cluster has no leader and
// are accepted again after quorum is restored.
func TestGRPCStreamRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)
	cfg := ClusterConfig{Size: 3}
	clus := newClusterV3NoClients(t, &cfg)
	defer clus.Terminate(t)

	client, err := NewClientV3(clus.Members[0])
	if err != nil {
		t.Fatalf("failed to create client (%v)", err)
	}
	defer client.Close()

	wAPI := toGRPC(client).Watch
	// require-leader metadata: the server must fail requests when leaderless
	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	ctx := metadata.NewOutgoingContext(context.Background(), md)
	wStream, err := wAPI.Watch(ctx)
	if err != nil {
		t.Fatalf("wAPI.Watch error: %v", err)
	}

	// take down quorum; member 0 loses its leader
	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)

	// existing stream should be rejected
	_, err = wStream.Recv()
	if rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader)
	}

	// new stream should also be rejected
	wStream, err = wAPI.Watch(ctx)
	if err != nil {
		t.Fatalf("wAPI.Watch error: %v", err)
	}
	_, err = wStream.Recv()
	if rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader)
	}

	clus.Members[1].Restart(t)
	clus.Members[2].Restart(t)

	// wait for a leader to be elected and election ticks to settle
	clus.waitLeader(t, clus.Members)
	time.Sleep(time.Duration(2*electionTicks) * tickDuration)

	// new stream should also be OK now after we restarted the other members
	wStream, err = wAPI.Watch(ctx)
	if err != nil {
		t.Fatalf("wAPI.Watch error: %v", err)
	}
	wreq := &pb.WatchRequest{
		RequestUnion: &pb.WatchRequest_CreateRequest{
			CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")},
		},
	}
	err = wStream.Send(wreq)
	if err != nil {
		t.Errorf("err = %v, want nil", err)
	}
}
// TestV3LargeRequests ensures that configurable MaxRequestBytes works as intended.
func TestV3LargeRequests(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		maxRequestBytes uint
		valueSize       int
		expectError     error
	}{
		// don't set to 0. use 0 as the default.
		{maxRequestBytes: 1, valueSize: 1024, expectError: rpctypes.ErrGRPCRequestTooLarge},
		{maxRequestBytes: 10 * 1024 * 1024, valueSize: 9 * 1024 * 1024, expectError: nil},
		{maxRequestBytes: 10 * 1024 * 1024, valueSize: 10 * 1024 * 1024, expectError: rpctypes.ErrGRPCRequestTooLarge},
		{maxRequestBytes: 10 * 1024 * 1024, valueSize: 10*1024*1024 + 5, expectError: rpctypes.ErrGRPCRequestTooLarge},
	}
	for i, test := range tests {
		clus := NewClusterV3(t, &ClusterConfig{Size: 1, MaxRequestBytes: test.maxRequestBytes})
		kvcli := toGRPC(clus.Client(0)).KV
		preq := &pb.PutRequest{Key: []byte("foo"), Value: make([]byte, test.valueSize)}
		if _, err := kvcli.Put(context.TODO(), preq); !eqErrGRPC(err, test.expectError) {
			t.Errorf("#%d: expected error %v, got %v", i, test.expectError, err)
		}
		if test.expectError == nil {
			// request went through, expect large response back from server
			greq := &pb.RangeRequest{Key: []byte("foo")}
			// limit receive call size with original value + gRPC overhead bytes
			if _, err := kvcli.Range(context.TODO(), greq, grpc.MaxCallRecvMsgSize(test.valueSize+512*1024)); err != nil {
				t.Errorf("#%d: range expected no error , got %v", i, err)
			}
		}
		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}
// eqErrGRPC reports whether two errors are equivalent for test purposes:
// both nil, or both non-nil with identical messages.
//
// The previous implementation
// ("!(err1 == nil && err2 != nil) || err1.Error() == err2.Error()")
// returned true whenever err1 was non-nil and err2 was nil (got-error vs
// want-no-error passed silently), and panicked on a nil-interface
// err1.Error() call when err1 was nil and err2 was not.
func eqErrGRPC(err1 error, err2 error) bool {
	if err1 == nil || err2 == nil {
		return err1 == err2
	}
	return err1.Error() == err2.Error()
}
// waitForRestart tries a range request until the client's server responds.
// This is mainly a stop-gap function until grpcproxy's KVClient adapter
// (and by extension, clientv3) supports grpc.CallOption pass-through so
// FailFast=false works with Put.
func waitForRestart(t *testing.T, kvc pb.KVClient) {
	rreq := &pb.RangeRequest{Key: []byte("_"), Serializable: true}
	// FailFast(false) blocks until the connection is usable again
	_, err := kvc.Range(context.TODO(), rreq, grpc.FailFast(false))
	if err != nil {
		t.Fatal(err)
	}
}
integration: remove typo in "TestV3LargeRequests"
Signed-off-by: Gyuho Lee <3454051ee3341eb69bdf6fca40899cdad4d86496@gmail.com>
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"reflect"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/pkg/transport"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
// TestV3PutOverwrite puts a key with the v3 api to a random cluster member,
// overwrites it, then checks that the change was applied.
func TestV3PutOverwrite(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	key := []byte("foo")
	preq := &pb.PutRequest{Key: key, Value: []byte("bar"), PrevKv: true}

	// first write
	presp1, err := kvc.Put(context.TODO(), preq)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	// overwrite
	preq.Value = []byte("baz")
	presp2, err := kvc.Put(context.TODO(), preq)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if presp2.Header.Revision <= presp1.Header.Revision {
		t.Fatalf("expected newer revision on overwrite, got %v <= %v",
			presp2.Header.Revision, presp1.Header.Revision)
	}
	if pkv := presp2.PrevKv; pkv == nil || string(pkv.Value) != "bar" {
		t.Fatalf("expected PrevKv=bar, got response %+v", presp2)
	}

	// read back and verify the overwrite took effect
	rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: key})
	if err != nil {
		t.Fatalf("couldn't get key (%v)", err)
	}
	if len(rresp.Kvs) != 1 {
		t.Fatalf("expected 1 key, got %v", len(rresp.Kvs))
	}
	kv := rresp.Kvs[0]
	if kv.ModRevision <= kv.CreateRevision {
		t.Errorf("expected modRev > createRev, got %d <= %d",
			kv.ModRevision, kv.CreateRevision)
	}
	if !reflect.DeepEqual(preq.Value, kv.Value) {
		t.Errorf("expected value %v, got %v", preq.Value, kv.Value)
	}
}
// TestV3PutRestart checks if a put after an unrelated member restart succeeds.
func TestV3PutRestart(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// pick a random member to issue the put through...
	kvIdx := rand.Intn(3)
	kvc := toGRPC(clus.Client(kvIdx)).KV

	// ...and a different random member to stop/restart
	stopIdx := kvIdx
	for stopIdx == kvIdx {
		stopIdx = rand.Intn(3)
	}

	clus.clients[stopIdx].Close()
	clus.Members[stopIdx].Stop(t)
	clus.Members[stopIdx].Restart(t)
	// replace the closed client so cluster teardown still works
	c, cerr := NewClientV3(clus.Members[stopIdx])
	if cerr != nil {
		t.Fatalf("cannot create client: %v", cerr)
	}
	clus.clients[stopIdx] = c

	ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
	defer cancel()
	reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	_, err := kvc.Put(ctx, reqput)
	// the put may fail with a gRPC error, but must not hang until the
	// local 10s context deadline fires
	if err != nil && err == ctx.Err() {
		t.Fatalf("expected grpc error, got local ctx error (%v)", err)
	}
}
// TestV3CompactCurrentRev ensures keys are present when compacting on current revision.
func TestV3CompactCurrentRev(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	// write "foo" three times; the store revision reaches 4
	preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	for i := 0; i < 3; i++ {
		if _, err := kvc.Put(context.Background(), preq); err != nil {
			t.Fatalf("couldn't put key (%v)", err)
		}
	}
	// get key to add to proxy cache, if any
	if _, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}); err != nil {
		t.Fatal(err)
	}
	// compact on current revision
	if _, err := kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 4}); err != nil {
		t.Fatalf("couldn't compact kv space (%v)", err)
	}
	// key still exists when linearized?
	if _, err := kvc.Range(context.Background(), &pb.RangeRequest{Key: []byte("foo")}); err != nil {
		t.Fatalf("couldn't get key after compaction (%v)", err)
	}
	// key still exists when serialized?
	if _, err := kvc.Range(context.Background(), &pb.RangeRequest{Key: []byte("foo"), Serializable: true}); err != nil {
		t.Fatalf("couldn't get serialized key after compaction (%v)", err)
	}
}
// TestV3HashKV ensures that multiple calls of HashKV on same node return same hash and compact rev.
func TestV3HashKV(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	mvc := toGRPC(clus.RandClient()).Maintenance

	for i := 0; i < 10; i++ {
		resp, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte(fmt.Sprintf("bar%d", i))})
		if err != nil {
			t.Fatal(err)
		}

		rev := resp.Header.Revision
		// Revision: 0 hashes at the current revision
		hresp, err := mvc.HashKV(context.Background(), &pb.HashKVRequest{Revision: 0})
		if err != nil {
			t.Fatal(err)
		}
		if rev != hresp.Header.Revision {
			t.Fatalf("Put rev %v != HashKV rev %v", rev, hresp.Header.Revision)
		}

		prevHash := hresp.Hash
		prevCompactRev := hresp.CompactRevision
		// repeated HashKV calls with no intervening writes must be stable
		// (inner loop variable renamed from 'i' to avoid shadowing the outer index;
		// the CompactRevision mismatch message previously printed prevHash/Hash)
		for j := 0; j < 10; j++ {
			hresp, err := mvc.HashKV(context.Background(), &pb.HashKVRequest{Revision: 0})
			if err != nil {
				t.Fatal(err)
			}
			if rev != hresp.Header.Revision {
				t.Fatalf("Put rev %v != HashKV rev %v", rev, hresp.Header.Revision)
			}
			if prevHash != hresp.Hash {
				t.Fatalf("prevHash %v != Hash %v", prevHash, hresp.Hash)
			}
			if prevCompactRev != hresp.CompactRevision {
				t.Fatalf("prevCompactRev %v != CompactRevision %v", prevCompactRev, hresp.CompactRevision)
			}
			prevHash = hresp.Hash
			prevCompactRev = hresp.CompactRevision
		}
	}
}
// TestV3TxnTooManyOps ensures a txn exceeding MaxTxnOps in any of its
// operation lists (compares, success ops, failure ops, nested txns) is
// rejected with ErrGRPCTooManyOps.
func TestV3TxnTooManyOps(t *testing.T) {
	defer testutil.AfterTest(t)
	maxTxnOps := uint(128)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3, MaxTxnOps: maxTxnOps})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV

	// unique keys: "key-1", "key-2", ...
	// (the previous version formatted the *int pointer itself with %d, which
	// prints the pointer's address — the same value on every call — so the
	// generated keys were all identical, defeating the "unique keys" intent)
	cnt := 0
	keyf := func() []byte {
		cnt++
		return []byte(fmt.Sprintf("key-%d", cnt))
	}

	addCompareOps := func(txn *pb.TxnRequest) {
		txn.Compare = append(txn.Compare,
			&pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    keyf(),
			})
	}
	addSuccessOps := func(txn *pb.TxnRequest) {
		txn.Success = append(txn.Success,
			&pb.RequestOp{
				Request: &pb.RequestOp_RequestPut{
					RequestPut: &pb.PutRequest{
						Key:   keyf(),
						Value: []byte("bar"),
					},
				},
			})
	}
	addFailureOps := func(txn *pb.TxnRequest) {
		txn.Failure = append(txn.Failure,
			&pb.RequestOp{
				Request: &pb.RequestOp_RequestPut{
					RequestPut: &pb.PutRequest{
						Key:   keyf(),
						Value: []byte("bar"),
					},
				},
			})
	}
	addTxnOps := func(txn *pb.TxnRequest) {
		newTxn := &pb.TxnRequest{}
		addSuccessOps(newTxn)
		txn.Success = append(txn.Success,
			&pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
				RequestTxn: newTxn,
			},
			},
		)
	}

	tests := []func(txn *pb.TxnRequest){
		addCompareOps,
		addSuccessOps,
		addFailureOps,
		addTxnOps,
	}
	for i, tt := range tests {
		txn := &pb.TxnRequest{}
		// one op past the limit must be rejected
		for j := 0; j < int(maxTxnOps+1); j++ {
			tt(txn)
		}
		_, err := kvc.Txn(context.Background(), txn)
		if !eqErrGRPC(err, rpctypes.ErrGRPCTooManyOps) {
			t.Errorf("#%d: err = %v, want %v", i, err, rpctypes.ErrGRPCTooManyOps)
		}
	}
}
// TestV3TxnDuplicateKeys ensures a txn is rejected with ErrGRPCDuplicateKey
// when the same key is written more than once among its operations —
// directly, via overlapping delete ranges, or through nested txns — and is
// accepted when writes do not overlap.
func TestV3TxnDuplicateKeys(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// building blocks: writes on/around key "abc"
	putreq := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")}}}
	delKeyReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{
		RequestDeleteRange: &pb.DeleteRangeRequest{
			Key: []byte("abc"),
		},
	},
	}
	// ["a", "b") covers "abc"
	delInRangeReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{
		RequestDeleteRange: &pb.DeleteRangeRequest{
			Key: []byte("a"), RangeEnd: []byte("b"),
		},
	},
	}
	// ["abb", "abc") does NOT cover "abc" (end is exclusive)
	delOutOfRangeReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{
		RequestDeleteRange: &pb.DeleteRangeRequest{
			Key: []byte("abb"), RangeEnd: []byte("abc"),
		},
	},
	}
	// nested txns wrapping the above ops
	txnDelReq := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
		RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{delInRangeReq}},
	},
	}
	txnDelReqTwoSide := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
		RequestTxn: &pb.TxnRequest{
			Success: []*pb.RequestOp{delInRangeReq},
			Failure: []*pb.RequestOp{delInRangeReq}},
	},
	}
	txnPutReq := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
		RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{putreq}},
	},
	}
	txnPutReqTwoSide := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
		RequestTxn: &pb.TxnRequest{
			Success: []*pb.RequestOp{putreq},
			Failure: []*pb.RequestOp{putreq}},
	},
	}

	kvc := toGRPC(clus.RandClient()).KV
	tests := []struct {
		txnSuccess []*pb.RequestOp
		werr       error
	}{
		{
			txnSuccess: []*pb.RequestOp{putreq, putreq},
			werr:       rpctypes.ErrGRPCDuplicateKey,
		},
		{
			txnSuccess: []*pb.RequestOp{putreq, delKeyReq},
			werr:       rpctypes.ErrGRPCDuplicateKey,
		},
		{
			txnSuccess: []*pb.RequestOp{putreq, delInRangeReq},
			werr:       rpctypes.ErrGRPCDuplicateKey,
		},
		// Then(Put(a), Then(Del(a)))
		{
			txnSuccess: []*pb.RequestOp{putreq, txnDelReq},
			werr:       rpctypes.ErrGRPCDuplicateKey,
		},
		// Then(Del(a), Then(Put(a)))
		{
			txnSuccess: []*pb.RequestOp{delInRangeReq, txnPutReq},
			werr:       rpctypes.ErrGRPCDuplicateKey,
		},
		// Then((Then(Put(a)), Else(Put(a))), (Then(Put(a)), Else(Put(a)))
		{
			txnSuccess: []*pb.RequestOp{txnPutReqTwoSide, txnPutReqTwoSide},
			werr:       rpctypes.ErrGRPCDuplicateKey,
		},
		// Then(Del(x), (Then(Put(a)), Else(Put(a))))
		{
			txnSuccess: []*pb.RequestOp{delOutOfRangeReq, txnPutReqTwoSide},
			werr:       nil,
		},
		// Then(Then(Del(a)), (Then(Del(a)), Else(Del(a))))
		{
			txnSuccess: []*pb.RequestOp{txnDelReq, txnDelReqTwoSide},
			werr:       nil,
		},
		{
			txnSuccess: []*pb.RequestOp{delKeyReq, delInRangeReq, delKeyReq, delInRangeReq},
			werr:       nil,
		},
		{
			txnSuccess: []*pb.RequestOp{putreq, delOutOfRangeReq},
			werr:       nil,
		},
	}
	for i, tt := range tests {
		txn := &pb.TxnRequest{Success: tt.txnSuccess}
		_, err := kvc.Txn(context.Background(), txn)
		if !eqErrGRPC(err, tt.werr) {
			t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
		}
	}
}
// TestV3TxnRevision tests that the transaction header revision is set as expected.
func TestV3TxnRevision(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	pr := &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")}
	presp, err := kvc.Put(context.TODO(), pr)
	if err != nil {
		t.Fatal(err)
	}

	// a read-only txn performs no write
	txnget := &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: &pb.RangeRequest{Key: []byte("abc")}}}
	txn := &pb.TxnRequest{Success: []*pb.RequestOp{txnget}}
	tresp, err := kvc.Txn(context.TODO(), txn)
	if err != nil {
		t.Fatal(err)
	}
	// did not update revision
	if presp.Header.Revision != tresp.Header.Revision {
		t.Fatalf("got rev %d, wanted rev %d", tresp.Header.Revision, presp.Header.Revision)
	}

	// deleting a non-existent key ("def") writes nothing
	txndr := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: &pb.DeleteRangeRequest{Key: []byte("def")}}}
	txn = &pb.TxnRequest{Success: []*pb.RequestOp{txndr}}
	tresp, err = kvc.Txn(context.TODO(), txn)
	if err != nil {
		t.Fatal(err)
	}
	// did not update revision
	if presp.Header.Revision != tresp.Header.Revision {
		t.Fatalf("got rev %d, wanted rev %d", tresp.Header.Revision, presp.Header.Revision)
	}

	// a put inside the txn must bump the revision by exactly one
	txnput := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("123")}}}
	txn = &pb.TxnRequest{Success: []*pb.RequestOp{txnput}}
	tresp, err = kvc.Txn(context.TODO(), txn)
	if err != nil {
		t.Fatal(err)
	}
	// updated revision
	if tresp.Header.Revision != presp.Header.Revision+1 {
		t.Fatalf("got rev %d, wanted rev %d", tresp.Header.Revision, presp.Header.Revision+1)
	}
}
// TestV3TxnCmpHeaderRev tests that the txn header revision is set as expected
// when compared to the Succeeded field in the txn response.
func TestV3TxnCmpHeaderRev(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	for i := 0; i < 10; i++ {
		// Concurrently put a key with a txn comparing on it.
		// NOTE(review): calling t.Fatal from a non-test goroutine is
		// discouraged (it does not stop the test goroutine); if Put fails,
		// the deferred close makes <-revc below yield 0 instead.
		revc := make(chan int64, 1)
		go func() {
			defer close(revc)
			pr := &pb.PutRequest{Key: []byte("k"), Value: []byte("v")}
			presp, err := kvc.Put(context.TODO(), pr)
			if err != nil {
				t.Fatal(err)
			}
			revc <- presp.Header.Revision
		}()

		// The read-only txn uses the optimized readindex server path.
		txnget := &pb.RequestOp{Request: &pb.RequestOp_RequestRange{
			RequestRange: &pb.RangeRequest{Key: []byte("k")}}}
		txn := &pb.TxnRequest{Success: []*pb.RequestOp{txnget}}
		// i = 0 /\ Succeeded => put followed txn
		cmp := &pb.Compare{
			Result:      pb.Compare_EQUAL,
			Target:      pb.Compare_VERSION,
			Key:         []byte("k"),
			TargetUnion: &pb.Compare_Version{Version: int64(i)},
		}
		txn.Compare = append(txn.Compare, cmp)

		tresp, err := kvc.Txn(context.TODO(), txn)
		if err != nil {
			t.Fatal(err)
		}

		prev := <-revc
		// put followed txn; should eval to false
		if prev > tresp.Header.Revision && !tresp.Succeeded {
			t.Errorf("#%d: got else but put rev %d followed txn rev (%+v)", i, prev, tresp)
		}
		// txn follows put; should eval to true
		if tresp.Header.Revision >= prev && tresp.Succeeded {
			t.Errorf("#%d: got then but put rev %d preceded txn (%+v)", i, prev, tresp)
		}
	}
}
// TestV3TxnRangeCompare tests range comparisons in txns
func TestV3TxnRangeCompare(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// put keys, named by expected revision
	for _, k := range []string{"/a/2", "/a/3", "/a/4", "/f/5"} {
		if _, err := clus.Client(0).Put(context.TODO(), k, "x"); err != nil {
			t.Fatal(err)
		}
	}

	// NOTE(review): the TargetUnion literals below use unkeyed fields
	// (e.g. &pb.Compare_CreateRevision{6}); `go vet` flags unkeyed fields in
	// composite literals from another package.
	tests := []struct {
		cmp      pb.Compare
		wSuccess bool
	}{
		{
			// >= /a/; all create revs fit
			pb.Compare{
				Key:         []byte("/a/"),
				RangeEnd:    []byte{0},
				Target:      pb.Compare_CREATE,
				Result:      pb.Compare_LESS,
				TargetUnion: &pb.Compare_CreateRevision{6},
			},
			true,
		},
		{
			// >= /a/; one create rev doesn't fit
			pb.Compare{
				Key:         []byte("/a/"),
				RangeEnd:    []byte{0},
				Target:      pb.Compare_CREATE,
				Result:      pb.Compare_LESS,
				TargetUnion: &pb.Compare_CreateRevision{5},
			},
			false,
		},
		{
			// prefix /a/*; all create revs fit
			pb.Compare{
				Key:         []byte("/a/"),
				RangeEnd:    []byte("/a0"),
				Target:      pb.Compare_CREATE,
				Result:      pb.Compare_LESS,
				TargetUnion: &pb.Compare_CreateRevision{5},
			},
			true,
		},
		{
			// prefix /a/*; one create rev doesn't fit
			pb.Compare{
				Key:         []byte("/a/"),
				RangeEnd:    []byte("/a0"),
				Target:      pb.Compare_CREATE,
				Result:      pb.Compare_LESS,
				TargetUnion: &pb.Compare_CreateRevision{4},
			},
			false,
		},
		{
			// does not exist, does not succeed
			pb.Compare{
				Key:         []byte("/b/"),
				RangeEnd:    []byte("/b0"),
				Target:      pb.Compare_VALUE,
				Result:      pb.Compare_EQUAL,
				TargetUnion: &pb.Compare_Value{[]byte("x")},
			},
			false,
		},
		{
			// all keys are leased
			pb.Compare{
				Key:         []byte("/a/"),
				RangeEnd:    []byte("/a0"),
				Target:      pb.Compare_LEASE,
				Result:      pb.Compare_GREATER,
				TargetUnion: &pb.Compare_Lease{0},
			},
			false,
		},
		{
			// no keys are leased
			pb.Compare{
				Key:         []byte("/a/"),
				RangeEnd:    []byte("/a0"),
				Target:      pb.Compare_LEASE,
				Result:      pb.Compare_EQUAL,
				TargetUnion: &pb.Compare_Lease{0},
			},
			true,
		},
	}

	kvc := toGRPC(clus.Client(0)).KV
	for i, tt := range tests {
		txn := &pb.TxnRequest{}
		txn.Compare = append(txn.Compare, &tt.cmp)
		tresp, err := kvc.Txn(context.TODO(), txn)
		if err != nil {
			t.Fatal(err)
		}
		if tt.wSuccess != tresp.Succeeded {
			t.Errorf("#%d: expected %v, got %v", i, tt.wSuccess, tresp.Succeeded)
		}
	}
}
// TestV3TxnNestedPath tests nested txns follow paths as expected.
func TestV3TxnNestedPath(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	// key "k" is never written, so version==0 always evaluates true
	// and version==1 always evaluates false
	cmpTrue := &pb.Compare{
		Result:      pb.Compare_EQUAL,
		Target:      pb.Compare_VERSION,
		Key:         []byte("k"),
		TargetUnion: &pb.Compare_Version{Version: int64(0)},
	}
	cmpFalse := &pb.Compare{
		Result:      pb.Compare_EQUAL,
		Target:      pb.Compare_VERSION,
		Key:         []byte("k"),
		TargetUnion: &pb.Compare_Version{Version: int64(1)},
	}

	// generate random path to eval txns
	topTxn := &pb.TxnRequest{}
	txn := topTxn
	txnPath := make([]bool, 10)
	for i := range txnPath {
		nextTxn := &pb.TxnRequest{}
		op := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: nextTxn}}
		txnPath[i] = rand.Intn(2) == 0
		if txnPath[i] {
			// force the success branch; nest the next txn there
			txn.Compare = append(txn.Compare, cmpTrue)
			txn.Success = append(txn.Success, op)
		} else {
			// force the failure branch; nest the next txn there
			txn.Compare = append(txn.Compare, cmpFalse)
			txn.Failure = append(txn.Failure, op)
		}
		txn = nextTxn
	}

	tresp, err := kvc.Txn(context.TODO(), topTxn)
	if err != nil {
		t.Fatal(err)
	}

	// walk the response tree; each level must have taken the expected branch
	curTxnResp := tresp
	for i := range txnPath {
		if curTxnResp.Succeeded != txnPath[i] {
			t.Fatalf("expected path %+v, got response %+v", txnPath, *tresp)
		}
		curTxnResp = curTxnResp.Responses[0].Response.(*pb.ResponseOp_ResponseTxn).ResponseTxn
	}
}
// TestV3PutIgnoreValue ensures that writes with ignore_value overwrites with previous key-value pair.
func TestV3PutIgnoreValue(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	key, val := []byte("foo"), []byte("bar")
	putReq := pb.PutRequest{Key: key, Value: val}

	// create lease
	lc := toGRPC(clus.RandClient()).Lease
	lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
	if err != nil {
		t.Fatal(err)
	}
	if lresp.Error != "" {
		t.Fatal(lresp.Error)
	}

	// cases run in order; each one observes the state left by the previous
	tests := []struct {
		putFunc  func() error // operation under test
		putErr   error        // expected error from putFunc
		wleaseID int64        // expected lease attached to the key afterwards
	}{
		{ // put failure for non-existent key
			func() error {
				preq := putReq
				preq.IgnoreValue = true
				_, err := kvc.Put(context.TODO(), &preq)
				return err
			},
			rpctypes.ErrGRPCKeyNotFound,
			0,
		},
		{ // txn failure for non-existent key
			func() error {
				preq := putReq
				preq.Value = nil
				preq.IgnoreValue = true
				txn := &pb.TxnRequest{}
				txn.Success = append(txn.Success, &pb.RequestOp{
					Request: &pb.RequestOp_RequestPut{RequestPut: &preq}})
				_, err := kvc.Txn(context.TODO(), txn)
				return err
			},
			rpctypes.ErrGRPCKeyNotFound,
			0,
		},
		{ // put success
			func() error {
				_, err := kvc.Put(context.TODO(), &putReq)
				return err
			},
			nil,
			0,
		},
		{ // txn success, attach lease
			func() error {
				preq := putReq
				preq.Value = nil
				preq.Lease = lresp.ID
				preq.IgnoreValue = true
				txn := &pb.TxnRequest{}
				txn.Success = append(txn.Success, &pb.RequestOp{
					Request: &pb.RequestOp_RequestPut{RequestPut: &preq}})
				_, err := kvc.Txn(context.TODO(), txn)
				return err
			},
			nil,
			lresp.ID,
		},
		{ // non-empty value with ignore_value should error
			func() error {
				preq := putReq
				preq.IgnoreValue = true
				_, err := kvc.Put(context.TODO(), &preq)
				return err
			},
			rpctypes.ErrGRPCValueProvided,
			0,
		},
		{ // overwrite with previous value, ensure no prev-kv is returned and lease is detached
			func() error {
				preq := putReq
				preq.Value = nil
				preq.IgnoreValue = true
				presp, err := kvc.Put(context.TODO(), &preq)
				if err != nil {
					return err
				}
				if presp.PrevKv != nil && len(presp.PrevKv.Key) != 0 {
					// message typo fixed: was "unexexpected"
					return fmt.Errorf("unexpected previous key-value %v", presp.PrevKv)
				}
				return nil
			},
			nil,
			0,
		},
		{ // revoke lease, ensure detached key doesn't get deleted
			func() error {
				_, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID})
				return err
			},
			nil,
			0,
		},
	}
	for i, tt := range tests {
		if err := tt.putFunc(); !eqErrGRPC(err, tt.putErr) {
			t.Fatalf("#%d: err expected %v, got %v", i, tt.putErr, err)
		}
		if tt.putErr != nil {
			continue
		}
		// on success, the key must still hold the original value and
		// carry the expected lease
		rr, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: key})
		if err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		if len(rr.Kvs) != 1 {
			t.Fatalf("#%d: len(rr.KVs) expected 1, got %d", i, len(rr.Kvs))
		}
		if !bytes.Equal(rr.Kvs[0].Value, val) {
			t.Fatalf("#%d: value expected %q, got %q", i, val, rr.Kvs[0].Value)
		}
		if rr.Kvs[0].Lease != tt.wleaseID {
			t.Fatalf("#%d: lease ID expected %d, got %d", i, tt.wleaseID, rr.Kvs[0].Lease)
		}
	}
}
// TestV3PutIgnoreLease ensures that writes with ignore_lease uses previous lease for the key overwrites.
func TestV3PutIgnoreLease(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV

	// create lease
	lc := toGRPC(clus.RandClient()).Lease
	lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
	if err != nil {
		t.Fatal(err)
	}
	if lresp.Error != "" {
		t.Fatal(lresp.Error)
	}

	key, val, val1 := []byte("zoo"), []byte("bar"), []byte("bar1")
	putReq := pb.PutRequest{Key: key, Value: val}

	// cases run in order; each one observes the state left by the previous
	tests := []struct {
		putFunc  func() error // operation under test
		putErr   error        // expected error from putFunc
		wleaseID int64        // expected lease attached to the key afterwards
		wvalue   []byte       // expected value of the key afterwards
	}{
		{ // put failure for non-existent key
			func() error {
				preq := putReq
				preq.IgnoreLease = true
				_, err := kvc.Put(context.TODO(), &preq)
				return err
			},
			rpctypes.ErrGRPCKeyNotFound,
			0,
			nil,
		},
		{ // txn failure for non-existent key
			func() error {
				preq := putReq
				preq.IgnoreLease = true
				txn := &pb.TxnRequest{}
				txn.Success = append(txn.Success, &pb.RequestOp{
					Request: &pb.RequestOp_RequestPut{RequestPut: &preq}})
				_, err := kvc.Txn(context.TODO(), txn)
				return err
			},
			rpctypes.ErrGRPCKeyNotFound,
			0,
			nil,
		},
		{ // put success
			func() error {
				preq := putReq
				preq.Lease = lresp.ID
				_, err := kvc.Put(context.TODO(), &preq)
				return err
			},
			nil,
			lresp.ID,
			val,
		},
		{ // txn success, modify value using 'ignore_lease' and ensure lease is not detached
			func() error {
				preq := putReq
				preq.Value = val1
				preq.IgnoreLease = true
				txn := &pb.TxnRequest{}
				txn.Success = append(txn.Success, &pb.RequestOp{
					Request: &pb.RequestOp_RequestPut{RequestPut: &preq}})
				_, err := kvc.Txn(context.TODO(), txn)
				return err
			},
			nil,
			lresp.ID,
			val1,
		},
		{ // non-empty lease with ignore_lease should error
			func() error {
				preq := putReq
				preq.Lease = lresp.ID
				preq.IgnoreLease = true
				_, err := kvc.Put(context.TODO(), &preq)
				return err
			},
			rpctypes.ErrGRPCLeaseProvided,
			0,
			nil,
		},
		{ // overwrite with previous value, ensure no prev-kv is returned and lease is detached
			func() error {
				presp, err := kvc.Put(context.TODO(), &putReq)
				if err != nil {
					return err
				}
				if presp.PrevKv != nil && len(presp.PrevKv.Key) != 0 {
					// message typo fixed: was "unexexpected"
					return fmt.Errorf("unexpected previous key-value %v", presp.PrevKv)
				}
				return nil
			},
			nil,
			0,
			val,
		},
		{ // revoke lease, ensure detached key doesn't get deleted
			func() error {
				_, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID})
				return err
			},
			nil,
			0,
			val,
		},
	}
	for i, tt := range tests {
		if err := tt.putFunc(); !eqErrGRPC(err, tt.putErr) {
			t.Fatalf("#%d: err expected %v, got %v", i, tt.putErr, err)
		}
		if tt.putErr != nil {
			continue
		}
		rr, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: key})
		if err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		if len(rr.Kvs) != 1 {
			t.Fatalf("#%d: len(rr.KVs) expected 1, got %d", i, len(rr.Kvs))
		}
		// message fixed: previously printed 'val' instead of the
		// per-case expected value tt.wvalue
		if !bytes.Equal(rr.Kvs[0].Value, tt.wvalue) {
			t.Fatalf("#%d: value expected %q, got %q", i, tt.wvalue, rr.Kvs[0].Value)
		}
		if rr.Kvs[0].Lease != tt.wleaseID {
			t.Fatalf("#%d: lease ID expected %d, got %d", i, tt.wleaseID, rr.Kvs[0].Lease)
		}
	}
}
// TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails.
func TestV3PutMissingLease(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.RandClient()).KV
	key := []byte("foo")
	// lease ID 123456 was never granted, so any write using it must fail
	preq := &pb.PutRequest{Key: key, Lease: 123456}
	tests := []func(){
		// put case
		func() {
			if presp, err := kvc.Put(context.TODO(), preq); err == nil {
				t.Errorf("succeeded put key. req: %v. resp: %v", preq, presp)
			}
		},
		// txn success case
		func() {
			txn := &pb.TxnRequest{}
			txn.Success = append(txn.Success, &pb.RequestOp{
				Request: &pb.RequestOp_RequestPut{
					RequestPut: preq}})
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn success. req: %v. resp: %v", txn, tresp)
			}
		},
		// txn failure case
		func() {
			txn := &pb.TxnRequest{}
			txn.Failure = append(txn.Failure, &pb.RequestOp{
				Request: &pb.RequestOp_RequestPut{
					RequestPut: preq}})
			cmp := &pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    []byte("bar"),
			}
			txn.Compare = append(txn.Compare, cmp)
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn failure. req: %v. resp: %v", txn, tresp)
			}
		},
		// ignore bad lease in failure on success txn
		func() {
			txn := &pb.TxnRequest{}
			rreq := &pb.RangeRequest{Key: []byte("bar")}
			txn.Success = append(txn.Success, &pb.RequestOp{
				Request: &pb.RequestOp_RequestRange{
					RequestRange: rreq}})
			txn.Failure = append(txn.Failure, &pb.RequestOp{
				Request: &pb.RequestOp_RequestPut{
					RequestPut: preq}})
			if tresp, err := kvc.Txn(context.TODO(), txn); err != nil {
				t.Errorf("failed good txn. req: %v. resp: %v", txn, tresp)
			}
		},
	}
	for i, f := range tests {
		f()
		// key shouldn't have been stored
		rreq := &pb.RangeRequest{Key: key}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("#%d. could not rangereq (%v)", i, err)
		} else if len(rresp.Kvs) != 0 {
			t.Errorf("#%d. expected no keys, got %v", i, rresp)
		}
	}
}
// TestV3DeleteRange tests various edge cases in the DeleteRange API.
func TestV3DeleteRange(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		keySet  []string // keys to seed the store with
		begin   string   // DeleteRange start key
		end     string   // DeleteRange end key ("" deletes a single key)
		prevKV  bool     // request previous key-value pairs
		wantSet [][]byte // keys expected to remain, in order
		deleted int64    // expected delete count
	}{
		// delete middle
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fop", false,
			[][]byte{[]byte("foo"), []byte("fop")}, 1,
		},
		// no delete
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "foo/", false,
			[][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")}, 0,
		},
		// delete first
		{
			[]string{"foo", "foo/abc", "fop"},
			"fo", "fop", false,
			[][]byte{[]byte("fop")}, 2,
		},
		// delete tail
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fos", false,
			[][]byte{[]byte("foo")}, 2,
		},
		// delete exact
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/abc", "", false,
			[][]byte{[]byte("foo"), []byte("fop")}, 1,
		},
		// delete none, [x,x)
		{
			[]string{"foo"},
			"foo", "foo", false,
			[][]byte{[]byte("foo")}, 0,
		},
		// delete middle with preserveKVs set
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fop", true,
			[][]byte{[]byte("foo"), []byte("fop")}, 1,
		},
	}
	for i, tt := range tests {
		clus := NewClusterV3(t, &ClusterConfig{Size: 3})
		kvc := toGRPC(clus.RandClient()).KV
		// seed keys with empty values
		ks := tt.keySet
		for j := range ks {
			reqput := &pb.PutRequest{Key: []byte(ks[j]), Value: []byte{}}
			_, err := kvc.Put(context.TODO(), reqput)
			if err != nil {
				t.Fatalf("couldn't put key (%v)", err)
			}
		}
		dreq := &pb.DeleteRangeRequest{
			Key:      []byte(tt.begin),
			RangeEnd: []byte(tt.end),
			PrevKv:   tt.prevKV,
		}
		dresp, err := kvc.DeleteRange(context.TODO(), dreq)
		if err != nil {
			t.Fatalf("couldn't delete range on test %d (%v)", i, err)
		}
		if tt.deleted != dresp.Deleted {
			t.Errorf("expected %d on test %v, got %d", tt.deleted, i, dresp.Deleted)
		}
		// when PrevKv was requested, one prev-kv per deleted key
		if tt.prevKV {
			if len(dresp.PrevKvs) != int(dresp.Deleted) {
				t.Errorf("preserve %d keys, want %d", len(dresp.PrevKvs), dresp.Deleted)
			}
		}
		// list everything left and compare against the expected survivors
		rreq := &pb.RangeRequest{Key: []byte{0x0}, RangeEnd: []byte{0xff}}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("couldn't get range on test %v (%v)", i, err)
		}
		if dresp.Header.Revision != rresp.Header.Revision {
			t.Errorf("expected revision %v, got %v",
				dresp.Header.Revision, rresp.Header.Revision)
		}
		keys := [][]byte{}
		for j := range rresp.Kvs {
			keys = append(keys, rresp.Kvs[j].Key)
		}
		if !reflect.DeepEqual(tt.wantSet, keys) {
			t.Errorf("expected %v on test %v, got %v", tt.wantSet, i, keys)
		}
		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}
// TestV3TxnInvalidRange tests that invalid ranges are rejected in txns.
func TestV3TxnInvalidRange(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := toGRPC(clus.RandClient()).KV
	preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	// Write the key three times so the store advances past revision 2.
	for i := 0; i < 3; i++ {
		_, err := kvc.Put(context.Background(), preq)
		if err != nil {
			t.Fatalf("couldn't put key (%v)", err)
		}
	}
	// Compact at revision 2 so that earlier revisions become unreadable.
	_, err := kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 2})
	if err != nil {
		t.Fatalf("couldn't compact kv space (%v)", err)
	}
	// future rev: a range at revision 100 (beyond the current revision)
	// inside a txn must fail the whole txn with ErrGRPCFutureRev.
	txn := &pb.TxnRequest{}
	txn.Success = append(txn.Success, &pb.RequestOp{
		Request: &pb.RequestOp_RequestPut{
			RequestPut: preq}})
	rreq := &pb.RangeRequest{Key: []byte("foo"), Revision: 100}
	txn.Success = append(txn.Success, &pb.RequestOp{
		Request: &pb.RequestOp_RequestRange{
			RequestRange: rreq}})
	if _, err := kvc.Txn(context.TODO(), txn); !eqErrGRPC(err, rpctypes.ErrGRPCFutureRev) {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCFutureRev)
	}
	// compacted rev: rewrite the same txn's range to revision 1, which was
	// just compacted away, and expect ErrGRPCCompacted.
	tv, _ := txn.Success[1].Request.(*pb.RequestOp_RequestRange)
	tv.RequestRange.Revision = 1
	if _, err := kvc.Txn(context.TODO(), txn); !eqErrGRPC(err, rpctypes.ErrGRPCCompacted) {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCCompacted)
	}
}
// TestV3TooLargeRequest checks that an oversized put request is rejected
// with ErrGRPCRequestTooLarge.
func TestV3TooLargeRequest(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := toGRPC(clus.RandClient()).KV
	// A 2MB value exceeds the default request size limit.
	value := make([]byte, 2*1024*1024)
	req := &pb.PutRequest{Key: []byte("foo"), Value: value}
	_, err := kvc.Put(context.Background(), req)
	if !eqErrGRPC(err, rpctypes.ErrGRPCRequestTooLarge) {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCRequestTooLarge)
	}
}
// TestV3Hash tests hash.
func TestV3Hash(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	client := clus.RandClient()
	kv := toGRPC(client).KV
	maint := toGRPC(client).Maintenance
	// Write some data so the hash covers a non-trivial keyspace.
	req := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	for i := 0; i < 3; i++ {
		if _, err := kv.Put(context.Background(), req); err != nil {
			t.Fatalf("couldn't put key (%v)", err)
		}
	}
	resp, err := maint.Hash(context.Background(), &pb.HashRequest{})
	if err != nil || resp.Hash == 0 {
		t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash)
	}
}
// TestV3HashRestart ensures that hash stays the same after restart.
func TestV3HashRestart(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)
	cli := clus.RandClient()
	// Take the hash before the restart.
	resp, err := toGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{})
	if err != nil || resp.Hash == 0 {
		t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash)
	}
	hash1 := resp.Hash
	// Restart the sole member and wait until it is serving again before
	// re-hashing; waitForRestart probes with a serializable range.
	clus.Members[0].Stop(t)
	clus.Members[0].Restart(t)
	clus.waitLeader(t, clus.Members)
	kvc := toGRPC(clus.Client(0)).KV
	waitForRestart(t, kvc)
	cli = clus.RandClient()
	resp, err = toGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{})
	if err != nil || resp.Hash == 0 {
		t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash)
	}
	hash2 := resp.Hash
	// The backend contents were untouched, so the hash must not change.
	if hash1 != hash2 {
		t.Fatalf("hash expected %d, got %d", hash1, hash2)
	}
}
// TestV3StorageQuotaAPI tests the V3 server respects quotas at the API layer
func TestV3StorageQuotaAPI(t *testing.T) {
	defer testutil.AfterTest(t)
	quotasize := int64(16 * os.Getpagesize())
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	// Set a quota on one node
	clus.Members[0].QuotaBackendBytes = quotasize
	clus.Members[0].Stop(t)
	clus.Members[0].Restart(t)
	defer clus.Terminate(t)
	kvc := toGRPC(clus.Client(0)).KV
	waitForRestart(t, kvc)
	key := []byte("abc")
	// test small put that fits in quota
	smallbuf := make([]byte, 512)
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err != nil {
		t.Fatal(err)
	}
	// test big put: a value the size of the whole quota must be rejected
	bigbuf := make([]byte, quotasize)
	_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf})
	if !eqErrGRPC(err, rpctypes.ErrGRPCNoSpace) {
		t.Fatalf("big put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace)
	}
	// test big txn: the same oversized value inside a txn must also be rejected
	puttxn := &pb.RequestOp{
		Request: &pb.RequestOp_RequestPut{
			RequestPut: &pb.PutRequest{
				Key:   key,
				Value: bigbuf,
			},
		},
	}
	txnreq := &pb.TxnRequest{}
	txnreq.Success = append(txnreq.Success, puttxn)
	_, txnerr := kvc.Txn(context.TODO(), txnreq)
	if !eqErrGRPC(txnerr, rpctypes.ErrGRPCNoSpace) {
		// report the txn error, not the earlier put error
		t.Fatalf("big txn got %v, expected %v", txnerr, rpctypes.ErrGRPCNoSpace)
	}
}
// TestV3RangeRequest exercises the Range RPC: single-key and multi-key
// lookups, historical revisions, limits with the More flag, sort options,
// and min/max mod/create revision filters. Each case starts a fresh cluster.
func TestV3RangeRequest(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		putKeys []string          // keys written (in order) before issuing requests
		reqs    []pb.RangeRequest // range requests to issue
		wresps  [][]string        // expected keys returned, per request
		wmores  []bool            // expected "More" flag, per request
	}{
		// single key
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// exists
				{Key: []byte("foo")},
				// doesn't exist
				{Key: []byte("baz")},
			},
			[][]string{
				{"foo"},
				{},
			},
			[]bool{false, false},
		},
		// multi-key
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				// all in range
				{Key: []byte("a"), RangeEnd: []byte("z")},
				// [b, d)
				{Key: []byte("b"), RangeEnd: []byte("d")},
				// out of range
				{Key: []byte("f"), RangeEnd: []byte("z")},
				// [c,c) = empty
				{Key: []byte("c"), RangeEnd: []byte("c")},
				// [d, b) = empty
				{Key: []byte("d"), RangeEnd: []byte("b")},
				// ["\0", "\0") => all in range
				{Key: []byte{0}, RangeEnd: []byte{0}},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{"b", "c"},
				{},
				{},
				{},
				{"a", "b", "c", "d", "e"},
			},
			[]bool{false, false, false, false, false, false},
		},
		// revision
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				// revision 0 means "latest"; each put bumps the revision by one
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 0},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 1},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 2},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 3},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{},
				{"a"},
				{"a", "b"},
			},
			[]bool{false, false, false, false},
		},
		// limit
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 1},
				// no more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 2},
			},
			[][]string{
				{"bar"},
				{"bar", "foo"},
			},
			[]bool{true, false},
		},
		// sort
		{
			[]string{"b", "a", "c", "d", "c"},
			[]pb.RangeRequest{
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_MOD,
				},
				{
					// [z,z) is empty, so nothing to sort
					Key: []byte("z"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
				{ // sort ASCEND by default
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      10,
					SortOrder:  pb.RangeRequest_NONE,
					SortTarget: pb.RangeRequest_CREATE,
				},
			},
			[][]string{
				{"a"},
				{"d"},
				{"b"},
				{"c"},
				{},
				{"b", "a", "c", "d"},
			},
			[]bool{true, true, true, true, false, false},
		},
		// min/max mod rev
		{
			// key names encode the revision at which they are written
			[]string{"rev2", "rev3", "rev4", "rev5", "rev6"},
			[]pb.RangeRequest{
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MinModRevision: 3,
				},
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MaxModRevision: 3,
				},
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MinModRevision: 3,
					MaxModRevision: 5,
				},
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MaxModRevision: 10,
				},
			},
			[][]string{
				{"rev3", "rev4", "rev5", "rev6"},
				{"rev2", "rev3"},
				{"rev3", "rev4", "rev5"},
				{"rev2", "rev3", "rev4", "rev5", "rev6"},
			},
			[]bool{false, false, false, false},
		},
		// min/max create rev
		{
			// repeated keys keep their original create revision
			[]string{"rev2", "rev3", "rev2", "rev2", "rev6", "rev3"},
			[]pb.RangeRequest{
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MinCreateRevision: 3,
				},
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MaxCreateRevision: 3,
				},
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MinCreateRevision: 3,
					MaxCreateRevision: 5,
				},
				{
					Key: []byte{0}, RangeEnd: []byte{0},
					MaxCreateRevision: 10,
				},
			},
			[][]string{
				{"rev3", "rev6"},
				{"rev2", "rev3"},
				{"rev3"},
				{"rev2", "rev3", "rev6"},
			},
			[]bool{false, false, false, false},
		},
	}
	for i, tt := range tests {
		clus := NewClusterV3(t, &ClusterConfig{Size: 3})
		// seed the keyspace for this case
		for _, k := range tt.putKeys {
			kvc := toGRPC(clus.RandClient()).KV
			req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
			if _, err := kvc.Put(context.TODO(), req); err != nil {
				t.Fatalf("#%d: couldn't put key (%v)", i, err)
			}
		}
		for j, req := range tt.reqs {
			kvc := toGRPC(clus.RandClient()).KV
			resp, err := kvc.Range(context.TODO(), &req)
			if err != nil {
				t.Errorf("#%d.%d: Range error: %v", i, j, err)
				continue
			}
			if len(resp.Kvs) != len(tt.wresps[j]) {
				t.Errorf("#%d.%d: bad len(resp.Kvs). got = %d, want = %d, ", i, j, len(resp.Kvs), len(tt.wresps[j]))
				continue
			}
			// compare returned keys positionally against expectations
			for k, wKey := range tt.wresps[j] {
				respKey := string(resp.Kvs[k].Key)
				if respKey != wKey {
					t.Errorf("#%d.%d: key[%d]. got = %v, want = %v, ", i, j, k, respKey, wKey)
				}
			}
			if resp.More != tt.wmores[j] {
				t.Errorf("#%d.%d: bad more. got = %v, want = %v, ", i, j, resp.More, tt.wmores[j])
			}
			// each put bumps the revision once; the initial revision is 1
			wrev := int64(len(tt.putKeys) + 1)
			if resp.Header.Revision != wrev {
				t.Errorf("#%d.%d: bad header revision. got = %d. want = %d", i, j, resp.Header.Revision, wrev)
			}
		}
		clus.Terminate(t)
	}
}
// newClusterV3NoClients launches a gRPC-enabled cluster without creating
// any clients for its members.
func newClusterV3NoClients(t *testing.T, cfg *ClusterConfig) *ClusterV3 {
	cfg.UseGRPC = true
	c := &ClusterV3{cluster: NewClusterByConfig(t, cfg)}
	c.Launch(t)
	return c
}
// TestTLSGRPCRejectInsecureClient checks that connection is rejected if server is TLS but not client.
func TestTLSGRPCRejectInsecureClient(t *testing.T) {
	defer testutil.AfterTest(t)
	cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo}
	clus := newClusterV3NoClients(t, &cfg)
	defer clus.Terminate(t)
	// nil out TLS field so client will use an insecure connection
	clus.Members[0].ClientTLSInfo = nil
	client, err := NewClientV3(clus.Members[0])
	if err != nil && err != context.DeadlineExceeded {
		t.Fatalf("unexpected error (%v)", err)
	} else if client == nil {
		// Ideally, no client would be returned. However, grpc will
		// return a connection without trying to handshake first so
		// the connection appears OK.
		return
	}
	defer client.Close()
	// Issue the put from a goroutine so the test can bound how long it
	// waits for the (expected) failure via the buffered channel.
	donec := make(chan error, 1)
	go func() {
		ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
		reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
		_, perr := toGRPC(client).KV.Put(ctx, reqput)
		cancel()
		donec <- perr
	}()
	// Any RPC over the mismatched connection must fail.
	if perr := <-donec; perr == nil {
		t.Fatalf("expected client error on put")
	}
}
// TestTLSGRPCRejectSecureClient checks that connection is rejected if client is TLS but not server.
func TestTLSGRPCRejectSecureClient(t *testing.T) {
	defer testutil.AfterTest(t)
	cfg := ClusterConfig{Size: 3}
	clus := newClusterV3NoClients(t, &cfg)
	defer clus.Terminate(t)
	// Force the client to dial with TLS against a plaintext server.
	clus.Members[0].ClientTLSInfo = &testTLSInfo
	client, err := NewClientV3(clus.Members[0])
	switch {
	case client != nil || err == nil:
		t.Fatalf("expected no client")
	case err != context.DeadlineExceeded:
		t.Fatalf("unexpected error (%v)", err)
	}
}
// TestTLSGRPCAcceptSecureAll checks that connection is accepted if both client and server are TLS
func TestTLSGRPCAcceptSecureAll(t *testing.T) {
	defer testutil.AfterTest(t)
	cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo}
	clus := newClusterV3NoClients(t, &cfg)
	defer clus.Terminate(t)
	client, err := NewClientV3(clus.Members[0])
	if err != nil {
		t.Fatalf("expected tls client (%v)", err)
	}
	defer client.Close()
	// A simple put must succeed over the mutually-TLS connection.
	req := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	if _, perr := toGRPC(client).KV.Put(context.TODO(), req); perr != nil {
		t.Fatalf("unexpected error on put over tls (%v)", perr)
	}
}
// TestTLSReloadAtomicReplace ensures server reloads expired/valid certs
// when all certs are atomically replaced by directory renaming.
// And expects server to reject client requests, and vice versa.
func TestTLSReloadAtomicReplace(t *testing.T) {
	// tmpDir is used only as a rename target; it must not exist yet.
	tmpDir, err := ioutil.TempDir(os.TempDir(), "fixtures-tmp")
	if err != nil {
		t.Fatal(err)
	}
	os.RemoveAll(tmpDir)
	defer os.RemoveAll(tmpDir)
	// certsDir is the live directory the server loads certs from.
	certsDir, err := ioutil.TempDir(os.TempDir(), "fixtures-to-load")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(certsDir)
	// certsDirExp stages the expired certs before they are swapped in.
	certsDirExp, err := ioutil.TempDir(os.TempDir(), "fixtures-expired")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(certsDirExp)
	// cloneFunc populates certsDir with valid certs and certsDirExp with
	// expired ones, returning the TLSInfo pointing at certsDir.
	cloneFunc := func() transport.TLSInfo {
		tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir)
		if terr != nil {
			t.Fatal(terr)
		}
		if _, err = copyTLSFiles(testTLSInfoExpired, certsDirExp); err != nil {
			t.Fatal(err)
		}
		return tlsInfo
	}
	// replaceFunc atomically swaps the expired certs into certsDir via two
	// directory renames; the order below must be preserved.
	replaceFunc := func() {
		if err = os.Rename(certsDir, tmpDir); err != nil {
			t.Fatal(err)
		}
		if err = os.Rename(certsDirExp, certsDir); err != nil {
			t.Fatal(err)
		}
		// after rename,
		// 'certsDir' contains expired certs
		// 'tmpDir' contains valid certs
		// 'certsDirExp' does not exist
	}
	// revertFunc rotates the three directories back so certsDir again
	// contains the valid certs (inverse of replaceFunc).
	revertFunc := func() {
		if err = os.Rename(tmpDir, certsDirExp); err != nil {
			t.Fatal(err)
		}
		if err = os.Rename(certsDir, tmpDir); err != nil {
			t.Fatal(err)
		}
		if err = os.Rename(certsDirExp, certsDir); err != nil {
			t.Fatal(err)
		}
	}
	testTLSReload(t, cloneFunc, replaceFunc, revertFunc)
}
// TestTLSReloadCopy ensures server reloads expired/valid certs
// when new certs are copied over, one by one. And expects server
// to reject client requests, and vice versa.
func TestTLSReloadCopy(t *testing.T) {
	certsDir, err := ioutil.TempDir(os.TempDir(), "fixtures-to-load")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(certsDir)
	// clone seeds certsDir with valid certs and returns the TLSInfo for it.
	clone := func() transport.TLSInfo {
		info, cerr := copyTLSFiles(testTLSInfo, certsDir)
		if cerr != nil {
			t.Fatal(cerr)
		}
		return info
	}
	// replace overwrites the certs in place with expired ones.
	replace := func() {
		if _, cerr := copyTLSFiles(testTLSInfoExpired, certsDir); cerr != nil {
			t.Fatal(cerr)
		}
	}
	// revert copies the valid certs back in.
	revert := func() {
		if _, cerr := copyTLSFiles(testTLSInfo, certsDir); cerr != nil {
			t.Fatal(cerr)
		}
	}
	testTLSReload(t, clone, replace, revert)
}
// testTLSReload drives a cert-reload scenario: cloneFunc stages the initial
// valid certs, replaceFunc swaps in expired ones, and revertFunc restores
// the valid certs. It verifies clients fail while the certs are expired and
// succeed again after the revert.
func testTLSReload(t *testing.T, cloneFunc func() transport.TLSInfo, replaceFunc func(), revertFunc func()) {
	defer testutil.AfterTest(t)
	// 1. separate copies for TLS assets modification
	tlsInfo := cloneFunc()
	// 2. start cluster with valid certs
	clus := NewClusterV3(t, &ClusterConfig{Size: 1, PeerTLS: &tlsInfo, ClientTLS: &tlsInfo})
	defer clus.Terminate(t)
	// 3. concurrent client dialing while certs become expired
	errc := make(chan error, 1)
	go func() {
		for {
			cc, err := tlsInfo.ClientConfig()
			if err != nil {
				// errors in 'go/src/crypto/tls/tls.go'
				// tls: private key does not match public key
				// tls: failed to find any PEM data in key input
				// tls: failed to find any PEM data in certificate input
				// Or 'does not exist', 'not found', etc
				t.Log(err)
				continue
			}
			cli, cerr := clientv3.New(clientv3.Config{
				Endpoints:   []string{clus.Members[0].GRPCAddr()},
				DialTimeout: time.Second,
				TLS:         cc,
			})
			if cerr != nil {
				// the first dial failure (expected: DeadlineExceeded once
				// the expired certs are in place) ends the loop
				errc <- cerr
				return
			}
			cli.Close()
		}
	}()
	// 4. replace certs with expired ones
	replaceFunc()
	// 5. expect dial time-out when loading expired certs
	select {
	case gerr := <-errc:
		if gerr != context.DeadlineExceeded {
			t.Fatalf("expected %v, got %v", context.DeadlineExceeded, gerr)
		}
	case <-time.After(5 * time.Second):
		t.Fatal("failed to receive dial timeout error")
	}
	// 6. replace expired certs back with valid ones
	revertFunc()
	// 7. new requests should trigger listener to reload valid certs
	tls, terr := tlsInfo.ClientConfig()
	if terr != nil {
		t.Fatal(terr)
	}
	cl, cerr := clientv3.New(clientv3.Config{
		Endpoints:   []string{clus.Members[0].GRPCAddr()},
		DialTimeout: 5 * time.Second,
		TLS:         tls,
	})
	if cerr != nil {
		t.Fatalf("expected no error, got %v", cerr)
	}
	cl.Close()
}
// TestGRPCRequireLeader checks that a unary RPC carrying the
// require-leader metadata fails with ErrNoLeader once quorum is lost.
func TestGRPCRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)
	cfg := ClusterConfig{Size: 3}
	clus := newClusterV3NoClients(t, &cfg)
	defer clus.Terminate(t)
	// Stop two of three members so the remaining one cannot keep a leader.
	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)
	client, err := NewClientV3(clus.Members[0])
	if err != nil {
		t.Fatalf("cannot create client: %v", err)
	}
	defer client.Close()
	// wait for election timeout, then member[0] will not have a leader.
	time.Sleep(time.Duration(3*electionTicks) * tickDuration)
	// Attach the require-leader metadata so the server rejects the request
	// while leaderless.
	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	ctx := metadata.NewOutgoingContext(context.Background(), md)
	reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	if _, err := toGRPC(client).KV.Put(ctx, reqput); rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader)
	}
}
// TestGRPCStreamRequireLeader checks that watch streams opened with the
// require-leader metadata are torn down while the cluster is leaderless and
// work again after quorum is restored.
func TestGRPCStreamRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)
	cfg := ClusterConfig{Size: 3}
	clus := newClusterV3NoClients(t, &cfg)
	defer clus.Terminate(t)
	client, err := NewClientV3(clus.Members[0])
	if err != nil {
		t.Fatalf("failed to create client (%v)", err)
	}
	defer client.Close()
	wAPI := toGRPC(client).Watch
	// All streams below carry the require-leader metadata.
	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	ctx := metadata.NewOutgoingContext(context.Background(), md)
	// Open a stream while the cluster still has a leader...
	wStream, err := wAPI.Watch(ctx)
	if err != nil {
		t.Fatalf("wAPI.Watch error: %v", err)
	}
	// ...then take away quorum.
	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)
	// existing stream should be rejected
	_, err = wStream.Recv()
	if rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader)
	}
	// new stream should also be rejected
	wStream, err = wAPI.Watch(ctx)
	if err != nil {
		t.Fatalf("wAPI.Watch error: %v", err)
	}
	_, err = wStream.Recv()
	if rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader)
	}
	// Restore quorum and give the leader time to establish itself.
	clus.Members[1].Restart(t)
	clus.Members[2].Restart(t)
	clus.waitLeader(t, clus.Members)
	time.Sleep(time.Duration(2*electionTicks) * tickDuration)
	// new stream should also be OK now after we restarted the other members
	wStream, err = wAPI.Watch(ctx)
	if err != nil {
		t.Fatalf("wAPI.Watch error: %v", err)
	}
	wreq := &pb.WatchRequest{
		RequestUnion: &pb.WatchRequest_CreateRequest{
			CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")},
		},
	}
	err = wStream.Send(wreq)
	if err != nil {
		t.Errorf("err = %v, want nil", err)
	}
}
// TestV3LargeRequests ensures that configurable MaxRequestBytes works as intended.
func TestV3LargeRequests(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		maxRequestBytes uint  // server-side request size limit
		valueSize       int   // size of the value in the put request
		expectError     error // nil means the put should succeed
	}{
		// don't set to 0. use 0 as the default.
		{1, 1024, rpctypes.ErrGRPCRequestTooLarge},
		{10 * 1024 * 1024, 9 * 1024 * 1024, nil},
		{10 * 1024 * 1024, 10 * 1024 * 1024, rpctypes.ErrGRPCRequestTooLarge},
		{10 * 1024 * 1024, 10*1024*1024 + 5, rpctypes.ErrGRPCRequestTooLarge},
	}
	for i, test := range tests {
		clus := NewClusterV3(t, &ClusterConfig{Size: 1, MaxRequestBytes: test.maxRequestBytes})
		kvcli := toGRPC(clus.Client(0)).KV
		reqput := &pb.PutRequest{Key: []byte("foo"), Value: make([]byte, test.valueSize)}
		_, err := kvcli.Put(context.TODO(), reqput)
		if !eqErrGRPC(err, test.expectError) {
			t.Errorf("#%d: expected error %v, got %v", i, test.expectError, err)
		}
		// request went through, expect large response back from server
		if test.expectError == nil {
			reqget := &pb.RangeRequest{Key: []byte("foo")}
			// limit receive call size with original value + gRPC overhead bytes
			_, err = kvcli.Range(context.TODO(), reqget, grpc.MaxCallRecvMsgSize(test.valueSize+512*1024))
			if err != nil {
				t.Errorf("#%d: range expected no error, got %v", i, err)
			}
		}
		clus.Terminate(t)
	}
}
func eqErrGRPC(err1 error, err2 error) bool {
return !(err1 == nil && err2 != nil) || err1.Error() == err2.Error()
}
// waitForRestart tries a range request until the client's server responds.
// This is mainly a stop-gap function until grpcproxy's KVClient adapter
// (and by extension, clientv3) supports grpc.CallOption pass-through so
// FailFast=false works with Put.
func waitForRestart(t *testing.T, kvc pb.KVClient) {
	probe := &pb.RangeRequest{Key: []byte("_"), Serializable: true}
	_, err := kvc.Range(context.TODO(), probe, grpc.FailFast(false))
	if err != nil {
		t.Fatal(err)
	}
}
|
package checker
import (
"context"
"fmt"
"io"
"os"
"sync"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/pack"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
"golang.org/x/sync/errgroup"
)
// Checker runs various checks on a repository. It is advisable to create an
// exclusive Lock in the repository before running any checks.
//
// A Checker only tests for internal errors within the data structures of the
// repository (e.g. missing blobs), and needs a valid Repository to work on.
type Checker struct {
	// packs maps each known pack file ID to its size in bytes, computed
	// from the index entries in LoadIndex.
	packs map[restic.ID]int64
	// blobRefs records, per blob handle, whether the blob exists in the
	// index and whether it has been referenced during tree traversal.
	blobRefs struct {
		sync.Mutex
		// see flags below
		M map[restic.BlobHandle]blobStatus
	}
	masterIndex *repository.MasterIndex
	repo        restic.Repository
}
// blobStatus is a bit set describing what is known about a single blob.
type blobStatus uint8

const (
	// blobStatusExists is set when the blob appears in a loaded index.
	blobStatusExists blobStatus = 1 << iota
	// blobStatusReferenced is set when the blob has been referenced
	// during checking (e.g. a tree queued for loading).
	blobStatusReferenced
)
// New returns a new checker which runs on repo.
func New(repo restic.Repository) *Checker {
	checker := &Checker{
		packs:       make(map[restic.ID]int64),
		masterIndex: repository.NewMasterIndex(),
		repo:        repo,
	}
	checker.blobRefs.M = make(map[restic.BlobHandle]blobStatus)
	return checker
}
// defaultParallelism is the number of concurrent workers used when loading
// indexes and snapshots and when checking trees.
const defaultParallelism = 5
// ErrDuplicatePacks is returned when a pack is found in more than one index.
type ErrDuplicatePacks struct {
	PackID  restic.ID     // the duplicated pack
	Indexes restic.IDSet  // all indexes that list the pack
}

// Error returns a message naming the pack and the indexes containing it.
func (e ErrDuplicatePacks) Error() string {
	return fmt.Sprintf("pack %v contained in several indexes: %v", e.PackID.Str(), e.Indexes)
}
// ErrOldIndexFormat is returned when an index with the old format is
// found.
type ErrOldIndexFormat struct {
	restic.ID // ID of the old-format index file
}

// Error returns a message naming the old-format index.
func (err ErrOldIndexFormat) Error() string {
	return fmt.Sprintf("index %v has old format", err.ID.Str())
}
// LoadIndex loads all index files. It runs a three-stage pipeline (lister,
// decoder workers, single collector) under an errgroup, fills in c.packs and
// c.blobRefs, and installs the merged master index into the repository.
// Non-fatal findings (e.g. old index format, duplicate packs) are returned
// as hints, failures as errs.
func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) {
	debug.Log("Start")
	// track spawned goroutines using wg, create a new context which is
	// cancelled as soon as an error occurs.
	wg, wgCtx := errgroup.WithContext(ctx)
	type FileInfo struct {
		restic.ID
		Size int64
	}
	type Result struct {
		*repository.Index
		restic.ID
		Err error
	}
	ch := make(chan FileInfo)
	resultCh := make(chan Result)
	// send list of index files through ch, which is closed afterwards
	wg.Go(func() error {
		defer close(ch)
		return c.repo.List(wgCtx, restic.IndexFile, func(id restic.ID, size int64) error {
			select {
			case <-wgCtx.Done():
				return nil
			case ch <- FileInfo{id, size}:
			}
			return nil
		})
	})
	// a worker receives an index ID from ch, loads the index, and sends it to indexCh
	worker := func() error {
		// buf is reused across iterations to avoid reallocating the
		// decryption buffer for every index file.
		var buf []byte
		for fi := range ch {
			debug.Log("worker got file %v", fi.ID.Str())
			var err error
			var idx *repository.Index
			oldFormat := false
			buf, err = c.repo.LoadAndDecrypt(wgCtx, buf[:0], restic.IndexFile, fi.ID)
			if err == nil {
				idx, oldFormat, err = repository.DecodeIndex(buf, fi.ID)
			}
			// an old-format index is only a hint, not an error
			if oldFormat {
				debug.Log("index %v has old format", fi.ID.Str())
				hints = append(hints, ErrOldIndexFormat{fi.ID})
			}
			err = errors.Wrapf(err, "error loading index %v", fi.ID.Str())
			select {
			case resultCh <- Result{idx, fi.ID, err}:
			case <-wgCtx.Done():
			}
		}
		return nil
	}
	// run workers on ch
	wg.Go(func() error {
		defer close(resultCh)
		return repository.RunWorkers(defaultParallelism, worker)
	})
	// receive decoded indexes; this is the only goroutine that touches
	// c.masterIndex, c.blobRefs.M and packToIndex, so no locking is needed here
	packToIndex := make(map[restic.ID]restic.IDSet)
	wg.Go(func() error {
		for res := range resultCh {
			debug.Log("process index %v, err %v", res.ID, res.Err)
			if res.Err != nil {
				errs = append(errs, res.Err)
				continue
			}
			c.masterIndex.Insert(res.Index)
			debug.Log("process blobs")
			cnt := 0
			for blob := range res.Index.Each(wgCtx) {
				h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
				c.blobRefs.M[h] = blobStatusExists
				cnt++
				// remember which indexes mention each pack so duplicate
				// packs can be reported below
				if _, ok := packToIndex[blob.PackID]; !ok {
					packToIndex[blob.PackID] = restic.NewIDSet()
				}
				packToIndex[blob.PackID].Insert(res.ID)
			}
			debug.Log("%d blobs processed", cnt)
		}
		return nil
	})
	err := wg.Wait()
	if err != nil {
		errs = append(errs, err)
	}
	// Merge index before computing pack sizes, as this needs removed duplicates
	c.masterIndex.MergeFinalIndexes()
	// compute pack size using index entries
	for blob := range c.masterIndex.Each(ctx) {
		size, ok := c.packs[blob.PackID]
		if !ok {
			size = pack.HeaderSize
		}
		c.packs[blob.PackID] = size + int64(pack.PackedSizeOfBlob(blob.Length))
	}
	debug.Log("checking for duplicate packs")
	for packID := range c.packs {
		debug.Log("  check pack %v: contained in %d indexes", packID, len(packToIndex[packID]))
		if len(packToIndex[packID]) > 1 {
			hints = append(hints, ErrDuplicatePacks{
				PackID:  packID,
				Indexes: packToIndex[packID],
			})
		}
	}
	err = c.repo.SetIndex(c.masterIndex)
	if err != nil {
		debug.Log("SetIndex returned error: %v", err)
		errs = append(errs, err)
	}
	return hints, errs
}
// PackError describes an error with a specific pack.
type PackError struct {
	ID       restic.ID // pack the error refers to
	Orphaned bool      // true if the pack is not referenced in any index
	Err      error     // underlying error
}

// Error returns the pack ID followed by the underlying error message.
func (e PackError) Error() string {
	return "pack " + e.ID.Str() + ": " + e.Err.Error()
}
// IsOrphanedPack returns true if the error describes a pack which is not
// contained in any index.
func IsOrphanedPack(err error) bool {
	e, ok := errors.Cause(err).(PackError)
	return ok && e.Orphaned
}
// Packs checks that all packs referenced in the index are still available and
// there are no packs that aren't in an index. errChan is closed after all
// packs have been checked.
func (c *Checker) Packs(ctx context.Context, errChan chan<- error) {
	defer close(errChan)
	debug.Log("checking for %d packs", len(c.packs))
	debug.Log("listing repository packs")
	// repoPacks holds the packs actually present in the repository backend.
	repoPacks := make(map[restic.ID]int64)
	err := c.repo.List(ctx, restic.PackFile, func(id restic.ID, size int64) error {
		repoPacks[id] = size
		return nil
	})
	if err != nil {
		errChan <- err
	}
	for id, size := range c.packs {
		reposize, ok := repoPacks[id]
		// remove from repoPacks so we can find orphaned packs
		delete(repoPacks, id)
		// missing: present in c.packs but not in the repo
		if !ok {
			select {
			case <-ctx.Done():
				return
			case errChan <- PackError{ID: id, Err: errors.New("does not exist")}:
			}
			continue
		}
		// size not matching: present in c.packs and in the repo, but sizes do not match
		if size != reposize {
			select {
			case <-ctx.Done():
				return
			case errChan <- PackError{ID: id, Err: errors.Errorf("unexpected file size: got %d, expected %d", reposize, size)}:
			}
		}
	}
	// orphaned: present in the repo but not in c.packs
	for orphanID := range repoPacks {
		select {
		case <-ctx.Done():
			return
		case errChan <- PackError{ID: orphanID, Orphaned: true, Err: errors.New("not referenced in any index")}:
		}
	}
}
// Error is an error that occurred while checking a repository.
type Error struct {
	TreeID restic.ID
	BlobID restic.ID
	Err    error
}

// Error formats the message, prefixing it with whichever of the tree and
// blob IDs are set.
func (e Error) Error() string {
	switch {
	case !e.BlobID.IsNull() && !e.TreeID.IsNull():
		return "tree " + e.TreeID.Str() + ", blob " + e.BlobID.Str() + ": " + e.Err.Error()
	case !e.TreeID.IsNull():
		return "tree " + e.TreeID.Str() + ": " + e.Err.Error()
	default:
		return e.Err.Error()
	}
}
// loadTreeFromSnapshot returns the root tree ID of the snapshot with the
// given id, or an error if the snapshot cannot be loaded or has no tree.
func loadTreeFromSnapshot(ctx context.Context, repo restic.Repository, id restic.ID) (restic.ID, error) {
	snapshot, err := restic.LoadSnapshot(ctx, repo, id)
	if err != nil {
		debug.Log("error loading snapshot %v: %v", id, err)
		return restic.ID{}, err
	}
	if snapshot.Tree == nil {
		debug.Log("snapshot %v has no tree", id)
		return restic.ID{}, errors.Errorf("snapshot %v has no tree", id)
	}
	return *snapshot.Tree, nil
}
// loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs.
// Loading is done concurrently by defaultParallelism workers; per-snapshot
// failures are collected and returned alongside the IDs that did load.
func loadSnapshotTreeIDs(ctx context.Context, repo restic.Repository) (restic.IDs, []error) {
	// trees and errs are shared between workers, each guarded by its
	// embedded mutex.
	var trees struct {
		IDs restic.IDs
		sync.Mutex
	}
	var errs struct {
		errs []error
		sync.Mutex
	}
	// track spawned goroutines using wg, create a new context which is
	// cancelled as soon as an error occurs.
	wg, ctx := errgroup.WithContext(ctx)
	ch := make(chan restic.ID)
	// send list of index files through ch, which is closed afterwards
	wg.Go(func() error {
		defer close(ch)
		return repo.List(ctx, restic.SnapshotFile, func(id restic.ID, size int64) error {
			select {
			case <-ctx.Done():
				return nil
			case ch <- id:
			}
			return nil
		})
	})
	// a worker receives an index ID from ch, loads the snapshot and the tree,
	// and adds the result to errs and trees.
	worker := func() error {
		for id := range ch {
			debug.Log("load snapshot %v", id)
			treeID, err := loadTreeFromSnapshot(ctx, repo, id)
			if err != nil {
				errs.Lock()
				errs.errs = append(errs.errs, err)
				errs.Unlock()
				continue
			}
			debug.Log("snapshot %v has tree %v", id, treeID)
			trees.Lock()
			trees.IDs = append(trees.IDs, treeID)
			trees.Unlock()
		}
		return nil
	}
	for i := 0; i < defaultParallelism; i++ {
		wg.Go(worker)
	}
	// after Wait no worker is running, so the unlocked append is safe
	err := wg.Wait()
	if err != nil {
		errs.errs = append(errs.errs, err)
	}
	return trees.IDs, errs.errs
}
// TreeError collects several errors that occurred while processing a tree.
type TreeError struct {
	ID     restic.ID // tree the errors belong to
	Errors []error
}

// Error returns the tree ID together with all collected errors.
func (e TreeError) Error() string {
	return fmt.Sprintf("tree %v: %v", e.ID.Str(), e.Errors)
}
// treeJob bundles the result of loading a single tree: the tree's ID, the
// load error (if any), and the decoded tree itself.
type treeJob struct {
	restic.ID
	error
	*restic.Tree
}
// loadTreeWorker loads trees from repo and sends them to out.
//
// It alternates between receiving and sending by nil-ing out the channel it
// is not currently using: a nil channel case in a select never fires, so the
// worker receives only while it has no job pending and sends only while it
// does.
func loadTreeWorker(ctx context.Context, repo restic.Repository,
	in <-chan restic.ID, out chan<- treeJob,
	wg *sync.WaitGroup) {
	defer func() {
		debug.Log("exiting")
		wg.Done()
	}()
	var (
		inCh  = in
		outCh = out
		job   treeJob
	)
	// start in receive mode: nothing to send yet
	outCh = nil
	for {
		select {
		case <-ctx.Done():
			return
		case treeID, ok := <-inCh:
			if !ok {
				return
			}
			debug.Log("load tree %v", treeID)
			tree, err := repo.LoadTree(ctx, treeID)
			debug.Log("load tree %v (%v) returned err: %v", tree, treeID, err)
			job = treeJob{ID: treeID, error: err, Tree: tree}
			// switch to send mode until the job is delivered
			outCh = out
			inCh = nil
		case outCh <- job:
			debug.Log("sent tree %v", job.ID)
			// job delivered; switch back to receive mode
			outCh = nil
			inCh = in
		}
	}
}
// checkTreeWorker checks the trees received and sends out errors to errChan.
//
// Like loadTreeWorker, it toggles between receive and send mode by nil-ing
// the unused channel; it only enters send mode when a tree produced errors.
func (c *Checker) checkTreeWorker(ctx context.Context, in <-chan treeJob, out chan<- error, wg *sync.WaitGroup) {
	defer func() {
		debug.Log("exiting")
		wg.Done()
	}()
	var (
		inCh      = in
		outCh     = out
		treeError TreeError
	)
	// start in receive mode: no errors to report yet
	outCh = nil
	for {
		select {
		case <-ctx.Done():
			debug.Log("done channel closed, exiting")
			return
		case job, ok := <-inCh:
			if !ok {
				debug.Log("input channel closed, exiting")
				return
			}
			debug.Log("check tree %v (tree %v, err %v)", job.ID, job.Tree, job.error)
			var errs []error
			if job.error != nil {
				// the tree failed to load; report that instead of checking
				errs = append(errs, job.error)
			} else {
				errs = c.checkTree(job.ID, job.Tree)
			}
			if len(errs) > 0 {
				debug.Log("checked tree %v: %v errors", job.ID, len(errs))
				treeError = TreeError{ID: job.ID, Errors: errs}
				// switch to send mode until the error is delivered
				outCh = out
				inCh = nil
			}
		case outCh <- treeError:
			debug.Log("tree %v: sent %d errors", treeError.ID, len(treeError.Errors))
			outCh = nil
			inCh = in
		}
	}
}
// filterTrees drives the tree-checking pipeline: it feeds tree IDs from
// backlog to the loaders via loaderChan, receives loaded trees on in,
// queues their subtrees (skipping trees already referenced), and forwards
// each job to the checkers via out. It returns when the backlog is drained
// and no load jobs are outstanding, closing both output channels.
func (c *Checker) filterTrees(ctx context.Context, backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan treeJob, out chan<- treeJob) {
	defer func() {
		debug.Log("closing output channels")
		close(loaderChan)
		close(out)
	}()
	var (
		inCh                    = in
		outCh                   = out
		loadCh                  = loaderChan
		job                     treeJob
		nextTreeID              restic.ID
		outstandingLoadTreeJobs = 0
	)
	// outCh/loadCh are enabled (non-nil) only while there is something to
	// send on them; a nil channel case in a select never fires.
	outCh = nil
	loadCh = nil
	for {
		if loadCh == nil && len(backlog) > 0 {
			// process last added ids first, that is traverse the tree in depth-first order
			ln := len(backlog) - 1
			nextTreeID, backlog = backlog[ln], backlog[:ln]
			// use a separate flag for processed trees to ensure that check still processes trees
			// even when a file references a tree blob
			c.blobRefs.Lock()
			h := restic.BlobHandle{ID: nextTreeID, Type: restic.TreeBlob}
			status := c.blobRefs.M[h]
			c.blobRefs.Unlock()
			if (status & blobStatusReferenced) != 0 {
				continue
			}
			loadCh = loaderChan
		}
		// all work dispatched and delivered: done
		if loadCh == nil && outCh == nil && outstandingLoadTreeJobs == 0 {
			debug.Log("backlog is empty, all channels nil, exiting")
			return
		}
		select {
		case <-ctx.Done():
			return
		case loadCh <- nextTreeID:
			outstandingLoadTreeJobs++
			loadCh = nil
			// mark the tree referenced so it is never queued twice
			c.blobRefs.Lock()
			h := restic.BlobHandle{ID: nextTreeID, Type: restic.TreeBlob}
			c.blobRefs.M[h] |= blobStatusReferenced
			c.blobRefs.Unlock()
		case j, ok := <-inCh:
			if !ok {
				debug.Log("input channel closed")
				inCh = nil
				in = nil
				continue
			}
			outstandingLoadTreeJobs--
			debug.Log("input job tree %v", j.ID)
			if j.error != nil {
				debug.Log("received job with error: %v (tree %v, ID %v)", j.error, j.Tree, j.ID)
			} else if j.Tree == nil {
				debug.Log("received job with nil tree pointer: %v (ID %v)", j.error, j.ID)
				// send a new job with the new error instead of the old one
				j = treeJob{ID: j.ID, error: errors.New("tree is nil and error is nil")}
			} else {
				subtrees := j.Tree.Subtrees()
				debug.Log("subtrees for tree %v: %v", j.ID, subtrees)
				// iterate backwards over subtree to compensate backwards traversal order of nextTreeID selection
				for i := len(subtrees) - 1; i >= 0; i-- {
					id := subtrees[i]
					if id.IsNull() {
						// We do not need to raise this error here, it is
						// checked when the tree is checked. Just make sure
						// that we do not add any null IDs to the backlog.
						debug.Log("tree %v has nil subtree", j.ID)
						continue
					}
					backlog = append(backlog, id)
				}
			}
			// forward the (possibly rewritten) job to the checkers
			job = j
			outCh = out
			inCh = nil
		case outCh <- job:
			debug.Log("tree sent to check: %v", job.ID)
			outCh = nil
			inCh = in
		}
	}
}
// Structure checks that for all snapshots all referenced data blobs and
// subtrees are available in the index. errChan is closed after all trees have
// been traversed.
func (c *Checker) Structure(ctx context.Context, errChan chan<- error) {
	defer close(errChan)

	trees, errs := loadSnapshotTreeIDs(ctx, c.repo)
	debug.Log("need to check %d trees from snapshots, %d errs returned", len(trees), len(errs))

	// Report snapshot-loading errors first, honoring cancellation.
	for _, err := range errs {
		select {
		case <-ctx.Done():
			return
		case errChan <- err:
		}
	}

	// Pipeline: filterTrees feeds IDs to loadTreeWorker, whose results flow
	// through filterTrees again into checkTreeWorker.
	var (
		idChan   = make(chan restic.ID)
		loadedCh = make(chan treeJob)
		checkCh  = make(chan treeJob)
		wg       sync.WaitGroup
	)
	for i := 0; i < defaultParallelism; i++ {
		wg.Add(2)
		go loadTreeWorker(ctx, c.repo, idChan, loadedCh, &wg)
		go c.checkTreeWorker(ctx, checkCh, errChan, &wg)
	}

	c.filterTrees(ctx, trees, idChan, loadedCh, checkCh)
	wg.Wait()
}
// checkTree validates the nodes of a single tree: file nodes must have a
// non-nil content list with non-null, index-known blob IDs; dir nodes must
// carry a usable subtree ID. All referenced data blobs are marked as
// referenced in c.blobRefs. All problems found are returned as errors.
func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
	debug.Log("checking tree %v", id)

	var blobs []restic.ID

	for _, node := range tree.Nodes {
		switch node.Type {
		case "file":
			if node.Content == nil {
				errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)})
			}

			// NOTE(review): size is accumulated but never read afterwards;
			// the loop only verifies that each blob's size is known.
			var size uint64
			for b, blobID := range node.Content {
				if blobID.IsNull() {
					errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d has null ID", node.Name, b)})
					continue
				}
				blobs = append(blobs, blobID)
				blobSize, found := c.repo.LookupBlobSize(blobID, restic.DataBlob)
				if !found {
					errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d size could not be found", node.Name, b)})
				}
				size += uint64(blobSize)
			}
		case "dir":
			if node.Subtree == nil {
				errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q has no subtree", node.Name)})
				continue
			}

			if node.Subtree.IsNull() {
				errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q subtree id is null", node.Name)})
				continue
			}

		case "symlink", "socket", "chardev", "dev", "fifo":
			// nothing to check

		default:
			errs = append(errs, Error{TreeID: id, Err: errors.Errorf("node %q with invalid type %q", node.Name, node.Type)})
		}

		if node.Name == "" {
			errs = append(errs, Error{TreeID: id, Err: errors.New("node with empty name")})
		}
	}

	// Mark every collected data blob as referenced; complain if the index
	// does not know it.
	for _, blobID := range blobs {
		c.blobRefs.Lock()
		h := restic.BlobHandle{ID: blobID, Type: restic.DataBlob}
		if (c.blobRefs.M[h] & blobStatusExists) == 0 {
			debug.Log("tree %v references blob %v which isn't contained in index", id, blobID)
			errs = append(errs, Error{TreeID: id, BlobID: blobID, Err: errors.New("not found in index")})
		}
		c.blobRefs.M[h] |= blobStatusReferenced
		debug.Log("blob %v is referenced", blobID)
		c.blobRefs.Unlock()
	}

	return errs
}
// UnusedBlobs returns all blobs that have never been referenced.
func (c *Checker) UnusedBlobs() (blobs restic.BlobHandles) {
	c.blobRefs.Lock()
	defer c.blobRefs.Unlock()

	debug.Log("checking %d blobs", len(c.blobRefs.M))
	for handle, status := range c.blobRefs.M {
		if status&blobStatusReferenced != 0 {
			continue
		}
		debug.Log("blob %v not referenced", handle)
		blobs = append(blobs, handle)
	}
	return blobs
}
// CountPacks returns the number of packs in the repository.
func (c *Checker) CountPacks() uint64 {
	return uint64(len(c.packs))
}

// GetPacks returns IDSet of packs in the repository
func (c *Checker) GetPacks() map[restic.ID]int64 {
	// NOTE(review): this returns the internal pack-size map itself, not a
	// copy; callers must not mutate it.
	return c.packs
}
// checkPack reads a pack and checks the integrity of all blobs: the pack hash
// must match its ID, the declared size must match the downloaded size, every
// blob must decrypt, hash to its ID, and be recorded in the index at this
// pack, offset and length.
func checkPack(ctx context.Context, r restic.Repository, id restic.ID, size int64) error {
	debug.Log("checking pack %v", id)
	h := restic.Handle{Type: restic.PackFile, Name: id.String()}

	packfile, hash, realSize, err := repository.DownloadAndHash(ctx, r.Backend(), h)
	if err != nil {
		return errors.Wrap(err, "checkPack")
	}

	// remove the temporary file regardless of outcome
	defer func() {
		_ = packfile.Close()
		_ = os.Remove(packfile.Name())
	}()

	debug.Log("hash for pack %v is %v", id, hash)

	if !hash.Equal(id) {
		debug.Log("Pack ID does not match, want %v, got %v", id, hash)
		return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
	}

	if realSize != size {
		debug.Log("Pack size does not match, want %v, got %v", size, realSize)
		return errors.Errorf("Pack size does not match, want %v, got %v", size, realSize)
	}

	blobs, err := pack.List(r.Key(), packfile, size)
	if err != nil {
		return err
	}

	var errs []error
	var buf []byte
	sizeFromBlobs := int64(pack.HeaderSize) // pack size computed only from blob information
	idx := r.Index()
	for i, blob := range blobs {
		sizeFromBlobs += int64(pack.PackedSizeOfBlob(blob.Length))
		debug.Log("  check blob %d: %v", i, blob)

		// reuse buf across iterations, growing it only when needed
		buf = buf[:cap(buf)]
		if uint(len(buf)) < blob.Length {
			buf = make([]byte, blob.Length)
		}
		buf = buf[:blob.Length]

		_, err := packfile.Seek(int64(blob.Offset), 0)
		if err != nil {
			return errors.Errorf("Seek(%v): %v", blob.Offset, err)
		}

		_, err = io.ReadFull(packfile, buf)
		if err != nil {
			debug.Log("  error loading blob %v: %v", blob.ID, err)
			errs = append(errs, errors.Errorf("blob %v: %v", i, err))
			continue
		}

		nonce, ciphertext := buf[:r.Key().NonceSize()], buf[r.Key().NonceSize():]
		plaintext, err := r.Key().Open(ciphertext[:0], nonce, ciphertext, nil)
		if err != nil {
			debug.Log("  error decrypting blob %v: %v", blob.ID, err)
			errs = append(errs, errors.Errorf("blob %v: %v", i, err))
			continue
		}

		hash := restic.Hash(plaintext)
		if !hash.Equal(blob.ID) {
			debug.Log("  Blob ID does not match, want %v, got %v", blob.ID, hash)
			errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
			continue
		}

		// Check if blob is contained in index and position is correct.
		// Also verify offset and length, so that index entries pointing at
		// the wrong position inside the pack are detected.
		idxHas := false
		for _, pb := range idx.Lookup(blob.ID, blob.Type) {
			if pb.PackID == id && pb.Offset == blob.Offset && pb.Length == blob.Length {
				idxHas = true
				break
			}
		}
		if !idxHas {
			errs = append(errs, errors.Errorf("Blob %v is not contained in index or position is incorrect", blob.ID.Str()))
			continue
		}
	}

	if sizeFromBlobs != size {
		debug.Log("Pack size does not match, want %v, got %v", size, sizeFromBlobs)
		errs = append(errs, errors.Errorf("Pack size does not match, want %v, got %v", size, sizeFromBlobs))
	}

	if len(errs) > 0 {
		return errors.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs)
	}

	return nil
}
// ReadData loads all data from the repository and checks the integrity.
func (c *Checker) ReadData(ctx context.Context, errChan chan<- error) {
	// No progress counter is passed; ReadPacks calls p.Add(1) on it, so
	// progress.Counter is presumably nil-safe — TODO confirm.
	c.ReadPacks(ctx, c.packs, nil, errChan)
}
// ReadPacks loads data from specified packs and checks the integrity.
// p is advanced by one per checked pack; errChan is closed on return.
func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *progress.Counter, errChan chan<- error) {
	defer close(errChan)

	g, ctx := errgroup.WithContext(ctx)
	type packsize struct {
		id   restic.ID
		size int64
	}
	ch := make(chan packsize)

	// run workers
	for i := 0; i < defaultParallelism; i++ {
		g.Go(func() error {
			for {
				var ps packsize
				var ok bool

				select {
				case <-ctx.Done():
					return nil
				case ps, ok = <-ch:
					if !ok {
						// feeder closed ch: no more work
						return nil
					}
				}
				err := checkPack(ctx, c.repo, ps.id, ps.size)
				// p may be nil (see ReadData) — assumed nil-safe; TODO confirm
				p.Add(1)
				if err == nil {
					continue
				}

				select {
				case <-ctx.Done():
					return nil
				case errChan <- err:
				}
			}
		})
	}

	// push packs to ch
	// NOTE(review): the ctx.Done case does not break out of the loop, so
	// after cancellation the loop still iterates over all remaining packs
	// (each iteration returns immediately via the done case).
	for pack, size := range packs {
		select {
		case ch <- packsize{id: pack, size: size}:
		case <-ctx.Done():
		}
	}
	close(ch)

	err := g.Wait()
	if err != nil {
		select {
		case <-ctx.Done():
			return
		case errChan <- err:
		}
	}
}
check: Also check blob length and offset
package checker
import (
"context"
"fmt"
"io"
"os"
"sync"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/pack"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
"golang.org/x/sync/errgroup"
)
// Checker runs various checks on a repository. It is advisable to create an
// exclusive Lock in the repository before running any checks.
//
// A Checker only tests for internal errors within the data structures of the
// repository (e.g. missing blobs), and needs a valid Repository to work on.
type Checker struct {
	// packs maps pack IDs to their size as computed from index entries.
	packs map[restic.ID]int64

	// blobRefs tracks, per blob, whether it exists in the index and whether
	// anything references it; guarded by the embedded mutex.
	blobRefs struct {
		sync.Mutex
		// see flags below
		M map[restic.BlobHandle]blobStatus
	}

	masterIndex *repository.MasterIndex
	repo        restic.Repository
}

// blobStatus is a bit set describing what is known about a blob.
type blobStatus uint8

const (
	blobStatusExists     blobStatus = 1 << iota // blob is present in the index
	blobStatusReferenced                        // blob is referenced by a tree/file
)
// New returns a new checker which runs on repo.
func New(repo restic.Repository) *Checker {
	checker := &Checker{
		packs:       make(map[restic.ID]int64),
		masterIndex: repository.NewMasterIndex(),
		repo:        repo,
	}
	checker.blobRefs.M = make(map[restic.BlobHandle]blobStatus)
	return checker
}
// defaultParallelism is the number of concurrent workers used for loading
// indexes, trees and packs.
const defaultParallelism = 5
// ErrDuplicatePacks is returned when a pack is found in more than one index.
type ErrDuplicatePacks struct {
	PackID  restic.ID    // the pack occurring in several indexes
	Indexes restic.IDSet // the indexes containing it
}

// Error implements the error interface.
func (e ErrDuplicatePacks) Error() string {
	return fmt.Sprintf("pack %v contained in several indexes: %v", e.PackID.Str(), e.Indexes)
}
// ErrOldIndexFormat is returned when an index with the old format is
// found.
type ErrOldIndexFormat struct {
	restic.ID // ID of the affected index file
}

// Error implements the error interface.
func (err ErrOldIndexFormat) Error() string {
	return fmt.Sprintf("index %v has old format", err.ID.Str())
}
// LoadIndex loads all index files in parallel, inserts them into the master
// index, records which blob exists in which pack, and computes per-pack sizes.
// It returns non-fatal hints (old index format, duplicate packs) and errors.
func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) {
	debug.Log("Start")

	// track spawned goroutines using wg, create a new context which is
	// cancelled as soon as an error occurs.
	wg, wgCtx := errgroup.WithContext(ctx)

	type FileInfo struct {
		restic.ID
		Size int64
	}
	type Result struct {
		*repository.Index
		restic.ID
		Err error
	}

	ch := make(chan FileInfo)
	resultCh := make(chan Result)

	// hintsMutex guards hints: several decode workers run concurrently and
	// may append ErrOldIndexFormat at the same time (the original appended
	// without synchronization, a data race).
	var hintsMutex sync.Mutex

	// send list of index files through ch, which is closed afterwards
	wg.Go(func() error {
		defer close(ch)
		return c.repo.List(wgCtx, restic.IndexFile, func(id restic.ID, size int64) error {
			select {
			case <-wgCtx.Done():
				return nil
			case ch <- FileInfo{id, size}:
			}
			return nil
		})
	})

	// a worker receives an index ID from ch, loads the index, and sends it to resultCh
	worker := func() error {
		var buf []byte
		for fi := range ch {
			debug.Log("worker got file %v", fi.ID.Str())
			var err error
			var idx *repository.Index
			oldFormat := false

			buf, err = c.repo.LoadAndDecrypt(wgCtx, buf[:0], restic.IndexFile, fi.ID)
			if err == nil {
				idx, oldFormat, err = repository.DecodeIndex(buf, fi.ID)
			}

			if oldFormat {
				debug.Log("index %v has old format", fi.ID.Str())
				hintsMutex.Lock()
				hints = append(hints, ErrOldIndexFormat{fi.ID})
				hintsMutex.Unlock()
			}

			err = errors.Wrapf(err, "error loading index %v", fi.ID.Str())

			select {
			case resultCh <- Result{idx, fi.ID, err}:
			case <-wgCtx.Done():
			}
		}
		return nil
	}

	// run workers on ch
	wg.Go(func() error {
		defer close(resultCh)
		return repository.RunWorkers(defaultParallelism, worker)
	})

	// receive decoded indexes; this is the only goroutine touching errs,
	// c.masterIndex, c.blobRefs.M and packToIndex until wg.Wait returns.
	packToIndex := make(map[restic.ID]restic.IDSet)
	wg.Go(func() error {
		for res := range resultCh {
			debug.Log("process index %v, err %v", res.ID, res.Err)

			if res.Err != nil {
				errs = append(errs, res.Err)
				continue
			}

			c.masterIndex.Insert(res.Index)

			debug.Log("process blobs")
			cnt := 0
			for blob := range res.Index.Each(wgCtx) {
				h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
				c.blobRefs.M[h] = blobStatusExists
				cnt++

				if _, ok := packToIndex[blob.PackID]; !ok {
					packToIndex[blob.PackID] = restic.NewIDSet()
				}
				packToIndex[blob.PackID].Insert(res.ID)
			}

			debug.Log("%d blobs processed", cnt)
		}
		return nil
	})

	err := wg.Wait()
	if err != nil {
		errs = append(errs, err)
	}

	// Merge index before computing pack sizes, as this needs removed duplicates
	c.masterIndex.MergeFinalIndexes()

	// compute pack size using index entries
	for blob := range c.masterIndex.Each(ctx) {
		size, ok := c.packs[blob.PackID]
		if !ok {
			size = pack.HeaderSize
		}
		c.packs[blob.PackID] = size + int64(pack.PackedSizeOfBlob(blob.Length))
	}

	debug.Log("checking for duplicate packs")
	for packID := range c.packs {
		debug.Log("  check pack %v: contained in %d indexes", packID, len(packToIndex[packID]))
		if len(packToIndex[packID]) > 1 {
			hints = append(hints, ErrDuplicatePacks{
				PackID:  packID,
				Indexes: packToIndex[packID],
			})
		}
	}

	err = c.repo.SetIndex(c.masterIndex)
	if err != nil {
		debug.Log("SetIndex returned error: %v", err)
		errs = append(errs, err)
	}

	return hints, errs
}
// PackError describes an error with a specific pack.
type PackError struct {
	ID       restic.ID // pack the error refers to
	Orphaned bool      // pack exists in the repo but in no index
	Err      error     // underlying error
}

// Error implements the error interface.
func (e PackError) Error() string {
	return "pack " + e.ID.Str() + ": " + e.Err.Error()
}
// IsOrphanedPack returns true if the error describes a pack which is not
// contained in any index.
func IsOrphanedPack(err error) bool {
	e, ok := errors.Cause(err).(PackError)
	return ok && e.Orphaned
}
// Packs checks that all packs referenced in the index are still available and
// there are no packs that aren't in an index. errChan is closed after all
// packs have been checked.
func (c *Checker) Packs(ctx context.Context, errChan chan<- error) {
	defer close(errChan)

	debug.Log("checking for %d packs", len(c.packs))

	debug.Log("listing repository packs")
	repoPacks := make(map[restic.ID]int64)

	err := c.repo.List(ctx, restic.PackFile, func(id restic.ID, size int64) error {
		repoPacks[id] = size
		return nil
	})

	if err != nil {
		// Guard this send with ctx like every other send in this function,
		// so a cancelled caller cannot block us forever. The pack/orphan
		// checks below still run on the (possibly partial) listing, matching
		// the previous behavior.
		select {
		case <-ctx.Done():
			return
		case errChan <- err:
		}
	}

	for id, size := range c.packs {
		reposize, ok := repoPacks[id]
		// remove from repoPacks so we can find orphaned packs
		delete(repoPacks, id)

		// missing: present in c.packs but not in the repo
		if !ok {
			select {
			case <-ctx.Done():
				return
			case errChan <- PackError{ID: id, Err: errors.New("does not exist")}:
			}
			continue
		}

		// size not matching: present in c.packs and in the repo, but sizes do not match
		if size != reposize {
			select {
			case <-ctx.Done():
				return
			case errChan <- PackError{ID: id, Err: errors.Errorf("unexpected file size: got %d, expected %d", reposize, size)}:
			}
		}
	}

	// orphaned: present in the repo but not in c.packs
	for orphanID := range repoPacks {
		select {
		case <-ctx.Done():
			return
		case errChan <- PackError{ID: orphanID, Orphaned: true, Err: errors.New("not referenced in any index")}:
		}
	}
}
// Error is an error that occurred while checking a repository.
type Error struct {
	TreeID restic.ID // tree the error refers to (may be null)
	BlobID restic.ID // blob the error refers to (may be null)
	Err    error     // underlying error
}
// Error returns the underlying message, prefixed with the tree and/or blob
// IDs when they are set.
func (e Error) Error() string {
	switch {
	case !e.BlobID.IsNull() && !e.TreeID.IsNull():
		return "tree " + e.TreeID.Str() + ", blob " + e.BlobID.Str() + ": " + e.Err.Error()
	case !e.TreeID.IsNull():
		return "tree " + e.TreeID.Str() + ": " + e.Err.Error()
	default:
		return e.Err.Error()
	}
}
// loadTreeFromSnapshot loads the snapshot id from repo and returns the ID of
// its root tree, or an error when the snapshot cannot be loaded or carries no
// tree.
func loadTreeFromSnapshot(ctx context.Context, repo restic.Repository, id restic.ID) (restic.ID, error) {
	sn, err := restic.LoadSnapshot(ctx, repo, id)
	switch {
	case err != nil:
		debug.Log("error loading snapshot %v: %v", id, err)
		return restic.ID{}, err
	case sn.Tree == nil:
		debug.Log("snapshot %v has no tree", id)
		return restic.ID{}, errors.Errorf("snapshot %v has no tree", id)
	}
	return *sn.Tree, nil
}
// loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs.
// Snapshots that fail to load contribute to the returned error slice instead.
func loadSnapshotTreeIDs(ctx context.Context, repo restic.Repository) (restic.IDs, []error) {
	// both accumulators are shared by several workers, hence the mutexes
	var trees struct {
		IDs restic.IDs
		sync.Mutex
	}

	var errs struct {
		errs []error
		sync.Mutex
	}

	// track spawned goroutines using wg, create a new context which is
	// cancelled as soon as an error occurs.
	wg, ctx := errgroup.WithContext(ctx)

	ch := make(chan restic.ID)

	// send list of index files through ch, which is closed afterwards
	wg.Go(func() error {
		defer close(ch)
		return repo.List(ctx, restic.SnapshotFile, func(id restic.ID, size int64) error {
			select {
			case <-ctx.Done():
				return nil
			case ch <- id:
			}
			return nil
		})
	})

	// a worker receives an index ID from ch, loads the snapshot and the tree,
	// and adds the result to errs and trees.
	worker := func() error {
		for id := range ch {
			debug.Log("load snapshot %v", id)

			treeID, err := loadTreeFromSnapshot(ctx, repo, id)
			if err != nil {
				errs.Lock()
				errs.errs = append(errs.errs, err)
				errs.Unlock()
				continue
			}

			debug.Log("snapshot %v has tree %v", id, treeID)
			trees.Lock()
			trees.IDs = append(trees.IDs, treeID)
			trees.Unlock()
		}
		return nil
	}

	for i := 0; i < defaultParallelism; i++ {
		wg.Go(worker)
	}

	err := wg.Wait()
	if err != nil {
		// workers have exited by now, no locking needed
		errs.errs = append(errs.errs, err)
	}

	return trees.IDs, errs.errs
}
// TreeError collects several errors that occurred while processing a tree.
type TreeError struct {
	ID     restic.ID // tree the errors belong to
	Errors []error   // all problems found in that tree
}

// Error implements the error interface.
func (e TreeError) Error() string {
	return fmt.Sprintf("tree %v: %v", e.ID.Str(), e.Errors)
}
// treeJob bundles a tree ID with the result of loading it: either a loaded
// tree or the error that occurred. It flows through the Structure pipeline.
type treeJob struct {
	restic.ID
	error
	*restic.Tree
}
// loadTreeWorker loads trees from repo and sends them to out.
// It exits when in is closed or ctx is cancelled, and signals wg on return.
func loadTreeWorker(ctx context.Context, repo restic.Repository,
	in <-chan restic.ID, out chan<- treeJob,
	wg *sync.WaitGroup) {

	defer func() {
		debug.Log("exiting")
		wg.Done()
	}()

	var (
		inCh  = in
		outCh = out
		job   treeJob
	)
	// exactly one of inCh/outCh is non-nil at a time: a nil channel blocks
	// forever in select, so the worker alternates receive -> send.
	outCh = nil

	for {
		select {
		case <-ctx.Done():
			return

		case treeID, ok := <-inCh:
			if !ok {
				return
			}
			debug.Log("load tree %v", treeID)

			tree, err := repo.LoadTree(ctx, treeID)
			debug.Log("load tree %v (%v) returned err: %v", tree, treeID, err)
			job = treeJob{ID: treeID, error: err, Tree: tree}
			outCh = out
			inCh = nil

		case outCh <- job:
			debug.Log("sent tree %v", job.ID)
			outCh = nil
			inCh = in
		}
	}
}
// checkTreeWorker checks the trees received and sends out errors to errChan.
// Trees that check cleanly produce no output at all; the worker exits when in
// is closed or ctx is cancelled, signalling wg on return.
func (c *Checker) checkTreeWorker(ctx context.Context, in <-chan treeJob, out chan<- error, wg *sync.WaitGroup) {
	defer func() {
		debug.Log("exiting")
		wg.Done()
	}()

	var (
		inCh      = in
		outCh     = out
		treeError TreeError
	)
	// outCh is only enabled (non-nil) while a TreeError is pending delivery
	outCh = nil

	for {
		select {
		case <-ctx.Done():
			debug.Log("done channel closed, exiting")
			return

		case job, ok := <-inCh:
			if !ok {
				debug.Log("input channel closed, exiting")
				return
			}

			debug.Log("check tree %v (tree %v, err %v)", job.ID, job.Tree, job.error)

			var errs []error
			if job.error != nil {
				errs = append(errs, job.error)
			} else {
				errs = c.checkTree(job.ID, job.Tree)
			}

			if len(errs) > 0 {
				debug.Log("checked tree %v: %v errors", job.ID, len(errs))
				treeError = TreeError{ID: job.ID, Errors: errs}
				outCh = out
				inCh = nil
			}

		case outCh <- treeError:
			debug.Log("tree %v: sent %d errors", treeError.ID, len(treeError.Errors))
			outCh = nil
			inCh = in
		}
	}
}
// filterTrees drives the tree traversal: it pops tree IDs from backlog in
// depth-first order, forwards each not-yet-referenced ID to loaderChan,
// receives loaded trees on in, queues their subtrees, and forwards every job
// to out. loaderChan and out are closed when the traversal is complete.
func (c *Checker) filterTrees(ctx context.Context, backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan treeJob, out chan<- treeJob) {
	defer func() {
		debug.Log("closing output channels")
		close(loaderChan)
		close(out)
	}()

	var (
		inCh                    = in
		outCh                   = out
		loadCh                  = loaderChan
		job                     treeJob
		nextTreeID              restic.ID
		outstandingLoadTreeJobs = 0
	)

	// Channels are enabled/disabled by setting them to nil: a nil channel
	// blocks forever in select, so its case is effectively switched off.
	outCh = nil
	loadCh = nil

	for {
		if loadCh == nil && len(backlog) > 0 {
			// process last added ids first, that is traverse the tree in depth-first order
			ln := len(backlog) - 1
			nextTreeID, backlog = backlog[ln], backlog[:ln]

			// use a separate flag for processed trees to ensure that check still processes trees
			// even when a file references a tree blob
			c.blobRefs.Lock()
			h := restic.BlobHandle{ID: nextTreeID, Type: restic.TreeBlob}
			status := c.blobRefs.M[h]
			c.blobRefs.Unlock()
			if (status & blobStatusReferenced) != 0 {
				// already queued or checked earlier — skip duplicates
				continue
			}

			loadCh = loaderChan
		}

		if loadCh == nil && outCh == nil && outstandingLoadTreeJobs == 0 {
			debug.Log("backlog is empty, all channels nil, exiting")
			return
		}

		select {
		case <-ctx.Done():
			return

		case loadCh <- nextTreeID:
			outstandingLoadTreeJobs++
			loadCh = nil
			// mark as referenced immediately so the same tree cannot be queued twice
			c.blobRefs.Lock()
			h := restic.BlobHandle{ID: nextTreeID, Type: restic.TreeBlob}
			c.blobRefs.M[h] |= blobStatusReferenced
			c.blobRefs.Unlock()

		case j, ok := <-inCh:
			if !ok {
				debug.Log("input channel closed")
				inCh = nil
				in = nil
				continue
			}

			outstandingLoadTreeJobs--
			debug.Log("input job tree %v", j.ID)

			if j.error != nil {
				debug.Log("received job with error: %v (tree %v, ID %v)", j.error, j.Tree, j.ID)
			} else if j.Tree == nil {
				debug.Log("received job with nil tree pointer: %v (ID %v)", j.error, j.ID)
				// send a new job with the new error instead of the old one
				j = treeJob{ID: j.ID, error: errors.New("tree is nil and error is nil")}
			} else {
				subtrees := j.Tree.Subtrees()
				debug.Log("subtrees for tree %v: %v", j.ID, subtrees)
				// iterate backwards over subtree to compensate backwards traversal order of nextTreeID selection
				for i := len(subtrees) - 1; i >= 0; i-- {
					id := subtrees[i]
					if id.IsNull() {
						// We do not need to raise this error here, it is
						// checked when the tree is checked. Just make sure
						// that we do not add any null IDs to the backlog.
						debug.Log("tree %v has nil subtree", j.ID)
						continue
					}
					backlog = append(backlog, id)
				}
			}

			job = j
			outCh = out
			inCh = nil

		case outCh <- job:
			debug.Log("tree sent to check: %v", job.ID)
			outCh = nil
			inCh = in
		}
	}
}
// Structure checks that for all snapshots all referenced data blobs and
// subtrees are available in the index. errChan is closed after all trees have
// been traversed.
func (c *Checker) Structure(ctx context.Context, errChan chan<- error) {
	defer close(errChan)

	trees, errs := loadSnapshotTreeIDs(ctx, c.repo)
	debug.Log("need to check %d trees from snapshots, %d errs returned", len(trees), len(errs))

	// report snapshot-loading errors first, honoring cancellation
	for _, err := range errs {
		select {
		case <-ctx.Done():
			return
		case errChan <- err:
		}
	}

	// pipeline: filterTrees -> loadTreeWorker -> filterTrees -> checkTreeWorker
	treeIDChan := make(chan restic.ID)
	treeJobChan1 := make(chan treeJob)
	treeJobChan2 := make(chan treeJob)

	var wg sync.WaitGroup
	for i := 0; i < defaultParallelism; i++ {
		wg.Add(2)
		go loadTreeWorker(ctx, c.repo, treeIDChan, treeJobChan1, &wg)
		go c.checkTreeWorker(ctx, treeJobChan2, errChan, &wg)
	}

	c.filterTrees(ctx, trees, treeIDChan, treeJobChan1, treeJobChan2)

	wg.Wait()
}
// checkTree validates the nodes of a single tree: file nodes must have a
// non-nil content list with non-null, index-known blob IDs; dir nodes must
// carry a usable subtree ID. All referenced data blobs are marked as
// referenced in c.blobRefs. All problems found are returned as errors.
func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
	debug.Log("checking tree %v", id)

	var blobs []restic.ID

	for _, node := range tree.Nodes {
		switch node.Type {
		case "file":
			if node.Content == nil {
				errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)})
			}

			for b, blobID := range node.Content {
				if blobID.IsNull() {
					errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d has null ID", node.Name, b)})
					continue
				}
				blobs = append(blobs, blobID)
				// the size value itself is unused; this only verifies that
				// the index knows the blob's size (the previous code summed
				// the sizes into a local that was never read)
				if _, found := c.repo.LookupBlobSize(blobID, restic.DataBlob); !found {
					errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d size could not be found", node.Name, b)})
				}
			}
		case "dir":
			if node.Subtree == nil {
				errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q has no subtree", node.Name)})
				continue
			}

			if node.Subtree.IsNull() {
				errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q subtree id is null", node.Name)})
				continue
			}

		case "symlink", "socket", "chardev", "dev", "fifo":
			// nothing to check

		default:
			errs = append(errs, Error{TreeID: id, Err: errors.Errorf("node %q with invalid type %q", node.Name, node.Type)})
		}

		if node.Name == "" {
			errs = append(errs, Error{TreeID: id, Err: errors.New("node with empty name")})
		}
	}

	// Mark all collected data blobs as referenced. Take the lock once for the
	// whole batch instead of locking/unlocking per blob.
	c.blobRefs.Lock()
	for _, blobID := range blobs {
		h := restic.BlobHandle{ID: blobID, Type: restic.DataBlob}
		if (c.blobRefs.M[h] & blobStatusExists) == 0 {
			debug.Log("tree %v references blob %v which isn't contained in index", id, blobID)
			errs = append(errs, Error{TreeID: id, BlobID: blobID, Err: errors.New("not found in index")})
		}
		c.blobRefs.M[h] |= blobStatusReferenced
		debug.Log("blob %v is referenced", blobID)
	}
	c.blobRefs.Unlock()

	return errs
}
// UnusedBlobs returns all blobs that have never been referenced.
func (c *Checker) UnusedBlobs() (blobs restic.BlobHandles) {
	c.blobRefs.Lock()
	defer c.blobRefs.Unlock()

	debug.Log("checking %d blobs", len(c.blobRefs.M))
	for handle, status := range c.blobRefs.M {
		if status&blobStatusReferenced != 0 {
			continue
		}
		debug.Log("blob %v not referenced", handle)
		blobs = append(blobs, handle)
	}
	return blobs
}
// CountPacks returns the number of packs in the repository.
func (c *Checker) CountPacks() uint64 {
	return uint64(len(c.packs))
}

// GetPacks returns IDSet of packs in the repository
func (c *Checker) GetPacks() map[restic.ID]int64 {
	// NOTE(review): this returns the internal pack-size map itself, not a
	// copy; callers must not mutate it.
	return c.packs
}
// checkPack reads a pack and checks the integrity of all blobs: the pack hash
// must match its ID, the declared size must match the downloaded size, and
// every blob must decrypt, hash to its ID, and be recorded in the index at
// this pack, offset and length.
func checkPack(ctx context.Context, r restic.Repository, id restic.ID, size int64) error {
	debug.Log("checking pack %v", id)
	h := restic.Handle{Type: restic.PackFile, Name: id.String()}

	packfile, hash, realSize, err := repository.DownloadAndHash(ctx, r.Backend(), h)
	if err != nil {
		return errors.Wrap(err, "checkPack")
	}

	// remove the temporary file regardless of outcome
	defer func() {
		_ = packfile.Close()
		_ = os.Remove(packfile.Name())
	}()

	debug.Log("hash for pack %v is %v", id, hash)

	if !hash.Equal(id) {
		debug.Log("Pack ID does not match, want %v, got %v", id, hash)
		return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
	}

	if realSize != size {
		debug.Log("Pack size does not match, want %v, got %v", size, realSize)
		return errors.Errorf("Pack size does not match, want %v, got %v", size, realSize)
	}

	blobs, err := pack.List(r.Key(), packfile, size)
	if err != nil {
		return err
	}

	var errs []error
	var buf []byte
	sizeFromBlobs := int64(pack.HeaderSize) // pack size computed only from blob information
	idx := r.Index()
	for i, blob := range blobs {
		sizeFromBlobs += int64(pack.PackedSizeOfBlob(blob.Length))
		debug.Log("  check blob %d: %v", i, blob)

		// reuse buf across iterations, growing it only when needed
		buf = buf[:cap(buf)]
		if uint(len(buf)) < blob.Length {
			buf = make([]byte, blob.Length)
		}
		buf = buf[:blob.Length]

		_, err := packfile.Seek(int64(blob.Offset), 0)
		if err != nil {
			return errors.Errorf("Seek(%v): %v", blob.Offset, err)
		}

		_, err = io.ReadFull(packfile, buf)
		if err != nil {
			debug.Log("  error loading blob %v: %v", blob.ID, err)
			errs = append(errs, errors.Errorf("blob %v: %v", i, err))
			continue
		}

		nonce, ciphertext := buf[:r.Key().NonceSize()], buf[r.Key().NonceSize():]
		plaintext, err := r.Key().Open(ciphertext[:0], nonce, ciphertext, nil)
		if err != nil {
			debug.Log("  error decrypting blob %v: %v", blob.ID, err)
			errs = append(errs, errors.Errorf("blob %v: %v", i, err))
			continue
		}

		hash := restic.Hash(plaintext)
		if !hash.Equal(blob.ID) {
			debug.Log("  Blob ID does not match, want %v, got %v", blob.ID, hash)
			errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
			continue
		}

		// Check if blob is contained in index and position is correct
		idxHas := false
		for _, pb := range idx.Lookup(blob.ID, blob.Type) {
			if pb.PackID == id && pb.Offset == blob.Offset && pb.Length == blob.Length {
				idxHas = true
				break
			}
		}
		if !idxHas {
			errs = append(errs, errors.Errorf("Blob %v is not contained in index or position is incorrect", blob.ID.Str()))
			continue
		}
	}

	if sizeFromBlobs != size {
		debug.Log("Pack size does not match, want %v, got %v", size, sizeFromBlobs)
		errs = append(errs, errors.Errorf("Pack size does not match, want %v, got %v", size, sizeFromBlobs))
	}

	if len(errs) > 0 {
		return errors.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs)
	}

	return nil
}
// ReadData loads all data from the repository and checks the integrity.
func (c *Checker) ReadData(ctx context.Context, errChan chan<- error) {
	// No progress counter is passed; ReadPacks calls p.Add(1) on it, so
	// progress.Counter is presumably nil-safe — TODO confirm.
	c.ReadPacks(ctx, c.packs, nil, errChan)
}
// ReadPacks loads data from specified packs and checks the integrity.
// p is advanced by one per checked pack; errChan is closed on return.
func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *progress.Counter, errChan chan<- error) {
	defer close(errChan)

	g, ctx := errgroup.WithContext(ctx)
	type packsize struct {
		id   restic.ID
		size int64
	}
	ch := make(chan packsize)

	// run workers
	for i := 0; i < defaultParallelism; i++ {
		g.Go(func() error {
			for {
				var ps packsize
				var ok bool

				select {
				case <-ctx.Done():
					return nil
				case ps, ok = <-ch:
					if !ok {
						// feeder closed ch: no more work
						return nil
					}
				}
				err := checkPack(ctx, c.repo, ps.id, ps.size)
				// p may be nil (see ReadData) — assumed nil-safe; TODO confirm
				p.Add(1)
				if err == nil {
					continue
				}

				select {
				case <-ctx.Done():
					return nil
				case errChan <- err:
				}
			}
		})
	}

	// push packs to ch; stop feeding as soon as the context is cancelled
	// instead of uselessly iterating over all remaining packs
pushLoop:
	for pack, size := range packs {
		select {
		case ch <- packsize{id: pack, size: size}:
		case <-ctx.Done():
			break pushLoop
		}
	}
	close(ch)

	err := g.Wait()
	if err != nil {
		select {
		case <-ctx.Done():
			return
		case errChan <- err:
		}
	}
}
|
package clients
import (
"context"
"fmt"
"github.com/manicminer/hamilton/auth"
"github.com/manicminer/hamilton/environments"
"github.com/hashicorp/terraform-provider-azuread/internal/common"
)
// ClientBuilder carries the settings Build uses to construct a Client.
type ClientBuilder struct {
	AuthConfig       *auth.Config // authentication settings; Build rejects nil
	PartnerID        string
	TerraformVersion string
}
// Build is a helper method which returns a fully instantiated *Client based on the auth Config's current settings.
func (b *ClientBuilder) Build(ctx context.Context) (*Client, error) {
	// Validate before any use: the previous code dereferenced b.AuthConfig in
	// the client literal before performing this nil check, guaranteeing a
	// panic instead of this error when AuthConfig was nil.
	if b.AuthConfig == nil {
		return nil, fmt.Errorf("building client: AuthConfig is nil")
	}

	// client declarations:
	client := Client{
		TenantID:         b.AuthConfig.TenantID,
		ClientID:         b.AuthConfig.ClientID,
		TerraformVersion: b.TerraformVersion,
	}

	authorizer, err := b.AuthConfig.NewAuthorizer(ctx, b.AuthConfig.Environment.MsGraph)
	if err != nil {
		return nil, err
	}

	client.Environment = b.AuthConfig.Environment

	// Obtain the tenant ID from Azure CLI
	realAuthorizer := authorizer
	if cache, ok := authorizer.(*auth.CachedAuthorizer); ok {
		realAuthorizer = cache.Source
	}
	if cli, ok := realAuthorizer.(*auth.AzureCliAuthorizer); ok {
		if cli.TenantID == "" {
			return nil, fmt.Errorf("azure-cli could not determine tenant ID to use")
		}
		client.TenantID = cli.TenantID
		if clientId, ok := environments.PublishedApis["MicrosoftAzureCli"]; ok && clientId != "" {
			client.ClientID = clientId
		}
	}

	// Build the client options only after the Azure CLI tenant discovery
	// above, so the options carry the tenant ID actually in use; the previous
	// code captured client.TenantID before it was (possibly) overwritten.
	o := &common.ClientOptions{
		Authorizer:       authorizer,
		Environment:      client.Environment,
		TenantID:         client.TenantID,
		PartnerID:        b.PartnerID,
		TerraformVersion: client.TerraformVersion,
	}

	if err := client.build(ctx, o); err != nil {
		return nil, fmt.Errorf("building client: %+v", err)
	}

	return &client, nil
}
Bugfix: set the tenant ID correctly when not specified by provider block
package clients
import (
"context"
"fmt"
"github.com/manicminer/hamilton/auth"
"github.com/manicminer/hamilton/environments"
"github.com/hashicorp/terraform-provider-azuread/internal/common"
)
// ClientBuilder carries the settings Build uses to construct a Client.
type ClientBuilder struct {
	AuthConfig       *auth.Config // authentication settings; Build rejects nil
	PartnerID        string
	TerraformVersion string
}
// Build is a helper method which returns a fully instantiated *Client based on the auth Config's current settings.
func (b *ClientBuilder) Build(ctx context.Context) (*Client, error) {
	// Validate before any use: the previous code dereferenced b.AuthConfig in
	// the client literal before performing this nil check, guaranteeing a
	// panic instead of this error when AuthConfig was nil.
	if b.AuthConfig == nil {
		return nil, fmt.Errorf("building client: AuthConfig is nil")
	}

	// client declarations:
	client := Client{
		TenantID:         b.AuthConfig.TenantID,
		ClientID:         b.AuthConfig.ClientID,
		TerraformVersion: b.TerraformVersion,
	}

	authorizer, err := b.AuthConfig.NewAuthorizer(ctx, b.AuthConfig.Environment.MsGraph)
	if err != nil {
		return nil, err
	}

	client.Environment = b.AuthConfig.Environment

	// Obtain the tenant ID from Azure CLI
	realAuthorizer := authorizer
	if cache, ok := authorizer.(*auth.CachedAuthorizer); ok {
		realAuthorizer = cache.Source
	}
	if cli, ok := realAuthorizer.(*auth.AzureCliAuthorizer); ok {
		if cli.TenantID == "" {
			return nil, fmt.Errorf("azure-cli could not determine tenant ID to use")
		}
		client.TenantID = cli.TenantID
		if clientId, ok := environments.PublishedApis["MicrosoftAzureCli"]; ok && clientId != "" {
			client.ClientID = clientId
		}
	}

	// ClientOptions is built after tenant discovery so it carries the tenant
	// ID actually in use.
	o := &common.ClientOptions{
		Authorizer:       authorizer,
		Environment:      client.Environment,
		TenantID:         client.TenantID,
		PartnerID:        b.PartnerID,
		TerraformVersion: client.TerraformVersion,
	}

	if err := client.build(ctx, o); err != nil {
		return nil, fmt.Errorf("building client: %+v", err)
	}

	return &client, nil
}
|
// Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"context"
"crypto/rand"
"fmt"
"io/ioutil"
"path/filepath"
"strings"
"time"
"github.com/spf13/cobra"
"gocloud.dev/internal/cmd/gocdk/internal/docker"
"golang.org/x/xerrors"
)
// defaultDockerTag is the conventional "latest" tag applied alongside the
// generated snapshot tag when the user supplies no --tag.
const defaultDockerTag = ":latest"
// registerBuildCmd wires the "build" subcommand onto rootCmd. With --list it
// only prints existing Docker tags for the project; otherwise it builds and
// tags an image via build.
func registerBuildCmd(ctx context.Context, pctx *processContext, rootCmd *cobra.Command) {
	var (
		listOnly bool
		tagRefs  []string
	)
	cmd := &cobra.Command{
		Use:   "build",
		Short: "Build a Docker image",
		Long: `Build a Docker image for your application. The same image can be deployed
to multiple biomes.
By default, the image is tagged with ":latest" and an auto-generated snapshot tag;
use --tag to override.`,
		Args: cobra.ExactArgs(0),
		RunE: func(_ *cobra.Command, _ []string) error {
			if listOnly {
				if err := listBuilds(ctx, pctx); err != nil {
					return xerrors.Errorf("gocdk build: %w", err)
				}
				return nil
			}
			_, err := build(ctx, pctx, tagRefs)
			return err
		},
	}
	cmd.Flags().BoolVar(&listOnly, "list", false, "print existing Docker tags for this project")
	cmd.Flags().StringSliceVarP(&tagRefs, "tag", "t", nil, "tag in the form `[name][:tag]`; name defaults to image name from Dockerfile, tag defaults to latest (can be used multiple times)")
	rootCmd.AddCommand(cmd)
}
// build a Docker image and tag it with refs.
//
// If refs is empty, it defaults to ":<generated tag>" and ":latest".
//
// Refs that start with ":" will be prepended with the Docker image name
// configured in Dockerfile.
//
// build returns the first actual image reference used (after any prepending
// as described above). If refs was originally empty, it returns the reference
// with the generated tag.
func build(ctx context.Context, pctx *processContext, refs []string) (string, error) {
	if len(refs) == 0 {
		// No refs supplied: generate a snapshot tag and also tag ":latest".
		// The generated tag comes first so that it becomes the return value.
		generated, err := generateTag()
		if err != nil {
			return "", xerrors.Errorf("gocdk build: %w", err)
		}
		refs = []string{":" + generated, defaultDockerTag}
	} else {
		// Work on a copy so the caller's slice is never mutated.
		refs = append([]string(nil), refs...)
	}
	moduleRoot, err := pctx.ModuleRoot(ctx)
	if err != nil {
		return "", xerrors.Errorf("gocdk build: %w", err)
	}
	// Expand ":tag" shorthands with the image name from the Dockerfile,
	// resolving the name at most once.
	imageName := ""
	for i, ref := range refs {
		if !strings.HasPrefix(ref, ":") {
			continue
		}
		if imageName == "" {
			var lookupErr error
			if imageName, lookupErr = moduleDockerImageName(moduleRoot); lookupErr != nil {
				return "", xerrors.Errorf("gocdk build: %w", lookupErr)
			}
		}
		refs[i] = imageName + ref
	}
	if err := docker.New(pctx.env).Build(ctx, refs, moduleRoot, pctx.stderr); err != nil {
		return "", xerrors.Errorf("gocdk build: %w", err)
	}
	return refs[0], nil
}
// listBuilds prints one line per Docker image built for this module:
// "name:tag" entries when both are present, otherwise "@digest".
func listBuilds(ctx context.Context, pctx *processContext) error {
	moduleRoot, err := pctx.ModuleRoot(ctx)
	if err != nil {
		return xerrors.Errorf("list builds: %w", err)
	}
	imageName, err := moduleDockerImageName(moduleRoot)
	if err != nil {
		return xerrors.Errorf("list builds: %w", err)
	}
	images, err := docker.New(pctx.env).ListImages(ctx, imageName)
	if err != nil {
		return xerrors.Errorf("list builds: %w", err)
	}
	for _, img := range images {
		created := img.CreatedAt.Local().Format(time.Stamp)
		if img.Repository != "" && img.Tag != "" {
			pctx.Printf("%-60s %s\n", img.Repository+":"+img.Tag, created)
		} else {
			pctx.Printf("@%-60s %s\n", img.Digest, created)
		}
	}
	return nil
}
// moduleDockerImageName reads <moduleRoot>/Dockerfile and extracts the image
// name declared in its "# gocdk-image:" comment.
func moduleDockerImageName(moduleRoot string) (string, error) {
	dockerfilePath := filepath.Join(moduleRoot, "Dockerfile")
	contents, err := ioutil.ReadFile(dockerfilePath)
	if err != nil {
		return "", xerrors.Errorf("finding module Docker image name: %w", err)
	}
	name, err := parseImageNameFromDockerfile(contents)
	if err != nil {
		return "", xerrors.Errorf("finding module Docker image name: parse %s: %w", dockerfilePath, err)
	}
	return name, nil
}
// generateTag generates a reasonably unique string that is suitable as a Docker
// image tag: a UTC timestamp followed by 32 random bits in hex, e.g.
// "20190702150405_1a2b3c4d".
func generateTag() (string, error) {
	var entropy [4]byte
	if _, err := rand.Read(entropy[:]); err != nil {
		return "", xerrors.Errorf("generate tag: %w", err)
	}
	now := time.Now().UTC()
	y, mo, d := now.Date()
	h, mi, s := now.Clock()
	return fmt.Sprintf("%04d%02d%02d%02d%02d%02d_%08x", y, mo, d, h, mi, s, entropy[:]), nil
}
// parseImageNameFromDockerfile finds the magic "# gocdk-image:" comment in a
// Dockerfile and returns the image name.
func parseImageNameFromDockerfile(dockerfile []byte) (string, error) {
	const magic = "# gocdk-image:"
	commentStart := bytes.Index(dockerfile, []byte(magic))
	if commentStart == -1 {
		return "", xerrors.New("source does not contain the comment \"# gocdk-image:\"")
	}
	// TODO(light): Keep searching if comment does not start at beginning of line.
	nameStart := commentStart + len(magic)
	lenName := bytes.Index(dockerfile[nameStart:], []byte("\n"))
	if lenName == -1 {
		// No newline, go to end of file.
		lenName = len(dockerfile) - nameStart
	}
	// Trim before validating so surrounding whitespace (e.g. a trailing \r)
	// cannot confuse ParseImageRef.
	name := strings.TrimSpace(string(dockerfile[nameStart : nameStart+lenName]))
	if _, tag, digest := docker.ParseImageRef(name); tag != "" || digest != "" {
		// Bug fix: the original Errorf used a %q verb with no argument,
		// which renders as "%!q(MISSING)". Pass the offending name.
		return "", xerrors.Errorf("image name %q must not contain a tag or digest", name)
	}
	return name, nil
}
internal/gocdk: run go build as part of gocdk build (#2621)
// Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"context"
"crypto/rand"
"fmt"
"io/ioutil"
"path/filepath"
"strings"
"time"
"github.com/spf13/cobra"
"gocloud.dev/internal/cmd/gocdk/internal/docker"
"golang.org/x/xerrors"
)
// defaultDockerTag is the conventional "latest" tag applied alongside the
// generated snapshot tag when the user supplies no --tag.
const defaultDockerTag = ":latest"
// registerBuildCmd wires the "build" subcommand onto rootCmd. With --list it
// only prints existing Docker tags for the project; otherwise it builds and
// tags an image via build.
func registerBuildCmd(ctx context.Context, pctx *processContext, rootCmd *cobra.Command) {
	var list bool
	var refs []string
	buildCmd := &cobra.Command{
		Use:   "build",
		Short: "Build a Docker image",
		Long: `Build a Docker image for your application. The same image can be deployed
to multiple biomes.
By default, the image is tagged with ":latest" and an auto-generated snapshot tag;
use --tag to override.`,
		Args: cobra.ExactArgs(0),
		RunE: func(_ *cobra.Command, _ []string) error {
			if list {
				// --list short-circuits the build entirely.
				if err := listBuilds(ctx, pctx); err != nil {
					return xerrors.Errorf("gocdk build: %w", err)
				}
				return nil
			}
			_, err := build(ctx, pctx, refs)
			return err
		},
	}
	buildCmd.Flags().BoolVar(&list, "list", false, "print existing Docker tags for this project")
	buildCmd.Flags().StringSliceVarP(&refs, "tag", "t", nil, "tag in the form `[name][:tag]`; name defaults to image name from Dockerfile, tag defaults to latest (can be used multiple times)")
	rootCmd.AddCommand(buildCmd)
}
// build a Docker image and tag it with refs.
//
// If refs is empty, it defaults to ":<generated tag>" and ":latest".
//
// Refs that start with ":" will be prepended with the Docker image name
// configured in Dockerfile.
//
// build returns the first actual image reference used (after any prepending
// as described above). If refs was originally empty, it returns the reference
// with the generated tag.
func build(ctx context.Context, pctx *processContext, refs []string) (string, error) {
	if len(refs) == 0 {
		// No refs given. Use defaults.
		tag, err := generateTag()
		if err != nil {
			return "", xerrors.Errorf("gocdk build: %w", err)
		}
		// Note: the generated tag is first, so that it will be returned.
		refs = []string{":" + tag, defaultDockerTag}
	} else {
		// Copy to avoid mutating argument.
		refs = append([]string(nil), refs...)
	}
	moduleRoot, err := pctx.ModuleRoot(ctx)
	if err != nil {
		return "", xerrors.Errorf("gocdk build: %w", err)
	}
	// Run a local build to identify compile errors, etc.
	// TODO(rvangent): Consider using go/packages instead, to avoid the cost of linking.
	pctx.Logf("Running \"go build\"...")
	buildCmd := pctx.NewCommand(ctx, moduleRoot, "go", "build")
	// Force module mode regardless of the user's environment.
	buildCmd.Env = append(buildCmd.Env, "GO111MODULE=on")
	if err := buildCmd.Run(); err != nil {
		return "", xerrors.Errorf("gocdk build: go build: %w", err)
	}
	pctx.Logf("\"go build\" succeeded.")
	var imageName string
	for i := range refs {
		// Only ":tag" shorthands need the image name prepended.
		if !strings.HasPrefix(refs[i], ":") {
			continue
		}
		if imageName == "" {
			// On first tag shorthand, lookup the module's Docker image name.
			var err error
			imageName, err = moduleDockerImageName(moduleRoot)
			if err != nil {
				return "", xerrors.Errorf("gocdk build: %w", err)
			}
		}
		refs[i] = imageName + refs[i]
	}
	if err := docker.New(pctx.env).Build(ctx, refs, moduleRoot, pctx.stderr); err != nil {
		return "", xerrors.Errorf("gocdk build: %w", err)
	}
	return refs[0], nil
}
// listBuilds prints one line per Docker image built for this module:
// "name:tag" entries when both are present, otherwise "@digest".
func listBuilds(ctx context.Context, pctx *processContext) error {
	moduleRoot, err := pctx.ModuleRoot(ctx)
	if err != nil {
		return xerrors.Errorf("list builds: %w", err)
	}
	imageName, err := moduleDockerImageName(moduleRoot)
	if err != nil {
		return xerrors.Errorf("list builds: %w", err)
	}
	images, err := docker.New(pctx.env).ListImages(ctx, imageName)
	if err != nil {
		return xerrors.Errorf("list builds: %w", err)
	}
	for _, image := range images {
		// Untagged (e.g. dangling) images are identified by digest only.
		if image.Repository == "" || image.Tag == "" {
			pctx.Printf("@%-60s %s\n", image.Digest, image.CreatedAt.Local().Format(time.Stamp))
		} else {
			pctx.Printf("%-60s %s\n", image.Repository+":"+image.Tag, image.CreatedAt.Local().Format(time.Stamp))
		}
	}
	return nil
}
// moduleDockerImageName reads <moduleRoot>/Dockerfile and extracts the image
// name declared in its "# gocdk-image:" comment.
func moduleDockerImageName(moduleRoot string) (string, error) {
	dockerfilePath := filepath.Join(moduleRoot, "Dockerfile")
	dockerfile, err := ioutil.ReadFile(dockerfilePath)
	if err != nil {
		return "", xerrors.Errorf("finding module Docker image name: %w", err)
	}
	imageName, err := parseImageNameFromDockerfile(dockerfile)
	if err != nil {
		return "", xerrors.Errorf("finding module Docker image name: parse %s: %w", dockerfilePath, err)
	}
	return imageName, nil
}
// generateTag generates a reasonably unique string that is suitable as a Docker
// image tag: a UTC timestamp followed by 32 random bits in hex, e.g.
// "20190702150405_1a2b3c4d".
func generateTag() (string, error) {
	now := time.Now().UTC()
	var bits [4]byte
	if _, err := rand.Read(bits[:]); err != nil {
		return "", xerrors.Errorf("generate tag: %w", err)
	}
	year, month, day := now.Date()
	hour, minute, second := now.Clock()
	return fmt.Sprintf("%04d%02d%02d%02d%02d%02d_%08x", year, month, day, hour, minute, second, bits[:]), nil
}
// parseImageNameFromDockerfile finds the magic "# gocdk-image:" comment in a
// Dockerfile and returns the image name.
func parseImageNameFromDockerfile(dockerfile []byte) (string, error) {
	const magic = "# gocdk-image:"
	commentStart := bytes.Index(dockerfile, []byte(magic))
	if commentStart == -1 {
		return "", xerrors.New("source does not contain the comment \"# gocdk-image:\"")
	}
	// TODO(light): Keep searching if comment does not start at beginning of line.
	nameStart := commentStart + len(magic)
	lenName := bytes.Index(dockerfile[nameStart:], []byte("\n"))
	if lenName == -1 {
		// No newline, go to end of file.
		lenName = len(dockerfile) - nameStart
	}
	// Trim before validating so surrounding whitespace (e.g. a trailing \r)
	// cannot confuse ParseImageRef.
	name := strings.TrimSpace(string(dockerfile[nameStart : nameStart+lenName]))
	if _, tag, digest := docker.ParseImageRef(name); tag != "" || digest != "" {
		// Bug fix: the original Errorf used a %q verb with no argument,
		// which renders as "%!q(MISSING)". Pass the offending name.
		return "", xerrors.Errorf("image name %q must not contain a tag or digest", name)
	}
	return name, nil
}
|
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package frontend provides functionality for running the pkg.go.dev site.
package frontend
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/http"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v7"
"github.com/google/safehtml/template"
"golang.org/x/pkgsite/internal"
"golang.org/x/pkgsite/internal/derrors"
"golang.org/x/pkgsite/internal/experiment"
"golang.org/x/pkgsite/internal/godoc/dochtml"
"golang.org/x/pkgsite/internal/licenses"
"golang.org/x/pkgsite/internal/log"
"golang.org/x/pkgsite/internal/middleware"
"golang.org/x/pkgsite/internal/queue"
)
// Server can be installed to serve the go discovery frontend.
type Server struct {
	// getDataSource should never be called from a handler. It is called only in Server.errorHandler.
	getDataSource func(context.Context) internal.DataSource
	queue         queue.Queue
	// cmplClient is a redis client that has access to the "completions" sorted
	// set.
	cmplClient           *redis.Client
	taskIDChangeInterval time.Duration
	staticPath           template.TrustedSource
	thirdPartyPath       string
	templateDir          template.TrustedSource
	// devMode, when true, causes findTemplate to re-parse all templates on
	// every request so edits show up without a restart.
	devMode bool
	// errorPage holds a pre-rendered 500 page, used as a fallback whenever
	// rendering an error page itself fails.
	errorPage          []byte
	appVersionLabel    string
	googleTagManagerID string
	serveStats         bool

	mu        sync.Mutex // Protects all fields below
	templates map[string]*template.Template
}
// ServerConfig contains everything needed by a Server.
type ServerConfig struct {
	// DataSourceGetter should return a DataSource on each call.
	// It should be goroutine-safe.
	DataSourceGetter func(context.Context) internal.DataSource
	Queue            queue.Queue
	// CompletionClient is the redis client backing autocompletion.
	CompletionClient     *redis.Client
	TaskIDChangeInterval time.Duration
	// StaticPath is the root of the static assets; templates are expected
	// under its "html" subdirectory.
	StaticPath         template.TrustedSource
	ThirdPartyPath     string
	DevMode            bool
	AppVersionLabel    string
	GoogleTagManagerID string
	ServeStats         bool
}
// NewServer creates a new Server for the given database and template directory.
func NewServer(scfg ServerConfig) (_ *Server, err error) {
	defer derrors.Wrap(&err, "NewServer(...)")
	// Page templates live under <StaticPath>/html.
	templateDir := template.TrustedSourceJoin(scfg.StaticPath, template.TrustedSourceFromConstant("html"))
	ts, err := parsePageTemplates(templateDir)
	if err != nil {
		return nil, fmt.Errorf("error parsing templates: %v", err)
	}
	// Documentation templates live under <StaticPath>/html/doc.
	docTemplateDir := template.TrustedSourceJoin(templateDir, template.TrustedSourceFromConstant("doc"))
	dochtml.LoadTemplates(docTemplateDir)
	s := &Server{
		getDataSource:        scfg.DataSourceGetter,
		queue:                scfg.Queue,
		cmplClient:           scfg.CompletionClient,
		staticPath:           scfg.StaticPath,
		thirdPartyPath:       scfg.ThirdPartyPath,
		templateDir:          templateDir,
		devMode:              scfg.DevMode,
		templates:            ts,
		taskIDChangeInterval: scfg.TaskIDChangeInterval,
		appVersionLabel:      scfg.AppVersionLabel,
		googleTagManagerID:   scfg.GoogleTagManagerID,
		serveStats:           scfg.ServeStats,
	}
	// Pre-render the generic 500 page now so that a later failure to render
	// an error page always has a static fallback available.
	errorPageBytes, err := s.renderErrorPage(context.Background(), http.StatusInternalServerError, "error.tmpl", nil)
	if err != nil {
		return nil, fmt.Errorf("s.renderErrorPage(http.StatusInternalServerError, nil): %v", err)
	}
	s.errorPage = errorPageBytes
	return s, nil
}
// Install registers server routes using the given handler registration func.
// authValues is the set of values that can be set on authHeader to bypass the
// cache.
func (s *Server) Install(handle func(string, http.Handler), redisClient *redis.Client, authValues []string) {
	var (
		detailHandler http.Handler = s.errorHandler(s.serveDetails)
		fetchHandler  http.Handler = s.errorHandler(s.serveFetch)
		searchHandler http.Handler = s.errorHandler(s.serveSearch)
	)
	// When redis is available, wrap the details and search handlers in a
	// response cache. fetch is never cached.
	if redisClient != nil {
		detailHandler = middleware.Cache("details", redisClient, detailsTTL, authValues)(detailHandler)
		searchHandler = middleware.Cache("search", redisClient, middleware.TTL(defaultTTL), authValues)(searchHandler)
	}
	// Each AppEngine instance is created in response to a start request, which
	// is an empty HTTP GET request to /_ah/start when scaling is set to manual
	// or basic, and /_ah/warmup when scaling is automatic and min_instances is
	// set. AppEngine sends this request to bring an instance into existence.
	// See details for /_ah/start at
	// https://cloud.google.com/appengine/docs/standard/go/how-instances-are-managed#startup
	// and for /_ah/warmup at
	// https://cloud.google.com/appengine/docs/standard/go/configuring-warmup-requests.
	handle("/_ah/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Infof(r.Context(), "Request made to %q", r.URL.Path)
	}))
	handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir(s.staticPath.String()))))
	handle("/third_party/", http.StripPrefix("/third_party", http.FileServer(http.Dir(s.thirdPartyPath))))
	handle("/favicon.ico", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, fmt.Sprintf("%s/img/favicon.ico", http.Dir(s.staticPath.String())))
	}))
	handle("/fetch/", fetchHandler)
	handle("/play/", http.HandlerFunc(s.handlePlay))
	handle("/pkg/", http.HandlerFunc(s.handlePackageDetailsRedirect))
	handle("/search", searchHandler)
	handle("/search-help", s.staticPageHandler("search_help.tmpl", "Search Help - go.dev"))
	handle("/license-policy", s.licensePolicyHandler())
	handle("/about", http.RedirectHandler("https://go.dev/about", http.StatusFound))
	handle("/badge/", http.HandlerFunc(s.badgeHandler))
	// Catch-all: package/module detail pages.
	handle("/", detailHandler)
	if s.serveStats {
		// Mirror of the details handler that also reports per-request stats.
		handle("/detail-stats/",
			middleware.Stats()(http.StripPrefix("/detail-stats", s.errorHandler(s.serveDetails))))
	}
	handle("/autocomplete", http.HandlerFunc(s.handleAutoCompletion))
	handle("/robots.txt", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		http.ServeContent(w, r, "", time.Time{}, strings.NewReader(`User-agent: *
Disallow: /search?*
Disallow: /fetch/*
`))
	}))
}
const (
	// defaultTTL is used when details tab contents are subject to change, or when
	// there is a problem confirming that the details can be permanently cached.
	defaultTTL = 1 * time.Hour
	// shortTTL is used for volatile content, such as the latest version of a
	// package or module.
	shortTTL = 10 * time.Minute
	// longTTL is used when details content is essentially static.
	longTTL = 24 * time.Hour
	// tinyTTL is used to cache crawled pages.
	tinyTTL = 1 * time.Minute
)

// crawlers holds User-Agent substrings identifying known web crawlers;
// requests matching any of them are cached with tinyTTL (see detailsTTL).
var crawlers = []string{
	"+http://www.google.com/bot.html",
	"+http://www.bing.com/bingbot.htm",
	"+http://ahrefs.com/robot",
}
// detailsTTL assigns the cache TTL for package detail requests. Requests from
// known crawlers get the tiny TTL; everything else is routed by path and tab.
func detailsTTL(r *http.Request) time.Duration {
	ua := r.Header.Get("User-Agent")
	for _, marker := range crawlers {
		if strings.Contains(ua, marker) {
			return tinyTTL
		}
	}
	return detailsTTLForPath(r.Context(), r.URL.Path, r.FormValue("tab"))
}
// detailsTTLForPath chooses a cache TTL based on the requested details path
// and tab: short for "latest" versions, default for volatile tabs and
// unparseable paths, long otherwise.
func detailsTTLForPath(ctx context.Context, urlPath, tab string) time.Duration {
	if urlPath == "/" {
		return defaultTTL
	}
	// TrimPrefix is a no-op when the prefix is absent, matching the
	// original HasPrefix guard.
	urlPath = strings.TrimPrefix(urlPath, "/mod")
	_, _, version, err := parseDetailsURLPath(urlPath)
	if err != nil {
		log.Errorf(ctx, "falling back to default TTL: %v", err)
		return defaultTTL
	}
	switch {
	case version == internal.LatestVersion:
		return shortTTL
	case tab == "importedby" || tab == "versions":
		return defaultTTL
	default:
		return longTTL
	}
}
// TagRoute categorizes incoming requests to the frontend for use in
// monitoring.
func TagRoute(route string, r *http.Request) string {
	tag := strings.Trim(route, "/")
	tab := r.FormValue("tab")
	if tab == "" {
		return tag
	}
	// Verify that the tab value actually exists, otherwise this is unsanitized
	// input and could result in unbounded cardinality in our metrics.
	_, pkgOK := packageTabLookup[tab]
	_, modOK := moduleTabLookup[tab]
	if !pkgOK && !modOK {
		return tag
	}
	if tag != "" {
		tag += "-"
	}
	return tag + tab
}
// staticPageHandler handles requests to a template that contains no dynamic
// content.
func (s *Server) staticPageHandler(templateName, title string) http.HandlerFunc {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		s.servePage(r.Context(), w, templateName, s.newBasePage(r, title))
	})
}
// basePage contains fields shared by all pages when rendering templates.
// It is embedded by concrete page types such as licensePolicyPage and
// errorPage; construct it with Server.newBasePage.
type basePage struct {
	// HTMLTitle is the value to use in the page’s <title> tag.
	HTMLTitle string
	// Query is the current search query (if applicable).
	Query string
	// Experiments contains the experiments currently active.
	Experiments *experiment.Set
	// GodocURL is the URL of the corresponding page on godoc.org (if applicable).
	GodocURL string
	// DevMode indicates whether the server is running in development mode.
	DevMode bool
	// AppVersionLabel contains the current version of the app.
	AppVersionLabel string
	// GoogleTagManagerID is the ID used to load Google Tag Manager.
	GoogleTagManagerID string
	// AllowWideContent indicates whether the content should be displayed in a
	// way that’s amenable to wider viewports.
	AllowWideContent bool
}
// licensePolicyPage is used to generate the static license policy page.
type licensePolicyPage struct {
	basePage
	// LicenseFileNames lists the file names recognized as license files.
	LicenseFileNames []string
	// LicenseTypes lists the license types accepted for display.
	LicenseTypes []licenses.AcceptedLicenseInfo
}
// licensePolicyHandler serves the static license policy page. The accepted
// license list is computed once, when the handler is installed.
func (s *Server) licensePolicyHandler() http.HandlerFunc {
	accepted := licenses.AcceptedLicenses()
	return func(w http.ResponseWriter, r *http.Request) {
		s.servePage(r.Context(), w, "license_policy.tmpl", licensePolicyPage{
			basePage:         s.newBasePage(r, "Licenses"),
			LicenseFileNames: licenses.FileNames,
			LicenseTypes:     accepted,
		})
	}
}
// newBasePage returns a base page for the given request and title.
func (s *Server) newBasePage(r *http.Request, title string) basePage {
	return basePage{
		HTMLTitle:   title,
		Query:       searchQuery(r),
		Experiments: experiment.FromContext(r.Context()),
		// A placeholder value; the godoc URL middleware substitutes the
		// real URL later.
		GodocURL:           middleware.GodocURLPlaceholder,
		DevMode:            s.devMode,
		AppVersionLabel:    s.appVersionLabel,
		GoogleTagManagerID: s.googleTagManagerID,
	}
}
// errorPage contains fields for rendering a HTTP error page.
type errorPage struct {
	basePage
	// templateName overrides "error.tmpl" when non-empty (see serveErrorPage).
	templateName string
	// messageTemplate renders the error message body; a default is supplied
	// by renderErrorPage when empty.
	messageTemplate template.TrustedTemplate
	// MessageData is the data passed to messageTemplate.
	MessageData interface{}
}
// PanicHandler returns an http.HandlerFunc that can be used in HTTP
// middleware. It returns an error if something goes wrong pre-rendering the
// error template.
func (s *Server) PanicHandler() (_ http.HandlerFunc, err error) {
	defer derrors.Wrap(&err, "PanicHandler")
	status := http.StatusInternalServerError
	// Render the 500 page once, up front, so the handler itself can never
	// fail while recovering from a panic.
	buf, err := s.renderErrorPage(context.Background(), status, "error.tmpl", nil)
	if err != nil {
		return nil, err
	}
	return func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(status)
		if _, err := io.Copy(w, bytes.NewReader(buf)); err != nil {
			log.Errorf(r.Context(), "Error copying panic template to ResponseWriter: %v", err)
		}
	}, nil
}
// serverError pairs an error with the HTTP status and optional custom error
// page used to report it to the client (see serveError).
type serverError struct {
	status       int    // HTTP status code
	responseText string // Response text to the user
	epage        *errorPage
	err          error // wrapped error
}
// Error implements the error interface, including the status text and the
// attached error page in the message.
func (s *serverError) Error() string {
	statusText := http.StatusText(s.status)
	return fmt.Sprintf("%d (%s): %v (epage=%v)", s.status, statusText, s.err, s.epage)
}
// Unwrap returns the wrapped error, enabling errors.Is and errors.As.
func (s *serverError) Unwrap() error {
	return s.err
}
// errorHandler adapts a DataSource-taking handler into an http.HandlerFunc,
// converting any returned error into a rendered error response.
func (s *Server) errorHandler(f func(w http.ResponseWriter, r *http.Request, ds internal.DataSource) error) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// Obtain a DataSource to use for this request.
		ds := s.getDataSource(r.Context())
		if err := f(w, r, ds); err != nil {
			s.serveError(w, r, err)
		}
	}
}
// serveError reports err to the client. A *serverError anywhere in the chain
// supplies the status and page; anything else is treated as a 500. POST
// requests get a plain-text response, all others a rendered error page.
func (s *Server) serveError(w http.ResponseWriter, r *http.Request, err error) {
	ctx := r.Context()
	var serr *serverError
	if !errors.As(err, &serr) {
		serr = &serverError{status: http.StatusInternalServerError, err: err}
	}
	// 500s are logged as errors; everything else is informational.
	if serr.status == http.StatusInternalServerError {
		log.Error(ctx, err)
	} else {
		log.Infof(ctx, "returning %d (%s) for error %v", serr.status, http.StatusText(serr.status), err)
	}
	if serr.responseText == "" {
		serr.responseText = http.StatusText(serr.status)
	}
	if r.Method == http.MethodPost {
		http.Error(w, serr.responseText, serr.status)
		return
	}
	s.serveErrorPage(w, r, serr.status, serr.epage)
}
// serveErrorPage renders and writes an error page for the given status,
// filling in defaults when page is nil or incomplete. On render failure it
// falls back to the pre-rendered s.errorPage with a 500 status.
func (s *Server) serveErrorPage(w http.ResponseWriter, r *http.Request, status int, page *errorPage) {
	// Idiom fix: the original local was named "template", shadowing the
	// imported safehtml template package within this function.
	tmplName := "error.tmpl"
	if page != nil {
		if page.AppVersionLabel == "" || page.GoogleTagManagerID == "" {
			// If the basePage was properly created using newBasePage, both
			// AppVersionLabel and GoogleTagManagerID should always be set.
			page.basePage = s.newBasePage(r, "")
		}
		if page.templateName != "" {
			tmplName = page.templateName
		}
	} else {
		page = &errorPage{
			basePage: s.newBasePage(r, ""),
		}
	}
	buf, err := s.renderErrorPage(r.Context(), status, tmplName, page)
	if err != nil {
		log.Errorf(r.Context(), "s.renderErrorPage(w, %d, %v): %v", status, page, err)
		buf = s.errorPage
		status = http.StatusInternalServerError
	}
	w.WriteHeader(status)
	if _, err := io.Copy(w, bytes.NewReader(buf)); err != nil {
		log.Errorf(r.Context(), "Error copying template %q buffer to ResponseWriter: %v", tmplName, err)
	}
}
// renderErrorPage executes the given error template (defaulting to
// "error.tmpl") with page, filling in a default message template, message
// data, and HTML title derived from the status code where they are unset.
func (s *Server) renderErrorPage(ctx context.Context, status int, templateName string, page *errorPage) ([]byte, error) {
	statusInfo := fmt.Sprintf("%d %s", status, http.StatusText(status))
	if page == nil {
		page = &errorPage{}
	}
	if page.messageTemplate.String() == "" {
		page.messageTemplate = template.MakeTrustedTemplate(`<h3 class="Error-message">{{.}}</h3>`)
	}
	if page.MessageData == nil {
		page.MessageData = statusInfo
	}
	if page.HTMLTitle == "" {
		page.HTMLTitle = statusInfo
	}
	if templateName == "" {
		templateName = "error.tmpl"
	}
	etmpl, err := s.findTemplate(templateName)
	if err != nil {
		return nil, err
	}
	// Clone before adding the "message" sub-template so the cached template
	// in s.templates is never mutated.
	tmpl, err := etmpl.Clone()
	if err != nil {
		return nil, err
	}
	_, err = tmpl.New("message").ParseFromTrustedTemplate(page.messageTemplate)
	if err != nil {
		return nil, err
	}
	return executeTemplate(ctx, templateName, tmpl, page)
}
// servePage is used to execute all templates for a *Server. On render failure
// it writes a 500 with the pre-rendered fallback error page.
func (s *Server) servePage(ctx context.Context, w http.ResponseWriter, templateName string, page interface{}) {
	defer middleware.ElapsedStat(ctx, "servePage")()
	buf, err := s.renderPage(ctx, templateName, page)
	if err != nil {
		log.Errorf(ctx, "s.renderPage(%q, %+v): %v", templateName, page, err)
		w.WriteHeader(http.StatusInternalServerError)
		buf = s.errorPage
	}
	if _, err := io.Copy(w, bytes.NewReader(buf)); err != nil {
		log.Errorf(ctx, "Error copying template %q buffer to ResponseWriter: %v", templateName, err)
		// NOTE(review): this WriteHeader fires after the body has started
		// streaming, so it is likely a no-op ("superfluous WriteHeader");
		// confirm whether it can be dropped.
		w.WriteHeader(http.StatusInternalServerError)
	}
}
// renderPage executes the given templateName with page, returning the
// rendered bytes or an error if the template cannot be found or executed.
func (s *Server) renderPage(ctx context.Context, templateName string, page interface{}) ([]byte, error) {
	defer middleware.ElapsedStat(ctx, "renderPage")()
	tmpl, err := s.findTemplate(templateName)
	if err != nil {
		return nil, err
	}
	return executeTemplate(ctx, templateName, tmpl, page)
}
// findTemplate returns the parsed template for templateName. In dev mode it
// re-parses all templates on every call (under s.mu) so local edits are
// picked up without restarting the server.
func (s *Server) findTemplate(templateName string) (*template.Template, error) {
	if s.devMode {
		s.mu.Lock()
		defer s.mu.Unlock()
		var err error
		s.templates, err = parsePageTemplates(s.templateDir)
		if err != nil {
			return nil, fmt.Errorf("error parsing templates: %v", err)
		}
	}
	// Outside dev mode s.templates is only written during construction, so
	// this unlocked read is safe; in dev mode the deferred unlock above
	// still guards it.
	tmpl := s.templates[templateName]
	if tmpl == nil {
		return nil, fmt.Errorf("BUG: s.templates[%q] not found", templateName)
	}
	return tmpl, nil
}
// executeTemplate renders tmpl with data into a fresh buffer, logging and
// returning any execution error.
func executeTemplate(ctx context.Context, templateName string, tmpl *template.Template, data interface{}) ([]byte, error) {
	buf := &bytes.Buffer{}
	err := tmpl.Execute(buf, data)
	if err != nil {
		log.Errorf(ctx, "Error executing page template %q: %v", templateName, err)
		return nil, err
	}
	return buf.Bytes(), nil
}
// parsePageTemplates parses html templates contained in the given base
// directory in order to generate a map of Name->*template.Template.
//
// Separate templates are used so that certain contextual functions (e.g.
// templateName) can be bound independently for each page.
func parsePageTemplates(base template.TrustedSource) (map[string]*template.Template, error) {
	tsc := template.TrustedSourceFromConstant
	join := template.TrustedSourceJoin
	// Each inner slice is one page: the first entry names the template set
	// (and becomes its map key), later entries are shared layouts.
	htmlSets := [][]template.TrustedSource{
		{tsc("badge.tmpl")},
		{tsc("error.tmpl")},
		{tsc("fetch.tmpl")},
		{tsc("index.tmpl")},
		{tsc("license_policy.tmpl")},
		{tsc("search.tmpl")},
		{tsc("search_help.tmpl")},
		{tsc("unit_details.tmpl"), tsc("unit.tmpl")},
		{tsc("unit_importedby.tmpl"), tsc("unit.tmpl")},
		{tsc("unit_imports.tmpl"), tsc("unit.tmpl")},
		{tsc("unit_licenses.tmpl"), tsc("unit.tmpl")},
		{tsc("unit_versions.tmpl"), tsc("unit.tmpl")},
		{tsc("overview.tmpl"), tsc("details.tmpl")},
		{tsc("subdirectories.tmpl"), tsc("details.tmpl")},
		{tsc("pkg_doc.tmpl"), tsc("details.tmpl")},
		{tsc("pkg_importedby.tmpl"), tsc("details.tmpl")},
		{tsc("pkg_imports.tmpl"), tsc("details.tmpl")},
		{tsc("licenses.tmpl"), tsc("details.tmpl")},
		{tsc("versions.tmpl"), tsc("details.tmpl")},
		{tsc("not_implemented.tmpl"), tsc("details.tmpl")},
	}
	templates := make(map[string]*template.Template)
	for _, set := range htmlSets {
		// Every set starts from base.tmpl plus a small helper FuncMap.
		t, err := template.New("base.tmpl").Funcs(template.FuncMap{
			"add": func(i, j int) int { return i + j },
			"pluralize": func(i int, s string) string {
				if i == 1 {
					return s
				}
				return s + "s"
			},
			"commaseparate": func(s []string) string {
				return strings.Join(s, ", ")
			},
		}).ParseFilesFromTrustedSources(join(base, tsc("base.tmpl")))
		if err != nil {
			return nil, fmt.Errorf("ParseFiles: %v", err)
		}
		// Shared helper templates are available to every page.
		helperGlob := join(base, tsc("helpers"), tsc("*.tmpl"))
		if _, err := t.ParseGlobFromTrustedSource(helperGlob); err != nil {
			return nil, fmt.Errorf("ParseGlob(%q): %v", helperGlob, err)
		}
		var files []template.TrustedSource
		for _, f := range set {
			files = append(files, join(base, tsc("pages"), f))
		}
		if _, err := t.ParseFilesFromTrustedSources(files...); err != nil {
			return nil, fmt.Errorf("ParseFilesFromTrustedSources(%v): %v", files, err)
		}
		templates[set[0].String()] = t
	}
	return templates, nil
}
internal/frontend: check unitTabLookup in TagRoute
TagRoute now checks unitTabLookup instead of packageTabLookup and
moduleTabLookup when verifying if a tab is valid.
Change-Id: Ica1ecf1a904c28b983dd437b5dca1f191666f8bd
Reviewed-on: https://go-review.googlesource.com/c/pkgsite/+/270238
Trust: Julie Qiu <8d32267b6b4884cf35adeaccde2b6857ae11aace@golang.org>
Run-TryBot: Julie Qiu <8d32267b6b4884cf35adeaccde2b6857ae11aace@golang.org>
TryBot-Result: kokoro <2ac7b1f3fa578934c95181d4272be0d3bca00121@google.com>
Reviewed-by: Jonathan Amsterdam <e3d3698b2ccd5955e4adf250d0785062d0f9018b@google.com>
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package frontend provides functionality for running the pkg.go.dev site.
package frontend
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/http"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v7"
"github.com/google/safehtml/template"
"golang.org/x/pkgsite/internal"
"golang.org/x/pkgsite/internal/derrors"
"golang.org/x/pkgsite/internal/experiment"
"golang.org/x/pkgsite/internal/godoc/dochtml"
"golang.org/x/pkgsite/internal/licenses"
"golang.org/x/pkgsite/internal/log"
"golang.org/x/pkgsite/internal/middleware"
"golang.org/x/pkgsite/internal/queue"
)
// Server can be installed to serve the go discovery frontend.
type Server struct {
	// getDataSource should never be called from a handler. It is called only in Server.errorHandler.
	getDataSource func(context.Context) internal.DataSource
	queue         queue.Queue
	// cmplClient is a redis client that has access to the "completions" sorted
	// set.
	cmplClient           *redis.Client
	taskIDChangeInterval time.Duration
	staticPath           template.TrustedSource
	thirdPartyPath       string
	templateDir          template.TrustedSource
	// devMode, when true, causes templates to be re-parsed on every request
	// so edits show up without a restart.
	devMode bool
	// errorPage holds a pre-rendered 500 page, used as a fallback whenever
	// rendering an error page itself fails.
	errorPage          []byte
	appVersionLabel    string
	googleTagManagerID string
	serveStats         bool

	mu        sync.Mutex // Protects all fields below
	templates map[string]*template.Template
}
// ServerConfig contains everything needed by a Server.
type ServerConfig struct {
	// DataSourceGetter should return a DataSource on each call.
	// It should be goroutine-safe.
	DataSourceGetter func(context.Context) internal.DataSource
	Queue            queue.Queue
	// CompletionClient is the redis client backing autocompletion.
	CompletionClient     *redis.Client
	TaskIDChangeInterval time.Duration
	// StaticPath is the root of the static assets; templates are expected
	// under its "html" subdirectory.
	StaticPath         template.TrustedSource
	ThirdPartyPath     string
	DevMode            bool
	AppVersionLabel    string
	GoogleTagManagerID string
	ServeStats         bool
}
// NewServer creates a new Server for the given database and template directory.
func NewServer(scfg ServerConfig) (_ *Server, err error) {
	defer derrors.Wrap(&err, "NewServer(...)")
	// Page templates live under <StaticPath>/html.
	templateDir := template.TrustedSourceJoin(scfg.StaticPath, template.TrustedSourceFromConstant("html"))
	ts, err := parsePageTemplates(templateDir)
	if err != nil {
		return nil, fmt.Errorf("error parsing templates: %v", err)
	}
	// Documentation templates live under <StaticPath>/html/doc.
	docTemplateDir := template.TrustedSourceJoin(templateDir, template.TrustedSourceFromConstant("doc"))
	dochtml.LoadTemplates(docTemplateDir)
	s := &Server{
		getDataSource:        scfg.DataSourceGetter,
		queue:                scfg.Queue,
		cmplClient:           scfg.CompletionClient,
		staticPath:           scfg.StaticPath,
		thirdPartyPath:       scfg.ThirdPartyPath,
		templateDir:          templateDir,
		devMode:              scfg.DevMode,
		templates:            ts,
		taskIDChangeInterval: scfg.TaskIDChangeInterval,
		appVersionLabel:      scfg.AppVersionLabel,
		googleTagManagerID:   scfg.GoogleTagManagerID,
		serveStats:           scfg.ServeStats,
	}
	// Pre-render the generic 500 page now so that a later failure to render
	// an error page always has a static fallback available.
	errorPageBytes, err := s.renderErrorPage(context.Background(), http.StatusInternalServerError, "error.tmpl", nil)
	if err != nil {
		return nil, fmt.Errorf("s.renderErrorPage(http.StatusInternalServerError, nil): %v", err)
	}
	s.errorPage = errorPageBytes
	return s, nil
}
// Install registers server routes using the given handler registration func.
// authValues is the set of values that can be set on authHeader to bypass the
// cache.
// Install registers server routes using the given handler registration func.
// authValues is the set of values that can be set on authHeader to bypass the
// cache. A non-nil redisClient enables response caching for the details and
// search handlers.
func (s *Server) Install(handle func(string, http.Handler), redisClient *redis.Client, authValues []string) {
	var (
		detailHandler http.Handler = s.errorHandler(s.serveDetails)
		fetchHandler  http.Handler = s.errorHandler(s.serveFetch)
		searchHandler http.Handler = s.errorHandler(s.serveSearch)
	)
	// Only wrap the cacheable handlers when a redis client is available.
	if redisClient != nil {
		detailHandler = middleware.Cache("details", redisClient, detailsTTL, authValues)(detailHandler)
		searchHandler = middleware.Cache("search", redisClient, middleware.TTL(defaultTTL), authValues)(searchHandler)
	}
	// Each AppEngine instance is created in response to a start request, which
	// is an empty HTTP GET request to /_ah/start when scaling is set to manual
	// or basic, and /_ah/warmup when scaling is automatic and min_instances is
	// set. AppEngine sends this request to bring an instance into existence.
	// See details for /_ah/start at
	// https://cloud.google.com/appengine/docs/standard/go/how-instances-are-managed#startup
	// and for /_ah/warmup at
	// https://cloud.google.com/appengine/docs/standard/go/configuring-warmup-requests.
	handle("/_ah/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Infof(r.Context(), "Request made to %q", r.URL.Path)
	}))
	// Static and vendored third-party assets are served straight from disk.
	handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir(s.staticPath.String()))))
	handle("/third_party/", http.StripPrefix("/third_party", http.FileServer(http.Dir(s.thirdPartyPath))))
	handle("/favicon.ico", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, fmt.Sprintf("%s/img/favicon.ico", http.Dir(s.staticPath.String())))
	}))
	handle("/fetch/", fetchHandler)
	handle("/play/", http.HandlerFunc(s.handlePlay))
	// Legacy /pkg/ URLs redirect to their current form.
	handle("/pkg/", http.HandlerFunc(s.handlePackageDetailsRedirect))
	handle("/search", searchHandler)
	handle("/search-help", s.staticPageHandler("search_help.tmpl", "Search Help - go.dev"))
	handle("/license-policy", s.licensePolicyHandler())
	handle("/about", http.RedirectHandler("https://go.dev/about", http.StatusFound))
	handle("/badge/", http.HandlerFunc(s.badgeHandler))
	// Catch-all: package/module detail pages.
	handle("/", detailHandler)
	if s.serveStats {
		// Instrumented variant of the details handler for debugging.
		handle("/detail-stats/",
			middleware.Stats()(http.StripPrefix("/detail-stats", s.errorHandler(s.serveDetails))))
	}
	handle("/autocomplete", http.HandlerFunc(s.handleAutoCompletion))
	handle("/robots.txt", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		http.ServeContent(w, r, "", time.Time{}, strings.NewReader(`User-agent: *
Disallow: /search?*
Disallow: /fetch/*
`))
	}))
}
// Cache TTL tiers for rendered pages.
const (
	// defaultTTL is used when details tab contents are subject to change, or when
	// there is a problem confirming that the details can be permanently cached.
	defaultTTL = 1 * time.Hour
	// shortTTL is used for volatile content, such as the latest version of a
	// package or module.
	shortTTL = 10 * time.Minute
	// longTTL is used when details content is essentially static.
	longTTL = 24 * time.Hour
	// tinyTTL is used to cache crawled pages.
	tinyTTL = 1 * time.Minute
)

// crawlers holds User-Agent substrings that identify known web crawlers;
// detailsTTL matches against these to select tinyTTL.
var crawlers = []string{
	"+http://www.google.com/bot.html",
	"+http://www.bing.com/bingbot.htm",
	"+http://ahrefs.com/robot",
}
// detailsTTL assigns the cache TTL for package detail requests. Requests
// whose User-Agent matches a known crawler marker are cached with tinyTTL;
// all other requests are delegated to detailsTTLForPath.
func detailsTTL(r *http.Request) time.Duration {
	ua := r.Header.Get("User-Agent")
	fromCrawler := false
	for _, marker := range crawlers {
		if strings.Contains(ua, marker) {
			fromCrawler = true
			break
		}
	}
	if fromCrawler {
		return tinyTTL
	}
	return detailsTTLForPath(r.Context(), r.URL.Path, r.FormValue("tab"))
}
// detailsTTLForPath returns the cache TTL for the given details-page URL
// path and tab value: shortTTL for "latest"-version pages, defaultTTL for
// the home page, unparseable paths and the importedby/versions tabs, and
// longTTL otherwise.
func detailsTTLForPath(ctx context.Context, urlPath, tab string) time.Duration {
	if urlPath == "/" {
		return defaultTTL
	}
	// A leading "/mod" is not part of the details path proper; TrimPrefix
	// is a no-op when the prefix is absent.
	_, _, version, err := parseDetailsURLPath(strings.TrimPrefix(urlPath, "/mod"))
	if err != nil {
		log.Errorf(ctx, "falling back to default TTL: %v", err)
		return defaultTTL
	}
	switch {
	case version == internal.LatestVersion:
		return shortTTL
	case tab == "importedby" || tab == "versions":
		return defaultTTL
	default:
		return longTTL
	}
}
// TagRoute categorizes incoming requests to the frontend for use in
// monitoring.
func TagRoute(route string, r *http.Request) string {
	tag := strings.Trim(route, "/")
	tab := r.FormValue("tab")
	if tab == "" {
		return tag
	}
	// Verify that the tab value actually exists, otherwise this is unsanitized
	// input and could result in unbounded cardinality in our metrics.
	if _, known := unitTabLookup[tab]; !known {
		return tag
	}
	if tag == "" {
		return tab
	}
	return tag + "-" + tab
}
// staticPageHandler handles requests to a template that contains no dynamic
// content.
// staticPageHandler handles requests to a template that contains no dynamic
// content.
func (s *Server) staticPageHandler(templateName, title string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		s.servePage(r.Context(), w, templateName, s.newBasePage(r, title))
	}
}

// basePage contains fields shared by all pages when rendering templates.
type basePage struct {
	// HTMLTitle is the value to use in the page’s <title> tag.
	HTMLTitle string
	// Query is the current search query (if applicable).
	Query string
	// Experiments contains the experiments currently active.
	Experiments *experiment.Set
	// GodocURL is the URL of the corresponding page on godoc.org (if applicable).
	GodocURL string
	// DevMode indicates whether the server is running in development mode.
	DevMode bool
	// AppVersionLabel contains the current version of the app.
	AppVersionLabel string
	// GoogleTagManagerID is the ID used to load Google Tag Manager.
	GoogleTagManagerID string
	// AllowWideContent indicates whether the content should be displayed in a
	// way that’s amenable to wider viewports.
	AllowWideContent bool
}

// licensePolicyPage is used to generate the static license policy page.
type licensePolicyPage struct {
	basePage
	// LicenseFileNames are the file names recognized as license files.
	LicenseFileNames []string
	// LicenseTypes are the accepted license kinds to display.
	LicenseTypes []licenses.AcceptedLicenseInfo
}

// licensePolicyHandler returns a handler for the license policy page. The
// accepted-license list is computed once, outside the per-request closure.
func (s *Server) licensePolicyHandler() http.HandlerFunc {
	lics := licenses.AcceptedLicenses()
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		page := licensePolicyPage{
			basePage:         s.newBasePage(r, "Licenses"),
			LicenseFileNames: licenses.FileNames,
			LicenseTypes:     lics,
		}
		s.servePage(r.Context(), w, "license_policy.tmpl", page)
	})
}

// newBasePage returns a base page for the given request and title.
// GodocURL is set to a placeholder value that middleware rewrites later.
func (s *Server) newBasePage(r *http.Request, title string) basePage {
	return basePage{
		HTMLTitle:          title,
		Query:              searchQuery(r),
		Experiments:        experiment.FromContext(r.Context()),
		GodocURL:           middleware.GodocURLPlaceholder,
		DevMode:            s.devMode,
		AppVersionLabel:    s.appVersionLabel,
		GoogleTagManagerID: s.googleTagManagerID,
	}
}

// errorPage contains fields for rendering a HTTP error page.
type errorPage struct {
	basePage
	// templateName optionally overrides the default "error.tmpl".
	templateName string
	// messageTemplate renders the error message body; renderErrorPage
	// supplies a default when empty.
	messageTemplate template.TrustedTemplate
	// MessageData is the data passed to messageTemplate.
	MessageData interface{}
}
// PanicHandler returns an http.HandlerFunc that can be used in HTTP
// middleware. It returns an error if something goes wrong pre-rendering the
// error template.
// PanicHandler returns an http.HandlerFunc that can be used in HTTP
// middleware. It returns an error if something goes wrong pre-rendering the
// error template.
func (s *Server) PanicHandler() (_ http.HandlerFunc, err error) {
	defer derrors.Wrap(&err, "PanicHandler")
	status := http.StatusInternalServerError
	// Render the 500 page once, up front, so serving a panic never depends
	// on template rendering working at that moment.
	buf, err := s.renderErrorPage(context.Background(), status, "error.tmpl", nil)
	if err != nil {
		return nil, err
	}
	return func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(status)
		if _, err := io.Copy(w, bytes.NewReader(buf)); err != nil {
			log.Errorf(r.Context(), "Error copying panic template to ResponseWriter: %v", err)
		}
	}, nil
}

// serverError carries an HTTP status plus rendering details for an error
// response; it implements error and supports errors.As extraction.
type serverError struct {
	status       int    // HTTP status code
	responseText string // Response text to the user
	epage        *errorPage
	err          error // wrapped error
}

// Error implements the error interface.
func (s *serverError) Error() string {
	return fmt.Sprintf("%d (%s): %v (epage=%v)", s.status, http.StatusText(s.status), s.err, s.epage)
}

// Unwrap returns the wrapped error, enabling errors.Is/As chains.
func (s *serverError) Unwrap() error {
	return s.err
}

// errorHandler adapts a DataSource-taking handler into an http.HandlerFunc,
// converting any returned error into an error response via serveError.
func (s *Server) errorHandler(f func(w http.ResponseWriter, r *http.Request, ds internal.DataSource) error) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// Obtain a DataSource to use for this request.
		ds := s.getDataSource(r.Context())
		if err := f(w, r, ds); err != nil {
			s.serveError(w, r, err)
		}
	}
}
// serveError writes an error response for err. Errors that are not (or do
// not wrap) a *serverError are treated as internal server errors. POST
// requests get a plain-text error; everything else gets a rendered page.
func (s *Server) serveError(w http.ResponseWriter, r *http.Request, err error) {
	ctx := r.Context()
	var serr *serverError
	if !errors.As(err, &serr) {
		serr = &serverError{status: http.StatusInternalServerError, err: err}
	}
	// 500s are logged as errors; expected statuses only at info level.
	if serr.status == http.StatusInternalServerError {
		log.Error(ctx, err)
	} else {
		log.Infof(ctx, "returning %d (%s) for error %v", serr.status, http.StatusText(serr.status), err)
	}
	if serr.responseText == "" {
		serr.responseText = http.StatusText(serr.status)
	}
	if r.Method == http.MethodPost {
		http.Error(w, serr.responseText, serr.status)
		return
	}
	s.serveErrorPage(w, r, serr.status, serr.epage)
}
// serveErrorPage renders an error page for the given status and writes it to
// w. A nil page, or one not built via newBasePage, gets a default base page
// so the template always has the fields it needs. If rendering fails, the
// pre-rendered s.errorPage is served with a 500 status instead.
func (s *Server) serveErrorPage(w http.ResponseWriter, r *http.Request, status int, page *errorPage) {
	// Named tmplName (not "template") to avoid shadowing the imported
	// safehtml/template package inside this function.
	tmplName := "error.tmpl"
	if page != nil {
		if page.AppVersionLabel == "" || page.GoogleTagManagerID == "" {
			// If the basePage was properly created using newBasePage, both
			// AppVersionLabel and GoogleTagManagerID should always be set.
			page.basePage = s.newBasePage(r, "")
		}
		if page.templateName != "" {
			tmplName = page.templateName
		}
	} else {
		page = &errorPage{
			basePage: s.newBasePage(r, ""),
		}
	}
	buf, err := s.renderErrorPage(r.Context(), status, tmplName, page)
	if err != nil {
		log.Errorf(r.Context(), "s.renderErrorPage(w, %d, %v): %v", status, page, err)
		buf = s.errorPage
		status = http.StatusInternalServerError
	}
	w.WriteHeader(status)
	if _, err := io.Copy(w, bytes.NewReader(buf)); err != nil {
		log.Errorf(r.Context(), "Error copying template %q buffer to ResponseWriter: %v", tmplName, err)
	}
}
// renderErrorPage executes error.tmpl with the given errorPage
// renderErrorPage executes error.tmpl (or templateName, when given) with the
// given errorPage, filling sensible defaults for any unset fields. The
// message template is parsed into a clone so the cached template set is
// never mutated.
func (s *Server) renderErrorPage(ctx context.Context, status int, templateName string, page *errorPage) ([]byte, error) {
	statusInfo := fmt.Sprintf("%d %s", status, http.StatusText(status))
	if page == nil {
		page = &errorPage{}
	}
	if page.messageTemplate.String() == "" {
		page.messageTemplate = template.MakeTrustedTemplate(`<h3 class="Error-message">{{.}}</h3>`)
	}
	if page.MessageData == nil {
		page.MessageData = statusInfo
	}
	if page.HTMLTitle == "" {
		page.HTMLTitle = statusInfo
	}
	if templateName == "" {
		templateName = "error.tmpl"
	}
	etmpl, err := s.findTemplate(templateName)
	if err != nil {
		return nil, err
	}
	// Clone before defining "message" so the shared template is untouched.
	tmpl, err := etmpl.Clone()
	if err != nil {
		return nil, err
	}
	_, err = tmpl.New("message").ParseFromTrustedTemplate(page.messageTemplate)
	if err != nil {
		return nil, err
	}
	return executeTemplate(ctx, templateName, tmpl, page)
}
// servePage is used to execute all templates for a *Server.
func (s *Server) servePage(ctx context.Context, w http.ResponseWriter, templateName string, page interface{}) {
defer middleware.ElapsedStat(ctx, "servePage")()
buf, err := s.renderPage(ctx, templateName, page)
if err != nil {
log.Errorf(ctx, "s.renderPage(%q, %+v): %v", templateName, page, err)
w.WriteHeader(http.StatusInternalServerError)
buf = s.errorPage
}
if _, err := io.Copy(w, bytes.NewReader(buf)); err != nil {
log.Errorf(ctx, "Error copying template %q buffer to ResponseWriter: %v", templateName, err)
w.WriteHeader(http.StatusInternalServerError)
}
}
// renderPage executes the given templateName with page.
func (s *Server) renderPage(ctx context.Context, templateName string, page interface{}) ([]byte, error) {
defer middleware.ElapsedStat(ctx, "renderPage")()
tmpl, err := s.findTemplate(templateName)
if err != nil {
return nil, err
}
return executeTemplate(ctx, templateName, tmpl, page)
}
func (s *Server) findTemplate(templateName string) (*template.Template, error) {
if s.devMode {
s.mu.Lock()
defer s.mu.Unlock()
var err error
s.templates, err = parsePageTemplates(s.templateDir)
if err != nil {
return nil, fmt.Errorf("error parsing templates: %v", err)
}
}
tmpl := s.templates[templateName]
if tmpl == nil {
return nil, fmt.Errorf("BUG: s.templates[%q] not found", templateName)
}
return tmpl, nil
}
func executeTemplate(ctx context.Context, templateName string, tmpl *template.Template, data interface{}) ([]byte, error) {
var buf bytes.Buffer
if err := tmpl.Execute(&buf, data); err != nil {
log.Errorf(ctx, "Error executing page template %q: %v", templateName, err)
return nil, err
}
return buf.Bytes(), nil
}
// parsePageTemplates parses html templates contained in the given base
// directory in order to generate a map of Name->*template.Template.
//
// Separate templates are used so that certain contextual functions (e.g.
// templateName) can be bound independently for each page.
func parsePageTemplates(base template.TrustedSource) (map[string]*template.Template, error) {
	tsc := template.TrustedSourceFromConstant
	join := template.TrustedSourceJoin
	// Each set lists a page template followed by any shared layouts it
	// extends; the first entry becomes the map key.
	htmlSets := [][]template.TrustedSource{
		{tsc("badge.tmpl")},
		{tsc("error.tmpl")},
		{tsc("fetch.tmpl")},
		{tsc("index.tmpl")},
		{tsc("license_policy.tmpl")},
		{tsc("search.tmpl")},
		{tsc("search_help.tmpl")},
		{tsc("unit_details.tmpl"), tsc("unit.tmpl")},
		{tsc("unit_importedby.tmpl"), tsc("unit.tmpl")},
		{tsc("unit_imports.tmpl"), tsc("unit.tmpl")},
		{tsc("unit_licenses.tmpl"), tsc("unit.tmpl")},
		{tsc("unit_versions.tmpl"), tsc("unit.tmpl")},
		{tsc("overview.tmpl"), tsc("details.tmpl")},
		{tsc("subdirectories.tmpl"), tsc("details.tmpl")},
		{tsc("pkg_doc.tmpl"), tsc("details.tmpl")},
		{tsc("pkg_importedby.tmpl"), tsc("details.tmpl")},
		{tsc("pkg_imports.tmpl"), tsc("details.tmpl")},
		{tsc("licenses.tmpl"), tsc("details.tmpl")},
		{tsc("versions.tmpl"), tsc("details.tmpl")},
		{tsc("not_implemented.tmpl"), tsc("details.tmpl")},
	}
	templates := make(map[string]*template.Template)
	for _, set := range htmlSets {
		// Every set starts from base.tmpl with a few convenience functions
		// bound in.
		t, err := template.New("base.tmpl").Funcs(template.FuncMap{
			"add": func(i, j int) int { return i + j },
			"pluralize": func(i int, s string) string {
				if i == 1 {
					return s
				}
				return s + "s"
			},
			"commaseparate": func(s []string) string {
				return strings.Join(s, ", ")
			},
		}).ParseFilesFromTrustedSources(join(base, tsc("base.tmpl")))
		if err != nil {
			return nil, fmt.Errorf("ParseFiles: %v", err)
		}
		// Shared helper templates are made available to every page.
		helperGlob := join(base, tsc("helpers"), tsc("*.tmpl"))
		if _, err := t.ParseGlobFromTrustedSource(helperGlob); err != nil {
			return nil, fmt.Errorf("ParseGlob(%q): %v", helperGlob, err)
		}
		var files []template.TrustedSource
		for _, f := range set {
			files = append(files, join(base, tsc("pages"), f))
		}
		if _, err := t.ParseFilesFromTrustedSources(files...); err != nil {
			return nil, fmt.Errorf("ParseFilesFromTrustedSources(%v): %v", files, err)
		}
		templates[set[0].String()] = t
	}
	return templates, nil
}
|
package redconn
import (
"encoding/json"
"fmt"
"log"
"math"
"math/rand"
"time"
"github.com/PuerkitoBio/exp/juggler/msg"
"github.com/garyburd/redigo/redis"
"github.com/pborman/uuid"
)
// Pool defines the methods required for a redis pool that provides
// a method to get a connection and to release the pool's resources.
type Pool interface {
	// Get returns a connection from the pool.
	Get() redis.Conn
	// Close releases the resources used by the pool.
	Close() error
}

// Connector is a redis connector that provides the methods to
// interact with Redis using the juggler protocol.
type Connector struct {
	// Pool supplies redis connections.
	Pool Pool
	// BlockingTimeout bounds blocking reads (BRPOP); zero means
	// defaultBlockingTimeout.
	BlockingTimeout time.Duration
	// LogFunc, when set, receives log output instead of the stdlib logger.
	LogFunc func(string, ...interface{})
}

const (
	defaultBlockingTimeout = 5 * time.Second

	// CALL: callee BRPOPs on callKey. On a new payload, it checks if
	// callTimeoutKey is still valid and for how long (PTTL). If it is
	// still valid, it processes the call, otherwise it drops it.
	// callTimeoutKey is deleted.
	callKey            = "juggler:calls:{%s}"            // 1: URI
	callTimeoutKey     = "juggler:calls:timeout:{%s}:%s" // 1: URI, 2: mUUID
	defaultCallTimeout = time.Minute

	// RES: callee stores the result of the call in resKey (LPUSH) and
	// sets resTimeoutKey with an expiration of callTimeoutKey PTTL minus
	// the time of the call invocation.
	//
	// Caller BRPOPs on resKey. On a new payload, it checks if resTimeoutKey
	// is still valid. If it is, it sends the result on the connection,
	// otherwise it drops it. resTimeoutKey is deleted.
	resKey        = "juggler:results:{%s}"            // 1: cUUID
	resTimeoutKey = "juggler:results:timeout:{%s}:%s" // 1: cUUID, 2: mUUID
)

// TODO: not redis-specific, should go elsewhere...

// CallPayload is the JSON payload stored in redis for a call request.
type CallPayload struct {
	// ConnUUID identifies the calling connection.
	ConnUUID uuid.UUID `json:"conn_uuid"`
	// MsgUUID identifies the call message.
	MsgUUID uuid.UUID `json:"msg_uuid"`
	// Args carries the raw, not-yet-decoded call arguments.
	Args json.RawMessage `json:"args,omitempty"`
}
// Call enqueues the call request m from connection connUUID into redis so
// that a callee can pick it up: an expiring timeout key plus the payload
// pushed onto the URI's call list.
func (c *Connector) Call(connUUID uuid.UUID, m *msg.Call) error {
	pld := &CallPayload{
		ConnUUID: connUUID,
		MsgUUID:  m.UUID(),
		Args:     m.Payload.Args,
	}
	b, err := json.Marshal(pld)
	if err != nil {
		return err
	}

	// a call generates two redis key values:
	// - SET that expires after timeout
	// - LPUSH that adds the call payload to the list of calls under URI
	//
	// A callee will read with BRPOP on the list, and will check the
	// expiring key to see if it still exists. If it doesn't, the call is
	// dropped, unprocessed, as the client is not waiting for the response
	// anymore.
	//
	// If it is still there, the callee gets its PTTL and deletes it, and
	// it processes the call and stores the response payload under a new
	// key with an expiration of PTTL.
	rc := c.Pool.Get()
	defer rc.Close()

	// Timeout in milliseconds; fall back to the default when unset.
	to := int(m.Payload.Timeout / time.Millisecond)
	if to == 0 {
		to = int(defaultCallTimeout / time.Millisecond)
	}
	// The SET is buffered (Send) and flushed together with the LPUSH below.
	// The key's value is the timeout itself; PX is its expiration in ms.
	if err := rc.Send("SET", fmt.Sprintf(callTimeoutKey, m.Payload.URI, m.UUID()), to, "PX", to); err != nil {
		return err
	}
	_, err = rc.Do("LPUSH", fmt.Sprintf(callKey, m.Payload.URI), b)
	// TODO : support capping the list with LTRIM
	return err
}
var prng = rand.New(rand.NewSource(time.Now().UnixNano()))
func expJitterDelay(att int, base, max time.Duration) time.Duration {
exp := math.Pow(2, float64(att))
top := float64(base) * exp
return time.Duration(
prng.Int63n(int64(math.Min(float64(max), top))),
)
}
// ProcessCalls returns a channel streaming call request payloads for uri.
// The listening goroutine exits, closing the returned channel, when stop is
// signaled. Malformed payloads are logged and skipped; connection errors
// cause the redis connection to be discarded and re-acquired from the pool.
func (c *Connector) ProcessCalls(uri string, stop <-chan struct{}) <-chan *CallPayload {
	ch := make(chan *CallPayload)
	go func() {
		defer close(ch)

		k := fmt.Sprintf(callKey, uri)
		// BRPOP timeout in seconds; fall back to the default when unset.
		to := int(c.BlockingTimeout / time.Second)
		if to == 0 {
			to = int(defaultBlockingTimeout / time.Second)
		}

		var rc redis.Conn
		defer func() {
			if rc != nil {
				rc.Close()
			}
		}()
		for {
			// Exit promptly when asked to stop.
			select {
			case <-stop:
				return
			default:
			}

			if rc == nil {
				rc = c.Pool.Get()
			}
			vals, err := redis.Values(rc.Do("BRPOP", k, to))
			switch err {
			case redis.ErrNil:
				// no value available before the timeout, try again
				continue
			case nil:
				// Got a call payload; decode and forward it. Scan needs a
				// pointer destination (&b); the key at index 0 is skipped
				// with nil.
				var b []byte
				if _, err := redis.Scan(vals, nil, &b); err != nil {
					logf(c, "ProcessCalls: BRPOP failed to scan redis value: %v", err)
					continue
				}
				var cp CallPayload
				if err := json.Unmarshal(b, &cp); err != nil {
					logf(c, "ProcessCalls: failed to unmarshal call payload: %v", err)
					continue
				}
				ch <- &cp
			default:
				// error, try again with a different redis connection, in
				// case that node went down.
				rc.Close()
				rc = nil
			}
		}
	}()
	return ch
}
// ProcessResults is not yet implemented; it will consume call results.
func (c *Connector) ProcessResults() {
}

// Publish publishes m's arguments on m's channel.
func (c *Connector) Publish(m *msg.Pub) error {
	rc := c.Pool.Get()
	defer rc.Close()
	_, err := rc.Do("PUBLISH", m.Payload.Channel, m.Payload.Args)
	return err
}

// Subscribe subscribes to m's channel (or pattern).
func (c *Connector) Subscribe(m *msg.Sub) error {
	return c.subUnsub(m.Payload.Channel, m.Payload.Pattern, true)
}

// Unsubscribe unsubscribes from m's channel (or pattern).
func (c *Connector) Unsubscribe(m *msg.Unsb) error {
	return c.subUnsub(m.Payload.Channel, m.Payload.Pattern, false)
}

// subUnsubCmds maps (pattern?, subscribe?) to the matching redis command.
var subUnsubCmds = map[struct{ pat, sub bool }]string{
	{true, true}:   "PSUBSCRIBE",
	{true, false}:  "PUNSUBSCRIBE",
	{false, true}:  "SUBSCRIBE",
	{false, false}: "UNSUBSCRIBE",
}

// subUnsub issues the (un)subscribe command for channel ch; pat selects
// the pattern-matching variants.
func (c *Connector) subUnsub(ch string, pat bool, sub bool) error {
	// TODO : no, must be on the same connection always...
	rc := c.Pool.Get()
	defer rc.Close()
	cmd := subUnsubCmds[struct{ pat, sub bool }{pat, sub}]
	_, err := rc.Do(cmd, ch)
	return err
}

// ProcessEvents is not yet implemented; it will consume pub/sub events.
func (c *Connector) ProcessEvents() {
	// TODO : must be on the same connection as the sub
}

// logf logs via c.LogFunc when set, falling back to the standard logger.
func logf(c *Connector, f string, args ...interface{}) {
	if c.LogFunc != nil {
		c.LogFunc(f, args...)
	} else {
		log.Printf(f, args...)
	}
}
juggler/internal/redconn: check for stop signal, expired request when processing calls
package redconn
import (
"encoding/json"
"fmt"
"log"
"math"
"math/rand"
"time"
"github.com/PuerkitoBio/exp/juggler/msg"
"github.com/garyburd/redigo/redis"
"github.com/pborman/uuid"
)
// Pool defines the methods required for a redis pool that provides
// a method to get a connection and to release the pool's resources.
type Pool interface {
	// Get returns a connection from the pool.
	Get() redis.Conn
	// Close releases the resources used by the pool.
	Close() error
}

// Connector is a redis connector that provides the methods to
// interact with Redis using the juggler protocol.
type Connector struct {
	// Pool supplies redis connections.
	Pool Pool
	// BlockingTimeout bounds blocking reads (BRPOP); zero means
	// defaultBlockingTimeout.
	BlockingTimeout time.Duration
	// LogFunc, when set, receives log output instead of the stdlib logger.
	LogFunc func(string, ...interface{})
}

const (
	defaultBlockingTimeout = 5 * time.Second

	// CALL: callee BRPOPs on callKey. On a new payload, it checks if
	// callTimeoutKey is still valid and for how long (PTTL). If it is
	// still valid, it processes the call, otherwise it drops it.
	// callTimeoutKey is deleted.
	callKey            = "juggler:calls:{%s}"            // 1: URI
	callTimeoutKey     = "juggler:calls:timeout:{%s}:%s" // 1: URI, 2: mUUID
	defaultCallTimeout = time.Minute

	// RES: callee stores the result of the call in resKey (LPUSH) and
	// sets resTimeoutKey with an expiration of callTimeoutKey PTTL minus
	// the time of the call invocation.
	//
	// Caller BRPOPs on resKey. On a new payload, it checks if resTimeoutKey
	// is still valid. If it is, it sends the result on the connection,
	// otherwise it drops it. resTimeoutKey is deleted.
	resKey        = "juggler:results:{%s}"            // 1: cUUID
	resTimeoutKey = "juggler:results:timeout:{%s}:%s" // 1: cUUID, 2: mUUID
)

// TODO: not redis-specific, should go elsewhere...

// CallPayload is the JSON payload stored in redis for a call request.
type CallPayload struct {
	// ConnUUID identifies the calling connection.
	ConnUUID uuid.UUID `json:"conn_uuid"`
	// MsgUUID identifies the call message.
	MsgUUID uuid.UUID `json:"msg_uuid"`
	// Args carries the raw, not-yet-decoded call arguments.
	Args json.RawMessage `json:"args,omitempty"`
	// TTLAfterRead is local read metadata (remaining timeout-key TTL when
	// the payload was read); not part of the wire format.
	TTLAfterRead time.Duration `json:"-"`
	// ReadTimestamp is local read metadata (when the payload was read);
	// not part of the wire format.
	ReadTimestamp time.Time `json:"-"`
}
// Call enqueues the call request m from connection connUUID into redis so
// that a callee can pick it up: an expiring timeout key plus the payload
// pushed onto the URI's call list.
func (c *Connector) Call(connUUID uuid.UUID, m *msg.Call) error {
	pld := &CallPayload{
		ConnUUID: connUUID,
		MsgUUID:  m.UUID(),
		Args:     m.Payload.Args,
	}
	b, err := json.Marshal(pld)
	if err != nil {
		return err
	}

	// a call generates two redis key values:
	// - SET that expires after timeout
	// - LPUSH that adds the call payload to the list of calls under URI
	//
	// A callee will read with BRPOP on the list, and will check the
	// expiring key to see if it still exists. If it doesn't, the call is
	// dropped, unprocessed, as the client is not waiting for the response
	// anymore.
	//
	// If it is still there, the callee gets its PTTL and deletes it, and
	// it processes the call and stores the response payload under a new
	// key with an expiration of PTTL.
	rc := c.Pool.Get()
	defer rc.Close()

	// Timeout in milliseconds; fall back to the default when unset.
	to := int(m.Payload.Timeout / time.Millisecond)
	if to == 0 {
		to = int(defaultCallTimeout / time.Millisecond)
	}
	// The SET is buffered (Send) and flushed together with the LPUSH below.
	// The key's value is the timeout itself; PX is its expiration in ms.
	if err := rc.Send("SET", fmt.Sprintf(callTimeoutKey, m.Payload.URI, m.UUID()), to, "PX", to); err != nil {
		return err
	}
	_, err = rc.Do("LPUSH", fmt.Sprintf(callKey, m.Payload.URI), b)
	// TODO : support capping the list with LTRIM
	return err
}
var prng = rand.New(rand.NewSource(time.Now().UnixNano()))
func expJitterDelay(att int, base, max time.Duration) time.Duration {
exp := math.Pow(2, float64(att))
top := float64(base) * exp
return time.Duration(
prng.Int63n(int64(math.Min(float64(max), top))),
)
}
// ProcessCalls returns a channel that returns a stream of call requests
// for the specified URI. When the stop channel signals a stop, the
// returned channel is closed and the goroutine that listens for call
// requests is properly terminated. Calls whose timeout key has expired
// are dropped; valid payloads carry the observed TTL and read time.
func (c *Connector) ProcessCalls(uri string, stop <-chan struct{}) <-chan *CallPayload {
	ch := make(chan *CallPayload)
	go func() {
		defer close(ch)

		// compute the key and blocking timeout
		k := fmt.Sprintf(callKey, uri)
		to := int(c.BlockingTimeout / time.Second)
		if to == 0 {
			to = int(defaultBlockingTimeout / time.Second)
		}

		var rc redis.Conn
		defer func() {
			if rc != nil {
				rc.Close()
			}
		}()
		for {
			// check for the stop signal
			select {
			case <-stop:
				return
			default:
			}

			// grab a redis connection if we don't have any valid one.
			if rc == nil {
				rc = c.Pool.Get()
			}

			// block checking for a call request to process.
			vals, err := redis.Values(rc.Do("BRPOP", k, to))
			switch err {
			case redis.ErrNil:
				// no value available
				continue
			case nil:
				// Got a call payload; decode it. Scan needs a pointer
				// destination (&b); the key at index 0 is skipped with nil.
				var b []byte
				if _, err := redis.Scan(vals, nil, &b); err != nil {
					logf(c, "ProcessCalls: BRPOP failed to scan redis value: %v", err)
					continue
				}
				var cp CallPayload
				if err := json.Unmarshal(b, &cp); err != nil {
					logf(c, "ProcessCalls: BRPOP failed to unmarshal call payload: %v", err)
					continue
				}

				// Read the remaining TTL of the call's timeout key, then
				// delete it. These must be separate round-trips: a pipelined
				// Send(PTTL)/Do(DEL) would yield only the DEL reply, losing
				// the PTTL value.
				toKey := fmt.Sprintf(callTimeoutKey, uri, cp.MsgUUID)
				pttl, err := redis.Int(rc.Do("PTTL", toKey))
				if err != nil {
					logf(c, "ProcessCalls: PTTL failed: %v", err)
					continue
				}
				if _, err := rc.Do("DEL", toKey); err != nil {
					// best-effort cleanup; the key expires on its own.
					logf(c, "ProcessCalls: DEL failed: %v", err)
				}
				// PTTL returns -2 (missing key) or -1 (no expiration);
				// either way the caller is no longer waiting.
				if pttl <= 0 {
					logf(c, "ProcessCalls: message %v expired, dropping call", cp.MsgUUID)
					continue
				}
				cp.TTLAfterRead = time.Duration(pttl) * time.Millisecond
				cp.ReadTimestamp = time.Now()
				ch <- &cp
			default:
				// error, try again with a different redis connection, in
				// case that node went down.
				// TODO : jitter/exponential backoff
				rc.Close()
				rc = nil
			}
		}
	}()
	return ch
}
// ProcessResults is not yet implemented; it will consume call results.
func (c *Connector) ProcessResults() {
}

// Publish publishes m's arguments on m's channel.
func (c *Connector) Publish(m *msg.Pub) error {
	rc := c.Pool.Get()
	defer rc.Close()
	_, err := rc.Do("PUBLISH", m.Payload.Channel, m.Payload.Args)
	return err
}

// Subscribe subscribes to m's channel (or pattern).
func (c *Connector) Subscribe(m *msg.Sub) error {
	return c.subUnsub(m.Payload.Channel, m.Payload.Pattern, true)
}

// Unsubscribe unsubscribes from m's channel (or pattern).
func (c *Connector) Unsubscribe(m *msg.Unsb) error {
	return c.subUnsub(m.Payload.Channel, m.Payload.Pattern, false)
}

// subUnsubCmds maps (pattern?, subscribe?) to the matching redis command.
var subUnsubCmds = map[struct{ pat, sub bool }]string{
	{true, true}:   "PSUBSCRIBE",
	{true, false}:  "PUNSUBSCRIBE",
	{false, true}:  "SUBSCRIBE",
	{false, false}: "UNSUBSCRIBE",
}

// subUnsub issues the (un)subscribe command for channel ch; pat selects
// the pattern-matching variants.
func (c *Connector) subUnsub(ch string, pat bool, sub bool) error {
	// TODO : no, must be on the same connection always...
	rc := c.Pool.Get()
	defer rc.Close()
	cmd := subUnsubCmds[struct{ pat, sub bool }{pat, sub}]
	_, err := rc.Do(cmd, ch)
	return err
}

// ProcessEvents is not yet implemented; it will consume pub/sub events.
func (c *Connector) ProcessEvents() {
	// TODO : must be on the same connection as the sub
}

// logf logs via c.LogFunc when set, falling back to the standard logger.
func logf(c *Connector, f string, args ...interface{}) {
	if c.LogFunc != nil {
		c.LogFunc(f, args...)
	} else {
		log.Printf(f, args...)
	}
}
|
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"container/list"
"path"
"strings"
"github.com/unknwon/com"
log "gopkg.in/clog.v1"
"github.com/gogs/git-module"
"gogs.io/gogs/internal/context"
"gogs.io/gogs/internal/db"
"gogs.io/gogs/internal/db/errors"
"gogs.io/gogs/internal/form"
"gogs.io/gogs/internal/setting"
"gogs.io/gogs/internal/tool"
)
// Template names and the repo-setting key used by pull-request pages.
const (
	FORK                      = "repo/pulls/fork"
	COMPARE_PULL              = "repo/pulls/compare"
	PULL_COMMITS              = "repo/pulls/commits"
	PULL_FILES                = "repo/pulls/files"

	PULL_REQUEST_TEMPLATE_KEY = "PullRequestTemplate"
)

var (
	// PullRequestTemplateCandidates lists the repository paths checked, in
	// order, for a pull request description template.
	PullRequestTemplateCandidates = []string{
		"PULL_REQUEST.md",
		".gogs/PULL_REQUEST.md",
		".github/PULL_REQUEST.md",
	}
)
// parseBaseRepository loads the repository identified by the ":repoid" route
// parameter, verifies the current user may fork it, and fills the template
// data shared by the fork pages. It returns nil after writing an error
// response when any check fails (callers detect this via c.Written()).
func parseBaseRepository(c *context.Context) *db.Repository {
	baseRepo, err := db.GetRepositoryByID(c.ParamsInt64(":repoid"))
	if err != nil {
		c.NotFoundOrServerError("GetRepositoryByID", errors.IsRepoNotExist, err)
		return nil
	}

	// Hide the repository if it cannot be forked or the user lacks access.
	if !baseRepo.CanBeForked() || !baseRepo.HasAccess(c.User.ID) {
		c.NotFound()
		return nil
	}

	c.Data["repo_name"] = baseRepo.Name
	c.Data["description"] = baseRepo.Description
	c.Data["IsPrivate"] = baseRepo.IsPrivate

	if err = baseRepo.GetOwner(); err != nil {
		c.ServerError("GetOwner", err)
		return nil
	}
	c.Data["ForkFrom"] = baseRepo.Owner.Name + "/" + baseRepo.Name

	// The fork form lets the user pick any of their organizations as owner.
	if err := c.User.GetOrganizations(true); err != nil {
		c.ServerError("GetOrganizations", err)
		return nil
	}
	c.Data["Orgs"] = c.User.Orgs

	return baseRepo
}

// Fork renders the fork-repository form.
func Fork(c *context.Context) {
	c.Data["Title"] = c.Tr("new_fork")

	parseBaseRepository(c)
	if c.Written() {
		return
	}

	c.Data["ContextUser"] = c.User
	c.Success(FORK)
}
// ForkPost handles submission of the fork-repository form: it validates the
// target owner, checks for an existing fork, performs the fork, and redirects
// to the new repository on success.
func ForkPost(c *context.Context, f form.CreateRepo) {
	c.Data["Title"] = c.Tr("new_fork")

	baseRepo := parseBaseRepository(c)
	if c.Written() {
		return
	}

	// The target owner may be the user or one of their organizations.
	ctxUser := checkContextUser(c, f.UserID)
	if c.Written() {
		return
	}
	c.Data["ContextUser"] = ctxUser

	if c.HasError() {
		c.Success(FORK)
		return
	}

	// If this owner already has a fork, just redirect to it.
	repo, has, err := db.HasForkedRepo(ctxUser.ID, baseRepo.ID)
	if err != nil {
		c.ServerError("HasForkedRepo", err)
		return
	} else if has {
		c.Redirect(repo.Link())
		return
	}

	// Check ownership of organization.
	if ctxUser.IsOrganization() && !ctxUser.IsOwnedBy(c.User.ID) {
		c.Error(403)
		return
	}

	// Cannot fork to same owner
	if ctxUser.ID == baseRepo.OwnerID {
		c.RenderWithErr(c.Tr("repo.settings.cannot_fork_to_same_owner"), FORK, &f)
		return
	}

	repo, err = db.ForkRepository(c.User, ctxUser, baseRepo, f.RepoName, f.Description)
	if err != nil {
		c.Data["Err_RepoName"] = true
		// Map known creation failures to form validation messages.
		switch {
		case errors.IsReachLimitOfRepo(err):
			c.RenderWithErr(c.Tr("repo.form.reach_limit_of_creation", c.User.RepoCreationNum()), FORK, &f)
		case db.IsErrRepoAlreadyExist(err):
			c.RenderWithErr(c.Tr("repo.settings.new_owner_has_same_repo"), FORK, &f)
		case db.IsErrNameReserved(err):
			c.RenderWithErr(c.Tr("repo.form.name_reserved", err.(db.ErrNameReserved).Name), FORK, &f)
		case db.IsErrNamePatternNotAllowed(err):
			c.RenderWithErr(c.Tr("repo.form.name_pattern_not_allowed", err.(db.ErrNamePatternNotAllowed).Pattern), FORK, &f)
		default:
			c.ServerError("ForkPost", err)
		}
		return
	}

	log.Trace("Repository forked from '%s' -> '%s'", baseRepo.FullName(), repo.FullName())
	c.Redirect(repo.Link())
}
// checkPullInfo loads the issue identified by the ":index" route parameter
// and verifies it is a pull request. It returns nil after writing an error
// response on failure. For logged-in users the issue is marked as read.
func checkPullInfo(c *context.Context) *db.Issue {
	issue, err := db.GetIssueByIndex(c.Repo.Repository.ID, c.ParamsInt64(":index"))
	if err != nil {
		c.NotFoundOrServerError("GetIssueByIndex", errors.IsIssueNotExist, err)
		return nil
	}
	c.Data["Title"] = issue.Title
	c.Data["Issue"] = issue

	// Plain issues have no pull pages.
	if !issue.IsPull {
		c.Handle(404, "ViewPullCommits", nil)
		return nil
	}

	if c.IsLogged {
		// Update issue-user.
		if err = issue.ReadBy(c.User.ID); err != nil {
			c.ServerError("ReadBy", err)
			return nil
		}
	}

	return issue
}

// PrepareMergedViewPullInfo fills template data for an already-merged pull
// request: head/base targets and the commit/file counts between the merge
// base and the merged commit.
func PrepareMergedViewPullInfo(c *context.Context, issue *db.Issue) {
	pull := issue.PullRequest
	c.Data["HasMerged"] = true
	c.Data["HeadTarget"] = issue.PullRequest.HeadUserName + "/" + pull.HeadBranch
	c.Data["BaseTarget"] = c.Repo.Owner.Name + "/" + pull.BaseBranch

	var err error
	c.Data["NumCommits"], err = c.Repo.GitRepo.CommitsCountBetween(pull.MergeBase, pull.MergedCommitID)
	if err != nil {
		c.ServerError("Repo.GitRepo.CommitsCountBetween", err)
		return
	}

	c.Data["NumFiles"], err = c.Repo.GitRepo.FilesCountBetween(pull.MergeBase, pull.MergedCommitID)
	if err != nil {
		c.ServerError("Repo.GitRepo.FilesCountBetween", err)
		return
	}
}
// PrepareViewPullInfo fills template data for an open pull request and
// returns the computed pull request info, or nil when the request is broken
// (deleted head/base) or an error response was written.
func PrepareViewPullInfo(c *context.Context, issue *db.Issue) *git.PullRequestInfo {
	repo := c.Repo.Repository
	pull := issue.PullRequest

	c.Data["HeadTarget"] = pull.HeadUserName + "/" + pull.HeadBranch
	c.Data["BaseTarget"] = c.Repo.Owner.Name + "/" + pull.BaseBranch

	var (
		headGitRepo *git.Repository
		err         error
	)

	if pull.HeadRepo != nil {
		headGitRepo, err = git.OpenRepository(pull.HeadRepo.RepoPath())
		if err != nil {
			c.ServerError("OpenRepository", err)
			return nil
		}
	}

	// Head repository or branch is gone: mark the pull request broken.
	// NOTE(review): "IsPullReuqestBroken" is misspelled, but templates
	// presumably reference this exact key — do not rename here without
	// updating them.
	if pull.HeadRepo == nil || !headGitRepo.IsBranchExist(pull.HeadBranch) {
		c.Data["IsPullReuqestBroken"] = true
		c.Data["HeadTarget"] = "deleted"
		c.Data["NumCommits"] = 0
		c.Data["NumFiles"] = 0
		return nil
	}

	prInfo, err := headGitRepo.GetPullRequestInfo(db.RepoPath(repo.Owner.Name, repo.Name),
		pull.BaseBranch, pull.HeadBranch)
	if err != nil {
		// A missing base object means the base branch was deleted.
		if strings.Contains(err.Error(), "fatal: Not a valid object name") {
			c.Data["IsPullReuqestBroken"] = true
			c.Data["BaseTarget"] = "deleted"
			c.Data["NumCommits"] = 0
			c.Data["NumFiles"] = 0
			return nil
		}

		c.ServerError("GetPullRequestInfo", err)
		return nil
	}

	c.Data["NumCommits"] = prInfo.Commits.Len()
	c.Data["NumFiles"] = prInfo.NumFiles
	return prInfo
}
// ViewPullCommits renders the "Commits" tab of a pull request. For merged
// pulls the commit list is taken from the base repository between merge base
// and the merged commit; for open pulls it comes from the computed pull
// request info.
func ViewPullCommits(c *context.Context) {
	c.Data["PageIsPullList"] = true
	c.Data["PageIsPullCommits"] = true

	issue := checkPullInfo(c)
	if c.Written() {
		return
	}
	pull := issue.PullRequest

	if pull.HeadRepo != nil {
		c.Data["Username"] = pull.HeadUserName
		c.Data["Reponame"] = pull.HeadRepo.Name
	}

	var commits *list.List
	if pull.HasMerged {
		PrepareMergedViewPullInfo(c, issue)
		if c.Written() {
			return
		}
		startCommit, err := c.Repo.GitRepo.GetCommit(pull.MergeBase)
		if err != nil {
			c.ServerError("Repo.GitRepo.GetCommit", err)
			return
		}
		endCommit, err := c.Repo.GitRepo.GetCommit(pull.MergedCommitID)
		if err != nil {
			c.ServerError("Repo.GitRepo.GetCommit", err)
			return
		}
		commits, err = c.Repo.GitRepo.CommitsBetween(endCommit, startCommit)
		if err != nil {
			c.ServerError("Repo.GitRepo.CommitsBetween", err)
			return
		}

	} else {
		prInfo := PrepareViewPullInfo(c, issue)
		if c.Written() {
			return
		} else if prInfo == nil {
			c.NotFound()
			return
		}
		commits = prInfo.Commits
	}

	// Attach user accounts to commit author emails for display.
	commits = db.ValidateCommitsWithEmails(commits)
	c.Data["Commits"] = commits
	c.Data["CommitsCount"] = commits.Len()

	c.Success(PULL_COMMITS)
}
// ViewPullFiles renders the "Files changed" tab of a pull request. For merged
// pulls the diff is taken from the base repository between merge base and the
// merged commit; for open pulls it is taken from the head repository between
// the merge base and the head branch tip.
func ViewPullFiles(c *context.Context) {
	c.Data["PageIsPullList"] = true
	c.Data["PageIsPullFiles"] = true

	issue := checkPullInfo(c)
	if c.Written() {
		return
	}
	pull := issue.PullRequest

	var (
		diffRepoPath  string
		startCommitID string
		endCommitID   string
		gitRepo       *git.Repository
	)

	if pull.HasMerged {
		PrepareMergedViewPullInfo(c, issue)
		if c.Written() {
			return
		}

		diffRepoPath = c.Repo.GitRepo.Path
		startCommitID = pull.MergeBase
		endCommitID = pull.MergedCommitID
		gitRepo = c.Repo.GitRepo
	} else {
		prInfo := PrepareViewPullInfo(c, issue)
		if c.Written() {
			return
		} else if prInfo == nil {
			c.Handle(404, "ViewPullFiles", nil)
			return
		}

		headRepoPath := db.RepoPath(pull.HeadUserName, pull.HeadRepo.Name)

		headGitRepo, err := git.OpenRepository(headRepoPath)
		if err != nil {
			c.ServerError("OpenRepository", err)
			return
		}

		headCommitID, err := headGitRepo.GetBranchCommitID(pull.HeadBranch)
		if err != nil {
			c.ServerError("GetBranchCommitID", err)
			return
		}

		diffRepoPath = headRepoPath
		startCommitID = prInfo.MergeBase
		endCommitID = headCommitID
		gitRepo = headGitRepo
	}

	diff, err := db.GetDiffRange(diffRepoPath,
		startCommitID, endCommitID, setting.Git.MaxGitDiffLines,
		setting.Git.MaxGitDiffLineCharacters, setting.Git.MaxGitDiffFiles)
	if err != nil {
		c.ServerError("GetDiffRange", err)
		return
	}
	c.Data["Diff"] = diff
	c.Data["DiffNotAvailable"] = diff.NumFiles() == 0

	commit, err := gitRepo.GetCommit(endCommitID)
	if err != nil {
		c.ServerError("GetCommit", err)
		return
	}

	setEditorconfigIfExists(c)
	if c.Written() {
		return
	}

	c.Data["IsSplitStyle"] = c.Query("style") == "split"
	c.Data["IsImageFile"] = commit.IsImageFile

	// It is possible head repo has been deleted for merged pull requests
	if pull.HeadRepo != nil {
		c.Data["Username"] = pull.HeadUserName
		c.Data["Reponame"] = pull.HeadRepo.Name

		headTarget := path.Join(pull.HeadUserName, pull.HeadRepo.Name)
		c.Data["SourcePath"] = setting.AppSubURL + "/" + path.Join(headTarget, "src", endCommitID)
		c.Data["BeforeSourcePath"] = setting.AppSubURL + "/" + path.Join(headTarget, "src", startCommitID)
		c.Data["RawPath"] = setting.AppSubURL + "/" + path.Join(headTarget, "raw", endCommitID)
	}

	c.Data["RequireHighlightJS"] = true
	c.Success(PULL_FILES)
}
// MergePullRequest merges an open, auto-mergeable pull request using the merge
// style given by the "merge_style" query parameter, then redirects back to the
// pull request page. Closed, already-merged, or conflicting pulls get a 404.
func MergePullRequest(c *context.Context) {
	issue := checkPullInfo(c)
	if c.Written() {
		return
	}
	if issue.IsClosed {
		c.NotFound()
		return
	}

	pr, err := db.GetPullRequestByIssueID(issue.ID)
	if err != nil {
		c.NotFoundOrServerError("GetPullRequestByIssueID", db.IsErrPullRequestNotExist, err)
		return
	}

	if !pr.CanAutoMerge() || pr.HasMerged {
		c.NotFound()
		return
	}

	// Merge needs the issue and repository attached to the pull request record.
	pr.Issue = issue
	pr.Issue.Repo = c.Repo.Repository
	if err = pr.Merge(c.User, c.Repo.GitRepo, db.MergeStyle(c.Query("merge_style")), c.Query("commit_description")); err != nil {
		c.ServerError("Merge", err)
		return
	}

	log.Trace("Pull request merged: %d", pr.ID)
	c.Redirect(c.Repo.RepoLink + "/pulls/" + com.ToStr(pr.Index))
}
// ParseCompareInfo parses the compare URL wildcard
// ("<base branch>...[<head repo>:]<head branch>"), validates both sides
// (branch existence, fork relation, write access), and returns the head user,
// head repository, opened head git repository, computed pull request info, and
// the base/head branch names. All return values are zero when an error or 404
// page has already been rendered (check c.Written()).
func ParseCompareInfo(c *context.Context) (*db.User, *db.Repository, *git.Repository, *git.PullRequestInfo, string, string) {
	baseRepo := c.Repo.Repository

	// Get compared branches information
	// format: <base branch>...[<head repo>:]<head branch>
	// base<-head: master...head:feature
	// same repo: master...feature
	infos := strings.Split(c.Params("*"), "...")
	if len(infos) != 2 {
		log.Trace("ParseCompareInfo[%d]: not enough compared branches information %s", baseRepo.ID, infos)
		c.NotFound()
		return nil, nil, nil, nil, "", ""
	}

	baseBranch := infos[0]
	c.Data["BaseBranch"] = baseBranch

	var (
		headUser   *db.User
		headBranch string
		isSameRepo bool
		err        error
	)

	// If there is no head repository, it means pull request between same repository.
	headInfos := strings.Split(infos[1], ":")
	if len(headInfos) == 1 {
		isSameRepo = true
		headUser = c.Repo.Owner
		headBranch = headInfos[0]

	} else if len(headInfos) == 2 {
		headUser, err = db.GetUserByName(headInfos[0])
		if err != nil {
			c.NotFoundOrServerError("GetUserByName", errors.IsUserNotExist, err)
			return nil, nil, nil, nil, "", ""
		}
		headBranch = headInfos[1]
		// Same-repo compare may still spell out the owner explicitly.
		isSameRepo = headUser.ID == baseRepo.OwnerID

	} else {
		c.NotFound()
		return nil, nil, nil, nil, "", ""
	}
	c.Data["HeadUser"] = headUser
	c.Data["HeadBranch"] = headBranch
	c.Repo.PullRequest.SameRepo = isSameRepo

	// Check if base branch is valid.
	if !c.Repo.GitRepo.IsBranchExist(baseBranch) {
		c.NotFound()
		return nil, nil, nil, nil, "", ""
	}

	var (
		headRepo    *db.Repository
		headGitRepo *git.Repository
	)

	// In case user included redundant head user name for comparison in same repository,
	// no need to check the fork relation.
	if !isSameRepo {
		var has bool
		headRepo, has, err = db.HasForkedRepo(headUser.ID, baseRepo.ID)
		if err != nil {
			c.ServerError("HasForkedRepo", err)
			return nil, nil, nil, nil, "", ""
		} else if !has {
			log.Trace("ParseCompareInfo [base_repo_id: %d]: does not have fork or in same repository", baseRepo.ID)
			c.NotFound()
			return nil, nil, nil, nil, "", ""
		}

		headGitRepo, err = git.OpenRepository(db.RepoPath(headUser.Name, headRepo.Name))
		if err != nil {
			c.ServerError("OpenRepository", err)
			return nil, nil, nil, nil, "", ""
		}
	} else {
		headRepo = c.Repo.Repository
		headGitRepo = c.Repo.GitRepo
	}

	// Only users with write access to the head repository (or admins) may compare.
	if !c.User.IsWriterOfRepo(headRepo) && !c.User.IsAdmin {
		log.Trace("ParseCompareInfo [base_repo_id: %d]: does not have write access or site admin", baseRepo.ID)
		c.NotFound()
		return nil, nil, nil, nil, "", ""
	}

	// Check if head branch is valid.
	if !headGitRepo.IsBranchExist(headBranch) {
		c.NotFound()
		return nil, nil, nil, nil, "", ""
	}

	headBranches, err := headGitRepo.GetBranches()
	if err != nil {
		c.ServerError("GetBranches", err)
		return nil, nil, nil, nil, "", ""
	}
	c.Data["HeadBranches"] = headBranches

	prInfo, err := headGitRepo.GetPullRequestInfo(db.RepoPath(baseRepo.Owner.Name, baseRepo.Name), baseBranch, headBranch)
	if err != nil {
		// Unrelated histories: render the compare page with a notice instead of 500.
		if git.IsErrNoMergeBase(err) {
			c.Data["IsNoMergeBase"] = true
			c.Success(COMPARE_PULL)
		} else {
			c.ServerError("GetPullRequestInfo", err)
		}
		return nil, nil, nil, nil, "", ""
	}
	c.Data["BeforeCommitID"] = prInfo.MergeBase

	return headUser, headRepo, headGitRepo, prInfo, baseBranch, headBranch
}
// PrepareCompareDiff computes and stores in template data the diff and commit
// list between the merge base and the head branch tip. It returns true when
// there is nothing to compare (head tip equals the merge base) and false
// otherwise, including when an error page was rendered (check c.Written()).
func PrepareCompareDiff(
	c *context.Context,
	headUser *db.User,
	headRepo *db.Repository,
	headGitRepo *git.Repository,
	prInfo *git.PullRequestInfo,
	baseBranch, headBranch string) bool {

	var (
		repo = c.Repo.Repository
		err  error
	)

	// Get diff information.
	c.Data["CommitRepoLink"] = headRepo.Link()

	headCommitID, err := headGitRepo.GetBranchCommitID(headBranch)
	if err != nil {
		c.ServerError("GetBranchCommitID", err)
		return false
	}
	c.Data["AfterCommitID"] = headCommitID

	// Head tip equals merge base: both branches point at the same history.
	if headCommitID == prInfo.MergeBase {
		c.Data["IsNothingToCompare"] = true
		return true
	}

	diff, err := db.GetDiffRange(db.RepoPath(headUser.Name, headRepo.Name),
		prInfo.MergeBase, headCommitID, setting.Git.MaxGitDiffLines,
		setting.Git.MaxGitDiffLineCharacters, setting.Git.MaxGitDiffFiles)
	if err != nil {
		c.ServerError("GetDiffRange", err)
		return false
	}
	c.Data["Diff"] = diff
	c.Data["DiffNotAvailable"] = diff.NumFiles() == 0

	headCommit, err := headGitRepo.GetCommit(headCommitID)
	if err != nil {
		c.ServerError("GetCommit", err)
		return false
	}

	// Attach user accounts to commit author emails for display.
	prInfo.Commits = db.ValidateCommitsWithEmails(prInfo.Commits)
	c.Data["Commits"] = prInfo.Commits
	c.Data["CommitCount"] = prInfo.Commits.Len()
	c.Data["Username"] = headUser.Name
	c.Data["Reponame"] = headRepo.Name
	c.Data["IsImageFile"] = headCommit.IsImageFile

	headTarget := path.Join(headUser.Name, repo.Name)
	c.Data["SourcePath"] = setting.AppSubURL + "/" + path.Join(headTarget, "src", headCommitID)
	c.Data["BeforeSourcePath"] = setting.AppSubURL + "/" + path.Join(headTarget, "src", prInfo.MergeBase)
	c.Data["RawPath"] = setting.AppSubURL + "/" + path.Join(headTarget, "raw", headCommitID)
	return false
}
// CompareAndPullRequest renders the compare page between two branches and, if
// no open pull request already exists for the pair, prepares the new pull
// request form.
func CompareAndPullRequest(c *context.Context) {
	c.Data["Title"] = c.Tr("repo.pulls.compare_changes")
	c.Data["PageIsComparePull"] = true
	c.Data["IsDiffCompare"] = true
	c.Data["RequireHighlightJS"] = true
	setTemplateIfExists(c, PULL_REQUEST_TEMPLATE_KEY, PullRequestTemplateCandidates)
	renderAttachmentSettings(c)

	headUser, headRepo, headGitRepo, prInfo, baseBranch, headBranch := ParseCompareInfo(c)
	if c.Written() {
		return
	}

	// If an open pull request already exists for this head/base pair, show a
	// link to it instead of offering to create a duplicate.
	pr, err := db.GetUnmergedPullRequest(headRepo.ID, c.Repo.Repository.ID, headBranch, baseBranch)
	if err != nil {
		if !db.IsErrPullRequestNotExist(err) {
			c.ServerError("GetUnmergedPullRequest", err)
			return
		}
	} else {
		c.Data["HasPullRequest"] = true
		c.Data["PullRequest"] = pr
		c.Success(COMPARE_PULL)
		return
	}

	nothingToCompare := PrepareCompareDiff(c, headUser, headRepo, headGitRepo, prInfo, baseBranch, headBranch)
	if c.Written() {
		return
	}

	if !nothingToCompare {
		// Setup information for new form.
		RetrieveRepoMetas(c, c.Repo.Repository)
		if c.Written() {
			return
		}
	}

	setEditorconfigIfExists(c)
	if c.Written() {
		return
	}

	c.Data["IsSplitStyle"] = c.Query("style") == "split"
	c.Success(COMPARE_PULL)
}
// CompareAndPullRequestPost handles submission of the new pull request form:
// it re-validates the compare info and repo metadata, computes the patch
// between merge base and head branch, creates the issue plus pull request
// records, pushes the head branch to the base repository, and redirects to the
// new pull request.
func CompareAndPullRequestPost(c *context.Context, f form.NewIssue) {
	c.Data["Title"] = c.Tr("repo.pulls.compare_changes")
	c.Data["PageIsComparePull"] = true
	c.Data["IsDiffCompare"] = true
	c.Data["RequireHighlightJS"] = true
	renderAttachmentSettings(c)

	var (
		repo        = c.Repo.Repository
		attachments []string
	)

	headUser, headRepo, headGitRepo, prInfo, baseBranch, headBranch := ParseCompareInfo(c)
	if c.Written() {
		return
	}

	labelIDs, milestoneID, assigneeID := ValidateRepoMetas(c, f)
	if c.Written() {
		return
	}

	if setting.AttachmentEnabled {
		attachments = f.Files
	}

	// Form validation failed: re-render the compare page with the entered data.
	if c.HasError() {
		form.Assign(f, c.Data)

		// This stage is already stop creating new pull request, so it does not matter if it has
		// something to compare or not.
		PrepareCompareDiff(c, headUser, headRepo, headGitRepo, prInfo, baseBranch, headBranch)
		if c.Written() {
			return
		}

		c.Success(COMPARE_PULL)
		return
	}

	patch, err := headGitRepo.GetPatch(prInfo.MergeBase, headBranch)
	if err != nil {
		c.ServerError("GetPatch", err)
		return
	}

	pullIssue := &db.Issue{
		RepoID:      repo.ID,
		Index:       repo.NextIssueIndex(),
		Title:       f.Title,
		PosterID:    c.User.ID,
		Poster:      c.User,
		MilestoneID: milestoneID,
		AssigneeID:  assigneeID,
		IsPull:      true,
		Content:     f.Content,
	}
	pullRequest := &db.PullRequest{
		HeadRepoID:   headRepo.ID,
		BaseRepoID:   repo.ID,
		HeadUserName: headUser.Name,
		HeadBranch:   headBranch,
		BaseBranch:   baseBranch,
		HeadRepo:     headRepo,
		BaseRepo:     repo,
		MergeBase:    prInfo.MergeBase,
		Type:         db.PULL_REQUEST_GOGS,
	}

	// FIXME: check error in the case two people send pull request at almost same time, give nice error prompt
	// instead of 500.
	if err := db.NewPullRequest(repo, pullIssue, labelIDs, attachments, pullRequest, patch); err != nil {
		c.ServerError("NewPullRequest", err)
		return
	} else if err := pullRequest.PushToBaseRepo(); err != nil {
		c.ServerError("PushToBaseRepo", err)
		return
	}

	log.Trace("Pull request created: %d/%d", repo.ID, pullIssue.ID)
	c.Redirect(c.Repo.RepoLink + "/pulls/" + com.ToStr(pullIssue.Index))
}
// parseOwnerAndRepo resolves the ":username" and ":reponame" URL parameters to
// an owner account and one of its repositories. Both return values are nil
// after an error page has been rendered (check c.Written()).
func parseOwnerAndRepo(c *context.Context) (*db.User, *db.Repository) {
	// Look up the owning user first; the repository lookup is scoped to it.
	u, err := db.GetUserByName(c.Params(":username"))
	if err != nil {
		c.NotFoundOrServerError("GetUserByName", errors.IsUserNotExist, err)
		return nil, nil
	}

	r, err := db.GetRepositoryByName(u.ID, c.Params(":reponame"))
	if err != nil {
		c.NotFoundOrServerError("GetRepositoryByName", errors.IsRepoNotExist, err)
		return nil, nil
	}

	return u, r
}
// TriggerTask is a webhook-style endpoint that re-queues hook delivery and
// pull-request test tasks for a repository after a push. It authenticates the
// caller via a shared secret derived from the owner's salt.
func TriggerTask(c *context.Context) {
	pusherID := c.QueryInt64("pusher")
	branch := c.Query("branch")
	secret := c.Query("secret")
	if len(branch) == 0 || len(secret) == 0 || pusherID <= 0 {
		c.Error(404)
		log.Trace("TriggerTask: branch or secret is empty, or pusher ID is not valid")
		return
	}
	owner, repo := parseOwnerAndRepo(c)
	if c.Written() {
		return
	}
	// NOTE(review): plain != comparison of the MD5-of-salt secret is not
	// constant-time — consider subtle.ConstantTimeCompare; confirm threat model.
	if secret != tool.MD5(owner.Salt) {
		c.Error(404)
		log.Trace("TriggerTask [%s/%s]: invalid secret", owner.Name, repo.Name)
		return
	}

	pusher, err := db.GetUserByID(pusherID)
	if err != nil {
		c.NotFoundOrServerError("GetUserByID", errors.IsUserNotExist, err)
		return
	}

	log.Trace("TriggerTask '%s/%s' by '%s'", repo.Name, branch, pusher.Name)

	// Fire-and-forget: queue hook delivery and PR re-testing in the background.
	go db.HookQueue.Add(repo.ID)
	go db.AddTestPullRequestTask(pusher, repo.ID, branch, true)
	c.Status(202)
}
repo: able to fill pull request title by template from md file (#5901)
* able to fill pull request title by template from md file
* fix: unusedresult: result of fmt.Sprintf call not used (from govet)
* fix: remove import fmt -> not used
* after review / PullRequestTitleTemplateCandidates moved to after line 39
* Update pull.go
* Update pull.go
Co-authored-by: ᴜɴᴋɴᴡᴏɴ <51e69892ab49df85c6230ccc57f8e1d1606caccc@gogs.io>
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"container/list"
"path"
"strings"
"github.com/unknwon/com"
log "gopkg.in/clog.v1"
"github.com/gogs/git-module"
"gogs.io/gogs/internal/context"
"gogs.io/gogs/internal/db"
"gogs.io/gogs/internal/db/errors"
"gogs.io/gogs/internal/form"
"gogs.io/gogs/internal/setting"
"gogs.io/gogs/internal/tool"
)
// Template paths and template-data keys used by the pull request routes.
const (
	FORK          = "repo/pulls/fork"
	COMPARE_PULL  = "repo/pulls/compare"
	PULL_COMMITS  = "repo/pulls/commits"
	PULL_FILES    = "repo/pulls/files"

	// Keys under which repository-provided body/title templates are stored
	// in c.Data by setTemplateIfExists.
	PULL_REQUEST_TEMPLATE_KEY       = "PullRequestTemplate"
	PULL_REQUEST_TITLE_TEMPLATE_KEY = "PullRequestTitleTemplate"
)
// Candidate file paths, checked in order, from which pull request body and
// title templates are loaded out of the repository.
var (
	PullRequestTemplateCandidates = []string{
		"PULL_REQUEST.md",
		".gogs/PULL_REQUEST.md",
		".github/PULL_REQUEST.md",
	}

	PullRequestTitleTemplateCandidates = []string{
		"PULL_REQUEST_TITLE.md",
		".gogs/PULL_REQUEST_TITLE.md",
		".github/PULL_REQUEST_TITLE.md",
	}
)
// parseBaseRepository loads the repository identified by the ":repoid" URL
// parameter, verifies it can be forked by the current user, and pre-fills the
// fork form's template data. It returns nil after rendering an error page.
func parseBaseRepository(c *context.Context) *db.Repository {
	baseRepo, err := db.GetRepositoryByID(c.ParamsInt64(":repoid"))
	if err != nil {
		c.NotFoundOrServerError("GetRepositoryByID", errors.IsRepoNotExist, err)
		return nil
	}

	if !baseRepo.CanBeForked() || !baseRepo.HasAccess(c.User.ID) {
		c.NotFound()
		return nil
	}

	// Pre-fill the fork form with the base repository's details.
	c.Data["repo_name"] = baseRepo.Name
	c.Data["description"] = baseRepo.Description
	c.Data["IsPrivate"] = baseRepo.IsPrivate

	if err = baseRepo.GetOwner(); err != nil {
		c.ServerError("GetOwner", err)
		return nil
	}
	c.Data["ForkFrom"] = baseRepo.Owner.Name + "/" + baseRepo.Name

	// Organizations the user belongs to are offered as fork destinations.
	if err := c.User.GetOrganizations(true); err != nil {
		c.ServerError("GetOrganizations", err)
		return nil
	}
	c.Data["Orgs"] = c.User.Orgs

	return baseRepo
}
// Fork renders the "create fork" form for the repository in ":repoid".
func Fork(c *context.Context) {
	c.Data["Title"] = c.Tr("new_fork")

	parseBaseRepository(c)
	if c.Written() {
		return
	}

	c.Data["ContextUser"] = c.User
	c.Success(FORK)
}
// ForkPost handles submission of the fork form: it validates the target owner
// (user or owned organization), rejects forking into the same owner or a
// duplicate fork, creates the fork, and redirects to the new repository.
func ForkPost(c *context.Context, f form.CreateRepo) {
	c.Data["Title"] = c.Tr("new_fork")

	baseRepo := parseBaseRepository(c)
	if c.Written() {
		return
	}

	ctxUser := checkContextUser(c, f.UserID)
	if c.Written() {
		return
	}
	c.Data["ContextUser"] = ctxUser

	if c.HasError() {
		c.Success(FORK)
		return
	}

	// Existing fork under the chosen owner: just redirect to it.
	repo, has, err := db.HasForkedRepo(ctxUser.ID, baseRepo.ID)
	if err != nil {
		c.ServerError("HasForkedRepo", err)
		return
	} else if has {
		c.Redirect(repo.Link())
		return
	}

	// Check ownership of organization.
	if ctxUser.IsOrganization() && !ctxUser.IsOwnedBy(c.User.ID) {
		c.Error(403)
		return
	}

	// Cannot fork to same owner
	if ctxUser.ID == baseRepo.OwnerID {
		c.RenderWithErr(c.Tr("repo.settings.cannot_fork_to_same_owner"), FORK, &f)
		return
	}

	repo, err = db.ForkRepository(c.User, ctxUser, baseRepo, f.RepoName, f.Description)
	if err != nil {
		// Map known creation failures to form errors; everything else is a 500.
		c.Data["Err_RepoName"] = true
		switch {
		case errors.IsReachLimitOfRepo(err):
			c.RenderWithErr(c.Tr("repo.form.reach_limit_of_creation", c.User.RepoCreationNum()), FORK, &f)
		case db.IsErrRepoAlreadyExist(err):
			c.RenderWithErr(c.Tr("repo.settings.new_owner_has_same_repo"), FORK, &f)
		case db.IsErrNameReserved(err):
			c.RenderWithErr(c.Tr("repo.form.name_reserved", err.(db.ErrNameReserved).Name), FORK, &f)
		case db.IsErrNamePatternNotAllowed(err):
			c.RenderWithErr(c.Tr("repo.form.name_pattern_not_allowed", err.(db.ErrNamePatternNotAllowed).Pattern), FORK, &f)
		default:
			c.ServerError("ForkPost", err)
		}
		return
	}

	log.Trace("Repository forked from '%s' -> '%s'", baseRepo.FullName(), repo.FullName())
	c.Redirect(repo.Link())
}
// checkPullInfo loads the issue referenced by the ":index" URL parameter and
// verifies it is a pull request. It returns nil after rendering an error page
// when the issue is missing or is not a pull request. For logged-in users the
// issue is additionally marked as read.
func checkPullInfo(c *context.Context) *db.Issue {
	issue, err := db.GetIssueByIndex(c.Repo.Repository.ID, c.ParamsInt64(":index"))
	if err != nil {
		c.NotFoundOrServerError("GetIssueByIndex", errors.IsIssueNotExist, err)
		return nil
	}
	c.Data["Title"] = issue.Title
	c.Data["Issue"] = issue

	if !issue.IsPull {
		// Report this helper's own name (previously the misleading
		// "ViewPullCommits") so error logs point at the right routine.
		c.Handle(404, "checkPullInfo", nil)
		return nil
	}

	if c.IsLogged {
		// Update issue-user.
		if err = issue.ReadBy(c.User.ID); err != nil {
			c.ServerError("ReadBy", err)
			return nil
		}
	}
	return issue
}
// PrepareMergedViewPullInfo populates template data (head/base targets and
// commit/file counts) for a pull request that has already been merged.
// On git errors it renders a 500 page and returns early.
func PrepareMergedViewPullInfo(c *context.Context, issue *db.Issue) {
	pull := issue.PullRequest
	c.Data["HasMerged"] = true
	c.Data["HeadTarget"] = issue.PullRequest.HeadUserName + "/" + pull.HeadBranch
	c.Data["BaseTarget"] = c.Repo.Owner.Name + "/" + pull.BaseBranch
	var err error
	// Counts are computed between the recorded merge base and the merged commit
	// in the base repository.
	c.Data["NumCommits"], err = c.Repo.GitRepo.CommitsCountBetween(pull.MergeBase, pull.MergedCommitID)
	if err != nil {
		c.ServerError("Repo.GitRepo.CommitsCountBetween", err)
		return
	}
	c.Data["NumFiles"], err = c.Repo.GitRepo.FilesCountBetween(pull.MergeBase, pull.MergedCommitID)
	if err != nil {
		c.ServerError("Repo.GitRepo.FilesCountBetween", err)
		return
	}
}
// PrepareViewPullInfo populates template data for an open (unmerged) pull
// request and returns the computed pull request info. It returns nil when the
// head repository/branch no longer exists (pull marked broken in the template
// data) or when an error page has been rendered.
func PrepareViewPullInfo(c *context.Context, issue *db.Issue) *git.PullRequestInfo {
	repo := c.Repo.Repository
	pull := issue.PullRequest

	c.Data["HeadTarget"] = pull.HeadUserName + "/" + pull.HeadBranch
	c.Data["BaseTarget"] = c.Repo.Owner.Name + "/" + pull.BaseBranch

	var (
		headGitRepo *git.Repository
		err         error
	)

	if pull.HeadRepo != nil {
		headGitRepo, err = git.OpenRepository(pull.HeadRepo.RepoPath())
		if err != nil {
			c.ServerError("OpenRepository", err)
			return nil
		}
	}

	// Head repository or branch was deleted: render the pull as broken.
	// NOTE(review): "IsPullReuqestBroken" is misspelled but templates
	// presumably key on this exact string — confirm before renaming.
	if pull.HeadRepo == nil || !headGitRepo.IsBranchExist(pull.HeadBranch) {
		c.Data["IsPullReuqestBroken"] = true
		c.Data["HeadTarget"] = "deleted"
		c.Data["NumCommits"] = 0
		c.Data["NumFiles"] = 0
		return nil
	}

	prInfo, err := headGitRepo.GetPullRequestInfo(db.RepoPath(repo.Owner.Name, repo.Name),
		pull.BaseBranch, pull.HeadBranch)
	if err != nil {
		// Base branch object is gone; treat the pull as broken rather than 500.
		if strings.Contains(err.Error(), "fatal: Not a valid object name") {
			c.Data["IsPullReuqestBroken"] = true
			c.Data["BaseTarget"] = "deleted"
			c.Data["NumCommits"] = 0
			c.Data["NumFiles"] = 0
			return nil
		}

		c.ServerError("GetPullRequestInfo", err)
		return nil
	}
	c.Data["NumCommits"] = prInfo.Commits.Len()
	c.Data["NumFiles"] = prInfo.NumFiles
	return prInfo
}
// ViewPullCommits renders the "Commits" tab of a pull request. For merged
// pulls the commit list is taken from the base repository between merge base
// and the merged commit; for open pulls it comes from the computed pull
// request info.
func ViewPullCommits(c *context.Context) {
	c.Data["PageIsPullList"] = true
	c.Data["PageIsPullCommits"] = true

	issue := checkPullInfo(c)
	if c.Written() {
		return
	}
	pull := issue.PullRequest

	if pull.HeadRepo != nil {
		c.Data["Username"] = pull.HeadUserName
		c.Data["Reponame"] = pull.HeadRepo.Name
	}

	var commits *list.List
	if pull.HasMerged {
		PrepareMergedViewPullInfo(c, issue)
		if c.Written() {
			return
		}
		startCommit, err := c.Repo.GitRepo.GetCommit(pull.MergeBase)
		if err != nil {
			c.ServerError("Repo.GitRepo.GetCommit", err)
			return
		}
		endCommit, err := c.Repo.GitRepo.GetCommit(pull.MergedCommitID)
		if err != nil {
			c.ServerError("Repo.GitRepo.GetCommit", err)
			return
		}
		commits, err = c.Repo.GitRepo.CommitsBetween(endCommit, startCommit)
		if err != nil {
			c.ServerError("Repo.GitRepo.CommitsBetween", err)
			return
		}

	} else {
		prInfo := PrepareViewPullInfo(c, issue)
		if c.Written() {
			return
		} else if prInfo == nil {
			c.NotFound()
			return
		}
		commits = prInfo.Commits
	}

	// Attach user accounts to commit author emails for display.
	commits = db.ValidateCommitsWithEmails(commits)
	c.Data["Commits"] = commits
	c.Data["CommitsCount"] = commits.Len()

	c.Success(PULL_COMMITS)
}
// ViewPullFiles renders the "Files changed" tab of a pull request. For merged
// pulls the diff is taken from the base repository between merge base and the
// merged commit; for open pulls it is taken from the head repository between
// the merge base and the head branch tip.
func ViewPullFiles(c *context.Context) {
	c.Data["PageIsPullList"] = true
	c.Data["PageIsPullFiles"] = true

	issue := checkPullInfo(c)
	if c.Written() {
		return
	}
	pull := issue.PullRequest

	var (
		diffRepoPath  string
		startCommitID string
		endCommitID   string
		gitRepo       *git.Repository
	)

	if pull.HasMerged {
		PrepareMergedViewPullInfo(c, issue)
		if c.Written() {
			return
		}

		diffRepoPath = c.Repo.GitRepo.Path
		startCommitID = pull.MergeBase
		endCommitID = pull.MergedCommitID
		gitRepo = c.Repo.GitRepo
	} else {
		prInfo := PrepareViewPullInfo(c, issue)
		if c.Written() {
			return
		} else if prInfo == nil {
			c.Handle(404, "ViewPullFiles", nil)
			return
		}

		headRepoPath := db.RepoPath(pull.HeadUserName, pull.HeadRepo.Name)

		headGitRepo, err := git.OpenRepository(headRepoPath)
		if err != nil {
			c.ServerError("OpenRepository", err)
			return
		}

		headCommitID, err := headGitRepo.GetBranchCommitID(pull.HeadBranch)
		if err != nil {
			c.ServerError("GetBranchCommitID", err)
			return
		}

		diffRepoPath = headRepoPath
		startCommitID = prInfo.MergeBase
		endCommitID = headCommitID
		gitRepo = headGitRepo
	}

	diff, err := db.GetDiffRange(diffRepoPath,
		startCommitID, endCommitID, setting.Git.MaxGitDiffLines,
		setting.Git.MaxGitDiffLineCharacters, setting.Git.MaxGitDiffFiles)
	if err != nil {
		c.ServerError("GetDiffRange", err)
		return
	}
	c.Data["Diff"] = diff
	c.Data["DiffNotAvailable"] = diff.NumFiles() == 0

	commit, err := gitRepo.GetCommit(endCommitID)
	if err != nil {
		c.ServerError("GetCommit", err)
		return
	}

	setEditorconfigIfExists(c)
	if c.Written() {
		return
	}

	c.Data["IsSplitStyle"] = c.Query("style") == "split"
	c.Data["IsImageFile"] = commit.IsImageFile

	// It is possible head repo has been deleted for merged pull requests
	if pull.HeadRepo != nil {
		c.Data["Username"] = pull.HeadUserName
		c.Data["Reponame"] = pull.HeadRepo.Name

		headTarget := path.Join(pull.HeadUserName, pull.HeadRepo.Name)
		c.Data["SourcePath"] = setting.AppSubURL + "/" + path.Join(headTarget, "src", endCommitID)
		c.Data["BeforeSourcePath"] = setting.AppSubURL + "/" + path.Join(headTarget, "src", startCommitID)
		c.Data["RawPath"] = setting.AppSubURL + "/" + path.Join(headTarget, "raw", endCommitID)
	}

	c.Data["RequireHighlightJS"] = true
	c.Success(PULL_FILES)
}
// MergePullRequest merges an open, auto-mergeable pull request using the merge
// style given by the "merge_style" query parameter, then redirects back to the
// pull request page. Closed, already-merged, or conflicting pulls get a 404.
func MergePullRequest(c *context.Context) {
	issue := checkPullInfo(c)
	if c.Written() {
		return
	}
	if issue.IsClosed {
		c.NotFound()
		return
	}

	pr, err := db.GetPullRequestByIssueID(issue.ID)
	if err != nil {
		c.NotFoundOrServerError("GetPullRequestByIssueID", db.IsErrPullRequestNotExist, err)
		return
	}

	if !pr.CanAutoMerge() || pr.HasMerged {
		c.NotFound()
		return
	}

	// Merge needs the issue and repository attached to the pull request record.
	pr.Issue = issue
	pr.Issue.Repo = c.Repo.Repository
	if err = pr.Merge(c.User, c.Repo.GitRepo, db.MergeStyle(c.Query("merge_style")), c.Query("commit_description")); err != nil {
		c.ServerError("Merge", err)
		return
	}

	log.Trace("Pull request merged: %d", pr.ID)
	c.Redirect(c.Repo.RepoLink + "/pulls/" + com.ToStr(pr.Index))
}
// ParseCompareInfo parses the compare URL wildcard
// ("<base branch>...[<head repo>:]<head branch>"), validates both sides
// (branch existence, fork relation, write access), and returns the head user,
// head repository, opened head git repository, computed pull request info, and
// the base/head branch names. All return values are zero when an error or 404
// page has already been rendered (check c.Written()).
func ParseCompareInfo(c *context.Context) (*db.User, *db.Repository, *git.Repository, *git.PullRequestInfo, string, string) {
	baseRepo := c.Repo.Repository

	// Get compared branches information
	// format: <base branch>...[<head repo>:]<head branch>
	// base<-head: master...head:feature
	// same repo: master...feature
	infos := strings.Split(c.Params("*"), "...")
	if len(infos) != 2 {
		log.Trace("ParseCompareInfo[%d]: not enough compared branches information %s", baseRepo.ID, infos)
		c.NotFound()
		return nil, nil, nil, nil, "", ""
	}

	baseBranch := infos[0]
	c.Data["BaseBranch"] = baseBranch

	var (
		headUser   *db.User
		headBranch string
		isSameRepo bool
		err        error
	)

	// If there is no head repository, it means pull request between same repository.
	headInfos := strings.Split(infos[1], ":")
	if len(headInfos) == 1 {
		isSameRepo = true
		headUser = c.Repo.Owner
		headBranch = headInfos[0]

	} else if len(headInfos) == 2 {
		headUser, err = db.GetUserByName(headInfos[0])
		if err != nil {
			c.NotFoundOrServerError("GetUserByName", errors.IsUserNotExist, err)
			return nil, nil, nil, nil, "", ""
		}
		headBranch = headInfos[1]
		// Same-repo compare may still spell out the owner explicitly.
		isSameRepo = headUser.ID == baseRepo.OwnerID

	} else {
		c.NotFound()
		return nil, nil, nil, nil, "", ""
	}
	c.Data["HeadUser"] = headUser
	c.Data["HeadBranch"] = headBranch
	c.Repo.PullRequest.SameRepo = isSameRepo

	// Check if base branch is valid.
	if !c.Repo.GitRepo.IsBranchExist(baseBranch) {
		c.NotFound()
		return nil, nil, nil, nil, "", ""
	}

	var (
		headRepo    *db.Repository
		headGitRepo *git.Repository
	)

	// In case user included redundant head user name for comparison in same repository,
	// no need to check the fork relation.
	if !isSameRepo {
		var has bool
		headRepo, has, err = db.HasForkedRepo(headUser.ID, baseRepo.ID)
		if err != nil {
			c.ServerError("HasForkedRepo", err)
			return nil, nil, nil, nil, "", ""
		} else if !has {
			log.Trace("ParseCompareInfo [base_repo_id: %d]: does not have fork or in same repository", baseRepo.ID)
			c.NotFound()
			return nil, nil, nil, nil, "", ""
		}

		headGitRepo, err = git.OpenRepository(db.RepoPath(headUser.Name, headRepo.Name))
		if err != nil {
			c.ServerError("OpenRepository", err)
			return nil, nil, nil, nil, "", ""
		}
	} else {
		headRepo = c.Repo.Repository
		headGitRepo = c.Repo.GitRepo
	}

	// Only users with write access to the head repository (or admins) may compare.
	if !c.User.IsWriterOfRepo(headRepo) && !c.User.IsAdmin {
		log.Trace("ParseCompareInfo [base_repo_id: %d]: does not have write access or site admin", baseRepo.ID)
		c.NotFound()
		return nil, nil, nil, nil, "", ""
	}

	// Check if head branch is valid.
	if !headGitRepo.IsBranchExist(headBranch) {
		c.NotFound()
		return nil, nil, nil, nil, "", ""
	}

	headBranches, err := headGitRepo.GetBranches()
	if err != nil {
		c.ServerError("GetBranches", err)
		return nil, nil, nil, nil, "", ""
	}
	c.Data["HeadBranches"] = headBranches

	prInfo, err := headGitRepo.GetPullRequestInfo(db.RepoPath(baseRepo.Owner.Name, baseRepo.Name), baseBranch, headBranch)
	if err != nil {
		// Unrelated histories: render the compare page with a notice instead of 500.
		if git.IsErrNoMergeBase(err) {
			c.Data["IsNoMergeBase"] = true
			c.Success(COMPARE_PULL)
		} else {
			c.ServerError("GetPullRequestInfo", err)
		}
		return nil, nil, nil, nil, "", ""
	}
	c.Data["BeforeCommitID"] = prInfo.MergeBase

	return headUser, headRepo, headGitRepo, prInfo, baseBranch, headBranch
}
// PrepareCompareDiff computes and stores in template data the diff and commit
// list between the merge base and the head branch tip. It returns true when
// there is nothing to compare (head tip equals the merge base) and false
// otherwise, including when an error page was rendered (check c.Written()).
func PrepareCompareDiff(
	c *context.Context,
	headUser *db.User,
	headRepo *db.Repository,
	headGitRepo *git.Repository,
	prInfo *git.PullRequestInfo,
	baseBranch, headBranch string) bool {

	var (
		repo = c.Repo.Repository
		err  error
	)

	// Get diff information.
	c.Data["CommitRepoLink"] = headRepo.Link()

	headCommitID, err := headGitRepo.GetBranchCommitID(headBranch)
	if err != nil {
		c.ServerError("GetBranchCommitID", err)
		return false
	}
	c.Data["AfterCommitID"] = headCommitID

	// Head tip equals merge base: both branches point at the same history.
	if headCommitID == prInfo.MergeBase {
		c.Data["IsNothingToCompare"] = true
		return true
	}

	diff, err := db.GetDiffRange(db.RepoPath(headUser.Name, headRepo.Name),
		prInfo.MergeBase, headCommitID, setting.Git.MaxGitDiffLines,
		setting.Git.MaxGitDiffLineCharacters, setting.Git.MaxGitDiffFiles)
	if err != nil {
		c.ServerError("GetDiffRange", err)
		return false
	}
	c.Data["Diff"] = diff
	c.Data["DiffNotAvailable"] = diff.NumFiles() == 0

	headCommit, err := headGitRepo.GetCommit(headCommitID)
	if err != nil {
		c.ServerError("GetCommit", err)
		return false
	}

	// Attach user accounts to commit author emails for display.
	prInfo.Commits = db.ValidateCommitsWithEmails(prInfo.Commits)
	c.Data["Commits"] = prInfo.Commits
	c.Data["CommitCount"] = prInfo.Commits.Len()
	c.Data["Username"] = headUser.Name
	c.Data["Reponame"] = headRepo.Name
	c.Data["IsImageFile"] = headCommit.IsImageFile

	headTarget := path.Join(headUser.Name, repo.Name)
	c.Data["SourcePath"] = setting.AppSubURL + "/" + path.Join(headTarget, "src", headCommitID)
	c.Data["BeforeSourcePath"] = setting.AppSubURL + "/" + path.Join(headTarget, "src", prInfo.MergeBase)
	c.Data["RawPath"] = setting.AppSubURL + "/" + path.Join(headTarget, "raw", headCommitID)
	return false
}
// CompareAndPullRequest renders the compare page between two branches and, if
// no open pull request already exists for the pair, prepares the new pull
// request form, pre-filling body and title from repository-provided templates
// when present.
func CompareAndPullRequest(c *context.Context) {
	c.Data["Title"] = c.Tr("repo.pulls.compare_changes")
	c.Data["PageIsComparePull"] = true
	c.Data["IsDiffCompare"] = true
	c.Data["RequireHighlightJS"] = true
	setTemplateIfExists(c, PULL_REQUEST_TEMPLATE_KEY, PullRequestTemplateCandidates)
	renderAttachmentSettings(c)

	headUser, headRepo, headGitRepo, prInfo, baseBranch, headBranch := ParseCompareInfo(c)
	if c.Written() {
		return
	}

	// If an open pull request already exists for this head/base pair, show a
	// link to it instead of offering to create a duplicate.
	pr, err := db.GetUnmergedPullRequest(headRepo.ID, c.Repo.Repository.ID, headBranch, baseBranch)
	if err != nil {
		if !db.IsErrPullRequestNotExist(err) {
			c.ServerError("GetUnmergedPullRequest", err)
			return
		}
	} else {
		c.Data["HasPullRequest"] = true
		c.Data["PullRequest"] = pr
		c.Success(COMPARE_PULL)
		return
	}

	nothingToCompare := PrepareCompareDiff(c, headUser, headRepo, headGitRepo, prInfo, baseBranch, headBranch)
	if c.Written() {
		return
	}

	if !nothingToCompare {
		// Setup information for new form.
		RetrieveRepoMetas(c, c.Repo.Repository)
		if c.Written() {
			return
		}
	}

	setEditorconfigIfExists(c)
	if c.Written() {
		return
	}

	c.Data["IsSplitStyle"] = c.Query("style") == "split"

	// Pre-fill the title from the repository's title template, substituting
	// branch placeholders. A comma-ok assertion guards against a non-string
	// template value panicking the handler (the original asserted unchecked).
	setTemplateIfExists(c, PULL_REQUEST_TITLE_TEMPLATE_KEY, PullRequestTitleTemplateCandidates)
	if customTitle, ok := c.Data[PULL_REQUEST_TITLE_TEMPLATE_KEY].(string); ok {
		r := strings.NewReplacer("{{headBranch}}", headBranch, "{{baseBranch}}", baseBranch)
		c.Data["title"] = r.Replace(customTitle)
	}
	c.Success(COMPARE_PULL)
}
// CompareAndPullRequestPost handles submission of the new pull request form:
// it re-validates the compare info and repo metadata, computes the patch
// between merge base and head branch, creates the issue plus pull request
// records, pushes the head branch to the base repository, and redirects to the
// new pull request.
func CompareAndPullRequestPost(c *context.Context, f form.NewIssue) {
	c.Data["Title"] = c.Tr("repo.pulls.compare_changes")
	c.Data["PageIsComparePull"] = true
	c.Data["IsDiffCompare"] = true
	c.Data["RequireHighlightJS"] = true
	renderAttachmentSettings(c)

	var (
		repo        = c.Repo.Repository
		attachments []string
	)

	headUser, headRepo, headGitRepo, prInfo, baseBranch, headBranch := ParseCompareInfo(c)
	if c.Written() {
		return
	}

	labelIDs, milestoneID, assigneeID := ValidateRepoMetas(c, f)
	if c.Written() {
		return
	}

	if setting.AttachmentEnabled {
		attachments = f.Files
	}

	// Form validation failed: re-render the compare page with the entered data.
	if c.HasError() {
		form.Assign(f, c.Data)

		// This stage is already stop creating new pull request, so it does not matter if it has
		// something to compare or not.
		PrepareCompareDiff(c, headUser, headRepo, headGitRepo, prInfo, baseBranch, headBranch)
		if c.Written() {
			return
		}

		c.Success(COMPARE_PULL)
		return
	}

	patch, err := headGitRepo.GetPatch(prInfo.MergeBase, headBranch)
	if err != nil {
		c.ServerError("GetPatch", err)
		return
	}

	pullIssue := &db.Issue{
		RepoID:      repo.ID,
		Index:       repo.NextIssueIndex(),
		Title:       f.Title,
		PosterID:    c.User.ID,
		Poster:      c.User,
		MilestoneID: milestoneID,
		AssigneeID:  assigneeID,
		IsPull:      true,
		Content:     f.Content,
	}
	pullRequest := &db.PullRequest{
		HeadRepoID:   headRepo.ID,
		BaseRepoID:   repo.ID,
		HeadUserName: headUser.Name,
		HeadBranch:   headBranch,
		BaseBranch:   baseBranch,
		HeadRepo:     headRepo,
		BaseRepo:     repo,
		MergeBase:    prInfo.MergeBase,
		Type:         db.PULL_REQUEST_GOGS,
	}

	// FIXME: check error in the case two people send pull request at almost same time, give nice error prompt
	// instead of 500.
	if err := db.NewPullRequest(repo, pullIssue, labelIDs, attachments, pullRequest, patch); err != nil {
		c.ServerError("NewPullRequest", err)
		return
	} else if err := pullRequest.PushToBaseRepo(); err != nil {
		c.ServerError("PushToBaseRepo", err)
		return
	}

	log.Trace("Pull request created: %d/%d", repo.ID, pullIssue.ID)
	c.Redirect(c.Repo.RepoLink + "/pulls/" + com.ToStr(pullIssue.Index))
}
// parseOwnerAndRepo resolves the ":username" and ":reponame" URL parameters to
// an owner account and one of its repositories. Both return values are nil
// after an error page has been rendered (check c.Written()).
func parseOwnerAndRepo(c *context.Context) (*db.User, *db.Repository) {
	// Look up the owning user first; the repository lookup is scoped to it.
	u, err := db.GetUserByName(c.Params(":username"))
	if err != nil {
		c.NotFoundOrServerError("GetUserByName", errors.IsUserNotExist, err)
		return nil, nil
	}

	r, err := db.GetRepositoryByName(u.ID, c.Params(":reponame"))
	if err != nil {
		c.NotFoundOrServerError("GetRepositoryByName", errors.IsRepoNotExist, err)
		return nil, nil
	}

	return u, r
}
// TriggerTask kicks off webhook delivery and pull-request re-testing for a
// push to the given branch. The caller is authenticated by comparing the
// "secret" query parameter against the MD5 of the repository owner's salt;
// every invalid input is answered with 404 so repository existence is not
// leaked.
func TriggerTask(c *context.Context) {
	pusherID := c.QueryInt64("pusher")
	branch := c.Query("branch")
	secret := c.Query("secret")

	// Reject obviously malformed requests up front.
	if branch == "" || secret == "" || pusherID <= 0 {
		c.Error(404)
		log.Trace("TriggerTask: branch or secret is empty, or pusher ID is not valid")
		return
	}

	owner, repo := parseOwnerAndRepo(c)
	if c.Written() {
		return
	}

	if secret != tool.MD5(owner.Salt) {
		c.Error(404)
		log.Trace("TriggerTask [%s/%s]: invalid secret", owner.Name, repo.Name)
		return
	}

	pusher, err := db.GetUserByID(pusherID)
	if err != nil {
		c.NotFoundOrServerError("GetUserByID", errors.IsUserNotExist, err)
		return
	}

	log.Trace("TriggerTask '%s/%s' by '%s'", repo.Name, branch, pusher.Name)

	// Fire-and-forget: hook delivery and PR re-testing run asynchronously.
	go db.HookQueue.Add(repo.ID)
	go db.AddTestPullRequestTask(pusher, repo.ID, branch, true)
	c.Status(202)
}
|
package sctp
import (
"testing"
"github.com/gotestyourself/gotestyourself/assert"
"github.com/pkg/errors"
)
// TestInitChunk verifies that a captured SCTP INIT packet unmarshals with all
// INIT chunk fields decoded to the values observed on the wire.
func TestInitChunk(t *testing.T) {
	pkt := &Packet{}
	rawPkt := []byte{0x13, 0x88, 0x13, 0x88, 0x00, 0x00, 0x00, 0x00, 0x81, 0x46, 0x9d, 0xfc, 0x01, 0x00, 0x00, 0x56, 0x55,
		0xb9, 0x64, 0xa5, 0x00, 0x02, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0xe8, 0x6d, 0x10, 0x30, 0xc0, 0x00, 0x00, 0x04, 0x80,
		0x08, 0x00, 0x09, 0xc0, 0x0f, 0xc1, 0x80, 0x82, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x24, 0x9f, 0xeb, 0xbb, 0x5c, 0x50,
		0xc9, 0xbf, 0x75, 0x9c, 0xb1, 0x2c, 0x57, 0x4f, 0xa4, 0x5a, 0x51, 0xba, 0x60, 0x17, 0x78, 0x27, 0x94, 0x5c, 0x31, 0xe6,
		0x5d, 0x5b, 0x09, 0x47, 0xe2, 0x22, 0x06, 0x80, 0x04, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x80, 0x03, 0x00, 0x06, 0x80, 0xc1, 0x00, 0x00}

	err := pkt.Unmarshal(rawPkt)
	if err != nil {
		// Fatal, not Error: on failure pkt.Chunks is not populated and the
		// pkt.Chunks[0] access below would panic with an index error.
		t.Fatal(errors.Wrap(err, "Unmarshal failed, has chunk"))
	}

	i, ok := pkt.Chunks[0].(*Init)
	if !ok {
		// Fatal, not Error: a failed type assertion leaves i nil, so the field
		// checks below would panic with a nil-pointer dereference.
		t.Fatal("Failed to cast Chunk -> Init")
	}

	if i.initiateTag != 1438213285 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect initiate tag exp: %d act: %d", 1438213285, i.initiateTag))
	} else if i.advertisedReceiverWindowCredit != 131072 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect advertisedReceiverWindowCredit exp: %d act: %d", 131072, i.advertisedReceiverWindowCredit))
	} else if i.numOutboundStreams != 1024 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect numOutboundStreams tag exp: %d act: %d", 1024, i.numOutboundStreams))
	} else if i.numInboundStreams != 2048 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect numInboundStreams exp: %d act: %d", 2048, i.numInboundStreams))
	} else if i.initialTSN != 3899461680 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect initialTSN exp: %d act: %d", 3899461680, i.initialTSN))
	}
}
// TestInitAck verifies that a captured SCTP INIT ACK packet unmarshals and
// that its first chunk decodes as an InitAck.
func TestInitAck(t *testing.T) {
	pkt := &Packet{}
	rawPkt := []byte{0x13, 0x88, 0x13, 0x88, 0xce, 0x15, 0x79, 0xa2, 0x96, 0x19, 0xe8, 0xb2, 0x02, 0x00, 0x00, 0x1c, 0xeb, 0x81, 0x4e, 0x01, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x50, 0xdf, 0x90, 0xd9, 0x00, 0x07, 0x00, 0x08, 0x94, 0x06, 0x2f, 0x93}
	if err := pkt.Unmarshal(rawPkt); err != nil {
		// Fatal, not Error: on failure pkt.Chunks is not populated and the
		// pkt.Chunks[0] access below would panic with an index error.
		t.Fatal(errors.Wrap(err, "Unmarshal failed, has chunk"))
	}

	// The previous "else if err != nil" branch was dead code: err was already
	// handled above. The message also said "Init" although this asserts InitAck.
	if _, ok := pkt.Chunks[0].(*InitAck); !ok {
		t.Error("Failed to cast Chunk -> InitAck")
	}
}
// TestChromeChunk1 checks that an INIT packet captured from Chrome survives an
// Unmarshal/Marshal round trip byte-for-byte.
func TestChromeChunk1(t *testing.T) {
	pkt := &Packet{}
	rawPkt := []byte{0x13, 0x88, 0x13, 0x88, 0x00, 0x00, 0x00, 0x00, 0xbc, 0xb3, 0x45, 0xa2, 0x01, 0x00, 0x00, 0x56, 0xce, 0x15, 0x79, 0xa2, 0x00, 0x02, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x94, 0x57, 0x95, 0xc0, 0xc0, 0x00, 0x00, 0x04, 0x80, 0x08, 0x00, 0x09, 0xc0, 0x0f, 0xc1, 0x80, 0x82, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x24, 0xff, 0x5c, 0x49, 0x19, 0x4a, 0x94, 0xe8, 0x2a, 0xec, 0x58, 0x55, 0x62, 0x29, 0x1f, 0x8e, 0x23, 0xcd, 0x7c, 0xe8, 0x46, 0xba, 0x58, 0x1b, 0x3d, 0xab, 0xd7, 0x7e, 0x50, 0xf2, 0x41, 0xb1, 0x2e, 0x80, 0x04, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x80, 0x03, 0x00, 0x06, 0x80, 0xc1, 0x00, 0x00}
	if err := pkt.Unmarshal(rawPkt); err != nil {
		// Fatal, not Error: remarshaling a partially decoded packet is meaningless.
		t.Fatal(errors.Wrap(err, "Unmarshal failed, has chunk"))
	}

	rawPkt2, err := pkt.Marshal()
	if err != nil {
		// Fatal, not Error: comparing a nil rawPkt2 below would only obscure this failure.
		t.Fatal(errors.Wrap(err, "Remarshal failed"))
	}
	assert.DeepEqual(t, rawPkt, rawPkt2)
}
// TestChromeChunk2 checks that a large multi-chunk SCTP packet captured from
// Chrome (INIT ACK with a state cookie) survives an Unmarshal/Marshal round
// trip byte-for-byte.
func TestChromeChunk2(t *testing.T) {
	pkt := &Packet{}
	rawPkt := []byte{0x13, 0x88, 0x13, 0x88, 0xce, 0x15, 0x79, 0xa2, 0xb5, 0xdb, 0x2d, 0x93, 0x02, 0x00, 0x01, 0x90, 0x9b, 0xd5, 0xb3, 0x6f, 0x00, 0x02, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0xef, 0xb4, 0x72, 0x87, 0xc0, 0x00, 0x00, 0x04, 0x80, 0x08, 0x00, 0x09, 0xc0, 0x0f, 0xc1, 0x80, 0x82, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x24, 0x2e, 0xf9, 0x9c, 0x10, 0x63, 0x72, 0xed, 0x0d, 0x33, 0xc2, 0xdc, 0x7f, 0x9f, 0xd7, 0xef, 0x1b, 0xc9, 0xc4, 0xa7, 0x41, 0x9a, 0x07, 0x68, 0x6b, 0x66, 0xfb, 0x6a, 0x4e, 0x32, 0x5d, 0xe4, 0x25, 0x80, 0x04, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x80, 0x03, 0x00, 0x06, 0x80, 0xc1, 0x00, 0x00, 0x00, 0x07, 0x01, 0x38, 0x4b, 0x41, 0x4d, 0x45, 0x2d, 0x42, 0x53, 0x44, 0x20, 0x31, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x1e, 0x49, 0x5b, 0x00, 0x00, 0x00, 0x00, 0xd2, 0x42, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xea, 0x00, 0x00, 0xc4, 0x13, 0x3d, 0xe9, 0x86, 0xb1, 0x85, 0x75, 0xa2, 0x79, 0x15, 0xce, 0x9b, 0xd5, 0xb3, 0x6f, 0x20, 0xe0, 0x9f, 0x89, 0xe0, 0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0xe0, 0x9f, 0x89, 0xe0, 0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x88, 0x13, 0x88, 0x00, 0x00, 0x01, 0x00, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x56, 0xce, 0x15, 0x79, 0xa2, 0x00, 0x02, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x94, 0x57, 0x95, 0xc0, 0xc0, 0x00, 0x00, 0x04, 0x80, 0x08, 0x00, 0x09, 0xc0, 0x0f, 0xc1, 0x80, 0x82, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x24, 0xff, 0x5c, 0x49, 0x19, 0x4a, 0x94, 0xe8, 0x2a, 0xec, 0x58, 0x55, 0x62, 0x29, 0x1f, 0x8e, 0x23, 0xcd, 0x7c, 0xe8, 0x46, 0xba, 0x58, 0x1b, 0x3d, 0xab, 0xd7, 0x7e, 0x50, 0xf2, 0x41, 0xb1, 0x2e, 0x80, 0x04, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x80, 0x03, 0x00, 0x06, 0x80, 0xc1, 0x00, 0x00, 0x02, 0x00, 0x01, 0x90, 0x9b, 0xd5, 0xb3, 0x6f, 0x00, 0x02, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0xef, 0xb4, 0x72, 0x87, 0xc0, 0x00, 0x00, 0x04, 0x80, 0x08,
		0x00, 0x09, 0xc0, 0x0f, 0xc1, 0x80, 0x82, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x24, 0x2e, 0xf9, 0x9c, 0x10, 0x63, 0x72, 0xed, 0x0d, 0x33, 0xc2, 0xdc, 0x7f, 0x9f, 0xd7, 0xef, 0x1b, 0xc9, 0xc4, 0xa7, 0x41, 0x9a, 0x07, 0x68, 0x6b, 0x66, 0xfb, 0x6a, 0x4e, 0x32, 0x5d, 0xe4, 0x25, 0x80, 0x04, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x80, 0x03, 0x00, 0x06, 0x80, 0xc1, 0x00, 0x00, 0xca, 0x0c, 0x21, 0x11, 0xce, 0xf4, 0xfc, 0xb3, 0x66, 0x99, 0x4f, 0xdb, 0x4f, 0x95, 0x6b, 0x6f, 0x3b, 0xb1, 0xdb, 0x5a}
	if err := pkt.Unmarshal(rawPkt); err != nil {
		// Fatal, not Error: remarshaling a partially decoded packet is meaningless.
		t.Fatal(errors.Wrap(err, "Unmarshal failed, has chunk"))
	}

	rawPkt2, err := pkt.Marshal()
	if err != nil {
		// Fatal, not Error: comparing a nil rawPkt2 below would only obscure this failure.
		t.Fatal(errors.Wrap(err, "Remarshal failed"))
	}
	assert.DeepEqual(t, rawPkt, rawPkt2)
}
// TestInitMarshalUnmarshal round-trips an INIT ACK chunk through Marshal and
// Unmarshal and checks that every field survives unchanged.
func TestInitMarshalUnmarshal(t *testing.T) {
	p := &Packet{}
	p.DestinationPort = 1
	p.SourcePort = 1
	p.VerificationTag = 123

	initAck := &InitAck{}
	initAck.initialTSN = 123
	initAck.numOutboundStreams = 1
	initAck.numInboundStreams = 1
	initAck.initiateTag = 123
	initAck.advertisedReceiverWindowCredit = 1024
	initAck.params = []Param{NewRandomStateCookie()}
	p.Chunks = []Chunk{initAck}

	rawPkt, err := p.Marshal()
	if err != nil {
		// Fatal, not Error: without marshaled bytes there is nothing to unmarshal.
		t.Fatal(errors.Wrap(err, "Failed to marshal packet"))
	}

	pkt := &Packet{}
	if err = pkt.Unmarshal(rawPkt); err != nil {
		// Fatal, not Error: on failure pkt.Chunks is not populated and the
		// pkt.Chunks[0] access below would panic with an index error.
		t.Fatal(errors.Wrap(err, "Unmarshal failed, has chunk"))
	}

	i, ok := pkt.Chunks[0].(*InitAck)
	if !ok {
		// Fatal, not Error: a failed type assertion leaves i nil, so the field
		// checks below would panic with a nil-pointer dereference.
		t.Fatal("Failed to cast Chunk -> InitAck")
	}

	if i.initiateTag != 123 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect initiate tag exp: %d act: %d", 123, i.initiateTag))
	} else if i.advertisedReceiverWindowCredit != 1024 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect advertisedReceiverWindowCredit exp: %d act: %d", 1024, i.advertisedReceiverWindowCredit))
	} else if i.numOutboundStreams != 1 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect numOutboundStreams tag exp: %d act: %d", 1, i.numOutboundStreams))
	} else if i.numInboundStreams != 1 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect numInboundStreams exp: %d act: %d", 1, i.numInboundStreams))
	} else if i.initialTSN != 123 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect initialTSN exp: %d act: %d", 123, i.initialTSN))
	}
}
Fix gotestyourself import
package sctp
import (
"testing"
"github.com/pkg/errors"
"gotest.tools/assert"
)
// TestInitChunk verifies that a captured SCTP INIT packet unmarshals with all
// INIT chunk fields decoded to the values observed on the wire.
func TestInitChunk(t *testing.T) {
	pkt := &Packet{}
	rawPkt := []byte{0x13, 0x88, 0x13, 0x88, 0x00, 0x00, 0x00, 0x00, 0x81, 0x46, 0x9d, 0xfc, 0x01, 0x00, 0x00, 0x56, 0x55,
		0xb9, 0x64, 0xa5, 0x00, 0x02, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0xe8, 0x6d, 0x10, 0x30, 0xc0, 0x00, 0x00, 0x04, 0x80,
		0x08, 0x00, 0x09, 0xc0, 0x0f, 0xc1, 0x80, 0x82, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x24, 0x9f, 0xeb, 0xbb, 0x5c, 0x50,
		0xc9, 0xbf, 0x75, 0x9c, 0xb1, 0x2c, 0x57, 0x4f, 0xa4, 0x5a, 0x51, 0xba, 0x60, 0x17, 0x78, 0x27, 0x94, 0x5c, 0x31, 0xe6,
		0x5d, 0x5b, 0x09, 0x47, 0xe2, 0x22, 0x06, 0x80, 0x04, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x80, 0x03, 0x00, 0x06, 0x80, 0xc1, 0x00, 0x00}

	err := pkt.Unmarshal(rawPkt)
	if err != nil {
		// Fatal, not Error: on failure pkt.Chunks is not populated and the
		// pkt.Chunks[0] access below would panic with an index error.
		t.Fatal(errors.Wrap(err, "Unmarshal failed, has chunk"))
	}

	i, ok := pkt.Chunks[0].(*Init)
	if !ok {
		// Fatal, not Error: a failed type assertion leaves i nil, so the field
		// checks below would panic with a nil-pointer dereference.
		t.Fatal("Failed to cast Chunk -> Init")
	}

	if i.initiateTag != 1438213285 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect initiate tag exp: %d act: %d", 1438213285, i.initiateTag))
	} else if i.advertisedReceiverWindowCredit != 131072 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect advertisedReceiverWindowCredit exp: %d act: %d", 131072, i.advertisedReceiverWindowCredit))
	} else if i.numOutboundStreams != 1024 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect numOutboundStreams tag exp: %d act: %d", 1024, i.numOutboundStreams))
	} else if i.numInboundStreams != 2048 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect numInboundStreams exp: %d act: %d", 2048, i.numInboundStreams))
	} else if i.initialTSN != 3899461680 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect initialTSN exp: %d act: %d", 3899461680, i.initialTSN))
	}
}
// TestInitAck verifies that a captured SCTP INIT ACK packet unmarshals and
// that its first chunk decodes as an InitAck.
func TestInitAck(t *testing.T) {
	pkt := &Packet{}
	rawPkt := []byte{0x13, 0x88, 0x13, 0x88, 0xce, 0x15, 0x79, 0xa2, 0x96, 0x19, 0xe8, 0xb2, 0x02, 0x00, 0x00, 0x1c, 0xeb, 0x81, 0x4e, 0x01, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x50, 0xdf, 0x90, 0xd9, 0x00, 0x07, 0x00, 0x08, 0x94, 0x06, 0x2f, 0x93}
	if err := pkt.Unmarshal(rawPkt); err != nil {
		// Fatal, not Error: on failure pkt.Chunks is not populated and the
		// pkt.Chunks[0] access below would panic with an index error.
		t.Fatal(errors.Wrap(err, "Unmarshal failed, has chunk"))
	}

	// The previous "else if err != nil" branch was dead code: err was already
	// handled above. The message also said "Init" although this asserts InitAck.
	if _, ok := pkt.Chunks[0].(*InitAck); !ok {
		t.Error("Failed to cast Chunk -> InitAck")
	}
}
// TestChromeChunk1 checks that an INIT packet captured from Chrome survives an
// Unmarshal/Marshal round trip byte-for-byte.
func TestChromeChunk1(t *testing.T) {
	pkt := &Packet{}
	rawPkt := []byte{0x13, 0x88, 0x13, 0x88, 0x00, 0x00, 0x00, 0x00, 0xbc, 0xb3, 0x45, 0xa2, 0x01, 0x00, 0x00, 0x56, 0xce, 0x15, 0x79, 0xa2, 0x00, 0x02, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x94, 0x57, 0x95, 0xc0, 0xc0, 0x00, 0x00, 0x04, 0x80, 0x08, 0x00, 0x09, 0xc0, 0x0f, 0xc1, 0x80, 0x82, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x24, 0xff, 0x5c, 0x49, 0x19, 0x4a, 0x94, 0xe8, 0x2a, 0xec, 0x58, 0x55, 0x62, 0x29, 0x1f, 0x8e, 0x23, 0xcd, 0x7c, 0xe8, 0x46, 0xba, 0x58, 0x1b, 0x3d, 0xab, 0xd7, 0x7e, 0x50, 0xf2, 0x41, 0xb1, 0x2e, 0x80, 0x04, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x80, 0x03, 0x00, 0x06, 0x80, 0xc1, 0x00, 0x00}
	if err := pkt.Unmarshal(rawPkt); err != nil {
		// Fatal, not Error: remarshaling a partially decoded packet is meaningless.
		t.Fatal(errors.Wrap(err, "Unmarshal failed, has chunk"))
	}

	rawPkt2, err := pkt.Marshal()
	if err != nil {
		// Fatal, not Error: comparing a nil rawPkt2 below would only obscure this failure.
		t.Fatal(errors.Wrap(err, "Remarshal failed"))
	}
	assert.DeepEqual(t, rawPkt, rawPkt2)
}
// TestChromeChunk2 checks that a large multi-chunk SCTP packet captured from
// Chrome (INIT ACK with a state cookie) survives an Unmarshal/Marshal round
// trip byte-for-byte.
func TestChromeChunk2(t *testing.T) {
	pkt := &Packet{}
	rawPkt := []byte{0x13, 0x88, 0x13, 0x88, 0xce, 0x15, 0x79, 0xa2, 0xb5, 0xdb, 0x2d, 0x93, 0x02, 0x00, 0x01, 0x90, 0x9b, 0xd5, 0xb3, 0x6f, 0x00, 0x02, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0xef, 0xb4, 0x72, 0x87, 0xc0, 0x00, 0x00, 0x04, 0x80, 0x08, 0x00, 0x09, 0xc0, 0x0f, 0xc1, 0x80, 0x82, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x24, 0x2e, 0xf9, 0x9c, 0x10, 0x63, 0x72, 0xed, 0x0d, 0x33, 0xc2, 0xdc, 0x7f, 0x9f, 0xd7, 0xef, 0x1b, 0xc9, 0xc4, 0xa7, 0x41, 0x9a, 0x07, 0x68, 0x6b, 0x66, 0xfb, 0x6a, 0x4e, 0x32, 0x5d, 0xe4, 0x25, 0x80, 0x04, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x80, 0x03, 0x00, 0x06, 0x80, 0xc1, 0x00, 0x00, 0x00, 0x07, 0x01, 0x38, 0x4b, 0x41, 0x4d, 0x45, 0x2d, 0x42, 0x53, 0x44, 0x20, 0x31, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x1e, 0x49, 0x5b, 0x00, 0x00, 0x00, 0x00, 0xd2, 0x42, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xea, 0x00, 0x00, 0xc4, 0x13, 0x3d, 0xe9, 0x86, 0xb1, 0x85, 0x75, 0xa2, 0x79, 0x15, 0xce, 0x9b, 0xd5, 0xb3, 0x6f, 0x20, 0xe0, 0x9f, 0x89, 0xe0, 0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0xe0, 0x9f, 0x89, 0xe0, 0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x88, 0x13, 0x88, 0x00, 0x00, 0x01, 0x00, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x56, 0xce, 0x15, 0x79, 0xa2, 0x00, 0x02, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x94, 0x57, 0x95, 0xc0, 0xc0, 0x00, 0x00, 0x04, 0x80, 0x08, 0x00, 0x09, 0xc0, 0x0f, 0xc1, 0x80, 0x82, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x24, 0xff, 0x5c, 0x49, 0x19, 0x4a, 0x94, 0xe8, 0x2a, 0xec, 0x58, 0x55, 0x62, 0x29, 0x1f, 0x8e, 0x23, 0xcd, 0x7c, 0xe8, 0x46, 0xba, 0x58, 0x1b, 0x3d, 0xab, 0xd7, 0x7e, 0x50, 0xf2, 0x41, 0xb1, 0x2e, 0x80, 0x04, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x80, 0x03, 0x00, 0x06, 0x80, 0xc1, 0x00, 0x00, 0x02, 0x00, 0x01, 0x90, 0x9b, 0xd5, 0xb3, 0x6f, 0x00, 0x02, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0xef, 0xb4, 0x72, 0x87, 0xc0, 0x00, 0x00, 0x04, 0x80, 0x08,
		0x00, 0x09, 0xc0, 0x0f, 0xc1, 0x80, 0x82, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x24, 0x2e, 0xf9, 0x9c, 0x10, 0x63, 0x72, 0xed, 0x0d, 0x33, 0xc2, 0xdc, 0x7f, 0x9f, 0xd7, 0xef, 0x1b, 0xc9, 0xc4, 0xa7, 0x41, 0x9a, 0x07, 0x68, 0x6b, 0x66, 0xfb, 0x6a, 0x4e, 0x32, 0x5d, 0xe4, 0x25, 0x80, 0x04, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x80, 0x03, 0x00, 0x06, 0x80, 0xc1, 0x00, 0x00, 0xca, 0x0c, 0x21, 0x11, 0xce, 0xf4, 0xfc, 0xb3, 0x66, 0x99, 0x4f, 0xdb, 0x4f, 0x95, 0x6b, 0x6f, 0x3b, 0xb1, 0xdb, 0x5a}
	if err := pkt.Unmarshal(rawPkt); err != nil {
		// Fatal, not Error: remarshaling a partially decoded packet is meaningless.
		t.Fatal(errors.Wrap(err, "Unmarshal failed, has chunk"))
	}

	rawPkt2, err := pkt.Marshal()
	if err != nil {
		// Fatal, not Error: comparing a nil rawPkt2 below would only obscure this failure.
		t.Fatal(errors.Wrap(err, "Remarshal failed"))
	}
	assert.DeepEqual(t, rawPkt, rawPkt2)
}
// TestInitMarshalUnmarshal round-trips an INIT ACK chunk through Marshal and
// Unmarshal and checks that every field survives unchanged.
func TestInitMarshalUnmarshal(t *testing.T) {
	p := &Packet{}
	p.DestinationPort = 1
	p.SourcePort = 1
	p.VerificationTag = 123

	initAck := &InitAck{}
	initAck.initialTSN = 123
	initAck.numOutboundStreams = 1
	initAck.numInboundStreams = 1
	initAck.initiateTag = 123
	initAck.advertisedReceiverWindowCredit = 1024
	initAck.params = []Param{NewRandomStateCookie()}
	p.Chunks = []Chunk{initAck}

	rawPkt, err := p.Marshal()
	if err != nil {
		// Fatal, not Error: without marshaled bytes there is nothing to unmarshal.
		t.Fatal(errors.Wrap(err, "Failed to marshal packet"))
	}

	pkt := &Packet{}
	if err = pkt.Unmarshal(rawPkt); err != nil {
		// Fatal, not Error: on failure pkt.Chunks is not populated and the
		// pkt.Chunks[0] access below would panic with an index error.
		t.Fatal(errors.Wrap(err, "Unmarshal failed, has chunk"))
	}

	i, ok := pkt.Chunks[0].(*InitAck)
	if !ok {
		// Fatal, not Error: a failed type assertion leaves i nil, so the field
		// checks below would panic with a nil-pointer dereference.
		t.Fatal("Failed to cast Chunk -> InitAck")
	}

	if i.initiateTag != 123 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect initiate tag exp: %d act: %d", 123, i.initiateTag))
	} else if i.advertisedReceiverWindowCredit != 1024 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect advertisedReceiverWindowCredit exp: %d act: %d", 1024, i.advertisedReceiverWindowCredit))
	} else if i.numOutboundStreams != 1 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect numOutboundStreams tag exp: %d act: %d", 1, i.numOutboundStreams))
	} else if i.numInboundStreams != 1 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect numInboundStreams exp: %d act: %d", 1, i.numInboundStreams))
	} else if i.initialTSN != 123 {
		t.Error(errors.Errorf("Unmarshal passed for SCTP packet, but got incorrect initialTSN exp: %d act: %d", 123, i.initialTSN))
	}
}
|
package uvm
import (
"fmt"
"io"
"net"
"os"
"path/filepath"
"strings"
"github.com/Microsoft/hcsshim/internal/gcs"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/pkg/guid"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/mergemaps"
hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/osversion"
"github.com/sirupsen/logrus"
)
// PreferredRootFSType indicates how the utility VM's root file system is
// supplied: as an initrd image, or as a VHD mounted over a VPMem device.
type PreferredRootFSType int

const (
	// PreferredRootFSTypeInitRd boots the UVM from an initrd image.
	PreferredRootFSTypeInitRd PreferredRootFSType = iota
	// PreferredRootFSTypeVHD boots the UVM from a root-FS VHD on VPMem device 0.
	PreferredRootFSTypeVHD

	// linuxLogVsockPort is the vsock port used to forward the guest program's
	// stdout/stderr output back to the host (see CreateLCOW).
	linuxLogVsockPort = 109
)

// OutputHandler is used to process the output from the program run in the UVM.
type OutputHandler func(io.Reader)

const (
	// InitrdFile is the default file name for an initrd.img used to boot LCOW.
	InitrdFile = "initrd.img"
	// VhdFile is the default file name for a rootfs.vhd used to boot LCOW.
	VhdFile = "rootfs.vhd"
	// KernelFile is the default file name for a kernel used to boot LCOW.
	KernelFile = "kernel"
	// UncompressedKernelFile is the default file name for an uncompressed
	// kernel used to boot LCOW with KernelDirect.
	UncompressedKernelFile = "vmlinux"
)
// OptionsLCOW are the set of options passed to CreateLCOW() to create a utility vm.
type OptionsLCOW struct {
	*Options

	BootFilesPath         string              // Folder in which kernel and root file system reside. Defaults to \Program Files\Linux Containers
	KernelFile            string              // Filename under `BootFilesPath` for the kernel. Defaults to `kernel`
	KernelDirect          bool                // Skip UEFI and boot directly to `kernel`
	RootFSFile            string              // Filename under `BootFilesPath` for the UVMs root file system. Defaults to `InitrdFile`
	KernelBootOptions     string              // Additional boot options for the kernel
	EnableGraphicsConsole bool                // If true, enable a graphics console for the utility VM
	ConsolePipe           string              // The named pipe path to use for the serial console. eg \\.\pipe\vmpipe
	SCSIControllerCount   uint32              // The number of SCSI controllers. Defaults to 1. Currently we only support 0 or 1.
	UseGuestConnection    bool                // Whether the HCS should connect to the UVM's GCS. Defaults to true
	ExecCommandLine       string              // The command line to exec from init. Defaults to GCS
	ForwardStdout         bool                // Whether stdout will be forwarded from the executed program. Defaults to false
	ForwardStderr         bool                // Whether stderr will be forwarded from the executed program. Defaults to true
	OutputHandler         OutputHandler       `json:"-"` // Controls how output received over HVSocket from the UVM is handled. Defaults to parsing output as logrus messages. Only consulted when ForwardStdout or ForwardStderr is set.
	VPMemDeviceCount      uint32              // Number of VPMem devices. Defaults to `DefaultVPMEMCount`. Limit at 128. If booting UVM from VHD, device 0 is taken.
	VPMemSizeBytes        uint64              // Size of the VPMem devices. Defaults to `DefaultVPMemSizeBytes`. Must be a multiple of 4096.
	PreferredRootFSType   PreferredRootFSType // If `KernelFile` is `InitrdFile` use `PreferredRootFSTypeInitRd`. If `KernelFile` is `VhdFile` use `PreferredRootFSTypeVHD`
}
// defaultLCOWOSBootFilesPath returns the default path used to locate the LCOW
// OS kernel and root FS files. This default is the subdirectory
// `LinuxBootFiles` in the directory of the executable that started the current
// process; or, if it does not exist, `%ProgramFiles%\Linux Containers`.
func defaultLCOWOSBootFilesPath() string {
	bootFilesDir := filepath.Join(filepath.Dir(os.Args[0]), "LinuxBootFiles")
	if _, err := os.Stat(bootFilesDir); err != nil {
		// No per-executable boot files directory; fall back to the system-wide default.
		return filepath.Join(os.Getenv("ProgramFiles"), "Linux Containers")
	}
	return bootFilesDir
}
// NewDefaultOptionsLCOW creates the default options for a bootable version of
// LCOW.
//
// `id` the ID of the compute system. If not passed will generate a new GUID.
//
// `owner` the owner of the compute system. If not passed will use the
// executable files name.
func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW {
	// Use KernelDirect boot by default on all builds that support it.
	kernelDirectSupported := osversion.Get().Build >= 18286
	opts := &OptionsLCOW{
		Options:               newDefaultOptions(id, owner),
		BootFilesPath:         defaultLCOWOSBootFilesPath(),
		KernelFile:            KernelFile,
		KernelDirect:          kernelDirectSupported,
		RootFSFile:            InitrdFile,
		KernelBootOptions:     "",
		EnableGraphicsConsole: false,
		ConsolePipe:           "",
		SCSIControllerCount:   1,
		UseGuestConnection:    true,
		ExecCommandLine:       fmt.Sprintf("/bin/gcs -v4 -log-format json -loglevel %s", logrus.StandardLogger().Level.String()),
		ForwardStdout:         false,
		ForwardStderr:         true,
		OutputHandler:         parseLogrus(id),
		VPMemDeviceCount:      DefaultVPMEMCount,
		VPMemSizeBytes:        DefaultVPMemSizeBytes,
		PreferredRootFSType:   PreferredRootFSTypeInitRd,
	}

	if _, err := os.Stat(filepath.Join(opts.BootFilesPath, VhdFile)); err == nil {
		// We have a rootfs.vhd in the boot files path. Use it over an initrd.img
		opts.RootFSFile = VhdFile
		opts.PreferredRootFSType = PreferredRootFSTypeVHD
	}

	if kernelDirectSupported {
		// KernelDirect supports uncompressed kernel if the kernel is present.
		// Default to uncompressed if on box. NOTE: If `kernel` is already
		// uncompressed and simply named 'kernel' it will still be used
		// uncompressed automatically.
		if _, err := os.Stat(filepath.Join(opts.BootFilesPath, UncompressedKernelFile)); err == nil {
			opts.KernelFile = UncompressedKernelFile
		}
	}
	return opts
}
// CreateLCOW creates an HCS compute system representing a utility VM.
//
// The returned UtilityVM is created but not started. On any error the
// partially-constructed VM is closed before returning (see the deferred
// cleanup below).
func CreateLCOW(opts *OptionsLCOW) (_ *UtilityVM, err error) {
	op := "uvm::CreateLCOW"
	log := logrus.WithFields(logrus.Fields{
		logfields.UVMID: opts.ID,
	})
	log.WithField("options", fmt.Sprintf("%+v", opts)).Debug(op + " - Begin Operation")
	// The named return value err lets this defer log the final outcome.
	defer func() {
		if err != nil {
			log.Data[logrus.ErrorKey] = err
			log.Error(op + " - End Operation - Error")
		} else {
			log.Debug(op + " - End Operation - Success")
		}
	}()

	if opts.ID == "" {
		g, err := guid.NewV4()
		if err != nil {
			return nil, err
		}
		opts.ID = g.String()
	}

	// We dont serialize OutputHandler so if it is missing we need to put it back to the default.
	if opts.OutputHandler == nil {
		opts.OutputHandler = parseLogrus(opts.ID)
	}

	uvm := &UtilityVM{
		id:                  opts.ID,
		owner:               opts.Owner,
		operatingSystem:     "linux",
		scsiControllerCount: opts.SCSIControllerCount,
		vpmemMaxCount:       opts.VPMemDeviceCount,
		vpmemMaxSizeBytes:   opts.VPMemSizeBytes,
	}
	// Release any partially-created resources if a later step fails.
	defer func() {
		if err != nil {
			uvm.Close()
		}
	}()

	// To maintain compatability with Docker we need to automatically downgrade
	// a user CPU count if the setting is not possible.
	uvm.normalizeProcessorCount(opts.ProcessorCount)

	// Align the requested memory size.
	memorySizeInMB := uvm.normalizeMemorySize(opts.MemorySizeInMB)

	// Both the kernel and the root-FS file must exist on the host before we
	// try to build the compute-system document around them.
	kernelFullPath := filepath.Join(opts.BootFilesPath, opts.KernelFile)
	if _, err := os.Stat(kernelFullPath); os.IsNotExist(err) {
		return nil, fmt.Errorf("kernel: '%s' not found", kernelFullPath)
	}
	rootfsFullPath := filepath.Join(opts.BootFilesPath, opts.RootFSFile)
	if _, err := os.Stat(rootfsFullPath); os.IsNotExist(err) {
		return nil, fmt.Errorf("boot file: '%s' not found", rootfsFullPath)
	}

	if opts.SCSIControllerCount > 1 {
		return nil, fmt.Errorf("SCSI controller count must be 0 or 1") // Future extension here for up to 4
	}
	if opts.VPMemDeviceCount > MaxVPMEMCount {
		return nil, fmt.Errorf("vpmem device count cannot be greater than %d", MaxVPMEMCount)
	}
	if uvm.vpmemMaxCount > 0 {
		if opts.VPMemSizeBytes%4096 != 0 {
			return nil, fmt.Errorf("opts.VPMemSizeBytes must be a multiple of 4096")
		}
	} else {
		// A VHD root FS is attached via VPMem device 0 (see the switch below),
		// so it cannot be used with zero VPMem devices.
		if opts.PreferredRootFSType == PreferredRootFSTypeVHD {
			return nil, fmt.Errorf("PreferredRootFSTypeVHD requires at least one VPMem device")
		}
	}
	if opts.KernelDirect && osversion.Get().Build < 18286 {
		return nil, fmt.Errorf("KernelDirectBoot is not support on builds older than 18286")
	}

	doc := &hcsschema.ComputeSystem{
		Owner:                             uvm.owner,
		SchemaVersion:                     schemaversion.SchemaV21(),
		ShouldTerminateOnLastHandleClosed: true,
		VirtualMachine: &hcsschema.VirtualMachine{
			StopOnReset: true,
			Chipset:     &hcsschema.Chipset{},
			ComputeTopology: &hcsschema.Topology{
				Memory: &hcsschema.Memory2{
					SizeInMB:             memorySizeInMB,
					AllowOvercommit:      opts.AllowOvercommit,
					EnableDeferredCommit: opts.EnableDeferredCommit,
				},
				Processor: &hcsschema.Processor2{
					Count:  uvm.processorCount,
					Limit:  opts.ProcessorLimit,
					Weight: opts.ProcessorWeight,
				},
			},
			Devices: &hcsschema.Devices{
				HvSocket: &hcsschema.HvSocket2{
					HvSocketConfig: &hcsschema.HvSocketSystemConfig{
						// Allow administrators and SYSTEM to bind to vsock sockets
						// so that we can create a GCS log socket.
						DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
					},
				},
				Plan9: &hcsschema.Plan9{},
			},
		},
	}

	// Handle StorageQoS if set
	if opts.StorageQoSBandwidthMaximum > 0 || opts.StorageQoSIopsMaximum > 0 {
		doc.VirtualMachine.StorageQoS = &hcsschema.StorageQoS{
			IopsMaximum:      opts.StorageQoSIopsMaximum,
			BandwidthMaximum: opts.StorageQoSBandwidthMaximum,
		}
	}

	// Only let the HCS own the guest connection when an external one was not
	// requested; the external case is handled after create() below.
	if opts.UseGuestConnection && !opts.ExternalGuestConnection {
		doc.VirtualMachine.GuestConnection = &hcsschema.GuestConnection{
			UseVsock:            true,
			UseConnectedSuspend: true,
		}
	}

	if uvm.scsiControllerCount > 0 {
		// TODO: JTERRY75 - this should enumerate scsicount and add an entry per value.
		doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{
			"0": {
				Attachments: make(map[string]hcsschema.Attachment),
			},
		}
	}
	if uvm.vpmemMaxCount > 0 {
		doc.VirtualMachine.Devices.VirtualPMem = &hcsschema.VirtualPMemController{
			MaximumCount:     uvm.vpmemMaxCount,
			MaximumSizeBytes: uvm.vpmemMaxSizeBytes,
		}
	}

	// Build the kernel command line according to how the root FS is supplied.
	var kernelArgs string
	switch opts.PreferredRootFSType {
	case PreferredRootFSTypeInitRd:
		if !opts.KernelDirect {
			// With UEFI boot the initrd is loaded by name from the VMBus file
			// system; KernelDirect passes it via InitRdPath below instead.
			kernelArgs = "initrd=/" + opts.RootFSFile
		}
	case PreferredRootFSTypeVHD:
		// Support for VPMem VHD(X) booting rather than initrd..
		kernelArgs = "root=/dev/pmem0 ro rootwait init=/init"
		imageFormat := "Vhd1"
		if strings.ToLower(filepath.Ext(opts.RootFSFile)) == "vhdx" {
			imageFormat = "Vhdx"
		}
		doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{
			"0": {
				HostPath:    rootfsFullPath,
				ReadOnly:    true,
				ImageFormat: imageFormat,
			},
		}
		// Add to our internal structure
		uvm.vpmemDevices[0] = vpmemInfo{
			hostPath: opts.RootFSFile,
			uvmPath:  "/",
			refCount: 1,
		}
	}

	vmDebugging := false
	if opts.ConsolePipe != "" {
		vmDebugging = true
		kernelArgs += " 8250_core.nr_uarts=1 8250_core.skip_txen_test=1 console=ttyS0,115200"
		doc.VirtualMachine.Devices.ComPorts = map[string]hcsschema.ComPort{
			"0": { // Which is actually COM1
				NamedPipe: opts.ConsolePipe,
			},
		}
	} else {
		kernelArgs += " 8250_core.nr_uarts=0"
	}

	if opts.EnableGraphicsConsole {
		vmDebugging = true
		kernelArgs += " console=tty"
		doc.VirtualMachine.Devices.Keyboard = &hcsschema.Keyboard{}
		doc.VirtualMachine.Devices.EnhancedModeVideo = &hcsschema.EnhancedModeVideo{}
		doc.VirtualMachine.Devices.VideoMonitor = &hcsschema.VideoMonitor{}
	}

	if !vmDebugging {
		// Terminate the VM if there is a kernel panic.
		kernelArgs += " panic=-1 quiet"
	}

	if opts.KernelBootOptions != "" {
		kernelArgs += " " + opts.KernelBootOptions
	}

	// With default options, run GCS with stderr pointing to the vsock port
	// created below in order to forward guest logs to logrus.
	initArgs := "/bin/vsockexec"

	if opts.ForwardStdout {
		initArgs += fmt.Sprintf(" -o %d", linuxLogVsockPort)
	}

	if opts.ForwardStderr {
		initArgs += fmt.Sprintf(" -e %d", linuxLogVsockPort)
	}

	initArgs += " " + opts.ExecCommandLine

	if vmDebugging {
		// Launch a shell on the console.
		initArgs = `sh -c "` + initArgs + ` & exec sh"`
	}

	kernelArgs += ` pci=off brd.rd_nr=0 pmtmr=0 -- ` + initArgs

	if !opts.KernelDirect {
		doc.VirtualMachine.Chipset.Uefi = &hcsschema.Uefi{
			BootThis: &hcsschema.UefiBootEntry{
				DevicePath:    `\` + opts.KernelFile,
				DeviceType:    "VmbFs",
				VmbFsRootPath: opts.BootFilesPath,
				OptionalData:  kernelArgs,
			},
		}
	} else {
		doc.VirtualMachine.Chipset.LinuxKernelDirect = &hcsschema.LinuxKernelDirect{
			KernelFilePath: kernelFullPath,
			KernelCmdLine:  kernelArgs,
		}
		if opts.PreferredRootFSType == PreferredRootFSTypeInitRd {
			doc.VirtualMachine.Chipset.LinuxKernelDirect.InitRdPath = rootfsFullPath
		}
	}

	// Allow callers to merge arbitrary extra JSON into the document.
	fullDoc, err := mergemaps.MergeJSON(doc, ([]byte)(opts.AdditionHCSDocumentJSON))
	if err != nil {
		return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", opts.AdditionHCSDocumentJSON, err)
	}

	err = uvm.create(fullDoc)
	if err != nil {
		return nil, err
	}

	// Create a socket that the executed program can send to. This is usually
	// used by GCS to send log data.
	if opts.ForwardStdout || opts.ForwardStderr {
		uvm.outputHandler = opts.OutputHandler
		uvm.outputProcessingDone = make(chan struct{})
		uvm.outputListener, err = uvm.listenVsock(linuxLogVsockPort)
		if err != nil {
			return nil, err
		}
	}

	// For an external guest connection, listen for the GCS to dial back.
	if opts.UseGuestConnection && opts.ExternalGuestConnection {
		l, err := uvm.listenVsock(gcs.LinuxGcsVsockPort)
		if err != nil {
			return nil, err
		}
		uvm.gcListener = l
	}

	return uvm, nil
}
// listenVsock opens a Hyper-V socket (vsock) listener on the given port,
// addressed to this utility VM via its runtime ID.
func (uvm *UtilityVM) listenVsock(port uint32) (net.Listener, error) {
	return winio.ListenHvsock(&winio.HvsockAddr{
		VMID:      uvm.runtimeID,
		ServiceID: winio.VsockServiceID(port),
	})
}
uvm: Enable external guest connection for LCOW
package uvm
import (
"fmt"
"io"
"net"
"os"
"path/filepath"
"strings"
"github.com/Microsoft/hcsshim/internal/gcs"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/pkg/guid"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/mergemaps"
hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/osversion"
"github.com/sirupsen/logrus"
)
// PreferredRootFSType indicates how the utility VM's root file system is
// supplied: as an initrd image, or as a VHD mounted over a VPMem device.
type PreferredRootFSType int

const (
	// PreferredRootFSTypeInitRd boots the UVM from an initrd image.
	PreferredRootFSTypeInitRd PreferredRootFSType = iota
	// PreferredRootFSTypeVHD boots the UVM from a root-FS VHD on VPMem device 0.
	PreferredRootFSTypeVHD

	// linuxLogVsockPort is the vsock port used to forward the guest program's
	// stdout/stderr output back to the host (see CreateLCOW).
	linuxLogVsockPort = 109
)

// OutputHandler is used to process the output from the program run in the UVM.
type OutputHandler func(io.Reader)

const (
	// InitrdFile is the default file name for an initrd.img used to boot LCOW.
	InitrdFile = "initrd.img"
	// VhdFile is the default file name for a rootfs.vhd used to boot LCOW.
	VhdFile = "rootfs.vhd"
	// KernelFile is the default file name for a kernel used to boot LCOW.
	KernelFile = "kernel"
	// UncompressedKernelFile is the default file name for an uncompressed
	// kernel used to boot LCOW with KernelDirect.
	UncompressedKernelFile = "vmlinux"
)
// OptionsLCOW are the set of options passed to CreateLCOW() to create a utility vm.
type OptionsLCOW struct {
	*Options

	BootFilesPath         string              // Folder in which kernel and root file system reside. Defaults to \Program Files\Linux Containers
	KernelFile            string              // Filename under `BootFilesPath` for the kernel. Defaults to `kernel`
	KernelDirect          bool                // Skip UEFI and boot directly to `kernel`
	RootFSFile            string              // Filename under `BootFilesPath` for the UVMs root file system. Defaults to `InitrdFile`
	KernelBootOptions     string              // Additional boot options for the kernel
	EnableGraphicsConsole bool                // If true, enable a graphics console for the utility VM
	ConsolePipe           string              // The named pipe path to use for the serial console. eg \\.\pipe\vmpipe
	SCSIControllerCount   uint32              // The number of SCSI controllers. Defaults to 1. Currently we only support 0 or 1.
	UseGuestConnection    bool                // Whether the HCS should connect to the UVM's GCS. Defaults to true
	ExecCommandLine       string              // The command line to exec from init. Defaults to GCS
	ForwardStdout         bool                // Whether stdout will be forwarded from the executed program. Defaults to false
	ForwardStderr         bool                // Whether stderr will be forwarded from the executed program. Defaults to true
	OutputHandler         OutputHandler       `json:"-"` // Controls how output received over HVSocket from the UVM is handled. Defaults to parsing output as logrus messages. Only consulted when ForwardStdout or ForwardStderr is set.
	VPMemDeviceCount      uint32              // Number of VPMem devices. Defaults to `DefaultVPMEMCount`. Limit at 128. If booting UVM from VHD, device 0 is taken.
	VPMemSizeBytes        uint64              // Size of the VPMem devices. Defaults to `DefaultVPMemSizeBytes`. Must be a multiple of 4096.
	PreferredRootFSType   PreferredRootFSType // If `KernelFile` is `InitrdFile` use `PreferredRootFSTypeInitRd`. If `KernelFile` is `VhdFile` use `PreferredRootFSTypeVHD`
}
// defaultLCOWOSBootFilesPath returns the default path used to locate the LCOW
// OS kernel and root FS files. It prefers the `LinuxBootFiles` subdirectory
// next to the executable that started the current process, falling back to
// `%ProgramFiles%\Linux Containers` when that directory does not exist.
func defaultLCOWOSBootFilesPath() string {
	local := filepath.Join(filepath.Dir(os.Args[0]), "LinuxBootFiles")
	if _, err := os.Stat(local); err != nil {
		return filepath.Join(os.Getenv("ProgramFiles"), "Linux Containers")
	}
	return local
}
// NewDefaultOptionsLCOW creates the default options for a bootable version of
// LCOW.
//
// `id` the ID of the compute system. If not passed will generate a new GUID.
//
// `owner` the owner of the compute system. If not passed will use the
// executable files name.
func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW {
	// Use KernelDirect boot by default on all builds that support it.
	kernelDirectSupported := osversion.Get().Build >= 18286
	opts := &OptionsLCOW{
		Options:               newDefaultOptions(id, owner),
		BootFilesPath:         defaultLCOWOSBootFilesPath(),
		KernelFile:            KernelFile,
		KernelDirect:          kernelDirectSupported,
		RootFSFile:            InitrdFile,
		KernelBootOptions:     "",
		EnableGraphicsConsole: false,
		ConsolePipe:           "",
		SCSIControllerCount:   1,
		UseGuestConnection:    true,
		ExecCommandLine:       fmt.Sprintf("/bin/gcs -v4 -log-format json -loglevel %s", logrus.StandardLogger().Level.String()),
		ForwardStdout:         false,
		ForwardStderr:         true,
		OutputHandler:         parseLogrus(id),
		VPMemDeviceCount:      DefaultVPMEMCount,
		VPMemSizeBytes:        DefaultVPMemSizeBytes,
		PreferredRootFSType:   PreferredRootFSTypeInitRd,
	}
	// LCOW has more reliable behavior with the external bridge.
	opts.Options.ExternalGuestConnection = true
	// Prefer a VHD root FS when one is present on disk; this overrides the
	// InitrdFile default chosen above.
	if _, err := os.Stat(filepath.Join(opts.BootFilesPath, VhdFile)); err == nil {
		// We have a rootfs.vhd in the boot files path. Use it over an initrd.img
		opts.RootFSFile = VhdFile
		opts.PreferredRootFSType = PreferredRootFSTypeVHD
	}
	if kernelDirectSupported {
		// KernelDirect supports uncompressed kernel if the kernel is present.
		// Default to uncompressed if on box. NOTE: If `kernel` is already
		// uncompressed and simply named 'kernel' it will still be used
		// uncompressed automatically.
		if _, err := os.Stat(filepath.Join(opts.BootFilesPath, UncompressedKernelFile)); err == nil {
			opts.KernelFile = UncompressedKernelFile
		}
	}
	return opts
}
// CreateLCOW creates an HCS compute system representing a utility VM from the
// supplied options, validating the options and boot files before issuing the
// create call. On success the caller owns the returned UtilityVM and must
// Close it; on error any partially constructed UVM is closed here.
func CreateLCOW(opts *OptionsLCOW) (_ *UtilityVM, err error) {
	op := "uvm::CreateLCOW"
	log := logrus.WithFields(logrus.Fields{
		logfields.UVMID: opts.ID,
	})
	log.WithField("options", fmt.Sprintf("%+v", opts)).Debug(op + " - Begin Operation")
	defer func() {
		if err != nil {
			log.Data[logrus.ErrorKey] = err
			log.Error(op + " - End Operation - Error")
		} else {
			log.Debug(op + " - End Operation - Success")
		}
	}()
	if opts.ID == "" {
		g, err := guid.NewV4()
		if err != nil {
			return nil, err
		}
		opts.ID = g.String()
	}
	// We dont serialize OutputHandler so if it is missing we need to put it back to the default.
	if opts.OutputHandler == nil {
		opts.OutputHandler = parseLogrus(opts.ID)
	}
	uvm := &UtilityVM{
		id:                  opts.ID,
		owner:               opts.Owner,
		operatingSystem:     "linux",
		scsiControllerCount: opts.SCSIControllerCount,
		vpmemMaxCount:       opts.VPMemDeviceCount,
		vpmemMaxSizeBytes:   opts.VPMemSizeBytes,
	}
	defer func() {
		if err != nil {
			uvm.Close()
		}
	}()
	// To maintain compatability with Docker we need to automatically downgrade
	// a user CPU count if the setting is not possible.
	uvm.normalizeProcessorCount(opts.ProcessorCount)
	// Align the requested memory size.
	memorySizeInMB := uvm.normalizeMemorySize(opts.MemorySizeInMB)
	kernelFullPath := filepath.Join(opts.BootFilesPath, opts.KernelFile)
	if _, err := os.Stat(kernelFullPath); os.IsNotExist(err) {
		return nil, fmt.Errorf("kernel: '%s' not found", kernelFullPath)
	}
	rootfsFullPath := filepath.Join(opts.BootFilesPath, opts.RootFSFile)
	if _, err := os.Stat(rootfsFullPath); os.IsNotExist(err) {
		return nil, fmt.Errorf("boot file: '%s' not found", rootfsFullPath)
	}
	if opts.SCSIControllerCount > 1 {
		return nil, fmt.Errorf("SCSI controller count must be 0 or 1") // Future extension here for up to 4
	}
	if opts.VPMemDeviceCount > MaxVPMEMCount {
		return nil, fmt.Errorf("vpmem device count cannot be greater than %d", MaxVPMEMCount)
	}
	if uvm.vpmemMaxCount > 0 {
		if opts.VPMemSizeBytes%4096 != 0 {
			return nil, fmt.Errorf("opts.VPMemSizeBytes must be a multiple of 4096")
		}
	} else {
		if opts.PreferredRootFSType == PreferredRootFSTypeVHD {
			return nil, fmt.Errorf("PreferredRootFSTypeVHD requires at least one VPMem device")
		}
	}
	if opts.KernelDirect && osversion.Get().Build < 18286 {
		return nil, fmt.Errorf("KernelDirectBoot is not supported on builds older than 18286")
	}
	doc := &hcsschema.ComputeSystem{
		Owner:                             uvm.owner,
		SchemaVersion:                     schemaversion.SchemaV21(),
		ShouldTerminateOnLastHandleClosed: true,
		VirtualMachine: &hcsschema.VirtualMachine{
			StopOnReset: true,
			Chipset:     &hcsschema.Chipset{},
			ComputeTopology: &hcsschema.Topology{
				Memory: &hcsschema.Memory2{
					SizeInMB:             memorySizeInMB,
					AllowOvercommit:      opts.AllowOvercommit,
					EnableDeferredCommit: opts.EnableDeferredCommit,
				},
				Processor: &hcsschema.Processor2{
					Count:  uvm.processorCount,
					Limit:  opts.ProcessorLimit,
					Weight: opts.ProcessorWeight,
				},
			},
			Devices: &hcsschema.Devices{
				HvSocket: &hcsschema.HvSocket2{
					HvSocketConfig: &hcsschema.HvSocketSystemConfig{
						// Allow administrators and SYSTEM to bind to vsock sockets
						// so that we can create a GCS log socket.
						DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
					},
				},
				Plan9: &hcsschema.Plan9{},
			},
		},
	}
	// Handle StorageQoS if set
	if opts.StorageQoSBandwidthMaximum > 0 || opts.StorageQoSIopsMaximum > 0 {
		doc.VirtualMachine.StorageQoS = &hcsschema.StorageQoS{
			IopsMaximum:      opts.StorageQoSIopsMaximum,
			BandwidthMaximum: opts.StorageQoSBandwidthMaximum,
		}
	}
	if opts.UseGuestConnection && !opts.ExternalGuestConnection {
		doc.VirtualMachine.GuestConnection = &hcsschema.GuestConnection{
			UseVsock:            true,
			UseConnectedSuspend: true,
		}
	}
	if uvm.scsiControllerCount > 0 {
		// TODO: JTERRY75 - this should enumerate scsicount and add an entry per value.
		doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{
			"0": {
				Attachments: make(map[string]hcsschema.Attachment),
			},
		}
	}
	if uvm.vpmemMaxCount > 0 {
		doc.VirtualMachine.Devices.VirtualPMem = &hcsschema.VirtualPMemController{
			MaximumCount:     uvm.vpmemMaxCount,
			MaximumSizeBytes: uvm.vpmemMaxSizeBytes,
		}
	}
	var kernelArgs string
	switch opts.PreferredRootFSType {
	case PreferredRootFSTypeInitRd:
		if !opts.KernelDirect {
			kernelArgs = "initrd=/" + opts.RootFSFile
		}
	case PreferredRootFSTypeVHD:
		// Support for VPMem VHD(X) booting rather than initrd..
		kernelArgs = "root=/dev/pmem0 ro rootwait init=/init"
		imageFormat := "Vhd1"
		// filepath.Ext includes the leading dot, so compare against ".vhdx"
		// (the previous comparison against "vhdx" could never match).
		if strings.ToLower(filepath.Ext(opts.RootFSFile)) == ".vhdx" {
			imageFormat = "Vhdx"
		}
		doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{
			"0": {
				HostPath:    rootfsFullPath,
				ReadOnly:    true,
				ImageFormat: imageFormat,
			},
		}
		// Add to our internal structure
		uvm.vpmemDevices[0] = vpmemInfo{
			hostPath: opts.RootFSFile,
			uvmPath:  "/",
			refCount: 1,
		}
	}
	vmDebugging := false
	if opts.ConsolePipe != "" {
		vmDebugging = true
		kernelArgs += " 8250_core.nr_uarts=1 8250_core.skip_txen_test=1 console=ttyS0,115200"
		doc.VirtualMachine.Devices.ComPorts = map[string]hcsschema.ComPort{
			"0": { // Which is actually COM1
				NamedPipe: opts.ConsolePipe,
			},
		}
	} else {
		kernelArgs += " 8250_core.nr_uarts=0"
	}
	if opts.EnableGraphicsConsole {
		vmDebugging = true
		kernelArgs += " console=tty"
		doc.VirtualMachine.Devices.Keyboard = &hcsschema.Keyboard{}
		doc.VirtualMachine.Devices.EnhancedModeVideo = &hcsschema.EnhancedModeVideo{}
		doc.VirtualMachine.Devices.VideoMonitor = &hcsschema.VideoMonitor{}
	}
	if !vmDebugging {
		// Terminate the VM if there is a kernel panic.
		kernelArgs += " panic=-1 quiet"
	}
	if opts.KernelBootOptions != "" {
		kernelArgs += " " + opts.KernelBootOptions
	}
	// With default options, run GCS with stderr pointing to the vsock port
	// created below in order to forward guest logs to logrus.
	initArgs := "/bin/vsockexec"
	if opts.ForwardStdout {
		initArgs += fmt.Sprintf(" -o %d", linuxLogVsockPort)
	}
	if opts.ForwardStderr {
		initArgs += fmt.Sprintf(" -e %d", linuxLogVsockPort)
	}
	initArgs += " " + opts.ExecCommandLine
	if vmDebugging {
		// Launch a shell on the console.
		initArgs = `sh -c "` + initArgs + ` & exec sh"`
	}
	kernelArgs += ` pci=off brd.rd_nr=0 pmtmr=0 -- ` + initArgs
	if !opts.KernelDirect {
		doc.VirtualMachine.Chipset.Uefi = &hcsschema.Uefi{
			BootThis: &hcsschema.UefiBootEntry{
				DevicePath:    `\` + opts.KernelFile,
				DeviceType:    "VmbFs",
				VmbFsRootPath: opts.BootFilesPath,
				OptionalData:  kernelArgs,
			},
		}
	} else {
		doc.VirtualMachine.Chipset.LinuxKernelDirect = &hcsschema.LinuxKernelDirect{
			KernelFilePath: kernelFullPath,
			KernelCmdLine:  kernelArgs,
		}
		if opts.PreferredRootFSType == PreferredRootFSTypeInitRd {
			doc.VirtualMachine.Chipset.LinuxKernelDirect.InitRdPath = rootfsFullPath
		}
	}
	fullDoc, err := mergemaps.MergeJSON(doc, ([]byte)(opts.AdditionHCSDocumentJSON))
	if err != nil {
		return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", opts.AdditionHCSDocumentJSON, err)
	}
	err = uvm.create(fullDoc)
	if err != nil {
		return nil, err
	}
	// Create a socket that the executed program can send to. This is usually
	// used by GCS to send log data.
	if opts.ForwardStdout || opts.ForwardStderr {
		uvm.outputHandler = opts.OutputHandler
		uvm.outputProcessingDone = make(chan struct{})
		uvm.outputListener, err = uvm.listenVsock(linuxLogVsockPort)
		if err != nil {
			return nil, err
		}
	}
	if opts.UseGuestConnection && opts.ExternalGuestConnection {
		l, err := uvm.listenVsock(gcs.LinuxGcsVsockPort)
		if err != nil {
			return nil, err
		}
		uvm.gcListener = l
	}
	return uvm, nil
}
// listenVsock starts an HVSocket listener on the given service port of this
// utility VM.
func (uvm *UtilityVM) listenVsock(port uint32) (net.Listener, error) {
	addr := &winio.HvsockAddr{
		VMID:      uvm.runtimeID,
		ServiceID: winio.VsockServiceID(port),
	}
	return winio.ListenHvsock(addr)
}
|
package version
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"strings"
"text/tabwriter"
"github.com/blang/semver"
"github.com/cri-o/cri-o/utils"
"github.com/google/renameio"
json "github.com/json-iterator/go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Version is the version of the build.
const Version = "1.22.0"
// Variables injected during build-time
var (
gitCommit string // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState string // state of git tree, either "clean" or "dirty"
buildDate string // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
)
// Info holds the build and runtime version details of this binary. The JSON
// tags drive the output of JSONString; empty fields are omitted.
type Info struct {
	Version      string `json:"version,omitempty"`
	GitCommit    string `json:"gitCommit,omitempty"`
	GitTreeState string `json:"gitTreeState,omitempty"`
	BuildDate    string `json:"buildDate,omitempty"`
	GoVersion    string `json:"goVersion,omitempty"`
	Compiler     string `json:"compiler,omitempty"`
	Platform     string `json:"platform,omitempty"`
	Linkmode     string `json:"linkmode,omitempty"`
}
// ShouldCrioWipe opens the version file, and parses it and the version string
// If there is a parsing error, then crio should wipe, and the error is returned.
// if parsing is successful, it compares the major and minor versions
// and returns whether the major and minor versions are the same.
// If they differ, then crio should wipe.
func ShouldCrioWipe(versionFileName string) (bool, error) {
return shouldCrioWipe(versionFileName, Version)
}
// shouldCrioWipe is an internal function for testing purposes. It reads the
// previously written version file, parses it and the current version string,
// and reports whether the major or minor versions differ (in which case crio
// should wipe). Any read/parse failure also reports true, with the error.
func shouldCrioWipe(versionFileName, versionString string) (bool, error) {
	f, err := os.Open(versionFileName)
	if err != nil {
		return true, errors.Errorf("version file %s not found: %v", versionFileName, err)
	}
	// The previous implementation never closed this file, leaking the
	// descriptor on every call.
	defer f.Close()
	r := bufio.NewReader(f)
	versionBytes, err := ioutil.ReadAll(r)
	if err != nil {
		return true, errors.Errorf("reading version file %s failed: %v", versionFileName, err)
	}
	// parse the version that was laid down by a previous invocation of crio
	var oldVersion semver.Version
	if err := oldVersion.UnmarshalJSON(versionBytes); err != nil {
		return true, errors.Errorf("version file %s malformatted: %v", versionFileName, err)
	}
	// parse the version of the current binary
	newVersion, err := parseVersionConstant(versionString, "")
	if err != nil {
		return true, errors.Errorf("version constant %s malformatted: %v", versionString, err)
	}
	// in every case that the minor and major version are out of sync,
	// we want to perform a {down,up}grade. The common case here is newVersion > oldVersion,
	// but even in the opposite case, images are out of date and could be wiped
	return newVersion.Major != oldVersion.Major || newVersion.Minor != oldVersion.Minor, nil
}
// WriteVersionFile writes the version information to a given file
// file is the location of the old version file
// gitCommit is the current git commit version. It will be added to the file
// to aid in debugging, but will not be used to compare versions
func WriteVersionFile(file string) error {
return writeVersionFile(file, gitCommit, Version)
}
// LogVersion logs the version and git information of this build
func LogVersion() {
logrus.Infof("Starting CRI-O, version: %s, git: %v(%s)", Version, gitCommit, gitTreeState)
}
// writeVersionFile is an internal function for testing purposes. It encodes
// the version (plus optional git commit as build metadata) to JSON and
// atomically writes it to file, creating parent directories as needed.
func writeVersionFile(file, gitCommit, version string) error {
	current, err := parseVersionConstant(version, gitCommit)
	if err != nil {
		// Should never happen for a well-formed version constant.
		return err
	}
	encoded, err := current.MarshalJSON()
	if err != nil {
		// Should never happen for a well-formed version constant.
		return err
	}
	// Create the top level directory if it doesn't exist.
	if err := os.MkdirAll(filepath.Dir(file), 0o755); err != nil {
		return err
	}
	return renameio.WriteFile(file, encoded, 0o644)
}
// parseVersionConstant parses the Version constant into a semver.Version.
// Go has no const structs, so this small amount of work happens at CRI-O
// startup instead. The version string does not carry the git commit, so the
// commit may be passed in here and is attached as build metadata for
// debugging. With a properly formatted version constant this never errors.
func parseVersionConstant(versionString, gitCommit string) (*semver.Version, error) {
	parsed, err := semver.Make(versionString)
	if err != nil {
		return nil, err
	}
	if gitCommit != "" {
		// The commit is helpful but not required, so a malformed value is
		// silently dropped rather than treated as an error.
		if build, buildErr := semver.NewBuildVersion(strings.Trim(gitCommit, "\"")); buildErr == nil {
			parsed.Build = append(parsed.Build, build)
		}
	}
	return &parsed, nil
}
// Get returns an Info populated from the build-time injected variables
// (gitCommit, gitTreeState, buildDate) and the current runtime details.
func Get() *Info {
	return &Info{
		Version:      Version,
		GitCommit:    gitCommit,
		GitTreeState: gitTreeState,
		BuildDate:    buildDate,
		GoVersion:    runtime.Version(),
		Compiler:     runtime.Compiler,
		Platform:     fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
		Linkmode:     getLinkmode(),
	}
}
// String returns the string representation of the version info: one
// "Field:  value" line per non-empty field, tab-aligned.
func (i *Info) String() string {
	b := strings.Builder{}
	w := tabwriter.NewWriter(&b, 0, 0, 2, ' ', 0)
	v := reflect.ValueOf(*i)
	t := v.Type()
	// Use a distinct index name: the original loop variable `i` shadowed
	// the receiver `i`, which is confusing and error-prone.
	for idx := 0; idx < t.NumField(); idx++ {
		field := t.Field(idx)
		value := v.FieldByName(field.Name).String()
		if value != "" {
			fmt.Fprintf(w, "%s:\t%s", field.Name, value)
			// No trailing newline after the last field.
			if idx+1 < t.NumField() {
				fmt.Fprintf(w, "\n")
			}
		}
	}
	w.Flush()
	return b.String()
}
// getLinkmode reports whether the current binary is statically or dynamically
// linked, by running `ldd` against the executable and inspecting the error
// output. Returns "" when detection is not possible.
func getLinkmode() string {
	abspath, err := os.Executable()
	if err != nil {
		logrus.Warnf("Encountered error finding binary to detect link mode: %v", err)
		return ""
	}
	// If ldd is not installed, silently skip detection.
	if _, err = exec.LookPath("ldd"); err != nil {
		return ""
	}
	// ldd fails on static binaries; the error text carries the reason.
	// NOTE(review): this string match is locale-dependent — ldd localizes
	// its messages, so static binaries may be misdetected on non-English
	// systems; forcing LANG=C when running ldd would make this reliable.
	if _, err = utils.ExecCmd("ldd", abspath); err != nil {
		if strings.Contains(err.Error(), "not a dynamic executable") ||
			strings.Contains(strings.ToLower(err.Error()), "not a valid dynamic program") {
			return "static"
		}
		logrus.Warnf("Encountered error detecting link mode of binary: %v", err)
		return ""
	}
	return "dynamic"
}
// JSONString returns the JSON representation of the version info
func (i *Info) JSONString() (string, error) {
b, err := json.MarshalIndent(i, "", " ")
if err != nil {
return "", err
}
return string(b), nil
}
Fix linkmode retrieval for non en_US systems
We now set the environment to `LANG=C` before running `ldd`. This way we
can ensure that the error checks are consistent.
Fixes #5165
Signed-off-by: Sascha Grunert <70ab469ddb2ac3e35f32ed7c2fd1cca514b2e879@redhat.com>
package version
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"strings"
"text/tabwriter"
"github.com/blang/semver"
"github.com/google/renameio"
json "github.com/json-iterator/go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"sigs.k8s.io/release-utils/command"
)
// Version is the version of the build.
const Version = "1.22.0"
// Variables injected during build-time
var (
gitCommit string // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState string // state of git tree, either "clean" or "dirty"
buildDate string // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
)
type Info struct {
Version string `json:"version,omitempty"`
GitCommit string `json:"gitCommit,omitempty"`
GitTreeState string `json:"gitTreeState,omitempty"`
BuildDate string `json:"buildDate,omitempty"`
GoVersion string `json:"goVersion,omitempty"`
Compiler string `json:"compiler,omitempty"`
Platform string `json:"platform,omitempty"`
Linkmode string `json:"linkmode,omitempty"`
}
// ShouldCrioWipe opens the version file, and parses it and the version string
// If there is a parsing error, then crio should wipe, and the error is returned.
// if parsing is successful, it compares the major and minor versions
// and returns whether the major and minor versions are the same.
// If they differ, then crio should wipe.
func ShouldCrioWipe(versionFileName string) (bool, error) {
return shouldCrioWipe(versionFileName, Version)
}
// shouldCrioWipe is an internal function for testing purposes. It reads the
// previously written version file, parses it and the current version string,
// and reports whether the major or minor versions differ (in which case crio
// should wipe). Any read/parse failure also reports true, with the error.
func shouldCrioWipe(versionFileName, versionString string) (bool, error) {
	f, err := os.Open(versionFileName)
	if err != nil {
		return true, errors.Errorf("version file %s not found: %v", versionFileName, err)
	}
	// The previous implementation never closed this file, leaking the
	// descriptor on every call.
	defer f.Close()
	r := bufio.NewReader(f)
	versionBytes, err := ioutil.ReadAll(r)
	if err != nil {
		return true, errors.Errorf("reading version file %s failed: %v", versionFileName, err)
	}
	// parse the version that was laid down by a previous invocation of crio
	var oldVersion semver.Version
	if err := oldVersion.UnmarshalJSON(versionBytes); err != nil {
		return true, errors.Errorf("version file %s malformatted: %v", versionFileName, err)
	}
	// parse the version of the current binary
	newVersion, err := parseVersionConstant(versionString, "")
	if err != nil {
		return true, errors.Errorf("version constant %s malformatted: %v", versionString, err)
	}
	// in every case that the minor and major version are out of sync,
	// we want to perform a {down,up}grade. The common case here is newVersion > oldVersion,
	// but even in the opposite case, images are out of date and could be wiped
	return newVersion.Major != oldVersion.Major || newVersion.Minor != oldVersion.Minor, nil
}
// WriteVersionFile writes the version information to a given file
// file is the location of the old version file
// gitCommit is the current git commit version. It will be added to the file
// to aid in debugging, but will not be used to compare versions
func WriteVersionFile(file string) error {
return writeVersionFile(file, gitCommit, Version)
}
// LogVersion logs the version and git information of this build
func LogVersion() {
logrus.Infof("Starting CRI-O, version: %s, git: %v(%s)", Version, gitCommit, gitTreeState)
}
// writeVersionFile is an internal function for testing purposes
func writeVersionFile(file, gitCommit, version string) error {
current, err := parseVersionConstant(version, gitCommit)
// Sanity check-this should never happen
if err != nil {
return err
}
j, err := current.MarshalJSON()
// Sanity check-this should never happen
if err != nil {
return err
}
// Create the top level directory if it doesn't exist
if err := os.MkdirAll(filepath.Dir(file), 0o755); err != nil {
return err
}
return renameio.WriteFile(file, j, 0o644)
}
// parseVersionConstant parses the Version variable above
// a const crioVersion would be kept, but golang doesn't support
// const structs. We will instead spend some runtime on CRI-O startup
// Because the version string doesn't keep track of the git commit,
// but it could be useful for debugging, we pass it in here
// If our version constant is properly formatted, this should never error
func parseVersionConstant(versionString, gitCommit string) (*semver.Version, error) {
v, err := semver.Make(versionString)
if err != nil {
return nil, err
}
if gitCommit != "" {
gitBuild, err := semver.NewBuildVersion(strings.Trim(gitCommit, "\""))
// If gitCommit is empty, silently error, as it's helpful, but not needed.
if err == nil {
v.Build = append(v.Build, gitBuild)
}
}
return &v, nil
}
func Get() *Info {
return &Info{
Version: Version,
GitCommit: gitCommit,
GitTreeState: gitTreeState,
BuildDate: buildDate,
GoVersion: runtime.Version(),
Compiler: runtime.Compiler,
Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
Linkmode: getLinkmode(),
}
}
// String returns the string representation of the version info: one
// "Field:  value" line per non-empty field, tab-aligned.
func (i *Info) String() string {
	b := strings.Builder{}
	w := tabwriter.NewWriter(&b, 0, 0, 2, ' ', 0)
	v := reflect.ValueOf(*i)
	t := v.Type()
	// Use a distinct index name: the original loop variable `i` shadowed
	// the receiver `i`, which is confusing and error-prone.
	for idx := 0; idx < t.NumField(); idx++ {
		field := t.Field(idx)
		value := v.FieldByName(field.Name).String()
		if value != "" {
			fmt.Fprintf(w, "%s:\t%s", field.Name, value)
			// No trailing newline after the last field.
			if idx+1 < t.NumField() {
				fmt.Fprintf(w, "\n")
			}
		}
	}
	w.Flush()
	return b.String()
}
// getLinkmode reports whether the current binary is statically or dynamically
// linked by running `ldd` against the executable with LANG=C, so the error
// messages matched below are not localized. Returns "unknown" when detection
// is not possible.
func getLinkmode() string {
	const (
		unknown = "unknown"
		ldd     = "ldd"
	)
	abspath, err := os.Executable()
	if err != nil {
		logrus.Warnf("Unable to find currently running executable: %v", err)
		return unknown
	}
	// If ldd is not installed, detection is impossible.
	if _, err = exec.LookPath(ldd); err != nil {
		logrus.Warnf("Unable to find ldd command: %v", err)
		return unknown
	}
	// LANG=C forces untranslated ldd output so the string checks below are
	// consistent across locales.
	out, err := command.New(ldd, abspath).Env("LANG=C").RunSilent()
	if err != nil {
		logrus.Warnf("Unable to run ldd command: %v", err)
		return unknown
	}
	// ldd exits non-zero for static binaries; the message tells us why.
	if !out.Success() {
		if strings.Contains(out.Error(), "not a dynamic executable") ||
			strings.Contains(strings.ToLower(out.Error()), "not a valid dynamic program") {
			return "static"
		}
		logrus.Warnf("Encountered error detecting link mode of binary: %s", out.Error())
		return unknown
	}
	return "dynamic"
}
// JSONString returns the JSON representation of the version info
func (i *Info) JSONString() (string, error) {
b, err := json.MarshalIndent(i, "", " ")
if err != nil {
return "", err
}
return string(b), nil
}
|
package version
// Version describes module version
const Version = "1.6.3"
bump up version to release 1.6.4
package version
// Version describes module version
const Version = "1.6.4"
|
package sum
// basic reports whether two elements of nums at distinct indices add up to
// sum. Naive O(n^2) reference implementation.
func basic(sum int, nums ...int) bool {
	for i, x := range nums {
		for j, y := range nums {
			// An element may not pair with itself (equal values at
			// different indices are fine). The previous version matched
			// x+x, disagreeing with better/linear/complements.
			if i == j {
				continue
			}
			if x+y == sum {
				return true
			}
		}
	}
	return false
}
// better reports whether two elements of nums at distinct indices add up to
// sum, examining each unordered pair exactly once.
func better(sum int, nums ...int) bool {
	for i, x := range nums {
		for _, y := range nums[i+1:] {
			if x+y == sum {
				return true
			}
		}
	}
	return false
}
// linear reports whether two elements at distinct indices add up to sum
// using the two-pointer technique in O(n). NOTE: correct only when nums is
// sorted in ascending order — the pointer moves rely on that ordering.
func linear(sum int, nums ...int) bool {
	lo, hi := 0, len(nums)-1
	for lo < hi {
		s := nums[lo] + nums[hi]
		if s == sum {
			return true
		}
		if s > sum {
			hi--
		} else {
			lo++
		}
	}
	return false
}
// complements reports whether two elements at distinct indices add up to
// sum, in O(n) using a set of required complements.
func complements(sum int, nums ...int) bool {
	comp := map[int]struct{}{}
	for _, n := range nums {
		// If an earlier element required exactly n as its partner, we have
		// a pair. The previous version looked up sum-n here, which only
		// ever matched duplicate values — not valid pairs.
		if _, ok := comp[n]; ok {
			return true
		}
		comp[sum-n] = struct{}{}
	}
	return false
}
sum: add optim
package sum
// basic reports whether two elements of nums at distinct indices add up to
// sum. Naive O(n^2) reference implementation.
func basic(sum int, nums ...int) bool {
	for i, x := range nums {
		for j, y := range nums {
			// An element may not pair with itself (equal values at
			// different indices are fine). The previous version matched
			// x+x, disagreeing with better/linear/complements.
			if i == j {
				continue
			}
			if x+y == sum {
				return true
			}
		}
	}
	return false
}
// better reports whether two elements of nums at distinct indices add up to
// sum, examining each unordered pair exactly once.
func better(sum int, nums ...int) bool {
	for i, x := range nums {
		for _, y := range nums[i+1:] {
			if x+y == sum {
				return true
			}
		}
	}
	return false
}
// linear reports whether two elements at distinct indices add up to sum
// using the two-pointer technique in O(n). NOTE: correct only when nums is
// sorted in ascending order — the pointer moves rely on that ordering.
func linear(sum int, nums ...int) bool {
	lo, hi := 0, len(nums)-1
	for lo < hi {
		s := nums[lo] + nums[hi]
		if s == sum {
			return true
		}
		if s > sum {
			hi--
		} else {
			lo++
		}
	}
	return false
}
// complements reports whether two elements at distinct indices add up to
// sum, in O(n) using a set of required complements.
func complements(sum int, nums ...int) bool {
	comp := map[int]struct{}{}
	for _, n := range nums {
		// If an earlier element required exactly n as its partner, we have
		// a pair. The previous version looked up sum-n here, which only
		// ever matched duplicate values — not valid pairs.
		if _, ok := comp[n]; ok {
			return true
		}
		comp[sum-n] = struct{}{}
	}
	return false
}
// optim reports whether two elements at distinct indices add up to sum,
// recording required complements in a slice instead of a map.
func optim(sum int, nums ...int) bool {
	comp := []int{}
	for _, n := range nums {
		// If an earlier element required exactly n as its partner, we have
		// a pair. The previous version compared the stored complements
		// against sum-n, which only ever matched duplicate values.
		for _, c := range comp {
			if c == n {
				return true
			}
		}
		comp = append(comp, sum-n)
	}
	return false
}
|
package shellquote
import (
"reflect"
"testing"
)
// TestSimpleSplit checks that Split produces the expected word list for
// every well-formed input in simpleSplitTest.
func TestSimpleSplit(t *testing.T) {
	for _, tc := range simpleSplitTest {
		got, err := Split(tc.input)
		if err != nil {
			t.Errorf("Input %q, got error %#v", tc.input, err)
			continue
		}
		if !reflect.DeepEqual(got, tc.output) {
			t.Errorf("Input %q, got %q, expected %q", tc.input, got, tc.output)
		}
	}
}
// TestErrorSplit checks that Split returns the exact sentinel error expected
// for each malformed input in errorSplitTest.
func TestErrorSplit(t *testing.T) {
	for _, tc := range errorSplitTest {
		_, got := Split(tc.input)
		if got != tc.error {
			t.Errorf("Input %q, got error %#v, expected error %#v", tc.input, got, tc.error)
		}
	}
}
var simpleSplitTest = []struct {
input string
output []string
}{
{"hello", []string{"hello"}},
{"hello goodbye", []string{"hello", "goodbye"}},
{"hello goodbye", []string{"hello", "goodbye"}},
{"glob* test?", []string{"glob*", "test?"}},
{"don\\'t you know the dewey decimal system\\?", []string{"don't", "you", "know", "the", "dewey", "decimal", "system?"}},
{"'don'\\''t you know the dewey decimal system?'", []string{"don't you know the dewey decimal system?"}},
{"one '' two", []string{"one", "", "two"}},
{"text with\\\na newline", []string{"text", "witha", "newline"}},
{"\"quoted\\d\\\\\\\" text with a\\\nnewline\"", []string{"quoted\\d\\\" text with anewline"}},
{"foo\"bar\"baz", []string{"foobarbaz"}},
}
var errorSplitTest = []struct {
input string
error error
}{
{"don't worry", UnterminatedSingleQuoteError},
{"'test'\\''ing", UnterminatedSingleQuoteError},
{"\"foo'bar", UnterminatedDoubleQuoteError},
{"foo\\", UnterminatedEscapeError},
}
Tweak unquote tests for clarity
package shellquote
import (
"reflect"
"testing"
)
func TestSimpleSplit(t *testing.T) {
for _, elem := range simpleSplitTest {
output, err := Split(elem.input)
if err != nil {
t.Errorf("Input %q, got error %#v", elem.input, err)
} else if !reflect.DeepEqual(output, elem.output) {
t.Errorf("Input %q, got %q, expected %q", elem.input, output, elem.output)
}
}
}
func TestErrorSplit(t *testing.T) {
for _, elem := range errorSplitTest {
_, err := Split(elem.input)
if err != elem.error {
t.Errorf("Input %q, got error %#v, expected error %#v", elem.input, err, elem.error)
}
}
}
var simpleSplitTest = []struct {
input string
output []string
}{
{"hello", []string{"hello"}},
{"hello goodbye", []string{"hello", "goodbye"}},
{"hello goodbye", []string{"hello", "goodbye"}},
{"glob* test?", []string{"glob*", "test?"}},
{"don\\'t you know the dewey decimal system\\?", []string{"don't", "you", "know", "the", "dewey", "decimal", "system?"}},
{"'don'\\''t you know the dewey decimal system?'", []string{"don't you know the dewey decimal system?"}},
{"one '' two", []string{"one", "", "two"}},
{"text with\\\na backslash-escaped newline", []string{"text", "witha", "backslash-escaped", "newline"}},
{"text \"with\na\" quoted newline", []string{"text", "with\na", "quoted", "newline"}},
{"\"quoted\\d\\\\\\\" text with\\\na backslash-escaped newline\"", []string{"quoted\\d\\\" text witha backslash-escaped newline"}},
{"foo\"bar\"baz", []string{"foobarbaz"}},
}
var errorSplitTest = []struct {
input string
error error
}{
{"don't worry", UnterminatedSingleQuoteError},
{"'test'\\''ing", UnterminatedSingleQuoteError},
{"\"foo'bar", UnterminatedDoubleQuoteError},
{"foo\\", UnterminatedEscapeError},
}
|
package main
import (
"image/color"
"io"
"strconv"
"github.com/MJKWoolnough/byteio"
"github.com/MJKWoolnough/gopherjs/files"
"github.com/MJKWoolnough/gopherjs/progress"
"github.com/MJKWoolnough/gopherjs/xjs"
"github.com/gopherjs/gopherjs/js"
"github.com/gopherjs/websocket"
"honnef.co/go/js/dom"
)
var uploadDiv = xjs.CreateElement("div")
// upload wires the file-upload UI into c: a file picker that, on selection,
// streams the chosen file to the server over a websocket and renders the
// server's pixel-by-pixel response onto a canvas, with status/error text and
// a Restart button when the run ends.
func upload(c dom.Element) {
	if !uploadDiv.HasChildNodes() {
		upl := xjs.CreateElement("input")
		upl.SetAttribute("name", "file")
		upl.SetAttribute("type", "file")
		upl.AddEventListener("change", false, func(e dom.Event) {
			fs := e.Target().(*dom.HTMLInputElement).Files()
			if len(fs) != 1 {
				return
			}
			file := files.NewFile(fs[0])
			length := file.Size()
			pb := progress.New(color.RGBA{255, 0, 0, 0}, color.RGBA{0, 0, 255, 0}, 400, 50)
			uploadDiv.RemoveChild(upl)
			status := xjs.CreateElement("div")
			xjs.SetInnerText(status, "Uploading...")
			uploadDiv.AppendChild(status)
			uploadDiv.AppendChild(pb)
			addRestart := func() {
				reset := xjs.CreateElement("input")
				reset.SetAttribute("type", "button")
				reset.SetAttribute("value", "Restart")
				reset.AddEventListener("click", false, func(dom.Event) {
					xjs.RemoveChildren(uploadDiv)
					upload(c)
				})
				uploadDiv.InsertBefore(reset, uploadDiv.FirstChild())
			}
			setError := func(err string) {
				xjs.SetInnerText(status, err)
				addRestart()
			}
			go func() {
				conn, err := websocket.Dial("ws://" + js.Global.Get("location").Get("host").String() + "/socket")
				if err != nil {
					setError(err.Error())
					return
				}
				dom.GetWindow().AddEventListener("beforeunload", false, func(_ dom.Event) {
					conn.Close()
				})
				defer conn.Close()
				w := byteio.StickyWriter{Writer: &byteio.LittleEndianWriter{Writer: conn}}
				r := byteio.StickyReader{Reader: &byteio.LittleEndianReader{conn}}
				w.WriteInt64(int64(length))
				if w.Err != nil {
					setError(w.Err.Error())
					return
				}
				_, err = io.Copy(conn, pb.Reader(files.NewFileReader(file), file.Size()))
				if err != nil {
					setError(err.Error())
					return
				}
				statusCode := r.ReadUint8()
				if r.Err != nil {
					setError(r.Err.Error())
					return
				}
				readError := func() {
					length := r.ReadUint16()
					if r.Err != nil {
						setError(r.Err.Error())
						return
					}
					errStr := make([]byte, int(length))
					_, err := io.ReadFull(r.Reader, errStr)
					if err != nil {
						setError(err.Error())
						return
					}
					setError(string(errStr))
				}
				switch statusCode {
				case 0:
					readError()
					return
				case 1:
				default:
					setError("Unknown Status")
					return
				}
				uploadDiv.RemoveChild(pb)
				width := r.ReadInt32()
				height := r.ReadInt32()
				if r.Err != nil {
					// Report the sticky reader's error. The previous code
					// called err.Error() here, dereferencing the stale (nil)
					// io.Copy error and panicking on this failure path.
					setError(r.Err.Error())
					return
				}
				xjs.SetInnerText(status, strconv.FormatInt(int64(width), 10)+"x"+strconv.FormatInt(int64(height), 10))
				canvas := xjs.CreateElement("canvas").(*dom.HTMLCanvasElement)
				canvas.SetAttribute("width", strconv.FormatInt(int64(width), 10))
				canvas.SetAttribute("height", strconv.FormatInt(int64(height), 10))
				canvas.Style().Set("width", strconv.FormatInt(int64(width*4), 10)+"px")
				// Scale the CSS height from the height; the previous code
				// used width*4 for both dimensions.
				canvas.Style().Set("height", strconv.FormatInt(int64(height*4), 10)+"px")
				ctx := canvas.GetContext2d()
				uploadDiv.AppendChild(canvas)
				for {
					statusCode := r.ReadUint8()
					if r.Err != nil {
						setError(r.Err.Error())
						return
					}
					switch statusCode {
					case 0:
						readError()
						return
					case 1:
						x := r.ReadInt32()
						y := r.ReadInt32()
						red := r.ReadUint8()
						green := r.ReadUint8()
						blue := r.ReadUint8()
						alpha := r.ReadUint8()
						if r.Err != nil {
							setError(r.Err.Error())
							return
						}
						ctx.FillStyle = "rgba(" + strconv.Itoa(int(red)) + ", " + strconv.Itoa(int(green)) + ", " + strconv.Itoa(int(blue)) + ", " + strconv.FormatFloat(float64(alpha)/255, 'f', -1, 32) + ")"
						ctx.FillRect(int(x), int(y), 1, 1)
					case 2:
						length := r.ReadUint16()
						message := make([]byte, length)
						r.Read(message)
						if r.Err != nil {
							setError(r.Err.Error())
							return
						}
						xjs.SetInnerText(status, string(message))
					case 255:
						xjs.SetInnerText(status, "Done")
						// Offer a restart after a successful run as well,
						// not only on errors.
						addRestart()
						return
					default:
						setError("Unknown Error")
						return
					}
				}
			}()
		})
		uploadDiv.AppendChild(upl)
	}
	c.AppendChild(uploadDiv)
}
Added restart button upon success as well
package main
import (
"image/color"
"io"
"strconv"
"github.com/MJKWoolnough/byteio"
"github.com/MJKWoolnough/gopherjs/files"
"github.com/MJKWoolnough/gopherjs/progress"
"github.com/MJKWoolnough/gopherjs/xjs"
"github.com/gopherjs/gopherjs/js"
"github.com/gopherjs/websocket"
"honnef.co/go/js/dom"
)
var uploadDiv = xjs.CreateElement("div")
// upload builds the file-upload UI inside c. The first call creates an
// <input type="file"> whose change handler streams the selected file to the
// server over a websocket and renders the server's pixel-by-pixel response
// onto a canvas. Subsequent calls reuse the existing uploadDiv contents.
func upload(c dom.Element) {
	if !uploadDiv.HasChildNodes() {
		upl := xjs.CreateElement("input")
		upl.SetAttribute("name", "file")
		upl.SetAttribute("type", "file")
		upl.AddEventListener("change", false, func(e dom.Event) {
			fs := e.Target().(*dom.HTMLInputElement).Files()
			if len(fs) != 1 {
				return
			}
			file := files.NewFile(fs[0])
			length := file.Size()
			pb := progress.New(color.RGBA{255, 0, 0, 0}, color.RGBA{0, 0, 255, 0}, 400, 50)
			uploadDiv.RemoveChild(upl)
			status := xjs.CreateElement("div")
			xjs.SetInnerText(status, "Uploading...")
			uploadDiv.AppendChild(status)
			uploadDiv.AppendChild(pb)
			// addRestart prepends a button that clears the UI and starts over.
			addRestart := func() {
				reset := xjs.CreateElement("input")
				reset.SetAttribute("type", "button")
				reset.SetAttribute("value", "Restart")
				reset.AddEventListener("click", false, func(dom.Event) {
					xjs.RemoveChildren(uploadDiv)
					upload(c)
				})
				uploadDiv.InsertBefore(reset, uploadDiv.FirstChild())
			}
			// setError shows err in the status line and offers a restart.
			setError := func(err string) {
				xjs.SetInnerText(status, err)
				addRestart()
			}
			go func() {
				conn, err := websocket.Dial("ws://" + js.Global.Get("location").Get("host").String() + "/socket")
				if err != nil {
					setError(err.Error())
					return
				}
				// Close the socket when the page unloads so the server isn't
				// left with a dangling connection.
				dom.GetWindow().AddEventListener("beforeunload", false, func(_ dom.Event) {
					conn.Close()
				})
				defer conn.Close()
				w := byteio.StickyWriter{Writer: &byteio.LittleEndianWriter{Writer: conn}}
				r := byteio.StickyReader{Reader: &byteio.LittleEndianReader{conn}}
				// Announce the upload size, then stream the file through the
				// progress bar's reader.
				w.WriteInt64(int64(length))
				if w.Err != nil {
					setError(w.Err.Error())
					return
				}
				_, err = io.Copy(conn, pb.Reader(files.NewFileReader(file), file.Size()))
				if err != nil {
					setError(err.Error())
					return
				}
				statusCode := r.ReadUint8()
				if r.Err != nil {
					setError(r.Err.Error())
					return
				}
				// readError reads a length-prefixed error string from the
				// server and displays it via setError.
				readError := func() {
					length := r.ReadUint16()
					if r.Err != nil {
						setError(r.Err.Error())
						return
					}
					errStr := make([]byte, int(length))
					_, err := io.ReadFull(r.Reader, errStr)
					if err != nil {
						setError(err.Error())
						return
					}
					setError(string(errStr))
				}
				switch statusCode {
				case 0:
					readError()
					return
				case 1:
				default:
					setError("Unknown Status")
					return
				}
				uploadDiv.RemoveChild(pb)
				width := r.ReadInt32()
				height := r.ReadInt32()
				if r.Err != nil {
					// BUGFIX: report the sticky reader error; the outer `err`
					// is nil at this point (io.Copy succeeded above) and
					// calling err.Error() would panic.
					setError(r.Err.Error())
					return
				}
				xjs.SetInnerText(status, strconv.FormatInt(int64(width), 10)+"x"+strconv.FormatInt(int64(height), 10))
				canvas := xjs.CreateElement("canvas").(*dom.HTMLCanvasElement)
				canvas.SetAttribute("width", strconv.FormatInt(int64(width), 10))
				canvas.SetAttribute("height", strconv.FormatInt(int64(height), 10))
				canvas.Style().Set("width", strconv.FormatInt(int64(width*4), 10)+"px")
				// BUGFIX: the CSS height must be derived from the image
				// height, not the width (the canvas was previously distorted
				// for non-square images).
				canvas.Style().Set("height", strconv.FormatInt(int64(height*4), 10)+"px")
				ctx := canvas.GetContext2d()
				uploadDiv.AppendChild(canvas)
				// Render loop: consume status codes until the server says done.
				for {
					statusCode := r.ReadUint8()
					if r.Err != nil {
						setError(r.Err.Error())
						return
					}
					switch statusCode {
					case 0: // server-reported error
						readError()
						return
					case 1: // a single pixel: coordinates + RGBA components
						x := r.ReadInt32()
						y := r.ReadInt32()
						red := r.ReadUint8()
						green := r.ReadUint8()
						blue := r.ReadUint8()
						alpha := r.ReadUint8()
						if r.Err != nil {
							setError(r.Err.Error())
							return
						}
						ctx.FillStyle = "rgba(" + strconv.Itoa(int(red)) + ", " + strconv.Itoa(int(green)) + ", " + strconv.Itoa(int(blue)) + ", " + strconv.FormatFloat(float64(alpha)/255, 'f', -1, 32) + ")"
						ctx.FillRect(int(x), int(y), 1, 1)
					case 2: // length-prefixed status message
						length := r.ReadUint16()
						message := make([]byte, length)
						r.Read(message)
						if r.Err != nil {
							setError(r.Err.Error())
							return
						}
						xjs.SetInnerText(status, string(message))
					case 255: // done
						addRestart()
						xjs.SetInnerText(status, "Done")
						return
					default:
						setError("Unknown Error")
						return
					}
				}
			}()
		})
		uploadDiv.AppendChild(upl)
	}
	c.AppendChild(uploadDiv)
}
|
package wfe
import (
"context"
"crypto"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/mail"
"net/url"
"os"
"path"
"sort"
"strconv"
"strings"
"time"
"unicode"
"gopkg.in/square/go-jose.v2"
"github.com/jmhodges/clock"
"github.com/letsencrypt/pebble/acme"
"github.com/letsencrypt/pebble/ca"
"github.com/letsencrypt/pebble/core"
"github.com/letsencrypt/pebble/db"
"github.com/letsencrypt/pebble/va"
)
const (
	// Note: We deliberately pick endpoint paths that differ from Boulder to
	// exercise clients processing of the /directory response
	directoryPath     = "/dir"
	noncePath         = "/nonce-plz"
	newAccountPath    = "/sign-me-up"
	acctPath          = "/my-account/"
	newOrderPath      = "/order-plz"
	orderPath         = "/my-order/"
	orderFinalizePath = "/finalize-order/"
	authzPath         = "/authZ/"
	challengePath     = "/chalZ/"
	certPath          = "/certZ/"
	// How long do pending authorizations last before expiring?
	pendingAuthzExpire = time.Hour
	// How many contacts is an account allowed to have?
	maxContactsPerAcct = 2
	// badNonceEnvVar defines the environment variable name used to provide
	// a percentage value for how often good nonces should be rejected as if they
	// were bad. This can be used to exercise client nonce handling/retries.
	// To have the WFE not reject any good nonces, run Pebble like:
	//   PEBBLE_WFE_NONCEREJECT=0 pebble
	// To have the WFE reject 15% of good nonces, run Pebble like:
	//   PEBBLE_WFE_NONCEREJECT=15 pebble
	badNonceEnvVar = "PEBBLE_WFE_NONCEREJECT"
	// By default when no PEBBLE_WFE_NONCEREJECT is set, what percentage of good
	// nonces are rejected?
	defaultNonceReject = 15
)
// requestEvent holds per-request metadata captured for logging.
type requestEvent struct {
	ClientAddr string `json:",omitempty"` // remote address of the client
	Endpoint   string `json:",omitempty"` // matched endpoint (plus request sub-path)
	Method     string `json:",omitempty"` // HTTP method
	UserAgent  string `json:",omitempty"` // client's User-Agent header
}
// wfeHandlerFunc adapts an ordinary function to the wfeHandler interface,
// mirroring http.HandlerFunc.
type wfeHandlerFunc func(context.Context, *requestEvent, http.ResponseWriter, *http.Request)

// ServeHTTP invokes f with a fresh placeholder context.
func (f wfeHandlerFunc) ServeHTTP(e *requestEvent, w http.ResponseWriter, r *http.Request) {
	f(context.TODO(), e, w, r)
}
// wfeHandler is the WFE's internal handler interface: like http.Handler but
// with an extra *requestEvent parameter for logging.
type wfeHandler interface {
	ServeHTTP(e *requestEvent, w http.ResponseWriter, r *http.Request)
}
// topHandler adapts a wfeHandler to the standard http.Handler interface by
// constructing the requestEvent for each request.
type topHandler struct {
	wfe wfeHandler
}
// ServeHTTP captures the request's client address, method, and user agent
// into a requestEvent and delegates to the wrapped wfeHandler.
func (th *topHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// TODO(@cpu): consider restoring X-Forwarded-For handling for ClientAddr
	event := &requestEvent{
		ClientAddr: r.RemoteAddr,
		Method:     r.Method,
		UserAgent:  r.Header.Get("User-Agent"),
	}
	th.wfe.ServeHTTP(event, w, r)
}
// WebFrontEndImpl is the ACME web front end: it owns the HTTP handlers, the
// in-memory database, nonce bookkeeping, and references to the VA and CA.
type WebFrontEndImpl struct {
	log   *log.Logger
	db    *db.MemoryStore
	nonce *nonceMap
	// nonceErrPercent is the percentage of good nonces deliberately rejected
	// as bad to exercise client retry logic (see badNonceEnvVar).
	nonceErrPercent int
	clk             clock.Clock
	va              *va.VAImpl
	ca              *ca.CAImpl
}
// ToSURL is the terms-of-service URL advertised in the directory's "meta".
const ToSURL = "data:text/plain,Do%20what%20thou%20wilt"
// New constructs a WebFrontEndImpl wired to the given logger, clock, store,
// VA and CA. The percentage of good nonces to reject is read from the
// badNonceEnvVar environment variable, falling back to defaultNonceReject
// when it is unset or unparseable.
func New(
	log *log.Logger,
	clk clock.Clock,
	db *db.MemoryStore,
	va *va.VAImpl,
	ca *ca.CAImpl) WebFrontEndImpl {
	// Start from the default and let a parseable env var override it.
	nonceErrPercent := defaultNonceReject
	if val, err := strconv.Atoi(os.Getenv(badNonceEnvVar)); err == nil {
		nonceErrPercent = val
	}
	// Clamp out-of-range values. The upper bound is 99, not 100, so some
	// good nonces are always accepted.
	switch {
	case nonceErrPercent < 0:
		nonceErrPercent = 0
	case nonceErrPercent > 100:
		nonceErrPercent = 99
	}
	log.Printf("Configured to reject %d%% of good nonces", nonceErrPercent)
	return WebFrontEndImpl{
		log:             log,
		db:              db,
		nonce:           newNonceMap(),
		nonceErrPercent: nonceErrPercent,
		clk:             clk,
		va:              va,
		ca:              ca,
	}
}
// HandleFunc registers handler on mux for pattern, restricted to the given
// HTTP methods ("GET" automatically implies "HEAD"). The wrapper installed
// here sets a fresh Replay-Nonce and no-cache headers on every response,
// rejects disallowed methods with a 405 and an Allow header, and enforces a
// per-request timeout via the context.
func (wfe *WebFrontEndImpl) HandleFunc(
	mux *http.ServeMux,
	pattern string,
	handler wfeHandlerFunc,
	methods ...string) {
	methodsMap := make(map[string]bool)
	for _, m := range methods {
		methodsMap[m] = true
	}
	if methodsMap["GET"] && !methodsMap["HEAD"] {
		// Allow HEAD for any resource that allows GET
		methods = append(methods, "HEAD")
		methodsMap["HEAD"] = true
	}
	methodsStr := strings.Join(methods, ", ")
	defaultHandler := http.StripPrefix(pattern,
		&topHandler{
			wfe: wfeHandlerFunc(func(ctx context.Context, logEvent *requestEvent, response http.ResponseWriter, request *http.Request) {
				// Every response carries a fresh nonce, even error responses.
				response.Header().Set("Replay-Nonce", wfe.nonce.createNonce())
				logEvent.Endpoint = pattern
				if request.URL != nil {
					logEvent.Endpoint = path.Join(logEvent.Endpoint, request.URL.Path)
				}
				addNoCacheHeader(response)
				if !methodsMap[request.Method] {
					response.Header().Set("Allow", methodsStr)
					wfe.sendError(acme.MethodNotAllowed(), response)
					return
				}
				wfe.log.Printf("%s %s -> calling handler()\n", request.Method, logEvent.Endpoint)
				// TODO(@cpu): Configureable request timeout
				timeout := 1 * time.Minute
				ctx, cancel := context.WithTimeout(ctx, timeout)
				// Defer the cancel so the timeout context's resources are
				// released even if the handler panics (previously cancel()
				// only ran on the normal return path).
				defer cancel()
				handler(ctx, logEvent, response, request)
			},
			)})
	mux.Handle(pattern, defaultHandler)
}
// sendError writes prob to response as an application/problem+json document
// using the problem's own HTTP status code.
func (wfe *WebFrontEndImpl) sendError(prob *acme.ProblemDetails, response http.ResponseWriter) {
	doc, err := marshalIndent(prob)
	if err != nil {
		// Fall back to a hand-written document if marshalling fails.
		doc = []byte("{\"detail\": \"Problem marshalling error message.\"}")
	}
	response.Header().Set("Content-Type", "application/problem+json; charset=utf-8")
	response.WriteHeader(prob.HTTPStatus)
	response.Write(doc)
}
// Handler returns the WFE's HTTP handler with every ACME endpoint registered.
// Note: for routes registered with "GET", HandleFunc also allows "HEAD".
func (wfe *WebFrontEndImpl) Handler() http.Handler {
	m := http.NewServeMux()
	routes := []struct {
		pattern string
		handler wfeHandlerFunc
		methods []string
	}{
		{directoryPath, wfe.Directory, []string{"GET"}},
		{noncePath, wfe.Nonce, []string{"GET"}},
		{newAccountPath, wfe.NewAccount, []string{"POST"}},
		{newOrderPath, wfe.NewOrder, []string{"POST"}},
		{orderPath, wfe.Order, []string{"GET"}},
		{orderFinalizePath, wfe.FinalizeOrder, []string{"POST"}},
		{authzPath, wfe.Authz, []string{"GET"}},
		{challengePath, wfe.Challenge, []string{"GET", "POST"}},
		{certPath, wfe.Certificate, []string{"GET"}},
	}
	for _, route := range routes {
		wfe.HandleFunc(m, route.pattern, route.handler, route.methods...)
	}
	// TODO(@cpu): Handle POST to acctPath for existing account updates
	return m
}
// Directory serves the ACME directory object, mapping resource names to
// absolute URLs derived from the incoming request.
func (wfe *WebFrontEndImpl) Directory(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	endpoints := map[string]string{
		"newNonce":   noncePath,
		"newAccount": newAccountPath,
		"newOrder":   newOrderPath,
	}
	response.Header().Set("Content-Type", "application/json; charset=utf-8")
	relDir, err := wfe.relativeDirectory(request, endpoints)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("unable to create directory"), response)
		return
	}
	response.Write(relDir)
}
// relativeDirectory converts the endpoint paths in directory into absolute
// URLs based on the request, adds the "meta" entry with the ToS URL, and
// returns the result marshalled as indented JSON.
func (wfe *WebFrontEndImpl) relativeDirectory(request *http.Request, directory map[string]string) ([]byte, error) {
	relativeDir := make(map[string]interface{}, len(directory))
	for name, endpoint := range directory {
		relativeDir[name] = wfe.relativeEndpoint(request, endpoint)
	}
	relativeDir["meta"] = map[string]string{
		"termsOfService": ToSURL,
	}
	// Marshalling known strings shouldn't ever fail, but propagate any error.
	directoryJSON, err := marshalIndent(relativeDir)
	if err != nil {
		return nil, err
	}
	return directoryJSON, nil
}
// relativeEndpoint turns an endpoint path into an absolute URL using the
// request's host and (possibly proxy-forwarded) protocol.
func (wfe *WebFrontEndImpl) relativeEndpoint(request *http.Request, endpoint string) string {
	// Guess the protocol from whether the request arrived over TLS, then let
	// an upstream proxy's X-Forwarded-Proto header override that guess.
	proto := "http"
	if request.TLS != nil {
		proto = "https"
	}
	if forwarded := request.Header.Get("X-Forwarded-Proto"); forwarded != "" {
		proto = forwarded
	}
	// Default to "localhost" when no request.Host is provided. Otherwise
	// requests with an empty `Host` produce results like `http:///acme/new-authz`
	host := request.Host
	if host == "" {
		host = "localhost"
	}
	result := url.URL{Scheme: proto, Host: host, Path: endpoint}
	return result.String()
}
// Nonce handles requests for a fresh nonce. The Replay-Nonce header itself is
// set on every response by the wrapper installed in HandleFunc, so this
// handler only needs to reply 204 No Content.
func (wfe *WebFrontEndImpl) Nonce(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	response.WriteHeader(http.StatusNoContent)
}
/*
 * keyToID produces a string with the hex representation of the SHA256 digest
 * over a provided public key. We use this for acme.Account ID values
 * because it makes looking up an account by key easy (required by the spec
 * for retrieving an existing account), and because it makes the reg URLs
 * somewhat human digestible/comparable.
 */
func keyToID(key crypto.PublicKey) (string, error) {
	switch t := key.(type) {
	case *jose.JSONWebKey:
		// Guard against a nil *jose.JSONWebKey before dereferencing it.
		if t == nil {
			return "", fmt.Errorf("Cannot compute ID of nil key")
		}
		// Recurse on the embedded key material.
		return keyToID(t.Key)
	case jose.JSONWebKey:
		return keyToID(t.Key)
	default:
		// Any other crypto.PublicKey: digest its DER-encoded SPKI form.
		keyDER, err := x509.MarshalPKIXPublicKey(key)
		if err != nil {
			return "", err
		}
		spkiDigest := sha256.Sum256(keyDER)
		return hex.EncodeToString(spkiDigest[:]), nil
	}
}
// parseJWS parses body as a JWS, rejecting forms that ACME v2 forbids: an
// unprotected "header" field, a "signatures" array, multiple signatures, or
// no signature at all.
func (wfe *WebFrontEndImpl) parseJWS(body string) (*jose.JSONWebSignature, error) {
	// Inspect the raw JSON first, because jose.ParseSigned strips away the
	// fields we need to reject.
	var raw struct {
		Header     map[string]string
		Signatures []interface{}
	}
	if err := json.Unmarshal([]byte(body), &raw); err != nil {
		return nil, errors.New("Parse error reading JWS")
	}
	// ACME v2 never uses values from the unprotected JWS header.
	if raw.Header != nil {
		return nil, errors.New(
			"JWS \"header\" field not allowed. All headers must be in \"protected\" field")
	}
	// Likewise the "signatures" array of the JSON serialization is forbidden;
	// only the mandatory "signature" field may carry a signature.
	if len(raw.Signatures) > 0 {
		return nil, errors.New(
			"JWS \"signatures\" field not allowed. Only the \"signature\" field should contain a signature")
	}
	parsed, err := jose.ParseSigned(body)
	if err != nil {
		return nil, errors.New("Parse error reading JWS")
	}
	switch n := len(parsed.Signatures); {
	case n > 1:
		return nil, errors.New("Too many signatures in POST body")
	case n == 0:
		return nil, errors.New("POST JWS not signed")
	}
	return parsed, nil
}
// extractJWK returns the JSONWebKey embedded in a JWS protected header,
// rejecting requests that omit it, carry an invalid key, or also set "kid"
// (jwk and kid are mutually exclusive).
func (wfe *WebFrontEndImpl) extractJWK(_ *http.Request, jws *jose.JSONWebSignature) (*jose.JSONWebKey, *acme.ProblemDetails) {
	header := jws.Signatures[0].Header
	switch key := header.JSONWebKey; {
	case key == nil:
		return nil, acme.MalformedProblem("No JWK in JWS header")
	case !key.Valid():
		return nil, acme.MalformedProblem("Invalid JWK in JWS header")
	case header.KeyID != "":
		return nil, acme.MalformedProblem("jwk and kid header fields are mutually exclusive.")
	default:
		return key, nil
	}
}
// lookupJWK resolves the "kid" (key id) field in a JWS header to the key of
// an account already stored in the database.
func (wfe *WebFrontEndImpl) lookupJWK(request *http.Request, jws *jose.JSONWebSignature) (*jose.JSONWebKey, *acme.ProblemDetails) {
	header := jws.Signatures[0].Header
	accountURL := header.KeyID
	// The kid carries the full account URL; strip the account path prefix to
	// recover the bare account ID.
	accountID := strings.TrimPrefix(accountURL, wfe.relativeEndpoint(request, acctPath))
	if accountID == "" {
		return nil, acme.MalformedProblem("No key ID (kid) in JWS header")
	}
	account := wfe.db.GetAccountByID(accountID)
	if account == nil {
		return nil, acme.AccountDoesNotExistProblem(fmt.Sprintf(
			"Account %s not found.", accountURL))
	}
	// A request must use either jwk or kid, never both.
	if header.JSONWebKey != nil {
		return nil, acme.MalformedProblem("jwk and kid header fields are mutually exclusive.")
	}
	return account.Key, nil
}
// keyExtractor is a function that returns a JSONWebKey based on input from a
// user-provided JSONWebSignature, for instance by extracting it from the
// input (extractJWK), or by looking it up in a database based on the input
// (lookupJWK).
type keyExtractor func(*http.Request, *jose.JSONWebSignature) (*jose.JSONWebKey, *acme.ProblemDetails)
// NOTE: Unlike `verifyPOST` from the Boulder WFE this version does not
// presently handle the `regCheck` parameter or do any lookups for existing
// accounts.
//
// verifyPOST validates an ACME POST request: it checks the required headers,
// parses the body as a JWS and verifies it with the key produced by kx,
// validates the anti-replay nonce (deliberately rejecting a configured
// percentage of good nonces, see nonceErrPercent), and checks the protected
// "url" header against the URL the request was made to. On success it
// returns the verified payload and the signing key. The order of the checks
// determines which problem document a client sees, so it must be preserved.
func (wfe *WebFrontEndImpl) verifyPOST(
	ctx context.Context,
	logEvent *requestEvent,
	request *http.Request,
	kx keyExtractor) ([]byte, *jose.JSONWebKey, *acme.ProblemDetails) {
	if _, present := request.Header["Content-Length"]; !present {
		return nil, nil, acme.MalformedProblem("missing Content-Length header on POST")
	}
	// Per 6.4.1 "Replay-Nonce" clients should not send a Replay-Nonce header in
	// the HTTP request, it needs to be part of the signed JWS request body
	if _, present := request.Header["Replay-Nonce"]; present {
		return nil, nil, acme.MalformedProblem("HTTP requests should NOT contain Replay-Nonce header. Use JWS nonce field")
	}
	if request.Body == nil {
		return nil, nil, acme.MalformedProblem("no body on POST")
	}
	bodyBytes, err := ioutil.ReadAll(request.Body)
	if err != nil {
		return nil, nil, acme.InternalErrorProblem("unable to read request body")
	}
	body := string(bodyBytes)
	parsedJWS, err := wfe.parseJWS(body)
	if err != nil {
		return nil, nil, acme.MalformedProblem(err.Error())
	}
	// Resolve the signing key: embedded JWK or kid lookup, depending on kx.
	pubKey, prob := kx(request, parsedJWS)
	if prob != nil {
		return nil, nil, prob
	}
	// TODO(@cpu): `checkAlgorithm()`
	payload, err := parsedJWS.Verify(pubKey)
	if err != nil {
		return nil, nil, acme.MalformedProblem("JWS verification error")
	}
	nonce := parsedJWS.Signatures[0].Header.Nonce
	if len(nonce) == 0 {
		return nil, nil, acme.BadNonceProblem("JWS has no anti-replay nonce")
	}
	// Roll a random number between 0 and 100.
	nonceRoll := rand.Intn(100)
	// If the nonce is not valid OR if the nonceRoll was less than the
	// nonceErrPercent, fail with an error
	if !wfe.nonce.validNonce(nonce) || nonceRoll < wfe.nonceErrPercent {
		return nil, nil, acme.BadNonceProblem(fmt.Sprintf(
			"JWS has an invalid anti-replay nonce: %s", nonce))
	}
	// The protected "url" header must match the URL the request was made to.
	headerURL, ok := parsedJWS.Signatures[0].Header.ExtraHeaders[jose.HeaderKey("url")].(string)
	if !ok || len(headerURL) == 0 {
		return nil, nil, acme.MalformedProblem("JWS header parameter 'url' required.")
	}
	expectedURL := url.URL{
		// NOTE(@cpu): ACME **REQUIRES** HTTPS and Pebble is hardcoded to offer the
		// API over HTTPS.
		Scheme: "https",
		Host:   request.Host,
		Path:   request.RequestURI,
	}
	if expectedURL.String() != headerURL {
		return nil, nil, acme.MalformedProblem(fmt.Sprintf(
			"JWS header parameter 'url' incorrect. Expected %q, got %q",
			expectedURL.String(), headerURL))
	}
	return []byte(payload), pubKey, nil
}
// isASCII reports whether every rune in str falls within the ASCII
// character set.
func isASCII(str string) bool {
	return strings.IndexFunc(str, func(r rune) bool {
		return r > unicode.MaxASCII
	}) == -1
}
// verifyContacts checks that an account's contact list is within the allowed
// size and that every entry is a parseable, ASCII-only mailto: URL.
func (wfe *WebFrontEndImpl) verifyContacts(acct acme.Account) *acme.ProblemDetails {
	contacts := acct.Contact
	// Providing no Contacts at all is perfectly acceptable
	if len(contacts) == 0 {
		return nil
	}
	if len(contacts) > maxContactsPerAcct {
		return acme.MalformedProblem(fmt.Sprintf(
			"too many contacts provided: %d > %d", len(contacts), maxContactsPerAcct))
	}
	for _, contact := range contacts {
		parsed, err := url.Parse(contact)
		if err != nil {
			return acme.InvalidContactProblem(fmt.Sprintf("contact %q is invalid", contact))
		}
		if parsed.Scheme != "mailto" {
			return acme.UnsupportedContactProblem(fmt.Sprintf(
				"contact method %q is not supported", parsed.Scheme))
		}
		email := parsed.Opaque
		// An empty or omitted Contact array should be used instead of an
		// empty contact entry.
		if email == "" {
			return acme.InvalidContactProblem("empty contact email")
		}
		if !isASCII(email) {
			return acme.InvalidContactProblem(fmt.Sprintf(
				"contact email %q contains non-ASCII characters", email))
		}
		// NOTE(@cpu): ParseAddress may allow invalid emails since it supports
		// RFC 5322 display names. This is sufficient for Pebble because we
		// don't intend to use the emails for anything and check this as a best
		// effort for client developers to test invalid contact problems.
		if _, err := mail.ParseAddress(email); err != nil {
			return acme.InvalidContactProblem(fmt.Sprintf(
				"contact email %q is invalid", email))
		}
	}
	return nil
}
// NewAccount handles POSTs to the new-account endpoint: it verifies the JWS
// (self-signed with an embedded JWK since no account exists yet), checks the
// terms-of-service agreement and the contacts, and then either returns the
// existing account for the key (200) or creates and stores a new one (201).
func (wfe *WebFrontEndImpl) NewAccount(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// We use extractJWK rather than lookupJWK here because the account is not yet
	// created, so the user provides the full key in a JWS header rather than
	// referring to an existing key.
	body, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.extractJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// newAcctReq is the ACME account information submitted by the client
	var newAcctReq struct {
		Contact            []string `json:"contact"`
		ToSAgreed          bool     `json:"termsOfServiceAgreed"`
		OnlyReturnExisting bool     `json:"onlyReturnExisting"`
	}
	err := json.Unmarshal(body, &newAcctReq)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling body JSON"), response)
		return
	}
	// Accounts must agree to the terms of service; point the client at them
	// via a Link header when they haven't.
	if newAcctReq.ToSAgreed == false {
		response.Header().Add("Link", link(ToSURL, "terms-of-service"))
		wfe.sendError(
			acme.AgreementRequiredProblem(
				"Provided account did not agree to the terms of service"),
			response)
		return
	}
	// Create a new account object with the provided contact
	newAcct := core.Account{
		Account: acme.Account{
			Contact: newAcctReq.Contact,
			// New accounts are valid to start.
			Status: acme.StatusValid,
		},
		Key: key,
	}
	// Verify that the contact information provided is supported & valid
	prob = wfe.verifyContacts(newAcct.Account)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// The account ID is derived deterministically from the key's digest.
	keyID, err := keyToID(key)
	if err != nil {
		wfe.sendError(acme.MalformedProblem(err.Error()), response)
		return
	}
	newAcct.ID = keyID
	// NOTE: We don't use wfe.getAccountByKey here because we want to treat a
	// "missing" account as a non-error
	existingAcct := wfe.db.GetAccountByID(newAcct.ID)
	if existingAcct != nil {
		// If there is an existing account then return a Location header pointing to
		// the account and a 200 OK response
		acctURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", acctPath, existingAcct.ID))
		response.Header().Set("Location", acctURL)
		_ = wfe.writeJsonResponse(response, http.StatusOK, nil)
		return
	} else if existingAcct == nil && newAcctReq.OnlyReturnExisting {
		// If there *isn't* an existing account and the created account request
		// contained OnlyReturnExisting then this is an error - return now before
		// creating a new account with the key
		wfe.sendError(acme.AccountDoesNotExistProblem(
			"unable to find existing account for only-return-existing request"), response)
		return
	}
	count, err := wfe.db.AddAccount(&newAcct)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error saving account"), response)
		return
	}
	wfe.log.Printf("There are now %d accounts in memory\n", count)
	acctURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", acctPath, newAcct.ID))
	response.Header().Add("Location", acctURL)
	err = wfe.writeJsonResponse(response, http.StatusCreated, newAcct)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling account"), response)
		return
	}
}
// verifyOrder checks that an order is well formed: it belongs to an account,
// specifies at least one identifier, and every identifier is a DNS
// identifier containing at most one wildcard, which must be the leftmost
// label ("*.").
func (wfe *WebFrontEndImpl) verifyOrder(order *core.Order, reg *core.Account) *acme.ProblemDetails {
	// Shouldn't happen - defensive check. These nil checks must run *before*
	// taking the lock: previously order.RLock() was called first, so a nil
	// order panicked before the check could ever fire.
	if order == nil {
		return acme.InternalErrorProblem("Order is nil")
	}
	if reg == nil {
		return acme.InternalErrorProblem("Account is nil")
	}
	// Lock the order for reading
	order.RLock()
	defer order.RUnlock()
	idents := order.Identifiers
	if len(idents) == 0 {
		return acme.MalformedProblem("Order did not specify any identifiers")
	}
	// Check that all of the identifiers in the new-order are DNS type
	for _, ident := range idents {
		if ident.Type != acme.IdentifierDNS {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included non-DNS type identifier: type %q, value %q",
				ident.Type, ident.Value))
		}
		// TODO(@cpu): We _very lightly_ validate the DNS identifiers in an order
		// compared to Boulder's full-fledged policy authority. We should consider
		// porting more of this logic to Pebble to let ACME clients test error
		// handling for policy rejection errors.
		rawDomain := ident.Value
		// If there is a wildcard character in the ident value there should be only
		// *one* instance
		if strings.Count(rawDomain, "*") > 1 {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included DNS type identifier with illegal wildcard value: "+
					"too many wildcards %q",
				rawDomain))
		} else if strings.Count(rawDomain, "*") == 1 {
			// If there is one wildcard character it should be the only character in
			// the leftmost label.
			if !strings.HasPrefix(rawDomain, "*.") {
				return acme.MalformedProblem(fmt.Sprintf(
					"Order included DNS type identifier with illegal wildcard value: "+
						"wildcard isn't leftmost prefix %q",
					rawDomain))
			}
		}
	}
	return nil
}
// makeAuthorizations populates an order with new pending authz's (one per
// order name, each with its challenges), saves them in the db, and records
// their URLs and objects on the order. The request parameter is required to
// make the authz URL's absolute based on the request host.
func (wfe *WebFrontEndImpl) makeAuthorizations(order *core.Order, request *http.Request) error {
	var auths []string
	var authObs []*core.Authorization
	// Lock the order for reading while walking its names.
	order.RLock()
	for _, name := range order.Names {
		now := wfe.clk.Now().UTC()
		expires := now.Add(pendingAuthzExpire)
		ident := acme.Identifier{
			Type:  acme.IdentifierDNS,
			Value: name,
		}
		authz := &core.Authorization{
			ID:          newToken(),
			ExpiresDate: expires,
			Order:       order,
			Authorization: acme.Authorization{
				Status:     acme.StatusPending,
				Identifier: ident,
				Expires:    expires.UTC().Format(time.RFC3339),
			},
		}
		authz.URL = wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", authzPath, authz.ID))
		// Create the challenges for this authz
		if err := wfe.makeChallenges(authz, request); err != nil {
			// BUGFIX: release the read lock before returning; previously the
			// error paths left the order read-locked forever, deadlocking any
			// later writer.
			order.RUnlock()
			return err
		}
		// Save the authorization in memory
		count, err := wfe.db.AddAuthorization(authz)
		if err != nil {
			order.RUnlock()
			return err
		}
		wfe.log.Printf("There are now %d authorizations in the db\n", count)
		// authz.URL was computed above; no need to rebuild the same URL.
		auths = append(auths, authz.URL)
		authObs = append(authObs, authz)
	}
	// Unlock the order from reading
	order.RUnlock()
	// Lock the order for writing & update the order's authorizations
	order.Lock()
	order.Authorizations = auths
	order.AuthorizationObjects = authObs
	order.Unlock()
	return nil
}
// makeChallenge creates a single pending challenge of the given type for
// authz, stores it in the db, and returns it. The request parameter is used
// to build the challenge's absolute URL.
func (wfe *WebFrontEndImpl) makeChallenge(
	chalType string,
	authz *core.Authorization,
	request *http.Request) (*core.Challenge, error) {
	id := newToken()
	chal := &core.Challenge{
		ID: id,
		Challenge: acme.Challenge{
			Type:   chalType,
			Token:  newToken(),
			URL:    wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", challengePath, id)),
			Status: acme.StatusPending,
		},
		Authz: authz,
	}
	// Persist to the in-memory database before handing the challenge back.
	if _, err := wfe.db.AddChallenge(chal); err != nil {
		return nil, err
	}
	return chal, nil
}
// makeChallenges populates an authz with new pending challenges. Wildcard
// identifiers receive only a DNS-01 challenge, matching Boulder/Let's
// Encrypt wildcard issuance policy; all other identifiers get every enabled
// challenge type. The request parameter is required to make the challenge
// URL's absolute based on the request host.
func (wfe *WebFrontEndImpl) makeChallenges(authz *core.Authorization, request *http.Request) error {
	var chalTypes []string
	if strings.HasPrefix(authz.Identifier.Value, "*.") {
		chalTypes = []string{acme.ChallengeDNS01}
	} else {
		chalTypes = []string{acme.ChallengeHTTP01, acme.ChallengeTLSSNI02, acme.ChallengeDNS01}
	}
	chals := make([]*core.Challenge, 0, len(chalTypes))
	for _, chalType := range chalTypes {
		chal, err := wfe.makeChallenge(chalType, authz, request)
		if err != nil {
			return err
		}
		chals = append(chals, chal)
	}
	// Attach the challenges under the authz's write lock.
	authz.Lock()
	authz.Challenges = nil
	for _, c := range chals {
		authz.Challenges = append(authz.Challenges, &c.Challenge)
	}
	authz.Unlock()
	return nil
}
// NewOrder creates a new Order request and populates its authorizations
func (wfe *WebFrontEndImpl) NewOrder(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	body, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Find the account that authenticated this POST.
	existingReg, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Unpack the order request body
	var newOrder acme.Order
	err := json.Unmarshal(body, &newOrder)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling body JSON: "+err.Error()), response)
		return
	}
	// Orders expire one day after creation. CONSISTENCY FIX: use the WFE's
	// clock (wfe.clk) rather than time.Now() — every other timestamp in this
	// file is derived from wfe.clk, so tests with a fake clock previously saw
	// an inconsistent expiry here.
	expires := wfe.clk.Now().AddDate(0, 0, 1)
	order := &core.Order{
		ID:        newToken(),
		AccountID: existingReg.ID,
		Order: acme.Order{
			Status:  acme.StatusPending,
			Expires: expires.UTC().Format(time.RFC3339),
			// Only the Identifiers, NotBefore and NotAfter from the submitted order
			// are carried forward
			Identifiers: newOrder.Identifiers,
			NotBefore:   newOrder.NotBefore,
			NotAfter:    newOrder.NotAfter,
		},
		ExpiresDate: expires,
	}
	// Verify the details of the order before creating authorizations. (The
	// local is named prob, not err: it holds a *acme.ProblemDetails and would
	// otherwise shadow the error variable above.)
	if prob := wfe.verifyOrder(order, existingReg); prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Collect all of the DNS identifier values up into a []string
	var orderNames []string
	for _, ident := range order.Identifiers {
		orderNames = append(orderNames, ident.Value)
	}
	// Store the unique lower version of the names on the order object
	order.Names = uniqueLowerNames(orderNames)
	// Create the authorizations for the order
	err = wfe.makeAuthorizations(order, request)
	if err != nil {
		wfe.sendError(
			acme.InternalErrorProblem("Error creating authorizations for order"), response)
		return
	}
	// Add the order to the in-memory DB
	count, err := wfe.db.AddOrder(order)
	if err != nil {
		wfe.sendError(
			acme.InternalErrorProblem("Error saving order"), response)
		return
	}
	wfe.log.Printf("Added order %q to the db\n", order.ID)
	wfe.log.Printf("There are now %d orders in the db\n", count)
	// Populate a finalization URL for this order
	order.Finalize = wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", orderFinalizePath, order.ID))
	orderURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", orderPath, order.ID))
	response.Header().Add("Location", orderURL)
	err = wfe.writeJsonResponse(response, http.StatusCreated, order.Order)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling order"), response)
		return
	}
}
// orderForDisplay preps a *core.Order for display by populating some fields
// based on the http.Request provided and returning an acme.Order ready to be
// rendered to JSON for display to an API client.
func (wfe *WebFrontEndImpl) orderForDisplay(
	order *core.Order,
	request *http.Request) acme.Order {
	// CONCURRENCY FIX: this function *writes* order.Finalize (and possibly
	// order.Certificate), so it must hold the write lock. The previous
	// RLock/RUnlock pair made these writes race with concurrent readers.
	order.Lock()
	defer order.Unlock()
	// Populate a finalization URL for this order
	order.Finalize = wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", orderFinalizePath, order.ID))
	// If the order has a cert ID then set the certificate URL by constructing
	// a relative path based on the HTTP request & the cert ID
	if order.CertificateObject != nil {
		order.Certificate = wfe.relativeEndpoint(
			request,
			certPath+order.CertificateObject.ID)
	}
	// Return only the embedded acme.Order, not the internal wrapper object
	return order.Order
}
// Order retrieves the details of an existing order
func (wfe *WebFrontEndImpl) Order(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// The order ID is everything after the order path prefix.
	orderID := strings.TrimPrefix(request.URL.Path, orderPath)
	order := wfe.db.GetOrderByID(orderID)
	if order == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}
	// Render the order as JSON with request-relative URLs populated.
	displayOrder := wfe.orderForDisplay(order, request)
	if err := wfe.writeJsonResponse(response, http.StatusOK, displayOrder); err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling order"), response)
		return
	}
}
// FinalizeOrder handles a POST to finalize an existing pending order with
// a CSR. It authenticates the request, checks that the order belongs to the
// authenticated account, is pending and unexpired, validates the CSR names
// against the order names, and then attempts issuance via maybeIssue.
func (wfe *WebFrontEndImpl) FinalizeOrder(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// Verify the POST request
	body, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Find the account corresponding to the key that authenticated the POST request
	existingAcct, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Find the order specified by the order ID
	orderID := strings.TrimPrefix(request.URL.Path, orderFinalizePath)
	existingOrder := wfe.db.GetOrderByID(orderID)
	if existingOrder == nil {
		// FIX: removed the explicit WriteHeader(http.StatusNotFound) that
		// preceded this call - sendError writes the problem's own status code,
		// and the earlier WriteHeader made the second one superfluous and
		// dropped the problem document's Content-Type header.
		wfe.sendError(acme.NotFoundProblem(fmt.Sprintf(
			"No order %q found for account ID %q", orderID, existingAcct.ID)), response)
		return
	}
	// Lock the order for reading the properties we need to check
	existingOrder.RLock()
	orderAccountID := existingOrder.AccountID
	orderStatus := existingOrder.Status
	orderExpires := existingOrder.ExpiresDate
	orderNames := existingOrder.Names
	// And then immediately unlock it again - we don't defer() here because
	// `maybeIssue` will also acquire a read lock and we call that before
	// returning
	existingOrder.RUnlock()
	// If the order doesn't belong to the account that authenticated the POST
	// request then pretend it doesn't exist.
	if orderAccountID != existingAcct.ID {
		// FIX: same superfluous-WriteHeader removal as above.
		wfe.sendError(acme.NotFoundProblem(fmt.Sprintf(
			"No order %q found for account ID %q", orderID, existingAcct.ID)), response)
		return
	}
	// The existing order must be in a pending status to finalize it
	if orderStatus != acme.StatusPending {
		wfe.sendError(acme.MalformedProblem(fmt.Sprintf(
			"Order's status (%q) was not pending", orderStatus)), response)
		return
	}
	// The existing order must not be expired
	if orderExpires.Before(wfe.clk.Now()) {
		wfe.sendError(acme.NotFoundProblem(fmt.Sprintf(
			"Order %q expired %s", orderID, orderExpires)), response)
		return
	}
	// The finalize POST body is expected to be the bytes from a base64 raw url
	// encoded CSR
	var finalizeMessage struct {
		CSR string
	}
	err := json.Unmarshal(body, &finalizeMessage)
	if err != nil {
		wfe.sendError(acme.MalformedProblem(fmt.Sprintf(
			"Error unmarshaling finalize order request body: %s", err.Error())), response)
		return
	}
	csrBytes, err := base64.RawURLEncoding.DecodeString(finalizeMessage.CSR)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error decoding Base64url-encoded CSR: "+err.Error()), response)
		return
	}
	parsedCSR, err := x509.ParseCertificateRequest(csrBytes)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error parsing Base64url-encoded CSR: "+err.Error()), response)
		return
	}
	// Check that the CSR has the same number of names as the initial order contained
	csrNames := uniqueLowerNames(parsedCSR.DNSNames)
	if len(csrNames) != len(orderNames) {
		// FIX: error message previously read "...than CSR specifieds" (typo).
		wfe.sendError(acme.UnauthorizedProblem(
			"Order includes different number of names than CSR specifies"), response)
		return
	}
	// Check that the CSR's names match the order names exactly
	for i, name := range orderNames {
		if name != csrNames[i] {
			wfe.sendError(acme.UnauthorizedProblem(
				fmt.Sprintf("CSR is missing Order domain %q", name)), response)
			return
		}
	}
	// Lock and update the order with the parsed CSR.
	existingOrder.Lock()
	existingOrder.ParsedCSR = parsedCSR
	existingOrder.Unlock()
	// Check whether the order is ready to issue, if it isn't, return a problem
	prob = wfe.maybeIssue(existingOrder)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Prepare the order for display as JSON
	orderReq := wfe.orderForDisplay(existingOrder, request)
	orderURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", orderPath, existingOrder.ID))
	response.Header().Add("Location", orderURL)
	err = wfe.writeJsonResponse(response, http.StatusOK, orderReq)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling order"), response)
		return
	}
}
// maybeIssue checks whether every authorization attached to the order is
// valid and unexpired. If so it hands the order to the CA for asynchronous
// completion and returns nil; otherwise it returns a problem describing the
// first authorization that isn't ready.
func (wfe *WebFrontEndImpl) maybeIssue(order *core.Order) *acme.ProblemDetails {
	// Snapshot the fields we need while holding the read lock, then release
	// it before examining each authorization.
	order.RLock()
	orderAuthzs := order.AuthorizationObjects
	id := order.ID
	order.RUnlock()
	for _, a := range orderAuthzs {
		// Snapshot each authorization's state under its own read lock.
		a.RLock()
		status := a.Status
		expires := a.ExpiresDate
		identifier := a.Identifier
		a.RUnlock()
		// A single non-valid authorization blocks issuance.
		if status != acme.StatusValid {
			return acme.UnauthorizedProblem(fmt.Sprintf(
				"Authorization for %q is not status valid", identifier.Value))
		}
		// An expired authorization also blocks issuance.
		if expires.Before(wfe.clk.Now()) {
			return acme.UnauthorizedProblem(fmt.Sprintf(
				"Authorization for %q expired %q", identifier.Value, expires))
		}
	}
	// All the authorizations are valid, ask the CA to complete the order in
	// a separate goroutine. CompleteOrder will transition the order status.
	wfe.log.Printf("Order %s is fully authorized. Processing finalization", id)
	go wfe.ca.CompleteOrder(order)
	return nil
}
// prepAuthorizationForDisplay converts an acme.Authorization into the form
// shown to ACME clients: wildcard identifiers lose their "*." prefix and
// the Wildcard field is set to true instead.
func prepAuthorizationForDisplay(authz acme.Authorization) *acme.Authorization {
	if val := authz.Identifier.Value; strings.HasPrefix(val, "*.") {
		authz.Wildcard = true
		authz.Identifier.Value = val[len("*."):]
	}
	return &authz
}
// Authz renders an existing authorization as JSON, responding 404 when the
// ID taken from the request path doesn't match a stored authorization.
func (wfe *WebFrontEndImpl) Authz(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	id := strings.TrimPrefix(request.URL.Path, authzPath)
	found := wfe.db.GetAuthorizationByID(id)
	if found == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}
	// Strip wildcard prefixes etc. before showing the authz to the client.
	if err := wfe.writeJsonResponse(
		response,
		http.StatusOK,
		prepAuthorizationForDisplay(found.Authorization)); err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling authz"), response)
	}
}
// Challenge dispatches challenge requests: POSTs initiate validation via
// updateChallenge, anything else is a read handled by getChallenge.
func (wfe *WebFrontEndImpl) Challenge(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	switch request.Method {
	case "POST":
		wfe.updateChallenge(ctx, logEvent, response, request)
	default:
		wfe.getChallenge(ctx, logEvent, response, request)
	}
}
// getChallenge writes the JSON form of an existing challenge, or a 404 when
// the challenge ID taken from the request path is unknown.
func (wfe *WebFrontEndImpl) getChallenge(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	id := strings.TrimPrefix(request.URL.Path, challengePath)
	found := wfe.db.GetChallengeByID(id)
	if found == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}
	// Hold the read lock while serializing so the challenge can't change
	// underneath us.
	found.RLock()
	defer found.RUnlock()
	if err := wfe.writeJsonResponse(response, http.StatusOK, found.Challenge); err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling challenge"), response)
	}
}
// getAcctByKey resolves a public key to the account registered with it. It
// returns a problem when the key can't be digested into an ID or when no
// account exists for that ID.
func (wfe *WebFrontEndImpl) getAcctByKey(key crypto.PublicKey) (*core.Account, *acme.ProblemDetails) {
	// The account ID is a digest of the key, so compute that first.
	id, err := keyToID(key)
	if err != nil {
		wfe.log.Printf("keyToID err: %s\n", err.Error())
		return nil, acme.MalformedProblem("Error computing key digest")
	}
	// Then look the account up by that ID.
	acct := wfe.db.GetAccountByID(id)
	if acct == nil {
		return nil, acme.AccountDoesNotExistProblem(
			"URL in JWS 'kid' field does not correspond to an account")
	}
	return acct, nil
}
// validateChallengeUpdate checks that a client's challenge POST is valid:
// the challenge must still be pending and the submitted key authorization
// must match the one expected for the account's key. On success the
// challenge's parent authorization is returned.
func (wfe *WebFrontEndImpl) validateChallengeUpdate(
	chal *core.Challenge,
	update *acme.Challenge,
	acct *core.Account) (*core.Authorization, *acme.ProblemDetails) {
	// Read-lock the challenge for the duration of the validation.
	chal.RLock()
	defer chal.RUnlock()
	// Only pending challenges may be updated.
	if chal.Status != acme.StatusPending {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Cannot update challenge with status %s, only status %s",
				chal.Status, acme.StatusPending))
	}
	// The client must prove control of the account key by sending the key
	// authorization expected for that key.
	if expected := chal.ExpectedKeyAuthorization(acct.Key); expected != update.KeyAuthorization {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Incorrect key authorization: %q",
				update.KeyAuthorization))
	}
	return chal.Authz, nil
}
// validateAuthzForChallenge checks that an authz is:
//  1. for a supported (DNS) identifier type
//  2. not expired
//  3. associated to an order
// The associated order is returned when no problems are found so the caller
// doesn't need another RLock() to get the order pointer later.
func (wfe *WebFrontEndImpl) validateAuthzForChallenge(authz *core.Authorization) (*core.Order, *acme.ProblemDetails) {
	// Read-lock the authz for the duration of the checks.
	authz.RLock()
	defer authz.RUnlock()
	if identifier := authz.Identifier; identifier.Type != acme.IdentifierDNS {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Authorization identifier was type %s, only %s is supported",
				identifier.Type, acme.IdentifierDNS))
	}
	if wfe.clk.Now().After(authz.ExpiresDate) {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Authorization expired %s",
				authz.ExpiresDate.Format(time.RFC3339)))
	}
	order := authz.Order
	if order == nil {
		return nil, acme.InternalErrorProblem("authz missing associated order")
	}
	return order, nil
}
// updateChallenge handles a POST to an existing challenge. It authenticates
// the request, validates the update against the owning account,
// authorization and order, then dispatches an asynchronous validation job
// to the VA and echoes the challenge back to the client.
func (wfe *WebFrontEndImpl) updateChallenge(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	body, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	existingAcct, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	var chalResp acme.Challenge
	err := json.Unmarshal(body, &chalResp)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling body JSON"), response)
		return
	}
	chalID := strings.TrimPrefix(request.URL.Path, challengePath)
	existingChal := wfe.db.GetChallengeByID(chalID)
	if existingChal == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}
	authz, prob := wfe.validateChallengeUpdate(existingChal, &chalResp, existingAcct)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	if authz == nil {
		wfe.sendError(
			acme.InternalErrorProblem("challenge missing associated authz"), response)
		return
	}
	existingOrder, prob := wfe.validateAuthzForChallenge(authz)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// FIX: snapshot the order expiry under the read lock and release the lock
	// immediately. The original code returned from inside the locked region
	// on the expired path without calling RUnlock, leaking the read lock.
	existingOrder.RLock()
	orderExpires := existingOrder.ExpiresDate
	existingOrder.RUnlock()
	if wfe.clk.Now().After(orderExpires) {
		// FIX: the format string previously had two %s verbs for a single
		// argument, producing a "%!s(MISSING)" artifact in the message.
		wfe.sendError(
			acme.MalformedProblem(fmt.Sprintf("order expired %s",
				orderExpires.Format(time.RFC3339))), response)
		return
	}
	// Lock the authorization to get the identifier value
	authz.RLock()
	ident := authz.Identifier.Value
	authz.RUnlock()
	// If the identifier value is for a wildcard domain then strip the wildcard
	// prefix before dispatching the validation to ensure the base domain is
	// validated. (TrimPrefix is a no-op for non-wildcard identifiers.)
	ident = strings.TrimPrefix(ident, "*.")
	// Submit a validation job to the VA, this will be processed asynchronously
	wfe.va.ValidateChallenge(ident, existingChal, existingAcct)
	// Lock the challenge for reading in order to write the response
	existingChal.RLock()
	defer existingChal.RUnlock()
	response.Header().Add("Link", link(existingChal.Authz.URL, "up"))
	err = wfe.writeJsonResponse(response, http.StatusOK, existingChal.Challenge)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling challenge"), response)
		return
	}
}
// Certificate serves a PEM certificate chain by serial number, or a 404
// when no stored certificate matches the serial in the request path.
func (wfe *WebFrontEndImpl) Certificate(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	serial := strings.TrimPrefix(request.URL.Path, certPath)
	found := wfe.db.GetCertificateByID(serial)
	if found == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}
	response.Header().Set("Content-Type", "application/pem-certificate-chain; charset=utf-8")
	response.WriteHeader(http.StatusOK)
	// A write error here has no useful handler; ignore it deliberately.
	_, _ = response.Write(found.Chain())
}
// writeJsonResponse marshals v as indented JSON and writes it with the
// given HTTP status. The marshalling error (if any) is returned for the
// caller to handle; write errors are deliberately ignored.
func (wfe *WebFrontEndImpl) writeJsonResponse(response http.ResponseWriter, status int, v interface{}) error {
	reply, err := marshalIndent(v)
	if err != nil {
		// All callers are responsible for handling this error.
		return err
	}
	response.Header().Set("Content-Type", "application/json; charset=utf-8")
	response.WriteHeader(status)
	// Don't worry about returning an error from Write() because the caller
	// will never handle it.
	_, _ = response.Write(reply)
	return nil
}
func addNoCacheHeader(response http.ResponseWriter) {
response.Header().Add("Cache-Control", "public, max-age=0, no-cache")
}
// marshalIndent renders v as human-readable, indented JSON.
func marshalIndent(v interface{}) ([]byte, error) {
	const (
		prefix = ""
		indent = " "
	)
	return json.MarshalIndent(v, prefix, indent)
}
// link formats a URL and relation as an HTTP Link header value, for example
// `<https://example.com/authz/1>;rel="up"`.
func link(url, relation string) string {
	return "<" + url + `>;rel="` + relation + `"`
}
// uniqueLowerNames returns the set of all unique names in the input after
// all of them are lowercased. The returned names are lowercased and sorted
// alphabetically. See Boulder `core/util.go UniqueLowerNames`.
func uniqueLowerNames(names []string) []string {
	// Use a set to deduplicate the lowercased names.
	seen := make(map[string]struct{}, len(names))
	for _, n := range names {
		seen[strings.ToLower(n)] = struct{}{}
	}
	result := make([]string, 0, len(seen))
	for n := range seen {
		result = append(result, n)
	}
	sort.Strings(result)
	return result
}
Reorganise NewAccount to exit early if the account already exists (#93)
Previously, if the request body was nil, a terms-of-service-not-accepted error would be returned. This changes the behaviour to return 200, regardless of the request body, when an account already exists.
package wfe
import (
"context"
"crypto"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/mail"
"net/url"
"os"
"path"
"sort"
"strconv"
"strings"
"time"
"unicode"
"gopkg.in/square/go-jose.v2"
"github.com/jmhodges/clock"
"github.com/letsencrypt/pebble/acme"
"github.com/letsencrypt/pebble/ca"
"github.com/letsencrypt/pebble/core"
"github.com/letsencrypt/pebble/db"
"github.com/letsencrypt/pebble/va"
)
const (
	// Note: We deliberately pick endpoint paths that differ from Boulder to
	// exercise clients processing of the /directory response
	directoryPath     = "/dir"
	noncePath         = "/nonce-plz"
	newAccountPath    = "/sign-me-up"
	acctPath          = "/my-account/"
	newOrderPath      = "/order-plz"
	orderPath         = "/my-order/"
	orderFinalizePath = "/finalize-order/"
	authzPath         = "/authZ/"
	challengePath     = "/chalZ/"
	certPath          = "/certZ/"
	// How long do pending authorizations last before expiring?
	pendingAuthzExpire = time.Hour
	// How many contacts is an account allowed to have?
	maxContactsPerAcct = 2
	// badNonceEnvVar defines the environment variable name used to provide
	// a percentage value for how often good nonces should be rejected as if they
	// were bad. This can be used to exercise client nonce handling/retries.
	// To have the WFE not reject any good nonces, run Pebble like:
	//   PEBBLE_WFE_NONCEREJECT=0 pebble
	// To have the WFE reject 15% of good nonces, run Pebble like:
	//   PEBBLE_WFE_NONCEREJECT=15 pebble
	badNonceEnvVar = "PEBBLE_WFE_NONCEREJECT"
	// By default when no PEBBLE_WFE_NONCEREJECT is set, what percentage of good
	// nonces are rejected?
	defaultNonceReject = 15
)
// requestEvent captures per-request metadata used for logging. Fields are
// omitted from the JSON encoding when empty.
type requestEvent struct {
	// ClientAddr is the remote address of the requesting client.
	ClientAddr string `json:",omitempty"`
	// Endpoint is the endpoint pattern (joined with the request URL path)
	// that handled the request; set in HandleFunc's wrapper.
	Endpoint string `json:",omitempty"`
	// Method is the HTTP method of the request.
	Method string `json:",omitempty"`
	// UserAgent is the value of the client's User-Agent header.
	UserAgent string `json:",omitempty"`
}
// wfeHandlerFunc adapts an ordinary function to the wfeHandler interface,
// mirroring the http.HandlerFunc pattern.
type wfeHandlerFunc func(context.Context, *requestEvent, http.ResponseWriter, *http.Request)

// ServeHTTP invokes f with a fresh TODO context.
func (f wfeHandlerFunc) ServeHTTP(e *requestEvent, w http.ResponseWriter, r *http.Request) {
	ctx := context.TODO()
	f(ctx, e, w, r)
}
// wfeHandler is like http.Handler but additionally receives the
// requestEvent describing the request being served.
type wfeHandler interface {
	ServeHTTP(e *requestEvent, w http.ResponseWriter, r *http.Request)
}
// topHandler wraps a wfeHandler so it can be installed as a standard
// http.Handler; its ServeHTTP constructs the requestEvent passed down.
type topHandler struct {
	// wfe is the wrapped handler that receives the requestEvent.
	wfe wfeHandler
}
// ServeHTTP builds a requestEvent from the incoming request and delegates
// to the wrapped wfeHandler.
func (th *topHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// TODO(@cpu): consider restoring X-Forwarded-For handling for ClientAddr
	event := &requestEvent{
		ClientAddr: r.RemoteAddr,
		Method:     r.Method,
		UserAgent:  r.Header.Get("User-Agent"),
	}
	th.wfe.ServeHTTP(event, w, r)
}
// WebFrontEndImpl is the ACME web front end: it owns the HTTP handlers, the
// nonce store, and references to the backing database, VA and CA.
type WebFrontEndImpl struct {
	// log receives all WFE log output.
	log *log.Logger
	// db is the in-memory store of accounts, orders, authzs and certs.
	db *db.MemoryStore
	// nonce tracks issued anti-replay nonces.
	nonce *nonceMap
	// nonceErrPercent is the percentage of otherwise-good nonces that are
	// rejected anyway, to exercise client retry handling.
	nonceErrPercent int
	// clk is the clock used for all expiry comparisons.
	clk clock.Clock
	// va performs challenge validation (see updateChallenge).
	va *va.VAImpl
	// ca completes fully-authorized orders (see maybeIssue).
	ca *ca.CAImpl
}
// ToSURL is the terms-of-service URL advertised in the directory's "meta"
// section and in "terms-of-service" Link headers.
const ToSURL = "data:text/plain,Do%20what%20thou%20wilt"
// New constructs a WebFrontEndImpl wired to the given logger, clock, store,
// VA and CA. The percentage of good nonces to reject is read from the
// PEBBLE_WFE_NONCEREJECT environment variable, falling back to
// defaultNonceReject, and clipped to a sensible range.
func New(
	log *log.Logger,
	clk clock.Clock,
	db *db.MemoryStore,
	va *va.VAImpl,
	ca *ca.CAImpl) WebFrontEndImpl {
	// Start from the default and override it with a parseable env var value.
	nonceErrPercent := defaultNonceReject
	if val, err := strconv.ParseInt(os.Getenv(badNonceEnvVar), 10, 0); err == nil {
		nonceErrPercent = int(val)
	}
	// Clip out-of-range values sensibly.
	switch {
	case nonceErrPercent < 0:
		nonceErrPercent = 0
	case nonceErrPercent > 100:
		nonceErrPercent = 99
	}
	log.Printf("Configured to reject %d%% of good nonces", nonceErrPercent)
	return WebFrontEndImpl{
		log:             log,
		db:              db,
		nonce:           newNonceMap(),
		nonceErrPercent: nonceErrPercent,
		clk:             clk,
		va:              va,
		ca:              ca,
	}
}
// HandleFunc registers handler on mux for the given pattern, restricted to
// the listed HTTP methods (GET implies HEAD). The installed wrapper adds a
// fresh Replay-Nonce header and a no-cache header to every response,
// records the endpoint in the request log event, rejects disallowed
// methods with a 405, and runs the handler under a one-minute timeout
// context.
func (wfe *WebFrontEndImpl) HandleFunc(
	mux *http.ServeMux,
	pattern string,
	handler wfeHandlerFunc,
	methods ...string) {
	// Build a set of the allowed methods for quick lookup.
	methodsMap := make(map[string]bool)
	for _, m := range methods {
		methodsMap[m] = true
	}
	if methodsMap["GET"] && !methodsMap["HEAD"] {
		// Allow HEAD for any resource that allows GET
		methods = append(methods, "HEAD")
		methodsMap["HEAD"] = true
	}
	// Pre-compute the Allow header value used in 405 responses.
	methodsStr := strings.Join(methods, ", ")
	defaultHandler := http.StripPrefix(pattern,
		&topHandler{
			wfe: wfeHandlerFunc(func(ctx context.Context, logEvent *requestEvent, response http.ResponseWriter, request *http.Request) {
				// Every response carries a fresh anti-replay nonce.
				response.Header().Set("Replay-Nonce", wfe.nonce.createNonce())
				logEvent.Endpoint = pattern
				if request.URL != nil {
					logEvent.Endpoint = path.Join(logEvent.Endpoint, request.URL.Path)
				}
				addNoCacheHeader(response)
				// Reject methods that weren't registered for this pattern.
				if !methodsMap[request.Method] {
					response.Header().Set("Allow", methodsStr)
					wfe.sendError(acme.MethodNotAllowed(), response)
					return
				}
				wfe.log.Printf("%s %s -> calling handler()\n", request.Method, logEvent.Endpoint)
				// TODO(@cpu): Configureable request timeout
				timeout := 1 * time.Minute
				ctx, cancel := context.WithTimeout(ctx, timeout)
				handler(ctx, logEvent, response, request)
				cancel()
			},
			)})
	mux.Handle(pattern, defaultHandler)
}
// sendError writes prob to the response as a problem+json document using
// the problem's own HTTP status code.
func (wfe *WebFrontEndImpl) sendError(prob *acme.ProblemDetails, response http.ResponseWriter) {
	doc, err := marshalIndent(prob)
	if err != nil {
		// Fall back to a canned document if the problem itself won't marshal.
		doc = []byte(`{"detail": "Problem marshalling error message."}`)
	}
	response.Header().Set("Content-Type", "application/problem+json; charset=utf-8")
	response.WriteHeader(prob.HTTPStatus)
	response.Write(doc)
}
// Handler returns an http.Handler with all of the WFE's endpoints
// registered on a fresh ServeMux.
func (wfe *WebFrontEndImpl) Handler() http.Handler {
	m := http.NewServeMux()
	wfe.HandleFunc(m, directoryPath, wfe.Directory, "GET")
	// Note for noncePath: "GET" also implies "HEAD"
	wfe.HandleFunc(m, noncePath, wfe.Nonce, "GET")
	wfe.HandleFunc(m, newAccountPath, wfe.NewAccount, "POST")
	wfe.HandleFunc(m, newOrderPath, wfe.NewOrder, "POST")
	wfe.HandleFunc(m, orderPath, wfe.Order, "GET")
	wfe.HandleFunc(m, orderFinalizePath, wfe.FinalizeOrder, "POST")
	wfe.HandleFunc(m, authzPath, wfe.Authz, "GET")
	wfe.HandleFunc(m, challengePath, wfe.Challenge, "GET", "POST")
	wfe.HandleFunc(m, certPath, wfe.Certificate, "GET")
	// TODO(@cpu): Handle POST to acctPath for existing account updates
	return m
}
// Directory serves the ACME directory document mapping resource names to
// absolute endpoint URLs derived from the request.
func (wfe *WebFrontEndImpl) Directory(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	endpoints := map[string]string{
		"newNonce":   noncePath,
		"newAccount": newAccountPath,
		"newOrder":   newOrderPath,
	}
	response.Header().Set("Content-Type", "application/json; charset=utf-8")
	body, err := wfe.relativeDirectory(request, endpoints)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("unable to create directory"), response)
		return
	}
	response.Write(body)
}
// relativeDirectory converts the endpoint paths in directory into absolute
// URLs based on the request, adds the "meta" section, and returns the
// result as indented JSON.
func (wfe *WebFrontEndImpl) relativeDirectory(request *http.Request, directory map[string]string) ([]byte, error) {
	// Build a same-sized map holding the absolute-ized endpoints.
	absolute := make(map[string]interface{}, len(directory))
	for name, endpoint := range directory {
		absolute[name] = wfe.relativeEndpoint(request, endpoint)
	}
	absolute["meta"] = map[string]string{
		"termsOfService": ToSURL,
	}
	// Marshalling known strings shouldn't fail, but propagate any error.
	return marshalIndent(absolute)
}
// relativeEndpoint builds an absolute URL for endpoint using the request's
// host and protocol. An X-Forwarded-Proto header, when present, overrides
// the protocol inferred from the connection.
func (wfe *WebFrontEndImpl) relativeEndpoint(request *http.Request, endpoint string) string {
	proto := "http"
	// If the request was received via TLS, use `https://` for the protocol.
	if request.TLS != nil {
		proto = "https"
	}
	// Allow upstream proxies to specify the forwarded protocol. Allow this
	// value to override our own guess.
	if forwarded := request.Header.Get("X-Forwarded-Proto"); forwarded != "" {
		proto = forwarded
	}
	// Default to "localhost" when no request.Host is provided. Otherwise
	// requests with an empty `Host` produce results like
	// `http:///acme/new-authz`.
	host := request.Host
	if host == "" {
		host = "localhost"
	}
	result := url.URL{Scheme: proto, Host: host, Path: endpoint}
	return result.String()
}
// Nonce handles requests for a fresh nonce. The Replay-Nonce header itself
// is added to every response by the wrapper installed in HandleFunc, so
// this handler only needs to write a 204 No Content status.
func (wfe *WebFrontEndImpl) Nonce(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	response.WriteHeader(http.StatusNoContent)
}
// keyToID produces a string with the hex representation of the SHA256
// digest over a provided public key. We use this for acme.Account ID values
// because it makes looking up an account by key easy (required by the spec
// for retrieving existing accounts), and because it makes the reg URLs
// somewhat human digestible/comparable.
func keyToID(key crypto.PublicKey) (string, error) {
	switch k := key.(type) {
	case *jose.JSONWebKey:
		if k == nil {
			return "", fmt.Errorf("Cannot compute ID of nil key")
		}
		// Unwrap the pointer and recurse on the embedded key.
		return keyToID(k.Key)
	case jose.JSONWebKey:
		return keyToID(k.Key)
	}
	// For raw crypto.PublicKey values, digest the DER-encoded SPKI bytes.
	der, err := x509.MarshalPKIXPublicKey(key)
	if err != nil {
		return "", err
	}
	digest := sha256.Sum256(der)
	return hex.EncodeToString(digest[:]), nil
}
// parseJWS parses body as a JSON Web Signature, enforcing the ACME v2
// restrictions: no unprotected "header" field, no "signatures" array, and
// exactly one signature.
func (wfe *WebFrontEndImpl) parseJWS(body string) (*jose.JSONWebSignature, error) {
	// Inspect the raw JSON before handing it to jose.ParseSigned, because
	// ParseSigned strips away the fields we need to reject.
	var disallowed struct {
		Header     map[string]string
		Signatures []interface{}
	}
	if err := json.Unmarshal([]byte(body), &disallowed); err != nil {
		return nil, errors.New("Parse error reading JWS")
	}
	// ACME v2 never uses values from the unprotected JWS header. Reject JWS
	// that include unprotected headers.
	if disallowed.Header != nil {
		return nil, errors.New(
			"JWS \"header\" field not allowed. All headers must be in \"protected\" field")
	}
	// ACME v2 never uses the "signatures" array of JSON serialized JWS, just
	// the mandatory "signature" field. Reject JWS that include the
	// "signatures" array.
	if len(disallowed.Signatures) > 0 {
		return nil, errors.New(
			"JWS \"signatures\" field not allowed. Only the \"signature\" field should contain a signature")
	}
	parsed, err := jose.ParseSigned(body)
	if err != nil {
		return nil, errors.New("Parse error reading JWS")
	}
	switch {
	case len(parsed.Signatures) > 1:
		return nil, errors.New("Too many signatures in POST body")
	case len(parsed.Signatures) == 0:
		return nil, errors.New("POST JWS not signed")
	}
	return parsed, nil
}
// extractJWK returns the JSONWebKey embedded in the JWS protected header,
// rejecting requests whose key is missing or invalid, or that also carry a
// key ID (jwk and kid are mutually exclusive).
func (wfe *WebFrontEndImpl) extractJWK(_ *http.Request, jws *jose.JSONWebSignature) (*jose.JSONWebKey, *acme.ProblemDetails) {
	header := jws.Signatures[0].Header
	jwk := header.JSONWebKey
	switch {
	case jwk == nil:
		return nil, acme.MalformedProblem("No JWK in JWS header")
	case !jwk.Valid():
		return nil, acme.MalformedProblem("Invalid JWK in JWS header")
	case header.KeyID != "":
		return nil, acme.MalformedProblem("jwk and kid header fields are mutually exclusive.")
	}
	return jwk, nil
}
// lookupJWK resolves the "kid" (key ID) field of the JWS header to a stored
// account's key. The kid is expected to be the account's full URL.
func (wfe *WebFrontEndImpl) lookupJWK(request *http.Request, jws *jose.JSONWebSignature) (*jose.JSONWebKey, *acme.ProblemDetails) {
	header := jws.Signatures[0].Header
	accountURL := header.KeyID
	// Strip the account path prefix to recover the bare account ID.
	accountID := strings.TrimPrefix(accountURL, wfe.relativeEndpoint(request, acctPath))
	if accountID == "" {
		return nil, acme.MalformedProblem("No key ID (kid) in JWS header")
	}
	account := wfe.db.GetAccountByID(accountID)
	if account == nil {
		return nil, acme.AccountDoesNotExistProblem(fmt.Sprintf(
			"Account %s not found.", accountURL))
	}
	// A request must authenticate with either jwk or kid, never both.
	if header.JSONWebKey != nil {
		return nil, acme.MalformedProblem("jwk and kid header fields are mutually exclusive.")
	}
	return account.Key, nil
}
// keyExtractor is a function that returns a JSONWebKey based on input from a
// user-provided JSONWebSignature, for instance by extracting it from the input
// (extractJWK), or by looking it up in a database based on the input
// (lookupJWK).
type keyExtractor func(*http.Request, *jose.JSONWebSignature) (*jose.JSONWebKey, *acme.ProblemDetails)
// verifyPOST authenticates a POSTed JWS request: it checks required and
// forbidden headers, parses the body as a JWS, verifies the signature using
// the key produced by kx, consumes the anti-replay nonce, and checks that
// the signed "url" header matches the URL the request was made to. On
// success it returns the verified payload bytes and the verification key.
//
// NOTE: Unlike `verifyPOST` from the Boulder WFE this version does not
// presently handle the `regCheck` parameter or do any lookups for existing
// accounts.
func (wfe *WebFrontEndImpl) verifyPOST(
	ctx context.Context,
	logEvent *requestEvent,
	request *http.Request,
	kx keyExtractor) ([]byte, *jose.JSONWebKey, *acme.ProblemDetails) {
	if _, present := request.Header["Content-Length"]; !present {
		return nil, nil, acme.MalformedProblem("missing Content-Length header on POST")
	}
	// Per 6.4.1 "Replay-Nonce" clients should not send a Replay-Nonce header in
	// the HTTP request, it needs to be part of the signed JWS request body
	if _, present := request.Header["Replay-Nonce"]; present {
		return nil, nil, acme.MalformedProblem("HTTP requests should NOT contain Replay-Nonce header. Use JWS nonce field")
	}
	if request.Body == nil {
		return nil, nil, acme.MalformedProblem("no body on POST")
	}
	bodyBytes, err := ioutil.ReadAll(request.Body)
	if err != nil {
		return nil, nil, acme.InternalErrorProblem("unable to read request body")
	}
	body := string(bodyBytes)
	// parseJWS enforces the ACME v2 JWS restrictions (single signature, no
	// unprotected header, no "signatures" array).
	parsedJWS, err := wfe.parseJWS(body)
	if err != nil {
		return nil, nil, acme.MalformedProblem(err.Error())
	}
	// Obtain the verification key: either embedded in the JWS (extractJWK)
	// or looked up from the kid (lookupJWK), depending on the caller.
	pubKey, prob := kx(request, parsedJWS)
	if prob != nil {
		return nil, nil, prob
	}
	// TODO(@cpu): `checkAlgorithm()`
	payload, err := parsedJWS.Verify(pubKey)
	if err != nil {
		return nil, nil, acme.MalformedProblem("JWS verification error")
	}
	nonce := parsedJWS.Signatures[0].Header.Nonce
	if len(nonce) == 0 {
		return nil, nil, acme.BadNonceProblem("JWS has no anti-replay nonce")
	}
	// Roll a random number between 0 and 100.
	nonceRoll := rand.Intn(100)
	// If the nonce is not valid OR if the nonceRoll was less than the
	// nonceErrPercent, fail with an error
	if !wfe.nonce.validNonce(nonce) || nonceRoll < wfe.nonceErrPercent {
		return nil, nil, acme.BadNonceProblem(fmt.Sprintf(
			"JWS has an invalid anti-replay nonce: %s", nonce))
	}
	// The protected "url" header must be present and match the request URL.
	headerURL, ok := parsedJWS.Signatures[0].Header.ExtraHeaders[jose.HeaderKey("url")].(string)
	if !ok || len(headerURL) == 0 {
		return nil, nil, acme.MalformedProblem("JWS header parameter 'url' required.")
	}
	expectedURL := url.URL{
		// NOTE(@cpu): ACME **REQUIRES** HTTPS and Pebble is hardcoded to offer the
		// API over HTTPS.
		Scheme: "https",
		Host:   request.Host,
		Path:   request.RequestURI,
	}
	if expectedURL.String() != headerURL {
		return nil, nil, acme.MalformedProblem(fmt.Sprintf(
			"JWS header parameter 'url' incorrect. Expected %q, got %q",
			expectedURL.String(), headerURL))
	}
	return []byte(payload), pubKey, nil
}
// isASCII reports whether every rune in str falls within the ASCII
// character set.
func isASCII(str string) bool {
	for _, r := range str {
		if r <= unicode.MaxASCII {
			continue
		}
		return false
	}
	return true
}
// verifyContacts validates the contact URLs on an account: at most
// maxContactsPerAcct entries, each a parseable "mailto:" URL with a
// non-empty, ASCII, plausibly-valid email address. Returns nil when the
// contacts are acceptable.
func (wfe *WebFrontEndImpl) verifyContacts(acct acme.Account) *acme.ProblemDetails {
	contacts := acct.Contact
	// Providing no Contacts is perfectly acceptable. (The previous explicit
	// nil check was redundant: len of a nil slice is 0.)
	if len(contacts) == 0 {
		return nil
	}
	if len(contacts) > maxContactsPerAcct {
		return acme.MalformedProblem(fmt.Sprintf(
			"too many contacts provided: %d > %d", len(contacts), maxContactsPerAcct))
	}
	for _, c := range contacts {
		parsed, err := url.Parse(c)
		if err != nil {
			return acme.InvalidContactProblem(fmt.Sprintf("contact %q is invalid", c))
		}
		if parsed.Scheme != "mailto" {
			return acme.UnsupportedContactProblem(fmt.Sprintf(
				"contact method %q is not supported", parsed.Scheme))
		}
		email := parsed.Opaque
		// An empty or omitted Contact array should be used instead of an
		// empty contact entry.
		if email == "" {
			return acme.InvalidContactProblem("empty contact email")
		}
		if !isASCII(email) {
			return acme.InvalidContactProblem(fmt.Sprintf(
				"contact email %q contains non-ASCII characters", email))
		}
		// NOTE(@cpu): ParseAddress may allow invalid emails since it supports RFC 5322
		// display names. This is sufficient for Pebble because we don't intend to
		// use the emails for anything and check this as a best effort for client
		// developers to test invalid contact problems.
		if _, err := mail.ParseAddress(email); err != nil {
			return acme.InvalidContactProblem(fmt.Sprintf(
				"contact email %q is invalid", email))
		}
	}
	return nil
}
// NewAccount handles POSTs to the new-account endpoint. If an account
// already exists for the signing key it is returned with a 200 regardless
// of the request body. Otherwise a new account is created, provided the
// request isn't an only-return-existing lookup, agrees to the ToS, and
// carries valid contacts.
func (wfe *WebFrontEndImpl) NewAccount(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// We use extractJWK rather than lookupJWK here because the account is not yet
	// created, so the user provides the full key in a JWS header rather
	// than referring to an existing key.
	body, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.extractJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// newAcctReq is the ACME account information submitted by the client
	var newAcctReq struct {
		Contact            []string `json:"contact"`
		ToSAgreed          bool     `json:"termsOfServiceAgreed"`
		OnlyReturnExisting bool     `json:"onlyReturnExisting"`
	}
	err := json.Unmarshal(body, &newAcctReq)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling body JSON"), response)
		return
	}
	keyID, err := keyToID(key)
	if err != nil {
		wfe.sendError(acme.MalformedProblem(err.Error()), response)
		return
	}
	// Lookup existing account to exit early if it exists
	// NOTE: We don't use wfe.getAcctByKey here because we want to treat a
	// "missing" account as a non-error
	existingAcct := wfe.db.GetAccountByID(keyID)
	if existingAcct != nil {
		// If there is an existing account then return a Location header pointing to
		// the account and a 200 OK response
		acctURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", acctPath, existingAcct.ID))
		response.Header().Set("Location", acctURL)
		_ = wfe.writeJsonResponse(response, http.StatusOK, nil)
		return
	}
	// There is no existing account. If the request contained
	// OnlyReturnExisting then this is an error - return now before creating
	// a new account with the key. (The former `existingAcct == nil &&`
	// condition was redundant; it is always true on this path.)
	if newAcctReq.OnlyReturnExisting {
		wfe.sendError(acme.AccountDoesNotExistProblem(
			"unable to find existing account for only-return-existing request"), response)
		return
	}
	// New accounts must agree to the terms of service.
	if !newAcctReq.ToSAgreed {
		response.Header().Add("Link", link(ToSURL, "terms-of-service"))
		wfe.sendError(
			acme.AgreementRequiredProblem(
				"Provided account did not agree to the terms of service"),
			response)
		return
	}
	// Create a new account object with the provided contact
	newAcct := core.Account{
		Account: acme.Account{
			Contact: newAcctReq.Contact,
			// New accounts are valid to start.
			Status: acme.StatusValid,
		},
		Key: key,
		ID:  keyID,
	}
	// Verify that the contact information provided is supported & valid
	prob = wfe.verifyContacts(newAcct.Account)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	count, err := wfe.db.AddAccount(&newAcct)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error saving account"), response)
		return
	}
	wfe.log.Printf("There are now %d accounts in memory\n", count)
	acctURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", acctPath, newAcct.ID))
	response.Header().Add("Location", acctURL)
	err = wfe.writeJsonResponse(response, http.StatusCreated, newAcct)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling account"), response)
		return
	}
}
// verifyOrder validates a new order's identifiers: at least one identifier,
// all of DNS type, and any wildcard restricted to a single "*" that is the
// leftmost label. Returns a problem describing the first violation found,
// or nil when the order is acceptable.
func (wfe *WebFrontEndImpl) verifyOrder(order *core.Order, reg *core.Account) *acme.ProblemDetails {
	// Shouldn't happen - defensive check. FIX: these nil checks must run
	// before touching order; the original code called order.RLock() first,
	// which would panic on a nil order before the check could ever fire.
	if order == nil {
		return acme.InternalErrorProblem("Order is nil")
	}
	if reg == nil {
		return acme.InternalErrorProblem("Account is nil")
	}
	// Lock the order for reading
	order.RLock()
	defer order.RUnlock()
	idents := order.Identifiers
	if len(idents) == 0 {
		return acme.MalformedProblem("Order did not specify any identifiers")
	}
	// Check that all of the identifiers in the new-order are DNS type
	for _, ident := range idents {
		if ident.Type != acme.IdentifierDNS {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included non-DNS type identifier: type %q, value %q",
				ident.Type, ident.Value))
		}
		// TODO(@cpu): We _very lightly_ validate the DNS identifiers in an order
		// compared to Boulder's full-fledged policy authority. We should consider
		// porting more of this logic to Pebble to let ACME clients test error
		// handling for policy rejection errors.
		rawDomain := ident.Value
		// If there is a wildcard character in the ident value there should be only
		// *one* instance
		if strings.Count(rawDomain, "*") > 1 {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included DNS type identifier with illegal wildcard value: "+
					"too many wildcards %q",
				rawDomain))
		} else if strings.Count(rawDomain, "*") == 1 {
			// If there is one wildcard character it should be the only character in
			// the leftmost label.
			if !strings.HasPrefix(rawDomain, "*.") {
				return acme.MalformedProblem(fmt.Sprintf(
					"Order included DNS type identifier with illegal wildcard value: "+
						"wildcard isn't leftmost prefix %q",
					rawDomain))
			}
		}
	}
	return nil
}
// makeAuthorizations populates an order with new authz's. The request parameter
// is required to make the authz URL's absolute based on the request host.
func (wfe *WebFrontEndImpl) makeAuthorizations(order *core.Order, request *http.Request) error {
	var auths []string
	var authObs []*core.Authorization
	// Snapshot the order's names under the read lock and release it
	// immediately. The original code held the read lock across the whole loop
	// and returned early on error without unlocking, leaking the lock.
	order.RLock()
	names := make([]string, len(order.Names))
	copy(names, order.Names)
	order.RUnlock()
	// Create one authz for each name in the order
	for _, name := range names {
		now := wfe.clk.Now().UTC()
		expires := now.Add(pendingAuthzExpire)
		ident := acme.Identifier{
			Type:  acme.IdentifierDNS,
			Value: name,
		}
		authz := &core.Authorization{
			ID:          newToken(),
			ExpiresDate: expires,
			Order:       order,
			Authorization: acme.Authorization{
				Status:     acme.StatusPending,
				Identifier: ident,
				Expires:    expires.UTC().Format(time.RFC3339),
			},
		}
		authz.URL = wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", authzPath, authz.ID))
		// Create the challenges for this authz
		if err := wfe.makeChallenges(authz, request); err != nil {
			return err
		}
		// Save the authorization in memory
		count, err := wfe.db.AddAuthorization(authz)
		if err != nil {
			return err
		}
		wfe.log.Printf("There are now %d authorizations in the db\n", count)
		auths = append(auths, authz.URL)
		authObs = append(authObs, authz)
	}
	// Lock the order for writing & update the order's authorizations
	order.Lock()
	order.Authorizations = auths
	order.AuthorizationObjects = authObs
	order.Unlock()
	return nil
}
// makeChallenge creates a pending challenge of the given type for the
// supplied authz, stores it in the in-memory database, and returns it.
func (wfe *WebFrontEndImpl) makeChallenge(
	chalType string,
	authz *core.Authorization,
	request *http.Request) (*core.Challenge, error) {
	// Mint the ID up front so the challenge URL can reference it.
	chalID := newToken()
	challenge := &core.Challenge{
		ID:    chalID,
		Authz: authz,
		Challenge: acme.Challenge{
			Type:   chalType,
			Token:  newToken(),
			URL:    wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", challengePath, chalID)),
			Status: acme.StatusPending,
		},
	}
	// Persist the new challenge before handing it back.
	if _, err := wfe.db.AddChallenge(challenge); err != nil {
		return nil, err
	}
	return challenge, nil
}
// makeChallenges populates an authz with new challenges. The request parameter
// is required to make the challenge URL's absolute based on the request host.
func (wfe *WebFrontEndImpl) makeChallenges(authz *core.Authorization, request *http.Request) error {
	// Wildcard identifiers only get a DNS-01 challenge, matching
	// Boulder/Let's Encrypt wildcard issuance policy; everything else gets
	// every enabled challenge type.
	var chalTypes []string
	if strings.HasPrefix(authz.Identifier.Value, "*.") {
		chalTypes = []string{acme.ChallengeDNS01}
	} else {
		chalTypes = []string{acme.ChallengeHTTP01, acme.ChallengeTLSSNI02, acme.ChallengeDNS01}
	}
	created := make([]*core.Challenge, 0, len(chalTypes))
	for _, chalType := range chalTypes {
		chal, err := wfe.makeChallenge(chalType, authz, request)
		if err != nil {
			return err
		}
		created = append(created, chal)
	}
	// Take the write lock to swap in the freshly created challenges.
	authz.Lock()
	defer authz.Unlock()
	authz.Challenges = nil
	for _, chal := range created {
		authz.Challenges = append(authz.Challenges, &chal.Challenge)
	}
	return nil
}
// NewOrder creates a new Order request for the authenticated account,
// populates its authorizations, stores it, and returns it with a Location
// header pointing at the new order resource.
func (wfe *WebFrontEndImpl) NewOrder(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	body, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	existingReg, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Unpack the order request body
	var newOrder acme.Order
	err := json.Unmarshal(body, &newOrder)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling body JSON: "+err.Error()), response)
		return
	}
	// Use the WFE's clock rather than time.Now() so the expiry is consistent
	// with every other timestamp in this file and controllable in tests.
	expires := wfe.clk.Now().AddDate(0, 0, 1)
	order := &core.Order{
		ID:        newToken(),
		AccountID: existingReg.ID,
		Order: acme.Order{
			Status:  acme.StatusPending,
			Expires: expires.UTC().Format(time.RFC3339),
			// Only the Identifiers, NotBefore and NotAfter from the submitted order
			// are carried forward
			Identifiers: newOrder.Identifiers,
			NotBefore:   newOrder.NotBefore,
			NotAfter:    newOrder.NotAfter,
		},
		ExpiresDate: expires,
	}
	// Verify the details of the order before creating authorizations
	if err := wfe.verifyOrder(order, existingReg); err != nil {
		wfe.sendError(err, response)
		return
	}
	// Collect all of the DNS identifier values up into a []string
	var orderNames []string
	for _, ident := range order.Identifiers {
		orderNames = append(orderNames, ident.Value)
	}
	// Store the unique lower version of the names on the order object
	order.Names = uniqueLowerNames(orderNames)
	// Create the authorizations for the order
	err = wfe.makeAuthorizations(order, request)
	if err != nil {
		wfe.sendError(
			acme.InternalErrorProblem("Error creating authorizations for order"), response)
		return
	}
	// Add the order to the in-memory DB
	count, err := wfe.db.AddOrder(order)
	if err != nil {
		wfe.sendError(
			acme.InternalErrorProblem("Error saving order"), response)
		return
	}
	wfe.log.Printf("Added order %q to the db\n", order.ID)
	wfe.log.Printf("There are now %d orders in the db\n", count)
	// Populate a finalization URL for this order
	order.Finalize = wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", orderFinalizePath, order.ID))
	orderURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", orderPath, order.ID))
	response.Header().Add("Location", orderURL)
	err = wfe.writeJsonResponse(response, http.StatusCreated, order.Order)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling order"), response)
		return
	}
}
// orderForDisplay preps a *core.Order for display by populating some fields
// based on the http.Request provided and returning an acme.Order ready to be
// rendered to JSON for display to an API client.
func (wfe *WebFrontEndImpl) orderForDisplay(
	order *core.Order,
	request *http.Request) acme.Order {
	// This function *writes* to the order (Finalize and Certificate) so it
	// must hold the write lock - the original code mutated these fields while
	// holding only the read lock.
	order.Lock()
	defer order.Unlock()
	// Populate a finalization URL for this order
	order.Finalize = wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", orderFinalizePath, order.ID))
	// If the order has a cert ID then set the certificate URL by constructing
	// a relative path based on the HTTP request & the cert ID
	if order.CertificateObject != nil {
		order.Certificate = wfe.relativeEndpoint(
			request,
			certPath+order.CertificateObject.ID)
	}
	// Return only the embedded acme.Order, not the internal object
	return order.Order
}
// Order retrieves the details of an existing order and renders it as JSON,
// or responds 404 when the order ID is unknown.
func (wfe *WebFrontEndImpl) Order(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// The order ID is everything after the order path prefix.
	id := strings.TrimPrefix(request.URL.Path, orderPath)
	existing := wfe.db.GetOrderByID(id)
	if existing == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}
	// Prepare a display-ready copy of the order and serialize it.
	display := wfe.orderForDisplay(existing, request)
	if err := wfe.writeJsonResponse(response, http.StatusOK, display); err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling order"), response)
	}
}
// FinalizeOrder handles a POST to an order's finalize URL. It authenticates
// the request, checks the order belongs to the account and is pending and
// unexpired, validates that the submitted CSR covers exactly the order's
// names, then hands off to maybeIssue and renders the updated order.
func (wfe *WebFrontEndImpl) FinalizeOrder(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// Verify the POST request
	body, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Find the account corresponding to the key that authenticated the POST request
	existingAcct, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Find the order specified by the order ID
	orderID := strings.TrimPrefix(request.URL.Path, orderFinalizePath)
	existingOrder := wfe.db.GetOrderByID(orderID)
	if existingOrder == nil {
		// Don't call response.WriteHeader before sendError: sendError writes
		// its own status code and an earlier WriteHeader would make that a
		// "superfluous response.WriteHeader" no-op.
		wfe.sendError(acme.NotFoundProblem(fmt.Sprintf(
			"No order %q found for account ID %q", orderID, existingAcct.ID)), response)
		return
	}
	// Lock the order for reading the properties we need to check
	existingOrder.RLock()
	orderAccountID := existingOrder.AccountID
	orderStatus := existingOrder.Status
	orderExpires := existingOrder.ExpiresDate
	orderNames := existingOrder.Names
	// And then immediately unlock it again - we don't defer() here because
	// `maybeIssue` will also acquire a read lock and we call that before
	// returning
	existingOrder.RUnlock()
	// If the order doesn't belong to the account that authenticated the POST
	// request then pretend it doesn't exist.
	if orderAccountID != existingAcct.ID {
		wfe.sendError(acme.NotFoundProblem(fmt.Sprintf(
			"No order %q found for account ID %q", orderID, existingAcct.ID)), response)
		return
	}
	// The existing order must be in a pending status to finalize it
	if orderStatus != acme.StatusPending {
		wfe.sendError(acme.MalformedProblem(fmt.Sprintf(
			"Order's status (%q) was not pending", orderStatus)), response)
		return
	}
	// The existing order must not be expired
	if orderExpires.Before(wfe.clk.Now()) {
		wfe.sendError(acme.NotFoundProblem(fmt.Sprintf(
			"Order %q expired %s", orderID, orderExpires)), response)
		return
	}
	// The finalize POST body is expected to be the bytes from a base64 raw url
	// encoded CSR
	var finalizeMessage struct {
		CSR string
	}
	err := json.Unmarshal(body, &finalizeMessage)
	if err != nil {
		wfe.sendError(acme.MalformedProblem(fmt.Sprintf(
			"Error unmarshaling finalize order request body: %s", err.Error())), response)
		return
	}
	csrBytes, err := base64.RawURLEncoding.DecodeString(finalizeMessage.CSR)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error decoding Base64url-encoded CSR: "+err.Error()), response)
		return
	}
	parsedCSR, err := x509.ParseCertificateRequest(csrBytes)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error parsing Base64url-encoded CSR: "+err.Error()), response)
		return
	}
	// Check that the CSR has the same number of names as the initial order contained
	csrNames := uniqueLowerNames(parsedCSR.DNSNames)
	if len(csrNames) != len(orderNames) {
		wfe.sendError(acme.UnauthorizedProblem(
			"Order includes different number of names than CSR specifies"), response)
		return
	}
	// Check that the CSR's names match the order names exactly
	for i, name := range orderNames {
		if name != csrNames[i] {
			wfe.sendError(acme.UnauthorizedProblem(
				fmt.Sprintf("CSR is missing Order domain %q", name)), response)
			return
		}
	}
	// Lock and update the order with the parsed CSR.
	existingOrder.Lock()
	existingOrder.ParsedCSR = parsedCSR
	existingOrder.Unlock()
	// Check whether the order is ready to issue, if it isn't, return a problem
	prob = wfe.maybeIssue(existingOrder)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Prepare the order for display as JSON
	orderReq := wfe.orderForDisplay(existingOrder, request)
	orderURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", orderPath, existingOrder.ID))
	response.Header().Add("Location", orderURL)
	err = wfe.writeJsonResponse(response, http.StatusOK, orderReq)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling order"), response)
		return
	}
}
// maybeIssue checks that every authorization attached to the order is valid
// and unexpired. If so it hands the order off to the CA asynchronously and
// returns nil; otherwise it returns a problem describing the first
// authorization that is not ready.
func (wfe *WebFrontEndImpl) maybeIssue(order *core.Order) *acme.ProblemDetails {
	// Snapshot the fields we need under the order's read lock.
	order.RLock()
	authzs := order.AuthorizationObjects
	orderID := order.ID
	order.RUnlock()
	for _, authz := range authzs {
		// Copy each authz's state under its own read lock.
		authz.RLock()
		status := authz.Status
		expires := authz.ExpiresDate
		ident := authz.Identifier
		authz.RUnlock()
		// A non-valid authz means the order isn't ready to issue.
		if status != acme.StatusValid {
			return acme.UnauthorizedProblem(fmt.Sprintf(
				"Authorization for %q is not status valid", ident.Value))
		}
		// Likewise an expired authz.
		if expires.Before(wfe.clk.Now()) {
			return acme.UnauthorizedProblem(fmt.Sprintf(
				"Authorization for %q expired %q", ident.Value, expires))
		}
	}
	// All the authorizations are valid, ask the CA to complete the order in
	// a separate goroutine. CompleteOrder will transition the order status.
	wfe.log.Printf("Order %s is fully authorized. Processing finalization", orderID)
	go wfe.ca.CompleteOrder(order)
	return nil
}
// prepAuthorizationForDisplay prepares the provided acme.Authorization for
// display to an ACME client: wildcard identifiers are shown with the "*."
// prefix stripped and the Wildcard flag set instead.
func prepAuthorizationForDisplay(authz acme.Authorization) *acme.Authorization {
	const wildcardPrefix = "*."
	if strings.HasPrefix(authz.Identifier.Value, wildcardPrefix) {
		// Move the wildcard-ness out of the value and into the flag.
		authz.Wildcard = true
		authz.Identifier.Value = authz.Identifier.Value[len(wildcardPrefix):]
	}
	return &authz
}
// Authz returns the JSON representation of the authorization named by the
// request path, or a 404 when no such authorization exists.
func (wfe *WebFrontEndImpl) Authz(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	id := strings.TrimPrefix(request.URL.Path, authzPath)
	found := wfe.db.GetAuthorizationByID(id)
	if found == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}
	display := prepAuthorizationForDisplay(found.Authorization)
	if err := wfe.writeJsonResponse(response, http.StatusOK, display); err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling authz"), response)
	}
}
// Challenge dispatches challenge requests: a POST initiates validation via
// updateChallenge; any other method is a read handled by getChallenge.
func (wfe *WebFrontEndImpl) Challenge(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	switch request.Method {
	case "POST":
		wfe.updateChallenge(ctx, logEvent, response, request)
	default:
		wfe.getChallenge(ctx, logEvent, response, request)
	}
}
// getChallenge writes the JSON representation of the challenge named by the
// request path, or a 404 when the challenge is unknown.
func (wfe *WebFrontEndImpl) getChallenge(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	id := strings.TrimPrefix(request.URL.Path, challengePath)
	found := wfe.db.GetChallengeByID(id)
	if found == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}
	// Hold the read lock while the challenge body is serialized.
	found.RLock()
	defer found.RUnlock()
	if err := wfe.writeJsonResponse(response, http.StatusOK, found.Challenge); err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling challenge"), response)
	}
}
// getAcctByKey finds an account by its public key, or returns a problem
// pointer when the key is invalid or no account is registered for it.
func (wfe *WebFrontEndImpl) getAcctByKey(key crypto.PublicKey) (*core.Account, *acme.ProblemDetails) {
	// Derive the account ID from the signer's key.
	acctID, err := keyToID(key)
	if err != nil {
		wfe.log.Printf("keyToID err: %s\n", err.Error())
		return nil, acme.MalformedProblem("Error computing key digest")
	}
	// Look up the account registered under that ID.
	acct := wfe.db.GetAccountByID(acctID)
	if acct == nil {
		return nil, acme.AccountDoesNotExistProblem(
			"URL in JWS 'kid' field does not correspond to an account")
	}
	return acct, nil
}
// validateChallengeUpdate checks a POSTed challenge update: the stored
// challenge must still be pending and the submitted key authorization must
// match the one expected for the account's key. On success the challenge's
// owning authorization is returned.
func (wfe *WebFrontEndImpl) validateChallengeUpdate(
	chal *core.Challenge,
	update *acme.Challenge,
	acct *core.Account) (*core.Authorization, *acme.ProblemDetails) {
	chal.RLock()
	defer chal.RUnlock()
	// Only pending challenges may be updated.
	if chal.Status != acme.StatusPending {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Cannot update challenge with status %s, only status %s",
				chal.Status, acme.StatusPending))
	}
	// The client proves control of the account key by echoing the expected
	// key authorization.
	if want := chal.ExpectedKeyAuthorization(acct.Key); want != update.KeyAuthorization {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Incorrect key authorization: %q",
				update.KeyAuthorization))
	}
	return chal.Authz, nil
}
// validateAuthzForChallenge checks an authz is:
//  1. for a supported (DNS) identifier type,
//  2. not expired, and
//  3. associated to an order.
// The associated order is returned when no problems are found, so callers
// don't need to re-acquire the authz read lock to fetch it.
func (wfe *WebFrontEndImpl) validateAuthzForChallenge(authz *core.Authorization) (*core.Order, *acme.ProblemDetails) {
	authz.RLock()
	defer authz.RUnlock()
	if identType := authz.Identifier.Type; identType != acme.IdentifierDNS {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Authorization identifier was type %s, only %s is supported",
				identType, acme.IdentifierDNS))
	}
	if wfe.clk.Now().After(authz.ExpiresDate) {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Authorization expired %s",
				authz.ExpiresDate.Format(time.RFC3339)))
	}
	order := authz.Order
	if order == nil {
		return nil, acme.InternalErrorProblem("authz missing associated order")
	}
	return order, nil
}
// updateChallenge handles a POST to a challenge URL: it authenticates the
// account, validates the update against the stored challenge, its authz and
// its order, kicks off asynchronous validation in the VA, and echoes the
// challenge back to the client with a Link header to the owning authz.
func (wfe *WebFrontEndImpl) updateChallenge(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	body, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	existingAcct, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	var chalResp acme.Challenge
	err := json.Unmarshal(body, &chalResp)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling body JSON"), response)
		return
	}
	chalID := strings.TrimPrefix(request.URL.Path, challengePath)
	existingChal := wfe.db.GetChallengeByID(chalID)
	if existingChal == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}
	authz, prob := wfe.validateChallengeUpdate(existingChal, &chalResp, existingAcct)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	if authz == nil {
		wfe.sendError(
			acme.InternalErrorProblem("challenge missing associated authz"), response)
		return
	}
	existingOrder, prob := wfe.validateAuthzForChallenge(authz)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Snapshot the order expiry under the read lock and release the lock
	// before any early return. The original code returned while still holding
	// the read lock, permanently blocking writers of the order. It also used
	// two %s verbs with a single Sprintf argument, producing a
	// "%!s(MISSING)" artifact in the error message.
	existingOrder.RLock()
	orderExpires := existingOrder.ExpiresDate
	existingOrder.RUnlock()
	if wfe.clk.Now().After(orderExpires) {
		wfe.sendError(
			acme.MalformedProblem(fmt.Sprintf("order expired %s",
				orderExpires.Format(time.RFC3339))), response)
		return
	}
	// Lock the authorization to get the identifier value
	authz.RLock()
	ident := authz.Identifier.Value
	authz.RUnlock()
	// If the identifier value is for a wildcard domain then strip the wildcard
	// prefix before dispatching the validation to ensure the base domain is
	// validated. (TrimPrefix is a no-op for non-wildcard identifiers.)
	ident = strings.TrimPrefix(ident, "*.")
	// Submit a validation job to the VA, this will be processed asynchronously
	wfe.va.ValidateChallenge(ident, existingChal, existingAcct)
	// Lock the challenge for reading in order to write the response
	existingChal.RLock()
	defer existingChal.RUnlock()
	response.Header().Add("Link", link(existingChal.Authz.URL, "up"))
	err = wfe.writeJsonResponse(response, http.StatusOK, existingChal.Challenge)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling challenge"), response)
		return
	}
}
// Certificate returns the PEM certificate chain for the serial in the
// request path, or a 404 when the serial is unknown.
func (wfe *WebFrontEndImpl) Certificate(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	serial := strings.TrimPrefix(request.URL.Path, certPath)
	found := wfe.db.GetCertificateByID(serial)
	if found == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}
	response.Header().Set("Content-Type", "application/pem-certificate-chain; charset=utf-8")
	response.WriteHeader(http.StatusOK)
	// The write error is deliberately ignored: there is no meaningful
	// recovery once the status line has been sent.
	_, _ = response.Write(found.Chain())
}
// writeJsonResponse marshals v as indented JSON and writes it with the given
// status code. Marshal errors are returned to the caller before any bytes
// are written; write errors are ignored since callers cannot meaningfully
// recover once headers have been sent.
func (wfe *WebFrontEndImpl) writeJsonResponse(response http.ResponseWriter, status int, v interface{}) error {
	payload, err := marshalIndent(v)
	if err != nil {
		// All callers are responsible for handling this error.
		return err
	}
	response.Header().Set("Content-Type", "application/json; charset=utf-8")
	response.WriteHeader(status)
	_, _ = response.Write(payload)
	return nil
}
func addNoCacheHeader(response http.ResponseWriter) {
response.Header().Add("Cache-Control", "public, max-age=0, no-cache")
}
// marshalIndent renders v as JSON indented with three spaces, the formatting
// used for every API response body.
func marshalIndent(v interface{}) ([]byte, error) {
	const indent = "   " // three spaces
	return json.MarshalIndent(v, "", indent)
}
// link formats a value for the HTTP Link header: <url>;rel="relation".
func link(url, relation string) string {
	return "<" + url + `>;rel="` + relation + `"`
}
// uniqueLowerNames returns the set of all unique names in the input after all
// of them are lowercased. The returned names will be in their lowercased form
// and sorted alphabetically. See Boulder `core/util.go UniqueLowerNames`.
func uniqueLowerNames(names []string) []string {
	seen := make(map[string]struct{}, len(names))
	for _, n := range names {
		seen[strings.ToLower(n)] = struct{}{}
	}
	result := make([]string, 0, len(seen))
	for n := range seen {
		result = append(result, n)
	}
	sort.Strings(result)
	return result
}
|
package q
import (
"strings"
"github.com/leeola/fixity/q/operator"
"github.com/leeola/fixity/value"
"github.com/mgutz/str"
)
// FromString produces a Query from the given string.
//
// Intended for constructing Queries from user input.
//
// TODO(leeola): support AND/OR by looking check if one of the parts equals
// AND/OR directly. Can also support -AND and -OR. Though i may have to
// implement my own parsing, to group ( and ), eg AND( ... ).
func FromString(s string) Query {
	parts := str.ToArgv(s)
	// the fieldless constraint is any parts that do not produce
	// another type of constraint. Ie, the resulting string.
	var fieldless []string
	var cs []Constraint
	for _, p := range parts {
		op, field, valueStr := splitPart(p)
		if op == "" && field == "" {
			fieldless = append(fieldless, valueStr)
			continue
		}
		switch op {
		case "eq":
			op = operator.Equal
		case "":
			// A plain "field:value" part has no explicit operator; default
			// it to equality so the constraint is not built with an empty
			// operator string.
			op = operator.Equal
		}
		v := value.String(valueStr)
		cs = append(cs, Constraint{
			Operator: op,
			Field:    &field,
			Value:    &v,
		})
	}
	if len(fieldless) != 0 {
		v := value.String(strings.Join(fieldless, " "))
		cs = append(cs, Constraint{
			Operator: operator.Equal,
			Value:    &v,
		})
	}
	if len(cs) == 1 {
		return New().Const(cs[0])
	}
	return New().And(cs...)
}
// splitPart splits a single query part into (op, field, value). A part has
// one of three shapes: "value", "field:value" or "op:field:value"; colons
// beyond the second stay in the value.
func splitPart(s string) (op, field, value string) {
	head, rest, found := strings.Cut(s, ":")
	if !found {
		// "value"
		return "", "", s
	}
	mid, tail, found := strings.Cut(rest, ":")
	if !found {
		// "field:value"
		return "", head, rest
	}
	// "op:field:value"
	return head, mid, tail
}
feat: default empty op to equal
package q
import (
"strings"
"github.com/leeola/fixity/q/operator"
"github.com/leeola/fixity/value"
"github.com/mgutz/str"
)
// FromString produces a Query from the given string.
//
// Intended for constructing Queries from user input.
//
// TODO(leeola): support AND/OR by looking check if one of the parts equals
// AND/OR directly. Can also support -AND and -OR. Though i may have to
// implement my own parsing, to group ( and ), eg AND( ... ).
func FromString(s string) Query {
	// Parts carrying no op and no field are collected and joined into a
	// single trailing equality constraint.
	var fieldless []string
	var cs []Constraint
	for _, part := range str.ToArgv(s) {
		op, field, valueStr := splitPart(part)
		if op == "" && field == "" {
			fieldless = append(fieldless, valueStr)
			continue
		}
		// Both "eq" and the empty op mean equality. In the future the empty
		// op should probably translate to some type of loose operator,
		// maybe fts?
		if op == "eq" || op == "" {
			op = operator.Equal
		}
		v := value.String(valueStr)
		cs = append(cs, Constraint{
			Operator: op,
			Field:    &field,
			Value:    &v,
		})
	}
	if len(fieldless) > 0 {
		v := value.String(strings.Join(fieldless, " "))
		cs = append(cs, Constraint{
			Operator: operator.Equal,
			Value:    &v,
		})
	}
	if len(cs) == 1 {
		return New().Const(cs[0])
	}
	return New().And(cs...)
}
// splitPart splits a single query part on ":" into up to three components,
// covering the shapes "value", "field:value" and "op:field:value". Any
// colons beyond the second are kept in the value.
func splitPart(s string) (op, field, value string) {
	pieces := strings.SplitN(s, ":", 3)
	switch len(pieces) {
	case 1: // "value"
		return "", "", pieces[0]
	case 2: // "field:value"
		return "", pieces[0], pieces[1]
	default: // "op:field:value"
		return pieces[0], pieces[1], pieces[2]
	}
}
|
// Package chain enables flexible ordering and reuse of context-aware Handler
// wrapper chains.
package chain
import (
"net/http"
"golang.org/x/net/context"
)
// Handler interface must be implemented for a function to be able to be
// wrapped, or served.
type Handler interface {
	ServeHTTPContext(context.Context, http.ResponseWriter, *http.Request)
}

// HandlerFunc is an adapter which allows a function with the appropriate
// signature to be treated as a Handler.
type HandlerFunc func(context.Context, http.ResponseWriter, *http.Request)

// ServeHTTPContext calls h(ctx, w, r)
func (h HandlerFunc) ServeHTTPContext(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	h(ctx, w, r)
}

// Chain holds the basic components used to order Handler wrapper chains.
type Chain struct {
	// ctx is the context handed to the final handler when the chain serves.
	ctx context.Context
	// hws holds the Handler wrappers in the order they were registered.
	hws []func(Handler) Handler
}

// handlerAdapter bridges a context-aware Handler to the standard
// http.Handler interface by supplying a stored context at serve time.
type handlerAdapter struct {
	ctx context.Context
	h   Handler
}

// ServeHTTP serves the wrapped Handler with the adapter's stored context.
func (ha handlerAdapter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ha.h.ServeHTTPContext(ha.ctx, w, r)
}

// noCtxHandlerAdapter carries a plain http.Handler wrapper alongside the
// adapter; used by Meld to mix non-context-aware middleware into a chain.
type noCtxHandlerAdapter struct {
	handlerAdapter
	hw func(http.Handler) http.Handler
}
// New takes one or more Handler wrappers, and returns a new Chain.
func New(ctx context.Context, hws ...func(Handler) Handler) Chain {
	c := Chain{ctx: ctx}
	c.hws = hws
	return c
}
// Append takes one or more Handler wrappers and appends them to a copy of
// the Chain, which is returned; the receiver is not modified.
func (c Chain) Append(hws ...func(Handler) Handler) Chain {
	next := c
	next.hws = append(next.hws, hws...)
	return next
}
// End takes a Handler, applies the chain's wrappers around it, and returns
// an http.Handler. A nil Handler is replaced by a minimal handler that
// responds 204 No Content - returning a nil http.Handler (as the original
// did) panics as soon as it is registered and served.
func (c Chain) End(h Handler) http.Handler {
	if h == nil {
		h = HandlerFunc(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNoContent)
		})
	}
	// Wrap inside-out so the first registered wrapper is outermost.
	for i := len(c.hws) - 1; i >= 0; i-- {
		h = c.hws[i](h)
	}
	r := handlerAdapter{
		ctx: c.ctx, h: h,
	}
	return r
}
// EndFn takes a func that matches the HandlerFunc type, then passes it to End.
func (c Chain) EndFn(h HandlerFunc) http.Handler {
	// A nil func must be forwarded as a nil Handler interface; converting a
	// nil HandlerFunc directly would produce a non-nil interface value that
	// End's nil check cannot detect.
	var inner Handler
	if h != nil {
		inner = h
	}
	return c.End(inner)
}
// Meld takes a http.Handler wrapper and returns a Handler wrapper. This is
// useful for making non-context aware http.Handler wrappers compatible with
// the rest of a Handler Chain.
func Meld(hw func(http.Handler) http.Handler) func(Handler) Handler {
	return func(h Handler) Handler {
		melded := func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
			// Adapt the inner Handler so the plain middleware can invoke it
			// as an http.Handler while the context rides alongside.
			adapter := noCtxHandlerAdapter{
				handlerAdapter: handlerAdapter{ctx: ctx, h: h},
				hw:             hw,
			}
			hw(adapter).ServeHTTP(w, r)
		}
		return HandlerFunc(melded)
	}
}
Added nil end-point handler and updated End/EndFn.
// Package chain enables flexible ordering and reuse of context-aware Handler
// wrapper chains.
package chain
import (
"net/http"
"golang.org/x/net/context"
)
// Handler interface must be implemented for a function to be able to be
// wrapped, or served.
type Handler interface {
	ServeHTTPContext(context.Context, http.ResponseWriter, *http.Request)
}

// HandlerFunc is an adapter which allows a function with the appropriate
// signature to be treated as a Handler.
type HandlerFunc func(context.Context, http.ResponseWriter, *http.Request)

// ServeHTTPContext calls h(ctx, w, r)
func (h HandlerFunc) ServeHTTPContext(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	h(ctx, w, r)
}

// Chain holds the basic components used to order Handler wrapper chains.
type Chain struct {
	// ctx is the context handed to the final handler when the chain serves.
	ctx context.Context
	// hws holds the Handler wrappers in the order they were registered.
	hws []func(Handler) Handler
}

// handlerAdapter bridges a context-aware Handler to the standard
// http.Handler interface by supplying a stored context at serve time.
type handlerAdapter struct {
	ctx context.Context
	h   Handler
}

// ServeHTTP serves the wrapped Handler with the adapter's stored context.
func (ha handlerAdapter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ha.h.ServeHTTPContext(ha.ctx, w, r)
}

// noCtxHandlerAdapter carries a plain http.Handler wrapper alongside the
// adapter; used by Meld to mix non-context-aware middleware into a chain.
type noCtxHandlerAdapter struct {
	handlerAdapter
	hw func(http.Handler) http.Handler
}
// New takes one or more Handler wrappers, and returns a new Chain.
func New(ctx context.Context, hws ...func(Handler) Handler) Chain {
	return Chain{
		ctx: ctx,
		hws: hws,
	}
}
// Append takes one or more Handler wrappers and appends them to a copy of
// the Chain, which is returned; the receiver is not modified.
func (c Chain) Append(hws ...func(Handler) Handler) Chain {
	out := c
	out.hws = append(out.hws, hws...)
	return out
}
// End takes a Handler and returns an http.Handler. A nil Handler is
// substituted with noContentHandler so the result is always safe to serve.
func (c Chain) End(h Handler) http.Handler {
	if h == nil {
		h = HandlerFunc(noContentHandler)
	}
	// Wrap inside-out so the first registered wrapper is outermost.
	wrapped := h
	for i := len(c.hws) - 1; i >= 0; i-- {
		wrapped = c.hws[i](wrapped)
	}
	return handlerAdapter{ctx: c.ctx, h: wrapped}
}
// EndFn takes a func that matches the HandlerFunc type, then passes it to End.
func (c Chain) EndFn(h HandlerFunc) http.Handler {
	// Substitute a nil func before the interface conversion: a nil
	// HandlerFunc stored in the Handler interface would not compare equal
	// to nil inside End.
	if h == nil {
		h = HandlerFunc(noContentHandler)
	}
	return c.End(h)
}
// Meld takes a http.Handler wrapper and returns a Handler wrapper. This is
// useful for making non-context aware http.Handler wrappers compatible with
// the rest of a Handler Chain.
func Meld(hw func(http.Handler) http.Handler) func(Handler) Handler {
	return func(h Handler) Handler {
		melded := func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
			// Adapt the inner Handler so the plain middleware can invoke it
			// as an http.Handler while the context rides alongside.
			adapter := noCtxHandlerAdapter{
				handlerAdapter: handlerAdapter{ctx: ctx, h: h},
				hw:             hw,
			}
			hw(adapter).ServeHTTP(w, r)
		}
		return HandlerFunc(melded)
	}
}
func noContentHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent)
return
}
|
package wkg
// ByteOrder identifies the endianness flag of an encoded geometry.
type ByteOrder uint8

// Byte order markers: 0 for big endian, 1 for little endian.
// NOTE(review): these constants are untyped (plain iota), not declared as
// ByteOrder - confirm whether typed constants were intended.
const (
	BigEndian = iota
	LittleEndian
)

// Coord encodes the coordinate dimensionality of a geometry.
type Coord uint32

// Coordinate dimension flags, spaced 16 apart via iota << 4:
// Coord2D=0, CoordZ=16, CoordM=32, CoordZM=48.
const (
	Coord2D = iota << 4
	CoordZ
	CoordM
	CoordZM
)

// GeomType enumerates the geometry type codes; the zero value is unused.
type GeomType uint32

const (
	_ = iota
	GeomPoint
	GeomLineString
	GeomPolygon
	GeomMultiPoint
	GeomMultiLineString
	GeomMultiPolygon
	GeomGeometryCollection
)
Prototype of Well-Known Binary types
package wkg
import (
"encoding/binary"
"errors"
"math"
"unsafe"
)
// ByteOrder identifies the endianness flag that prefixes an encoded value.
type ByteOrder byte

// Byte order markers: 0 for big endian, 1 for little endian.
const (
	BigEndian = iota
	LittleEndian
)

// Kind enumerates the geometry type codes; the zero value is unused.
type Kind uint32

const (
	_ = iota
	GeomPoint
	GeomLineString
	GeomPolygon
	GeomMultiPoint
	GeomMultiLineString
	GeomMultiPolygon
	GeomCollection
)

// Wire sizes (in bytes) of the fixed-width pieces of an encoded value,
// derived from the in-memory sizes of the corresponding Go types.
// NOTE(review): PointSize assumes Point has no padding (two float64 fields).
const (
	ByteOrderSize = int(unsafe.Sizeof(ByteOrder(0)))
	GeomTypeSize  = int(unsafe.Sizeof(Kind(0)))
	HeaderSize    = ByteOrderSize + GeomTypeSize
	Uint32Size    = int(unsafe.Sizeof(uint32(0)))
	Float64Size   = int(unsafe.Sizeof(float64(0)))
	PointSize     = int(unsafe.Sizeof(Point{}))
)

// Sentinel errors returned by the Scan implementations.
var (
	ErrInvalidStorage   = errors.New("Invalid storage type or size")
	ErrUnsupportedValue = errors.New("Unsupported value")
)

// Point is a 2D coordinate pair.
type Point struct {
	X, Y float64
}

// Composite geometry types, each a slice of its component geometry.
type LineString []Point
type Polygon []LinearRing
type MultiPoint []Point
type MultiLineString []LineString
type MultiPolygon []Polygon

// Geometry pairs a Kind tag with its decoded value.
type Geometry struct {
	Kind  Kind
	Value interface{}
}

type GeometryCollection []Geometry
type LinearRing []Point
// Scan decodes an encoded point from a database value into p.
func (p *Point) Scan(src interface{}) error {
	b, dec, err := header(src, GeomPoint)
	if err != nil {
		return err
	}
	// A point payload is exactly two float64s.
	if len(b) < PointSize {
		return ErrInvalidStorage
	}
	rest, x := readFloat64(b, dec)
	_, y := readFloat64(rest, dec)
	p.X, p.Y = x, y
	return nil
}
func (ls *LineString) Scan(src interface{}) error {
b, dec, err := header(src, GeomLineString)
if err != nil {
return err
}
_, *ls, err = readPoints(b, dec)
return err
}
func (p *Polygon) Scan(src interface{}) error {
b, dec, err := header(src, GeomPolygon)
if err != nil {
return err
}
_, *p, err = readPolygon(b, dec)
return err
}
func (mp *MultiPoint) Scan(src interface{}) error {
b, dec, err := header(src, GeomMultiPoint)
if err != nil {
return err
}
_, *mp, err = readMultiPoint(b, dec)
return err
}
func (mls *MultiLineString) Scan(src interface{}) error {
b, dec, err := header(src, GeomMultiLineString)
if err != nil {
return err
}
_, *mls, err = readMultiLineString(b, dec)
return err
}
func (mp *MultiPolygon) Scan(src interface{}) error {
b, dec, err := header(src, GeomMultiPolygon)
if err != nil {
return err
}
_, *mp, err = readMultiPolygon(b, dec)
return err
}
// Scan decodes a complete WKB value of any supported kind from src,
// which must be a []byte, and stores the result in g.
func (g *Geometry) Scan(src interface{}) error {
	raw, ok := src.([]byte)
	if !ok {
		return ErrInvalidStorage
	}
	var err error
	_, *g, err = readGeometry(raw)
	return err
}
func readUint32(b []byte, dec binary.ByteOrder) ([]byte, uint32) {
return b[Uint32Size:], dec.Uint32(b)
}
func readCount(b []byte, dec binary.ByteOrder) ([]byte, int, error) {
if len(b) < Uint32Size {
return nil, 0, ErrInvalidStorage
}
b, n := readUint32(b, dec)
return b, int(n), nil
}
func readFloat64(b []byte, dec binary.ByteOrder) ([]byte, float64) {
return b[Float64Size:], math.Float64frombits(dec.Uint64(b))
}
func readPoint(b []byte, dec binary.ByteOrder) ([]byte, *Point, error) {
if len(b) < PointSize {
return nil, nil, ErrInvalidStorage
}
p := &Point{}
b, p.X = readFloat64(b, dec)
b, p.Y = readFloat64(b, dec)
return b, p, nil
}
func readPoints(b []byte, dec binary.ByteOrder) ([]byte, []Point, error) {
b, n, err := readCount(b, dec)
if err != nil {
return nil, nil, err
}
if len(b) < PointSize*n {
return nil, nil, ErrInvalidStorage
}
p := make([]Point, n)
for i := 0; i < n; i++ {
b, p[i].X = readFloat64(b, dec)
b, p[i].Y = readFloat64(b, dec)
}
return b, p, nil
}
func readLineString(b []byte, dec binary.ByteOrder) ([]byte, LineString, error) {
return readPoints(b, dec)
}
func readMultiPoint(b []byte, dec binary.ByteOrder) ([]byte, MultiPoint, error) {
return readPoints(b, dec)
}
func readMultiLineString(b []byte, dec binary.ByteOrder) ([]byte, MultiLineString, error) {
b, n, err := readCount(b, dec)
if err != nil {
return nil, nil, err
}
mls := make([]LineString, n)
for i := 0; i < n; i++ {
b, mls[i], err = readLineString(b, dec)
if err != nil {
return nil, nil, err
}
}
return b, mls, err
}
// readPolygon decodes a WKB polygon body: a uint32 ring count followed by
// that many linear rings (each itself a count-prefixed point list). It
// returns the bytes remaining after the polygon.
func readPolygon(b []byte, dec binary.ByteOrder) ([]byte, Polygon, error) {
	// readCount already verifies that at least Uint32Size bytes are
	// available (returning ErrInvalidStorage otherwise), so the duplicate
	// length check that used to precede it was removed.
	b, n, err := readCount(b, dec)
	if err != nil {
		return nil, nil, err
	}
	lr := make([]LinearRing, n)
	for i := 0; i < n; i++ {
		b, lr[i], err = readPoints(b, dec)
		if err != nil {
			return nil, nil, err
		}
	}
	return b, lr, nil
}
func readMultiPolygon(b []byte, dec binary.ByteOrder) ([]byte, MultiPolygon, error) {
b, n, err := readCount(b, dec)
if err != nil {
return nil, nil, err
}
mp := make([]Polygon, n)
for i := 0; i < n; i++ {
b, mp[i], err = readPolygon(b, dec)
if err != nil {
return nil, nil, err
}
}
return b, mp, nil
}
// readGeometry decodes one complete WKB value (order marker, type code,
// body) and returns the remaining bytes and the decoded Geometry.
// NOTE(review): an unknown order marker yields ErrInvalidStorage here but
// ErrUnsupportedValue in header — confirm which is intended.
func readGeometry(b []byte) ([]byte, Geometry, error) {
	g := Geometry{}
	if len(b) < HeaderSize {
		return nil, g, ErrInvalidStorage
	}
	dec := byteOrder(b[0])
	if dec == nil {
		return nil, g, ErrInvalidStorage
	}
	err := error(nil)
	b, kind := readUint32(b[ByteOrderSize:], dec)
	// Record the decoded type code. Previously g.Kind was never assigned,
	// so every returned Geometry carried the zero Kind.
	g.Kind = Kind(kind)
	switch kind {
	case GeomPoint:
		b, g.Value, err = readPoint(b, dec)
	case GeomLineString:
		b, g.Value, err = readLineString(b, dec)
	case GeomPolygon:
		b, g.Value, err = readPolygon(b, dec)
	case GeomMultiPoint:
		b, g.Value, err = readMultiPoint(b, dec)
	case GeomMultiLineString:
		b, g.Value, err = readMultiLineString(b, dec)
	case GeomMultiPolygon:
		b, g.Value, err = readMultiPolygon(b, dec)
	case GeomCollection:
		b, g.Value, err = readGeometryCollection(b, dec)
	default:
		err = ErrUnsupportedValue
	}
	if err != nil {
		return nil, g, err
	}
	return b, g, nil
}
func readGeometryCollection(b []byte, dec binary.ByteOrder) ([]byte, GeometryCollection, error) {
b, n, err := readCount(b, dec)
if err != nil {
return nil, nil, err
}
gc := make([]Geometry, n)
for i := 0; i < n; i++ {
b, gc[i], err = readGeometry(b)
if err != nil {
return nil, nil, err
}
}
return b, gc, nil
}
func header(src interface{}, tpe Kind) ([]byte, binary.ByteOrder, error) {
b, ok := src.([]byte)
if !ok {
return nil, nil, ErrInvalidStorage
}
if len(b) < HeaderSize {
return nil, nil, ErrInvalidStorage
}
dec := byteOrder(b[0])
if dec == nil {
return nil, nil, ErrUnsupportedValue
}
b, kind := readUint32(b[ByteOrderSize:], dec)
if tpe != Kind(kind) {
return nil, nil, ErrUnsupportedValue
}
return b, dec, nil
}
// byteOrder maps a WKB byte-order marker to the matching
// binary.ByteOrder decoder, or nil when the marker is unknown.
func byteOrder(b byte) binary.ByteOrder {
	if b == BigEndian {
		return binary.BigEndian
	}
	if b == LittleEndian {
		return binary.LittleEndian
	}
	return nil
}
|
package main
import "flag"
import "fmt"
import "strings"
type Formula struct {
value string
left *Formula
right *Formula
}
func parseFormula(f string) *Formula {
if strings.HasPrefix(f, "(") && strings.HasSuffix(f, ")") {
for _, connective := range []string{"^", "v", ">"} {
if strings.Contains(f, connective) {
endLeft := strings.Index(f, ")"+connective+"(") + 1
startRight := strings.Index(f, ")"+connective+"(") + 2
leftFormula := parseFormula(f[1:endLeft])
rightFormula := parseFormula(f[startRight : len(f)-1])
result := Formula{
value: connective,
left: leftFormula,
right: rightFormula,
}
return &result
}
}
}
if strings.HasPrefix(f, "~") {
leftFormula := parseFormula(f[1:])
result := Formula{
value: "~",
left: leftFormula,
right: nil,
}
return &result
}
result := Formula{
value: f,
left: nil,
right: nil,
}
return &result
}
// printFormula renders the tree back into string form. Leaves are printed
// bare; every inner node is wrapped in parentheses.
func printFormula(formula Formula) string {
	var left, right string
	if formula.left != nil {
		left = printFormula(*formula.left)
	}
	if formula.right != nil {
		right = printFormula(*formula.right)
	}
	if formula.left == nil && formula.right == nil {
		// Leaf: atom without surrounding parentheses.
		return left + formula.value + right
	}
	return "(" + left + formula.value + right + ")"
}
// main parses the formula supplied via the -formula flag and prints its
// rendering produced by printFormula.
func main() {
	formulaString := flag.String("formula", "", "The formula in propositional logic")
	flag.Parse()
	formula := parseFormula(*formulaString)
	fmt.Println("Formula: " + printFormula(*formula))
}
Add the first part of the algorithm that gathers a name for each formula component
package main
import "flag"
import "fmt"
import "strconv"
import "strings"
type Formula struct {
value string
left *Formula
right *Formula
}
func parseFormula(f string) *Formula {
if strings.HasPrefix(f, "(") && strings.HasSuffix(f, ")") {
for _, connective := range []string{"^", "v", ">"} {
if strings.Contains(f, connective) {
endLeft := strings.Index(f, ")"+connective+"(") + 1
startRight := strings.Index(f, ")"+connective+"(") + 2
leftFormula := parseFormula(f[1:endLeft])
rightFormula := parseFormula(f[startRight : len(f)-1])
result := Formula{
value: connective,
left: leftFormula,
right: rightFormula,
}
return &result
}
}
}
if strings.HasPrefix(f, "~") {
leftFormula := parseFormula(f[1:])
result := Formula{
value: "~",
left: leftFormula,
right: nil,
}
return &result
}
result := Formula{
value: f,
left: nil,
right: nil,
}
return &result
}
// printFormula renders a Formula tree back into string form. Atoms (nodes
// with no children) are printed bare; every inner node is wrapped in
// parentheses.
func printFormula(formula Formula) string {
	leftString := ""
	if formula.left != nil {
		leftString = printFormula(*formula.left)
	}
	rightString := ""
	if formula.right != nil {
		rightString = printFormula(*formula.right)
	}
	openString := "("
	closeString := ")"
	if formula.left == nil && formula.right == nil {
		// Leaf: suppress the surrounding parentheses.
		openString = ""
		closeString = ""
	}
	return openString + leftString + formula.value + rightString + closeString
}
// getLiteralName maps a zero-based index to a one-based literal name:
// 0 -> "p1", 1 -> "p2", and so on.
func getLiteralName(number int) string {
	return fmt.Sprintf("p%d", number+1)
}
// gatherNames walks the formula tree in post-order and assigns each
// distinct subformula a literal name, keyed by its printed form. Children
// are visited first, so subformulas are named before the formulas that
// contain them.
func gatherNames(names map[string]string, formula *Formula) {
	if formula == nil {
		return
	}
	gatherNames(names, formula.left)
	gatherNames(names, formula.right)
	displayFormula := printFormula(*formula)
	_, ok := names[displayFormula]
	if !ok {
		// len(names) grows by one per new entry, so names come out as
		// p1, p2, ... in first-seen order.
		names[displayFormula] = getLiteralName(len(names))
	}
}
// main parses the -formula flag, assigns a literal name to every distinct
// subformula, prints the name table (in map iteration order, i.e.
// unordered), then prints the formula's rendering.
func main() {
	formulaString := flag.String("formula", "", "The formula in propositional logic")
	flag.Parse()
	formula := parseFormula(*formulaString)
	names := map[string]string{}
	gatherNames(names, formula)
	for key, value := range names {
		fmt.Println(key + ": " + value)
	}
	fmt.Println("Formula: " + printFormula(*formula))
}
|
// Copyright (c) 2015, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package interfacer
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"io"
"os"
"path/filepath"
"sort"
"strings"
"golang.org/x/tools/go/loader"
)
func toDiscard(vu *varUsage) bool {
if vu.discard {
return true
}
for to := range vu.assigned {
if toDiscard(to) {
return true
}
}
return false
}
func (v *visitor) interfaceMatching(vr *types.Var, vu *varUsage) (string, string) {
if toDiscard(vu) {
return "", ""
}
allFuncs := typeFuncMap(vr.Type())
if allFuncs == nil {
return "", ""
}
called := make(map[string]string, len(vu.calls))
for fname := range vu.calls {
called[fname] = allFuncs[fname]
}
s := funcMapString(called)
name := v.ifaceOf(s)
if name == "" {
return "", ""
}
return name, s
}
func orderedPkgs(prog *loader.Program) ([]*types.Package, error) {
// InitialPackages() is not in the order that we passed to it
// via Import() calls.
// For now, make it deterministic by sorting import paths
// alphabetically.
unordered := prog.InitialPackages()
paths := make([]string, 0, len(unordered))
byPath := make(map[string]*types.Package, len(unordered))
for _, info := range unordered {
if info.Errors != nil {
return nil, info.Errors[0]
}
path := info.Pkg.Path()
paths = append(paths, path)
byPath[path] = info.Pkg
}
sort.Sort(ByAlph(paths))
pkgs := make([]*types.Package, 0, len(unordered))
for _, path := range paths {
pkgs = append(pkgs, byPath[path])
}
return pkgs, nil
}
// relPathErr converts errors by go/types and go/loader that use
// absolute paths into errors with relative paths
func relPathErr(err error, wd string) error {
errStr := fmt.Sprintf("%v", err)
if strings.HasPrefix(errStr, wd) {
return fmt.Errorf(errStr[len(wd)+1:])
}
return err
}
// CheckArgs checks the packages specified by their import paths in
// args, and writes the results in w. Can give verbose output if
// specified, printing each package as it is checked.
func CheckArgs(args []string, w io.Writer, verbose bool) error {
wd, err := os.Getwd()
if err != nil {
return err
}
paths, err := recurse(args)
if err != nil {
return err
}
c := newCache()
rest, err := c.FromArgs(paths, false)
if err != nil {
return err
}
if len(rest) > 0 {
return fmt.Errorf("unwanted extra args: %v", rest)
}
prog, err := c.Load()
if err != nil {
return err
}
pkgs, err := orderedPkgs(prog)
if err != nil {
return relPathErr(err, wd)
}
c.typesGet(pkgs)
for _, pkg := range pkgs {
info := prog.AllPackages[pkg]
if verbose {
fmt.Fprintln(w, info.Pkg.Path())
}
v := &visitor{
cache: c,
PackageInfo: info,
wd: wd,
w: w,
fset: prog.Fset,
vars: make(map[*types.Var]*varUsage),
}
for _, f := range info.Files {
ast.Walk(v, f)
}
}
return nil
}
type varUsage struct {
calls map[string]struct{}
discard bool
assigned map[*varUsage]struct{}
}
type visitor struct {
*cache
*loader.PackageInfo
wd string
w io.Writer
fset *token.FileSet
signs []*types.Signature
warns [][]string
level int
vars map[*types.Var]*varUsage
}
func paramType(sign *types.Signature, i int) types.Type {
params := sign.Params()
extra := sign.Variadic() && i >= params.Len()-1
if !extra {
if i >= params.Len() {
// builtins with multiple signatures
return nil
}
return params.At(i).Type()
}
last := params.At(params.Len() - 1)
switch x := last.Type().(type) {
case *types.Slice:
return x.Elem()
default:
return x
}
}
func (v *visitor) varUsage(id *ast.Ident) *varUsage {
vr, ok := v.ObjectOf(id).(*types.Var)
if !ok {
return nil
}
if vu, e := v.vars[vr]; e {
return vu
}
vu := &varUsage{
calls: make(map[string]struct{}),
assigned: make(map[*varUsage]struct{}),
}
v.vars[vr] = vu
return vu
}
func (v *visitor) addUsed(id *ast.Ident, as types.Type) {
if as == nil {
return
}
vu := v.varUsage(id)
if vu == nil {
// not a variable
return
}
iface, ok := as.Underlying().(*types.Interface)
if !ok {
vu.discard = true
return
}
for i := 0; i < iface.NumMethods(); i++ {
m := iface.Method(i)
vu.calls[m.Name()] = struct{}{}
}
}
func (v *visitor) addAssign(to, from *ast.Ident) {
pto := v.varUsage(to)
pfrom := v.varUsage(from)
if pto == nil || pfrom == nil {
// either isn't a variable
return
}
pfrom.assigned[pto] = struct{}{}
}
func (v *visitor) discard(e ast.Expr) {
id, ok := e.(*ast.Ident)
if !ok {
return
}
vu := v.varUsage(id)
if vu == nil {
// not a variable
return
}
vu.discard = true
}
func (v *visitor) comparedWith(e ast.Expr, with ast.Expr) {
if _, ok := with.(*ast.BasicLit); ok {
v.discard(e)
}
}
func (v *visitor) implementsIface(sign *types.Signature) bool {
s := signString(sign)
return v.funcOf(s) != ""
}
func (v *visitor) Visit(node ast.Node) ast.Visitor {
var sign *types.Signature
switch x := node.(type) {
case *ast.FuncLit:
sign = v.Types[x].Type.(*types.Signature)
if v.implementsIface(sign) {
return nil
}
case *ast.FuncDecl:
sign = v.Defs[x.Name].Type().(*types.Signature)
if v.implementsIface(sign) {
return nil
}
case *ast.SelectorExpr:
if _, ok := v.TypeOf(x.Sel).(*types.Signature); !ok {
v.discard(x.X)
}
case *ast.UnaryExpr:
v.discard(x.X)
case *ast.IndexExpr:
v.discard(x.X)
case *ast.IncDecStmt:
v.discard(x.X)
case *ast.BinaryExpr:
v.onBinary(x)
case *ast.AssignStmt:
v.onAssign(x)
case *ast.KeyValueExpr:
v.onKeyValue(x)
case *ast.CompositeLit:
v.onComposite(x)
case *ast.CallExpr:
v.onCall(x)
case nil:
if top := v.signs[len(v.signs)-1]; top != nil {
v.funcEnded(top)
}
v.signs = v.signs[:len(v.signs)-1]
}
if node != nil {
v.signs = append(v.signs, sign)
if sign != nil {
v.level++
}
}
return v
}
func (v *visitor) onBinary(be *ast.BinaryExpr) {
switch be.Op {
case token.EQL, token.NEQ:
default:
v.discard(be.X)
v.discard(be.Y)
return
}
v.comparedWith(be.X, be.Y)
v.comparedWith(be.Y, be.X)
}
func (v *visitor) onAssign(as *ast.AssignStmt) {
for i, e := range as.Rhs {
id, ok := e.(*ast.Ident)
if !ok {
continue
}
left := as.Lhs[i]
v.addUsed(id, v.Types[left].Type)
if lid, ok := left.(*ast.Ident); ok {
v.addAssign(lid, id)
}
}
}
func (v *visitor) onKeyValue(kv *ast.KeyValueExpr) {
if id, ok := kv.Key.(*ast.Ident); ok {
v.addUsed(id, v.TypeOf(kv.Value))
}
if id, ok := kv.Value.(*ast.Ident); ok {
v.addUsed(id, v.TypeOf(kv.Key))
}
}
func (v *visitor) onComposite(cl *ast.CompositeLit) {
for _, e := range cl.Elts {
if kv, ok := e.(*ast.KeyValueExpr); ok {
v.onKeyValue(kv)
}
}
}
func (v *visitor) onCall(ce *ast.CallExpr) {
if sign, ok := v.TypeOf(ce.Fun).(*types.Signature); ok {
v.onMethodCall(ce, sign)
return
}
if len(ce.Args) == 1 {
v.discard(ce.Args[0])
}
}
func (v *visitor) onMethodCall(ce *ast.CallExpr, sign *types.Signature) {
for i, e := range ce.Args {
if id, ok := e.(*ast.Ident); ok {
v.addUsed(id, paramType(sign, i))
}
}
sel, ok := ce.Fun.(*ast.SelectorExpr)
if !ok {
return
}
left, ok := sel.X.(*ast.Ident)
if !ok {
return
}
vu := v.varUsage(left)
if vu == nil {
// not a variable
return
}
vu.calls[sel.Sel.Name] = struct{}{}
}
func (v *visitor) funcEnded(sign *types.Signature) {
v.level--
v.warns = append(v.warns, v.funcWarns(sign))
if v.level > 0 {
return
}
for i := len(v.warns) - 1; i >= 0; i-- {
warns := v.warns[i]
for _, warn := range warns {
fmt.Fprintln(v.w, warn)
}
}
v.warns = nil
v.vars = make(map[*types.Var]*varUsage)
}
func (v *visitor) funcWarns(sign *types.Signature) []string {
var warns []string
params := sign.Params()
for i := 0; i < params.Len(); i++ {
vr := params.At(i)
vu := v.vars[vr]
if vu == nil {
continue
}
if warn := v.paramWarn(vr, vu); warn != "" {
warns = append(warns, warn)
}
}
return warns
}
func (v *visitor) paramWarn(vr *types.Var, vu *varUsage) string {
ifname, iftype := v.interfaceMatching(vr, vu)
if ifname == "" {
return ""
}
t := vr.Type()
if _, ok := t.Underlying().(*types.Interface); ok {
if ifname == t.String() {
return ""
}
if have := funcMapString(typeFuncMap(t)); have == iftype {
return ""
}
}
pos := v.fset.Position(vr.Pos())
fname := pos.Filename
// go/loader seems to like absolute paths
if rel, err := filepath.Rel(v.wd, fname); err == nil {
fname = rel
}
pname := v.Pkg.Path()
if strings.HasPrefix(ifname, pname+".") {
ifname = ifname[len(pname)+1:]
}
return fmt.Sprintf("%s:%d:%d: %s can be %s",
fname, pos.Line, pos.Column, vr.Name(), ifname)
}
Make warnings be just the bodies
The position prefix is added later. This simplifies the whole workflow
and makes it easier to add different warnings in the future.
// Copyright (c) 2015, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package interfacer
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"io"
"os"
"path/filepath"
"sort"
"strings"
"golang.org/x/tools/go/loader"
)
// toDiscard reports whether this variable's usage — or that of any
// variable it was assigned to, transitively — has been marked as
// discarded, meaning no interface suggestion should be made for it.
// NOTE(review): the recursion keeps no visited set, so a cycle in the
// assignment graph would recurse forever — confirm cycles cannot occur.
func toDiscard(vu *varUsage) bool {
	if vu.discard {
		return true
	}
	for to := range vu.assigned {
		if toDiscard(to) {
			return true
		}
	}
	return false
}
func (v *visitor) interfaceMatching(vr *types.Var, vu *varUsage) (string, string) {
if toDiscard(vu) {
return "", ""
}
allFuncs := typeFuncMap(vr.Type())
if allFuncs == nil {
return "", ""
}
called := make(map[string]string, len(vu.calls))
for fname := range vu.calls {
called[fname] = allFuncs[fname]
}
s := funcMapString(called)
name := v.ifaceOf(s)
if name == "" {
return "", ""
}
return name, s
}
func orderedPkgs(prog *loader.Program) ([]*types.Package, error) {
// InitialPackages() is not in the order that we passed to it
// via Import() calls.
// For now, make it deterministic by sorting import paths
// alphabetically.
unordered := prog.InitialPackages()
paths := make([]string, 0, len(unordered))
byPath := make(map[string]*types.Package, len(unordered))
for _, info := range unordered {
if info.Errors != nil {
return nil, info.Errors[0]
}
path := info.Pkg.Path()
paths = append(paths, path)
byPath[path] = info.Pkg
}
sort.Sort(ByAlph(paths))
pkgs := make([]*types.Package, 0, len(unordered))
for _, path := range paths {
pkgs = append(pkgs, byPath[path])
}
return pkgs, nil
}
// relPathErr converts errors by go/types and go/loader that use
// absolute paths into errors with relative paths
func relPathErr(err error, wd string) error {
errStr := fmt.Sprintf("%v", err)
if strings.HasPrefix(errStr, wd) {
return fmt.Errorf(errStr[len(wd)+1:])
}
return err
}
// CheckArgs checks the packages specified by their import paths in
// args, and writes the results in w. Can give verbose output if
// specified, printing each package as it is checked.
func CheckArgs(args []string, w io.Writer, verbose bool) error {
wd, err := os.Getwd()
if err != nil {
return err
}
paths, err := recurse(args)
if err != nil {
return err
}
c := newCache()
rest, err := c.FromArgs(paths, false)
if err != nil {
return err
}
if len(rest) > 0 {
return fmt.Errorf("unwanted extra args: %v", rest)
}
prog, err := c.Load()
if err != nil {
return err
}
pkgs, err := orderedPkgs(prog)
if err != nil {
return relPathErr(err, wd)
}
c.typesGet(pkgs)
for _, pkg := range pkgs {
info := prog.AllPackages[pkg]
if verbose {
fmt.Fprintln(w, info.Pkg.Path())
}
v := &visitor{
cache: c,
PackageInfo: info,
wd: wd,
w: w,
fset: prog.Fset,
vars: make(map[*types.Var]*varUsage),
}
for _, f := range info.Files {
ast.Walk(v, f)
}
}
return nil
}
type varUsage struct {
calls map[string]struct{}
discard bool
assigned map[*varUsage]struct{}
}
type visitor struct {
*cache
*loader.PackageInfo
wd string
w io.Writer
fset *token.FileSet
signs []*types.Signature
warns [][]string
level int
vars map[*types.Var]*varUsage
}
func paramType(sign *types.Signature, i int) types.Type {
params := sign.Params()
extra := sign.Variadic() && i >= params.Len()-1
if !extra {
if i >= params.Len() {
// builtins with multiple signatures
return nil
}
return params.At(i).Type()
}
last := params.At(params.Len() - 1)
switch x := last.Type().(type) {
case *types.Slice:
return x.Elem()
default:
return x
}
}
func (v *visitor) varUsage(id *ast.Ident) *varUsage {
vr, ok := v.ObjectOf(id).(*types.Var)
if !ok {
return nil
}
if vu, e := v.vars[vr]; e {
return vu
}
vu := &varUsage{
calls: make(map[string]struct{}),
assigned: make(map[*varUsage]struct{}),
}
v.vars[vr] = vu
return vu
}
func (v *visitor) addUsed(id *ast.Ident, as types.Type) {
if as == nil {
return
}
vu := v.varUsage(id)
if vu == nil {
// not a variable
return
}
iface, ok := as.Underlying().(*types.Interface)
if !ok {
vu.discard = true
return
}
for i := 0; i < iface.NumMethods(); i++ {
m := iface.Method(i)
vu.calls[m.Name()] = struct{}{}
}
}
func (v *visitor) addAssign(to, from *ast.Ident) {
pto := v.varUsage(to)
pfrom := v.varUsage(from)
if pto == nil || pfrom == nil {
// either isn't a variable
return
}
pfrom.assigned[pto] = struct{}{}
}
func (v *visitor) discard(e ast.Expr) {
id, ok := e.(*ast.Ident)
if !ok {
return
}
vu := v.varUsage(id)
if vu == nil {
// not a variable
return
}
vu.discard = true
}
func (v *visitor) comparedWith(e ast.Expr, with ast.Expr) {
if _, ok := with.(*ast.BasicLit); ok {
v.discard(e)
}
}
func (v *visitor) implementsIface(sign *types.Signature) bool {
s := signString(sign)
return v.funcOf(s) != ""
}
func (v *visitor) Visit(node ast.Node) ast.Visitor {
var sign *types.Signature
switch x := node.(type) {
case *ast.FuncLit:
sign = v.Types[x].Type.(*types.Signature)
if v.implementsIface(sign) {
return nil
}
case *ast.FuncDecl:
sign = v.Defs[x.Name].Type().(*types.Signature)
if v.implementsIface(sign) {
return nil
}
case *ast.SelectorExpr:
if _, ok := v.TypeOf(x.Sel).(*types.Signature); !ok {
v.discard(x.X)
}
case *ast.UnaryExpr:
v.discard(x.X)
case *ast.IndexExpr:
v.discard(x.X)
case *ast.IncDecStmt:
v.discard(x.X)
case *ast.BinaryExpr:
v.onBinary(x)
case *ast.AssignStmt:
v.onAssign(x)
case *ast.KeyValueExpr:
v.onKeyValue(x)
case *ast.CompositeLit:
v.onComposite(x)
case *ast.CallExpr:
v.onCall(x)
case nil:
if top := v.signs[len(v.signs)-1]; top != nil {
v.funcEnded(top)
}
v.signs = v.signs[:len(v.signs)-1]
}
if node != nil {
v.signs = append(v.signs, sign)
if sign != nil {
v.level++
}
}
return v
}
func (v *visitor) onBinary(be *ast.BinaryExpr) {
switch be.Op {
case token.EQL, token.NEQ:
default:
v.discard(be.X)
v.discard(be.Y)
return
}
v.comparedWith(be.X, be.Y)
v.comparedWith(be.Y, be.X)
}
func (v *visitor) onAssign(as *ast.AssignStmt) {
for i, e := range as.Rhs {
id, ok := e.(*ast.Ident)
if !ok {
continue
}
left := as.Lhs[i]
v.addUsed(id, v.Types[left].Type)
if lid, ok := left.(*ast.Ident); ok {
v.addAssign(lid, id)
}
}
}
func (v *visitor) onKeyValue(kv *ast.KeyValueExpr) {
if id, ok := kv.Key.(*ast.Ident); ok {
v.addUsed(id, v.TypeOf(kv.Value))
}
if id, ok := kv.Value.(*ast.Ident); ok {
v.addUsed(id, v.TypeOf(kv.Key))
}
}
func (v *visitor) onComposite(cl *ast.CompositeLit) {
for _, e := range cl.Elts {
if kv, ok := e.(*ast.KeyValueExpr); ok {
v.onKeyValue(kv)
}
}
}
func (v *visitor) onCall(ce *ast.CallExpr) {
if sign, ok := v.TypeOf(ce.Fun).(*types.Signature); ok {
v.onMethodCall(ce, sign)
return
}
if len(ce.Args) == 1 {
v.discard(ce.Args[0])
}
}
func (v *visitor) onMethodCall(ce *ast.CallExpr, sign *types.Signature) {
for i, e := range ce.Args {
if id, ok := e.(*ast.Ident); ok {
v.addUsed(id, paramType(sign, i))
}
}
sel, ok := ce.Fun.(*ast.SelectorExpr)
if !ok {
return
}
left, ok := sel.X.(*ast.Ident)
if !ok {
return
}
vu := v.varUsage(left)
if vu == nil {
// not a variable
return
}
vu.calls[sel.Sel.Name] = struct{}{}
}
func (v *visitor) funcEnded(sign *types.Signature) {
v.level--
v.warns = append(v.warns, v.funcWarns(sign))
if v.level > 0 {
return
}
for i := len(v.warns) - 1; i >= 0; i-- {
warns := v.warns[i]
for _, warn := range warns {
fmt.Fprintln(v.w, warn)
}
}
v.warns = nil
v.vars = make(map[*types.Var]*varUsage)
}
// funcWarns collects one warning per parameter of the finished function
// whose tracked usage suggests a narrower interface type. Each warning
// body from paramWarn is prefixed here with its "file:line:col:" position.
func (v *visitor) funcWarns(sign *types.Signature) []string {
	var warns []string
	params := sign.Params()
	for i := 0; i < params.Len(); i++ {
		vr := params.At(i)
		vu := v.vars[vr]
		if vu == nil {
			// No usage was recorded for this parameter.
			continue
		}
		warn := v.paramWarn(vr, vu)
		if warn == "" {
			continue
		}
		pos := v.fset.Position(vr.Pos())
		fname := pos.Filename
		// go/loader seems to like absolute paths
		if rel, err := filepath.Rel(v.wd, fname); err == nil {
			fname = rel
		}
		warns = append(warns, fmt.Sprintf("%s:%d:%d: %s",
			fname, pos.Line, pos.Column, warn))
	}
	return warns
}
// paramWarn returns the warning body ("x can be Reader") for a single
// parameter, or "" when no narrower interface applies. The position
// prefix is added by the caller (funcWarns).
func (v *visitor) paramWarn(vr *types.Var, vu *varUsage) string {
	ifname, iftype := v.interfaceMatching(vr, vu)
	if ifname == "" {
		return ""
	}
	t := vr.Type()
	if _, ok := t.Underlying().(*types.Interface); ok {
		if ifname == t.String() {
			// Already declared as the suggested interface.
			return ""
		}
		if have := funcMapString(typeFuncMap(t)); have == iftype {
			// The current interface already exposes exactly the
			// methods that are used.
			return ""
		}
	}
	pname := v.Pkg.Path()
	if strings.HasPrefix(ifname, pname+".") {
		// Strip this package's own path prefix from local names.
		ifname = ifname[len(pname)+1:]
	}
	return fmt.Sprintf("%s can be %s", vr.Name(), ifname)
}
|
package main
import (
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
)
func check(paths, exclude []string, coverage, verbose bool) error {
excludeMap := make(map[string]struct{})
for _, ex := range exclude {
excludeMap[ex] = struct{}{}
}
var (
files []string
testDirs []string
testFileDirs []string
)
for _, path := range paths {
switch fi, err := os.Stat(path); {
case err != nil:
return err
case fi.IsDir():
if err := checkDir(path, excludeMap, verbose); err != nil {
return err
}
td, err := getTestDirs(path, excludeMap)
if err != nil {
return err
}
testDirs = append(testDirs, td...)
default:
files = append(files, path)
if strings.HasSuffix(path, "_test.go") {
testFileDirs = append(testFileDirs, filepath.Base(path))
}
}
}
if len(files) > 0 {
if err := checkFiles(files, verbose); err != nil {
return err
}
}
// go test
fileDirs := make(map[string]struct{})
for _, d := range testFileDirs {
fileDirs[d] = struct{}{}
}
var dirs []string
for d := range fileDirs {
dirs = append(dirs, d)
}
sort.Strings(dirs)
for _, d := range dirs {
if err := gotest(d, coverage, verbose); err != nil {
return err
}
}
for _, d := range testDirs {
err := gotest("."+string(filepath.Separator)+d, coverage, verbose)
if err != nil {
return err
}
}
return nil
}
func checkDir(path string, excludeMap map[string]struct{}, verbose bool) error {
fis, err := ioutil.ReadDir(path)
if err != nil {
return err
}
var files []string
for _, fi := range fis {
fn := fi.Name()
// ignore hidden directories and files
if !strings.HasPrefix(fn, ".") {
if fi.IsDir() {
_, ok := excludeMap[fn]
if !ok {
err := checkSubdir(filepath.Join(path, fn), verbose)
if err != nil {
return err
}
}
} else if strings.HasSuffix(fn, ".go") {
files = append(files, fn)
}
}
}
if len(files) > 0 {
if err := checkFiles(files, verbose); err != nil {
return err
}
}
return nil
}
func checkFiles(files []string, verbose bool) error {
if err := goimports(files, verbose); err != nil {
return err
}
if err := gofmt(files, verbose); err != nil {
return err
}
if err := golint(files, verbose); err != nil {
return err
}
return govet(files, verbose)
}
func checkSubdir(subdir string, verbose bool) error {
path := []string{subdir}
if err := goimports(path, verbose); err != nil {
return err
}
if err := gofmt(path, verbose); err != nil {
return err
}
if err := golint(path, verbose); err != nil {
return err
}
return govet(path, verbose)
}
func getTestDirs(path string, excludeMap map[string]struct{}) ([]string, error) {
fis, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
var (
dirs []string
subDirs []string
include bool
)
for _, fi := range fis {
fn := fi.Name()
// ignore hidden directories and files
if !strings.HasPrefix(fn, ".") {
if fi.IsDir() {
_, ok := excludeMap[fn]
if !ok {
d, err := getTestDirs(filepath.Join(path, fn), excludeMap)
if err != nil {
return nil, err
}
subDirs = append(subDirs, d...)
}
} else if !include && strings.HasSuffix(fn, "_test.go") {
include = true
}
}
}
if include {
dirs = append(dirs, path)
}
dirs = append(dirs, subDirs...)
return dirs, nil
}
Call `go vet` only from the top-level directory
package main
import (
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
)
// check runs the format/lint checks and tests for the given paths.
// Directory arguments are checked recursively (skipping names in
// exclude); file arguments are checked individually. Directories that
// contain test files are then run through gotest, optionally with
// coverage.
func check(paths, exclude []string, coverage, verbose bool) error {
	excludeMap := make(map[string]struct{})
	for _, ex := range exclude {
		excludeMap[ex] = struct{}{}
	}
	var (
		files        []string
		testDirs     []string
		testFileDirs []string
	)
	for _, path := range paths {
		switch fi, err := os.Stat(path); {
		case err != nil:
			return err
		case fi.IsDir():
			if err := checkDir(path, excludeMap, verbose); err != nil {
				return err
			}
			td, err := getTestDirs(path, excludeMap)
			if err != nil {
				return err
			}
			testDirs = append(testDirs, td...)
			// go vet is invoked once per top-level directory only.
			if err := govet([]string{path}, verbose); err != nil {
				return err
			}
		default:
			files = append(files, path)
			if strings.HasSuffix(path, "_test.go") {
				// NOTE(review): filepath.Base yields the file name, not
				// its containing directory; filepath.Dir looks intended
				// here — confirm against gotest's expectations.
				testFileDirs = append(testFileDirs, filepath.Base(path))
			}
		}
	}
	if len(files) > 0 {
		if err := checkFiles(files, verbose); err != nil {
			return err
		}
	}
	// go test
	fileDirs := make(map[string]struct{})
	for _, d := range testFileDirs {
		fileDirs[d] = struct{}{}
	}
	var dirs []string
	for d := range fileDirs {
		dirs = append(dirs, d)
	}
	// Sort for deterministic run order (map iteration is random).
	sort.Strings(dirs)
	for _, d := range dirs {
		if err := gotest(d, coverage, verbose); err != nil {
			return err
		}
	}
	for _, d := range testDirs {
		err := gotest("."+string(filepath.Separator)+d, coverage, verbose)
		if err != nil {
			return err
		}
	}
	return nil
}
func checkDir(path string, excludeMap map[string]struct{}, verbose bool) error {
fis, err := ioutil.ReadDir(path)
if err != nil {
return err
}
var files []string
for _, fi := range fis {
fn := fi.Name()
// ignore hidden directories and files
if !strings.HasPrefix(fn, ".") {
if fi.IsDir() {
_, ok := excludeMap[fn]
if !ok {
err := checkSubdir(filepath.Join(path, fn), verbose)
if err != nil {
return err
}
}
} else if strings.HasSuffix(fn, ".go") {
files = append(files, fn)
}
}
}
if len(files) > 0 {
if err := checkFiles(files, verbose); err != nil {
return err
}
}
return nil
}
// checkFiles runs goimports, gofmt, and golint (in that order) over the
// given files, stopping at the first failure.
func checkFiles(files []string, verbose bool) error {
	checks := []func([]string, bool) error{goimports, gofmt, golint}
	for _, run := range checks {
		if err := run(files, verbose); err != nil {
			return err
		}
	}
	return nil
}
func checkSubdir(subdir string, verbose bool) error {
path := []string{subdir}
if err := goimports(path, verbose); err != nil {
return err
}
if err := gofmt(path, verbose); err != nil {
return err
}
return golint(path, verbose)
}
// getTestDirs walks path recursively (skipping hidden and excluded
// directories) and returns every directory that directly contains at
// least one *_test.go file. A qualifying parent directory is listed
// before its subdirectories.
func getTestDirs(path string, excludeMap map[string]struct{}) ([]string, error) {
	fis, err := ioutil.ReadDir(path)
	if err != nil {
		return nil, err
	}
	var (
		dirs    []string
		subDirs []string
		include bool
	)
	for _, fi := range fis {
		fn := fi.Name()
		// ignore hidden directories and files
		if !strings.HasPrefix(fn, ".") {
			if fi.IsDir() {
				_, ok := excludeMap[fn]
				if !ok {
					d, err := getTestDirs(filepath.Join(path, fn), excludeMap)
					if err != nil {
						return nil, err
					}
					subDirs = append(subDirs, d...)
				}
			} else if !include && strings.HasSuffix(fn, "_test.go") {
				// One test file is enough to include this directory.
				include = true
			}
		}
	}
	if include {
		dirs = append(dirs, path)
	}
	dirs = append(dirs, subDirs...)
	return dirs, nil
}
|
// 29 december 2012
package main
import (
"fmt"
"os"
"io"
"path/filepath"
"archive/zip"
"strconv"
"crypto/sha1"
"encoding/hex"
"bytes"
"log"
)
var sha1hash = sha1.New()
// crc32match reports whether the CRC32 recorded in the zip entry matches
// the hex string from the game database. An empty string means no CRC was
// recorded and always matches; a malformed hex string is a data-file
// error and aborts the program.
func crc32match(zipcrc uint32, gamecrc string) bool {
	if gamecrc == "" {
		// Lack of a recorded CRC32 means: do not check.
		return true
	}
	want, err := strconv.ParseUint(gamecrc, 16, 32)
	if err != nil {
		log.Fatalf("string convert error reading crc32 (%q): %v", gamecrc, err)
	}
	return zipcrc == uint32(want)
}
// sha1check reports whether the SHA-1 digest of the decompressed zip
// entry matches expectstring, a hex-encoded digest from the game
// database. A malformed hex string is a data-file error and aborts the
// program; entry open/read failures are returned as errors.
// NOTE(review): this reuses the package-global sha1hash, so it is not
// safe for concurrent use — confirm callers are single-threaded.
func sha1check(zf *zip.File, expectstring string) (bool, error) {
	expected, err := hex.DecodeString(expectstring)
	if err != nil {
		log.Fatalf("hex decode error reading sha1 (%q): %v", expectstring, err)
	}
	f, err := zf.Open()
	if err != nil {
		return false, fmt.Errorf("could not open given zip file entry: %v", err)
	}
	defer f.Close()
	sha1hash.Reset()
	n, err := io.Copy(sha1hash, f)
	if err != nil {
		return false, fmt.Errorf("could not read given zip file entry: %v", err)
	}
	// TODO could we have integer size/signedness conversion failure here? zf.UncompressedSize is not an int64
	if n != int64(zf.UncompressedSize) {
		return false, fmt.Errorf("short read from zip file or write to hash but no error returned (expected %d bytes; got %d)", int64(zf.UncompressedSize), n)
	}
	return bytes.Equal(expected, sha1hash.Sum(nil)), nil
}
// Filename returns the path of the zip archive that would hold this
// game's ROMs under the given rompath directory: <rompath>/<Name>.zip.
func (g *Game) Filename(rompath string) string {
	return filepath.Join(rompath, g.Name + ".zip")
}
// CheckIn reports whether this game's zip archive under rompath exists
// and verifies completely: every file in the archive must correspond to a
// known ROM and match its recorded size, CRC32, and SHA-1, and no known
// ROM may be missing from the archive.
//
// A missing zip file is not an error; CheckIn returns (false, nil) so the
// caller can try the next rompath. Any other I/O problem is returned as
// an error.
func (g *Game) CheckIn(rompath string) (bool, error) {
	zipname := g.Filename(rompath)
	f, err := zip.OpenReader(zipname)
	if os.IsNotExist(err) { // if the file does not exist, try the next rompath
		return false, nil
	}
	if err != nil { // something different happened
		return false, fmt.Errorf("could not open zip file %s: %v", zipname, err)
	}
	defer f.Close()
	// populate list of ROMS
	var roms = make(map[string]*ROM)
	for i := range g.ROMs {
		roms[g.ROMs[i].Name] = &(g.ROMs[i])
	}
	// now check
	for _, file := range f.File {
		rom, ok := roms[file.Name]
		if !ok { // the archive holds a file this game does not know about
			return false, nil
		}
		// NOTE(review): UncompressedSize is the 32-bit zip header field;
		// an entry over 4GB would need UncompressedSize64 — presumably
		// ROMs never reach that size, but worth confirming.
		if file.UncompressedSize != rom.Size {
			return false, nil
		}
		if !crc32match(file.CRC32, rom.CRC32) {
			return false, nil
		}
		good, err := sha1check(file, rom.SHA1)
		if err != nil {
			return false, fmt.Errorf("could not calculate SHA-1 sum of %s in %s: %v", g.Name, zipname, err)
		}
		if !good {
			return false, nil
		}
		delete(roms, file.Name) // mark as done
	}
	// if we reached here everything we know about checked out, so if there are any leftover files in the game, that means something is wrong
	return len(roms) == 0, nil
}
Split individual zip file checking into its own function, again for parent/clone handling.
// 29 december 2012
package main
import (
"fmt"
"os"
"io"
"path/filepath"
"archive/zip"
"strconv"
"crypto/sha1"
"encoding/hex"
"bytes"
"log"
)
var sha1hash = sha1.New()
func crc32match(zipcrc uint32, gamecrc string) bool {
if gamecrc == "" { // assume lack of CRC32 means do not check
return true
}
n, err := strconv.ParseUint(gamecrc, 16, 32)
if err != nil {
log.Fatalf("string convert error reading crc32 (%q): %v", gamecrc, err)
}
return uint32(n) == zipcrc
}
func sha1check(zf *zip.File, expectstring string) (bool, error) {
expected, err := hex.DecodeString(expectstring)
if err != nil {
log.Fatalf("hex decode error reading sha1 (%q): %v", expectstring, err)
}
f, err := zf.Open()
if err != nil {
return false, fmt.Errorf("could not open given zip file entry: %v", err)
}
defer f.Close()
sha1hash.Reset()
n, err := io.Copy(sha1hash, f)
if err != nil {
return false, fmt.Errorf("could not read given zip file entry: %v", err)
}
// TODO could we have integer size/signedness conversion failure here? zf.UncompressedSize is not an int64
if n != int64(zf.UncompressedSize) {
return false, fmt.Errorf("short read from zip file or write to hash but no error returned (expected %d bytes; got %d)", int64(zf.UncompressedSize), n)
}
return bytes.Equal(expected, sha1hash.Sum(nil)), nil
}
// Filename returns the path of the zip archive that would hold this
// game's ROMs under the given rompath directory: <rompath>/<Name>.zip.
func (g *Game) Filename(rompath string) string {
	return filepath.Join(rompath, g.Name + ".zip")
}
// checkOneZip verifies the named zip archive against roms, a map from ROM
// filename to expected metadata. Every file present in the archive must
// be a known ROM and match its size, CRC32, and SHA-1. Entries that check
// out are deleted from roms, so after a true result the caller can
// inspect the leftover keys to find ROMs missing from this archive
// (useful for parent/clone set handling).
//
// A nonexistent zip file returns (false, nil) so the caller can try the
// next rompath; other open failures are returned as errors.
func (g *Game) checkOneZip(zipname string, roms map[string]*ROM) (bool, error) {
	f, err := zip.OpenReader(zipname)
	if os.IsNotExist(err) { // if the file does not exist, try the next rompath
		return false, nil
	}
	if err != nil { // something different happened
		return false, fmt.Errorf("could not open zip file %s: %v", zipname, err)
	}
	defer f.Close()
	for _, file := range f.File {
		rom, ok := roms[file.Name]
		if !ok { // the archive holds a file this game does not know about
			return false, nil
		}
		// NOTE(review): UncompressedSize is the 32-bit zip header field;
		// an entry over 4GB would need UncompressedSize64 — presumably
		// ROMs never get that large, but worth confirming.
		if file.UncompressedSize != rom.Size {
			return false, nil
		}
		if !crc32match(file.CRC32, rom.CRC32) {
			return false, nil
		}
		good, err := sha1check(file, rom.SHA1)
		if err != nil {
			return false, fmt.Errorf("could not calculate SHA-1 sum of %s in %s: %v", g.Name, zipname, err)
		}
		if !good {
			return false, nil
		}
		delete(roms, file.Name) // mark as done
	}
	return true, nil // all clear on this one
}
// CheckIn reports whether this game's zip archive under rompath exists
// and passes verification: every archive entry must match a known ROM's
// size, CRC32, and SHA-1, and no known ROM may be left unaccounted for.
// A missing archive yields (false, nil) so callers may try another
// rompath.
func (g *Game) CheckIn(rompath string) (bool, error) {
	// Index the game's ROMs by filename; checkOneZip removes each entry
	// it successfully verifies.
	roms := make(map[string]*ROM)
	for i := range g.ROMs {
		rom := &g.ROMs[i]
		roms[rom.Name] = rom
	}
	ok, err := g.checkOneZip(g.Filename(rompath), roms)
	if err != nil {
		return false, err
	}
	if !ok {
		return false, nil
	}
	// Everything in the archive checked out; any ROM still left in the
	// map was missing from the archive, which is a failure.
	return len(roms) == 0, nil
}
|
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package web defines minimal helper routines for accessing HTTP/HTTPS
// resources without requiring external dependenicies on the net package.
//
// If the cmd_go_bootstrap build tag is present, web avoids the use of the net
// package and returns errors for all network operations.
package web
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"strings"
"unicode"
"unicode/utf8"
)
// SecurityMode specifies whether a function should make network
// calls using insecure transports (eg, plain text HTTP).
// The zero value is "secure".
type SecurityMode int
const (
SecureOnly SecurityMode = iota // Reject plain HTTP; validate HTTPS.
DefaultSecurity // Allow plain HTTP if explicit; validate HTTPS.
Insecure // Allow plain HTTP if not explicitly HTTPS; skip HTTPS validation.
)
// An HTTPError describes an HTTP error response (non-200 result).
type HTTPError struct {
	URL        string // redacted
	Status     string
	StatusCode int
	Err        error  // underlying error, if known
	Detail     string // limited to maxErrorDetailLines and maxErrorDetailBytes
}

const (
	maxErrorDetailLines = 8
	maxErrorDetailBytes = maxErrorDetailLines * 81
)

// Error renders the error as "reading <url>: <status>", appending the
// captured server detail when present; otherwise a known underlying
// error replaces the status text.
func (e *HTTPError) Error() string {
	switch {
	case e.Detail != "":
		sep := " "
		if strings.ContainsRune(e.Detail, '\n') {
			// Multi-line detail goes on its own tab-indented lines.
			sep = "\n\t"
		}
		return fmt.Sprintf("reading %s: %v\n\tserver response:%s%s", e.URL, e.Status, sep, e.Detail)
	case e.Err != nil:
		err := e.Err
		if pErr, ok := err.(*os.PathError); ok && strings.HasSuffix(e.URL, pErr.Path) {
			// Remove the redundant copy of the path.
			err = pErr.Err
		}
		return fmt.Sprintf("reading %s: %v", e.URL, err)
	default:
		return fmt.Sprintf("reading %s: %v", e.URL, e.Status)
	}
}

// Is makes HTTPError match os.ErrNotExist for 404 (Not Found) and
// 410 (Gone) responses, so callers can use errors.Is.
func (e *HTTPError) Is(target error) bool {
	if target != os.ErrNotExist {
		return false
	}
	return e.StatusCode == 404 || e.StatusCode == 410
}

// Unwrap returns the underlying error, if known.
func (e *HTTPError) Unwrap() error {
	return e.Err
}
// GetBytes returns the body of the requested resource, or an error if the
// response status was not http.StatusOK.
//
// GetBytes is a convenience wrapper around Get and Response.Err.
func GetBytes(u *url.URL) ([]byte, error) {
	resp, err := Get(DefaultSecurity, u)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// A non-200 status becomes an *HTTPError via Response.Err.
	if err := resp.Err(); err != nil {
		return nil, err
	}
	body, readErr := ioutil.ReadAll(resp.Body)
	if readErr != nil {
		return nil, fmt.Errorf("reading %s: %v", Redacted(u), readErr)
	}
	return body, nil
}
// A Response records the result of fetching a URL: status information,
// headers, and the (possibly wrapped) body stream.
type Response struct {
	URL        string // redacted form of the requested URL, for error messages
	Status     string
	StatusCode int
	Header     map[string][]string
	Body       io.ReadCloser // Either the original body or &errorDetail.
	fileErr     error             // NOTE(review): presumably the underlying fetch/file error set by the get implementation — confirm in get()
	errorDetail errorDetailBuffer // captures a prefix of Body for error reporting
}
// Err returns an *HTTPError corresponding to the response r.
// If the response r has StatusCode 200 or 0 (unset), Err returns nil.
// Otherwise, Err may read from r.Body in order to extract relevant error detail.
func (r *Response) Err() error {
	switch r.StatusCode {
	case 200, 0:
		return nil
	}
	herr := &HTTPError{
		URL:        r.URL,
		Status:     r.Status,
		StatusCode: r.StatusCode,
		Err:        r.fileErr,
	}
	// formatErrorDetail may drain r.Body to collect the server's message.
	herr.Detail = r.formatErrorDetail()
	return herr
}
// formatErrorDetail converts r.errorDetail (a prefix of the output of r.Body)
// into a short, tab-indented summary. It returns "" when detail
// collection is not enabled, when the captured text is not valid UTF-8,
// or when it contains non-printable characters.
func (r *Response) formatErrorDetail() string {
	if r.Body != &r.errorDetail {
		return "" // Error detail collection not enabled.
	}
	// Ensure that r.errorDetail has been populated.
	_, _ = io.Copy(ioutil.Discard, r.Body)
	s := r.errorDetail.buf.String()
	if !utf8.ValidString(s) {
		return "" // Don't try to recover non-UTF-8 error messages.
	}
	// Note: the loop variable r below is a rune and shadows the receiver.
	for _, r := range s {
		if !unicode.IsGraphic(r) && !unicode.IsSpace(r) {
			return "" // Don't let the server do any funny business with the user's terminal.
		}
	}
	// Reassemble the captured lines, tab-indented, enforcing the line and
	// byte limits.
	var detail strings.Builder
	for i, line := range strings.Split(s, "\n") {
		if strings.TrimSpace(line) == "" {
			break // Stop at the first blank line.
		}
		if i > 0 {
			detail.WriteString("\n\t")
		}
		if i >= maxErrorDetailLines {
			detail.WriteString("[Truncated: too many lines.]")
			break
		}
		if detail.Len()+len(line) > maxErrorDetailBytes {
			detail.WriteString("[Truncated: too long.]")
			break
		}
		detail.WriteString(line)
	}
	return detail.String()
}
// Get returns the body of the HTTP or HTTPS resource specified at the given URL.
//
// If the URL does not include an explicit scheme, Get first tries "https".
// If the server does not respond under that scheme and the security mode is
// Insecure, Get then tries "http".
// The URL included in the response indicates which scheme was actually used,
// and it is a redacted URL suitable for use in error messages.
//
// For the "https" scheme only, credentials are attached using the
// cmd/go/internal/auth package. If the URL itself includes a username and
// password, it will not be attempted under the "http" scheme unless the
// security mode is Insecure.
//
// Get returns a non-nil error only if the request did not receive a response
// under any applicable scheme. (A non-2xx response does not cause an error.)
func Get(security SecurityMode, u *url.URL) (*Response, error) {
	// get is supplied elsewhere in the package; per the package comment,
	// the cmd_go_bootstrap variant fails all network operations.
	return get(security, u)
}
// Redacted returns a redacted string form of the URL,
// suitable for printing in error messages.
// The string form replaces any non-empty password
// in the original URL with "[redacted]".
func Redacted(u *url.URL) string {
if u.User != nil {
if _, ok := u.User.Password(); ok {
redacted := *u
redacted.User = url.UserPassword(u.User.Username(), "[redacted]")
u = &redacted
}
}
return u.String()
}
// OpenBrowser attempts to open the requested URL in a web browser.
// It reports whether a browser was launched; openBrowser is supplied
// elsewhere in the package.
func OpenBrowser(url string) (opened bool) {
	return openBrowser(url)
}
// Join returns the result of adding the slash-separated
// path elements to the end of u's path.
func Join(u *url.URL, path string) *url.URL {
j := *u
if path == "" {
return &j
}
j.Path = strings.TrimSuffix(u.Path, "/") + "/" + strings.TrimPrefix(path, "/")
j.RawPath = strings.TrimSuffix(u.RawPath, "/") + "/" + strings.TrimPrefix(path, "/")
return &j
}
// An errorDetailBuffer is an io.ReadCloser that copies up to
// maxErrorDetailLines into a buffer for later inspection.
type errorDetailBuffer struct {
	r        io.ReadCloser   // underlying body being wrapped
	buf      strings.Builder // captured prefix of the stream
	bufLines int             // number of complete ('\n'-terminated) lines captured so far
}

// Close closes the underlying reader.
func (b *errorDetailBuffer) Close() error {
	return b.r.Close()
}

// Read passes the read through to the underlying reader while teeing an
// initial prefix of the stream into b.buf for later error reporting.
func (b *errorDetailBuffer) Read(p []byte) (n int, err error) {
	n, err = b.r.Read(p)
	// Copy the first maxErrorDetailLines+1 lines into b.buf,
	// discarding any further lines.
	//
	// Note that the read may begin or end in the middle of a UTF-8 character,
	// so don't try to do anything fancy with characters that encode to larger
	// than one byte.
	//
	// NOTE(review): when bufLines == maxErrorDetailLines the SplitAfterN
	// count below is 0, which yields no chunks at all, so the "+1"th line
	// claimed above is never captured — presumably harmless because
	// formatErrorDetail truncates anyway, but worth confirming.
	if b.bufLines <= maxErrorDetailLines {
		for _, line := range bytes.SplitAfterN(p[:n], []byte("\n"), maxErrorDetailLines-b.bufLines) {
			b.buf.Write(line)
			if len(line) > 0 && line[len(line)-1] == '\n' {
				b.bufLines++
				if b.bufLines > maxErrorDetailLines {
					break
				}
			}
		}
	}
	return n, err
}
cmd/go/internal/web: fix a typo
dependenicies -> dependencies
Change-Id: I0b8f06c04cf397c6330ffb43ac3ae5c2f7cf3138
Reviewed-on: https://go-review.googlesource.com/c/go/+/219157
Reviewed-by: Ian Lance Taylor <87e9c6d529889242b7e184afb632328636553ab4@golang.org>
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package web defines minimal helper routines for accessing HTTP/HTTPS
// resources without requiring external dependencies on the net package.
//
// If the cmd_go_bootstrap build tag is present, web avoids the use of the net
// package and returns errors for all network operations.
package web
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"strings"
"unicode"
"unicode/utf8"
)
// SecurityMode specifies whether a function should make network
// calls using insecure transports (eg, plain text HTTP).
// The zero value is "secure".
type SecurityMode int
const (
SecureOnly SecurityMode = iota // Reject plain HTTP; validate HTTPS.
DefaultSecurity // Allow plain HTTP if explicit; validate HTTPS.
Insecure // Allow plain HTTP if not explicitly HTTPS; skip HTTPS validation.
)
// An HTTPError describes an HTTP error response (non-200 result).
type HTTPError struct {
	URL        string // redacted
	Status     string
	StatusCode int
	Err        error  // underlying error, if known
	Detail     string // limited to maxErrorDetailLines and maxErrorDetailBytes
}

// Limits on how much server-provided detail is retained for error
// messages.
const (
	maxErrorDetailLines = 8
	maxErrorDetailBytes = maxErrorDetailLines * 81
)

// Error formats the error as "reading <url>: <status>". When the server
// supplied printable detail it is appended; otherwise a known underlying
// error replaces the status text.
func (e *HTTPError) Error() string {
	if e.Detail != "" {
		detailSep := " "
		if strings.ContainsRune(e.Detail, '\n') {
			// Multi-line detail goes on its own tab-indented lines.
			detailSep = "\n\t"
		}
		return fmt.Sprintf("reading %s: %v\n\tserver response:%s%s", e.URL, e.Status, detailSep, e.Detail)
	}
	if err := e.Err; err != nil {
		if pErr, ok := e.Err.(*os.PathError); ok && strings.HasSuffix(e.URL, pErr.Path) {
			// Remove the redundant copy of the path.
			err = pErr.Err
		}
		return fmt.Sprintf("reading %s: %v", e.URL, err)
	}
	return fmt.Sprintf("reading %s: %v", e.URL, e.Status)
}

// Is makes HTTPError match os.ErrNotExist for 404 (Not Found) and
// 410 (Gone) responses, enabling errors.Is(err, os.ErrNotExist).
func (e *HTTPError) Is(target error) bool {
	return target == os.ErrNotExist && (e.StatusCode == 404 || e.StatusCode == 410)
}

// Unwrap returns the underlying error, if known.
func (e *HTTPError) Unwrap() error {
	return e.Err
}
// GetBytes returns the body of the requested resource, or an error if the
// response status was not http.StatusOK.
//
// GetBytes is a convenience wrapper around Get and Response.Err.
func GetBytes(u *url.URL) ([]byte, error) {
resp, err := Get(DefaultSecurity, u)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if err := resp.Err(); err != nil {
return nil, err
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("reading %s: %v", Redacted(u), err)
}
return b, nil
}
type Response struct {
URL string // redacted
Status string
StatusCode int
Header map[string][]string
Body io.ReadCloser // Either the original body or &errorDetail.
fileErr error
errorDetail errorDetailBuffer
}
// Err returns an *HTTPError corresponding to the response r.
// If the response r has StatusCode 200 or 0 (unset), Err returns nil.
// Otherwise, Err may read from r.Body in order to extract relevant error detail.
func (r *Response) Err() error {
if r.StatusCode == 200 || r.StatusCode == 0 {
return nil
}
return &HTTPError{
URL: r.URL,
Status: r.Status,
StatusCode: r.StatusCode,
Err: r.fileErr,
Detail: r.formatErrorDetail(),
}
}
// formatErrorDetail converts r.errorDetail (a prefix of the output of r.Body)
// into a short, tab-indented summary.
func (r *Response) formatErrorDetail() string {
if r.Body != &r.errorDetail {
return "" // Error detail collection not enabled.
}
// Ensure that r.errorDetail has been populated.
_, _ = io.Copy(ioutil.Discard, r.Body)
s := r.errorDetail.buf.String()
if !utf8.ValidString(s) {
return "" // Don't try to recover non-UTF-8 error messages.
}
for _, r := range s {
if !unicode.IsGraphic(r) && !unicode.IsSpace(r) {
return "" // Don't let the server do any funny business with the user's terminal.
}
}
var detail strings.Builder
for i, line := range strings.Split(s, "\n") {
if strings.TrimSpace(line) == "" {
break // Stop at the first blank line.
}
if i > 0 {
detail.WriteString("\n\t")
}
if i >= maxErrorDetailLines {
detail.WriteString("[Truncated: too many lines.]")
break
}
if detail.Len()+len(line) > maxErrorDetailBytes {
detail.WriteString("[Truncated: too long.]")
break
}
detail.WriteString(line)
}
return detail.String()
}
// Get returns the body of the HTTP or HTTPS resource specified at the given URL.
//
// If the URL does not include an explicit scheme, Get first tries "https".
// If the server does not respond under that scheme and the security mode is
// Insecure, Get then tries "http".
// The URL included in the response indicates which scheme was actually used,
// and it is a redacted URL suitable for use in error messages.
//
// For the "https" scheme only, credentials are attached using the
// cmd/go/internal/auth package. If the URL itself includes a username and
// password, it will not be attempted under the "http" scheme unless the
// security mode is Insecure.
//
// Get returns a non-nil error only if the request did not receive a response
// under any applicable scheme. (A non-2xx response does not cause an error.)
func Get(security SecurityMode, u *url.URL) (*Response, error) {
return get(security, u)
}
// Redacted returns a redacted string form of the URL,
// suitable for printing in error messages.
// The string form replaces any non-empty password
// in the original URL with "[redacted]".
func Redacted(u *url.URL) string {
if u.User != nil {
if _, ok := u.User.Password(); ok {
redacted := *u
redacted.User = url.UserPassword(u.User.Username(), "[redacted]")
u = &redacted
}
}
return u.String()
}
// OpenBrowser attempts to open the requested URL in a web browser.
func OpenBrowser(url string) (opened bool) {
return openBrowser(url)
}
// Join returns the result of adding the slash-separated
// path elements to the end of u's path.
func Join(u *url.URL, path string) *url.URL {
j := *u
if path == "" {
return &j
}
j.Path = strings.TrimSuffix(u.Path, "/") + "/" + strings.TrimPrefix(path, "/")
j.RawPath = strings.TrimSuffix(u.RawPath, "/") + "/" + strings.TrimPrefix(path, "/")
return &j
}
// An errorDetailBuffer is an io.ReadCloser that copies up to
// maxErrorDetailLines into a buffer for later inspection.
type errorDetailBuffer struct {
r io.ReadCloser
buf strings.Builder
bufLines int
}
func (b *errorDetailBuffer) Close() error {
return b.r.Close()
}
func (b *errorDetailBuffer) Read(p []byte) (n int, err error) {
n, err = b.r.Read(p)
// Copy the first maxErrorDetailLines+1 lines into b.buf,
// discarding any further lines.
//
// Note that the read may begin or end in the middle of a UTF-8 character,
// so don't try to do anything fancy with characters that encode to larger
// than one byte.
if b.bufLines <= maxErrorDetailLines {
for _, line := range bytes.SplitAfterN(p[:n], []byte("\n"), maxErrorDetailLines-b.bufLines) {
b.buf.Write(line)
if len(line) > 0 && line[len(line)-1] == '\n' {
b.bufLines++
if b.bufLines > maxErrorDetailLines {
break
}
}
}
}
return n, err
}
|
package xpc
/*
#include "xpc_wrapper.h"
*/
import "C"
import (
"errors"
"fmt"
"log"
"reflect"
"strings"
"unsafe"
)
type XPC struct {
conn C.xpc_connection_t
}
func (x *XPC) Send(msg interface{}, verbose bool) {
// verbose == true converts the type from bool to C._Bool
C.XpcSendMessage(x.conn, goToXpc(msg), true, verbose == true)
}
//
// minimal XPC support required for BLE
//
// a dictionary of things
type Dict map[string]interface{}
func (d Dict) Contains(k string) bool {
_, ok := d[k]
return ok
}
func (d Dict) MustGetDict(k string) Dict {
return d[k].(Dict)
}
func (d Dict) MustGetArray(k string) Array {
return d[k].(Array)
}
func (d Dict) MustGetBytes(k string) []byte {
return d[k].([]byte)
}
func (d Dict) MustGetHexBytes(k string) string {
return fmt.Sprintf("%x", d[k].([]byte))
}
func (d Dict) MustGetInt(k string) int {
return int(d[k].(int64))
}
func (d Dict) MustGetUUID(k string) UUID {
return d[k].(UUID)
}
// GetString returns the string stored under k, or defv when the key is
// absent or nil. Panics if the stored value is not a string.
func (d Dict) GetString(k, defv string) string {
	if v := d[k]; v != nil {
		//log.Printf("GetString %s %#v\n", k, v)
		return v.(string)
	}
	//log.Printf("GetString %s default %#v\n", k, defv)
	return defv
}

// GetBytes returns the byte slice stored under k, or defv when the key is
// absent or nil.
func (d Dict) GetBytes(k string, defv []byte) []byte {
	if v := d[k]; v != nil {
		//log.Printf("GetBytes %s %#v\n", k, v)
		return v.([]byte)
	}
	//log.Printf("GetBytes %s default %#v\n", k, defv)
	return defv
}

// GetInt returns the int64 stored under k as an int, or defv when the key
// is absent or nil.
func (d Dict) GetInt(k string, defv int) int {
	if v := d[k]; v != nil {
		//log.Printf("GetString %s %#v\n", k, v)
		return int(v.(int64))
	}
	//log.Printf("GetString %s default %#v\n", k, defv)
	return defv
}

// GetUUID converts the value stored under k to a UUID (the zero UUID when
// absent).
func (d Dict) GetUUID(k string) UUID {
	return GetUUID(d[k])
}
// an Array of things
type Array []interface{}
func (a Array) GetUUID(k int) UUID {
return GetUUID(a[k])
}
// A UUID is a 16-byte universally unique identifier.
type UUID [16]byte

// MakeUUID parses a hex UUID string (dashes optional) into a UUID.
// Parse errors are silently ignored, yielding a partially-zero UUID;
// use MustUUID for a checked parse.
func MakeUUID(s string) UUID {
	var sl []byte
	s = strings.Replace(s, "-", "", -1)
	fmt.Sscanf(s, "%32x", &sl)
	var uuid UUID
	copy(uuid[:], sl)
	return uuid
}

// MustUUID is like MakeUUID but aborts the program when s is not exactly
// 32 hex digits (after dash removal) or fails to parse.
func MustUUID(s string) UUID {
	var sl []byte
	s = strings.Replace(s, "-", "", -1)
	if len(s) != 32 {
		log.Fatal("invalid UUID")
	}
	if n, err := fmt.Sscanf(s, "%32x", &sl); err != nil || n != 1 {
		log.Fatal("invalid UUID ", s, " len ", n, " error ", err)
	}
	var uuid UUID
	copy(uuid[:], sl)
	return uuid
}

// String renders the UUID as 32 lowercase hex digits without dashes.
func (uuid UUID) String() string {
	return fmt.Sprintf("%x", [16]byte(uuid))
}

// GetUUID converts v to a UUID. nil yields the zero UUID; a UUID is
// returned as-is; a byte slice is copied (truncated or zero-padded to 16
// bytes — the previous element-by-element loop panicked on slices longer
// than 16). Any other type aborts the program.
func GetUUID(v interface{}) UUID {
	if v == nil {
		return UUID{}
	}
	if uuid, ok := v.(UUID); ok {
		return uuid
	}
	// []byte and []uint8 are the same type in Go (byte is an alias for
	// uint8), so one case covers both branches the original spelled out.
	if b, ok := v.([]byte); ok {
		var uuid UUID
		copy(uuid[:], b)
		return uuid
	}
	log.Fatalf("invalid type for UUID: %#v", v)
	return UUID{}
}
var (
CONNECTION_INVALID = errors.New("connection invalid")
CONNECTION_INTERRUPTED = errors.New("connection interrupted")
CONNECTION_TERMINATED = errors.New("connection terminated")
TYPE_OF_UUID = reflect.TypeOf(UUID{})
TYPE_OF_BYTES = reflect.TypeOf([]byte{})
handlers = map[uintptr]XpcEventHandler{}
)
type XpcEventHandler interface {
HandleXpcEvent(event Dict, err error)
}
// XpcConnect opens an XPC connection to the named service and registers
// eh to receive its events (delivered through the exported
// handleXpcEvent callback, keyed by ctx).
func XpcConnect(service string, eh XpcEventHandler) XPC {
	// func XpcConnect(service string, eh XpcEventHandler) C.xpc_connection_t {
	// NOTE(review): ctx is the address of the local parameter eh, used
	// only as an opaque token/map key handed to the C side. A later call
	// could in principle observe the same stack address reused,
	// overwriting a live handlers entry — worth confirming this cannot
	// happen in practice.
	ctx := uintptr(unsafe.Pointer(&eh))
	handlers[ctx] = eh
	cservice := C.CString(service)
	defer C.free(unsafe.Pointer(cservice))
	// return C.XpcConnect(cservice, C.uintptr_t(ctx))
	return XPC{conn: C.XpcConnect(cservice, C.uintptr_t(ctx))}
}
//export handleXpcEvent
// handleXpcEvent is the callback invoked from C for every event on a
// connection. p is the opaque token registered by XpcConnect; events with
// no registered handler are dropped.
func handleXpcEvent(event C.xpc_object_t, p C.ulong) {
	//log.Printf("handleXpcEvent %#v %#v\n", event, p)
	t := C.xpc_get_type(event)
	eh := handlers[uintptr(p)]
	if eh == nil {
		//log.Println("no handler for", p)
		return
	}
	if t != C.TYPE_ERROR {
		eh.HandleXpcEvent(xpcToGo(event).(Dict), nil)
		return
	}
	// Map the well-known XPC error objects onto the package's sentinel
	// errors; anything else is wrapped in a generic error value.
	switch event {
	case C.ERROR_CONNECTION_INVALID:
		// The client process on the other end of the connection has either
		// crashed or cancelled the connection. After receiving this error,
		// the connection is in an invalid state, and you do not need to
		// call xpc_connection_cancel(). Just tear down any associated state
		// here.
		//log.Println("connection invalid")
		eh.HandleXpcEvent(nil, CONNECTION_INVALID)
	case C.ERROR_CONNECTION_INTERRUPTED:
		//log.Println("connection interrupted")
		eh.HandleXpcEvent(nil, CONNECTION_INTERRUPTED)
	case C.ERROR_CONNECTION_TERMINATED:
		// Handle per-connection termination cleanup.
		//log.Println("connection terminated")
		eh.HandleXpcEvent(nil, CONNECTION_TERMINATED)
	default:
		//log.Println("got some error", event)
		eh.HandleXpcEvent(nil, fmt.Errorf("%v", event))
	}
}
// goToXpc converts a go object to an xpc object
func goToXpc(o interface{}) C.xpc_object_t {
return valueToXpc(reflect.ValueOf(o))
}
// valueToXpc converts a go Value to an xpc object
//
// note that not all the types are supported, but only the subset required for Blued
func valueToXpc(val reflect.Value) C.xpc_object_t {
if !val.IsValid() {
return nil
}
var xv C.xpc_object_t
switch val.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
xv = C.xpc_int64_create(C.int64_t(val.Int()))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
xv = C.xpc_int64_create(C.int64_t(val.Uint()))
case reflect.String:
xv = C.xpc_string_create(C.CString(val.String()))
case reflect.Map:
xv = C.xpc_dictionary_create(nil, nil, 0)
for _, k := range val.MapKeys() {
v := valueToXpc(val.MapIndex(k))
C.xpc_dictionary_set_value(xv, C.CString(k.String()), v)
if v != nil {
C.xpc_release(v)
}
}
case reflect.Array, reflect.Slice:
if val.Type() == TYPE_OF_UUID {
// Array of bytes
var uuid [16]byte
reflect.Copy(reflect.ValueOf(uuid[:]), val)
xv = C.xpc_uuid_create(C.ptr_to_uuid(unsafe.Pointer(&uuid[0])))
} else if val.Type() == TYPE_OF_BYTES {
// slice of bytes
xv = C.xpc_data_create(unsafe.Pointer(val.Pointer()), C.size_t(val.Len()))
} else {
xv = C.xpc_array_create(nil, 0)
l := val.Len()
for i := 0; i < l; i++ {
v := valueToXpc(val.Index(i))
C.xpc_array_append_value(xv, v)
if v != nil {
C.xpc_release(v)
}
}
}
case reflect.Interface, reflect.Ptr:
xv = valueToXpc(val.Elem())
default:
log.Fatalf("unsupported %#v", val.String())
}
return xv
}
//export arraySet
func arraySet(u C.uintptr_t, i C.int, v C.xpc_object_t) {
a := *(*Array)(unsafe.Pointer(uintptr(u)))
a[i] = xpcToGo(v)
}
//export dictSet
func dictSet(u C.uintptr_t, k *C.char, v C.xpc_object_t) {
d := *(*Dict)(unsafe.Pointer(uintptr(u)))
d[C.GoString(k)] = xpcToGo(v)
}
// xpcToGo converts an xpc object to a go object
//
// note that not all the types are supported, but only the subset required for Blued
func xpcToGo(v C.xpc_object_t) interface{} {
t := C.xpc_get_type(v)
switch t {
case C.TYPE_ARRAY:
a := make(Array, C.int(C.xpc_array_get_count(v)))
p := uintptr(unsafe.Pointer(&a))
C.XpcArrayApply(C.uintptr_t(p), v)
return a
case C.TYPE_DATA:
return C.GoBytes(C.xpc_data_get_bytes_ptr(v), C.int(C.xpc_data_get_length(v)))
case C.TYPE_DICT:
d := make(Dict)
p := uintptr(unsafe.Pointer(&d))
C.XpcDictApply(C.uintptr_t(p), v)
return d
case C.TYPE_INT64:
return int64(C.xpc_int64_get_value(v))
case C.TYPE_STRING:
return C.GoString(C.xpc_string_get_string_ptr(v))
case C.TYPE_UUID:
a := [16]byte{}
C.XpcUUIDGetBytes(unsafe.Pointer(&a), v)
return UUID(a)
default:
log.Fatalf("unexpected type %#v, value %#v", t, v)
}
return nil
}
// xpc_release is needed by tests, since they can't use CGO
func xpc_release(xv C.xpc_object_t) {
C.xpc_release(xv)
}
// this is used to check the OS version
type Utsname struct {
Sysname string
Nodename string
Release string
Version string
Machine string
}
func Uname(utsname *Utsname) error {
var cstruct C.struct_utsname
if err := C.uname(&cstruct); err != 0 {
return errors.New("utsname error")
}
// XXX: this may crash if any value is exactly 256 characters (no 0 terminator)
utsname.Sysname = C.GoString(&cstruct.sysname[0])
utsname.Nodename = C.GoString(&cstruct.nodename[0])
utsname.Release = C.GoString(&cstruct.release[0])
utsname.Version = C.GoString(&cstruct.version[0])
utsname.Machine = C.GoString(&cstruct.machine[0])
return nil
}
Cleanup
package xpc
/*
#include "xpc_wrapper.h"
*/
import "C"
import (
"errors"
"fmt"
"log"
"reflect"
"strings"
"unsafe"
)
type XPC struct {
conn C.xpc_connection_t
}
func (x *XPC) Send(msg interface{}, verbose bool) {
// verbose == true converts the type from bool to C._Bool
C.XpcSendMessage(x.conn, goToXpc(msg), true, verbose == true)
}
//
// minimal XPC support required for BLE
//
// a dictionary of things
type Dict map[string]interface{}
func (d Dict) Contains(k string) bool {
_, ok := d[k]
return ok
}
func (d Dict) MustGetDict(k string) Dict {
return d[k].(Dict)
}
func (d Dict) MustGetArray(k string) Array {
return d[k].(Array)
}
func (d Dict) MustGetBytes(k string) []byte {
return d[k].([]byte)
}
func (d Dict) MustGetHexBytes(k string) string {
return fmt.Sprintf("%x", d[k].([]byte))
}
func (d Dict) MustGetInt(k string) int {
return int(d[k].(int64))
}
func (d Dict) MustGetUUID(k string) UUID {
return d[k].(UUID)
}
func (d Dict) GetString(k, defv string) string {
if v := d[k]; v != nil {
//log.Printf("GetString %s %#v\n", k, v)
return v.(string)
}
//log.Printf("GetString %s default %#v\n", k, defv)
return defv
}
func (d Dict) GetBytes(k string, defv []byte) []byte {
if v := d[k]; v != nil {
//log.Printf("GetBytes %s %#v\n", k, v)
return v.([]byte)
}
//log.Printf("GetBytes %s default %#v\n", k, defv)
return defv
}
func (d Dict) GetInt(k string, defv int) int {
if v := d[k]; v != nil {
//log.Printf("GetString %s %#v\n", k, v)
return int(v.(int64))
}
//log.Printf("GetString %s default %#v\n", k, defv)
return defv
}
func (d Dict) GetUUID(k string) UUID {
return GetUUID(d[k])
}
// an Array of things
type Array []interface{}
func (a Array) GetUUID(k int) UUID {
return GetUUID(a[k])
}
// A UUID is a 16-byte universally unique identifier.
type UUID [16]byte

// MakeUUID parses a hex UUID string (dashes optional) into a UUID.
// Parse errors are silently ignored, yielding a partially-zero UUID;
// use MustUUID for a checked parse.
func MakeUUID(s string) UUID {
	var digits []byte
	s = strings.Replace(s, "-", "", -1)
	fmt.Sscanf(s, "%32x", &digits)
	var uuid UUID
	copy(uuid[:], digits)
	return uuid
}

// MustUUID is like MakeUUID but aborts the program when s is not exactly
// 32 hex digits (after dash removal) or fails to parse.
func MustUUID(s string) UUID {
	var digits []byte
	s = strings.Replace(s, "-", "", -1)
	if len(s) != 32 {
		log.Fatal("invalid UUID")
	}
	if n, err := fmt.Sscanf(s, "%32x", &digits); err != nil || n != 1 {
		log.Fatal("invalid UUID ", s, " len ", n, " error ", err)
	}
	var uuid UUID
	copy(uuid[:], digits)
	return uuid
}

// String renders the UUID as 32 lowercase hex digits without dashes.
func (uuid UUID) String() string {
	return fmt.Sprintf("%x", [16]byte(uuid))
}

// GetUUID converts v to a UUID. nil yields the zero UUID; a UUID is
// returned as-is; a byte slice is copied (truncated or zero-padded to 16
// bytes — the previous element-by-element loop panicked on slices longer
// than 16). Any other type aborts the program.
func GetUUID(v interface{}) UUID {
	if v == nil {
		return UUID{}
	}
	if uuid, ok := v.(UUID); ok {
		return uuid
	}
	// []byte and []uint8 are the same type in Go (byte is an alias for
	// uint8), so one case covers both branches the original spelled out.
	if b, ok := v.([]byte); ok {
		var uuid UUID
		copy(uuid[:], b)
		return uuid
	}
	log.Fatalf("invalid type for UUID: %#v", v)
	return UUID{}
}
var (
CONNECTION_INVALID = errors.New("connection invalid")
CONNECTION_INTERRUPTED = errors.New("connection interrupted")
CONNECTION_TERMINATED = errors.New("connection terminated")
TYPE_OF_UUID = reflect.TypeOf(UUID{})
TYPE_OF_BYTES = reflect.TypeOf([]byte{})
handlers = map[uintptr]XpcEventHandler{}
)
type XpcEventHandler interface {
HandleXpcEvent(event Dict, err error)
}
func XpcConnect(service string, eh XpcEventHandler) XPC {
// func XpcConnect(service string, eh XpcEventHandler) C.xpc_connection_t {
ctx := uintptr(unsafe.Pointer(&eh))
handlers[ctx] = eh
cservice := C.CString(service)
defer C.free(unsafe.Pointer(cservice))
// return C.XpcConnect(cservice, C.uintptr_t(ctx))
return XPC{conn: C.XpcConnect(cservice, C.uintptr_t(ctx))}
}
//export handleXpcEvent
func handleXpcEvent(event C.xpc_object_t, p C.ulong) {
//log.Printf("handleXpcEvent %#v %#v\n", event, p)
t := C.xpc_get_type(event)
eh := handlers[uintptr(p)]
if eh == nil {
//log.Println("no handler for", p)
return
}
if t == C.TYPE_ERROR {
switch event {
case C.ERROR_CONNECTION_INVALID:
// The client process on the other end of the connection has either
// crashed or cancelled the connection. After receiving this error,
// the connection is in an invalid state, and you do not need to
// call xpc_connection_cancel(). Just tear down any associated state
// here.
//log.Println("connection invalid")
eh.HandleXpcEvent(nil, CONNECTION_INVALID)
case C.ERROR_CONNECTION_INTERRUPTED:
//log.Println("connection interrupted")
eh.HandleXpcEvent(nil, CONNECTION_INTERRUPTED)
case C.ERROR_CONNECTION_TERMINATED:
// Handle per-connection termination cleanup.
//log.Println("connection terminated")
eh.HandleXpcEvent(nil, CONNECTION_TERMINATED)
default:
//log.Println("got some error", event)
eh.HandleXpcEvent(nil, fmt.Errorf("%v", event))
}
} else {
eh.HandleXpcEvent(xpcToGo(event).(Dict), nil)
}
}
// goToXpc converts a go object to an xpc object
func goToXpc(o interface{}) C.xpc_object_t {
return valueToXpc(reflect.ValueOf(o))
}
// valueToXpc converts a go Value to an xpc object
//
// note that not all the types are supported, but only the subset required for Blued
//
// NOTE(review): the C strings allocated via C.CString (string values and
// map keys) are never freed here — confirm whether the xpc_* creators copy
// their input; if not, this leaks one allocation per call.
func valueToXpc(val reflect.Value) C.xpc_object_t {
	// Invalid values (e.g. reflect.ValueOf(nil)) map to a nil xpc object.
	if !val.IsValid() {
		return nil
	}
	var xv C.xpc_object_t
	switch val.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		xv = C.xpc_int64_create(C.int64_t(val.Int()))
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
		// unsigned values are narrowed into the signed xpc int64 type
		xv = C.xpc_int64_create(C.int64_t(val.Uint()))
	case reflect.String:
		xv = C.xpc_string_create(C.CString(val.String()))
	case reflect.Map:
		// built key-by-key; values are recursively converted
		xv = C.xpc_dictionary_create(nil, nil, 0)
		for _, k := range val.MapKeys() {
			v := valueToXpc(val.MapIndex(k))
			C.xpc_dictionary_set_value(xv, C.CString(k.String()), v)
			// the dictionary retains v, so drop our reference
			if v != nil {
				C.xpc_release(v)
			}
		}
	case reflect.Array, reflect.Slice:
		if val.Type() == TYPE_OF_UUID {
			// Array of bytes
			var uuid [16]byte
			reflect.Copy(reflect.ValueOf(uuid[:]), val)
			xv = C.xpc_uuid_create(C.ptr_to_uuid(unsafe.Pointer(&uuid[0])))
		} else if val.Type() == TYPE_OF_BYTES {
			// slice of bytes
			xv = C.xpc_data_create(unsafe.Pointer(val.Pointer()), C.size_t(val.Len()))
		} else {
			// generic slice/array: recurse per element
			xv = C.xpc_array_create(nil, 0)
			l := val.Len()
			for i := 0; i < l; i++ {
				v := valueToXpc(val.Index(i))
				C.xpc_array_append_value(xv, v)
				// the array retains v, so drop our reference
				if v != nil {
					C.xpc_release(v)
				}
			}
		}
	case reflect.Interface, reflect.Ptr:
		// unwrap and convert the referenced value
		xv = valueToXpc(val.Elem())
	default:
		// unsupported kinds abort the process (library is cgo-bound anyway)
		log.Fatalf("unsupported %#v", val.String())
	}
	return xv
}
// arraySet is the C callback target used by xpcToGo's TYPE_ARRAY case:
// u is the address of the destination Go Array (smuggled through C as a
// uintptr), i the element index, v the xpc element to convert and store.
//export arraySet
func arraySet(u C.uintptr_t, i C.int, v C.xpc_object_t) {
	a := *(*Array)(unsafe.Pointer(uintptr(u)))
	a[i] = xpcToGo(v)
}

// dictSet is the C callback target used by xpcToGo's TYPE_DICT case:
// u is the address of the destination Go Dict, k the C key string,
// v the xpc value to convert and store.
//export dictSet
func dictSet(u C.uintptr_t, k *C.char, v C.xpc_object_t) {
	d := *(*Dict)(unsafe.Pointer(uintptr(u)))
	d[C.GoString(k)] = xpcToGo(v)
}
// xpcToGo converts an xpc object to a go object
//
// note that not all the types are supported, but only the subset required for Blued
//
// Containers are filled by C-side iteration (XpcArrayApply/XpcDictApply)
// which calls back into arraySet/dictSet with the address of the local
// container passed as a uintptr.
func xpcToGo(v C.xpc_object_t) interface{} {
	t := C.xpc_get_type(v)
	switch t {
	case C.TYPE_ARRAY:
		a := make(Array, C.int(C.xpc_array_get_count(v)))
		p := uintptr(unsafe.Pointer(&a))
		C.XpcArrayApply(C.uintptr_t(p), v)
		return a
	case C.TYPE_DATA:
		// copies the xpc data payload into a fresh Go []byte
		return C.GoBytes(C.xpc_data_get_bytes_ptr(v), C.int(C.xpc_data_get_length(v)))
	case C.TYPE_DICT:
		d := make(Dict)
		p := uintptr(unsafe.Pointer(&d))
		C.XpcDictApply(C.uintptr_t(p), v)
		return d
	case C.TYPE_INT64:
		return int64(C.xpc_int64_get_value(v))
	case C.TYPE_STRING:
		return C.GoString(C.xpc_string_get_string_ptr(v))
	case C.TYPE_UUID:
		a := [16]byte{}
		C.XpcUUIDGetBytes(unsafe.Pointer(&a), v)
		return UUID(a)
	default:
		// unknown xpc types abort the process
		log.Fatalf("unexpected type %#v, value %#v", t, v)
	}
	return nil
}
// xpc_release is needed by tests, since they can't use CGO
// (it simply forwards to the C xpc_release to drop a reference).
func xpc_release(xv C.xpc_object_t) {
	C.xpc_release(xv)
}
// Utsname mirrors C struct utsname; it is used to check the OS version.
type Utsname struct {
	Sysname  string // OS name (e.g. "Darwin")
	Nodename string // network node hostname
	Release  string // OS release
	Version  string // OS version string
	Machine  string // hardware identifier
}

// Uname fills utsname from the C uname(3) call, converting each fixed
// C char array to a Go string. Returns an error when uname reports
// a nonzero status.
func Uname(utsname *Utsname) error {
	var cstruct C.struct_utsname
	if err := C.uname(&cstruct); err != 0 {
		return errors.New("utsname error")
	}
	// XXX: this may crash if any value is exactly 256 characters (no 0 terminator)
	utsname.Sysname = C.GoString(&cstruct.sysname[0])
	utsname.Nodename = C.GoString(&cstruct.nodename[0])
	utsname.Release = C.GoString(&cstruct.release[0])
	utsname.Version = C.GoString(&cstruct.version[0])
	utsname.Machine = C.GoString(&cstruct.machine[0])
	return nil
}
|
/*******************************************************************************
The MIT License (MIT)
Copyright (c) 2013 Hajime Nakagami
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*******************************************************************************/
package firebirdsql
import (
"math"
"time"
"bytes"
"encoding/binary"
)
// Firebird wire-protocol SQL type codes (values of xSQLVAR.sqltype).
const (
	SQL_TYPE_TEXT      = 452
	SQL_TYPE_VARYING   = 448
	SQL_TYPE_SHORT     = 500
	SQL_TYPE_LONG      = 496
	SQL_TYPE_FLOAT     = 482
	SQL_TYPE_DOUBLE    = 480
	SQL_TYPE_D_FLOAT   = 530
	SQL_TYPE_TIMESTAMP = 510
	SQL_TYPE_BLOB      = 520
	SQL_TYPE_ARRAY     = 540
	SQL_TYPE_QUAD      = 550
	SQL_TYPE_TIME      = 560
	SQL_TYPE_DATE      = 570
	SQL_TYPE_INT64     = 580
	SQL_TYPE_BOOLEAN   = 32764
	SQL_TYPE_NULL      = 32766
)

// xsqlvarTypeLength maps a SQL type to its on-wire I/O length in bytes;
// -1 marks variable-length (VARYING). SQL_TYPE_TEXT is absent: its
// length comes from the column declaration (see ioLength).
// NOTE(review): SHORT is listed as 4 — presumably padded on the wire;
// confirm against the protocol code.
var xsqlvarTypeLength = map[int]int{
	SQL_TYPE_VARYING:   -1,
	SQL_TYPE_SHORT:     4,
	SQL_TYPE_LONG:      4,
	SQL_TYPE_FLOAT:     4,
	SQL_TYPE_TIME:      4,
	SQL_TYPE_DATE:      4,
	SQL_TYPE_DOUBLE:    8,
	SQL_TYPE_TIMESTAMP: 8,
	SQL_TYPE_BLOB:      8,
	SQL_TYPE_ARRAY:     8,
	SQL_TYPE_QUAD:      8,
	SQL_TYPE_INT64:     8,
	SQL_TYPE_BOOLEAN:   1,
}

// xsqlvarTypeDisplayLength maps a SQL type to a suggested display width
// in characters; -1 / 0 mark variable or non-displayable types.
var xsqlvarTypeDisplayLength = map[int]int{
	SQL_TYPE_VARYING:   -1,
	SQL_TYPE_SHORT:     6,
	SQL_TYPE_LONG:      11,
	SQL_TYPE_FLOAT:     17,
	SQL_TYPE_TIME:      11,
	SQL_TYPE_DATE:      10,
	SQL_TYPE_DOUBLE:    17,
	SQL_TYPE_TIMESTAMP: 22,
	SQL_TYPE_BLOB:      0,
	SQL_TYPE_ARRAY:     -1,
	SQL_TYPE_QUAD:      20,
	SQL_TYPE_INT64:     20,
	SQL_TYPE_BOOLEAN:   5,
}

// xSQLVAR describes one column or parameter of a statement
// (mirrors the Firebird C API XSQLVAR descriptor).
type xSQLVAR struct {
	sqltype    int    // one of the SQL_TYPE_* codes above
	sqlscale   int    // decimal scale; <0 divides, >0 multiplies (see value)
	sqlsubtype int    // subtype; 1 = OCTETS (binary) for TEXT/VARYING
	sqllen     int    // declared length in bytes
	null_ok    bool   // column allows NULL
	fieldname  string // column name
	relname    string // relation (table) name
	ownname    string // owner name
	aliasname  string // query alias
}
// ioLength returns the wire (I/O) length in bytes for this column.
// Fixed-length CHAR (SQL_TYPE_TEXT) uses the declared column length;
// every other type uses the per-type table (-1 means variable-length).
func (x *xSQLVAR) ioLength() int {
	if x.sqltype == SQL_TYPE_TEXT {
		return x.sqllen
	}
	// idiom: no else after a terminating return
	return xsqlvarTypeLength[x.sqltype]
}
// displayLenght returns the suggested display width in characters.
// Fixed-length CHAR (SQL_TYPE_TEXT) uses the declared column length.
// NOTE(review): the name misspells "Length" but is kept so existing
// callers keep compiling.
func (x *xSQLVAR) displayLenght() int {
	if x.sqltype == SQL_TYPE_TEXT {
		return x.sqllen
	}
	// idiom: no else after a terminating return
	return xsqlvarTypeDisplayLength[x.sqltype]
}
// _parseDate decodes a 4-byte Firebird DATE payload into a time.Time at
// midnight UTC, using the Julian-style day-count algorithm from the
// Firebird client sources.
//
// BUG FIX: the raw value was decoded with bytes_to_int32 (little-endian)
// while every other field decoder in this file is big-endian
// (bytes_to_bint16/bint32/bint64); use the big-endian decoder.
func (x *xSQLVAR) _parseDate(raw_value []byte) time.Time {
	nday := int(bytes_to_bint32(raw_value)) + 678882
	century := (4*nday - 1) / 146097
	nday = 4*nday - 1 - 146097*century
	day := nday / 4
	nday = (4*day + 3) / 1461
	day = 4*day + 3 - 1461*nday
	day = (day + 4) / 4
	month := (5*day - 3) / 153
	day = 5*day - 3 - 153*month
	day = (day + 5) / 5
	year := 100*century + nday
	if month < 10 {
		month += 3
	} else {
		month -= 9
		year += 1
	}
	return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
}
/*
func (x *xSQLVAR) _parseTime(raw_value []byte) time.Time {
n := int(bytes_to_bint32(raw_value))
s := n / 10000
m := s / 60
h := m / 60
m = m % 60
s = s % 60
return time.Time(h, m, s, (n % 10000) * 100)
}
*/
// value decodes a raw column payload (big-endian wire encoding) into a
// Go value according to the column's declared SQL type.
//
// Concrete result types, as the code below shows:
//   - TEXT/VARYING: string, or the raw []byte when sqlsubtype==1 (OCTETS)
//   - SHORT/LONG/INT64: the raw integer when sqlscale==0, int64 when
//     scaled up (sqlscale>0), float64 when scaled down (sqlscale<0)
//   - DATE: time.Time; FLOAT: float32; DOUBLE: float64; BOOLEAN: bool
//
// NOTE(review): unscaled SHORT/LONG return int16/int32 while scaled
// values return int64/float64 — callers must type-switch accordingly.
// Types without a case (TIME, TIMESTAMP, BLOB, ...) fall through and
// return (nil, nil).
func (x *xSQLVAR) value(raw_value []byte) (v interface{}, err error) {
	switch x.sqltype {
	case SQL_TYPE_TEXT:
		if x.sqlsubtype == 1 { // OCTETS: binary, keep raw bytes
			v = raw_value
		} else {
			v = bytes.NewBuffer(raw_value).String()
		}
	case SQL_TYPE_VARYING:
		if x.sqlsubtype == 1 { // OCTETS
			v = raw_value
		} else {
			v = bytes.NewBuffer(raw_value).String()
		}
	case SQL_TYPE_SHORT:
		i16 := bytes_to_bint16(raw_value)
		if x.sqlscale > 0 {
			v = int64(i16) * int64(math.Pow(10.0, float64(x.sqlscale)))
		} else if x.sqlscale < 0 {
			// negative scale: value is stored scaled up; divide back out
			v = float64(i16) * math.Pow(10.0, float64(x.sqlscale))
		} else {
			v = i16
		}
	case SQL_TYPE_LONG:
		i32 := bytes_to_bint32(raw_value)
		if x.sqlscale > 0 {
			v = int64(i32) * int64(math.Pow(10.0, float64(x.sqlscale)))
		} else if x.sqlscale < 0 {
			v = float64(i32) * math.Pow(10.0, float64(x.sqlscale))
		} else {
			v = i32
		}
	case SQL_TYPE_INT64:
		i64 := bytes_to_bint64(raw_value)
		if x.sqlscale > 0 {
			v = i64 * int64(math.Pow(10.0, float64(x.sqlscale)))
		} else if x.sqlscale < 0 {
			v = float64(i64) * math.Pow(10.0, float64(x.sqlscale))
		} else {
			v = i64
		}
	case SQL_TYPE_DATE:
		v = x._parseDate(raw_value)
	// case SQL_TYPE_TIME:
	//	return x._parseTime(raw_value)
	// case SQL_TYPE_TIMESTAMP:
	//	yyyy, mm, dd = self._parse_date(raw_value[:4])
	//	h, m, s, ms = self._parse_time(raw_value[4:])
	//	return datetime.datetime(yyyy, mm, dd, h, m, s, ms)
	case SQL_TYPE_FLOAT:
		var f32 float32
		b := bytes.NewReader(raw_value)
		err = binary.Read(b, binary.BigEndian, &f32)
		v = f32
	case SQL_TYPE_DOUBLE:
		b := bytes.NewReader(raw_value)
		var f64 float64
		err = binary.Read(b, binary.BigEndian, &f64)
		v = f64
	case SQL_TYPE_BOOLEAN:
		// any nonzero first byte is true
		v = raw_value[0] != 0
	}
	return
}
parseDate
/*******************************************************************************
The MIT License (MIT)
Copyright (c) 2013 Hajime Nakagami
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*******************************************************************************/
package firebirdsql
import (
"math"
"time"
"bytes"
"encoding/binary"
)
// Firebird wire-protocol SQL type codes (values of xSQLVAR.sqltype).
const (
	SQL_TYPE_TEXT      = 452
	SQL_TYPE_VARYING   = 448
	SQL_TYPE_SHORT     = 500
	SQL_TYPE_LONG      = 496
	SQL_TYPE_FLOAT     = 482
	SQL_TYPE_DOUBLE    = 480
	SQL_TYPE_D_FLOAT   = 530
	SQL_TYPE_TIMESTAMP = 510
	SQL_TYPE_BLOB      = 520
	SQL_TYPE_ARRAY     = 540
	SQL_TYPE_QUAD      = 550
	SQL_TYPE_TIME      = 560
	SQL_TYPE_DATE      = 570
	SQL_TYPE_INT64     = 580
	SQL_TYPE_BOOLEAN   = 32764
	SQL_TYPE_NULL      = 32766
)

// xsqlvarTypeLength maps a SQL type to its on-wire I/O length in bytes;
// -1 marks variable-length (VARYING). SQL_TYPE_TEXT is absent: its
// length comes from the column declaration (see ioLength).
var xsqlvarTypeLength = map[int]int{
	SQL_TYPE_VARYING:   -1,
	SQL_TYPE_SHORT:     4,
	SQL_TYPE_LONG:      4,
	SQL_TYPE_FLOAT:     4,
	SQL_TYPE_TIME:      4,
	SQL_TYPE_DATE:      4,
	SQL_TYPE_DOUBLE:    8,
	SQL_TYPE_TIMESTAMP: 8,
	SQL_TYPE_BLOB:      8,
	SQL_TYPE_ARRAY:     8,
	SQL_TYPE_QUAD:      8,
	SQL_TYPE_INT64:     8,
	SQL_TYPE_BOOLEAN:   1,
}

// xsqlvarTypeDisplayLength maps a SQL type to a suggested display width
// in characters; -1 / 0 mark variable or non-displayable types.
var xsqlvarTypeDisplayLength = map[int]int{
	SQL_TYPE_VARYING:   -1,
	SQL_TYPE_SHORT:     6,
	SQL_TYPE_LONG:      11,
	SQL_TYPE_FLOAT:     17,
	SQL_TYPE_TIME:      11,
	SQL_TYPE_DATE:      10,
	SQL_TYPE_DOUBLE:    17,
	SQL_TYPE_TIMESTAMP: 22,
	SQL_TYPE_BLOB:      0,
	SQL_TYPE_ARRAY:     -1,
	SQL_TYPE_QUAD:      20,
	SQL_TYPE_INT64:     20,
	SQL_TYPE_BOOLEAN:   5,
}

// xSQLVAR describes one column or parameter of a statement
// (mirrors the Firebird C API XSQLVAR descriptor).
type xSQLVAR struct {
	sqltype    int    // one of the SQL_TYPE_* codes above
	sqlscale   int    // decimal scale; <0 divides, >0 multiplies (see value)
	sqlsubtype int    // subtype; 1 = OCTETS (binary) for TEXT/VARYING
	sqllen     int    // declared length in bytes
	null_ok    bool   // column allows NULL
	fieldname  string // column name
	relname    string // relation (table) name
	ownname    string // owner name
	aliasname  string // query alias
}
// ioLength returns the wire (I/O) length in bytes for this column.
// Fixed-length CHAR (SQL_TYPE_TEXT) uses the declared column length;
// every other type uses the per-type table (-1 means variable-length).
func (x *xSQLVAR) ioLength() int {
	if x.sqltype == SQL_TYPE_TEXT {
		return x.sqllen
	}
	// idiom: no else after a terminating return
	return xsqlvarTypeLength[x.sqltype]
}
// displayLenght returns the suggested display width in characters.
// Fixed-length CHAR (SQL_TYPE_TEXT) uses the declared column length.
// NOTE(review): the name misspells "Length" but is kept so existing
// callers keep compiling.
func (x *xSQLVAR) displayLenght() int {
	if x.sqltype == SQL_TYPE_TEXT {
		return x.sqllen
	}
	// idiom: no else after a terminating return
	return xsqlvarTypeDisplayLength[x.sqltype]
}
// _parseDate decodes a 4-byte big-endian Firebird DATE payload into a
// calendar (year, month, day) triple using the Julian-style day-count
// algorithm from the Firebird client sources.
func (x *xSQLVAR) _parseDate(raw_value []byte) (int, int, int) {
	// Shift the encoded day number to the algorithm's epoch.
	nday := int(bytes_to_bint32(raw_value)) + 678882
	century := (4*nday - 1) / 146097
	nday = 4*nday - 1 - 146097*century
	day := nday / 4
	nday = (4*day + 3) / 1461
	day = 4*day + 3 - 1461*nday
	day = (day + 4) / 4
	month := (5*day - 3) / 153
	day = 5*day - 3 - 153*month
	day = (day + 5) / 5
	year := 100*century + nday
	// The intermediate calendar counts months from March; rotate back
	// so January/February land in the following year.
	if month < 10 {
		month += 3
	} else {
		month -= 9
		year++
	}
	return year, month, day
}
// parseDate converts a raw DATE payload into a time.Time at midnight UTC.
func (x *xSQLVAR) parseDate(raw_value []byte) time.Time {
	y, m, d := x._parseDate(raw_value)
	return time.Date(y, time.Month(m), d, 0, 0, 0, 0, time.UTC)
}
/*
func (x *xSQLVAR) _parseTime(raw_value []byte) time.Time {
n := int(bytes_to_bint32(raw_value))
s := n / 10000
m := s / 60
h := m / 60
m = m % 60
s = s % 60
return time.Time(h, m, s, (n % 10000) * 100)
}
*/
// value decodes a raw column payload (big-endian wire encoding) into a
// Go value according to the column's declared SQL type.
//
// Concrete result types, as the code below shows:
//   - TEXT/VARYING: string, or the raw []byte when sqlsubtype==1 (OCTETS)
//   - SHORT/LONG/INT64: the raw integer when sqlscale==0, int64 when
//     scaled up (sqlscale>0), float64 when scaled down (sqlscale<0)
//   - DATE: time.Time; FLOAT: float32; DOUBLE: float64; BOOLEAN: bool
//
// NOTE(review): unscaled SHORT/LONG return int16/int32 while scaled
// values return int64/float64 — callers must type-switch accordingly.
// Types without a case (TIME, TIMESTAMP, BLOB, ...) fall through and
// return (nil, nil).
func (x *xSQLVAR) value(raw_value []byte) (v interface{}, err error) {
	switch x.sqltype {
	case SQL_TYPE_TEXT:
		if x.sqlsubtype == 1 { // OCTETS: binary, keep raw bytes
			v = raw_value
		} else {
			v = bytes.NewBuffer(raw_value).String()
		}
	case SQL_TYPE_VARYING:
		if x.sqlsubtype == 1 { // OCTETS
			v = raw_value
		} else {
			v = bytes.NewBuffer(raw_value).String()
		}
	case SQL_TYPE_SHORT:
		i16 := bytes_to_bint16(raw_value)
		if x.sqlscale > 0 {
			v = int64(i16) * int64(math.Pow(10.0, float64(x.sqlscale)))
		} else if x.sqlscale < 0 {
			// negative scale: value is stored scaled up; divide back out
			v = float64(i16) * math.Pow(10.0, float64(x.sqlscale))
		} else {
			v = i16
		}
	case SQL_TYPE_LONG:
		i32 := bytes_to_bint32(raw_value)
		if x.sqlscale > 0 {
			v = int64(i32) * int64(math.Pow(10.0, float64(x.sqlscale)))
		} else if x.sqlscale < 0 {
			v = float64(i32) * math.Pow(10.0, float64(x.sqlscale))
		} else {
			v = i32
		}
	case SQL_TYPE_INT64:
		i64 := bytes_to_bint64(raw_value)
		if x.sqlscale > 0 {
			v = i64 * int64(math.Pow(10.0, float64(x.sqlscale)))
		} else if x.sqlscale < 0 {
			v = float64(i64) * math.Pow(10.0, float64(x.sqlscale))
		} else {
			v = i64
		}
	case SQL_TYPE_DATE:
		v = x.parseDate(raw_value)
	// case SQL_TYPE_TIME:
	//	return x._parseTime(raw_value)
	// case SQL_TYPE_TIMESTAMP:
	//	yyyy, mm, dd = self._parse_date(raw_value[:4])
	//	h, m, s, ms = self._parse_time(raw_value[4:])
	//	return datetime.datetime(yyyy, mm, dd, h, m, s, ms)
	case SQL_TYPE_FLOAT:
		var f32 float32
		b := bytes.NewReader(raw_value)
		err = binary.Read(b, binary.BigEndian, &f32)
		v = f32
	case SQL_TYPE_DOUBLE:
		b := bytes.NewReader(raw_value)
		var f64 float64
		err = binary.Read(b, binary.BigEndian, &f64)
		v = f64
	case SQL_TYPE_BOOLEAN:
		// any nonzero first byte is true
		v = raw_value[0] != 0
	}
	return
}
|
// Package clock is a low consumption, low latency support for frequent updates of large capacity timing manager:
// 1、能够添加一次性、重复性任务,并能在其执行前撤销或频繁更改。
// 2、支持同一时间点,多个任务提醒。
// 3、适用于中等密度,大跨度的单次、多次定时任务。
// 4、支持10万次/秒的定时任务执行、提醒、撤销或添加操作,平均延迟10微秒内
// 5、支持注册任务的函数调用,及事件通知。
// 基本处理逻辑:
// 1、重复性任务,流程是:
// a、注册重复任务
// b、时间抵达时,控制器调用注册函数,并发送通知
// c、如果次数达到限制,则撤销;否则,控制器更新该任务的下次执行时间点
// d、控制器等待下一个最近需要执行的任务
// 2、一次性任务,可以是服务运行时,当前时间点之后的任意事件,流程是:
// a、注册一次性任务
// b、时间抵达时,控制器调用注册函数,并发送通知
// c、控制器释放该任务
// d、控制器等待下一个最近需要执行的任务
// 使用方式,参见示例代码。
package clock
import (
"github.com/HuKeping/rbtree"
"math"
"sync"
"sync/atomic"
"time"
)
// _UNTOUCHED is the effectively-infinite interval used for the internal
// sentinel job and the scheduler's idle timer.
const _UNTOUCHED = time.Duration(math.MaxInt64)

var (
	defaultClock *Clock    // lazily built singleton, see Default
	oncedo       sync.Once // guards defaultClock initialization
)

//Default return singal default clock
// (returns the process-wide singleton Clock, creating it on first use)
func Default() *Clock {
	oncedo.Do(initClock)
	return defaultClock
}

// initClock builds the package-level default clock exactly once.
func initClock() {
	defaultClock = NewClock()
}
// Clock is jobs schedule: a red-black-tree-backed timer queue driven by a
// single scheduler goroutine (see schedule).
type Clock struct {
	seq         uint64         // monotonically increasing job id source
	jobQueue    *rbtree.Rbtree //inner memory storage
	count       uint64         // number of firings executed so far; must not exceed a job's times (translated)
	waitJobsNum uint64         // queued jobs, including the internal sentinel
	pauseChan   chan struct{}  // park request for the scheduler goroutine
	resumeChan  chan struct{}  // unpark signal for the scheduler goroutine
	exitChan    chan struct{}  // shutdown signal for the scheduler goroutine
}

// singal is the empty payload sent on the control channels.
// NOTE(review): name misspells "signal"; unexported, kept as-is here.
var singal = struct{}{}
//NewClock Create a task queue controller
// It seeds the queue, starts the scheduler goroutine and returns a
// ready-to-use Clock.
func NewClock() *Clock {
	c := &Clock{
		jobQueue: rbtree.New(),
		// unbuffered channels: make(chan T) is the idiomatic spelling
		// of make(chan T, 0)
		pauseChan:  make(chan struct{}),
		resumeChan: make(chan struct{}),
		exitChan:   make(chan struct{}),
	}
	c.start()
	return c
}
// start seeds the queue with a far-future sentinel job (so schedule's
// Min() never yields nil), launches the scheduler goroutine and
// unblocks it with resume().
func (jl *Clock) start() {
	now := time.Now()
	untouchedJob := jobItem{
		createTime:   now,
		intervalTime: time.Duration(math.MaxInt64),
		fn: func() {
			//this jobItem is untouched.
		},
	}
	_, inserted := jl.addJob(now, untouchedJob.intervalTime, 1, untouchedJob.fn)
	if !inserted {
		panic("[clock] internal error.Reason cannot insert job.")
	}
	// launch the daemon goroutine (translated)
	go jl.schedule()
	jl.resume()
}
// pause blocks until the scheduler goroutine services pauseChan and
// parks itself on the Pause label; callers then mutate the queue safely.
func (jl *Clock) pause() {
	jl.pauseChan <- singal
}

// resume unparks the scheduler goroutine (it waits on resumeChan).
func (jl *Clock) resume() {
	jl.resumeChan <- singal
}

// exit tells the scheduler goroutine to terminate.
func (jl *Clock) exit() {
	jl.exitChan <- singal
}
// immediate drains the queue, firing each pending job exactly once
// inline (used by StopGraceful); the job's notification is sent with
// the "off-schedule" flag (action(false)).
func (jl *Clock) immediate() {
	// idiomatic loop condition replaces the original for { if/else break }
	for item := jl.jobQueue.Min(); item != nil; item = jl.jobQueue.Min() {
		atomic.AddUint64(&jl.count, 1)
		job := item.(*jobItem)
		job.action(false)
		jl.removeJob(job)
	}
}
// schedule is the scheduler goroutine: it repeatedly sleeps until the
// earliest job in the queue is due, fires it, then either re-keys it
// under its next action time (repeat jobs) or removes it.
//
// The goroutine parks on resumeChan until resume() is called (Pause
// label) and returns when exitChan fires.
func (jl *Clock) schedule() {
	var (
		timeout time.Duration
		job     *jobItem
		timer   = newSafeTimer(_UNTOUCHED)
	)
	defer timer.Stop()

Pause:
	<-jl.resumeChan
	for {
		// the sentinel job inserted by start() guarantees Min() is never nil
		job, _ = jl.jobQueue.Min().(*jobItem) //ignore ok-assert
		timeout = time.Until(job.actionTime)  // idiomatic form of actionTime.Sub(time.Now())
		timer.SafeReset(timeout)

		select {
		case <-timer.C:
			timer.SCR()
			atomic.AddUint64(&jl.count, 1)
			job.action(true)
			if job.actionTimes == 0 || job.actionTimes > job.count {
				// repeating job: re-insert keyed by its next action time
				jl.jobQueue.Delete(job)
				job.actionTime = job.actionTime.Add(job.intervalTime)
				jl.jobQueue.Insert(job)
			} else {
				jl.removeJob(job)
			}
		case <-jl.pauseChan:
			goto Pause
		case <-jl.exitChan:
			// plain return replaces `goto Exit` plus a trailing Exit:
			// label (a label immediately before '}' is not valid Go);
			// the dead commented-out timer code was also removed
			return
		}
	}
}
// UpdateJobTimeout update a timed task with time duration after now
// @job: job identifier
// @actionTime: new job schedule time,must be greater than 0
//
// Returns false for a nil job, a non-positive duration, or a Job that
// was not produced by this package.
func (jl *Clock) UpdateJobTimeout(job Job, actionTime time.Duration) (updated bool) {
	// guard nil before anything else (a nil interface would also fail
	// the type assertion below, but the explicit check documents intent)
	if job == nil || actionTime.Nanoseconds() <= 0 {
		return false
	}
	item, ok := job.(*jobItem)
	if !ok {
		return false
	}
	now := time.Now()

	jl.pause()
	defer jl.resume()

	// re-insert the job keyed under its new action time
	jl.jobQueue.Delete(item)
	item.actionTime = now.Add(actionTime)
	jl.jobQueue.Insert(item)
	updated = true
	return
}
// AddJobWithInterval insert a timed task with time duration after now
// @actionInterval: Duration after now
// @jobFunc: Callback function,not nil
// return
// @jobScheduled: A reference to a task that has been scheduled.
func (jl *Clock) AddJobWithInterval(actionInterval time.Duration, jobFunc func()) (jobScheduled Job, inserted bool) {
	// reject nil callbacks and non-positive intervals up front
	if jobFunc == nil {
		return
	}
	if actionInterval <= 0 {
		return
	}
	now := time.Now()

	jl.pause()
	jobScheduled, inserted = jl.addJob(now, actionInterval, 1, jobFunc)
	jl.resume()
	return
}
// AddJobWithDeadtime insert a timed task with time point after now
// @actionTime: Execution start time. must after now
// @jobFunc: Callback function,not nil
// return
// @jobScheduled : A reference to a task that has been scheduled.
// @inserted : return false ,if actionTime before time.Now or jobFunc is nil
func (jl *Clock) AddJobWithDeadtime(actionTime time.Time, jobFunc func()) (jobScheduled Job, inserted bool) {
	// Read the clock once: the original called time.Now() twice, so the
	// interval and the job's createTime disagreed and the scheduled
	// firing drifted slightly from the requested deadline.
	now := time.Now()
	actionInterval := actionTime.Sub(now)
	if jobFunc == nil || actionInterval.Nanoseconds() <= 0 {
		return
	}
	jl.pause()
	jobScheduled, inserted = jl.addJob(now, actionInterval, 1, jobFunc)
	jl.resume()
	return
}
// AddJobRepeat add a repeat task with interval duration
// @interval: The interval between two actions of the job
// @jobTimes: The number of job execution
// @jobFunc: Callback function,not nil
// return
// @jobScheduled : A reference to a task that has been scheduled.
// @inserted : return false ,if interval is not positive or jobFunc is nil
//Note:
// when jobTimes==0,the job will be executed without limitation. If you no
// longer use it, be sure to call the DelJob method to release it.
func (jl *Clock) AddJobRepeat(interval time.Duration, jobTimes uint64, jobFunc func()) (jobScheduled Job, inserted bool) {
	// reject nil callbacks and non-positive intervals up front
	if jobFunc == nil {
		return
	}
	if interval <= 0 {
		return
	}
	now := time.Now()

	jl.pause()
	jobScheduled, inserted = jl.addJob(now, interval, jobTimes, jobFunc)
	jl.resume()
	return
}
// addJob builds a jobItem and inserts it into the queue, keyed by its
// action time. Callers must hold the scheduler paused (or not yet
// started): seq and waitJobsNum are updated without synchronization.
// actionTimes == 0 means "repeat without limit".
func (jl *Clock) addJob(createTime time.Time, actionInterval time.Duration, actionTimes uint64, jobFunc func()) (job *jobItem, inserted bool) {
	jl.seq++
	jl.waitJobsNum++
	job = &jobItem{
		id:           jl.seq,
		actionTimes:  actionTimes,
		createTime:   createTime,
		actionTime:   createTime.Add(actionInterval),
		intervalTime: actionInterval,
		msgChan:      make(chan Job, 10), // buffered so firings don't block on slow listeners
		fn:           jobFunc,
		clock:        jl,
	}
	jl.jobQueue.Insert(job)
	inserted = true
	return
}
// removeJob deletes the job from the queue, decrements the pending-job
// counter and closes its message channel so any listeners are released.
// Callers must hold the scheduler paused.
func (jl *Clock) removeJob(item *jobItem) {
	jl.jobQueue.Delete(item)
	jl.waitJobsNum--
	close(item.msgChan)
	// redundant trailing bare `return` removed
}
// rmJob cancels a scheduled job. Nil or foreign Job values are ignored.
func (jl *Clock) rmJob(job Job) {
	// Check nil before the assertion: the original tested job == nil
	// only after already type-asserting on it.
	if job == nil {
		return
	}
	item, ok := job.(*jobItem)
	if !ok {
		return
	}
	jl.pause()
	defer jl.resume()
	jl.removeJob(item)
}
// Count reports how many job firings have executed so far; repeat jobs
// are counted once per firing. (translated)
func (jl *Clock) Count() uint64 {
	return atomic.LoadUint64(&jl.count)
}

// Reset stops the scheduler, clears the counter and all pending jobs,
// then restarts the Clock, returning it for chaining. (translated)
// NOTE(review): count is reset with a plain store while Count reads it
// atomically — relies on the scheduler goroutine having exited first.
func (jl *Clock) Reset() *Clock {
	jl.exit()
	jl.count = 0
	jl.cleanJobs()
	jl.start()
	return jl
}

// cleanJobs removes every queued job without running it.
func (jl *Clock) cleanJobs() {
	item := jl.jobQueue.Min()
	for item != nil {
		job, ok := item.(*jobItem)
		if ok {
			jl.removeJob(job)
		}
		item = jl.jobQueue.Min()
	}
}

//WaitJobs get how much jobs waiting for call
// (one is subtracted for the internal sentinel job inserted by start)
func (jl *Clock) WaitJobs() uint64 {
	jobs := atomic.LoadUint64(&jl.waitJobsNum) - 1
	return jobs
}

//Stop stop clock , and cancel all waiting jobs
func (jl *Clock) Stop() {
	jl.exit()
	jl.cleanJobs()
}

//StopGracefull stop clock ,and do once every waiting job including Once\Reapeat
// Note: each queued job runs exactly once here, even jobs scheduled for
// multiple or unlimited repeats. (translated)
func (jl *Clock) StopGraceful() {
	jl.exit()
	jl.immediate()
}
strengthen the UpdateJobTimeout method & clean code
// Package clock is a low consumption, low latency support for frequent updates of large capacity timing manager:
// 1、能够添加一次性、重复性任务,并能在其执行前撤销或频繁更改。
// 2、支持同一时间点,多个任务提醒。
// 3、适用于中等密度,大跨度的单次、多次定时任务。
// 4、支持10万次/秒的定时任务执行、提醒、撤销或添加操作,平均延迟10微秒内
// 5、支持注册任务的函数调用,及事件通知。
// 基本处理逻辑:
// 1、重复性任务,流程是:
// a、注册重复任务
// b、时间抵达时,控制器调用注册函数,并发送通知
// c、如果次数达到限制,则撤销;否则,控制器更新该任务的下次执行时间点
// d、控制器等待下一个最近需要执行的任务
// 2、一次性任务,可以是服务运行时,当前时间点之后的任意事件,流程是:
// a、注册一次性任务
// b、时间抵达时,控制器调用注册函数,并发送通知
// c、控制器释放该任务
// d、控制器等待下一个最近需要执行的任务
// 使用方式,参见示例代码。
package clock
import (
"github.com/HuKeping/rbtree"
"math"
"sync"
"sync/atomic"
"time"
)
// _UNTOUCHED is the effectively-infinite interval used for the internal
// sentinel job and the scheduler's idle timer.
const _UNTOUCHED = time.Duration(math.MaxInt64)

var (
	defaultClock *Clock    // lazily built singleton, see Default
	oncedo       sync.Once // guards defaultClock initialization
)

//Default return singal default clock
// (returns the process-wide singleton Clock, creating it on first use)
func Default() *Clock {
	oncedo.Do(initClock)
	return defaultClock
}

// initClock builds the package-level default clock exactly once.
func initClock() {
	defaultClock = NewClock()
}

// Clock is jobs schedule: a red-black-tree-backed timer queue driven by a
// single scheduler goroutine (see schedule).
type Clock struct {
	seq         uint64         // monotonically increasing job id source
	jobQueue    *rbtree.Rbtree //inner memory storage
	count       uint64         // number of firings executed so far; must not exceed a job's times (translated)
	waitJobsNum uint64         // queued jobs, including the internal sentinel
	pauseChan   chan struct{}  // park request for the scheduler goroutine
	resumeChan  chan struct{}  // unpark signal for the scheduler goroutine
	exitChan    chan struct{}  // shutdown signal for the scheduler goroutine
}

// singal is the empty payload sent on the control channels.
// NOTE(review): name misspells "signal"; unexported, kept as-is here.
var singal = struct{}{}
//NewClock Create a task queue controller
// It seeds the queue, starts the scheduler goroutine and returns a
// ready-to-use Clock.
func NewClock() *Clock {
	c := &Clock{
		jobQueue: rbtree.New(),
		// unbuffered channels: make(chan T) is the idiomatic spelling
		// of make(chan T, 0)
		pauseChan:  make(chan struct{}),
		resumeChan: make(chan struct{}),
		exitChan:   make(chan struct{}),
	}
	c.start()
	return c
}
// start seeds the queue with a far-future sentinel job (so schedule's
// Min() never yields nil), launches the scheduler goroutine and
// unblocks it with resume().
func (jl *Clock) start() {
	now := time.Now()
	untouchedJob := jobItem{
		createTime:   now,
		intervalTime: time.Duration(math.MaxInt64),
		fn: func() {
			//this jobItem is untouched.
		},
	}
	_, inserted := jl.addJob(now, untouchedJob.intervalTime, 1, untouchedJob.fn)
	if !inserted {
		panic("[clock] internal error.Reason cannot insert job.")
	}
	// launch the daemon goroutine (translated)
	go jl.schedule()
	jl.resume()
}
// pause blocks until the scheduler goroutine services pauseChan and
// parks itself on the Pause label; callers then mutate the queue safely.
func (jl *Clock) pause() {
	jl.pauseChan <- singal
}

// resume unparks the scheduler goroutine (it waits on resumeChan).
func (jl *Clock) resume() {
	jl.resumeChan <- singal
}

// exit tells the scheduler goroutine to terminate.
func (jl *Clock) exit() {
	jl.exitChan <- singal
}
// immediate drains the queue, firing each pending job exactly once
// inline (used by StopGraceful); the job's notification is sent with
// the "off-schedule" flag (action(false)).
func (jl *Clock) immediate() {
	// idiomatic loop condition replaces the original for { if/else break }
	for item := jl.jobQueue.Min(); item != nil; item = jl.jobQueue.Min() {
		atomic.AddUint64(&jl.count, 1)
		job := item.(*jobItem)
		job.action(false)
		jl.removeJob(job)
	}
}
// schedule is the scheduler goroutine: it repeatedly sleeps until the
// earliest job in the queue is due, fires it, then either re-keys it
// under its next action time (repeat jobs) or removes it.
//
// The goroutine parks on resumeChan until resume() is called (Pause
// label) and returns when exitChan fires.
func (jl *Clock) schedule() {
	var (
		timeout time.Duration
		job     *jobItem
		timer   = newSafeTimer(_UNTOUCHED)
	)
	defer timer.Stop()

Pause:
	<-jl.resumeChan
	for {
		// the sentinel job inserted by start() guarantees Min() is never nil
		job, _ = jl.jobQueue.Min().(*jobItem) //ignore ok-assert
		timeout = time.Until(job.actionTime)  // idiomatic form of actionTime.Sub(time.Now())
		timer.SafeReset(timeout)

		select {
		case <-timer.C:
			timer.SCR()
			atomic.AddUint64(&jl.count, 1)
			job.action(true)
			if job.actionTimes == 0 || job.actionTimes > job.count {
				// repeating job: re-insert keyed by its next action time
				jl.jobQueue.Delete(job)
				job.actionTime = job.actionTime.Add(job.intervalTime)
				jl.jobQueue.Insert(job)
			} else {
				jl.removeJob(job)
			}
		case <-jl.pauseChan:
			goto Pause
		case <-jl.exitChan:
			// plain return replaces `goto Exit` plus a trailing Exit:
			// label (a label immediately before '}' is not valid Go)
			return
		}
	}
}
// UpdateJobTimeout update a timed task with time duration after now
// @job: job identifier
// @actionTime: new job schedule time,must be greater than 0
//
// Returns false for a nil job, a non-positive duration, or a Job that
// was not produced by this package.
func (jl *Clock) UpdateJobTimeout(job Job, actionTime time.Duration) (updated bool) {
	if job == nil || actionTime <= 0 {
		return false
	}
	item, ok := job.(*jobItem)
	if !ok {
		return false
	}
	now := time.Now()

	jl.pause()
	defer jl.resume()

	// re-insert the job keyed under its new action time
	jl.jobQueue.Delete(item)
	item.actionTime = now.Add(actionTime)
	jl.jobQueue.Insert(item)
	return true
}
// AddJobWithInterval insert a timed task with time duration after now
// @actionInterval: Duration after now; must be positive
// @jobFunc: Callback function,not nil
// return
// @jobScheduled: A reference to a task that has been scheduled.
// @inserted: false when jobFunc is nil or the interval is not positive.
func (jl *Clock) AddJobWithInterval(actionInterval time.Duration, jobFunc func()) (jobScheduled Job, inserted bool) {
	if jobFunc == nil || actionInterval.Nanoseconds() <= 0 {
		return
	}
	now := time.Now()

	// pause the scheduler while mutating the queue
	jl.pause()
	jobScheduled, inserted = jl.addJob(now, actionInterval, 1, jobFunc)
	jl.resume()
	return
}
// AddJobWithDeadtime insert a timed task with time point after now
// @actionTime: Execution start time. must after now
// @jobFunc: Callback function,not nil
// return
// @jobScheduled : A reference to a task that has been scheduled.
// @inserted : return false ,if actionTime before time.Now or jobFunc is nil
func (jl *Clock) AddJobWithDeadtime(actionTime time.Time, jobFunc func()) (jobScheduled Job, inserted bool) {
	// Read the clock once: the original called time.Now() twice, so the
	// interval and the job's createTime disagreed and the scheduled
	// firing drifted slightly from the requested deadline.
	now := time.Now()
	actionInterval := actionTime.Sub(now)
	if jobFunc == nil || actionInterval.Nanoseconds() <= 0 {
		return
	}
	jl.pause()
	jobScheduled, inserted = jl.addJob(now, actionInterval, 1, jobFunc)
	jl.resume()
	return
}
// AddJobRepeat add a repeat task with interval duration
// @interval: The interval between two actions of the job
// @jobTimes: The number of job execution
// @jobFunc: Callback function,not nil
// return
// @jobScheduled : A reference to a task that has been scheduled.
// @inserted : return false ,if interval is not positive or jobFunc is nil
//Note:
// when jobTimes==0,the job will be executed without limitation. If you no
// longer use it, be sure to call the DelJob method to release it.
func (jl *Clock) AddJobRepeat(interval time.Duration, jobTimes uint64, jobFunc func()) (jobScheduled Job, inserted bool) {
	// reject nil callbacks and non-positive intervals up front
	if jobFunc == nil {
		return
	}
	if interval <= 0 {
		return
	}
	now := time.Now()

	jl.pause()
	jobScheduled, inserted = jl.addJob(now, interval, jobTimes, jobFunc)
	jl.resume()
	return
}
// addJob builds a jobItem and inserts it into the queue, keyed by its
// action time. Callers must hold the scheduler paused (or not yet
// started): seq and waitJobsNum are updated without synchronization.
// actionTimes == 0 means "repeat without limit".
func (jl *Clock) addJob(createTime time.Time, actionInterval time.Duration, actionTimes uint64, jobFunc func()) (job *jobItem, inserted bool) {
	jl.seq++
	jl.waitJobsNum++
	job = &jobItem{
		id:           jl.seq,
		actionTimes:  actionTimes,
		createTime:   createTime,
		actionTime:   createTime.Add(actionInterval),
		intervalTime: actionInterval,
		msgChan:      make(chan Job, 10), // buffered so firings don't block on slow listeners
		fn:           jobFunc,
		clock:        jl,
	}
	jl.jobQueue.Insert(job)
	inserted = true
	return
}
// removeJob deletes the job from the queue, decrements the pending-job
// counter and closes its message channel so any listeners are released.
// Callers must hold the scheduler paused.
func (jl *Clock) removeJob(item *jobItem) {
	jl.jobQueue.Delete(item)
	jl.waitJobsNum--
	close(item.msgChan)
	// redundant trailing bare `return` removed
}
// rmJob cancels a scheduled job. Nil or foreign Job values are ignored.
func (jl *Clock) rmJob(job Job) {
	// Check nil before the assertion: the original tested job == nil
	// only after already type-asserting on it.
	if job == nil {
		return
	}
	item, ok := job.(*jobItem)
	if !ok {
		return
	}
	jl.pause()
	defer jl.resume()
	jl.removeJob(item)
}
// Count reports how many job firings have executed so far; repeat jobs
// are counted once per firing. (translated)
func (jl *Clock) Count() uint64 {
	return atomic.LoadUint64(&jl.count)
}

// Reset stops the scheduler, clears the counter and all pending jobs,
// then restarts the Clock, returning it for chaining. (translated)
// NOTE(review): count is reset with a plain store while Count reads it
// atomically — relies on the scheduler goroutine having exited first.
func (jl *Clock) Reset() *Clock {
	jl.exit()
	jl.count = 0
	jl.cleanJobs()
	jl.start()
	return jl
}

// cleanJobs removes every queued job without running it.
func (jl *Clock) cleanJobs() {
	item := jl.jobQueue.Min()
	for item != nil {
		job, ok := item.(*jobItem)
		if ok {
			jl.removeJob(job)
		}
		item = jl.jobQueue.Min()
	}
}

//WaitJobs get how much jobs waiting for call
// (one is subtracted for the internal sentinel job inserted by start)
func (jl *Clock) WaitJobs() uint64 {
	jobs := atomic.LoadUint64(&jl.waitJobsNum) - 1
	return jobs
}

//Stop stop clock , and cancel all waiting jobs
func (jl *Clock) Stop() {
	jl.exit()
	jl.cleanJobs()
}

//StopGracefull stop clock ,and do once every waiting job including Once\Reapeat
// Note: each queued job runs exactly once here, even jobs scheduled for
// multiple or unlimited repeats. (translated)
func (jl *Clock) StopGraceful() {
	jl.exit()
	jl.immediate()
}
|
package cmdns
import (
"errors"
"fmt"
"github.com/spf13/cobra"
)
var (
	// DefaultNamespacer is the default namespacer for the package
	DefaultNamespacer = New()
	// DefaultNamespaceSeperator is the char that separates commands.
	// NOTE(review): the name misspells "Separator" but is exported, so
	// it is kept for backward compatibility.
	DefaultNamespaceSeperator = ':'
)
// SetOverrideUsageFunc when set to true will override the command's usage
// function with the package's usage function that displays namespaces,
// using the default namespacer. Returns the namespacer for chaining.
func SetOverrideUsageFunc(v bool) *CobraNamespacer {
	return DefaultNamespacer.SetOverrideUsageFunc(v)
}

// Namespace enables namespacing for the command using the package-level
// DefaultNamespacer.
func Namespace(cmd *cobra.Command) error {
	return DefaultNamespacer.Namespace(cmd)
}
// CobraNamespacer is the component that namespaces a cobra command's
// subcommands (one CobraNamespace per immediate child).
type CobraNamespacer struct {
	// Namespaces is the collection of cobra namespaces
	Namespaces []*CobraNamespace
	// OverrideUsageFunc is propagated to each created CobraNamespace.
	OverrideUsageFunc bool
}

// New returns a new instance of the CobraNamespacer with usage-function
// overriding enabled by default.
func New() *CobraNamespacer {
	return &CobraNamespacer{
		Namespaces:        make([]*CobraNamespace, 0),
		OverrideUsageFunc: true,
	}
}

// SetOverrideUsageFunc when set to true will override the command's usage
// function with the package's usage function that displays namespaces.
// Returns the receiver for chaining.
func (c *CobraNamespacer) SetOverrideUsageFunc(v bool) *CobraNamespacer {
	c.OverrideUsageFunc = v
	return c
}
// Namespace enables namespacing for each of cmd's immediate subcommands,
// creating one CobraNamespace per child. Returns an error for a nil cmd
// or when any child fails to namespace.
func (c *CobraNamespacer) Namespace(cmd *cobra.Command) error {
	if cmd == nil {
		return errors.New("cmdns: cmd cannot be nil")
	}
	for _, child := range cmd.Commands() {
		ns := NewCobraNamespace()
		ns.OverrideUsageFunc = c.OverrideUsageFunc
		// record the namespace before binding so callers can inspect it
		c.Namespaces = append(c.Namespaces, ns)
		if err := ns.Namespace(child); err != nil {
			return err
		}
	}
	return nil
}
// CobraNamespace represents a namespace for a command. This is usually the
// second level command.
type CobraNamespace struct {
	OverrideUsageFunc bool            // replace the command's usage func with UsageFunc()
	cmd               *cobra.Command  // the namespaced command (set by Namespace)
	commands          []*cobra.Command // the hidden "<parent>:<child>" command copies
}

// NewCobraNamespace returns a new Namespace with usage-function
// overriding enabled by default.
func NewCobraNamespace() *CobraNamespace {
	return &CobraNamespace{
		OverrideUsageFunc: true,
		commands:          make([]*cobra.Command, 0),
	}
}

// AvailableCommands returns the namespaced commands that are available
func (n *CobraNamespace) AvailableCommands() []*cobra.Command {
	return n.commands
}

// Command returns the command for the namespace
func (n *CobraNamespace) Command() *cobra.Command {
	return n.cmd
}
// Namespace enables namespacing for a sub-command and its immediate
// children. It returns an error if the command does not have a parent,
// and does nothing when the command has no available subcommands.
func (n *CobraNamespace) Namespace(cmd *cobra.Command) error {
	if !cmd.HasParent() {
		return errors.New("cmdns: command requires a parent")
	}
	// Do not bind if there are not available sub commands
	if !cmd.HasAvailableSubCommands() {
		return nil
	}
	if n.OverrideUsageFunc {
		cmd.SetUsageFunc(n.UsageFunc())
	}
	for _, c := range cmd.Commands() {
		if !c.IsAvailableCommand() {
			continue
		}
		// Copy the command and register the copy on the root command as
		// "<parent><sep><child>". BUG FIX: the separator was hard-coded
		// as ":" here, so configuring the exported
		// DefaultNamespaceSeperator variable had no effect.
		nc := *c
		nc.Use = cmd.Name() + string(DefaultNamespaceSeperator) + c.Use
		// add this command to the root and hide it so it does not show in available commands list
		c.Parent().Parent().AddCommand(&nc)
		nc.Hidden = true
		n.commands = append(n.commands, &nc)
	}
	n.cmd = cmd
	return nil
}
// UsageFunc returns the usage function for the command that renders
// namespaces: it executes usageTemplate against this CobraNamespace,
// writing to the command's output, and prints (but still returns) any
// template error.
func (n *CobraNamespace) UsageFunc() (f func(*cobra.Command) error) {
	return func(*cobra.Command) error {
		err := tmpl(n.Command().Out(), usageTemplate, n)
		if err != nil {
			fmt.Print(err)
		}
		return err
	}
}
// usageTemplate renders usage for a namespaced command, listing the
// hidden "<parent>:<child>" copies held by the CobraNamespace instead of
// cobra's default command list.
// BUG FIX: `gt .Aliases 0` compared a []string to an int, which makes
// text/template's gt fail at execute time; use `gt (len .Aliases) 0`
// as cobra's own default template does.
var usageTemplate = `{{$ns := .}}{{with .Command}}Usage: {{if .Runnable}}{{.UseLine}}{{if .HasFlags}} [flags]{{end}}{{end}}{{if gt (len .Aliases) 0}}
Aliases:
{{.NameAndAliases}}
{{end}}{{if .HasExample}}
Examples:
{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}}
Additional commands, use "{{.Parent.CommandPath}} COMMAND --help" for more information about a command.
{{range $ns.AvailableCommands}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{ if .HasLocalFlags}}
Flags:
{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasInheritedFlags}}
Global Flags:
{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}}
Additional help topics:{{range .Commands}}{{if .IsHelpCommand}}
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}
{{end}}
`
Use DefaultNamespaceSeparator
package cmdns
import (
"errors"
"fmt"
"github.com/spf13/cobra"
)
var (
	// DefaultNamespacer is the default namespacer for the package
	DefaultNamespacer = New()
	// DefaultNamespaceSeparator is the string that separates a parent
	// command name from a namespaced child command name.
	DefaultNamespaceSeparator = ":"
)
// SetOverrideUsageFunc when set to true will override the command's usage
// function with the package's usage function that displays namespaces,
// using the default namespacer. Returns the namespacer for chaining.
func SetOverrideUsageFunc(v bool) *CobraNamespacer {
	return DefaultNamespacer.SetOverrideUsageFunc(v)
}

// Namespace enables namespacing for the command using the package-level
// DefaultNamespacer.
func Namespace(cmd *cobra.Command) error {
	return DefaultNamespacer.Namespace(cmd)
}
// CobraNamespacer is the component that namespaces a cobra command's
// subcommands (one CobraNamespace per immediate child).
type CobraNamespacer struct {
	// Namespaces is the collection of cobra namespaces
	Namespaces []*CobraNamespace
	// OverrideUsageFunc is propagated to each created CobraNamespace.
	OverrideUsageFunc bool
}

// New returns a new instance of the CobraNamespacer with usage-function
// overriding enabled by default.
func New() *CobraNamespacer {
	return &CobraNamespacer{
		Namespaces:        make([]*CobraNamespace, 0),
		OverrideUsageFunc: true,
	}
}

// SetOverrideUsageFunc when set to true will override the command's usage
// function with the package's usage function that displays namespaces.
// Returns the receiver for chaining.
func (c *CobraNamespacer) SetOverrideUsageFunc(v bool) *CobraNamespacer {
	c.OverrideUsageFunc = v
	return c
}
// Namespace enables namespacing for each of cmd's immediate subcommands,
// creating one CobraNamespace per child. Returns an error for a nil cmd
// or when any child fails to namespace.
func (c *CobraNamespacer) Namespace(cmd *cobra.Command) error {
	if cmd == nil {
		return errors.New("cmdns: cmd cannot be nil")
	}
	for _, child := range cmd.Commands() {
		ns := NewCobraNamespace()
		ns.OverrideUsageFunc = c.OverrideUsageFunc
		// record the namespace before binding so callers can inspect it
		c.Namespaces = append(c.Namespaces, ns)
		if err := ns.Namespace(child); err != nil {
			return err
		}
	}
	return nil
}
// CobraNamespace represents a namespace for a command. This is usually the
// second-level command.
type CobraNamespace struct {
// OverrideUsageFunc controls whether Namespace replaces the command's
// usage function with this namespace's usage function.
OverrideUsageFunc bool
cmd *cobra.Command
commands []*cobra.Command
}
// NewCobraNamespace returns a new Namespace with usage-function overriding
// enabled by default.
func NewCobraNamespace() *CobraNamespace {
return &CobraNamespace{
OverrideUsageFunc: true,
commands: make([]*cobra.Command, 0),
}
}
// AvailableCommands returns the namespaced commands that are available.
func (n *CobraNamespace) AvailableCommands() []*cobra.Command {
return n.commands
}
// Command returns the command for the namespace.
func (n *CobraNamespace) Command() *cobra.Command {
return n.cmd
}
// Namespace enables namespacing for a sub-command and its immediate
// children. It returns an error if the command does not have a parent.
func (n *CobraNamespace) Namespace(cmd *cobra.Command) error {
if !cmd.HasParent() {
return errors.New("cmdns: command requires a parent")
}
// Do not bind if there are no available sub-commands.
if !cmd.HasAvailableSubCommands() {
return nil
}
if n.OverrideUsageFunc {
cmd.SetUsageFunc(n.UsageFunc())
}
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() {
continue
}
// Copy the command and add it to the root command with a prefix of its
// parent, e.g. "parent:child".
nc := *c
nc.Use = cmd.Name() + DefaultNamespaceSeparator + c.Use
// Add this command to the root and hide it so it does not show in the
// available-commands list. NOTE(review): c.Parent() is presumably cmd
// (c came from cmd.Commands()), so .Parent().Parent() is the
// grandparent/root — confirm against cobra's AddCommand semantics.
c.Parent().Parent().AddCommand(&nc)
nc.Hidden = true
n.commands = append(n.commands, &nc)
}
n.cmd = cmd
return nil
}
// UsageFunc returns the usage function for the command that renders
// namespaces using usageTemplate.
func (n *CobraNamespace) UsageFunc() (f func(*cobra.Command) error) {
return func(*cobra.Command) error {
// NOTE(review): the error is both printed here and returned to the
// caller, so it may be reported twice — confirm this is intended.
err := tmpl(n.Command().Out(), usageTemplate, n)
if err != nil {
fmt.Print(err)
}
return err
}
}
// usageTemplate renders a command's usage, listing the namespaced commands
// produced by CobraNamespace.AvailableCommands instead of cobra's default
// command list.
// NOTE(review): "{{if gt .Aliases 0}}" compares a string slice against an
// int; cobra's stock template uses "gt (len .Aliases) 0" — verify this
// branch does not fail at template-execution time when aliases are set.
var usageTemplate = `{{$ns := .}}{{with .Command}}Usage: {{if .Runnable}}{{.UseLine}}{{if .HasFlags}} [flags]{{end}}{{end}}{{if gt .Aliases 0}}
Aliases:
{{.NameAndAliases}}
{{end}}{{if .HasExample}}
Examples:
{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}}
Additional commands, use "{{.Parent.CommandPath}} COMMAND --help" for more information about a command.
{{range $ns.AvailableCommands}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{ if .HasLocalFlags}}
Flags:
{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasInheritedFlags}}
Global Flags:
{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}}
Additional help topics:{{range .Commands}}{{if .IsHelpCommand}}
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}
{{end}}
`
|
package color
import (
"fmt"
"os"
"strconv"
"strings"
"sync"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
)
// NoColor defines if the output is colorized or not. It's dynamically set to
// false or true based on the stdout's file descriptor referring to a terminal
// or not. This is a global option and affects all colors. For more control
// over each color block use the methods DisableColor() individually.
var NoColor = !isatty.IsTerminal(os.Stdout.Fd())
// Color defines a custom color object which is defined by SGR parameters.
type Color struct {
params []Attribute
// noColor, when non-nil, overrides the global NoColor setting for this
// color only (see DisableColor/EnableColor and isNoColorSet).
noColor *bool
}
// Attribute defines a single SGR Code
type Attribute int
// escape is the ANSI escape character that starts every SGR sequence.
const escape = "\x1b"
// Base attributes
const (
Reset Attribute = iota
Bold
Faint
Italic
Underline
BlinkSlow
BlinkRapid
ReverseVideo
Concealed
CrossedOut
)
// Foreground text colors (SGR codes 30-37)
const (
FgBlack Attribute = iota + 30
FgRed
FgGreen
FgYellow
FgBlue
FgMagenta
FgCyan
FgWhite
)
// Foreground Hi-Intensity text colors (SGR codes 90-97)
const (
FgHiBlack Attribute = iota + 90
FgHiRed
FgHiGreen
FgHiYellow
FgHiBlue
FgHiMagenta
FgHiCyan
FgHiWhite
)
// Background text colors (SGR codes 40-47)
const (
BgBlack Attribute = iota + 40
BgRed
BgGreen
BgYellow
BgBlue
BgMagenta
BgCyan
BgWhite
)
// Background Hi-Intensity text colors (SGR codes 100-107)
const (
BgHiBlack Attribute = iota + 100
BgHiRed
BgHiGreen
BgHiYellow
BgHiBlue
BgHiMagenta
BgHiCyan
BgHiWhite
)
// New returns a newly created color object configured with the given SGR
// attributes.
func New(value ...Attribute) *Color {
	c := &Color{params: make([]Attribute, 0)}
	return c.Add(value...)
}
// Set sets the given parameters immediately. It will change the color of
// all subsequent output with the given SGR parameters until color.Unset()
// is called.
func Set(p ...Attribute) *Color {
c := New(p...)
c.Set()
return c
}
// Unset resets all escape attributes and clears the output. Usually should
// be called after Set(). It is a no-op when NoColor is set.
func Unset() {
if NoColor {
return
}
fmt.Fprintf(Output, "%s[%dm", escape, Reset)
}
// Set writes this color's SGR sequence to Output, changing subsequent
// output until unset. It is a no-op when color output is disabled for this
// color. It returns the receiver to allow chaining.
func (c *Color) Set() *Color {
	if c.isNoColorSet() {
		return c
	}
	// Use Fprint rather than Fprintf: c.format() is data, not a format
	// string. Passing it as the format trips `go vet` (non-constant
	// format string) and would misbehave if it ever contained a '%'.
	fmt.Fprint(Output, c.format())
	return c
}
// unset resets the terminal attributes unless color output is disabled for
// this color object.
func (c *Color) unset() {
if c.isNoColorSet() {
return
}
Unset()
}
// Add is used to chain SGR parameters. Use as many as parameters to combine
// and create custom color objects. Example: Add(color.FgRed, color.Underline).
func (c *Color) Add(value ...Attribute) *Color {
c.params = append(c.params, value...)
return c
}
// prepend inserts value at the front of the parameter list, shifting the
// existing attributes one position to the right.
func (c *Color) prepend(value Attribute) {
c.params = append(c.params, 0)
copy(c.params[1:], c.params[0:])
c.params[0] = value
}
// Output defines the standard output of the print functions. By default
// os.Stdout (wrapped for Windows color support) is used.
var Output = colorable.NewColorableStdout()
// Print formats using the default formats for its operands and writes to
// standard output. Spaces are added between operands when neither is a
// string. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color. The color is unset again when the call returns.
func (c *Color) Print(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprint(Output, a...)
}
// Printf formats according to a format specifier and writes to standard output.
// It returns the number of bytes written and any write error encountered.
// This is the standard fmt.Printf() method wrapped with the given color.
// The color is unset again when the call returns.
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintf(Output, format, a...)
}
// Println formats using the default formats for its operands and writes to
// standard output. Spaces are always added between operands and a newline is
// appended. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Println() method wrapped with the given
// color. The color is unset again when the call returns.
func (c *Color) Println(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintln(Output, a...)
}
// PrintFunc returns a new function that prints the passed arguments as
// colorized with color.Print().
func (c *Color) PrintFunc() func(a ...interface{}) {
return func(a ...interface{}) { c.Print(a...) }
}
// PrintfFunc returns a new function that prints the passed arguments as
// colorized with color.Printf().
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
return func(format string, a ...interface{}) { c.Printf(format, a...) }
}
// PrintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Println().
func (c *Color) PrintlnFunc() func(a ...interface{}) {
return func(a ...interface{}) { c.Println(a...) }
}
// SprintFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprint(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output, example:
//
// put := New(FgYellow).SprintFunc()
// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
func (c *Color) SprintFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
}
// SprintfFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
return func(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
}
// SprintlnFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
}
// sequence returns a formatted SGR parameter list to be plugged into a
// "\x1b[...m" escape; an example output might be: "1;36" -> bold cyan.
func (c *Color) sequence() string {
	var b strings.Builder
	for i, p := range c.params {
		if i > 0 {
			b.WriteByte(';')
		}
		b.WriteString(strconv.Itoa(int(p)))
	}
	return b.String()
}
// wrap wraps the s string with the colors attributes. The string is ready to
// be printed.
func (c *Color) wrap(s string) string {
if c.isNoColorSet() {
return s
}
return c.format() + s + c.unformat()
}
// format returns the SGR start sequence for this color, e.g. "\x1b[1;36m".
func (c *Color) format() string {
return fmt.Sprintf("%s[%sm", escape, c.sequence())
}
// unformat returns the SGR reset sequence "\x1b[0m".
func (c *Color) unformat() string {
return fmt.Sprintf("%s[%dm", escape, Reset)
}
// DisableColor disables the color output. Useful to not change any existing
// code and still being able to output. Can be used for flags like
// "--no-color". To enable back use the EnableColor() method.
func (c *Color) DisableColor() {
c.noColor = boolPtr(true)
}
// EnableColor enables the color output. Use it in conjunction with
// DisableColor(). Otherwise this method has no side effects.
func (c *Color) EnableColor() {
c.noColor = boolPtr(false)
}
// isNoColorSet reports whether color output is disabled for this color.
func (c *Color) isNoColorSet() bool {
// check first if we have a user-set override for this color
if c.noColor != nil {
return *c.noColor
}
// if not return the global option, which is disabled by default
return NoColor
}
// Equals returns a boolean value indicating whether two colors are equal.
// Attribute order is not significant.
// NOTE(review): when params contain duplicate attributes, the length check
// combined with a per-attribute membership test can report unequal colors
// as equal (e.g. {A, A} vs {A, B}) — confirm duplicates cannot occur.
func (c *Color) Equals(c2 *Color) bool {
if len(c.params) != len(c2.params) {
return false
}
for _, attr := range c.params {
if !c2.attrExists(attr) {
return false
}
}
return true
}
// attrExists reports whether attribute a is present in c.params.
func (c *Color) attrExists(a Attribute) bool {
for _, attr := range c.params {
if attr == a {
return true
}
}
return false
}
// boolPtr returns a pointer to a fresh copy of v.
func boolPtr(v bool) *bool {
	b := v
	return &b
}
// colorsCache is used to reduce the count of created Color objects and
// allows to reuse already created objects with required Attribute.
var colorsCache = make(map[Attribute]*Color)
var colorsCacheMu = new(sync.Mutex) // protects colorsCache
// getCachedColor returns the shared single-attribute Color for p, creating
// and caching it on first use. Safe for concurrent use.
func getCachedColor(p Attribute) *Color {
colorsCacheMu.Lock()
defer colorsCacheMu.Unlock()
c, ok := colorsCache[p]
if !ok {
c = New(p)
colorsCache[p] = c
}
return c
}
// printColor prints with the cached single-attribute color for p. When no
// arguments are given, format itself is treated as the value and printed
// through a "%s" verb so that any '%' runes in it are not interpreted as
// formatting directives. A newline is appended when the effective format
// does not already end with one.
func printColor(format string, p Attribute, a ...interface{}) {
c := getCachedColor(p)
if len(a) == 0 {
a = append(a, format)
format = "%s"
}
if !strings.HasSuffix(format, "\n") {
format += "\n"
}
c.Printf(format, a...)
}
// printString renders format with the cached single-attribute color for p
// and returns the colorized string. When no arguments are given, format is
// passed through a "%s" verb, as in printColor. No newline is appended.
func printString(format string, p Attribute, a ...interface{}) string {
c := getCachedColor(p)
if len(a) == 0 {
a = append(a, format)
format = "%s"
}
return c.SprintfFunc()(format, a...)
}
// Black is a convenient helper function to print with black foreground. A
// newline is appended to format by default.
func Black(format string, a ...interface{}) { printColor(format, FgBlack, a...) }
// Red is a convenient helper function to print with red foreground. A
// newline is appended to format by default.
func Red(format string, a ...interface{}) { printColor(format, FgRed, a...) }
// Green is a convenient helper function to print with green foreground. A
// newline is appended to format by default.
func Green(format string, a ...interface{}) { printColor(format, FgGreen, a...) }
// Yellow is a convenient helper function to print with yellow foreground.
// A newline is appended to format by default.
func Yellow(format string, a ...interface{}) { printColor(format, FgYellow, a...) }
// Blue is a convenient helper function to print with blue foreground. A
// newline is appended to format by default.
func Blue(format string, a ...interface{}) { printColor(format, FgBlue, a...) }
// Magenta is a convenient helper function to print with magenta foreground.
// A newline is appended to format by default.
func Magenta(format string, a ...interface{}) { printColor(format, FgMagenta, a...) }
// Cyan is a convenient helper function to print with cyan foreground. A
// newline is appended to format by default.
func Cyan(format string, a ...interface{}) { printColor(format, FgCyan, a...) }
// White is a convenient helper function to print with white foreground. A
// newline is appended to format by default.
func White(format string, a ...interface{}) { printColor(format, FgWhite, a...) }
// BlackString is a convenient helper function to return a string with black
// foreground.
func BlackString(format string, a ...interface{}) string { return printString(format, FgBlack, a...) }
// RedString is a convenient helper function to return a string with red
// foreground.
func RedString(format string, a ...interface{}) string { return printString(format, FgRed, a...) }
// GreenString is a convenient helper function to return a string with green
// foreground.
func GreenString(format string, a ...interface{}) string { return printString(format, FgGreen, a...) }
// YellowString is a convenient helper function to return a string with yellow
// foreground.
func YellowString(format string, a ...interface{}) string { return printString(format, FgYellow, a...) }
// BlueString is a convenient helper function to return a string with blue
// foreground.
func BlueString(format string, a ...interface{}) string { return printString(format, FgBlue, a...) }
// MagentaString is a convenient helper function to return a string with magenta
// foreground.
func MagentaString(format string, a ...interface{}) string {
return printString(format, FgMagenta, a...)
}
// CyanString is a convenient helper function to return a string with cyan
// foreground.
func CyanString(format string, a ...interface{}) string { return printString(format, FgCyan, a...) }
// WhiteString is a convenient helper function to return a string with white
// foreground.
func WhiteString(format string, a ...interface{}) string { return printString(format, FgWhite, a...) }
Use print functions if there is no format
package color
import (
"fmt"
"os"
"strconv"
"strings"
"sync"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
)
// NoColor defines if the output is colorized or not. It's dynamically set to
// false or true based on the stdout's file descriptor referring to a terminal
// or not. This is a global option and affects all colors. For more control
// over each color block use the methods DisableColor() individually.
var NoColor = !isatty.IsTerminal(os.Stdout.Fd())
// Color defines a custom color object which is defined by SGR parameters.
type Color struct {
params []Attribute
// noColor, when non-nil, overrides the global NoColor setting for this
// color only (see DisableColor/EnableColor and isNoColorSet).
noColor *bool
}
// Attribute defines a single SGR Code
type Attribute int
// escape is the ANSI escape character that starts every SGR sequence.
const escape = "\x1b"
// Base attributes
const (
Reset Attribute = iota
Bold
Faint
Italic
Underline
BlinkSlow
BlinkRapid
ReverseVideo
Concealed
CrossedOut
)
// Foreground text colors (SGR codes 30-37)
const (
FgBlack Attribute = iota + 30
FgRed
FgGreen
FgYellow
FgBlue
FgMagenta
FgCyan
FgWhite
)
// Foreground Hi-Intensity text colors (SGR codes 90-97)
const (
FgHiBlack Attribute = iota + 90
FgHiRed
FgHiGreen
FgHiYellow
FgHiBlue
FgHiMagenta
FgHiCyan
FgHiWhite
)
// Background text colors (SGR codes 40-47)
const (
BgBlack Attribute = iota + 40
BgRed
BgGreen
BgYellow
BgBlue
BgMagenta
BgCyan
BgWhite
)
// Background Hi-Intensity text colors (SGR codes 100-107)
const (
BgHiBlack Attribute = iota + 100
BgHiRed
BgHiGreen
BgHiYellow
BgHiBlue
BgHiMagenta
BgHiCyan
BgHiWhite
)
// New returns a newly created color object holding the given SGR
// attributes.
func New(value ...Attribute) *Color {
	return (&Color{params: make([]Attribute, 0)}).Add(value...)
}
// Set sets the given parameters immediately. It will change the color of
// all subsequent output with the given SGR parameters until color.Unset()
// is called.
func Set(p ...Attribute) *Color {
c := New(p...)
c.Set()
return c
}
// Unset resets all escape attributes and clears the output. Usually should
// be called after Set(). It is a no-op when NoColor is set.
func Unset() {
if NoColor {
return
}
fmt.Fprintf(Output, "%s[%dm", escape, Reset)
}
// Set writes this color's SGR sequence to Output, changing subsequent
// output until unset. It is a no-op when color output is disabled for this
// color. It returns the receiver to allow chaining.
func (c *Color) Set() *Color {
	if c.isNoColorSet() {
		return c
	}
	// Use Fprint rather than Fprintf: c.format() is data, not a format
	// string. Passing it as the format trips `go vet` (non-constant
	// format string) and would misbehave if it ever contained a '%'.
	fmt.Fprint(Output, c.format())
	return c
}
// unset resets the terminal attributes unless color output is disabled for
// this color object.
func (c *Color) unset() {
if c.isNoColorSet() {
return
}
Unset()
}
// Add is used to chain SGR parameters. Use as many as parameters to combine
// and create custom color objects. Example: Add(color.FgRed, color.Underline).
func (c *Color) Add(value ...Attribute) *Color {
c.params = append(c.params, value...)
return c
}
// prepend inserts value at the front of the parameter list, shifting the
// existing attributes one position to the right.
func (c *Color) prepend(value Attribute) {
c.params = append(c.params, 0)
copy(c.params[1:], c.params[0:])
c.params[0] = value
}
// Output defines the standard output of the print functions. By default
// os.Stdout (wrapped for Windows color support) is used.
var Output = colorable.NewColorableStdout()
// Print formats using the default formats for its operands and writes to
// standard output. Spaces are added between operands when neither is a
// string. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color. The color is unset again when the call returns.
func (c *Color) Print(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprint(Output, a...)
}
// Printf formats according to a format specifier and writes to standard output.
// It returns the number of bytes written and any write error encountered.
// This is the standard fmt.Printf() method wrapped with the given color.
// The color is unset again when the call returns.
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintf(Output, format, a...)
}
// Println formats using the default formats for its operands and writes to
// standard output. Spaces are always added between operands and a newline is
// appended. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Println() method wrapped with the given
// color. The color is unset again when the call returns.
func (c *Color) Println(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintln(Output, a...)
}
// PrintFunc returns a new function that prints the passed arguments as
// colorized with color.Print().
func (c *Color) PrintFunc() func(a ...interface{}) {
return func(a ...interface{}) { c.Print(a...) }
}
// PrintfFunc returns a new function that prints the passed arguments as
// colorized with color.Printf().
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
return func(format string, a ...interface{}) { c.Printf(format, a...) }
}
// PrintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Println().
func (c *Color) PrintlnFunc() func(a ...interface{}) {
return func(a ...interface{}) { c.Println(a...) }
}
// SprintFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprint(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output, example:
//
// put := New(FgYellow).SprintFunc()
// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
func (c *Color) SprintFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
}
// SprintfFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
return func(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
}
// SprintlnFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
}
// sequence returns a formatted SGR parameter list to be plugged into a
// "\x1b[...m" escape; an example output might be: "1;36" -> bold cyan.
func (c *Color) sequence() string {
	parts := make([]string, 0, len(c.params))
	for _, p := range c.params {
		parts = append(parts, strconv.Itoa(int(p)))
	}
	return strings.Join(parts, ";")
}
// wrap wraps the s string with the colors attributes. The string is ready to
// be printed.
func (c *Color) wrap(s string) string {
if c.isNoColorSet() {
return s
}
return c.format() + s + c.unformat()
}
// format returns the SGR start sequence for this color, e.g. "\x1b[1;36m".
func (c *Color) format() string {
return fmt.Sprintf("%s[%sm", escape, c.sequence())
}
// unformat returns the SGR reset sequence "\x1b[0m".
func (c *Color) unformat() string {
return fmt.Sprintf("%s[%dm", escape, Reset)
}
// DisableColor disables the color output. Useful to not change any existing
// code and still being able to output. Can be used for flags like
// "--no-color". To enable back use the EnableColor() method.
func (c *Color) DisableColor() {
c.noColor = boolPtr(true)
}
// EnableColor enables the color output. Use it in conjunction with
// DisableColor(). Otherwise this method has no side effects.
func (c *Color) EnableColor() {
c.noColor = boolPtr(false)
}
// isNoColorSet reports whether color output is disabled for this color.
func (c *Color) isNoColorSet() bool {
// check first if we have a user-set override for this color
if c.noColor != nil {
return *c.noColor
}
// if not return the global option, which is disabled by default
return NoColor
}
// Equals reports whether c and c2 consist of the same multiset of SGR
// attributes; attribute order is not significant.
func (c *Color) Equals(c2 *Color) bool {
	if len(c.params) != len(c2.params) {
		return false
	}
	// Count occurrences so duplicated attributes compare correctly: a
	// plain membership test would treat {A, A} and {A, B} as equal once
	// the lengths match.
	counts := make(map[Attribute]int, len(c.params))
	for _, attr := range c.params {
		counts[attr]++
	}
	for _, attr := range c2.params {
		counts[attr]--
		if counts[attr] < 0 {
			return false
		}
	}
	return true
}
// attrExists reports whether attribute a is present in c.params.
func (c *Color) attrExists(a Attribute) bool {
	for _, attr := range c.params {
		if attr == a {
			return true
		}
	}
	return false
}
// boolPtr returns a pointer to a fresh copy of v.
func boolPtr(v bool) *bool {
	b := v
	return &b
}
// colorsCache is used to reduce the count of created Color objects and
// allows to reuse already created objects with required Attribute.
var colorsCache = make(map[Attribute]*Color)
var colorsCacheMu = new(sync.Mutex) // protects colorsCache
// getCachedColor returns the shared single-attribute Color for p, creating
// and caching it on first use. Safe for concurrent use.
func getCachedColor(p Attribute) *Color {
colorsCacheMu.Lock()
defer colorsCacheMu.Unlock()
c, ok := colorsCache[p]
if !ok {
c = New(p)
colorsCache[p] = c
}
return c
}
// printColor prints with the cached single-attribute color for p. A newline
// is appended when format does not already end with one. When no arguments
// are given, format is printed verbatim via Print so that '%' runes in it
// are not interpreted as formatting directives.
func printColor(format string, p Attribute, a ...interface{}) {
c := getCachedColor(p)
if !strings.HasSuffix(format, "\n") {
format += "\n"
}
if len(a) == 0 {
c.Print(format)
return
}
c.Printf(format, a...)
}
// printString returns format rendered with the cached single-attribute
// color for p. When no arguments are given, format is passed through
// Sprint so that '%' runes in it are not interpreted as directives.
func printString(format string, p Attribute, a ...interface{}) string {
c := getCachedColor(p)
if len(a) == 0 {
return c.SprintFunc()(format)
}
return c.SprintfFunc()(format, a...)
}
// Black is a convenient helper function to print with black foreground. A
// newline is appended to format by default.
func Black(format string, a ...interface{}) { printColor(format, FgBlack, a...) }
// Red is a convenient helper function to print with red foreground. A
// newline is appended to format by default.
func Red(format string, a ...interface{}) { printColor(format, FgRed, a...) }
// Green is a convenient helper function to print with green foreground. A
// newline is appended to format by default.
func Green(format string, a ...interface{}) { printColor(format, FgGreen, a...) }
// Yellow is a convenient helper function to print with yellow foreground.
// A newline is appended to format by default.
func Yellow(format string, a ...interface{}) { printColor(format, FgYellow, a...) }
// Blue is a convenient helper function to print with blue foreground. A
// newline is appended to format by default.
func Blue(format string, a ...interface{}) { printColor(format, FgBlue, a...) }
// Magenta is a convenient helper function to print with magenta foreground.
// A newline is appended to format by default.
func Magenta(format string, a ...interface{}) { printColor(format, FgMagenta, a...) }
// Cyan is a convenient helper function to print with cyan foreground. A
// newline is appended to format by default.
func Cyan(format string, a ...interface{}) { printColor(format, FgCyan, a...) }
// White is a convenient helper function to print with white foreground. A
// newline is appended to format by default.
func White(format string, a ...interface{}) { printColor(format, FgWhite, a...) }
// BlackString is a convenient helper function to return a string with black
// foreground.
func BlackString(format string, a ...interface{}) string { return printString(format, FgBlack, a...) }
// RedString is a convenient helper function to return a string with red
// foreground.
func RedString(format string, a ...interface{}) string { return printString(format, FgRed, a...) }
// GreenString is a convenient helper function to return a string with green
// foreground.
func GreenString(format string, a ...interface{}) string { return printString(format, FgGreen, a...) }
// YellowString is a convenient helper function to return a string with yellow
// foreground.
func YellowString(format string, a ...interface{}) string { return printString(format, FgYellow, a...) }
// BlueString is a convenient helper function to return a string with blue
// foreground.
func BlueString(format string, a ...interface{}) string { return printString(format, FgBlue, a...) }
// MagentaString is a convenient helper function to return a string with magenta
// foreground.
func MagentaString(format string, a ...interface{}) string {
return printString(format, FgMagenta, a...)
}
// CyanString is a convenient helper function to return a string with cyan
// foreground.
func CyanString(format string, a ...interface{}) string { return printString(format, FgCyan, a...) }
// WhiteString is a convenient helper function to return a string with white
// foreground.
func WhiteString(format string, a ...interface{}) string { return printString(format, FgWhite, a...) }
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
)
// doHTTP issues a GET (or, when input is non-nil, a POST with a JSON
// content type) to url and returns the response body. The caller must
// close the returned ReadCloser. A non-200 status is reported as an error
// and the body is closed.
// NOTE(review): http.DefaultClient has no timeout, so a stalled server can
// block this call indefinitely — confirm that is acceptable here.
func doHTTP(url string, input io.Reader) (io.ReadCloser, error) {
method := "GET"
if input != nil {
method = "POST"
}
rq, err := http.NewRequest(method, url, input)
if err != nil {
return nil, err
}
// Impersonate a browser; presumably the endpoint rejects unknown agents.
rq.Header.Set("User-Agent", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)")
if input != nil {
rq.Header.Set("Content-Type", "application/json")
}
rsp, err := http.DefaultClient.Do(rq)
if err != nil {
return nil, err
}
if rsp.StatusCode != http.StatusOK {
rsp.Body.Close()
return nil, fmt.Errorf("got %d fetching %s", rsp.StatusCode, url)
}
return rsp.Body, nil
}
// doJson encodes input (when non-nil) as JSON, sends it to url via doHTTP,
// and decodes the JSON response into output.
func doJson(url string, input interface{}, output interface{}) error {
var post io.Reader
if input != nil {
body := &bytes.Buffer{}
err := json.NewEncoder(body).Encode(input)
if err != nil {
return err
}
post = body
}
result, err := doHTTP(url, post)
if err != nil {
return err
}
defer result.Close()
return json.NewDecoder(result).Decode(output)
}
// SearchPaging selects one page of search results.
type SearchPaging struct {
// Range is the page size (number of results requested).
Range int `json:"range"`
// StartIndex is the index of the first result to return.
StartIndex int `json:"startIndex"`
}
// SearchSorts describes one sort criterion for the search.
type SearchSorts struct {
Direction string `json:"direction"`
Type string `json:"type"`
}
// SearchFilters is the request payload for the APEC offer-search web
// service; the JSON field names follow the remote (French) API schema.
type SearchFilters struct {
EnableFilter bool `json:"activeFiltre"`
Functions []int `json:"fonctions"`
Places []int `json:"lieux"`
Keywords string `json:"motsCles"`
Experience []int `json:"niveauxExperience"`
Paging SearchPaging `json:"pagination"`
MinSalary int `json:"salaireMinimum"`
MaxSalary int `json:"salaireMaximum"`
Sectors []int `json:"secteursActivite"`
Sorts []SearchSorts `json:"sorts"`
ClientType string `json:"typeClient"`
ContractTypes []int `json:"typesContrat"`
ConventionTypes []int `json:"typesConvention"`
}
// searchOffers queries the APEC search service for one page of offers,
// sorted by descending date, and returns their identifiers, extracted from
// each result URI's "numeroOffre=" query parameter. start is the index of
// the first result and count the page size.
// NOTE(review): the salary bounds 60/120 are hard-coded — presumably
// thousands of euros per year; confirm against the remote API.
func searchOffers(start, count int) ([]string, error) {
filter := &SearchFilters{
EnableFilter: true,
Functions: []int{},
Places: []int{ /*705*/ },
Experience: []int{},
Paging: SearchPaging{
Range: count,
StartIndex: start,
},
MinSalary: 60,
MaxSalary: 120,
Sectors: []int{},
Sorts: []SearchSorts{
{
Direction: "DESCENDING",
Type: "DATE",
},
},
ClientType: "CADRE",
ContractTypes: []int{},
ConventionTypes: []int{},
}
// Anonymous struct matching only the part of the response we consume.
results := &struct {
Results []struct {
URI string `json:"@uriOffre"`
} `json:"resultats"`
}{}
url := "https://cadres.apec.fr/cms/webservices/rechercheOffre/ids"
err := doJson(url, filter, results)
if err != nil {
return nil, err
}
ids := []string{}
for _, uri := range results.Results {
parts := strings.Split(uri.URI, "numeroOffre=")
if len(parts) != 2 {
return nil, fmt.Errorf("invalid offer identifier: %s", uri.URI)
}
ids = append(ids, parts[1])
}
return ids, nil
}
// getOffer downloads the public JSON document for offer id and returns the
// raw response bytes.
func getOffer(id string) ([]byte, error) {
u := "https://cadres.apec.fr/cms/webservices/offre/public?numeroOffre=" + id
output, err := doHTTP(u, nil)
if err != nil {
return nil, err
}
defer output.Close()
return ioutil.ReadAll(output)
}
var (
// crawlCmd is the "crawl" subcommand that fetches APEC offers.
crawlCmd = app.Command("crawl", "crawl APEC offers")
// crawlStoreDir is the required argument naming the data store directory.
crawlStoreDir = crawlCmd.Arg("store", "data store directory").Required().String()
)
// crawlOffers lists all APEC offers page by page and stores each offer
// document in the store at *crawlStoreDir, skipping identifiers that are
// already present. It sleeps one second between fetches to rate-limit.
func crawlOffers() error {
store, err := CreateStore(*crawlStoreDir)
if err != nil {
return err
}
start := 0
count := 250
for {
fmt.Printf("fetching from %d to %d\n", start, start+count)
ids, err := searchOffers(start, count)
if err != nil {
return err
}
start += count
// fetched counts fetch attempts made for this page.
fetched := 0
for _, id := range ids {
if store.Has(id) {
fmt.Printf("skipping %s\n", id)
continue
}
fmt.Printf("fetching %s\n", id)
data, err := getOffer(id)
fetched += 1
if err != nil {
return err
}
time.Sleep(time.Second)
written, err := store.Write(id, data)
if err != nil {
return err
}
if !written {
// Another writer stored this id first; not an error.
fmt.Printf("racing %s\n", id)
continue
}
}
// A short page means we reached the end of the result set.
if len(ids) < count {
break
}
// Nothing was fetched, so we never slept above; pause here to avoid
// hammering the search endpoint.
if fetched == 0 {
time.Sleep(time.Second)
}
}
return nil
}
crawl: be more resilient when listing offers
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
)
func doHTTP(url string, input io.Reader) (io.ReadCloser, error) {
method := "GET"
if input != nil {
method = "POST"
}
rq, err := http.NewRequest(method, url, input)
if err != nil {
return nil, err
}
rq.Header.Set("User-Agent", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)")
if input != nil {
rq.Header.Set("Content-Type", "application/json")
}
rsp, err := http.DefaultClient.Do(rq)
if err != nil {
return nil, err
}
if rsp.StatusCode != http.StatusOK {
rsp.Body.Close()
return nil, fmt.Errorf("got %d fetching %s", rsp.StatusCode, url)
}
return rsp.Body, nil
}
// doJson encodes input (when non-nil) as JSON, sends it to url via doHTTP,
// and decodes the JSON response into output.
func doJson(url string, input interface{}, output interface{}) error {
	var post io.Reader
	if input != nil {
		var buf bytes.Buffer
		if err := json.NewEncoder(&buf).Encode(input); err != nil {
			return err
		}
		post = &buf
	}
	body, err := doHTTP(url, post)
	if err != nil {
		return err
	}
	defer body.Close()
	return json.NewDecoder(body).Decode(output)
}
// SearchPaging selects one page of search results.
type SearchPaging struct {
// Range is the page size (number of results requested).
Range int `json:"range"`
// StartIndex is the index of the first result to return.
StartIndex int `json:"startIndex"`
}
// SearchSorts describes one sort criterion for the search.
type SearchSorts struct {
Direction string `json:"direction"`
Type string `json:"type"`
}
// SearchFilters is the request payload for the APEC offer-search web
// service; the JSON field names follow the remote (French) API schema.
type SearchFilters struct {
EnableFilter bool `json:"activeFiltre"`
Functions []int `json:"fonctions"`
Places []int `json:"lieux"`
Keywords string `json:"motsCles"`
Experience []int `json:"niveauxExperience"`
Paging SearchPaging `json:"pagination"`
MinSalary int `json:"salaireMinimum"`
MaxSalary int `json:"salaireMaximum"`
Sectors []int `json:"secteursActivite"`
Sorts []SearchSorts `json:"sorts"`
ClientType string `json:"typeClient"`
ContractTypes []int `json:"typesContrat"`
ConventionTypes []int `json:"typesConvention"`
}
// searchOffers queries the APEC search service for one page of offers,
// sorted by descending date, and returns their identifiers, extracted from
// each result URI's "numeroOffre=" query parameter. start is the index of
// the first result and count the page size.
// NOTE(review): the salary bounds 60/120 are hard-coded — presumably
// thousands of euros per year; confirm against the remote API.
func searchOffers(start, count int) ([]string, error) {
filter := &SearchFilters{
EnableFilter: true,
Functions: []int{},
Places: []int{ /*705*/ },
Experience: []int{},
Paging: SearchPaging{
Range: count,
StartIndex: start,
},
MinSalary: 60,
MaxSalary: 120,
Sectors: []int{},
Sorts: []SearchSorts{
{
Direction: "DESCENDING",
Type: "DATE",
},
},
ClientType: "CADRE",
ContractTypes: []int{},
ConventionTypes: []int{},
}
// Anonymous struct matching only the part of the response we consume.
results := &struct {
Results []struct {
URI string `json:"@uriOffre"`
} `json:"resultats"`
}{}
url := "https://cadres.apec.fr/cms/webservices/rechercheOffre/ids"
err := doJson(url, filter, results)
if err != nil {
return nil, err
}
ids := []string{}
for _, uri := range results.Results {
parts := strings.Split(uri.URI, "numeroOffre=")
if len(parts) != 2 {
return nil, fmt.Errorf("invalid offer identifier: %s", uri.URI)
}
ids = append(ids, parts[1])
}
return ids, nil
}
// getOffer downloads the raw JSON payload of a single offer by identifier.
func getOffer(id string) ([]byte, error) {
	offerURL := "https://cadres.apec.fr/cms/webservices/offre/public?numeroOffre=" + id
	body, err := doHTTP(offerURL, nil)
	if err != nil {
		return nil, err
	}
	defer body.Close()
	return ioutil.ReadAll(body)
}
var (
	// crawlCmd is the CLI subcommand that downloads all APEC offers.
	crawlCmd = app.Command("crawl", "crawl APEC offers")
	// crawlStoreDir is the directory of the on-disk offer store.
	crawlStoreDir = crawlCmd.Arg("store", "data store directory").Required().String()
)
// enumerateOffers fetches successive pages of offer ids and feeds each page
// to callback, stopping after a short (final) page is returned.
//
// Transient search failures are retried with exponential backoff starting at
// baseDelay; once the backoff exceeds maxDelay the last error is returned.
//
// Fix: the original slept both in the loop post-statement AND at the top of
// the body, waiting twice per cycle (and once before the very first fetch).
// The sleep now happens only between iterations.
func enumerateOffers(callback func([]string) error) error {
	start := 0
	count := 250
	baseDelay := 5 * time.Second
	maxDelay := 5 * time.Minute
	delay := baseDelay
	for ; ; time.Sleep(delay) {
		fmt.Printf("fetching from %d to %d\n", start, start+count)
		ids, err := searchOffers(start, count)
		if err != nil {
			fmt.Printf("fetching failed with: %s\n", err)
			// Exponential backoff; give up once it would exceed maxDelay.
			delay *= 2
			if delay > maxDelay {
				return err
			}
			continue
		}
		delay = baseDelay
		start += count
		err = callback(ids)
		if err != nil {
			return err
		}
		// A short page means we reached the end of the result set.
		if len(ids) < count {
			break
		}
	}
	return nil
}
// crawlOffers opens (or creates) the local data store and downloads every
// offer id returned by the search endpoint that is not already stored.
func crawlOffers() error {
	store, err := CreateStore(*crawlStoreDir)
	if err != nil {
		return err
	}
	return enumerateOffers(func(ids []string) error {
		for _, id := range ids {
			// Skip offers fetched in a previous run.
			if store.Has(id) {
				fmt.Printf("skipping %s\n", id)
				continue
			}
			fmt.Printf("fetching %s\n", id)
			data, err := getOffer(id)
			if err != nil {
				return err
			}
			// Throttle requests to be polite to the remote service.
			time.Sleep(time.Second)
			written, err := store.Write(id, data)
			if err != nil {
				return err
			}
			if !written {
				// Another writer stored this id concurrently; not an error.
				fmt.Printf("racing %s\n", id)
				continue
			}
		}
		return nil
	})
}
|
package dalga
import (
"database/sql"
"flag"
"fmt"
"log"
"net"
"strconv"
"strings"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/streadway/amqp"
)
// createTableSQL is the CREATE TABLE statement for the job table; %s is
// replaced with the configured table name. A job is uniquely identified by
// the (routing_key, body) pair.
const createTableSQL = "" +
	"CREATE TABLE `%s` (" +
	" `routing_key` VARCHAR(255) NOT NULL," +
	" `body` BLOB(767) NOT NULL," + // 767 is the max index size
	" `interval` INT UNSIGNED NOT NULL," + // seconds between runs
	" `next_run` DATETIME NOT NULL," +
	"" +
	" PRIMARY KEY (`routing_key`, `body`(767))," +
	" KEY `idx_next_run` (`next_run`)" +
	") ENGINE=InnoDB DEFAULT CHARSET=utf8"

// debugging enables verbose logging when the -d flag is given.
var debugging = flag.Bool("d", false, "turn on debug messages")

// debug logs its arguments only when the -d flag is set.
func debug(args ...interface{}) {
	if *debugging {
		log.Println(args...)
	}
}

// Dalga schedules recurring jobs persisted in MySQL and publishes them to a
// RabbitMQ exchange when they become due.
type Dalga struct {
	C                 *Config
	db                *sql.DB
	rabbit            *amqp.Connection
	channel           *amqp.Channel
	listener          net.Listener
	newJobs           chan *Job // wakes the publisher when a job is scheduled
	canceledJobs      chan *Job // wakes the publisher when a job is canceled
	quitPublisher     chan bool
	publisherFinished chan bool
}

// NewDalga returns a Dalga with its signalling channels initialized.
// Call Start or Run to connect to MySQL/RabbitMQ and begin publishing.
func NewDalga(config *Config) *Dalga {
	return &Dalga{
		C:                 config,
		newJobs:           make(chan *Job),
		canceledJobs:      make(chan *Job),
		quitPublisher:     make(chan bool),
		publisherFinished: make(chan bool),
	}
}
// Start starts the publisher and http server goroutines.
// It connects to MySQL and RabbitMQ first; any connection error is returned
// before the goroutines are launched.
func (d *Dalga) Start() error {
	err := d.connectDB()
	if err != nil {
		return err
	}
	err = d.connectMQ()
	if err != nil {
		return err
	}
	server, err := d.makeServer()
	if err != nil {
		return err
	}
	go d.publisher()
	go server()
	return nil
}

// Run starts the dalga and waits until Shutdown() is called.
func (d *Dalga) Run() error {
	err := d.Start()
	if err != nil {
		return err
	}
	debug("Waiting a message from publisherFinished channel")
	// Block until the publisher goroutine signals that it has exited.
	<-d.publisherFinished
	debug("Received message from publisherFinished channel")
	return nil
}

// Shutdown stops the HTTP server by closing its listener.
func (d *Dalga) Shutdown() error {
	return d.listener.Close()
}
// connectDB opens the shared MySQL connection used by the scheduler.
func (d *Dalga) connectDB() error {
	var err error
	d.db, err = d.newMySQLConnection()
	return err
}

// newMySQLConnection opens and pings a MySQL connection built from the
// configuration. parseTime=true makes DATETIME columns scan into time.Time.
func (d *Dalga) newMySQLConnection() (*sql.DB, error) {
	my := d.C.MySQL
	dsn := my.User + ":" + my.Password + "@" + "tcp(" + my.Host + ":" + my.Port + ")/" + my.Db + "?parseTime=true"
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		return nil, err
	}
	// sql.Open does not touch the network; Ping verifies the server is up.
	err = db.Ping()
	if err != nil {
		return nil, err
	}
	log.Println("Connected to MySQL")
	return db, nil
}
// connectMQ dials RabbitMQ with the configured credentials and opens the
// channel used for publishing.
//
// Fix: the original logged "Connected to RabbitMQ" before checking the error
// returned by Channel(), so a failed channel open was still reported as a
// successful connection.
func (d *Dalga) connectMQ() error {
	rabbit := d.C.RabbitMQ
	uri := "amqp://" + rabbit.User + ":" + rabbit.Password + "@" + rabbit.Host + ":" + rabbit.Port + rabbit.VHost
	var err error
	d.rabbit, err = amqp.Dial(uri)
	if err != nil {
		return err
	}
	d.channel, err = d.rabbit.Channel()
	if err != nil {
		return err
	}
	log.Println("Connected to RabbitMQ")
	return nil
}
// CreateTable creates the job table on a dedicated connection, using the
// table name from the configuration.
//
// Fix: the local variable was named `sql`, shadowing the imported
// database/sql package inside this function; renamed to `query`.
func (d *Dalga) CreateTable() error {
	db, err := d.newMySQLConnection()
	if err != nil {
		return err
	}
	defer db.Close()
	query := fmt.Sprintf(createTableSQL, d.C.MySQL.Table)
	_, err = db.Exec(query)
	return err
}
// Schedule inserts (or updates) the job identified by (routingKey, body) to
// run every interval seconds, then nudges the publisher so it can pick the
// new job up immediately.
func (d *Dalga) Schedule(routingKey string, body []byte, interval uint32) error {
	job := NewJob(routingKey, body, interval)
	err := d.insert(job)
	if err != nil {
		return err
	}
	// Wake up the publisher.
	//
	// publisher() may be sleeping for the next job on the queue
	// at the time we schedule a new Job. Let it wake up so it can
	// re-fetch the new Job from the front of the queue.
	//
	// The code below is an idiom for non-blocking send to a channel.
	select {
	case d.newJobs <- job:
		debug("Sent new job signal")
	default:
		debug("Did not send new job signal")
	}
	return nil
}

// Cancel removes the job identified by (routingKey, body) from the queue and
// nudges the publisher in case it is currently sleeping on that job.
func (d *Dalga) Cancel(routingKey string, body []byte) error {
	err := d.delete(routingKey, body)
	if err != nil {
		return err
	}
	// Non-blocking send, same idiom as in Schedule.
	select {
	case d.canceledJobs <- &Job{RoutingKey: routingKey, Body: body}:
		debug("Sent cancel signal")
	default:
		debug("Did not send cancel signal")
	}
	return nil
}
// front returns the first job to be run in the queue, i.e. the row with the
// smallest next_run. Returns sql.ErrNoRows (wrapped by the driver) when the
// table is empty.
func (d *Dalga) front() (*Job, error) {
	var interval uint32
	var j Job
	row := d.db.QueryRow("SELECT routing_key, body, `interval`, next_run " +
		"FROM " + d.C.MySQL.Table + " " +
		"ORDER BY next_run ASC LIMIT 1")
	err := row.Scan(&j.RoutingKey, &j.Body, &interval, &j.NextRun)
	if err != nil {
		return nil, err
	}
	// interval is stored in seconds; expose it as a time.Duration.
	j.Interval = time.Duration(interval) * time.Second
	return &j, nil
}
// publish sends a message to the exchange defined in the config and
// updates the Job's next run time on the database.
//
// Fix: the original ignored the error of the retried Publish call and always
// returned nil for publish failures, silently dropping messages. The error
// is now propagated to the caller.
func (d *Dalga) publish(j *Job) error {
	debug("publish", *j)
	// Update next run time first.
	_, err := d.db.Exec("UPDATE "+d.C.MySQL.Table+" "+
		"SET next_run=? "+
		"WHERE routing_key=? AND body=?",
		time.Now().UTC().Add(j.Interval), j.RoutingKey, j.Body)
	if err != nil {
		return err
	}
	// Send a message to RabbitMQ.
	pub := func() error {
		return d.channel.Publish(d.C.RabbitMQ.Exchange, j.RoutingKey, false, false, amqp.Publishing{
			Headers: amqp.Table{
				"interval":     j.Interval.Seconds(),
				"published_at": time.Now().UTC().String(),
			},
			ContentType:  "application/octet-stream",
			Body:         j.Body,
			DeliveryMode: amqp.Persistent,
			Priority:     0,
			// Expire the message after one interval (value in milliseconds).
			Expiration: strconv.FormatUint(uint64(j.Interval.Seconds()), 10) + "000",
		})
	}
	err = pub()
	if err != nil && strings.Contains(err.Error(), "channel/connection is not open") {
		// The channel went stale; reconnect once and retry.
		if err = d.connectMQ(); err != nil {
			return err
		}
		err = pub()
	}
	return err
}
// insert puts the job to the waiting queue.
// On a duplicate (routing_key, body) key the row is updated in place:
// next_run is shifted by the difference between the new and old interval,
// and the interval is replaced.
func (d *Dalga) insert(j *Job) error {
	interval := j.Interval.Seconds()
	_, err := d.db.Exec("INSERT INTO "+d.C.MySQL.Table+" "+
		"(routing_key, body, `interval`, next_run) "+
		"VALUES(?, ?, ?, ?) "+
		"ON DUPLICATE KEY UPDATE "+
		"next_run=DATE_ADD(next_run, INTERVAL (? - `interval`) SECOND), "+
		"`interval`=?",
		j.RoutingKey, j.Body, interval, j.NextRun, interval, interval)
	return err
}

// delete removes the job from the waiting queue.
func (d *Dalga) delete(routingKey string, body []byte) error {
	_, err := d.db.Exec("DELETE FROM "+d.C.MySQL.Table+" "+
		"WHERE routing_key=? AND body=?", routingKey, body)
	return err
}
// publisher runs a loop that reads the next Job from the queue and publishes it.
// It exits when quitPublisher is signalled, announcing the exit on
// publisherFinished.
func (d *Dalga) publisher() {
	// publish wraps d.publish with logging and a short pause so a failing
	// broker or database does not spin the loop.
	publish := func(j *Job) {
		err := d.publish(j)
		if err != nil {
			log.Println(err)
			time.Sleep(time.Duration(1) * time.Second)
		}
	}
	for {
		debug("---")
		// Non-blocking check for shutdown before touching the database.
		select {
		case <-d.quitPublisher:
			debug("Came message from channel 1: quitPublisher")
			goto end
		default:
		}
		job, err := d.front()
		if err != nil {
			if strings.Contains(err.Error(), "no rows in result set") {
				debug("No waiting jobs in the queue")
				debug("Waiting wakeup signal")
				// Queue is empty: block until a job is scheduled or we quit.
				select {
				case job = <-d.newJobs:
				case <-d.quitPublisher:
					debug("Came message from channel 2: quitPublisher")
					goto end
				}
				debug("Got wakeup signal")
			} else {
				log.Println(err)
				time.Sleep(time.Duration(1) * time.Second)
				continue
			}
		}
	CheckNextRun:
		remaining := job.Remaining()
		debug("Next job:", job, "Remaining:", remaining)
		now := time.Now().UTC()
		if job.NextRun.After(now) {
			// Wait until the next Job time or
			// the webserver's /schedule handler wakes us up
			debug("Sleeping for job:", remaining)
			select {
			case <-time.After(remaining):
				debug("Job sleep time finished")
				publish(job)
			case newJob := <-d.newJobs:
				debug("A new job has been scheduled")
				if newJob.NextRun.Before(job.NextRun) {
					debug("The new job comes before out current job")
					job = newJob // Process the new job next
				}
				// Continue processing the current job without fetching from database
				goto CheckNextRun
			case canceledJob := <-d.canceledJobs:
				debug("A job has been cancelled")
				if job.Equals(canceledJob) {
					// The job we are waiting for has been canceled.
					// We need to fetch the next job in the queue.
					debug("The cancelled job is our current job")
					continue
				}
				// Continue to process our current job
				goto CheckNextRun
			case <-d.quitPublisher:
				debug("Came message from channel 3: quitPublisher")
				goto end
			}
		} else {
			// Job is already due: publish immediately.
			publish(job)
		}
	}
end:
	d.publisherFinished <- true
}
comments
package dalga
import (
"database/sql"
"flag"
"fmt"
"log"
"net"
"strconv"
"strings"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/streadway/amqp"
)
// createTableSQL is the CREATE TABLE statement for the job table; %s is
// replaced with the configured table name. A job is uniquely identified by
// the (routing_key, body) pair.
const createTableSQL = "" +
	"CREATE TABLE `%s` (" +
	" `routing_key` VARCHAR(255) NOT NULL," +
	" `body` BLOB(767) NOT NULL," + // 767 is the max index size
	" `interval` INT UNSIGNED NOT NULL," + // 32-bit
	" `next_run` DATETIME NOT NULL," +
	"" +
	" PRIMARY KEY (`routing_key`, `body`(767))," +
	" KEY `idx_next_run` (`next_run`)" +
	") ENGINE=InnoDB DEFAULT CHARSET=utf8"

// debugging enables verbose logging when the -d flag is given.
var debugging = flag.Bool("d", false, "turn on debug messages")

// debug logs its arguments only when the -d flag is set.
func debug(args ...interface{}) {
	if *debugging {
		log.Println(args...)
	}
}

// Dalga schedules recurring jobs persisted in MySQL and publishes them to a
// RabbitMQ exchange when they become due.
type Dalga struct {
	C                 *Config
	db                *sql.DB
	rabbit            *amqp.Connection
	channel           *amqp.Channel
	listener          net.Listener
	newJobs           chan *Job // wakes the publisher when a job is scheduled
	canceledJobs      chan *Job // wakes the publisher when a job is canceled
	quitPublisher     chan bool
	publisherFinished chan bool
}

// NewDalga returns a Dalga with its signalling channels initialized.
// Call Start or Run to connect to MySQL/RabbitMQ and begin publishing.
func NewDalga(config *Config) *Dalga {
	return &Dalga{
		C:                 config,
		newJobs:           make(chan *Job),
		canceledJobs:      make(chan *Job),
		quitPublisher:     make(chan bool),
		publisherFinished: make(chan bool),
	}
}
// Start starts the publisher and http server goroutines.
// It connects to MySQL and RabbitMQ first; any connection error is returned
// before the goroutines are launched.
func (d *Dalga) Start() error {
	err := d.connectDB()
	if err != nil {
		return err
	}
	err = d.connectMQ()
	if err != nil {
		return err
	}
	server, err := d.makeServer()
	if err != nil {
		return err
	}
	go d.publisher()
	go server()
	return nil
}

// Run starts the dalga and waits until Shutdown() is called.
func (d *Dalga) Run() error {
	err := d.Start()
	if err != nil {
		return err
	}
	debug("Waiting a message from publisherFinished channel")
	// Block until the publisher goroutine signals that it has exited.
	<-d.publisherFinished
	debug("Received message from publisherFinished channel")
	return nil
}

// Shutdown stops the HTTP server by closing its listener.
func (d *Dalga) Shutdown() error {
	return d.listener.Close()
}
// connectDB opens the shared MySQL connection used by the scheduler.
func (d *Dalga) connectDB() error {
	var err error
	d.db, err = d.newMySQLConnection()
	return err
}

// newMySQLConnection opens and pings a MySQL connection built from the
// configuration. parseTime=true makes DATETIME columns scan into time.Time.
func (d *Dalga) newMySQLConnection() (*sql.DB, error) {
	my := d.C.MySQL
	dsn := my.User + ":" + my.Password + "@" + "tcp(" + my.Host + ":" + my.Port + ")/" + my.Db + "?parseTime=true"
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		return nil, err
	}
	// sql.Open does not touch the network; Ping verifies the server is up.
	err = db.Ping()
	if err != nil {
		return nil, err
	}
	log.Println("Connected to MySQL")
	return db, nil
}
// connectMQ dials RabbitMQ with the configured credentials and opens the
// channel used for publishing.
//
// Fix: the original logged "Connected to RabbitMQ" before checking the error
// returned by Channel(), so a failed channel open was still reported as a
// successful connection.
func (d *Dalga) connectMQ() error {
	rabbit := d.C.RabbitMQ
	uri := "amqp://" + rabbit.User + ":" + rabbit.Password + "@" + rabbit.Host + ":" + rabbit.Port + rabbit.VHost
	var err error
	d.rabbit, err = amqp.Dial(uri)
	if err != nil {
		return err
	}
	d.channel, err = d.rabbit.Channel()
	if err != nil {
		return err
	}
	log.Println("Connected to RabbitMQ")
	return nil
}
// CreateTable creates the job table on a dedicated connection, using the
// table name from the configuration.
//
// Fix: the local variable was named `sql`, shadowing the imported
// database/sql package inside this function; renamed to `query`.
func (d *Dalga) CreateTable() error {
	db, err := d.newMySQLConnection()
	if err != nil {
		return err
	}
	defer db.Close()
	query := fmt.Sprintf(createTableSQL, d.C.MySQL.Table)
	_, err = db.Exec(query)
	return err
}
// Schedule inserts (or updates) the job identified by (routingKey, body) to
// run every interval seconds, then nudges the publisher so it can pick the
// new job up immediately.
func (d *Dalga) Schedule(routingKey string, body []byte, interval uint32) error {
	job := NewJob(routingKey, body, interval)
	err := d.insert(job)
	if err != nil {
		return err
	}
	// Wake up the publisher.
	//
	// publisher() may be sleeping for the next job on the queue
	// at the time we schedule a new Job. Let it wake up so it can
	// re-fetch the new Job from the front of the queue.
	//
	// The code below is an idiom for non-blocking send to a channel.
	select {
	case d.newJobs <- job:
		debug("Sent new job signal")
	default:
		debug("Did not send new job signal")
	}
	return nil
}

// Cancel removes the job identified by (routingKey, body) from the queue and
// nudges the publisher in case it is currently sleeping on that job.
func (d *Dalga) Cancel(routingKey string, body []byte) error {
	err := d.delete(routingKey, body)
	if err != nil {
		return err
	}
	// Non-blocking send, same idiom as in Schedule.
	select {
	case d.canceledJobs <- &Job{RoutingKey: routingKey, Body: body}:
		debug("Sent cancel signal")
	default:
		debug("Did not send cancel signal")
	}
	return nil
}
// front returns the first job to be run in the queue, i.e. the row with the
// smallest next_run. Returns sql.ErrNoRows (wrapped by the driver) when the
// table is empty.
func (d *Dalga) front() (*Job, error) {
	var interval uint32
	var j Job
	row := d.db.QueryRow("SELECT routing_key, body, `interval`, next_run " +
		"FROM " + d.C.MySQL.Table + " " +
		"ORDER BY next_run ASC LIMIT 1")
	err := row.Scan(&j.RoutingKey, &j.Body, &interval, &j.NextRun)
	if err != nil {
		return nil, err
	}
	// interval is stored in seconds; expose it as a time.Duration.
	j.Interval = time.Duration(interval) * time.Second
	return &j, nil
}
// publish sends a message to the exchange defined in the config and
// updates the Job's next run time on the database.
//
// Fix: the original ignored the error of the retried Publish call and always
// returned nil for publish failures, silently dropping messages. The error
// is now propagated to the caller.
func (d *Dalga) publish(j *Job) error {
	debug("publish", *j)
	// Update next run time first.
	_, err := d.db.Exec("UPDATE "+d.C.MySQL.Table+" "+
		"SET next_run=? "+
		"WHERE routing_key=? AND body=?",
		time.Now().UTC().Add(j.Interval), j.RoutingKey, j.Body)
	if err != nil {
		return err
	}
	// Send a message to RabbitMQ.
	pub := func() error {
		return d.channel.Publish(d.C.RabbitMQ.Exchange, j.RoutingKey, false, false, amqp.Publishing{
			Headers: amqp.Table{
				"interval":     j.Interval.Seconds(),
				"published_at": time.Now().UTC().String(),
			},
			ContentType:  "application/octet-stream",
			Body:         j.Body,
			DeliveryMode: amqp.Persistent,
			Priority:     0,
			// Expire the message after one interval (value in milliseconds).
			Expiration: strconv.FormatUint(uint64(j.Interval.Seconds()), 10) + "000",
		})
	}
	err = pub()
	if err != nil && strings.Contains(err.Error(), "channel/connection is not open") {
		// The channel went stale; reconnect once and retry.
		if err = d.connectMQ(); err != nil {
			return err
		}
		err = pub()
	}
	return err
}
// insert puts the job to the waiting queue.
// On a duplicate (routing_key, body) key the row is updated in place:
// next_run is shifted by the difference between the new and old interval,
// and the interval is replaced.
func (d *Dalga) insert(j *Job) error {
	interval := j.Interval.Seconds()
	_, err := d.db.Exec("INSERT INTO "+d.C.MySQL.Table+" "+
		"(routing_key, body, `interval`, next_run) "+
		"VALUES(?, ?, ?, ?) "+
		"ON DUPLICATE KEY UPDATE "+
		"next_run=DATE_ADD(next_run, INTERVAL (? - `interval`) SECOND), "+
		"`interval`=?",
		j.RoutingKey, j.Body, interval, j.NextRun, interval, interval)
	return err
}

// delete removes the job from the waiting queue.
func (d *Dalga) delete(routingKey string, body []byte) error {
	_, err := d.db.Exec("DELETE FROM "+d.C.MySQL.Table+" "+
		"WHERE routing_key=? AND body=?", routingKey, body)
	return err
}
// publisher runs a loop that reads the next Job from the queue and publishes it.
// It exits when quitPublisher is signalled, announcing the exit on
// publisherFinished.
func (d *Dalga) publisher() {
	// publish wraps d.publish with logging and a short pause so a failing
	// broker or database does not spin the loop.
	publish := func(j *Job) {
		err := d.publish(j)
		if err != nil {
			log.Println(err)
			time.Sleep(time.Duration(1) * time.Second)
		}
	}
	for {
		debug("---")
		// Non-blocking check for shutdown before touching the database.
		select {
		case <-d.quitPublisher:
			debug("Came message from channel 1: quitPublisher")
			goto end
		default:
		}
		job, err := d.front()
		if err != nil {
			if strings.Contains(err.Error(), "no rows in result set") {
				debug("No waiting jobs in the queue")
				debug("Waiting wakeup signal")
				// Queue is empty: block until a job is scheduled or we quit.
				select {
				case job = <-d.newJobs:
				case <-d.quitPublisher:
					debug("Came message from channel 2: quitPublisher")
					goto end
				}
				debug("Got wakeup signal")
			} else {
				log.Println(err)
				time.Sleep(time.Duration(1) * time.Second)
				continue
			}
		}
	CheckNextRun:
		remaining := job.Remaining()
		debug("Next job:", job, "Remaining:", remaining)
		now := time.Now().UTC()
		if job.NextRun.After(now) {
			// Wait until the next Job time or
			// the webserver's /schedule handler wakes us up
			debug("Sleeping for job:", remaining)
			select {
			case <-time.After(remaining):
				debug("Job sleep time finished")
				publish(job)
			case newJob := <-d.newJobs:
				debug("A new job has been scheduled")
				if newJob.NextRun.Before(job.NextRun) {
					debug("The new job comes before out current job")
					job = newJob // Process the new job next
				}
				// Continue processing the current job without fetching from database
				goto CheckNextRun
			case canceledJob := <-d.canceledJobs:
				debug("A job has been cancelled")
				if job.Equals(canceledJob) {
					// The job we are waiting for has been canceled.
					// We need to fetch the next job in the queue.
					debug("The cancelled job is our current job")
					continue
				}
				// Continue to process our current job
				goto CheckNextRun
			case <-d.quitPublisher:
				debug("Came message from channel 3: quitPublisher")
				goto end
			}
		} else {
			// Job is already due: publish immediately.
			publish(job)
		}
	}
end:
	d.publisherFinished <- true
}
|
package node
import (
"errors"
"log"
"sync"
)
// Takes care of maintaining and relaying maps and insures that we know which
// interfaces can reach which addresses.
type ReachabilityHandler interface {
	AddConnection(NodeAddress, MapConnection)
	FindNextHop(NodeAddress) (NodeAddress, error)
}

// taggedMap pairs a reachability map with the neighbor it arrived from.
type taggedMap struct {
	address NodeAddress
	new_map ReachabilityMap
}

// reachability is the default ReachabilityHandler implementation.
// conns, maps and merged_map are all guarded by l.
type reachability struct {
	me         NodeAddress
	l          *sync.Mutex
	conns      map[NodeAddress]MapConnection
	maps       map[NodeAddress]ReachabilityMap
	merged_map ReachabilityMap
}

// newReachability builds a handler for node me whose merged map initially
// contains only me itself.
func newReachability(me NodeAddress) ReachabilityHandler {
	conns := make(map[NodeAddress]MapConnection)
	maps := make(map[NodeAddress]ReachabilityMap)
	impl := &reachability{me, &sync.Mutex{}, conns, maps, NewBloomReachabilityMap()}
	impl.merged_map.AddEntry(me)
	return impl
}
// addMap merges a map received from update.address into both the per-neighbor
// map and the merged map, then relays a copy to every other connection.
// NOTE(review): assumes update.address was registered via AddConnection;
// otherwise m.maps[update.address] is nil — confirm callers guarantee this.
func (m *reachability) addMap(update taggedMap) {
	m.l.Lock()
	defer m.l.Unlock()
	m.maps[update.address].Merge(update.new_map)
	m.merged_map.Merge(update.new_map)
	for addr, conn := range m.conns {
		if addr != update.address {
			conn.SendMap(update.new_map.Copy())
		}
	}
}
// AddConnection registers a neighbor: one goroutine sends our current merged
// map to the new peer, another merges every map the peer sends back.
//
// Fix: snapshot merged_map while still holding the lock. The original read
// m.merged_map inside the sender goroutine, so maps merged between
// AddConnection returning and the goroutine running could race with addMap.
func (m *reachability) AddConnection(id NodeAddress, c MapConnection) {
	// TODO(colin): This should be streamed. or something similar.
	m.l.Lock()
	defer m.l.Unlock()
	m.maps[id] = NewBloomReachabilityMap()
	m.conns[id] = c
	initial_map := m.merged_map.Copy()
	// Send all our maps
	go func() {
		m.l.Lock()
		defer m.l.Unlock()
		err := c.SendMap(initial_map)
		if err != nil {
			log.Fatal(err)
		}
	}()
	// Store all received maps
	go func() {
		for rmap := range c.ReachabilityMaps() {
			rmap.Increment()
			m.addMap(taggedMap{id, rmap})
		}
	}()
}
// FindNextHop returns the neighbor that traffic for id should be forwarded
// to: id itself when directly connected, otherwise the first neighbor whose
// reachability map claims it can reach id.
func (m *reachability) FindNextHop(id NodeAddress) (NodeAddress, error) {
	m.l.Lock()
	defer m.l.Unlock()
	_, ok := m.conns[id]
	if ok {
		return id, nil
	}
	for rid, rmap := range m.maps {
		if rmap.IsReachable(id) {
			return rid, nil
		}
	}
	return "", errors.New("Unable to find host")
}
Prevent race conditions in the initial map sending
package node
import (
"errors"
"log"
"sync"
)
// Takes care of maintaining and relaying maps and insures that we know which
// interfaces can reach which addresses.
type ReachabilityHandler interface {
	AddConnection(NodeAddress, MapConnection)
	FindNextHop(NodeAddress) (NodeAddress, error)
}

// taggedMap pairs a reachability map with the neighbor it arrived from.
type taggedMap struct {
	address NodeAddress
	new_map ReachabilityMap
}

// reachability is the default ReachabilityHandler implementation.
// conns, maps and merged_map are all guarded by l.
type reachability struct {
	me         NodeAddress
	l          *sync.Mutex
	conns      map[NodeAddress]MapConnection
	maps       map[NodeAddress]ReachabilityMap
	merged_map ReachabilityMap
}

// newReachability builds a handler for node me whose merged map initially
// contains only me itself.
func newReachability(me NodeAddress) ReachabilityHandler {
	conns := make(map[NodeAddress]MapConnection)
	maps := make(map[NodeAddress]ReachabilityMap)
	impl := &reachability{me, &sync.Mutex{}, conns, maps, NewBloomReachabilityMap()}
	impl.merged_map.AddEntry(me)
	return impl
}
// addMap merges a map received from update.address into both the per-neighbor
// map and the merged map, then relays a copy to every other connection.
// NOTE(review): assumes update.address was registered via AddConnection;
// otherwise m.maps[update.address] is nil — confirm callers guarantee this.
func (m *reachability) addMap(update taggedMap) {
	m.l.Lock()
	defer m.l.Unlock()
	m.maps[update.address].Merge(update.new_map)
	m.merged_map.Merge(update.new_map)
	for addr, conn := range m.conns {
		if addr != update.address {
			conn.SendMap(update.new_map.Copy())
		}
	}
}
// AddConnection registers a neighbor: one goroutine sends our current merged
// map to the new peer, another merges every map the peer sends back.
func (m *reachability) AddConnection(id NodeAddress, c MapConnection) {
	// TODO(colin): This should be streamed. or something similar.
	m.l.Lock()
	defer m.l.Unlock()
	m.maps[id] = NewBloomReachabilityMap()
	m.conns[id] = c
	// Snapshot under the lock so concurrent merges cannot race with the
	// sender goroutine below.
	initial_map := m.merged_map.Copy()
	// Send all our maps
	go func() {
		m.l.Lock()
		defer m.l.Unlock()
		err := c.SendMap(initial_map)
		if err != nil {
			log.Fatal(err)
		}
	}()
	// Store all received maps
	go func() {
		for rmap := range c.ReachabilityMaps() {
			rmap.Increment()
			m.addMap(taggedMap{id, rmap})
		}
	}()
}
// FindNextHop returns the neighbor that traffic for id should be forwarded
// to: id itself when directly connected, otherwise the first neighbor whose
// reachability map claims it can reach id.
func (m *reachability) FindNextHop(id NodeAddress) (NodeAddress, error) {
	m.l.Lock()
	defer m.l.Unlock()
	_, ok := m.conns[id]
	if ok {
		return id, nil
	}
	for rid, rmap := range m.maps {
		if rmap.IsReachable(id) {
			return rid, nil
		}
	}
	return "", errors.New("Unable to find host")
}
|
package zip
import (
"archive/zip"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
)
// Archive zips the file or directory at inPath into a new zip file created
// at outPath. When inPath is a directory, includeRootDir controls whether
// the directory itself appears as the top-level entry or only its children.
func Archive(inPath string, outPath string, includeRootDir bool) error {
	inFileInfo, err := os.Stat(inPath)
	if err != nil {
		return err
	}
	outFile, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer outFile.Close()
	zipWriter := zip.NewWriter(outFile)
	inIsDir := inFileInfo.IsDir()
	archivePath := ""
	if !inIsDir || includeRootDir {
		archivePath = inFileInfo.Name()
	}
	err = archive(zipWriter, inPath, inIsDir, archivePath)
	if err != nil {
		return err
	}
	// Close flushes the zip central directory; its error must be checked.
	err = zipWriter.Close()
	if err != nil {
		return err
	}
	return nil
}
// archive dispatches to the directory or single-file archiver.
func archive(zipWriter *zip.Writer, inPath string, inIsDir bool, archivePath string) error {
	if inIsDir {
		return archiveDir(zipWriter, inPath, archivePath)
	}
	return archiveFile(zipWriter, inPath, archivePath)
}
// archiveDir recursively adds every child of the directory at inPath to the
// archive under archivePath. Note: empty directories produce no entries.
func archiveDir(zipWriter *zip.Writer, inPath string, archivePath string) error {
	childFileInfos, err := ioutil.ReadDir(inPath)
	if err != nil {
		return err
	}
	for _, childFileInfo := range childFileInfos {
		childFileName := childFileInfo.Name()
		childInPath := filepath.Join(inPath, childFileName)
		// path.Join (not filepath) because zip entry names always use '/'.
		childArchivePath := path.Join(archivePath, childFileName)
		childIsDir := childFileInfo.IsDir()
		err = archive(zipWriter, childInPath, childIsDir, childArchivePath)
		if err != nil {
			return err
		}
	}
	return nil
}
func archiveFile(zipWriter *zip.Writer, inPath string, archivePath string) error {
inFile, err := os.Open(inPath)
if err != nil {
return err
}
defer inFile.Close()
writer, err := zipWriter.Create(archivePath)
if err != nil {
return err
}
_, err = io.Copy(writer, inFile)
if err != nil {
return err
}
return nil
}
// Unarchive is not implemented yet; it is intended to extract the archive at
// archivePath into filePath.
func Unarchive(archivePath string, filePath string) error {
	//TODO
	return nil
}
Add ArchiveFile() for zip
package zip
import (
"archive/zip"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
)
// Archive writes a zip archive of the file or directory at filePath to
// writer. When filePath is a directory, includeRootDir controls whether the
// directory itself appears as the top-level entry or only its children.
func Archive(filePath string, includeRootDir bool, writer io.Writer) error {
	fileInfo, err := os.Stat(filePath)
	if err != nil {
		return err
	}
	zipWriter := zip.NewWriter(writer)
	isDir := fileInfo.IsDir()
	archivePath := ""
	if !isDir || includeRootDir {
		archivePath = fileInfo.Name()
	}
	err = archive(zipWriter, filePath, isDir, archivePath)
	if err != nil {
		return err
	}
	// Close flushes the zip central directory; its error must be checked.
	err = zipWriter.Close()
	if err != nil {
		return err
	}
	return nil
}
// ArchiveFile zips filePath into a newly created file at outFilePath.
// It is a convenience wrapper around Archive.
func ArchiveFile(filePath string, includeRootDir bool, outFilePath string) error {
	out, err := os.Create(outFilePath)
	if err != nil {
		return err
	}
	defer out.Close()
	return Archive(filePath, includeRootDir, out)
}
// archive dispatches to the directory or single-file archiver.
func archive(zipWriter *zip.Writer, filePath string, isDir bool, archivePath string) error {
	if isDir {
		return archiveDir(zipWriter, filePath, archivePath)
	}
	return archiveFile(zipWriter, filePath, archivePath)
}
// archiveDir recursively adds every child of the directory at filePath to
// the archive under archivePath. Note: empty directories produce no entries.
func archiveDir(zipWriter *zip.Writer, filePath string, archivePath string) error {
	childFileInfos, err := ioutil.ReadDir(filePath)
	if err != nil {
		return err
	}
	for _, childFileInfo := range childFileInfos {
		childFileName := childFileInfo.Name()
		childFilePath := filepath.Join(filePath, childFileName)
		// path.Join (not filepath) because zip entry names always use '/'.
		childArchivePath := path.Join(archivePath, childFileName)
		childIsDir := childFileInfo.IsDir()
		err = archive(zipWriter, childFilePath, childIsDir, childArchivePath)
		if err != nil {
			return err
		}
	}
	return nil
}
func archiveFile(zipWriter *zip.Writer, filePath string, archivePath string) error {
file, err := os.Open(filePath)
if err != nil {
return err
}
defer file.Close()
writer, err := zipWriter.Create(archivePath)
if err != nil {
return err
}
_, err = io.Copy(writer, file)
if err != nil {
return err
}
return nil
}
// Unarchive is not implemented yet; it is intended to extract the archive at
// archivePath into filePath.
func Unarchive(archivePath string, filePath string) error {
	//TODO
	return nil
}
|
// Package zk is a native Go client library for the ZooKeeper orchestration service.
package zk
/*
TODO:
* make sure a ping response comes back in a reasonable time
Possible watcher events:
* Event{Type: EventNotWatching, State: StateDisconnected, Path: path, Err: err}
*/
import (
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// ErrNoServer indicates that an operation cannot be completed
// because attempts to connect to all servers in the list failed.
var ErrNoServer = errors.New("zk: could not connect to a server")

// ErrInvalidPath indicates that an operation was being attempted on
// an invalid path. (e.g. empty path)
var ErrInvalidPath = errors.New("zk: invalid path")

// DefaultLogger uses the stdlib log package for logging.
var DefaultLogger Logger = defaultLogger{}

const (
	bufferSize      = 1536 * 1024
	eventChanSize   = 6 // buffer of the session event channel returned by Connect
	sendChanSize    = 16 // buffer of the outgoing request channel
	protectedPrefix = "_c_"
)

// watchType distinguishes the three kinds of ZooKeeper watches.
type watchType int

const (
	watchTypeData  = iota
	watchTypeExist = iota
	watchTypeChild = iota
)

// watchPathType keys the watcher map: one entry per (path, watch kind).
type watchPathType struct {
	path  string
	wType watchType
}

// Dialer establishes the TCP connection to a ZooKeeper server.
type Dialer func(network, address string, timeout time.Duration) (net.Conn, error)

// Logger is an interface that can be implemented to provide custom log output.
type Logger interface {
	Printf(string, ...interface{})
}

// Conn is a client connection to a ZooKeeper ensemble. It multiplexes
// requests and watch events over one TCP connection and reconnects through
// its host provider when the connection drops.
type Conn struct {
	lastZxid         int64
	sessionID        int64
	state            State // must be 32-bit aligned
	xid              uint32
	sessionTimeoutMs int32 // session timeout in milliseconds
	passwd           []byte
	dialer           Dialer
	hostProvider     HostProvider
	serverMu         sync.Mutex // protects server
	server           string     // remember the address/port of the current server
	conn             net.Conn
	eventChan        chan Event
	shouldQuit       chan struct{}
	pingInterval     time.Duration
	recvTimeout      time.Duration
	connectTimeout   time.Duration
	sendChan         chan *request
	requests         map[int32]*request // Xid -> pending request
	requestsLock     sync.Mutex
	watchers         map[watchPathType][]chan Event
	watchersLock     sync.Mutex
	// Debug (used by unit tests)
	reconnectDelay time.Duration
	logger         Logger
}

// connOption represents a connection option.
type connOption func(c *Conn)

// request is one pending client call, matched to its reply by xid.
type request struct {
	xid        int32
	opcode     int32
	pkt        interface{}
	recvStruct interface{}
	recvChan   chan response
	// Because sending and receiving happen in separate go routines, there's
	// a possible race condition when creating watches from outside the read
	// loop. We must ensure that a watcher gets added to the list synchronously
	// with the response from the server on any request that creates a watch.
	// In order to not hard code the watch logic for each opcode in the recv
	// loop the caller can use recvFunc to insert some synchronously code
	// after a response.
	recvFunc func(*request, *responseHeader, error)
}

// response carries the zxid and error result of a completed request.
type response struct {
	zxid int64
	err  error
}

// Event is delivered on the session event channel for connection state
// changes and on watcher channels for node changes.
type Event struct {
	Type   EventType
	State  State
	Path   string // For non-session events, the path of the watched node.
	Err    error
	Server string // For connection events
}

// HostProvider is used to represent a set of hosts a ZooKeeper client should connect to.
// It is an analog of the Java equivalent:
// http://svn.apache.org/viewvc/zookeeper/trunk/src/java/main/org/apache/zookeeper/client/HostProvider.java?view=markup
type HostProvider interface {
	// Init is called first, with the servers specified in the connection string.
	Init(servers []string) error
	// Len returns the number of servers.
	Len() int
	// Next returns the next server to connect to. retryStart will be true if we've looped through
	// all known servers without Connected() being called.
	Next() (server string, retryStart bool)
	// Notify the HostProvider of a successful connection.
	Connected()
}
// ConnectWithDialer establishes a new connection to a pool of zookeeper servers
// using a custom Dialer. See Connect for further information about session timeout.
// This method is deprecated and provided for compatibility: use the WithDialer option instead.
func ConnectWithDialer(servers []string, sessionTimeout time.Duration, dialer Dialer) (*Conn, <-chan Event, error) {
	return Connect(servers, sessionTimeout, WithDialer(dialer))
}

// Connect establishes a new connection to a pool of zookeeper
// servers. The provided session timeout sets the amount of time for which
// a session is considered valid after losing connection to a server. Within
// the session timeout it's possible to reestablish a connection to a different
// server and keep the same session. This means any ephemeral nodes and
// watches are maintained.
func Connect(servers []string, sessionTimeout time.Duration, options ...connOption) (*Conn, <-chan Event, error) {
	if len(servers) == 0 {
		return nil, nil, errors.New("zk: server list must not be empty")
	}
	// Append the default port to addresses that lack one.
	srvs := make([]string, len(servers))
	for i, addr := range servers {
		if strings.Contains(addr, ":") {
			srvs[i] = addr
		} else {
			srvs[i] = addr + ":" + strconv.Itoa(DefaultPort)
		}
	}
	// Randomize the order of the servers to avoid creating hotspots
	stringShuffle(srvs)
	ec := make(chan Event, eventChanSize)
	conn := &Conn{
		dialer:         net.DialTimeout,
		hostProvider:   &DNSHostProvider{},
		conn:           nil,
		state:          StateDisconnected,
		eventChan:      ec,
		shouldQuit:     make(chan struct{}),
		connectTimeout: 1 * time.Second,
		sendChan:       make(chan *request, sendChanSize),
		requests:       make(map[int32]*request),
		watchers:       make(map[watchPathType][]chan Event),
		passwd:         emptyPassword,
		logger:         DefaultLogger,
		// Debug
		reconnectDelay: 0,
	}
	// Set provided options.
	for _, option := range options {
		option(conn)
	}
	if err := conn.hostProvider.Init(srvs); err != nil {
		return nil, nil, err
	}
	conn.setTimeouts(int32(sessionTimeout / time.Millisecond))
	// The main loop runs until Close; on exit it fails all pending requests
	// and watchers and closes the event channel.
	go func() {
		conn.loop()
		conn.flushRequests(ErrClosing)
		conn.invalidateWatches(ErrClosing)
		close(conn.eventChan)
	}()
	return conn, ec, nil
}
// WithDialer returns a connection option specifying a non-default Dialer.
func WithDialer(dialer Dialer) connOption {
	return func(c *Conn) {
		c.dialer = dialer
	}
}

// WithHostProvider returns a connection option specifying a non-default HostProvider.
func WithHostProvider(hostProvider HostProvider) connOption {
	return func(c *Conn) {
		c.hostProvider = hostProvider
	}
}

// Close tells the connection to quit and waits up to one second for the
// close request to complete.
func (c *Conn) Close() {
	close(c.shouldQuit)
	select {
	case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil):
	case <-time.After(time.Second):
	}
}

// State returns the current state of the connection.
func (c *Conn) State() State {
	return State(atomic.LoadInt32((*int32)(&c.state)))
}

// SessionID returns the current session id of the connection.
func (c *Conn) SessionID() int64 {
	return atomic.LoadInt64(&c.sessionID)
}

// SetLogger sets the logger to be used for printing errors.
// Logger is an interface provided by this package.
func (c *Conn) SetLogger(l Logger) {
	c.logger = l
}

// setTimeouts derives the receive timeout (2/3 of the session timeout) and
// the ping interval (half the receive timeout) from the session timeout.
func (c *Conn) setTimeouts(sessionTimeoutMs int32) {
	c.sessionTimeoutMs = sessionTimeoutMs
	sessionTimeout := time.Duration(sessionTimeoutMs) * time.Millisecond
	c.recvTimeout = sessionTimeout * 2 / 3
	c.pingInterval = c.recvTimeout / 2
}

// setState atomically updates the connection state and emits a session
// event. The send is non-blocking: a full event channel drops the event.
func (c *Conn) setState(state State) {
	atomic.StoreInt32((*int32)(&c.state), int32(state))
	select {
	case c.eventChan <- Event{Type: EventSession, State: state, Server: c.Server()}:
	default:
		// panic("zk: event channel full - it must be monitored and never allowed to be full")
	}
}
// connect cycles through the host provider until a TCP connection is
// established, blocking until success. It returns ErrClosing only if Close
// is called while waiting between retry rounds.
func (c *Conn) connect() error {
	var retryStart bool
	for {
		c.serverMu.Lock()
		c.server, retryStart = c.hostProvider.Next()
		c.serverMu.Unlock()
		c.setState(StateConnecting)
		// retryStart means we tried every known server; pause one second
		// before starting another round (or abort if Close was called).
		if retryStart {
			c.flushUnsentRequests(ErrNoServer)
			select {
			case <-time.After(time.Second):
				// pass
			case <-c.shouldQuit:
				c.setState(StateDisconnected)
				c.flushUnsentRequests(ErrClosing)
				return ErrClosing
			}
		}
		zkConn, err := c.dialer("tcp", c.Server(), c.connectTimeout)
		if err == nil {
			c.conn = zkConn
			c.setState(StateConnected)
			c.logger.Printf("Connected to %s", c.Server())
			return nil
		}
		c.logger.Printf("Failed to connect to %s: %+v", c.Server(), err)
	}
}
// loop is the connection's main goroutine: connect, authenticate, run the
// send/recv loops until the connection drops, then repeat until Close.
func (c *Conn) loop() {
	for {
		if err := c.connect(); err != nil {
			// c.Close() was called
			return
		}
		err := c.authenticate()
		switch {
		case err == ErrSessionExpired:
			c.logger.Printf("Authentication failed: %s", err)
			c.invalidateWatches(err)
		case err != nil && c.conn != nil:
			c.logger.Printf("Authentication failed: %s", err)
			c.conn.Close()
		case err == nil:
			c.logger.Printf("Authenticated: id=%d, timeout=%d", c.sessionID, c.sessionTimeoutMs)
			c.hostProvider.Connected()        // mark success
			closeChan := make(chan struct{}) // channel to tell send loop stop
			var wg sync.WaitGroup
			wg.Add(1)
			go func() {
				// The send loop exits on closeChan; closing the socket then
				// forces the recv loop to exit too.
				err := c.sendLoop(c.conn, closeChan)
				c.logger.Printf("Send loop terminated: err=%v", err)
				c.conn.Close() // causes recv loop to EOF/exit
				wg.Done()
			}()
			wg.Add(1)
			go func() {
				err := c.recvLoop(c.conn)
				c.logger.Printf("Recv loop terminated: err=%v", err)
				if err == nil {
					panic("zk: recvLoop should never return nil error")
				}
				close(closeChan) // tell send loop to exit
				wg.Done()
			}()
			// Re-register existing watches with the new server.
			c.sendSetWatches()
			wg.Wait()
		}
		c.setState(StateDisconnected)
		select {
		case <-c.shouldQuit:
			c.flushRequests(ErrClosing)
			return
		default:
		}
		if err != ErrSessionExpired {
			err = ErrConnectionClosed
		}
		c.flushRequests(err)
		// Optional pause between reconnect attempts (used by unit tests).
		if c.reconnectDelay > 0 {
			select {
			case <-c.shouldQuit:
				return
			case <-time.After(c.reconnectDelay):
			}
		}
	}
}
// flushUnsentRequests drains any requests still queued on sendChan,
// failing each one with the supplied error, and returns as soon as
// the channel is empty.
func (c *Conn) flushUnsentRequests(err error) {
	for {
		select {
		case req := <-c.sendChan:
			req.recvChan <- response{-1, err}
		default:
			return
		}
	}
}
// flushRequests fails every in-flight request with the supplied error
// and resets the pending-request map.
func (c *Conn) flushRequests(err error) {
	c.requestsLock.Lock()
	defer c.requestsLock.Unlock()
	for _, req := range c.requests {
		req.recvChan <- response{-1, err}
	}
	c.requests = make(map[int32]*request)
}
// invalidateWatches delivers an EventNotWatching event carrying err to
// every registered watcher, closes the watch channels, and resets the
// watcher map.
func (c *Conn) invalidateWatches(err error) {
	c.watchersLock.Lock()
	defer c.watchersLock.Unlock()
	// BUGFIX: the original guard was `len(c.watchers) >= 0`, which is
	// always true. Use a real empty check and return early instead.
	if len(c.watchers) == 0 {
		return
	}
	for pathType, watchers := range c.watchers {
		ev := Event{Type: EventNotWatching, State: StateDisconnected, Path: pathType.path, Err: err}
		for _, ch := range watchers {
			ch <- ev
			close(ch)
		}
	}
	c.watchers = make(map[watchPathType][]chan Event)
}
// sendSetWatches re-registers all outstanding watches with the server
// after a reconnect, using a setWatches request relative to the last
// zxid this client has seen.
func (c *Conn) sendSetWatches() {
	c.watchersLock.Lock()
	defer c.watchersLock.Unlock()
	if len(c.watchers) == 0 {
		return
	}
	req := &setWatchesRequest{
		RelativeZxid: c.lastZxid,
		DataWatches:  make([]string, 0),
		ExistWatches: make([]string, 0),
		ChildWatches: make([]string, 0),
	}
	n := 0
	for pathType, watchers := range c.watchers {
		if len(watchers) == 0 {
			continue
		}
		switch pathType.wType {
		case watchTypeData:
			req.DataWatches = append(req.DataWatches, pathType.path)
		case watchTypeExist:
			req.ExistWatches = append(req.ExistWatches, pathType.path)
		case watchTypeChild:
			req.ChildWatches = append(req.ChildWatches, pathType.path)
		}
		n++
	}
	if n == 0 {
		return
	}
	// The request is issued from a fresh goroutine because this method
	// runs on the connection loop, which must carry on to service the
	// very request/response traffic the call depends on.
	go func() {
		res := &setWatchesResponse{}
		_, err := c.request(opSetWatches, req, res, nil)
		if err != nil {
			c.logger.Printf("Failed to set previous watches: %s", err.Error())
		}
	}()
}
// authenticate performs the ZooKeeper session handshake on the freshly
// dialed connection: it sends a connectRequest carrying the current
// session id/password and decodes the connectResponse. On a rejected
// (zero) session id it resets session state and returns
// ErrSessionExpired; on success it stores the negotiated session id,
// timeout and password and moves the state to StateHasSession.
func (c *Conn) authenticate() error {
	buf := make([]byte, 256)
	// Encode and send a connect request.
	n, err := encodePacket(buf[4:], &connectRequest{
		ProtocolVersion: protocolVersion,
		LastZxidSeen:    c.lastZxid,
		TimeOut:         c.sessionTimeoutMs,
		// BUGFIX: read the session id atomically via SessionID() —
		// it is stored with atomic.StoreInt64 below and may be read
		// concurrently by user goroutines.
		SessionID: c.SessionID(),
		Passwd:    c.passwd,
	})
	if err != nil {
		return err
	}
	// First four bytes carry the packet length, big-endian.
	binary.BigEndian.PutUint32(buf[:4], uint32(n))
	c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10))
	_, err = c.conn.Write(buf[:n+4])
	c.conn.SetWriteDeadline(time.Time{})
	if err != nil {
		return err
	}
	// Receive and decode a connect response.
	c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10))
	_, err = io.ReadFull(c.conn, buf[:4])
	c.conn.SetReadDeadline(time.Time{})
	if err != nil {
		return err
	}
	blen := int(binary.BigEndian.Uint32(buf[:4]))
	if cap(buf) < blen {
		buf = make([]byte, blen)
	}
	_, err = io.ReadFull(c.conn, buf[:blen])
	if err != nil {
		return err
	}
	r := connectResponse{}
	_, err = decodePacket(buf[:blen], &r)
	if err != nil {
		return err
	}
	if r.SessionID == 0 {
		// Server rejected the old session: clear all session state.
		atomic.StoreInt64(&c.sessionID, int64(0))
		c.passwd = emptyPassword
		c.lastZxid = 0
		c.setState(StateExpired)
		return ErrSessionExpired
	}
	atomic.StoreInt64(&c.sessionID, r.SessionID)
	c.setTimeouts(r.TimeOut)
	c.passwd = r.Passwd
	c.setState(StateHasSession)
	return nil
}
// sendLoop serializes queued requests onto the wire and emits periodic
// pings. It returns when closeChan is closed (clean shutdown) or on a
// write error, in which case it closes the connection.
func (c *Conn) sendLoop(conn net.Conn, closeChan <-chan struct{}) error {
	pingTicker := time.NewTicker(c.pingInterval)
	defer pingTicker.Stop()
	buf := make([]byte, bufferSize)
	for {
		select {
		case req := <-c.sendChan:
			header := &requestHeader{req.xid, req.opcode}
			n, err := encodePacket(buf[4:], header)
			if err != nil {
				req.recvChan <- response{-1, err}
				continue
			}
			n2, err := encodePacket(buf[4+n:], req.pkt)
			if err != nil {
				req.recvChan <- response{-1, err}
				continue
			}
			n += n2
			binary.BigEndian.PutUint32(buf[:4], uint32(n))
			// Register the request under the lock, but first re-check
			// closeChan so a request is never registered after the recv
			// loop has died (it would never be flushed or answered).
			c.requestsLock.Lock()
			select {
			case <-closeChan:
				req.recvChan <- response{-1, ErrConnectionClosed}
				c.requestsLock.Unlock()
				return ErrConnectionClosed
			default:
			}
			c.requests[req.xid] = req
			c.requestsLock.Unlock()
			conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
			_, err = conn.Write(buf[:n+4])
			conn.SetWriteDeadline(time.Time{})
			if err != nil {
				req.recvChan <- response{-1, err}
				conn.Close()
				return err
			}
		case <-pingTicker.C:
			// Xid -2 marks a ping; the recv loop ignores its response.
			n, err := encodePacket(buf[4:], &requestHeader{Xid: -2, Opcode: opPing})
			if err != nil {
				panic("zk: opPing should never fail to serialize")
			}
			binary.BigEndian.PutUint32(buf[:4], uint32(n))
			conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
			_, err = conn.Write(buf[:n+4])
			conn.SetWriteDeadline(time.Time{})
			if err != nil {
				conn.Close()
				return err
			}
		case <-closeChan:
			return nil
		}
	}
}
// recvLoop reads and demultiplexes packets from the server: watch
// events (xid -1) are fanned out to registered watchers, pings (xid -2)
// are ignored, and everything else is matched to a pending request by
// xid. It never returns nil; io.EOF is returned after a close response.
func (c *Conn) recvLoop(conn net.Conn) error {
	buf := make([]byte, bufferSize)
	for {
		// package length
		conn.SetReadDeadline(time.Now().Add(c.recvTimeout))
		_, err := io.ReadFull(conn, buf[:4])
		if err != nil {
			return err
		}
		blen := int(binary.BigEndian.Uint32(buf[:4]))
		if cap(buf) < blen {
			buf = make([]byte, blen)
		}
		_, err = io.ReadFull(conn, buf[:blen])
		conn.SetReadDeadline(time.Time{})
		if err != nil {
			return err
		}
		// The response header occupies the first 16 bytes of the packet.
		res := responseHeader{}
		_, err = decodePacket(buf[:16], &res)
		if err != nil {
			return err
		}
		if res.Xid == -1 {
			// Server-initiated watch event.
			res := &watcherEvent{}
			_, err := decodePacket(buf[16:blen], res)
			if err != nil {
				return err
			}
			ev := Event{
				Type:  res.Type,
				State: res.State,
				Path:  res.Path,
				Err:   nil,
			}
			// Non-blocking publish to the global event channel.
			select {
			case c.eventChan <- ev:
			default:
			}
			// Map the event type to the watch types it fires.
			wTypes := make([]watchType, 0, 2)
			switch res.Type {
			case EventNodeCreated:
				wTypes = append(wTypes, watchTypeExist)
			case EventNodeDeleted, EventNodeDataChanged:
				wTypes = append(wTypes, watchTypeExist, watchTypeData, watchTypeChild)
			case EventNodeChildrenChanged:
				wTypes = append(wTypes, watchTypeChild)
			}
			// Watches are one-shot: deliver, close, and deregister.
			c.watchersLock.Lock()
			for _, t := range wTypes {
				wpt := watchPathType{res.Path, t}
				if watchers := c.watchers[wpt]; watchers != nil && len(watchers) > 0 {
					for _, ch := range watchers {
						ch <- ev
						close(ch)
					}
					delete(c.watchers, wpt)
				}
			}
			c.watchersLock.Unlock()
		} else if res.Xid == -2 {
			// Ping response. Ignore.
		} else if res.Xid < 0 {
			c.logger.Printf("Xid < 0 (%d) but not ping or watcher event", res.Xid)
		} else {
			if res.Zxid > 0 {
				c.lastZxid = res.Zxid
			}
			c.requestsLock.Lock()
			req, ok := c.requests[res.Xid]
			if ok {
				delete(c.requests, res.Xid)
			}
			c.requestsLock.Unlock()
			if !ok {
				c.logger.Printf("Response for unknown request with xid %d", res.Xid)
			} else {
				if res.Err != 0 {
					err = res.Err.toError()
				} else {
					_, err = decodePacket(buf[16:blen], req.recvStruct)
				}
				// recvFunc lets callers register watches synchronously
				// with the response (see the request struct comments).
				if req.recvFunc != nil {
					req.recvFunc(req, &res, err)
				}
				req.recvChan <- response{res.Zxid, err}
				if req.opcode == opClose {
					return io.EOF
				}
			}
		}
	}
}
// nextXid returns the next transaction id, masked to 31 bits so the
// result is always a non-negative int32 (negative xids are reserved
// for pings and watch events).
func (c *Conn) nextXid() int32 {
	return int32(atomic.AddUint32(&c.xid, 1) & 0x7fffffff)
}
// addWatcher registers a new one-shot watch channel for the given path
// and watch type and returns it to the caller.
func (c *Conn) addWatcher(path string, watchType watchType) <-chan Event {
	c.watchersLock.Lock()
	defer c.watchersLock.Unlock()
	key := watchPathType{path, watchType}
	eventChan := make(chan Event, 1)
	c.watchers[key] = append(c.watchers[key], eventChan)
	return eventChan
}
// queueRequest assigns the next xid, wraps the call into a request and
// queues it on sendChan. The returned buffered channel receives exactly
// one response when the server replies or the request fails.
func (c *Conn) queueRequest(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) <-chan response {
	rq := &request{
		xid:        c.nextXid(),
		opcode:     opcode,
		pkt:        req,
		recvStruct: res,
		recvChan:   make(chan response, 1),
		recvFunc:   recvFunc,
	}
	c.sendChan <- rq
	return rq.recvChan
}
// request performs a synchronous round-trip: it queues the request and
// blocks until the response (or error) arrives.
func (c *Conn) request(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) (int64, error) {
	r := <-c.queueRequest(opcode, req, res, recvFunc)
	return r.zxid, r.err
}
// AddAuth adds an authentication credential (scheme plus auth bytes)
// to the current session.
func (c *Conn) AddAuth(scheme string, auth []byte) error {
	_, err := c.request(opSetAuth, &setAuthRequest{Type: 0, Scheme: scheme, Auth: auth}, &setAuthResponse{}, nil)
	return err
}
// Children returns the names of the children of the node at path,
// along with the node's Stat.
func (c *Conn) Children(path string) ([]string, *Stat, error) {
	res := &getChildren2Response{}
	_, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: false}, res, nil)
	return res.Children, &res.Stat, err
}
// ChildrenW is Children plus a child watch: the returned channel fires
// once when the node's children change. The watcher is registered via
// recvFunc so it is added synchronously with the server's response.
func (c *Conn) ChildrenW(path string) ([]string, *Stat, <-chan Event, error) {
	var ech <-chan Event
	res := &getChildren2Response{}
	_, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
		if err == nil {
			ech = c.addWatcher(path, watchTypeChild)
		}
	})
	if err != nil {
		return nil, nil, nil, err
	}
	return res.Children, &res.Stat, ech, err
}
// Get returns the data stored at the node at path, along with its Stat.
func (c *Conn) Get(path string) ([]byte, *Stat, error) {
	res := &getDataResponse{}
	_, err := c.request(opGetData, &getDataRequest{Path: path, Watch: false}, res, nil)
	return res.Data, &res.Stat, err
}
// GetW returns the contents of a znode and sets a data watch; the
// returned channel fires once when the node's data changes or the node
// is deleted. The watcher is registered via recvFunc so it is added
// synchronously with the server's response.
func (c *Conn) GetW(path string) ([]byte, *Stat, <-chan Event, error) {
	var ech <-chan Event
	res := &getDataResponse{}
	_, err := c.request(opGetData, &getDataRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
		if err == nil {
			ech = c.addWatcher(path, watchTypeData)
		}
	})
	if err != nil {
		return nil, nil, nil, err
	}
	return res.Data, &res.Stat, ech, err
}
// Set writes data to the node at path if its current version matches
// the given version (-1 matches any version). Returns the new Stat.
func (c *Conn) Set(path string, data []byte, version int32) (*Stat, error) {
	if path == "" {
		return nil, ErrInvalidPath
	}
	res := &setDataResponse{}
	_, err := c.request(opSetData, &SetDataRequest{path, data, version}, res, nil)
	return &res.Stat, err
}
// Create creates a node at path carrying data, with the given flags
// (ephemeral/sequence) and ACL. It returns the server-assigned path,
// which differs from the requested one for sequential nodes.
func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string, error) {
	res := &createResponse{}
	_, err := c.request(opCreate, &CreateRequest{path, data, acl, flags}, res, nil)
	return res.Path, err
}
// CreateProtectedEphemeralSequential fixes a race condition if the server crashes
// after it creates the node. On reconnect the session may still be valid so the
// ephemeral node still exists. Therefore, on reconnect we need to check if a node
// with a GUID generated on create exists.
func (c *Conn) CreateProtectedEphemeralSequential(path string, data []byte, acl []ACL) (string, error) {
	// Embed a random 16-byte GUID (hex-encoded, 32 chars) into the node
	// name so a previously created node can be recognized on reconnect.
	var guid [16]byte
	_, err := io.ReadFull(rand.Reader, guid[:16])
	if err != nil {
		return "", err
	}
	guidStr := fmt.Sprintf("%x", guid)
	parts := strings.Split(path, "/")
	parts[len(parts)-1] = fmt.Sprintf("%s%s-%s", protectedPrefix, guidStr, parts[len(parts)-1])
	rootPath := strings.Join(parts[:len(parts)-1], "/")
	protectedPath := strings.Join(parts, "/")
	var newPath string
	// Up to three attempts; each failure mode is handled separately.
	for i := 0; i < 3; i++ {
		newPath, err = c.Create(protectedPath, data, FlagEphemeral|FlagSequence, acl)
		switch err {
		case ErrSessionExpired:
			// No need to search for the node since it can't exist. Just try again.
		case ErrConnectionClosed:
			// The create may have succeeded before the connection died:
			// scan the parent's children for our GUID.
			children, _, err := c.Children(rootPath)
			if err != nil {
				return "", err
			}
			for _, p := range children {
				parts := strings.Split(p, "/")
				if pth := parts[len(parts)-1]; strings.HasPrefix(pth, protectedPrefix) {
					// NOTE(review): this slice assumes every protected
					// child name has at least 32 chars after the prefix;
					// a shorter foreign name would panic — confirm.
					if g := pth[len(protectedPrefix) : len(protectedPrefix)+32]; g == guidStr {
						return rootPath + "/" + p, nil
					}
				}
			}
		case nil:
			return newPath, nil
		default:
			return "", err
		}
	}
	return "", err
}
// Delete removes the node at path if its current version matches the
// given version (-1 matches any version).
func (c *Conn) Delete(path string, version int32) error {
	_, err := c.request(opDelete, &DeleteRequest{path, version}, &deleteResponse{}, nil)
	return err
}
// Exists reports whether a node exists at path, along with its Stat.
// An ErrNoNode reply from the server is translated into (false, nil)
// rather than being surfaced as an error.
func (c *Conn) Exists(path string) (bool, *Stat, error) {
	res := &existsResponse{}
	_, err := c.request(opExists, &existsRequest{Path: path, Watch: false}, res, nil)
	if err == ErrNoNode {
		return false, &res.Stat, nil
	}
	return true, &res.Stat, err
}
// ExistsW is Exists plus a watch. If the node exists a data watch is
// set; if it does not, an exist watch is set so the channel fires when
// the node is created. The watcher is registered via recvFunc so it is
// added synchronously with the server's response.
func (c *Conn) ExistsW(path string) (bool, *Stat, <-chan Event, error) {
	var ech <-chan Event
	res := &existsResponse{}
	_, err := c.request(opExists, &existsRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
		if err == nil {
			ech = c.addWatcher(path, watchTypeData)
		} else if err == ErrNoNode {
			ech = c.addWatcher(path, watchTypeExist)
		}
	})
	exists := true
	if err == ErrNoNode {
		exists = false
		err = nil
	}
	if err != nil {
		return false, nil, nil, err
	}
	return exists, &res.Stat, ech, err
}
// GetACL returns the ACL list of the node at path, along with its Stat.
func (c *Conn) GetACL(path string) ([]ACL, *Stat, error) {
	res := &getAclResponse{}
	_, err := c.request(opGetAcl, &getAclRequest{Path: path}, res, nil)
	return res.Acl, &res.Stat, err
}
// SetACL replaces the ACL list of the node at path if its current
// version matches the given version (-1 matches any version).
func (c *Conn) SetACL(path string, acl []ACL, version int32) (*Stat, error) {
	res := &setAclResponse{}
	_, err := c.request(opSetAcl, &setAclRequest{Path: path, Acl: acl, Version: version}, res, nil)
	return &res.Stat, err
}
// Sync flushes the leader's channel to the connected server so that
// subsequent reads at path see all prior writes. Returns the path.
func (c *Conn) Sync(path string) (string, error) {
	res := &syncResponse{}
	_, err := c.request(opSync, &syncRequest{Path: path}, res, nil)
	return res.Path, err
}
// MultiResponse is the per-operation result of a Multi call: Stat for
// set-data operations, String (the created path) for creates.
type MultiResponse struct {
	Stat   *Stat
	String string
}
// Multi executes multiple ZooKeeper operations or none of them. The provided
// ops must be one of *CreateRequest, *DeleteRequest, *SetDataRequest, or
// *CheckVersionRequest.
func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) {
	req := &multiRequest{
		Ops: make([]multiRequestOp, 0, len(ops)),
		// The DoneHeader terminates the op list on the wire.
		DoneHeader: multiHeader{Type: -1, Done: true, Err: -1},
	}
	// Map each request struct to its opcode; reject anything else.
	for _, op := range ops {
		var opCode int32
		switch op.(type) {
		case *CreateRequest:
			opCode = opCreate
		case *SetDataRequest:
			opCode = opSetData
		case *DeleteRequest:
			opCode = opDelete
		case *CheckVersionRequest:
			opCode = opCheck
		default:
			return nil, fmt.Errorf("unknown operation type %T", op)
		}
		req.Ops = append(req.Ops, multiRequestOp{multiHeader{opCode, false, -1}, op})
	}
	res := &multiResponse{}
	_, err := c.request(opMulti, req, res, nil)
	mr := make([]MultiResponse, len(res.Ops))
	for i, op := range res.Ops {
		mr[i] = MultiResponse{Stat: op.Stat, String: op.String}
	}
	return mr, err
}
// Server returns the current or last-connected server name.
// serverMu guards against a concurrent update by connect().
func (c *Conn) Server() string {
	c.serverMu.Lock()
	defer c.serverMu.Unlock()
	return c.server
}
// Make all access to session ID atomic
// Package zk is a native Go client library for the ZooKeeper orchestration service.
package zk
/*
TODO:
* make sure a ping response comes back in a reasonable time
Possible watcher events:
* Event{Type: EventNotWatching, State: StateDisconnected, Path: path, Err: err}
*/
import (
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// ErrNoServer indicates that an operation cannot be completed
// because attempts to connect to all servers in the list failed.
var ErrNoServer = errors.New("zk: could not connect to a server")
// ErrInvalidPath indicates that an operation was being attempted on
// an invalid path. (e.g. empty path)
var ErrInvalidPath = errors.New("zk: invalid path")
// DefaultLogger uses the stdlib log package for logging.
var DefaultLogger Logger = defaultLogger{}
const (
bufferSize = 1536 * 1024
eventChanSize = 6
sendChanSize = 16
protectedPrefix = "_c_"
)
type watchType int
const (
watchTypeData = iota
watchTypeExist = iota
watchTypeChild = iota
)
type watchPathType struct {
path string
wType watchType
}
type Dialer func(network, address string, timeout time.Duration) (net.Conn, error)
// Logger is an interface that can be implemented to provide custom log output.
type Logger interface {
Printf(string, ...interface{})
}
// Conn is a ZooKeeper client connection. Its exported methods are safe
// for concurrent use; internal fields are owned by the connection loop
// unless a lock or atomic access is noted.
type Conn struct {
	lastZxid         int64
	sessionID        int64 // accessed atomically; see SessionID()
	state            State // must be 32-bit aligned
	xid              uint32
	sessionTimeoutMs int32 // session timeout in milliseconds
	passwd           []byte
	dialer           Dialer
	hostProvider     HostProvider
	serverMu         sync.Mutex // protects server
	server           string     // remember the address/port of the current server
	conn             net.Conn
	eventChan        chan Event
	shouldQuit       chan struct{} // closed by Close() to stop all loops
	pingInterval     time.Duration
	recvTimeout      time.Duration
	connectTimeout   time.Duration
	sendChan         chan *request          // queued outbound requests
	requests         map[int32]*request     // Xid -> pending request
	requestsLock     sync.Mutex
	watchers         map[watchPathType][]chan Event // guarded by watchersLock
	watchersLock     sync.Mutex
	// Debug (used by unit tests)
	reconnectDelay time.Duration
	logger         Logger
}
// connOption represents a connection option.
type connOption func(c *Conn)
type request struct {
xid int32
opcode int32
pkt interface{}
recvStruct interface{}
recvChan chan response
// Because sending and receiving happen in separate go routines, there's
// a possible race condition when creating watches from outside the read
// loop. We must ensure that a watcher gets added to the list synchronously
// with the response from the server on any request that creates a watch.
// In order to not hard code the watch logic for each opcode in the recv
// loop the caller can use recvFunc to insert some synchronously code
// after a response.
recvFunc func(*request, *responseHeader, error)
}
type response struct {
zxid int64
err error
}
type Event struct {
Type EventType
State State
Path string // For non-session events, the path of the watched node.
Err error
Server string // For connection events
}
// HostProvider is used to represent a set of hosts a ZooKeeper client should connect to.
// It is an analog of the Java equivalent:
// http://svn.apache.org/viewvc/zookeeper/trunk/src/java/main/org/apache/zookeeper/client/HostProvider.java?view=markup
type HostProvider interface {
// Init is called first, with the servers specified in the connection string.
Init(servers []string) error
// Len returns the number of servers.
Len() int
// Next returns the next server to connect to. retryStart will be true if we've looped through
// all known servers without Connected() being called.
Next() (server string, retryStart bool)
// Notify the HostProvider of a successful connection.
Connected()
}
// ConnectWithDialer establishes a new connection to a pool of zookeeper servers
// using a custom Dialer. See Connect for further information about session timeout.
// This method is deprecated and provided for compatibility: use the WithDialer option instead.
func ConnectWithDialer(servers []string, sessionTimeout time.Duration, dialer Dialer) (*Conn, <-chan Event, error) {
return Connect(servers, sessionTimeout, WithDialer(dialer))
}
// Connect establishes a new connection to a pool of zookeeper
// servers. The provided session timeout sets the amount of time for which
// a session is considered valid after losing connection to a server. Within
// the session timeout it's possible to reestablish a connection to a different
// server and keep the same session. This means any ephemeral nodes and
// watches are maintained.
func Connect(servers []string, sessionTimeout time.Duration, options ...connOption) (*Conn, <-chan Event, error) {
	if len(servers) == 0 {
		return nil, nil, errors.New("zk: server list must not be empty")
	}
	// Append the default port to any address given without one.
	srvs := make([]string, len(servers))
	for i, addr := range servers {
		if strings.Contains(addr, ":") {
			srvs[i] = addr
		} else {
			srvs[i] = addr + ":" + strconv.Itoa(DefaultPort)
		}
	}
	// Randomize the order of the servers to avoid creating hotspots
	stringShuffle(srvs)
	ec := make(chan Event, eventChanSize)
	conn := &Conn{
		dialer:         net.DialTimeout,
		hostProvider:   &DNSHostProvider{},
		conn:           nil,
		state:          StateDisconnected,
		eventChan:      ec,
		shouldQuit:     make(chan struct{}),
		connectTimeout: 1 * time.Second,
		sendChan:       make(chan *request, sendChanSize),
		requests:       make(map[int32]*request),
		watchers:       make(map[watchPathType][]chan Event),
		passwd:         emptyPassword,
		logger:         DefaultLogger,
		// Debug
		reconnectDelay: 0,
	}
	// Set provided options.
	for _, option := range options {
		option(conn)
	}
	if err := conn.hostProvider.Init(srvs); err != nil {
		return nil, nil, err
	}
	conn.setTimeouts(int32(sessionTimeout / time.Millisecond))
	// The connection loop owns reconnection; when it exits (Close),
	// flush everything and close the event channel.
	go func() {
		conn.loop()
		conn.flushRequests(ErrClosing)
		conn.invalidateWatches(ErrClosing)
		close(conn.eventChan)
	}()
	return conn, ec, nil
}
// WithDialer returns a connection option specifying a non-default Dialer.
func WithDialer(dialer Dialer) connOption {
return func(c *Conn) {
c.dialer = dialer
}
}
// WithHostProvider returns a connection option specifying a non-default HostProvider.
func WithHostProvider(hostProvider HostProvider) connOption {
return func(c *Conn) {
c.hostProvider = hostProvider
}
}
// Close signals the connection loop to shut down, then attempts a
// clean close-session handshake, giving up after one second.
func (c *Conn) Close() {
	close(c.shouldQuit)
	select {
	case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil):
	case <-time.After(time.Second):
	}
}
// State returns the current state of the connection.
func (c *Conn) State() State {
return State(atomic.LoadInt32((*int32)(&c.state)))
}
// SessionID returns the current session id of the connection.
// The load is atomic because authenticate() stores the id from the
// connection goroutine while callers may read it concurrently.
func (c *Conn) SessionID() int64 {
	return atomic.LoadInt64(&c.sessionID)
}
// SetLogger sets the logger to be used for printing errors.
// Logger is an interface provided by this package.
func (c *Conn) SetLogger(l Logger) {
c.logger = l
}
func (c *Conn) setTimeouts(sessionTimeoutMs int32) {
c.sessionTimeoutMs = sessionTimeoutMs
sessionTimeout := time.Duration(sessionTimeoutMs) * time.Millisecond
c.recvTimeout = sessionTimeout * 2 / 3
c.pingInterval = c.recvTimeout / 2
}
func (c *Conn) setState(state State) {
atomic.StoreInt32((*int32)(&c.state), int32(state))
select {
case c.eventChan <- Event{Type: EventSession, State: state, Server: c.Server()}:
default:
// panic("zk: event channel full - it must be monitored and never allowed to be full")
}
}
func (c *Conn) connect() error {
var retryStart bool
for {
c.serverMu.Lock()
c.server, retryStart = c.hostProvider.Next()
c.serverMu.Unlock()
c.setState(StateConnecting)
if retryStart {
c.flushUnsentRequests(ErrNoServer)
select {
case <-time.After(time.Second):
// pass
case <-c.shouldQuit:
c.setState(StateDisconnected)
c.flushUnsentRequests(ErrClosing)
return ErrClosing
}
}
zkConn, err := c.dialer("tcp", c.Server(), c.connectTimeout)
if err == nil {
c.conn = zkConn
c.setState(StateConnected)
c.logger.Printf("Connected to %s", c.Server())
return nil
}
c.logger.Printf("Failed to connect to %s: %+v", c.Server(), err)
}
}
func (c *Conn) loop() {
for {
if err := c.connect(); err != nil {
// c.Close() was called
return
}
err := c.authenticate()
switch {
case err == ErrSessionExpired:
c.logger.Printf("Authentication failed: %s", err)
c.invalidateWatches(err)
case err != nil && c.conn != nil:
c.logger.Printf("Authentication failed: %s", err)
c.conn.Close()
case err == nil:
c.logger.Printf("Authenticated: id=%d, timeout=%d", c.SessionID(), c.sessionTimeoutMs)
c.hostProvider.Connected() // mark success
closeChan := make(chan struct{}) // channel to tell send loop stop
var wg sync.WaitGroup
wg.Add(1)
go func() {
err := c.sendLoop(c.conn, closeChan)
c.logger.Printf("Send loop terminated: err=%v", err)
c.conn.Close() // causes recv loop to EOF/exit
wg.Done()
}()
wg.Add(1)
go func() {
err := c.recvLoop(c.conn)
c.logger.Printf("Recv loop terminated: err=%v", err)
if err == nil {
panic("zk: recvLoop should never return nil error")
}
close(closeChan) // tell send loop to exit
wg.Done()
}()
c.sendSetWatches()
wg.Wait()
}
c.setState(StateDisconnected)
select {
case <-c.shouldQuit:
c.flushRequests(ErrClosing)
return
default:
}
if err != ErrSessionExpired {
err = ErrConnectionClosed
}
c.flushRequests(err)
if c.reconnectDelay > 0 {
select {
case <-c.shouldQuit:
return
case <-time.After(c.reconnectDelay):
}
}
}
}
func (c *Conn) flushUnsentRequests(err error) {
for {
select {
default:
return
case req := <-c.sendChan:
req.recvChan <- response{-1, err}
}
}
}
// Send error to all pending requests and clear request map
func (c *Conn) flushRequests(err error) {
c.requestsLock.Lock()
for _, req := range c.requests {
req.recvChan <- response{-1, err}
}
c.requests = make(map[int32]*request)
c.requestsLock.Unlock()
}
// invalidateWatches delivers an EventNotWatching event carrying err to
// every registered watcher, closes the watch channels, and resets the
// watcher map.
func (c *Conn) invalidateWatches(err error) {
	c.watchersLock.Lock()
	defer c.watchersLock.Unlock()
	// BUGFIX: the original guard was `len(c.watchers) >= 0`, which is
	// always true. Use a real empty check and return early instead.
	if len(c.watchers) == 0 {
		return
	}
	for pathType, watchers := range c.watchers {
		ev := Event{Type: EventNotWatching, State: StateDisconnected, Path: pathType.path, Err: err}
		for _, ch := range watchers {
			ch <- ev
			close(ch)
		}
	}
	c.watchers = make(map[watchPathType][]chan Event)
}
func (c *Conn) sendSetWatches() {
c.watchersLock.Lock()
defer c.watchersLock.Unlock()
if len(c.watchers) == 0 {
return
}
req := &setWatchesRequest{
RelativeZxid: c.lastZxid,
DataWatches: make([]string, 0),
ExistWatches: make([]string, 0),
ChildWatches: make([]string, 0),
}
n := 0
for pathType, watchers := range c.watchers {
if len(watchers) == 0 {
continue
}
switch pathType.wType {
case watchTypeData:
req.DataWatches = append(req.DataWatches, pathType.path)
case watchTypeExist:
req.ExistWatches = append(req.ExistWatches, pathType.path)
case watchTypeChild:
req.ChildWatches = append(req.ChildWatches, pathType.path)
}
n++
}
if n == 0 {
return
}
go func() {
res := &setWatchesResponse{}
_, err := c.request(opSetWatches, req, res, nil)
if err != nil {
c.logger.Printf("Failed to set previous watches: %s", err.Error())
}
}()
}
func (c *Conn) authenticate() error {
buf := make([]byte, 256)
// Encode and send a connect request.
n, err := encodePacket(buf[4:], &connectRequest{
ProtocolVersion: protocolVersion,
LastZxidSeen: c.lastZxid,
TimeOut: c.sessionTimeoutMs,
SessionID: c.SessionID(),
Passwd: c.passwd,
})
if err != nil {
return err
}
binary.BigEndian.PutUint32(buf[:4], uint32(n))
c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10))
_, err = c.conn.Write(buf[:n+4])
c.conn.SetWriteDeadline(time.Time{})
if err != nil {
return err
}
// Receive and decode a connect response.
c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10))
_, err = io.ReadFull(c.conn, buf[:4])
c.conn.SetReadDeadline(time.Time{})
if err != nil {
return err
}
blen := int(binary.BigEndian.Uint32(buf[:4]))
if cap(buf) < blen {
buf = make([]byte, blen)
}
_, err = io.ReadFull(c.conn, buf[:blen])
if err != nil {
return err
}
r := connectResponse{}
_, err = decodePacket(buf[:blen], &r)
if err != nil {
return err
}
if r.SessionID == 0 {
atomic.StoreInt64(&c.sessionID, int64(0))
c.passwd = emptyPassword
c.lastZxid = 0
c.setState(StateExpired)
return ErrSessionExpired
}
atomic.StoreInt64(&c.sessionID, r.SessionID)
c.setTimeouts(r.TimeOut)
c.passwd = r.Passwd
c.setState(StateHasSession)
return nil
}
func (c *Conn) sendLoop(conn net.Conn, closeChan <-chan struct{}) error {
pingTicker := time.NewTicker(c.pingInterval)
defer pingTicker.Stop()
buf := make([]byte, bufferSize)
for {
select {
case req := <-c.sendChan:
header := &requestHeader{req.xid, req.opcode}
n, err := encodePacket(buf[4:], header)
if err != nil {
req.recvChan <- response{-1, err}
continue
}
n2, err := encodePacket(buf[4+n:], req.pkt)
if err != nil {
req.recvChan <- response{-1, err}
continue
}
n += n2
binary.BigEndian.PutUint32(buf[:4], uint32(n))
c.requestsLock.Lock()
select {
case <-closeChan:
req.recvChan <- response{-1, ErrConnectionClosed}
c.requestsLock.Unlock()
return ErrConnectionClosed
default:
}
c.requests[req.xid] = req
c.requestsLock.Unlock()
conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
_, err = conn.Write(buf[:n+4])
conn.SetWriteDeadline(time.Time{})
if err != nil {
req.recvChan <- response{-1, err}
conn.Close()
return err
}
case <-pingTicker.C:
n, err := encodePacket(buf[4:], &requestHeader{Xid: -2, Opcode: opPing})
if err != nil {
panic("zk: opPing should never fail to serialize")
}
binary.BigEndian.PutUint32(buf[:4], uint32(n))
conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
_, err = conn.Write(buf[:n+4])
conn.SetWriteDeadline(time.Time{})
if err != nil {
conn.Close()
return err
}
case <-closeChan:
return nil
}
}
}
func (c *Conn) recvLoop(conn net.Conn) error {
buf := make([]byte, bufferSize)
for {
// package length
conn.SetReadDeadline(time.Now().Add(c.recvTimeout))
_, err := io.ReadFull(conn, buf[:4])
if err != nil {
return err
}
blen := int(binary.BigEndian.Uint32(buf[:4]))
if cap(buf) < blen {
buf = make([]byte, blen)
}
_, err = io.ReadFull(conn, buf[:blen])
conn.SetReadDeadline(time.Time{})
if err != nil {
return err
}
res := responseHeader{}
_, err = decodePacket(buf[:16], &res)
if err != nil {
return err
}
if res.Xid == -1 {
res := &watcherEvent{}
_, err := decodePacket(buf[16:blen], res)
if err != nil {
return err
}
ev := Event{
Type: res.Type,
State: res.State,
Path: res.Path,
Err: nil,
}
select {
case c.eventChan <- ev:
default:
}
wTypes := make([]watchType, 0, 2)
switch res.Type {
case EventNodeCreated:
wTypes = append(wTypes, watchTypeExist)
case EventNodeDeleted, EventNodeDataChanged:
wTypes = append(wTypes, watchTypeExist, watchTypeData, watchTypeChild)
case EventNodeChildrenChanged:
wTypes = append(wTypes, watchTypeChild)
}
c.watchersLock.Lock()
for _, t := range wTypes {
wpt := watchPathType{res.Path, t}
if watchers := c.watchers[wpt]; watchers != nil && len(watchers) > 0 {
for _, ch := range watchers {
ch <- ev
close(ch)
}
delete(c.watchers, wpt)
}
}
c.watchersLock.Unlock()
} else if res.Xid == -2 {
// Ping response. Ignore.
} else if res.Xid < 0 {
c.logger.Printf("Xid < 0 (%d) but not ping or watcher event", res.Xid)
} else {
if res.Zxid > 0 {
c.lastZxid = res.Zxid
}
c.requestsLock.Lock()
req, ok := c.requests[res.Xid]
if ok {
delete(c.requests, res.Xid)
}
c.requestsLock.Unlock()
if !ok {
c.logger.Printf("Response for unknown request with xid %d", res.Xid)
} else {
if res.Err != 0 {
err = res.Err.toError()
} else {
_, err = decodePacket(buf[16:blen], req.recvStruct)
}
if req.recvFunc != nil {
req.recvFunc(req, &res, err)
}
req.recvChan <- response{res.Zxid, err}
if req.opcode == opClose {
return io.EOF
}
}
}
}
}
func (c *Conn) nextXid() int32 {
return int32(atomic.AddUint32(&c.xid, 1) & 0x7fffffff)
}
func (c *Conn) addWatcher(path string, watchType watchType) <-chan Event {
c.watchersLock.Lock()
defer c.watchersLock.Unlock()
ch := make(chan Event, 1)
wpt := watchPathType{path, watchType}
c.watchers[wpt] = append(c.watchers[wpt], ch)
return ch
}
func (c *Conn) queueRequest(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) <-chan response {
rq := &request{
xid: c.nextXid(),
opcode: opcode,
pkt: req,
recvStruct: res,
recvChan: make(chan response, 1),
recvFunc: recvFunc,
}
c.sendChan <- rq
return rq.recvChan
}
func (c *Conn) request(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) (int64, error) {
r := <-c.queueRequest(opcode, req, res, recvFunc)
return r.zxid, r.err
}
func (c *Conn) AddAuth(scheme string, auth []byte) error {
_, err := c.request(opSetAuth, &setAuthRequest{Type: 0, Scheme: scheme, Auth: auth}, &setAuthResponse{}, nil)
return err
}
func (c *Conn) Children(path string) ([]string, *Stat, error) {
res := &getChildren2Response{}
_, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: false}, res, nil)
return res.Children, &res.Stat, err
}
func (c *Conn) ChildrenW(path string) ([]string, *Stat, <-chan Event, error) {
var ech <-chan Event
res := &getChildren2Response{}
_, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
if err == nil {
ech = c.addWatcher(path, watchTypeChild)
}
})
if err != nil {
return nil, nil, nil, err
}
return res.Children, &res.Stat, ech, err
}
func (c *Conn) Get(path string) ([]byte, *Stat, error) {
res := &getDataResponse{}
_, err := c.request(opGetData, &getDataRequest{Path: path, Watch: false}, res, nil)
return res.Data, &res.Stat, err
}
// GetW returns the contents of a znode and sets a watch
func (c *Conn) GetW(path string) ([]byte, *Stat, <-chan Event, error) {
var ech <-chan Event
res := &getDataResponse{}
_, err := c.request(opGetData, &getDataRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
if err == nil {
ech = c.addWatcher(path, watchTypeData)
}
})
if err != nil {
return nil, nil, nil, err
}
return res.Data, &res.Stat, ech, err
}
func (c *Conn) Set(path string, data []byte, version int32) (*Stat, error) {
if path == "" {
return nil, ErrInvalidPath
}
res := &setDataResponse{}
_, err := c.request(opSetData, &SetDataRequest{path, data, version}, res, nil)
return &res.Stat, err
}
func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string, error) {
res := &createResponse{}
_, err := c.request(opCreate, &CreateRequest{path, data, acl, flags}, res, nil)
return res.Path, err
}
// CreateProtectedEphemeralSequential fixes a race condition if the server crashes
// after it creates the node. On reconnect the session may still be valid so the
// ephemeral node still exists. Therefore, on reconnect we need to check if a node
// with a GUID generated on create exists.
func (c *Conn) CreateProtectedEphemeralSequential(path string, data []byte, acl []ACL) (string, error) {
	// Embed a random 128-bit GUID (32 hex chars) in the node name so a
	// previously created node can be recognized after a reconnect.
	var guid [16]byte
	_, err := io.ReadFull(rand.Reader, guid[:16])
	if err != nil {
		return "", err
	}
	guidStr := fmt.Sprintf("%x", guid)
	parts := strings.Split(path, "/")
	parts[len(parts)-1] = fmt.Sprintf("%s%s-%s", protectedPrefix, guidStr, parts[len(parts)-1])
	rootPath := strings.Join(parts[:len(parts)-1], "/")
	protectedPath := strings.Join(parts, "/")
	var newPath string
	for i := 0; i < 3; i++ {
		newPath, err = c.Create(protectedPath, data, FlagEphemeral|FlagSequence, acl)
		switch err {
		case ErrSessionExpired:
			// No need to search for the node since it can't exist. Just try again.
		case ErrConnectionClosed:
			// The create may or may not have reached the server; look for a
			// sibling carrying our GUID before retrying.
			children, _, err := c.Children(rootPath)
			if err != nil {
				return "", err
			}
			for _, p := range children {
				parts := strings.Split(p, "/")
				pth := parts[len(parts)-1]
				// Guard the length before slicing: a node that merely starts
				// with protectedPrefix but is shorter than prefix+32 hex GUID
				// chars would otherwise panic the slice expression below.
				if !strings.HasPrefix(pth, protectedPrefix) || len(pth) < len(protectedPrefix)+32 {
					continue
				}
				if g := pth[len(protectedPrefix) : len(protectedPrefix)+32]; g == guidStr {
					return rootPath + "/" + p, nil
				}
			}
		case nil:
			return newPath, nil
		default:
			return "", err
		}
	}
	return "", err
}
// Delete removes the znode at path, conditional on the given version.
func (c *Conn) Delete(path string, version int32) error {
	req := &DeleteRequest{path, version}
	if _, err := c.request(opDelete, req, &deleteResponse{}, nil); err != nil {
		return err
	}
	return nil
}
// Exists reports whether a znode exists at path, returning its Stat.
// A server-side ErrNoNode is translated into (false, nil error).
func (c *Conn) Exists(path string) (bool, *Stat, error) {
	res := &existsResponse{}
	_, err := c.request(opExists, &existsRequest{Path: path, Watch: false}, res, nil)
	switch err {
	case nil:
		return true, &res.Stat, nil
	case ErrNoNode:
		return false, &res.Stat, nil
	default:
		// NOTE(review): mirrors the historical behavior of reporting
		// exists=true alongside any other non-nil error.
		return true, &res.Stat, err
	}
}
// ExistsW reports whether a znode exists at path and registers a watch:
// a data watch when the node exists, an existence watch when it does not.
func (c *Conn) ExistsW(path string) (bool, *Stat, <-chan Event, error) {
	var ech <-chan Event
	res := &existsResponse{}
	recv := func(req *request, res *responseHeader, err error) {
		switch err {
		case nil:
			ech = c.addWatcher(path, watchTypeData)
		case ErrNoNode:
			ech = c.addWatcher(path, watchTypeExist)
		}
	}
	_, err := c.request(opExists, &existsRequest{Path: path, Watch: true}, res, recv)
	switch err {
	case nil:
		return true, &res.Stat, ech, nil
	case ErrNoNode:
		return false, &res.Stat, ech, nil
	default:
		return false, nil, nil, err
	}
}
// GetACL returns the ACL list of the znode at path along with its Stat.
func (c *Conn) GetACL(path string) ([]ACL, *Stat, error) {
	req := &getAclRequest{Path: path}
	res := &getAclResponse{}
	_, err := c.request(opGetAcl, req, res, nil)
	return res.Acl, &res.Stat, err
}
// SetACL replaces the ACL list of the znode at path, conditional on the
// given version.
func (c *Conn) SetACL(path string, acl []ACL, version int32) (*Stat, error) {
	req := &setAclRequest{Path: path, Acl: acl, Version: version}
	res := &setAclResponse{}
	_, err := c.request(opSetAcl, req, res, nil)
	return &res.Stat, err
}
// Sync flushes the channel between this client and the leader for the
// znode at path, returning the path echoed by the server.
func (c *Conn) Sync(path string) (string, error) {
	req := &syncRequest{Path: path}
	res := &syncResponse{}
	_, err := c.request(opSync, req, res, nil)
	return res.Path, err
}
// MultiResponse holds the result of a single operation executed by Multi.
type MultiResponse struct {
	Stat   *Stat  // stat of the affected znode, when the server returns one
	String string // presumably the created path for create ops — verify against server protocol
}
// Multi executes multiple ZooKeeper operations atomically: either all
// of them are applied or none are. Each element of ops must be one of
// *CreateRequest, *DeleteRequest, *SetDataRequest, or
// *CheckVersionRequest.
func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) {
	req := &multiRequest{
		Ops:        make([]multiRequestOp, 0, len(ops)),
		DoneHeader: multiHeader{Type: -1, Done: true, Err: -1},
	}
	for _, op := range ops {
		// Translate the request's concrete type into its wire opcode.
		header := multiHeader{Err: -1}
		switch op.(type) {
		case *CreateRequest:
			header.Type = opCreate
		case *SetDataRequest:
			header.Type = opSetData
		case *DeleteRequest:
			header.Type = opDelete
		case *CheckVersionRequest:
			header.Type = opCheck
		default:
			return nil, fmt.Errorf("unknown operation type %T", op)
		}
		req.Ops = append(req.Ops, multiRequestOp{header, op})
	}
	res := &multiResponse{}
	_, err := c.request(opMulti, req, res, nil)
	mr := make([]MultiResponse, len(res.Ops))
	for i, op := range res.Ops {
		mr[i] = MultiResponse{Stat: op.Stat, String: op.String}
	}
	return mr, err
}
// Server returns the current or last-connected server name.
func (c *Conn) Server() string {
	// c.server is written under serverMu elsewhere in this file;
	// lock to get a consistent read.
	c.serverMu.Lock()
	defer c.serverMu.Unlock()
	return c.server
}
|
package main
import (
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"github.com/howeyc/fsnotify"
"github.com/naoina/kocha"
)
const DEFAULT_KOCHA_ENV = "dev"
// runCommand implements the "kocha run" subcommand: it builds the
// application in the current directory, runs the binary, and rebuilds
// whenever a watched file changes.
type runCommand struct {
	flag *flag.FlagSet // flag set installed by DefineFlags; Arg(0) carries KOCHA_ENV
}
// Name returns the subcommand name used on the command line.
func (c *runCommand) Name() string {
	return "run"
}
// Alias returns the command's short alias; "run" has none.
func (c *runCommand) Alias() string {
	return ""
}
// Short returns the one-line description shown in the command listing.
func (c *runCommand) Short() string {
	// Fixed grammar of the user-facing text ("run the your application").
	return "run your application"
}
// Usage returns the usage line for the command.
func (c *runCommand) Usage() string {
	return c.Name() + " [KOCHA_ENV]"
}
// DefineFlags stores the flag set so Run can read positional arguments.
func (c *runCommand) DefineFlags(fs *flag.FlagSet) {
	c.flag = fs
}
// Run executes the subcommand: it resolves KOCHA_ENV (falling back to
// DEFAULT_KOCHA_ENV), then rebuilds and restarts the application in an
// endless loop, with watchApp blocking between iterations until a file
// changes.
func (c *runCommand) Run() {
	env := DEFAULT_KOCHA_ENV
	if arg := c.flag.Arg(0); arg != "" {
		env = arg
	} else {
		fmt.Printf("kocha: KOCHA_ENV environment variable isn't set, use \"%v\"\n", DEFAULT_KOCHA_ENV)
	}
	os.Setenv("KOCHA_ENV", env)
	workDir, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	// The binary is named after the project directory.
	execName := filepath.Base(workDir)
	if runtime.GOOS == "windows" {
		execName += ".exe"
	}
	for {
		c.watchApp(workDir, execName)
	}
}
// watchApp builds and starts the application binary, then blocks until
// a file under dir changes (or the watcher reports an error), at which
// point it returns so the caller's loop can rebuild and restart.
func (c *runCommand) watchApp(dir, execName string) {
	cmd := c.execCmd("go", "build", "-o", execName)
	if err := cmd.Wait(); err == nil {
		// Build succeeded; run the freshly built binary.
		cmd = c.execCmd(filepath.Join(dir, execName))
	}
	// Kill whichever process is current (the app, or the already-finished
	// build command if the build failed) before the next cycle.
	defer cmd.Process.Kill()
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		panic(err)
	}
	defer watcher.Close()
	if err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Skip hidden files and prune whole hidden directories (e.g. ".git").
		if info.Name()[0] == '.' {
			if info.IsDir() {
				return filepath.SkipDir
			}
			return nil
		}
		if err := watcher.Watch(path); err != nil {
			return err
		}
		return nil
	}); err != nil {
		panic(err)
	}
	// Block until any watched file changes or the watcher fails.
	select {
	case <-watcher.Event:
	case err := <-watcher.Error:
		panic(err)
	}
	// fmt.Println appends a newline itself; the literal previously ended
	// with "\n" too, printing a stray blank line (flagged by go vet).
	fmt.Println("Reloading...")
}
// execCmd starts name with args, wiring the child's stdout/stderr to
// this process, and returns the running command. Failure to start is
// reported through kocha.PanicOnError.
func (c *runCommand) execCmd(name string, args ...string) *exec.Cmd {
	cmd := exec.Command(name, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		kocha.PanicOnError(c, "abort: %v", err)
	}
	return cmd
}
'kocha run' changed to watch only the files in specific directories
package main
import (
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"github.com/howeyc/fsnotify"
"github.com/naoina/kocha"
)
const DEFAULT_KOCHA_ENV = "dev"
// runCommand implements the "kocha run" subcommand: it builds the
// application in the current directory, runs the binary, and rebuilds
// whenever a watched file changes.
type runCommand struct {
	flag *flag.FlagSet // flag set installed by DefineFlags; Arg(0) carries KOCHA_ENV
}
// Name returns the subcommand name used on the command line.
func (c *runCommand) Name() string {
	return "run"
}
// Alias returns the command's short alias; "run" has none.
func (c *runCommand) Alias() string {
	return ""
}
// Short returns the one-line description shown in the command listing.
func (c *runCommand) Short() string {
	// Fixed grammar of the user-facing text ("run the your application").
	return "run your application"
}
// Usage returns the usage line for the command.
func (c *runCommand) Usage() string {
	return c.Name() + " [KOCHA_ENV]"
}
// DefineFlags stores the flag set so Run can read positional arguments.
func (c *runCommand) DefineFlags(fs *flag.FlagSet) {
	c.flag = fs
}
// Run executes the subcommand: it resolves KOCHA_ENV (falling back to
// DEFAULT_KOCHA_ENV), then rebuilds and restarts the application in an
// endless loop, with watchApp blocking between iterations until a file
// changes.
func (c *runCommand) Run() {
	env := DEFAULT_KOCHA_ENV
	if arg := c.flag.Arg(0); arg != "" {
		env = arg
	} else {
		fmt.Printf("kocha: KOCHA_ENV environment variable isn't set, use \"%v\"\n", DEFAULT_KOCHA_ENV)
	}
	os.Setenv("KOCHA_ENV", env)
	basedir, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	// The binary is named after the project directory.
	execName := filepath.Base(basedir)
	if runtime.GOOS == "windows" {
		execName += ".exe"
	}
	for {
		c.watchApp(basedir, execName)
	}
}
// watchApp builds and starts the application binary, then blocks until
// a file under the app, config, or main.go trees changes (or the
// watcher reports an error), at which point it returns so the caller's
// loop can rebuild and restart.
func (c *runCommand) watchApp(basedir, execName string) {
	cmd := c.execCmd("go", "build", "-o", execName)
	if err := cmd.Wait(); err == nil {
		// Build succeeded; run the freshly built binary.
		cmd = c.execCmd(filepath.Join(basedir, execName))
	}
	// Kill whichever process is current (the app, or the already-finished
	// build command if the build failed) before the next cycle.
	defer cmd.Process.Kill()
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		panic(err)
	}
	defer watcher.Close()
	watchFunc := func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Skip hidden files and prune whole hidden directories (e.g. ".git").
		if info.Name()[0] == '.' {
			if info.IsDir() {
				return filepath.SkipDir
			}
			return nil
		}
		if err := watcher.Watch(path); err != nil {
			return err
		}
		return nil
	}
	// Watch only the application source trees, not the whole project.
	for _, path := range []string{
		"app", "config", "main.go",
	} {
		if err := filepath.Walk(filepath.Join(basedir, path), watchFunc); err != nil {
			panic(err)
		}
	}
	// Block until any watched file changes or the watcher fails.
	select {
	case <-watcher.Event:
	case err := <-watcher.Error:
		panic(err)
	}
	// fmt.Println appends a newline itself; the literal previously ended
	// with "\n" too, printing a stray blank line (flagged by go vet).
	fmt.Println("Reloading...")
}
// execCmd starts name with args, wiring the child's stdout/stderr to
// this process, and returns the running command. Failure to start is
// reported through kocha.PanicOnError.
func (c *runCommand) execCmd(name string, args ...string) *exec.Cmd {
	cmd := exec.Command(name, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		kocha.PanicOnError(c, "abort: %v", err)
	}
	return cmd
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.