CombinedText stringlengths 4 3.42M |
|---|
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package state
import (
"bytes"
"io"
"io/ioutil"
"github.com/juju/errors"
"github.com/juju/testing"
"github.com/juju/juju/resource"
)
// stubRawState is a testing double for the raw-state interface; it records
// every call on the embedded stub and hands back canned dependencies.
type stubRawState struct {
	stub *testing.Stub

	ReturnPersistence Persistence
	ReturnStorage     Storage
}

// Persistence records the call and returns the configured Persistence.
func (s *stubRawState) Persistence() Persistence {
	s.stub.AddCall("Persistence")
	_ = s.stub.NextErr() // advance the error queue; nothing to return here
	return s.ReturnPersistence
}

// Storage records the call and returns the configured Storage.
func (s *stubRawState) Storage() Storage {
	s.stub.AddCall("Storage")
	_ = s.stub.NextErr() // advance the error queue; nothing to return here
	return s.ReturnStorage
}
// stubPersistence is a testing double for the Persistence interface. Every
// method records its call on the stub and fails with the stub's next queued
// error, if one is set.
type stubPersistence struct {
	stub *testing.Stub

	// ReturnListResources is the canned result for ListResources.
	ReturnListResources []resource.Resource
}

// ListResources records the call and returns the canned resource list.
func (s *stubPersistence) ListResources(serviceID string) ([]resource.Resource, error) {
	s.stub.AddCall("ListResources", serviceID)
	if err := s.stub.NextErr(); err != nil {
		return nil, errors.Trace(err)
	}
	return s.ReturnListResources, nil
}

// StageResource records the call and returns the next queued error, if any.
func (s *stubPersistence) StageResource(id, serviceID string, res resource.Resource) error {
	s.stub.AddCall("StageResource", id, serviceID, res)
	if err := s.stub.NextErr(); err != nil {
		return errors.Trace(err)
	}
	return nil
}

// UnstageResource records the call and returns the next queued error, if any.
func (s *stubPersistence) UnstageResource(id, serviceID string) error {
	s.stub.AddCall("UnstageResource", id, serviceID)
	if err := s.stub.NextErr(); err != nil {
		return errors.Trace(err)
	}
	return nil
}

// SetResource records the call and returns the next queued error, if any.
func (s *stubPersistence) SetResource(id, serviceID string, res resource.Resource) error {
	s.stub.AddCall("SetResource", id, serviceID, res)
	if err := s.stub.NextErr(); err != nil {
		return errors.Trace(err)
	}
	return nil
}

// SetUnitResource records the call and returns the next queued error, if any.
func (s *stubPersistence) SetUnitResource(serviceID, unitID string, res resource.Resource) error {
	s.stub.AddCall("SetUnitResource", serviceID, unitID, res)
	if err := s.stub.NextErr(); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// stubStorage is a testing double for the Storage interface. Get may be
// satisfied either from the storageReturns queue or from ReturnGet.
type stubStorage struct {
	stub *testing.Stub

	// ReturnGet is the fallback content handed back by Get when the
	// storageReturns queue is empty.
	ReturnGet resource.Content
	// storageReturns is a FIFO of buffers; each successful Get pops one.
	storageReturns []*bytes.Buffer
}

// PutAndCheckHash records the call and returns the next queued error, if any.
func (s *stubStorage) PutAndCheckHash(path string, r io.Reader, length int64, hash string) error {
	s.stub.AddCall("PutAndCheckHash", path, r, length, hash)
	if err := s.stub.NextErr(); err != nil {
		return errors.Trace(err)
	}
	return nil
}

// Get records the call and returns canned content. The original file declared
// Get twice on this receiver (a compile error); this single implementation
// merges both behaviors: queued buffers take precedence, then ReturnGet.
func (s *stubStorage) Get(path string) (io.ReadCloser, int64, error) {
	s.stub.AddCall("Get", path)
	if err := s.stub.NextErr(); err != nil {
		return nil, 0, errors.Trace(err)
	}
	if len(s.storageReturns) > 0 {
		buf := s.storageReturns[0]
		s.storageReturns = s.storageReturns[1:]
		return ioutil.NopCloser(buf), int64(buf.Len()), nil
	}
	if readCloser, ok := s.ReturnGet.Data.(io.ReadCloser); ok {
		return readCloser, s.ReturnGet.Size, nil
	}
	return ioutil.NopCloser(s.ReturnGet.Data), s.ReturnGet.Size, nil
}

// Remove records the call and returns the next queued error, if any.
func (s *stubStorage) Remove(path string) error {
	s.stub.AddCall("Remove", path)
	if err := s.stub.NextErr(); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// stubReader is an io.Reader double that records Read calls and fails with
// the stub's next queued error, wrapped with a traceback.
type stubReader struct {
	stub *testing.Stub

	// ReturnRead is the byte count reported by every successful Read.
	ReturnRead int
}

// Read records the call and reports ReturnRead bytes read.
func (s *stubReader) Read(buf []byte) (int, error) {
	s.stub.AddCall("Read", buf)
	err := s.stub.NextErr()
	if err != nil {
		return 0, errors.Trace(err)
	}
	return s.ReturnRead, nil
}

// noWrapStubReader behaves like stubReader but returns queued errors
// unwrapped, so callers can compare them directly.
type noWrapStubReader struct {
	stub *testing.Stub

	// ReturnRead is the byte count reported by every successful Read.
	ReturnRead int
}

// Read records the call and reports ReturnRead bytes read.
func (s *noWrapStubReader) Read(buf []byte) (int, error) {
	s.stub.AddCall("Read", buf)
	err := s.stub.NextErr()
	if err != nil {
		return 0, err
	}
	return s.ReturnRead, nil
}
fix after rebase
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package state
import (
"bytes"
"io"
"io/ioutil"
"github.com/juju/errors"
"github.com/juju/testing"
"github.com/juju/juju/resource"
)
// stubRawState is a testing double for the raw-state interface; it records
// calls on the embedded stub and hands back canned dependencies.
type stubRawState struct {
	stub *testing.Stub

	// ReturnPersistence is handed back by Persistence.
	ReturnPersistence Persistence
	// ReturnStorage is handed back by Storage.
	ReturnStorage Storage
}

// Persistence records the call and returns the configured Persistence.
func (s *stubRawState) Persistence() Persistence {
	s.stub.AddCall("Persistence")
	s.stub.NextErr() // advance the error queue; nothing to return here
	return s.ReturnPersistence
}

// Storage records the call and returns the configured Storage.
func (s *stubRawState) Storage() Storage {
	s.stub.AddCall("Storage")
	s.stub.NextErr() // advance the error queue; nothing to return here
	return s.ReturnStorage
}
// stubPersistence is a testing double for the Persistence interface. Every
// method records its call on the stub and fails with the stub's next queued
// error, if one is set.
type stubPersistence struct {
	stub *testing.Stub

	// ReturnListResources is the canned result for ListResources.
	ReturnListResources []resource.Resource
}

// ListResources records the call and returns the canned resource list.
func (s *stubPersistence) ListResources(serviceID string) ([]resource.Resource, error) {
	s.stub.AddCall("ListResources", serviceID)
	if err := s.stub.NextErr(); err != nil {
		return nil, errors.Trace(err)
	}
	return s.ReturnListResources, nil
}

// StageResource records the call and returns the next queued error, if any.
func (s *stubPersistence) StageResource(id, serviceID string, res resource.Resource) error {
	s.stub.AddCall("StageResource", id, serviceID, res)
	if err := s.stub.NextErr(); err != nil {
		return errors.Trace(err)
	}
	return nil
}

// UnstageResource records the call and returns the next queued error, if any.
func (s *stubPersistence) UnstageResource(id, serviceID string) error {
	s.stub.AddCall("UnstageResource", id, serviceID)
	if err := s.stub.NextErr(); err != nil {
		return errors.Trace(err)
	}
	return nil
}

// SetResource records the call and returns the next queued error, if any.
func (s *stubPersistence) SetResource(id, serviceID string, res resource.Resource) error {
	s.stub.AddCall("SetResource", id, serviceID, res)
	if err := s.stub.NextErr(); err != nil {
		return errors.Trace(err)
	}
	return nil
}

// SetUnitResource records the call and returns the next queued error, if any.
func (s *stubPersistence) SetUnitResource(serviceID, unitID string, res resource.Resource) error {
	s.stub.AddCall("SetUnitResource", serviceID, unitID, res)
	if err := s.stub.NextErr(); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// stubStorage is a testing double for the Storage interface. Get is
// satisfied from the storageReturns queue.
type stubStorage struct {
	stub *testing.Stub

	// storageReturns is a FIFO of buffers; each successful Get pops one.
	storageReturns []*bytes.Buffer
}

// PutAndCheckHash records the call and returns the next queued error, if any.
func (s *stubStorage) PutAndCheckHash(path string, r io.Reader, length int64, hash string) error {
	s.stub.AddCall("PutAndCheckHash", path, r, length, hash)
	if err := s.stub.NextErr(); err != nil {
		return errors.Trace(err)
	}
	return nil
}

// Get records the call and pops the next queued buffer, returning it as a
// ReadCloser along with its length. When the queue is empty it returns a nil
// reader and zero size.
//
// NOTE: the pre-rebase file declared Get twice on this receiver, and the
// stale copy still referenced the removed ReturnGet field — both compile
// errors. Only the storageReturns-backed implementation is kept.
func (s *stubStorage) Get(path string) (io.ReadCloser, int64, error) {
	s.stub.AddCall("Get", path)
	if err := s.stub.NextErr(); err != nil {
		return nil, 0, errors.Trace(err)
	}
	if len(s.storageReturns) == 0 {
		return nil, 0, nil
	}
	buf := s.storageReturns[0]
	s.storageReturns = s.storageReturns[1:]
	return ioutil.NopCloser(buf), int64(buf.Len()), nil
}

// Remove records the call and returns the next queued error, if any.
func (s *stubStorage) Remove(path string) error {
	s.stub.AddCall("Remove", path)
	if err := s.stub.NextErr(); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// stubReader is an io.Reader double that records Read calls and fails with
// the stub's next queued error, wrapped with a traceback.
type stubReader struct {
	stub *testing.Stub

	// ReturnRead is the byte count reported by every successful Read.
	ReturnRead int
}

// Read records the call and reports ReturnRead bytes read.
func (s *stubReader) Read(buf []byte) (int, error) {
	s.stub.AddCall("Read", buf)
	if err := s.stub.NextErr(); err != nil {
		return 0, errors.Trace(err)
	}
	return s.ReturnRead, nil
}

// noWrapStubReader behaves like stubReader but returns queued errors
// unwrapped, so callers can compare them directly.
type noWrapStubReader struct {
	stub *testing.Stub

	// ReturnRead is the byte count reported by every successful Read.
	ReturnRead int
}

// Read records the call and reports ReturnRead bytes read.
func (s *noWrapStubReader) Read(buf []byte) (int, error) {
	s.stub.AddCall("Read", buf)
	if err := s.stub.NextErr(); err != nil {
		return 0, err
	}
	return s.ReturnRead, nil
}
|
package main
import (
"encoding/json"
"flag"
"io/ioutil"
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/log"
)
const (
	// namespace prefixes every exported metric name.
	namespace = "resourcemanager"
)

// Command-line flags controlling where the exporter listens and what it scrapes.
var (
	listenAddress      = flag.String("web.listen-address", ":9088", "Address on which to expose metrics and web interface.")
	metricsPath        = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
	resourceManagerUrl = flag.String("resourcemanager.url", "http://localhost:8088", "Hadoop ResourceManager URL.")
)
// Exporter scrapes a Hadoop ResourceManager's cluster-metrics endpoint and
// exposes each field as a Prometheus metric. Field names mirror the JSON
// keys of the /ws/v1/cluster/metrics response.
type Exporter struct {
	// url is the base ResourceManager URL to scrape.
	url                   string
	activeNodes           prometheus.Gauge
	rebootedNodes         prometheus.Gauge
	decommissionedNodes   prometheus.Gauge
	unhealthyNodes        prometheus.Gauge
	lostNodes             prometheus.Gauge
	totalNodes            prometheus.Gauge
	totalVirtualCores     prometheus.Gauge
	availableMB           prometheus.Gauge
	reservedMB            prometheus.Gauge
	appsKilled            prometheus.Gauge
	appsFailed            prometheus.Gauge
	appsRunning           prometheus.Gauge
	appsPending           prometheus.Gauge
	appsCompleted         prometheus.Counter
	appsSubmitted         prometheus.Counter
	allocatedMB           prometheus.Gauge
	reservedVirtualCores  prometheus.Gauge
	availableVirtualCores prometheus.Gauge
	allocatedVirtualCores prometheus.Gauge
	containersAllocated   prometheus.Gauge
	containersReserved    prometheus.Gauge
	containersPending     prometheus.Gauge
	totalMB               prometheus.Gauge
}
func NewExporter(url string) *Exporter {
return &Exporter{
url: url,
activeNodes: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "activeNodes",
Help: "activeNodes",
}),
rebootedNodes: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "rebootedNodes",
Help: "rebootedNodes",
}),
decommissionedNodes: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "decommissionedNodes",
Help: "decommissionedNodes",
}),
unhealthyNodes: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "unhealthyNodes",
Help: "unhealthyNodes",
}),
lostNodes: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "lostNodes",
Help: "lostNodes",
}),
totalNodes: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "totalNodes",
Help: "totalNodes",
}),
totalVirtualCores: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "totalVirtualCores",
Help: "totalVirtualCores",
}),
availableMB: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "availableMB",
Help: "availableMB",
}),
reservedMB: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "reservedMB",
Help: "reservedMB",
}),
appsKilled: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "appsKilled",
Help: "appsKilled",
}),
appsFailed: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "appsFailed",
Help: "appsFailed",
}),
appsRunning: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "appsRunning",
Help: "appsRunning",
}),
appsPending: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "appsPending",
Help: "appsPending",
}),
appsCompleted: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Name: "appsCompleted",
Help: "appsCompleted",
}),
appsSubmitted: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Name: "appsSubmitted",
Help: "appsSubmitted",
}),
allocatedMB: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "allocatedMB",
Help: "allocatedMB",
}),
reservedVirtualCores: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "reservedVirtualCores",
Help: "reservedVirtualCores",
}),
availableVirtualCores: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "availableVirtualCores",
Help: "availableVirtualCores",
}),
allocatedVirtualCores: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "allocatedVirtualCores",
Help: "allocatedVirtualCores",
}),
containersAllocated: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "containersAllocated",
Help: "containersAllocated",
}),
containersReserved: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "containersReserved",
Help: "containersReserved",
}),
containersPending: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "containersPending",
Help: "containersPending",
}),
totalMB: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "totalMB",
Help: "totalMB",
}),
}
}
// Describe implements the prometheus.Collector interface by forwarding the
// descriptors of every underlying metric to ch.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
	metrics := []prometheus.Collector{
		e.activeNodes, e.rebootedNodes, e.decommissionedNodes,
		e.unhealthyNodes, e.lostNodes, e.totalNodes,
		e.totalVirtualCores, e.availableMB, e.reservedMB,
		e.appsKilled, e.appsFailed, e.appsRunning, e.appsPending,
		e.appsCompleted, e.appsSubmitted, e.allocatedMB,
		e.reservedVirtualCores, e.availableVirtualCores,
		e.allocatedVirtualCores, e.containersAllocated,
		e.containersReserved, e.containersPending, e.totalMB,
	}
	for _, m := range metrics {
		m.Describe(ch)
	}
}
// Collect implements the prometheus.Collector interface. It scrapes
// /ws/v1/cluster/metrics on the ResourceManager, copies every field of the
// clusterMetrics object into the matching metric, and emits them on ch.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
	resp, err := http.Get(e.url + "/ws/v1/cluster/metrics")
	if err != nil {
		// resp is nil on error; return here instead of falling through
		// to the nil-pointer dereference the original defer caused.
		log.Error(err)
		return
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Error(err)
		return
	}
	/* Expected payload shape:
	"clusterMetrics": {
		"activeNodes": 3,
		"rebootedNodes": 0,
		...
		"totalMB": 6144
	}
	*/
	var f interface{}
	if err := json.Unmarshal(data, &f); err != nil {
		log.Error(err)
		return
	}
	m, ok := f.(map[string]interface{})
	if !ok {
		log.Errorf("unexpected metrics payload: %s", data)
		return
	}
	cm, ok := m["clusterMetrics"].(map[string]interface{})
	if !ok {
		log.Errorf("clusterMetrics missing in payload: %s", data)
		return
	}
	// settable is satisfied by both prometheus.Gauge and, in this client
	// version, prometheus.Counter (both expose Set and Collect, as the
	// original code already relied on).
	type settable interface {
		prometheus.Collector
		Set(float64)
	}
	// Mapping JSON field name -> metric. Iterating this table fixes two
	// bugs in the hand-written original: activeNodes was collected twice
	// and allocatedMB was never collected.
	metrics := map[string]settable{
		"activeNodes":           e.activeNodes,
		"rebootedNodes":         e.rebootedNodes,
		"decommissionedNodes":   e.decommissionedNodes,
		"unhealthyNodes":        e.unhealthyNodes,
		"lostNodes":             e.lostNodes,
		"totalNodes":            e.totalNodes,
		"totalVirtualCores":     e.totalVirtualCores,
		"availableMB":           e.availableMB,
		"reservedMB":            e.reservedMB,
		"appsKilled":            e.appsKilled,
		"appsFailed":            e.appsFailed,
		"appsRunning":           e.appsRunning,
		"appsPending":           e.appsPending,
		"appsCompleted":         e.appsCompleted,
		"appsSubmitted":         e.appsSubmitted,
		"allocatedMB":           e.allocatedMB,
		"reservedVirtualCores":  e.reservedVirtualCores,
		"availableVirtualCores": e.availableVirtualCores,
		"allocatedVirtualCores": e.allocatedVirtualCores,
		"containersAllocated":   e.containersAllocated,
		"containersReserved":    e.containersReserved,
		"containersPending":     e.containersPending,
		"totalMB":               e.totalMB,
	}
	for name, metric := range metrics {
		v, ok := cm[name].(float64)
		if !ok {
			// Skip fields that are absent or non-numeric instead of
			// panicking on a failed type assertion.
			log.Errorf("field %q missing or not a number", name)
			continue
		}
		metric.Set(v)
		metric.Collect(ch)
	}
}
// main parses flags, registers the exporter, and serves the metrics endpoint
// plus a minimal HTML landing page until the process is killed.
func main() {
	flag.Parse()
	exporter := NewExporter(*resourceManagerUrl)
	prometheus.MustRegister(exporter)
	log.Printf("Starting Server: %s", *listenAddress)
	http.Handle(*metricsPath, prometheus.Handler())
	// Landing page linking to the metrics path.
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
<head><title>ResourceManager Exporter</title></head>
<body>
<h1>ResourceManager Exporter</h1>
<p><a href="` + *metricsPath + `">Metrics</a></p>
</body>
</html>`))
	})
	err := http.ListenAndServe(*listenAddress, nil)
	if err != nil {
		log.Fatal(err)
	}
}
resourcemanager_activeNodes duplicate fix
package main
import (
"encoding/json"
"flag"
"io/ioutil"
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/log"
)
const (
	// namespace prefixes every exported metric name.
	namespace = "resourcemanager"
)

// Command-line flags controlling where the exporter listens and what it scrapes.
var (
	listenAddress      = flag.String("web.listen-address", ":9088", "Address on which to expose metrics and web interface.")
	metricsPath        = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
	resourceManagerUrl = flag.String("resourcemanager.url", "http://localhost:8088", "Hadoop ResourceManager URL.")
)
// Exporter scrapes a Hadoop ResourceManager's cluster-metrics endpoint and
// exposes each field as a Prometheus metric. Field names mirror the JSON
// keys of the /ws/v1/cluster/metrics response.
type Exporter struct {
	// url is the base ResourceManager URL to scrape.
	url                   string
	activeNodes           prometheus.Gauge
	rebootedNodes         prometheus.Gauge
	decommissionedNodes   prometheus.Gauge
	unhealthyNodes        prometheus.Gauge
	lostNodes             prometheus.Gauge
	totalNodes            prometheus.Gauge
	totalVirtualCores     prometheus.Gauge
	availableMB           prometheus.Gauge
	reservedMB            prometheus.Gauge
	appsKilled            prometheus.Gauge
	appsFailed            prometheus.Gauge
	appsRunning           prometheus.Gauge
	appsPending           prometheus.Gauge
	appsCompleted         prometheus.Counter
	appsSubmitted         prometheus.Counter
	allocatedMB           prometheus.Gauge
	reservedVirtualCores  prometheus.Gauge
	availableVirtualCores prometheus.Gauge
	allocatedVirtualCores prometheus.Gauge
	containersAllocated   prometheus.Gauge
	containersReserved    prometheus.Gauge
	containersPending     prometheus.Gauge
	totalMB               prometheus.Gauge
}
func NewExporter(url string) *Exporter {
return &Exporter{
url: url,
activeNodes: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "activeNodes",
Help: "activeNodes",
}),
rebootedNodes: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "rebootedNodes",
Help: "rebootedNodes",
}),
decommissionedNodes: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "decommissionedNodes",
Help: "decommissionedNodes",
}),
unhealthyNodes: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "unhealthyNodes",
Help: "unhealthyNodes",
}),
lostNodes: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "lostNodes",
Help: "lostNodes",
}),
totalNodes: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "totalNodes",
Help: "totalNodes",
}),
totalVirtualCores: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "totalVirtualCores",
Help: "totalVirtualCores",
}),
availableMB: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "availableMB",
Help: "availableMB",
}),
reservedMB: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "reservedMB",
Help: "reservedMB",
}),
appsKilled: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "appsKilled",
Help: "appsKilled",
}),
appsFailed: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "appsFailed",
Help: "appsFailed",
}),
appsRunning: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "appsRunning",
Help: "appsRunning",
}),
appsPending: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "appsPending",
Help: "appsPending",
}),
appsCompleted: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Name: "appsCompleted",
Help: "appsCompleted",
}),
appsSubmitted: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Name: "appsSubmitted",
Help: "appsSubmitted",
}),
allocatedMB: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "allocatedMB",
Help: "allocatedMB",
}),
reservedVirtualCores: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "reservedVirtualCores",
Help: "reservedVirtualCores",
}),
availableVirtualCores: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "availableVirtualCores",
Help: "availableVirtualCores",
}),
allocatedVirtualCores: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "allocatedVirtualCores",
Help: "allocatedVirtualCores",
}),
containersAllocated: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "containersAllocated",
Help: "containersAllocated",
}),
containersReserved: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "containersReserved",
Help: "containersReserved",
}),
containersPending: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "containersPending",
Help: "containersPending",
}),
totalMB: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "totalMB",
Help: "totalMB",
}),
}
}
// Describe implements the prometheus.Collector interface by forwarding the
// descriptors of every underlying metric to ch.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
	all := []prometheus.Collector{
		e.activeNodes, e.rebootedNodes, e.decommissionedNodes,
		e.unhealthyNodes, e.lostNodes, e.totalNodes,
		e.totalVirtualCores, e.availableMB, e.reservedMB,
		e.appsKilled, e.appsFailed, e.appsRunning, e.appsPending,
		e.appsCompleted, e.appsSubmitted, e.allocatedMB,
		e.reservedVirtualCores, e.availableVirtualCores,
		e.allocatedVirtualCores, e.containersAllocated,
		e.containersReserved, e.containersPending, e.totalMB,
	}
	for _, c := range all {
		c.Describe(ch)
	}
}
// Collect implements the prometheus.Collector interface. It scrapes
// /ws/v1/cluster/metrics on the ResourceManager, copies every field of the
// clusterMetrics object into the matching metric, and emits them on ch.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
	resp, err := http.Get(e.url + "/ws/v1/cluster/metrics")
	if err != nil {
		// resp is nil on error; return here instead of falling through
		// to the nil-pointer dereference the original defer caused.
		log.Error(err)
		return
	}
	defer resp.Body.Close()
	payload, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Error(err)
		return
	}
	/* Expected payload shape:
	"clusterMetrics": {
		"activeNodes": 3,
		"rebootedNodes": 0,
		...
		"totalMB": 6144
	}
	*/
	var decoded interface{}
	if err := json.Unmarshal(payload, &decoded); err != nil {
		log.Error(err)
		return
	}
	root, ok := decoded.(map[string]interface{})
	if !ok {
		log.Errorf("unexpected metrics payload: %s", payload)
		return
	}
	cluster, ok := root["clusterMetrics"].(map[string]interface{})
	if !ok {
		log.Errorf("clusterMetrics missing in payload: %s", payload)
		return
	}
	// settable is satisfied by both prometheus.Gauge and, in this client
	// version, prometheus.Counter (both expose Set and Collect, as the
	// original code already relied on).
	type settable interface {
		prometheus.Collector
		Set(float64)
	}
	// Mapping JSON field name -> metric; iterating the table keeps the
	// Set and Collect lists trivially in sync.
	table := map[string]settable{
		"activeNodes":           e.activeNodes,
		"rebootedNodes":         e.rebootedNodes,
		"decommissionedNodes":   e.decommissionedNodes,
		"unhealthyNodes":        e.unhealthyNodes,
		"lostNodes":             e.lostNodes,
		"totalNodes":            e.totalNodes,
		"totalVirtualCores":     e.totalVirtualCores,
		"availableMB":           e.availableMB,
		"reservedMB":            e.reservedMB,
		"appsKilled":            e.appsKilled,
		"appsFailed":            e.appsFailed,
		"appsRunning":           e.appsRunning,
		"appsPending":           e.appsPending,
		"appsCompleted":         e.appsCompleted,
		"appsSubmitted":         e.appsSubmitted,
		"allocatedMB":           e.allocatedMB,
		"reservedVirtualCores":  e.reservedVirtualCores,
		"availableVirtualCores": e.availableVirtualCores,
		"allocatedVirtualCores": e.allocatedVirtualCores,
		"containersAllocated":   e.containersAllocated,
		"containersReserved":    e.containersReserved,
		"containersPending":     e.containersPending,
		"totalMB":               e.totalMB,
	}
	for name, metric := range table {
		v, ok := cluster[name].(float64)
		if !ok {
			// Skip fields that are absent or non-numeric instead of
			// panicking on a failed type assertion.
			log.Errorf("field %q missing or not a number", name)
			continue
		}
		metric.Set(v)
		metric.Collect(ch)
	}
}
// main parses flags, registers the exporter, and serves the metrics endpoint
// plus a minimal HTML landing page until the process is killed.
func main() {
	flag.Parse()
	exporter := NewExporter(*resourceManagerUrl)
	prometheus.MustRegister(exporter)
	log.Printf("Starting Server: %s", *listenAddress)
	http.Handle(*metricsPath, prometheus.Handler())
	// Landing page linking to the metrics path.
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
<head><title>ResourceManager Exporter</title></head>
<body>
<h1>ResourceManager Exporter</h1>
<p><a href="` + *metricsPath + `">Metrics</a></p>
</body>
</html>`))
	})
	err := http.ListenAndServe(*listenAddress, nil)
	if err != nil {
		log.Fatal(err)
	}
}
|
package config
import (
"errors"
"fmt"
"io/ioutil"
"github.com/BurntSushi/toml"
)
// Defaults applied by Load when the config file leaves a field unset.
const (
	DefaultPort                = "29300"
	DefaultLogLevel            = "error"
	DefaultTimeout             = 5
	DefaultMaxIdleConnsPerHost = 100
)

// Config is the top-level proxy configuration decoded from TOML.
type Config struct {
	Port                string
	LogLevel            string
	Timeout             int
	MaxIdleConnsPerHost int
	DisableCompression  bool
	Endpoints           []EndPoint
}

// EndPoint describes one named upstream and the headers to set when
// proxying to it.
type EndPoint struct {
	Name            string
	Ep              string
	ProxySetHeaders [][]string
}
// Load reads the TOML configuration file at confPath, applies defaults for
// unset fields, and validates that every endpoint has a name and a URL.
func Load(confPath string) (Config, error) {
	raw, err := ioutil.ReadFile(confPath)
	if err != nil {
		return Config{}, err
	}
	var conf Config
	if err := toml.Unmarshal(raw, &conf); err != nil {
		return conf, err
	}
	// Fall back to defaults for anything the file left unset.
	if conf.Port == "" {
		conf.Port = DefaultPort
	}
	if conf.LogLevel == "" {
		conf.LogLevel = DefaultLogLevel
	}
	if conf.Timeout <= 0 {
		conf.Timeout = DefaultTimeout
	}
	if conf.MaxIdleConnsPerHost <= 0 {
		conf.MaxIdleConnsPerHost = DefaultMaxIdleConnsPerHost
	}
	// An endpoint list is mandatory, and each entry needs a name and URL.
	if len(conf.Endpoints) == 0 {
		return conf, errors.New("empty Endpoints")
	}
	for _, ep := range conf.Endpoints {
		if ep.Name == "" {
			return conf, errors.New("empty Endpoint name")
		}
		if ep.Ep == "" {
			return conf, errors.New("empty Endpoint URL")
		}
	}
	return conf, nil
}
// FindEp returns the endpoint in conf whose Name equals name, or a zero
// EndPoint and a descriptive error when no such endpoint exists.
func FindEp(conf Config, name string) (EndPoint, error) {
	for i := range conf.Endpoints {
		if conf.Endpoints[i].Name == name {
			return conf.Endpoints[i], nil
		}
	}
	return EndPoint{}, fmt.Errorf("ep:%s is not found", name)
}
config: introduced LoadBytes() for testable.
package config
import (
"errors"
"fmt"
"io/ioutil"
"github.com/BurntSushi/toml"
)
// Defaults applied by Load when the config file leaves a field unset.
const (
	DefaultPort                = "29300"
	DefaultLogLevel            = "error"
	DefaultTimeout             = 5
	DefaultMaxIdleConnsPerHost = 100
)

// Config is the top-level proxy configuration decoded from TOML.
type Config struct {
	Port                string
	LogLevel            string
	Timeout             int
	MaxIdleConnsPerHost int
	DisableCompression  bool
	Endpoints           []EndPoint
}

// EndPoint describes one named upstream and the headers to set when
// proxying to it.
type EndPoint struct {
	Name            string
	Ep              string
	ProxySetHeaders [][]string
}
// LoadBytes decodes raw TOML bytes into a Config. It performs no default
// filling or validation; that is the caller's job. Exposed separately so
// decoding can be tested without touching the filesystem.
func LoadBytes(bytes []byte) (Config, error) {
	config := Config{}
	err := toml.Unmarshal(bytes, &config)
	return config, err
}
// Load reads the TOML configuration file at confPath via LoadBytes, applies
// defaults for unset fields, and validates that every endpoint has a name
// and a URL.
func Load(confPath string) (Config, error) {
	raw, err := ioutil.ReadFile(confPath)
	if err != nil {
		return Config{}, err
	}
	conf, err := LoadBytes(raw)
	if err != nil {
		return Config{}, err
	}
	// Fall back to defaults for anything the file left unset.
	if conf.Port == "" {
		conf.Port = DefaultPort
	}
	if conf.LogLevel == "" {
		conf.LogLevel = DefaultLogLevel
	}
	if conf.Timeout <= 0 {
		conf.Timeout = DefaultTimeout
	}
	if conf.MaxIdleConnsPerHost <= 0 {
		conf.MaxIdleConnsPerHost = DefaultMaxIdleConnsPerHost
	}
	// An endpoint list is mandatory, and each entry needs a name and URL.
	if len(conf.Endpoints) == 0 {
		return conf, errors.New("empty Endpoints")
	}
	for _, ep := range conf.Endpoints {
		if ep.Name == "" {
			return conf, errors.New("empty Endpoint name")
		}
		if ep.Ep == "" {
			return conf, errors.New("empty Endpoint URL")
		}
	}
	return conf, nil
}
// FindEp returns the endpoint in conf whose Name equals name, or a zero
// EndPoint and a descriptive error when no such endpoint exists.
func FindEp(conf Config, name string) (EndPoint, error) {
	for _, ep := range conf.Endpoints {
		if ep.Name == name {
			return ep, nil
		}
	}
	return EndPoint{}, fmt.Errorf("ep:%s is not found", name)
}
|
/* Goiardi configuration. */
/*
* Copyright (c) 2013-2014, Jeremy Bingham (<jbingham@gmail.com>)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package config parses command line flags and config files, and defines
// options used elsewhere in goiardi.
package config
import (
"github.com/jessevdk/go-flags"
"github.com/BurntSushi/toml"
"os"
"log"
"fmt"
"time"
"path"
)
// Conf is the master configuration struct, populated first from the TOML
// config file (toml tags below) and then overridden by command-line options.
type Conf struct {
	Ipaddress string
	Port      int
	Hostname  string
	ConfFile  string `toml:"conf-file"`
	IndexFile string `toml:"index-file"`
	// DataStoreFile is where in-memory data is frozen to disk; it is
	// mutually exclusive with UseMySQL.
	DataStoreFile  string `toml:"data-file"`
	DebugLevel     int    `toml:"debug-level"`
	FreezeInterval int    `toml:"freeze-interval"`
	FreezeData     bool   `toml:"freeze-data"`
	LogFile        string `toml:"log-file"`
	UseAuth        bool   `toml:"use-auth"`
	// TimeSlew is the raw duration string (e.g. "15m"); TimeSlewDur is
	// its parsed form.
	TimeSlew       string `toml:"time-slew"`
	TimeSlewDur    time.Duration
	ConfRoot       string  `toml:"conf-root"`
	UseSSL         bool    `toml:"use-ssl"`
	SslCert        string  `toml:"ssl-cert"`
	SslKey         string  `toml:"ssl-key"`
	HttpsUrls      bool    `toml:"https-urls"`
	DisableWebUI   bool    `toml:"disable-webui"`
	UseMySQL       bool    `toml:"use-mysql"`
	MySQL          MySQLdb `toml:"mysql"`
	LocalFstoreDir string  `toml:"local-filestore-dir"`
}
// MySQLdb holds the MySQL connection options read from the [mysql] section
// of the config file.
type MySQLdb struct {
	Username string
	Password string
	Protocol string
	Address  string
	Port     string
	Dbname   string
	// ExtraParams are appended verbatim to the DSN as key=value pairs.
	ExtraParams map[string]string `toml:"extra_params"`
}
// Options describes the command-line flags parsed by go-flags; values set
// here override the corresponding Conf fields from the config file.
type Options struct {
	Version        bool   `short:"v" long:"version" description:"Print version info."`
	Verbose        []bool `short:"V" long:"verbose" description:"Show verbose debug information. (not implemented)"`
	ConfFile       string `short:"c" long:"config" description:"Specify a config file to use."`
	Ipaddress      string `short:"I" long:"ipaddress" description:"Listen on a specific IP address."`
	Hostname       string `short:"H" long:"hostname" description:"Hostname to use for this server. Defaults to hostname reported by the kernel."`
	Port           int    `short:"P" long:"port" description:"Port to listen on. If port is set to 443, SSL will be activated. (default: 4545)"`
	IndexFile      string `short:"i" long:"index-file" description:"File to save search index data to."`
	DataStoreFile  string `short:"D" long:"data-file" description:"File to save data store data to."`
	FreezeInterval int    `short:"F" long:"freeze-interval" description:"Interval in seconds to freeze in-memory data structures to disk (requires -i/--index-file and -D/--data-file options to be set). (Default 300 seconds/5 minutes.)"`
	LogFile        string `short:"L" long:"log-file" description:"Log to file X"`
	TimeSlew       string `long:"time-slew" description:"Time difference allowed between the server's clock at the time in the X-OPS-TIMESTAMP header. Formatted like 5m, 150s, etc. Defaults to 15m."`
	ConfRoot       string `long:"conf-root" description:"Root directory for configs and certificates. Default: the directory the config file is in, or the current directory if no config file is set."`
	UseAuth        bool   `short:"A" long:"use-auth" description:"Use authentication. Default: false."`
	UseSSL         bool   `long:"use-ssl" description:"Use SSL for connections. If --port is set to 433, this will automatically be turned on. If it is set to 80, it will automatically be turned off. Default: off. Requires --ssl-cert and --ssl-key."`
	SslCert        string `long:"ssl-cert" description:"SSL certificate file. If a relative path, will be set relative to --conf-root."`
	SslKey         string `long:"ssl-key" description:"SSL key file. If a relative path, will be set relative to --conf-root."`
	HttpsUrls      bool   `long:"https-urls" description:"Use 'https://' in URLs to server resources if goiardi is not using SSL for its connections. Useful when goiardi is sitting behind a reverse proxy that uses SSL, but is communicating with the proxy over HTTP."`
	DisableWebUI   bool   `long:"disable-webui" description:"If enabled, disables connections and logins to goiardi over the webui interface."`
	UseMySQL       bool   `long:"use-mysql" description:"Use a MySQL database for data storage. Configure database options in the config file."`
	LocalFstoreDir string `long:"local-filestore-dir" description:"Directory to save uploaded files in. Optional when running in in-memory mode, *mandatory* for SQL mode."`
}
// Version is the goiardi release version.
const Version = "0.4.9999"

// ChefVersion is the Chef Server version goiardi aims to be compatible
// with, even if compatibility is not complete yet.
const ChefVersion = "11.0.8"
/* The general plan is to read the command-line options, then parse the config
 * file, fill in the config struct with those values, then apply the
 * command-line options to the config struct. We read the cli options first so
 * we know to look for a different config file if needed, but otherwise the
 * command line options override what's in the config file. */

// InitConfig returns a fresh, zero-valued configuration struct.
func InitConfig() *Conf { return new(Conf) }

// Config holds the options specified on the command line or in the config
// file.
var Config = InitConfig()
// ParseConfigOptions reads and applies arguments from the command line, then
// merges in the optional config file; command-line options take precedence.
// Fatal configuration problems are logged and terminate the process.
func ParseConfigOptions() error {
	var opts = &Options{ }
	_, err := flags.Parse(opts)
	if err != nil {
		if err.(*flags.Error).Type == flags.ErrHelp {
			os.Exit(0)
		} else {
			log.Println(err)
			os.Exit(1)
		}
	}
	if opts.Version {
		fmt.Printf("goiardi version %s (aiming for compatibility with Chef Server version %s).\n", Version, ChefVersion)
		os.Exit(0)
	}
	/* Load the config file. Command-line options have precedence over
	 * config file options. */
	if opts.ConfFile != "" {
		if _, err := toml.DecodeFile(opts.ConfFile, Config); err != nil {
			// Log and exit like every other fatal path here; the old
			// panic(err) made the following os.Exit(1) unreachable.
			log.Println(err)
			os.Exit(1)
		}
		Config.ConfFile = opts.ConfFile
		Config.FreezeData = false
	}
	if opts.Hostname != "" {
		Config.Hostname = opts.Hostname
	} else {
		if Config.Hostname == "" {
			Config.Hostname, err = os.Hostname()
			if err != nil {
				log.Println(err)
				Config.Hostname = "localhost"
			}
		}
	}
	if opts.DataStoreFile != "" {
		Config.DataStoreFile = opts.DataStoreFile
	}
	if opts.IndexFile != "" {
		Config.IndexFile = opts.IndexFile
	}
	// Use MySQL?
	if opts.UseMySQL {
		Config.UseMySQL = opts.UseMySQL
	}
	if Config.DataStoreFile != "" && Config.UseMySQL {
		err := fmt.Errorf("The MySQL and data store options may not be specified together.")
		log.Println(err)
		os.Exit(1)
	}
	if !((Config.DataStoreFile == "" && Config.IndexFile == "") || ((Config.DataStoreFile != "" || Config.UseMySQL) && Config.IndexFile != "")) {
		err := fmt.Errorf("-i and -D must either both be specified, or not specified.")
		// Log and exit for consistency; panic made os.Exit unreachable.
		log.Println(err)
		os.Exit(1)
	}
	if Config.UseMySQL && Config.IndexFile == "" {
		err := fmt.Errorf("An index file must be specified with -i or --index-file (or the 'index-file' config file option) when running with a MySQL backend.")
		log.Println(err)
		os.Exit(1)
	}
	if Config.IndexFile != "" && (Config.DataStoreFile != "" || Config.UseMySQL) {
		Config.FreezeData = true
	}
	if opts.LogFile != "" {
		Config.LogFile = opts.LogFile
	}
	if Config.LogFile != "" {
		lfp, lerr := os.Create(Config.LogFile)
		if lerr != nil {
			// Report the file-creation error itself; the old code
			// logged the stale (possibly nil) err variable.
			log.Println(lerr)
			os.Exit(1)
		}
		log.SetOutput(lfp)
	}
	/* Database options */
	// Don't bother setting a default mysql port if mysql isn't used
	if Config.UseMySQL {
		if Config.MySQL.Port == "" {
			Config.MySQL.Port = "3306"
		}
	}
	if opts.LocalFstoreDir != "" {
		Config.LocalFstoreDir = opts.LocalFstoreDir
	}
	if Config.LocalFstoreDir == "" && Config.UseMySQL {
		err := fmt.Errorf("local-filestore-dir must be set when running goiardi in SQL mode")
		log.Println(err)
		os.Exit(1)
	}
	if !Config.FreezeData && (opts.FreezeInterval != 0 || Config.FreezeInterval != 0) {
		log.Printf("FYI, setting the freeze data interval's not especially useful without setting the index and data files.")
	}
	if opts.FreezeInterval != 0 {
		Config.FreezeInterval = opts.FreezeInterval
	}
	if Config.FreezeInterval == 0 {
		Config.FreezeInterval = 300
	}
	/* Root directory for certs and the like */
	if opts.ConfRoot != "" {
		Config.ConfRoot = opts.ConfRoot
	}
	if Config.ConfRoot == "" {
		if Config.ConfFile != "" {
			Config.ConfRoot = path.Dir(Config.ConfFile)
		} else {
			Config.ConfRoot = "."
		}
	}
	Config.Ipaddress = opts.Ipaddress
	if opts.Port != 0 {
		Config.Port = opts.Port
	}
	if Config.Port == 0 {
		Config.Port = 4545
	}
	if opts.UseSSL {
		Config.UseSSL = opts.UseSSL
	}
	if opts.SslCert != "" {
		Config.SslCert = opts.SslCert
	}
	if opts.SslKey != "" {
		Config.SslKey = opts.SslKey
	}
	if opts.HttpsUrls {
		Config.HttpsUrls = opts.HttpsUrls
	}
	// SSL setup: well-known plain/TLS ports override the flag.
	if Config.Port == 80 {
		Config.UseSSL = false
	} else if Config.Port == 443 {
		Config.UseSSL = true
	}
	if Config.UseSSL {
		if Config.SslCert == "" || Config.SslKey == "" {
			log.Println("SSL mode requires specifying both a certificate and a key file.")
			os.Exit(1)
		}
		/* If the SSL cert and key are not absolute files, join them
		 * with the conf root */
		if !path.IsAbs(Config.SslCert) {
			Config.SslCert = path.Join(Config.ConfRoot, Config.SslCert)
		}
		if !path.IsAbs(Config.SslKey) {
			Config.SslKey = path.Join(Config.ConfRoot, Config.SslKey)
		}
	}
	Config.DebugLevel = len(opts.Verbose)
	if opts.TimeSlew != "" {
		Config.TimeSlew = opts.TimeSlew
	}
	if Config.TimeSlew != "" {
		d, derr := time.ParseDuration(Config.TimeSlew)
		if derr != nil {
			log.Println("Error parsing time-slew:", derr)
			os.Exit(1)
		}
		Config.TimeSlewDur = d
	} else {
		Config.TimeSlewDur, _ = time.ParseDuration("15m")
	}
	if opts.UseAuth {
		Config.UseAuth = opts.UseAuth
	}
	if opts.DisableWebUI {
		Config.DisableWebUI = opts.DisableWebUI
	}
	return nil
}
// ListenAddr returns the "host:port" address goiardi is configured to
// listen on.
func ListenAddr() string {
	// Format directly; the old intermediate listen_addr variable used
	// non-idiomatic underscore naming.
	return fmt.Sprintf("%s:%d", Config.Ipaddress, Config.Port)
}
// ServerHostname returns the hostname (plus port, unless it is a standard
// HTTP/HTTPS port) goiardi is configured to use.
func ServerHostname() string {
	hostname := Config.Hostname
	// Omit the port suffix for the well-known ports 80 and 443.
	if Config.Port != 80 && Config.Port != 443 {
		hostname = fmt.Sprintf("%s:%d", hostname, Config.Port)
	}
	return hostname
}
// ServerBaseURL returns the base URL for this server, choosing https when
// SSL is in use or https URLs were explicitly requested.
func ServerBaseURL() string {
	scheme := "http"
	if Config.UseSSL || Config.HttpsUrls {
		scheme = "https"
	}
	return fmt.Sprintf("%s://%s", scheme, ServerHostname())
}
Bump version number for release
/* Goiardi configuration. */
/*
* Copyright (c) 2013-2014, Jeremy Bingham (<jbingham@gmail.com>)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package config parses command line flags and config files, and defines
// options used elsewhere in goiardi.
package config
import (
"github.com/jessevdk/go-flags"
"github.com/BurntSushi/toml"
"os"
"log"
"fmt"
"time"
"path"
)
/* Master struct for configuration. */
type Conf struct {
	Ipaddress string // IP to bind; empty means all interfaces
	Port int // 0 means "use the default 4545"; 80/443 force SSL off/on
	Hostname string // falls back to the kernel hostname, then "localhost"
	ConfFile string `toml:"conf-file"`
	IndexFile string `toml:"index-file"`
	DataStoreFile string `toml:"data-file"`
	DebugLevel int `toml:"debug-level"` // set from the number of -V flags
	FreezeInterval int `toml:"freeze-interval"` // seconds; defaults to 300
	FreezeData bool `toml:"freeze-data"` // derived: true when index+data files (or MySQL) are configured
	LogFile string `toml:"log-file"`
	UseAuth bool `toml:"use-auth"`
	TimeSlew string `toml:"time-slew"`
	TimeSlewDur time.Duration // parsed form of TimeSlew; defaults to 15m
	ConfRoot string `toml:"conf-root"` // base dir for relative cert/key paths
	UseSSL bool `toml:"use-ssl"`
	SslCert string `toml:"ssl-cert"`
	SslKey string `toml:"ssl-key"`
	HttpsUrls bool `toml:"https-urls"`
	DisableWebUI bool `toml:"disable-webui"`
	UseMySQL bool `toml:"use-mysql"`
	MySQL MySQLdb `toml:"mysql"`
	LocalFstoreDir string `toml:"local-filestore-dir"` // mandatory in SQL mode
}
// MySQL connection options, read from the [mysql] section of the config
// file.
type MySQLdb struct {
	Username string
	Password string
	Protocol string
	Address string
	Port string // defaults to "3306" when MySQL is in use
	Dbname string
	ExtraParams map[string]string `toml:"extra_params"`
}
/* Struct for command line options. The description tags double as the
 * --help output rendered by go-flags. */
type Options struct {
	Version bool `short:"v" long:"version" description:"Print version info."`
	Verbose []bool `short:"V" long:"verbose" description:"Show verbose debug information. (not implemented)"`
	ConfFile string `short:"c" long:"config" description:"Specify a config file to use."`
	Ipaddress string `short:"I" long:"ipaddress" description:"Listen on a specific IP address."`
	Hostname string `short:"H" long:"hostname" description:"Hostname to use for this server. Defaults to hostname reported by the kernel."`
	Port int `short:"P" long:"port" description:"Port to listen on. If port is set to 443, SSL will be activated. (default: 4545)"`
	IndexFile string `short:"i" long:"index-file" description:"File to save search index data to."`
	DataStoreFile string `short:"D" long:"data-file" description:"File to save data store data to."`
	FreezeInterval int `short:"F" long:"freeze-interval" description:"Interval in seconds to freeze in-memory data structures to disk (requires -i/--index-file and -D/--data-file options to be set). (Default 300 seconds/5 minutes.)"`
	LogFile string `short:"L" long:"log-file" description:"Log to file X"`
	TimeSlew string `long:"time-slew" description:"Time difference allowed between the server's clock at the time in the X-OPS-TIMESTAMP header. Formatted like 5m, 150s, etc. Defaults to 15m."`
	ConfRoot string `long:"conf-root" description:"Root directory for configs and certificates. Default: the directory the config file is in, or the current directory if no config file is set."`
	UseAuth bool `short:"A" long:"use-auth" description:"Use authentication. Default: false."`
	UseSSL bool `long:"use-ssl" description:"Use SSL for connections. If --port is set to 443, this will automatically be turned on. If it is set to 80, it will automatically be turned off. Default: off. Requires --ssl-cert and --ssl-key."`
	SslCert string `long:"ssl-cert" description:"SSL certificate file. If a relative path, will be set relative to --conf-root."`
	SslKey string `long:"ssl-key" description:"SSL key file. If a relative path, will be set relative to --conf-root."`
	HttpsUrls bool `long:"https-urls" description:"Use 'https://' in URLs to server resources if goiardi is not using SSL for its connections. Useful when goiardi is sitting behind a reverse proxy that uses SSL, but is communicating with the proxy over HTTP."`
	DisableWebUI bool `long:"disable-webui" description:"If enabled, disables connections and logins to goiardi over the webui interface."`
	UseMySQL bool `long:"use-mysql" description:"Use a MySQL database for data storage. Configure database options in the config file."`
	LocalFstoreDir string `long:"local-filestore-dir" description:"Directory to save uploaded files in. Optional when running in in-memory mode, *mandatory* for SQL mode."`
}
// Version information, grouped in a single const block.
const (
	// Version is the goiardi version.
	Version = "0.5.0"
	// ChefVersion is the Chef Server version we're at least aiming for,
	// even if support is not yet complete.
	ChefVersion = "11.0.11"
)
/* The general plan is to read the command-line options, then parse the config
 * file, fill in the config struct with those values, then apply the
 * command-line options to the config struct. We read the cli options first so
 * we know to look for a different config file if needed, but otherwise the
 * command line options override what's in the config file. */

// InitConfig returns a fresh, zero-valued configuration struct.
func InitConfig() *Conf { return new(Conf) }

// Config holds the options specified on the command line or in the config
// file.
var Config = InitConfig()
// ParseConfigOptions reads and applies arguments from the command line, then
// merges in the optional config file; command-line options take precedence.
// Fatal configuration problems are logged and terminate the process.
func ParseConfigOptions() error {
	var opts = &Options{ }
	_, err := flags.Parse(opts)
	if err != nil {
		if err.(*flags.Error).Type == flags.ErrHelp {
			os.Exit(0)
		} else {
			log.Println(err)
			os.Exit(1)
		}
	}
	if opts.Version {
		fmt.Printf("goiardi version %s (aiming for compatibility with Chef Server version %s).\n", Version, ChefVersion)
		os.Exit(0)
	}
	/* Load the config file. Command-line options have precedence over
	 * config file options. */
	if opts.ConfFile != "" {
		if _, err := toml.DecodeFile(opts.ConfFile, Config); err != nil {
			// Log and exit like every other fatal path here; the old
			// panic(err) made the following os.Exit(1) unreachable.
			log.Println(err)
			os.Exit(1)
		}
		Config.ConfFile = opts.ConfFile
		Config.FreezeData = false
	}
	if opts.Hostname != "" {
		Config.Hostname = opts.Hostname
	} else {
		if Config.Hostname == "" {
			Config.Hostname, err = os.Hostname()
			if err != nil {
				log.Println(err)
				Config.Hostname = "localhost"
			}
		}
	}
	if opts.DataStoreFile != "" {
		Config.DataStoreFile = opts.DataStoreFile
	}
	if opts.IndexFile != "" {
		Config.IndexFile = opts.IndexFile
	}
	// Use MySQL?
	if opts.UseMySQL {
		Config.UseMySQL = opts.UseMySQL
	}
	if Config.DataStoreFile != "" && Config.UseMySQL {
		err := fmt.Errorf("The MySQL and data store options may not be specified together.")
		log.Println(err)
		os.Exit(1)
	}
	if !((Config.DataStoreFile == "" && Config.IndexFile == "") || ((Config.DataStoreFile != "" || Config.UseMySQL) && Config.IndexFile != "")) {
		err := fmt.Errorf("-i and -D must either both be specified, or not specified.")
		// Log and exit for consistency; panic made os.Exit unreachable.
		log.Println(err)
		os.Exit(1)
	}
	if Config.UseMySQL && Config.IndexFile == "" {
		err := fmt.Errorf("An index file must be specified with -i or --index-file (or the 'index-file' config file option) when running with a MySQL backend.")
		log.Println(err)
		os.Exit(1)
	}
	if Config.IndexFile != "" && (Config.DataStoreFile != "" || Config.UseMySQL) {
		Config.FreezeData = true
	}
	if opts.LogFile != "" {
		Config.LogFile = opts.LogFile
	}
	if Config.LogFile != "" {
		lfp, lerr := os.Create(Config.LogFile)
		if lerr != nil {
			// Report the file-creation error itself; the old code
			// logged the stale (possibly nil) err variable.
			log.Println(lerr)
			os.Exit(1)
		}
		log.SetOutput(lfp)
	}
	/* Database options */
	// Don't bother setting a default mysql port if mysql isn't used
	if Config.UseMySQL {
		if Config.MySQL.Port == "" {
			Config.MySQL.Port = "3306"
		}
	}
	if opts.LocalFstoreDir != "" {
		Config.LocalFstoreDir = opts.LocalFstoreDir
	}
	if Config.LocalFstoreDir == "" && Config.UseMySQL {
		err := fmt.Errorf("local-filestore-dir must be set when running goiardi in SQL mode")
		log.Println(err)
		os.Exit(1)
	}
	if !Config.FreezeData && (opts.FreezeInterval != 0 || Config.FreezeInterval != 0) {
		log.Printf("FYI, setting the freeze data interval's not especially useful without setting the index and data files.")
	}
	if opts.FreezeInterval != 0 {
		Config.FreezeInterval = opts.FreezeInterval
	}
	if Config.FreezeInterval == 0 {
		Config.FreezeInterval = 300
	}
	/* Root directory for certs and the like */
	if opts.ConfRoot != "" {
		Config.ConfRoot = opts.ConfRoot
	}
	if Config.ConfRoot == "" {
		if Config.ConfFile != "" {
			Config.ConfRoot = path.Dir(Config.ConfFile)
		} else {
			Config.ConfRoot = "."
		}
	}
	Config.Ipaddress = opts.Ipaddress
	if opts.Port != 0 {
		Config.Port = opts.Port
	}
	if Config.Port == 0 {
		Config.Port = 4545
	}
	if opts.UseSSL {
		Config.UseSSL = opts.UseSSL
	}
	if opts.SslCert != "" {
		Config.SslCert = opts.SslCert
	}
	if opts.SslKey != "" {
		Config.SslKey = opts.SslKey
	}
	if opts.HttpsUrls {
		Config.HttpsUrls = opts.HttpsUrls
	}
	// SSL setup: well-known plain/TLS ports override the flag.
	if Config.Port == 80 {
		Config.UseSSL = false
	} else if Config.Port == 443 {
		Config.UseSSL = true
	}
	if Config.UseSSL {
		if Config.SslCert == "" || Config.SslKey == "" {
			log.Println("SSL mode requires specifying both a certificate and a key file.")
			os.Exit(1)
		}
		/* If the SSL cert and key are not absolute files, join them
		 * with the conf root */
		if !path.IsAbs(Config.SslCert) {
			Config.SslCert = path.Join(Config.ConfRoot, Config.SslCert)
		}
		if !path.IsAbs(Config.SslKey) {
			Config.SslKey = path.Join(Config.ConfRoot, Config.SslKey)
		}
	}
	Config.DebugLevel = len(opts.Verbose)
	if opts.TimeSlew != "" {
		Config.TimeSlew = opts.TimeSlew
	}
	if Config.TimeSlew != "" {
		d, derr := time.ParseDuration(Config.TimeSlew)
		if derr != nil {
			log.Println("Error parsing time-slew:", derr)
			os.Exit(1)
		}
		Config.TimeSlewDur = d
	} else {
		Config.TimeSlewDur, _ = time.ParseDuration("15m")
	}
	if opts.UseAuth {
		Config.UseAuth = opts.UseAuth
	}
	if opts.DisableWebUI {
		Config.DisableWebUI = opts.DisableWebUI
	}
	return nil
}
// ListenAddr returns the "host:port" address goiardi is configured to
// listen on.
func ListenAddr() string {
	// Format directly; the old intermediate listen_addr variable used
	// non-idiomatic underscore naming.
	return fmt.Sprintf("%s:%d", Config.Ipaddress, Config.Port)
}
// ServerHostname returns the hostname (plus port, unless it is a standard
// HTTP/HTTPS port) goiardi is configured to use.
func ServerHostname() string {
	hostname := Config.Hostname
	// Omit the port suffix for the well-known ports 80 and 443.
	if Config.Port != 80 && Config.Port != 443 {
		hostname = fmt.Sprintf("%s:%d", hostname, Config.Port)
	}
	return hostname
}
// ServerBaseURL returns the base URL for this server, choosing https when
// SSL is in use or https URLs were explicitly requested.
func ServerBaseURL() string {
	scheme := "http"
	if Config.UseSSL || Config.HttpsUrls {
		scheme = "https"
	}
	return fmt.Sprintf("%s://%s", scheme, ServerHostname())
}
|
package config
import (
"io/ioutil"
"os"
"gopkg.in/yaml.v2"
)
// Replace describes a comment substring match and the associated
// transformations if the transaction matches.
type Replace struct {
	// Treat it as a match if we see this as a substring in a comment
	Comment string
	// Change the payee to this
	Payee string
	// Treated as a go template. We replace the posting that is
	// associated with the account with this posting or postings.
	Posting string
}
// Config encapsulates replacements for each posting account
type Config struct {
	// PostingAccount maps a posting account name to the replacement
	// rules that apply to it.
	PostingAccount map[string][]Replace
}
// ParseYamlConfig reads a configuration from a yaml file.
func ParseYamlConfig(file string) (config *Config, err error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	data, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, err
	}

	// The YAML document is a map keyed by posting account.
	config = &Config{}
	err = yaml.Unmarshal(data, &config.PostingAccount)
	return config, err
}
finish rename
package config
import (
"io/ioutil"
"os"
"gopkg.in/yaml.v2"
)
// Replace describes a comment substring match and the associated
// transformations applied when a transaction matches.
type Replace struct {
	// Treat it as a match if this appears as a substring of a comment.
	Comment string
	// Change the payee to this value.
	Payee string
	// Treated as a go template. We replace the posting that is
	// associated with the account with this posting or postings.
	Posting string
}
// Config encapsulates replacements for each posting account
type Config struct {
	// PostingAccount maps a posting account name to the replacement
	// rules that apply to it.
	PostingAccount map[string][]Replace
}
// ParseYamlConfig reads a configuration from a yaml file.
func ParseYamlConfig(file string) (config *Config, err error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	data, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, err
	}

	// The YAML document is a map keyed by posting account.
	config = &Config{}
	err = yaml.Unmarshal(data, &config.PostingAccount)
	return config, err
}
|
package config
// GlobalConfig is the process-wide configuration instance.
var GlobalConfig SystemConfig

// SystemConfig holds judge-host settings; the toml tags name the keys in
// the config file.
type SystemConfig struct {
	HostName string `toml:"host_name"`
	EndpointUser string `toml:"endpoint_user"`
	EndpointName string `toml:"endpoint_name"`
	EndpointURL string `toml:"endpoint_url"`
	MaxCacheSize int `toml:"max_cache_size"`
	EndpointPassword string `toml:"endpoint_password"`
	JudgeRoot string `toml:"judge_root"`
	DockerImage string `toml:"docker_image"`
	DockerServer string `toml:"docker_server"`
	CacheRoot string `toml:"cache_root"`
	RootMemory int64 `toml:"root_mem"`
}
// JudgeInfo describes one judging task. The json tags mirror the judging
// endpoint's field names — presumably a DOMjudge-style API; verify against
// the server before changing any tag.
type JudgeInfo struct {
	SubmitID int64 `json:"submitid"`
	ContestID int64 `json:"cid"`
	TeamID int64 `json:"teamid"`
	JudgingID int64 `json:"judgingid"`
	ProblemID int64 `json:"probid"`
	Language string `json:"langid"`
	TimeLimit int64 `json:"maxruntime"`
	MemLimit int64 `json:"memlimit"`
	OutputLimit int64 `json:"output_limit"`
	BuildZip string `json:"compile_script"`
	BuildZipMD5 string `json:"compile_script_md5sum"`
	RunZip string `json:"run"`
	RunZipMD5 string `json:"run_md5sum"`
	CompareZip string `json:"compare"`
	CompareZipMD5 string `json:"compare_md5sum"`
	CompareArgs string `json:"compare_args"`
}
type TestcaseInfo struct {
TestcaseID int64 `json:"testcaseid"`
Rank int64 `json:"rank"`
ProblemID int64 `json:"probid"`
MD5SumInput string `json:"md5sum_input"`
MD5SumOutput string `json:"md5sum_input"`
}
// SubmissionInfo wraps the files belonging to one submission.
// NOTE(review): info is unexported, so encoding/json ignores it entirely and
// the empty `json:""` tag has no effect — confirm whether this field should
// be exported (e.g. Info ... `json:"files"`).
type SubmissionInfo struct {
	info []SubmissionFileInfo `json:""`
}
type SubmissionFileInfo struct {
FileName string `json:"filename"`
Content string `json:"contetn"`
}
Fix JSON naming error
package config
// GlobalConfig is the process-wide configuration instance.
var GlobalConfig SystemConfig

// SystemConfig holds judge-host settings; the toml tags name the keys in
// the config file.
type SystemConfig struct {
	HostName string `toml:"host_name"`
	EndpointUser string `toml:"endpoint_user"`
	EndpointName string `toml:"endpoint_name"`
	EndpointURL string `toml:"endpoint_url"`
	MaxCacheSize int `toml:"max_cache_size"`
	EndpointPassword string `toml:"endpoint_password"`
	JudgeRoot string `toml:"judge_root"`
	DockerImage string `toml:"docker_image"`
	DockerServer string `toml:"docker_server"`
	CacheRoot string `toml:"cache_root"`
	RootMemory int64 `toml:"root_mem"`
}
// JudgeInfo describes one judging task. The json tags mirror the judging
// endpoint's field names — presumably a DOMjudge-style API; verify against
// the server before changing any tag.
type JudgeInfo struct {
	SubmitID int64 `json:"submitid"`
	ContestID int64 `json:"cid"`
	TeamID int64 `json:"teamid"`
	JudgingID int64 `json:"judgingid"`
	ProblemID int64 `json:"probid"`
	Language string `json:"langid"`
	TimeLimit int64 `json:"maxruntime"`
	MemLimit int64 `json:"memlimit"`
	OutputLimit int64 `json:"output_limit"`
	BuildZip string `json:"compile_script"`
	BuildZipMD5 string `json:"compile_script_md5sum"`
	RunZip string `json:"run"`
	RunZipMD5 string `json:"run_md5sum"`
	CompareZip string `json:"compare"`
	CompareZipMD5 string `json:"compare_md5sum"`
	CompareArgs string `json:"compare_args"`
}
// TestcaseInfo describes a single test case delivered by the judging API.
type TestcaseInfo struct {
	TestcaseID int64 `json:"testcaseid"`
	Rank int64 `json:"rank"`
	ProblemID int64 `json:"probid"`
	MD5SumInput string `json:"md5sum_input"`
	MD5SumOutput string `json:"md5sum_output"`
}
// SubmissionInfo wraps the files belonging to one submission.
// NOTE(review): info is unexported, so encoding/json ignores it entirely and
// the empty `json:""` tag has no effect — confirm whether this field should
// be exported (e.g. Info ... `json:"files"`).
type SubmissionInfo struct {
	info []SubmissionFileInfo `json:""`
}
type SubmissionFileInfo struct {
FileName string `json:"filename"`
Content string `json:"contetn"`
}
|
// Package config implements chat service config.
package config
import (
"encoding/json"
"fmt"
"log"
"os"
)
// ServiceConfig is a chat service config.
type ServiceConfig struct {
	Address string `json:"address"` // host:port — default "localhost:8085"
	WorkDir string `json:"work_dir"`
	AdminEmail string `json:"admin_email"`
	SMTPUser string `json:"smtp_user"`
	SMTPPasswordFile string `json:"smtp_password_file"` // presumably a path to a file holding the SMTP password — confirm with the mailer code
	PatchDir string `json:"patch_dir"` // directory for received .patch files
	Debug bool `json:"debug"`
}
// Config is the loaded config, pre-populated with defaults that apply when
// no config file overrides them.
var Config = &ServiceConfig{
	Address: "localhost:8085",
	WorkDir: os.Getenv("HOME") + "/go/work/",
	AdminEmail: "",
}

// configFile is the default config path; LoadConfig may override it.
var configFile = "/usr/local/etc/chatd.json"
// LoadConfig reads the JSON config at fname (or the default path when fname
// is empty) into the package-level Config, panicking on any failure.
func LoadConfig(fname string) {
	if fname != "" {
		configFile = fname
	}
	f, err := os.Open(configFile)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// Decode straight from the file stream into the shared config.
	if err := json.NewDecoder(f).Decode(Config); err != nil {
		panic(err)
	}
	log.Println(configFile, "config loaded")
}
// PrintConfig prints loaded config to stdout.
func PrintConfig() {
	fmt.Println("config file:", configFile)
	fmt.Println("loaded config:")
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", " ") // one-space indent for readability
	enc.Encode(Config)
}
Set indent
// Package config implements chat service config.
package config
import (
"encoding/json"
"fmt"
"log"
"os"
)
// ServiceConfig is a chat service config.
type ServiceConfig struct {
	Address string `json:"address"` // host:port — default "localhost:8085"
	WorkDir string `json:"work_dir"`
	AdminEmail string `json:"admin_email"`
	SMTPUser string `json:"smtp_user"`
	SMTPPasswordFile string `json:"smtp_password_file"` // presumably a path to a file holding the SMTP password — confirm with the mailer code
	PatchDir string `json:"patch_dir"` // directory for received .patch files
	Debug bool `json:"debug"`
}
// Config is the loaded config, pre-populated with defaults that apply when
// no config file overrides them.
var Config = &ServiceConfig{
	Address: "localhost:8085",
	WorkDir: os.Getenv("HOME") + "/go/work/",
	AdminEmail: "",
}

// configFile is the default config path; LoadConfig may override it.
var configFile = "/usr/local/etc/chatd.json"
// LoadConfig reads the JSON config at fname (or the default path when fname
// is empty) into the package-level Config, panicking on any failure.
func LoadConfig(fname string) {
	if fname != "" {
		configFile = fname
	}
	f, err := os.Open(configFile)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// Decode straight from the file stream into the shared config.
	if err := json.NewDecoder(f).Decode(Config); err != nil {
		panic(err)
	}
	log.Println(configFile, "config loaded")
}
// PrintConfig dumps the config file path and the loaded settings to stdout
// as indented JSON.
func PrintConfig() {
	fmt.Println("config file:", configFile)
	fmt.Println("loaded config:")
	out := json.NewEncoder(os.Stdout)
	out.SetIndent("", " ")
	out.Encode(Config)
}
|
// Package config contains the configuration logic for CFSSL.
package config
import (
"crypto/x509"
"encoding/asn1"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"regexp"
"strconv"
"strings"
"time"
"github.com/cloudflare/cfssl/auth"
cferr "github.com/cloudflare/cfssl/errors"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/log"
ocspConfig "github.com/cloudflare/cfssl/ocsp/config"
)
// A CSRWhitelist stores booleans for fields in the CSR. If a CSRWhitelist is
// not present in a SigningProfile, all of these fields may be copied from the
// CSR into the signed certificate. If a CSRWhitelist *is* present in a
// SigningProfile, only those fields with a `true` value in the CSRWhitelist may
// be copied from the CSR to the signed certificate. Note that some of these
// fields, like Subject, can be provided or partially provided through the API.
// Since API clients are expected to be trusted, but CSRs are not, fields
// provided through the API are not subject to whitelisting through this
// mechanism.
type CSRWhitelist struct {
	// Subject and key/signature-related fields.
	Subject, PublicKeyAlgorithm, PublicKey, SignatureAlgorithm bool
	// Subject-alternative-name style fields.
	DNSNames, IPAddresses bool
}
// OID is our own version of asn1's ObjectIdentifier, so we can define a custom
// JSON marshal / unmarshal (see MarshalJSON / UnmarshalJSON below).
type OID asn1.ObjectIdentifier
// CertificatePolicy is a flattening of the ASN.1 PolicyInformation structure from
// https://tools.ietf.org/html/rfc3280.html#page-106.
// Valid values of Type are "id-qt-unotice" and "id-qt-cps"
type CertificatePolicy struct {
	ID OID // policy identifier
	Type string // qualifier type: "id-qt-unotice", "id-qt-cps", or empty
	Qualifier string // qualifier payload — presumably notice text or a CPS URI; confirm at the signer
}
// A SigningProfile stores information that the CA needs to store
// signature policy.
type SigningProfile struct {
	Usage []string `json:"usages"`
	IssuerURL []string `json:"issuer_urls"`
	OCSP string `json:"ocsp_url"`
	CRL string `json:"crl_url"`
	CA bool `json:"is_ca"`
	OCSPNoCheck bool `json:"ocsp_no_check"`
	ExpiryString string `json:"expiry"` // parsed into Expiry by populate
	BackdateString string `json:"backdate"` // parsed into Backdate by populate
	AuthKeyName string `json:"auth_key"` // resolved into Provider by populate
	RemoteName string `json:"remote"` // resolved into RemoteServer by populate
	NotBefore time.Time `json:"not_before"`
	NotAfter time.Time `json:"not_after"`
	// The untagged fields below are derived/auxiliary; Expiry, Backdate,
	// Provider, and RemoteServer are filled in by populate.
	Policies []asn1.ObjectIdentifier
	Expiry time.Duration
	Backdate time.Duration
	Provider auth.Provider
	RemoteServer string
	UseSerialSeq bool
	CSRWhitelist *CSRWhitelist
	NameWhitelist *regexp.Regexp
}
// UnmarshalJSON unmarshals a JSON string into an OID.
func (oid *OID) UnmarshalJSON(data []byte) (err error) {
	// Check the length first so inputs shorter than two bytes (empty data,
	// or a lone quote) cannot index out of range / slice negatively.
	if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
		return errors.New("OID JSON string not wrapped in quotes." + string(data))
	}
	data = data[1 : len(data)-1]
	parsedOid, err := parseObjectIdentifier(string(data))
	if err != nil {
		return err
	}
	*oid = OID(parsedOid)
	log.Debugf("Parsed OID %v", *oid)
	return
}
// MarshalJSON marshals an oid into a JSON string.
func (oid OID) MarshalJSON() ([]byte, error) {
	// asn1.ObjectIdentifier's String method renders the dotted form.
	quoted := `"` + asn1.ObjectIdentifier(oid).String() + `"`
	return []byte(quoted), nil
}
// parseObjectIdentifier converts a dotted OID string (e.g. "1.2.840.113549")
// into an asn1.ObjectIdentifier, returning an error when the string is not a
// dot-separated sequence of decimal integers.
func parseObjectIdentifier(oidString string) (oid asn1.ObjectIdentifier, err error) {
	// Anchor the pattern so the entire string must be a valid OID; the old
	// unanchored pattern accepted any string merely containing one.
	validOID, err := regexp.MatchString(`^\d+(\.\d+)*$`, oidString)
	if err != nil {
		return
	}
	if !validOID {
		err = errors.New("Invalid OID")
		return
	}

	segments := strings.Split(oidString, ".")
	oid = make(asn1.ObjectIdentifier, len(segments))
	for i, intString := range segments {
		oid[i], err = strconv.Atoi(intString)
		if err != nil {
			return
		}
	}
	return
}
// timeFormat is a Go reference-time layout (RFC 3339 without a zone offset).
// NOTE(review): its consumer is outside this chunk — confirm usage before
// changing it.
const timeFormat = "2006-01-02T15:04:05"
// populate is used to fill in the fields that are not in JSON
//
// First, the ExpiryString parameter is needed to parse
// expiration timestamps from JSON. The JSON decoder is not able to
// decode a string time duration to a time.Duration, so this is called
// when loading the configuration to properly parse and fill out the
// Expiry parameter.
// This function is also used to create references to the auth key
// and default remote for the profile.
// It returns true if ExpiryString is a valid representation of a
// time.Duration, and the AuthKeyString and RemoteName point to
// valid objects. It returns false otherwise.
func (p *SigningProfile) populate(cfg *Config) error {
	if p == nil {
		return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("can't parse nil profile"))
	}
	var err error
	if p.RemoteName == "" {
		log.Debugf("parse expiry in profile")
		if p.ExpiryString == "" {
			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("empty expiry string"))
		}
		dur, err := time.ParseDuration(p.ExpiryString)
		if err != nil {
			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
		}
		log.Debugf("expiry is valid")
		p.Expiry = dur
		if p.BackdateString != "" {
			dur, err = time.ParseDuration(p.BackdateString)
			if err != nil {
				return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
			}
			p.Backdate = dur
		}
		if !p.NotBefore.IsZero() && !p.NotAfter.IsZero() && p.NotAfter.Before(p.NotBefore) {
			// Wrap a descriptive error; the outer err is always nil
			// here, so wrapping it produced an empty error.
			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
				errors.New("not_after is earlier than not_before"))
		}
		if len(p.Policies) > 0 {
			for _, policy := range p.Policies {
				if policy.Type != "" && policy.Type != "id-qt-unotice" && policy.Type != "id-qt-cps" {
					// Same nil-err fix as above.
					return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
						errors.New("invalid policy qualifier type"))
				}
			}
		}
	} else {
		log.Debug("match remote in profile to remotes section")
		if remote := cfg.Remotes[p.RemoteName]; remote != "" {
			if err := p.updateRemote(remote); err != nil {
				return err
			}
		} else {
			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
				errors.New("failed to find remote in remotes section"))
		}
	}
	if p.AuthKeyName != "" {
		log.Debug("match auth key in profile to auth_keys section")
		if key, ok := cfg.AuthKeys[p.AuthKeyName]; ok {
			if key.Type == "standard" {
				p.Provider, err = auth.New(key.Key, nil)
				if err != nil {
					log.Debugf("failed to create new standard auth provider: %v", err)
					return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
						errors.New("failed to create new standard auth provider"))
				}
			} else {
				log.Debugf("unknown authentication type %v", key.Type)
				return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
					errors.New("unknown authentication type"))
			}
		} else {
			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
				errors.New("failed to find auth_key in auth_keys section"))
		}
	}
	return nil
}
// updateRemote points the profile's remote server at the given
// hostname:port string; an empty string leaves the profile untouched.
func (p *SigningProfile) updateRemote(remote string) error {
	if remote == "" {
		return nil
	}
	p.RemoteServer = remote
	return nil
}
// OverrideRemotes points every profile (including the default) at the given
// hostname:port remote; an empty string is a no-op.
func (p *Signing) OverrideRemotes(remote string) error {
	if remote == "" {
		return nil
	}
	for _, profile := range p.Profiles {
		if err := profile.updateRemote(remote); err != nil {
			return err
		}
	}
	return p.Default.updateRemote(remote)
}
// NeedsRemoteSigner reports whether any profile (or the default) has a
// remote configured.
func (p *Signing) NeedsRemoteSigner() bool {
	if p.Default.RemoteName != "" {
		return true
	}
	for _, profile := range p.Profiles {
		if profile.RemoteName != "" {
			return true
		}
	}
	return false
}
// NeedsLocalSigner reports whether any profile (or the default) does not
// have a remote configured, i.e. must be signed locally.
func (p *Signing) NeedsLocalSigner() bool {
	if p.Default.RemoteName == "" {
		return true
	}
	for _, profile := range p.Profiles {
		if profile.RemoteName == "" {
			return true
		}
	}
	return false
}
// Usages parses the profile's key-use names into X.509 key usages and
// extended key usages; names matching neither table are collected in unk.
func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) {
	for _, name := range p.Usage {
		if usage, ok := KeyUsage[name]; ok {
			ku |= usage
			continue
		}
		if extUsage, ok := ExtKeyUsage[name]; ok {
			eku = append(eku, extUsage)
			continue
		}
		unk = append(unk, name)
	}
	return
}
// validProfile reports whether the profile is usable. A remote profile needs
// an initialized remote signer (and, when an auth key is named, an auth
// provider). A non-default local profile needs at least one recognized key
// usage; a default local profile needs at least an expiry.
func (p *SigningProfile) validProfile(isDefault bool) bool {
	if p == nil {
		return false
	}

	if p.RemoteName != "" {
		log.Debugf("validate remote profile")
		if p.RemoteServer == "" {
			log.Debugf("invalid remote profile: no remote signer specified")
			return false
		}
		if p.AuthKeyName != "" && p.Provider == nil {
			log.Debugf("invalid remote profile: auth key name is defined but no auth provider is set")
			return false
		}
		log.Debugf("profile is valid")
		return true
	}

	log.Debugf("validate local profile")
	if isDefault {
		if p.Expiry == 0 {
			log.Debugf("invalid local profile: no expiry set")
			return false
		}
	} else {
		if len(p.Usage) == 0 {
			log.Debugf("invalid local profile: no usages specified")
			return false
		}
		if _, _, unk := p.Usages(); len(unk) == len(p.Usage) {
			log.Debugf("invalid local profile: no valid usages")
			return false
		}
	}
	log.Debugf("profile is valid")
	return true
}
// Signing codifies the signature configuration policy for a CA.
type Signing struct {
// Profiles maps a profile name to its signing policy.
Profiles map[string]*SigningProfile `json:"profiles"`
// Default is the policy used when no named profile applies.
Default *SigningProfile `json:"default"`
}
// Config stores configuration information for the CA.
type Config struct {
Signing *Signing `json:"signing"`
OCSP *ocspConfig.Config `json:"ocsp"`
AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"`
Remotes map[string]string `json:"remotes,omitempty"`
}
// Valid ensures that Config is a valid configuration. It should be
// called immediately after parsing a configuration file. It delegates
// to Signing.Valid, which tolerates a nil receiver.
func (c *Config) Valid() bool {
return c.Signing.Valid()
}
// Valid checks the signature policies, ensuring they are valid
// policies. A policy is valid if it has defined at least key usages
// to be used, and a valid default profile has defined at least a
// default expiration. A nil *Signing is reported as invalid, making
// this safe to call on a Config decoded without a signing section.
func (p *Signing) Valid() bool {
if p == nil {
return false
}
log.Debugf("validating configuration")
// The default profile is validated with relaxed usage requirements
// (it only needs an expiry); named profiles must declare usages.
if !p.Default.validProfile(true) {
log.Debugf("default profile is invalid")
return false
}
for _, sp := range p.Profiles {
if !sp.validProfile(false) {
log.Debugf("invalid profile")
return false
}
}
return true
}
// KeyUsage contains a mapping of string names to key usages.
// NOTE(review): "content committment" is misspelled (standard spelling
// is "content commitment"), but the misspelled form is the key that
// existing configuration files match against; correcting it would
// silently break those configurations.
var KeyUsage = map[string]x509.KeyUsage{
"signing": x509.KeyUsageDigitalSignature,
"digital signature": x509.KeyUsageDigitalSignature,
"content committment": x509.KeyUsageContentCommitment,
"key encipherment": x509.KeyUsageKeyEncipherment,
"data encipherment": x509.KeyUsageDataEncipherment,
"cert sign": x509.KeyUsageCertSign,
"crl sign": x509.KeyUsageCRLSign,
"encipher only": x509.KeyUsageEncipherOnly,
"decipher only": x509.KeyUsageDecipherOnly,
}
// ExtKeyUsage contains a mapping of string names to extended key
// usages. Note that "s/mime" is an alias for "email protection".
var ExtKeyUsage = map[string]x509.ExtKeyUsage{
"any": x509.ExtKeyUsageAny,
"server auth": x509.ExtKeyUsageServerAuth,
"client auth": x509.ExtKeyUsageClientAuth,
"code signing": x509.ExtKeyUsageCodeSigning,
"email protection": x509.ExtKeyUsageEmailProtection,
"s/mime": x509.ExtKeyUsageEmailProtection,
"ipsec end system": x509.ExtKeyUsageIPSECEndSystem,
"ipsec tunnel": x509.ExtKeyUsageIPSECTunnel,
"ipsec user": x509.ExtKeyUsageIPSECUser,
"timestamping": x509.ExtKeyUsageTimeStamping,
"ocsp signing": x509.ExtKeyUsageOCSPSigning,
"microsoft sgc": x509.ExtKeyUsageMicrosoftServerGatedCrypto,
"netscape sgc": x509.ExtKeyUsageNetscapeServerGatedCrypto,
}
// An AuthKey contains an entry for a key used for authentication.
type AuthKey struct {
// Type contains information needed to select the appropriate
// constructor. For example, "standard" for HMAC-SHA-256,
// "standard-ip" for HMAC-SHA-256 incorporating the client's
// IP.
Type string `json:"type"`
// Key contains the key information, such as a hex-encoded
// HMAC key.
Key string `json:"key"`
}
// DefaultConfig returns a default configuration specifying basic key
// usage and a 1 year expiration time. The key usages chosen are
// signing, key encipherment, client auth and server auth.
func DefaultConfig() *SigningProfile {
	profile := &SigningProfile{
		Usage:        []string{"signing", "key encipherment", "server auth", "client auth"},
		Expiry:       helpers.OneYear,
		ExpiryString: "8760h",
	}
	return profile
}
// LoadFile attempts to load the configuration file stored at the path
// and returns the configuration. On error, it returns nil.
func LoadFile(path string) (*Config, error) {
	log.Debugf("loading configuration file from %s", path)
	if path == "" {
		return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid path"))
	}

	contents, readErr := ioutil.ReadFile(path)
	if readErr != nil {
		return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("could not read configuration file"))
	}

	return LoadConfig(contents)
}
// LoadConfig attempts to load the configuration from a byte slice.
// On error, it returns nil and a wrapped policy error.
func LoadConfig(config []byte) (*Config, error) {
	var cfg = &Config{}
	err := json.Unmarshal(config, &cfg)
	if err != nil {
		return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
			errors.New("failed to unmarshal configuration: "+err.Error()))
	}

	// BUG FIX: a configuration without a "signing" section leaves
	// cfg.Signing nil; dereferencing cfg.Signing.Default below would
	// then panic. Reject such configurations explicitly.
	if cfg.Signing == nil {
		return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
			errors.New("has no signing section in configuration"))
	}

	if cfg.Signing.Default == nil {
		log.Debugf("no default given: using default config")
		cfg.Signing.Default = DefaultConfig()
	} else {
		if err := cfg.Signing.Default.populate(cfg); err != nil {
			return nil, err
		}
	}

	for k := range cfg.Signing.Profiles {
		if err := cfg.Signing.Profiles[k].populate(cfg); err != nil {
			return nil, err
		}
	}

	if !cfg.Valid() {
		return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid configuration"))
	}

	log.Debugf("configuration ok")
	return cfg, nil
}
Add ability to set whitelist in config.json
// Package config contains the configuration logic for CFSSL.
package config
import (
"crypto/x509"
"encoding/asn1"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"regexp"
"strconv"
"strings"
"time"
"github.com/cloudflare/cfssl/auth"
cferr "github.com/cloudflare/cfssl/errors"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/log"
ocspConfig "github.com/cloudflare/cfssl/ocsp/config"
)
// A CSRWhitelist stores booleans for fields in the CSR. If a CSRWhitelist is
// not present in a SigningProfile, all of these fields may be copied from the
// CSR into the signed certificate. If a CSRWhitelist *is* present in a
// SigningProfile, only those fields with a `true` value in the CSRWhitelist may
// be copied from the CSR to the signed certificate. Note that some of these
// fields, like Subject, can be provided or partially provided through the API.
// Since API clients are expected to be trusted, but CSRs are not, fields
// provided through the API are not subject to whitelisting through this
// mechanism.
type CSRWhitelist struct {
Subject, PublicKeyAlgorithm, PublicKey, SignatureAlgorithm bool
DNSNames, IPAddresses bool
}
// OID is our own version of asn1's ObjectIdentifier, so we can define a custom
// JSON marshal / unmarshal.
type OID asn1.ObjectIdentifier
// CertificatePolicy is a flattening of the ASN.1 PolicyInformation structure from
// https://tools.ietf.org/html/rfc3280.html#page-106.
// Valid values of Type are "id-qt-unotice" and "id-qt-cps"
type CertificatePolicy struct {
// ID is the policy's object identifier.
ID OID
// Type selects the qualifier kind; see the comment above for valid values.
Type string
// Qualifier carries the qualifier value; its meaning depends on Type
// (presumably a CPS URI or user-notice text — confirm with callers).
Qualifier string
}
// A SigningProfile stores information that the CA needs to store
// signature policy.
type SigningProfile struct {
Usage []string `json:"usages"`
IssuerURL []string `json:"issuer_urls"`
OCSP string `json:"ocsp_url"`
CRL string `json:"crl_url"`
CA bool `json:"is_ca"`
PolicyStrings []string `json:"policies"`
OCSPNoCheck bool `json:"ocsp_no_check"`
// ExpiryString is parsed by populate into the Expiry duration.
ExpiryString string `json:"expiry"`
// BackdateString is parsed by populate into the Backdate duration.
BackdateString string `json:"backdate"`
// AuthKeyName is resolved by populate against Config.AuthKeys into Provider.
AuthKeyName string `json:"auth_key"`
// RemoteName is resolved by populate against Config.Remotes into RemoteServer.
RemoteName string `json:"remote"`
NotBefore time.Time `json:"not_before"`
NotAfter time.Time `json:"not_after"`
// NameWhitelistString is compiled by populate into NameWhitelist.
NameWhitelistString string `json:"name_whitelist"`
// The fields below are not decoded from JSON; they are derived from
// the string fields above when populate runs.
// NOTE(review): populate iterates Policies accessing a .Type field,
// but []asn1.ObjectIdentifier has no such field — confirm whether
// this should be []CertificatePolicy.
Policies []asn1.ObjectIdentifier
Expiry time.Duration
Backdate time.Duration
Provider auth.Provider
RemoteServer string
UseSerialSeq bool
CSRWhitelist *CSRWhitelist
NameWhitelist *regexp.Regexp
}
// UnmarshalJSON unmarshals a JSON string into an OID.
func (oid *OID) UnmarshalJSON(data []byte) (err error) {
	// BUG FIX: guard against inputs shorter than two bytes; indexing
	// data[0] / data[len(data)-1] on such input previously panicked.
	if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
		return errors.New("OID JSON string not wrapped in quotes." + string(data))
	}
	data = data[1 : len(data)-1]
	parsedOid, err := parseObjectIdentifier(string(data))
	if err != nil {
		return err
	}
	*oid = OID(parsedOid)
	log.Debugf("Parsed OID %v", *oid)
	return
}
// MarshalJSON marshals an oid into a JSON string.
func (oid OID) MarshalJSON() ([]byte, error) {
	// Render via the underlying asn1.ObjectIdentifier's String method
	// (what %v resolves to), wrapped in JSON quotes.
	quoted := `"` + asn1.ObjectIdentifier(oid).String() + `"`
	return []byte(quoted), nil
}
// parseObjectIdentifier converts a dotted-decimal string such as
// "1.2.840" into an asn1.ObjectIdentifier. It returns an error for
// strings that are not entirely made up of dot-separated unsigned
// decimal integers.
func parseObjectIdentifier(oidString string) (oid asn1.ObjectIdentifier, err error) {
	// BUG FIX: the pattern is now anchored. The previous expression
	// (`\d(\.\d+)*` without ^/$) matched any substring, so inputs like
	// "-1.2" passed validation and produced an OID with a negative
	// component via the Atoi below.
	validOID, err := regexp.MatchString(`^\d+(\.\d+)*$`, oidString)
	if err != nil {
		return
	}
	if !validOID {
		err = errors.New("Invalid OID")
		return
	}

	segments := strings.Split(oidString, ".")
	oid = make(asn1.ObjectIdentifier, len(segments))
	for i, intString := range segments {
		oid[i], err = strconv.Atoi(intString)
		if err != nil {
			return
		}
	}
	return
}
const timeFormat = "2006-01-02T15:04:05"
// populate fills in the fields that cannot be decoded directly from
// JSON.
//
// For a local profile it parses ExpiryString (and BackdateString, if
// present) into time.Duration values, validates the NotBefore/NotAfter
// window, and checks certificate policy qualifier types. For a remote
// profile it resolves RemoteName against cfg.Remotes. In both cases,
// AuthKeyName is resolved against cfg.AuthKeys into an auth.Provider,
// and NameWhitelistString is compiled into a regexp.
//
// It returns nil on success, or a wrapped policy error describing the
// first problem found.
func (p *SigningProfile) populate(cfg *Config) error {
	if p == nil {
		return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("can't parse nil profile"))
	}

	var err error
	if p.RemoteName == "" {
		log.Debugf("parse expiry in profile")
		if p.ExpiryString == "" {
			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("empty expiry string"))
		}

		dur, err := time.ParseDuration(p.ExpiryString)
		if err != nil {
			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
		}

		log.Debugf("expiry is valid")
		p.Expiry = dur

		if p.BackdateString != "" {
			dur, err = time.ParseDuration(p.BackdateString)
			if err != nil {
				return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
			}
			p.Backdate = dur
		}

		// BUG FIX: the two checks below previously wrapped the local
		// err, which is always nil at this point, yielding errors with
		// no message. Wrap descriptive errors instead.
		if !p.NotBefore.IsZero() && !p.NotAfter.IsZero() && p.NotAfter.Before(p.NotBefore) {
			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
				errors.New("not_after is earlier than not_before"))
		}

		// NOTE(review): Policies is declared []asn1.ObjectIdentifier,
		// which has no Type field; this loop appears to expect
		// []CertificatePolicy — confirm against the struct declaration.
		for _, policy := range p.Policies {
			if policy.Type != "" && policy.Type != "id-qt-unotice" && policy.Type != "id-qt-cps" {
				return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
					errors.New("invalid policy qualifier type"))
			}
		}
	} else {
		log.Debug("match remote in profile to remotes section")
		if remote := cfg.Remotes[p.RemoteName]; remote != "" {
			if err := p.updateRemote(remote); err != nil {
				return err
			}
		} else {
			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
				errors.New("failed to find remote in remotes section"))
		}
	}

	if p.AuthKeyName != "" {
		log.Debug("match auth key in profile to auth_keys section")
		if key, ok := cfg.AuthKeys[p.AuthKeyName]; ok {
			if key.Type == "standard" {
				p.Provider, err = auth.New(key.Key, nil)
				if err != nil {
					log.Debugf("failed to create new standard auth provider: %v", err)
					return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
						errors.New("failed to create new standard auth provider"))
				}
			} else {
				log.Debugf("unknown authentication type %v", key.Type)
				return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
					errors.New("unknown authentication type"))
			}
		} else {
			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
				errors.New("failed to find auth_key in auth_keys section"))
		}
	}

	if p.NameWhitelistString != "" {
		log.Debug("compiling whitelist regular expression")
		rule, err := regexp.Compile(p.NameWhitelistString)
		if err != nil {
			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
				errors.New("failed to compile name whitelist section"))
		}
		p.NameWhitelist = rule
	}

	return nil
}
// updateRemote points the profile's remote server at the given
// hostname:port combination; an empty remote leaves it unchanged.
func (p *SigningProfile) updateRemote(remote string) error {
	if remote == "" {
		return nil
	}
	p.RemoteServer = remote
	return nil
}
// OverrideRemotes updates every profile in the signing configuration,
// including the default, to use the given hostname:port combination as
// its remote server. An empty remote is a no-op.
func (p *Signing) OverrideRemotes(remote string) error {
	if remote == "" {
		return nil
	}
	for _, prof := range p.Profiles {
		if err := prof.updateRemote(remote); err != nil {
			return err
		}
	}
	return p.Default.updateRemote(remote)
}
// NeedsRemoteSigner returns true if at least one of the profiles, or
// the default profile, has a remote signer set.
func (p *Signing) NeedsRemoteSigner() bool {
	if p.Default.RemoteName != "" {
		return true
	}
	for _, prof := range p.Profiles {
		if prof.RemoteName != "" {
			return true
		}
	}
	return false
}
// NeedsLocalSigner returns true if one of the profiles (or the default
// profile) does not have a remote set.
func (p *Signing) NeedsLocalSigner() bool {
for _, profile := range p.Profiles {
if profile.RemoteName == "" {
return true
}
}
if p.Default.RemoteName == "" {
return true
}
return false
}
// Usages parses the list of key uses in the profile, translating them
// to a list of X.509 key usages and extended key usages. The unknown
// uses are collected into a slice that is also returned.
func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) {
	for _, use := range p.Usage {
		// KeyUsage takes precedence over ExtKeyUsage for names that
		// appear in both maps, matching the original lookup order.
		usage, isKU := KeyUsage[use]
		extUsage, isEKU := ExtKeyUsage[use]
		switch {
		case isKU:
			ku |= usage
		case isEKU:
			eku = append(eku, extUsage)
		default:
			unk = append(unk, use)
		}
	}
	return
}
// validProfile reports whether the profile is well-formed.
//
// A valid profile must be a valid local profile or a valid remote profile.
// A valid local profile has defined at least key usages to be used, and a
// valid local default profile has defined at least a default expiration.
// A valid remote profile (default or not) has a remote signer initialized.
// In addition, a remote profile must have a valid auth provider if an
// auth key is defined.
func (p *SigningProfile) validProfile(isDefault bool) bool {
if p == nil {
return false
}
if p.RemoteName != "" {
log.Debugf("validate remote profile")
// RemoteServer is resolved from the remotes section by populate;
// an empty value means the remote reference was never resolved.
if p.RemoteServer == "" {
log.Debugf("invalid remote profile: no remote signer specified")
return false
}
if p.AuthKeyName != "" && p.Provider == nil {
log.Debugf("invalid remote profile: auth key name is defined but no auth provider is set")
return false
}
} else {
log.Debugf("validate local profile")
if !isDefault {
if len(p.Usage) == 0 {
log.Debugf("invalid local profile: no usages specified")
return false
} else if _, _, unk := p.Usages(); len(unk) == len(p.Usage) {
// Every listed usage was unknown, so nothing usable remains.
log.Debugf("invalid local profile: no valid usages")
return false
}
} else {
// The default profile only needs an expiration; populate has
// already parsed ExpiryString into Expiry by this point.
if p.Expiry == 0 {
log.Debugf("invalid local profile: no expiry set")
return false
}
}
}
log.Debugf("profile is valid")
return true
}
// Signing codifies the signature configuration policy for a CA.
type Signing struct {
Profiles map[string]*SigningProfile `json:"profiles"`
Default *SigningProfile `json:"default"`
}
// Config stores configuration information for the CA.
type Config struct {
Signing *Signing `json:"signing"`
OCSP *ocspConfig.Config `json:"ocsp"`
AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"`
Remotes map[string]string `json:"remotes,omitempty"`
}
// Valid ensures that Config is a valid configuration. It should be
// called immediately after parsing a configuration file.
func (c *Config) Valid() bool {
return c.Signing.Valid()
}
// Valid checks the signature policies, ensuring they are valid
// policies. A policy is valid if it has defined at least key usages
// to be used, and a valid default profile has defined at least a
// default expiration.
func (p *Signing) Valid() bool {
if p == nil {
return false
}
log.Debugf("validating configuration")
if !p.Default.validProfile(true) {
log.Debugf("default profile is invalid")
return false
}
for _, sp := range p.Profiles {
if !sp.validProfile(false) {
log.Debugf("invalid profile")
return false
}
}
return true
}
// KeyUsage contains a mapping of string names to key usages.
var KeyUsage = map[string]x509.KeyUsage{
"signing": x509.KeyUsageDigitalSignature,
"digital signature": x509.KeyUsageDigitalSignature,
"content committment": x509.KeyUsageContentCommitment,
"key encipherment": x509.KeyUsageKeyEncipherment,
"data encipherment": x509.KeyUsageDataEncipherment,
"cert sign": x509.KeyUsageCertSign,
"crl sign": x509.KeyUsageCRLSign,
"encipher only": x509.KeyUsageEncipherOnly,
"decipher only": x509.KeyUsageDecipherOnly,
}
// ExtKeyUsage contains a mapping of string names to extended key
// usages.
var ExtKeyUsage = map[string]x509.ExtKeyUsage{
"any": x509.ExtKeyUsageAny,
"server auth": x509.ExtKeyUsageServerAuth,
"client auth": x509.ExtKeyUsageClientAuth,
"code signing": x509.ExtKeyUsageCodeSigning,
"email protection": x509.ExtKeyUsageEmailProtection,
"s/mime": x509.ExtKeyUsageEmailProtection,
"ipsec end system": x509.ExtKeyUsageIPSECEndSystem,
"ipsec tunnel": x509.ExtKeyUsageIPSECTunnel,
"ipsec user": x509.ExtKeyUsageIPSECUser,
"timestamping": x509.ExtKeyUsageTimeStamping,
"ocsp signing": x509.ExtKeyUsageOCSPSigning,
"microsoft sgc": x509.ExtKeyUsageMicrosoftServerGatedCrypto,
"netscape sgc": x509.ExtKeyUsageNetscapeServerGatedCrypto,
}
// An AuthKey contains an entry for a key used for authentication.
type AuthKey struct {
// Type contains information needed to select the appropriate
// constructor. For example, "standard" for HMAC-SHA-256,
// "standard-ip" for HMAC-SHA-256 incorporating the client's
// IP.
Type string `json:"type"`
// Key contains the key information, such as a hex-encoded
// HMAC key.
Key string `json:"key"`
}
// DefaultConfig returns a default configuration specifying basic key
// usage and a 1 year expiration time. The key usages chosen are
// signing, key encipherment, client auth and server auth.
func DefaultConfig() *SigningProfile {
d := helpers.OneYear
return &SigningProfile{
Usage: []string{"signing", "key encipherment", "server auth", "client auth"},
Expiry: d,
ExpiryString: "8760h",
}
}
// LoadFile attempts to load the configuration file stored at the path
// and returns the configuration. On error, it returns nil.
func LoadFile(path string) (*Config, error) {
log.Debugf("loading configuration file from %s", path)
if path == "" {
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid path"))
}
body, err := ioutil.ReadFile(path)
if err != nil {
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("could not read configuration file"))
}
return LoadConfig(body)
}
// LoadConfig attempts to load the configuration from a byte slice.
// On error, it returns nil and a wrapped policy error.
func LoadConfig(config []byte) (*Config, error) {
	var cfg = &Config{}
	err := json.Unmarshal(config, &cfg)
	if err != nil {
		return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
			errors.New("failed to unmarshal configuration: "+err.Error()))
	}

	// BUG FIX: a configuration without a "signing" section leaves
	// cfg.Signing nil; dereferencing cfg.Signing.Default below would
	// then panic. Reject such configurations explicitly.
	if cfg.Signing == nil {
		return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
			errors.New("has no signing section in configuration"))
	}

	if cfg.Signing.Default == nil {
		log.Debugf("no default given: using default config")
		cfg.Signing.Default = DefaultConfig()
	} else {
		if err := cfg.Signing.Default.populate(cfg); err != nil {
			return nil, err
		}
	}

	for k := range cfg.Signing.Profiles {
		if err := cfg.Signing.Profiles[k].populate(cfg); err != nil {
			return nil, err
		}
	}

	if !cfg.Valid() {
		return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid configuration"))
	}

	log.Debugf("configuration ok")
	return cfg, nil
}
|
// Package config contains the configuration logic for CF-SSL.
package config
import (
"crypto/x509"
"encoding/json"
"io/ioutil"
"time"
"github.com/cloudflare/cfssl/api/client"
"github.com/cloudflare/cfssl/auth"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/log"
)
// A SigningProfile stores information that the CA needs to store
// signature policy.
type SigningProfile struct {
Usage []string `json:"usages"`
IssuerURL []string `json:"issuer_urls"`
OCSP string `json:"ocsp_url"`
CRL string `json:"crl_url"`
CA bool `json:"is_ca"`
ExpiryString string `json:"expiry"`
AuthKeyName string `json:"auth_key"`
RemoteName string `json:"remote"`
Expiry time.Duration
Provider auth.Provider
Remote *client.Server
}
// populate fills in the fields that cannot be decoded directly from
// JSON: it parses ExpiryString into Expiry, resolves AuthKeyName
// against cfg.AuthKeys into an auth.Provider, and resolves RemoteName
// against cfg.Remotes into a client server handle. It returns true
// only when every referenced field parses and resolves.
func (p *SigningProfile) populate(cfg *Config) bool {
	log.Debugf("parse expiry in profile")
	if p == nil {
		log.Debugf("failed: no timestamp in profile")
		return false
	} else if p.ExpiryString == "" {
		log.Debugf("failed: empty expiry string")
		return false
	}

	dur, err := time.ParseDuration(p.ExpiryString)
	if err != nil {
		log.Debugf("failed to parse expiry: %v", err)
		return false
	}

	log.Debugf("expiry is valid")
	p.Expiry = dur

	if p.AuthKeyName != "" {
		if key, ok := cfg.AuthKeys[p.AuthKeyName]; ok {
			if key.Type == "standard" {
				p.Provider, err = auth.New(key.Key, nil)
				if err != nil {
					// BUG FIX: corrected "stanard" typo in the log message.
					log.Debugf("failed to create new standard auth provider: %v", err)
					return false
				}
			} else {
				log.Debugf("unknown authentication type %v", key.Type)
				return false
			}
		} else {
			log.Debugf("failed to find auth_key %v in auth_keys section", p.AuthKeyName)
			return false
		}
	}

	if p.RemoteName != "" {
		if remote := cfg.Remotes[p.RemoteName]; remote != "" {
			p.Remote = client.NewServer(remote)
			if p.Remote == nil {
				log.Debugf("failed to connect to remote %v", remote)
				return false
			}
		} else {
			log.Debugf("failed to find remote %v in remotes section %v", p.RemoteName, cfg)
			return false
		}
	}
	return true
}
// Usages parses the list of key uses in the profile, translating them
// to a list of X.509 key usages and extended key usages. The unknown
// uses are collected into a slice that is also returned.
func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) {
for _, keyUse := range p.Usage {
if kuse, ok := KeyUsage[keyUse]; ok {
ku |= kuse
} else if ekuse, ok := ExtKeyUsage[keyUse]; ok {
eku = append(eku, ekuse)
} else {
unk = append(unk, keyUse)
}
}
return
}
// A valid profile has defined at least key usages to be used, and a
// valid default profile has defined at least a default expiration.
// A nil profile is reported as invalid.
func (p *SigningProfile) validProfile(isDefault bool) bool {
	// BUG FIX: a nil profile previously caused a nil-pointer
	// dereference below (e.g. when Signing.Valid is called on a
	// configuration decoded without a default section); report it
	// as invalid instead, matching later versions of this check.
	if p == nil {
		log.Debugf("invalid profile: profile is nil")
		return false
	}
	log.Debugf("validate profile")
	if !isDefault {
		if len(p.Usage) == 0 {
			log.Debugf("invalid profile: no usages specified")
			return false
		} else if _, _, unk := p.Usages(); len(unk) == len(p.Usage) {
			log.Debugf("invalid profile: no valid usages")
			return false
		}
	} else {
		if p.Expiry == 0 {
			log.Debugf("invalid profile: no expiry set")
			return false
		}
	}
	log.Debugf("profile is valid")
	return true
}
// Signing codifies the signature configuration policy for a CA.
type Signing struct {
Profiles map[string]*SigningProfile `json:"profiles"`
Default *SigningProfile `json:"default"`
}
// Config stores configuration information for the CA.
type Config struct {
Signing *Signing `json:"signing"`
AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"`
Remotes map[string]string `json:"remotes,omitempty"`
}
// Valid ensures that Config is a valid configuration. It should be
// called immediately after parsing a configuration file.
func (c *Config) Valid() bool {
return c.Signing.Valid()
}
// Valid checks the signature policies, ensuring they are valid
// policies. A policy is valid if it has defined at least key usages
// to be used, and a valid default profile has defined at least a
// default expiration.
func (s *Signing) Valid() bool {
log.Debugf("validating configuration")
if !s.Default.validProfile(true) {
log.Debugf("default profile is invalid")
return false
}
for _, p := range s.Profiles {
if !p.validProfile(false) {
log.Debugf("invalid profile")
return false
}
}
return true
}
// KeyUsage contains a mapping of string names to key usages.
var KeyUsage = map[string]x509.KeyUsage{
"signing": x509.KeyUsageDigitalSignature,
"digital signature": x509.KeyUsageDigitalSignature,
"content committment": x509.KeyUsageContentCommitment,
"key encipherment": x509.KeyUsageKeyEncipherment,
"data encipherment": x509.KeyUsageDataEncipherment,
"cert sign": x509.KeyUsageCertSign,
"crl sign": x509.KeyUsageCRLSign,
"encipher only": x509.KeyUsageEncipherOnly,
"decipher only": x509.KeyUsageDecipherOnly,
}
// ExtKeyUsage contains a mapping of string names to extended key
// usages.
var ExtKeyUsage = map[string]x509.ExtKeyUsage{
"any": x509.ExtKeyUsageAny,
"server auth": x509.ExtKeyUsageServerAuth,
"client auth": x509.ExtKeyUsageClientAuth,
"code signing": x509.ExtKeyUsageCodeSigning,
"email protection": x509.ExtKeyUsageEmailProtection,
"s/mime": x509.ExtKeyUsageEmailProtection,
"ipsec end system": x509.ExtKeyUsageIPSECEndSystem,
"ipsec tunnel": x509.ExtKeyUsageIPSECTunnel,
"ipsec user": x509.ExtKeyUsageIPSECUser,
"timestamping": x509.ExtKeyUsageTimeStamping,
"ocsp signing": x509.ExtKeyUsageOCSPSigning,
"microsoft sgc": x509.ExtKeyUsageMicrosoftServerGatedCrypto,
"netscape sgc": x509.ExtKeyUsageNetscapeServerGatedCrypto,
}
// An AuthKey contains an entry for a key used for authentication.
type AuthKey struct {
// Type contains information needed to select the appropriate
// constructor. For example, "standard" for HMAC-SHA-256,
// "standard-ip" for HMAC-SHA-256 incorporating the client's
// IP.
Type string `json:"type"`
// Key contains the key information, such as a hex-encoded
// HMAC key.
Key string `json:"key"`
}
// DefaultConfig returns a default configuration specifying basic key
// usage and a 1 year expiration time. The key usages chosen are
// signing, key encipherment, client auth and server auth.
func DefaultConfig() *SigningProfile {
d := helpers.OneYear
return &SigningProfile{
Usage: []string{"signing", "key encipherment", "server auth", "client auth"},
Expiry: d,
ExpiryString: "8760h",
}
}
// LoadFile attempts to load the configuration file stored at the path
// and returns the configuration. On error, it returns nil.
func LoadFile(path string) *Config {
	log.Debugf("loading configuration file from %s", path)
	if path == "" {
		return nil
	}

	body, err := ioutil.ReadFile(path)
	if err != nil {
		log.Debugf("failed to read configuration file: %v", err)
		return nil
	}

	var cfg = &Config{}
	err = json.Unmarshal(body, &cfg)
	if err != nil {
		log.Debugf("failed to unmarshal configuration: %v", err)
		return nil
	}

	// BUG FIX: a configuration without a "signing" section leaves
	// cfg.Signing nil, and the Default dereference below would panic.
	if cfg.Signing == nil {
		log.Debugf("configuration has no signing policy")
		return nil
	}

	if cfg.Signing.Default == nil {
		log.Debugf("no default given: using default config")
		cfg.Signing.Default = DefaultConfig()
	} else {
		if !cfg.Signing.Default.populate(cfg) {
			return nil
		}
	}

	if !cfg.Valid() {
		return nil
	}

	for k := range cfg.Signing.Profiles {
		if !cfg.Signing.Profiles[k].populate(cfg) {
			return nil
		}
	}

	log.Debugf("configuration ok")
	return cfg
}
Factor out configuration parsing for future use.
// Package config contains the configuration logic for CF-SSL.
package config
import (
"crypto/x509"
"encoding/json"
"io/ioutil"
"time"
"github.com/cloudflare/cfssl/api/client"
"github.com/cloudflare/cfssl/auth"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/log"
)
// A SigningProfile stores information that the CA needs to store
// signature policy.
type SigningProfile struct {
Usage []string `json:"usages"`
IssuerURL []string `json:"issuer_urls"`
OCSP string `json:"ocsp_url"`
CRL string `json:"crl_url"`
CA bool `json:"is_ca"`
ExpiryString string `json:"expiry"`
AuthKeyName string `json:"auth_key"`
RemoteName string `json:"remote"`
Expiry time.Duration
Provider auth.Provider
Remote *client.Server
}
// populate fills in the fields that cannot be decoded directly from
// JSON: it parses ExpiryString into Expiry, resolves AuthKeyName
// against cfg.AuthKeys into an auth.Provider, and resolves RemoteName
// against cfg.Remotes into a client server handle. It returns true
// only when every referenced field parses and resolves.
func (p *SigningProfile) populate(cfg *Config) bool {
	log.Debugf("parse expiry in profile")
	if p == nil {
		log.Debugf("failed: no timestamp in profile")
		return false
	} else if p.ExpiryString == "" {
		log.Debugf("failed: empty expiry string")
		return false
	}

	dur, err := time.ParseDuration(p.ExpiryString)
	if err != nil {
		log.Debugf("failed to parse expiry: %v", err)
		return false
	}

	log.Debugf("expiry is valid")
	p.Expiry = dur

	if p.AuthKeyName != "" {
		if key, ok := cfg.AuthKeys[p.AuthKeyName]; ok {
			if key.Type == "standard" {
				p.Provider, err = auth.New(key.Key, nil)
				if err != nil {
					// BUG FIX: corrected "stanard" typo in the log message.
					log.Debugf("failed to create new standard auth provider: %v", err)
					return false
				}
			} else {
				log.Debugf("unknown authentication type %v", key.Type)
				return false
			}
		} else {
			log.Debugf("failed to find auth_key %v in auth_keys section", p.AuthKeyName)
			return false
		}
	}

	if p.RemoteName != "" {
		if remote := cfg.Remotes[p.RemoteName]; remote != "" {
			p.Remote = client.NewServer(remote)
			if p.Remote == nil {
				log.Debugf("failed to connect to remote %v", remote)
				return false
			}
		} else {
			log.Debugf("failed to find remote %v in remotes section %v", p.RemoteName, cfg)
			return false
		}
	}
	return true
}
// Usages parses the list of key uses in the profile, translating them
// to a list of X.509 key usages and extended key usages. The unknown
// uses are collected into a slice that is also returned.
func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) {
for _, keyUse := range p.Usage {
if kuse, ok := KeyUsage[keyUse]; ok {
ku |= kuse
} else if ekuse, ok := ExtKeyUsage[keyUse]; ok {
eku = append(eku, ekuse)
} else {
unk = append(unk, keyUse)
}
}
return
}
// A valid profile has defined at least key usages to be used, and a
// valid default profile has defined at least a default expiration.
func (p *SigningProfile) validProfile(isDefault bool) bool {
log.Debugf("validate profile")
if !isDefault {
if len(p.Usage) == 0 {
log.Debugf("invalid profile: no usages specified")
return false
} else if _, _, unk := p.Usages(); len(unk) == len(p.Usage) {
log.Debugf("invalid profile: no valid usages")
return false
}
} else {
if p.Expiry == 0 {
log.Debugf("invalid profile: no expiry set")
return false
}
}
log.Debugf("profile is valid")
return true
}
// Signing codifies the signature configuration policy for a CA.
type Signing struct {
Profiles map[string]*SigningProfile `json:"profiles"`
Default *SigningProfile `json:"default"`
}
// Config stores configuration information for the CA.
type Config struct {
Signing *Signing `json:"signing"`
AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"`
Remotes map[string]string `json:"remotes,omitempty"`
}
// Valid ensures that Config is a valid configuration. It should be
// called immediately after parsing a configuration file.
func (c *Config) Valid() bool {
return c.Signing.Valid()
}
// Valid checks the signature policies, ensuring they are valid
// policies. A policy is valid if it has defined at least key usages
// to be used, and a valid default profile has defined at least a
// default expiration.
func (s *Signing) Valid() bool {
log.Debugf("validating configuration")
if !s.Default.validProfile(true) {
log.Debugf("default profile is invalid")
return false
}
for _, p := range s.Profiles {
if !p.validProfile(false) {
log.Debugf("invalid profile")
return false
}
}
return true
}
// KeyUsage contains a mapping of string names to key usages.
var KeyUsage = map[string]x509.KeyUsage{
"signing": x509.KeyUsageDigitalSignature,
"digital signature": x509.KeyUsageDigitalSignature,
"content committment": x509.KeyUsageContentCommitment,
"key encipherment": x509.KeyUsageKeyEncipherment,
"data encipherment": x509.KeyUsageDataEncipherment,
"cert sign": x509.KeyUsageCertSign,
"crl sign": x509.KeyUsageCRLSign,
"encipher only": x509.KeyUsageEncipherOnly,
"decipher only": x509.KeyUsageDecipherOnly,
}
// ExtKeyUsage contains a mapping of string names to extended key
// usages.
var ExtKeyUsage = map[string]x509.ExtKeyUsage{
"any": x509.ExtKeyUsageAny,
"server auth": x509.ExtKeyUsageServerAuth,
"client auth": x509.ExtKeyUsageClientAuth,
"code signing": x509.ExtKeyUsageCodeSigning,
"email protection": x509.ExtKeyUsageEmailProtection,
"s/mime": x509.ExtKeyUsageEmailProtection,
"ipsec end system": x509.ExtKeyUsageIPSECEndSystem,
"ipsec tunnel": x509.ExtKeyUsageIPSECTunnel,
"ipsec user": x509.ExtKeyUsageIPSECUser,
"timestamping": x509.ExtKeyUsageTimeStamping,
"ocsp signing": x509.ExtKeyUsageOCSPSigning,
"microsoft sgc": x509.ExtKeyUsageMicrosoftServerGatedCrypto,
"netscape sgc": x509.ExtKeyUsageNetscapeServerGatedCrypto,
}
// An AuthKey contains an entry for a key used for authentication.
type AuthKey struct {
	// Type contains information needed to select the appropriate
	// constructor. For example, "standard" for HMAC-SHA-256,
	// "standard-ip" for HMAC-SHA-256 incorporating the client's
	// IP.
	Type string `json:"type"`
	// Key contains the key information, such as a hex-encoded
	// HMAC key.
	Key string `json:"key"`
}
// DefaultConfig returns a default configuration specifying basic key
// usage and a 1 year expiration time. The key usages chosen are
// signing, key encipherment, client auth and server auth.
// NOTE(review): the "8760h" string duplicates helpers.OneYear; keep the
// two in sync if the default duration ever changes.
func DefaultConfig() *SigningProfile {
	d := helpers.OneYear
	return &SigningProfile{
		Usage:        []string{"signing", "key encipherment", "server auth", "client auth"},
		Expiry:       d,
		ExpiryString: "8760h",
	}
}
// LoadFile reads the configuration file stored at path and parses it
// into a Config. A nil Config is returned for an empty path, an
// unreadable file, or invalid contents.
func LoadFile(path string) *Config {
	log.Debugf("loading configuration file from %s", path)
	if path == "" {
		return nil
	}
	contents, err := ioutil.ReadFile(path)
	if err == nil {
		return LoadConfig(contents)
	}
	log.Debugf("failed to read configuration file: %v", err)
	return nil
}
// LoadConfig attempts to load the configuration from a byte slice.
// On error, it returns nil.
func LoadConfig(config []byte) *Config {
	var cfg = &Config{}
	// cfg is already a *Config; passing &cfg handed json.Unmarshal a
	// needless **Config. Pass the pointer itself.
	err := json.Unmarshal(config, cfg)
	if err != nil {
		log.Debugf("failed to unmarshal configuration: %v", err)
		return nil
	}
	// fall back to a sane default profile when none was supplied;
	// otherwise resolve the default profile's references.
	if cfg.Signing.Default == nil {
		log.Debugf("no default given: using default config")
		cfg.Signing.Default = DefaultConfig()
	} else {
		if !cfg.Signing.Default.populate(cfg) {
			return nil
		}
	}
	if !cfg.Valid() {
		return nil
	}
	// resolve auth key / remote references for every named profile
	for k := range cfg.Signing.Profiles {
		if !cfg.Signing.Profiles[k].populate(cfg) {
			return nil
		}
	}
	log.Debugf("configuration ok")
	return cfg
}
|
//
package config
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"sync"
"github.com/ghodss/yaml"
"github.com/mitchellh/go-homedir"
"github.com/nanobox-io/nanobox-golang-stylish"
)
// Build/runtime constants. Ports are in ":port" form so they can be
// appended directly to an IP address.
const (
	OS          = runtime.GOOS   // target operating system
	ARCH        = runtime.GOARCH // target architecture
	LOGTAP_PORT = ":6361"
	MIST_PORT   = ":1445"
	SERVER_PORT = ":1757"
	VERSION     = "0.16.13"
)
type (
	// exiter matches os.Exit's signature so tests can swap out Exit.
	exiter func(int)
)
// Package-level state shared across the CLI; populated by init() and by
// parsing the .nanofile.
var (
	err   error // reusable package-level error (NOTE(review): locals in init shadow this — confirm it is still needed)
	mutex = &sync.Mutex{}
	// resolved filesystem paths
	AppDir     string // the path to the application (~/.nanobox/apps/<app>)
	AppsDir    string // ~/.nanobox/apps
	CWDir      string // the current working directory
	EnginesDir string // ~/.nanobox/engines
	Home       string // the users home directory (~)
	IP         string // the guest vm's private network ip (generated from app name)
	Root       string // nanobox's root directory path (~/.nanobox)
	UpdateFile string // the path to the .update file (~/.nanobox/.update)
	// parsed configuration files
	Nanofile NanofileConfig // parsed nanofile options
	VMfile   VMfileConfig   // parsed VMfile options
	// derived service endpoints
	ServerURI string // nanobox-server host:port combo (IP:1757)
	ServerURL string // nanobox-server host:port combo (IP:1757) (http)
	MistURI   string // mist's host:port combo (IP:1445)
	LogtapURI string // logtap's host:port combo (IP:6361)
	// flags
	Background bool   // don't suspend the vm on exit
	Devmode    bool   // run nanobox in devmode
	Force      bool   // force a command to run (effects vary per command)
	Verbose    bool   // run cli with log level "debug"
	Silent     bool   // silence all output
	LogLevel   string // current log level (defaulted in init)
	// Exit is swappable in tests (see exiter)
	Exit exiter = os.Exit
)
// init wires up every package-level path/config variable. Ordering is
// significant: the working directory and home directory are resolved
// first, the ~/.nanobox tree is created next, and the .nanofile is
// parsed last so its options are available to everything that follows.
func init() {
	// default log level
	LogLevel = "info"
	// set the current working directory first, as it's used in other steps of the
	// configuration process
	if p, err := os.Getwd(); err != nil {
		Log.Fatal("[config/config] os.Getwd() failed", err.Error())
	} else {
		CWDir = filepath.ToSlash(p)
	}
	// set Home based off the users homedir (~)
	if p, err := homedir.Dir(); err != nil {
		Log.Fatal("[config/config] homedir.Dir() failed", err.Error())
	} else {
		Home = filepath.ToSlash(p)
	}
	// set nanobox's root directory;
	Root = filepath.ToSlash(filepath.Join(Home, ".nanobox"))
	// check for a ~/.nanobox dir and create one if it's not found
	if _, err := os.Stat(Root); err != nil {
		fmt.Printf(stylish.Bullet("Creating %s directory", Root))
		if err := os.Mkdir(Root, 0755); err != nil {
			Log.Fatal("[config/config] os.Mkdir() failed", err.Error())
		}
	}
	// check for a ~/.nanobox/.update file and create one if it's not found
	UpdateFile = filepath.ToSlash(filepath.Join(Root, ".update"))
	if _, err := os.Stat(UpdateFile); err != nil {
		f, err := os.Create(UpdateFile)
		if err != nil {
			Log.Fatal("[config/config] os.Create() failed", err.Error())
		}
		// NOTE(review): this defer only runs when init returns, not at the
		// end of this if-block; harmless here but worth confirming.
		defer f.Close()
	}
	// check for a ~/.nanobox/engines dir and create one if it's not found
	EnginesDir = filepath.ToSlash(filepath.Join(Root, "engines"))
	if _, err := os.Stat(EnginesDir); err != nil {
		if err := os.Mkdir(EnginesDir, 0755); err != nil {
			Log.Fatal("[config/config] os.Mkdir() failed", err.Error())
		}
	}
	// check for a ~/.nanobox/apps dir and create one if it's not found
	AppsDir = filepath.ToSlash(filepath.Join(Root, "apps"))
	if _, err := os.Stat(AppsDir); err != nil {
		if err := os.Mkdir(AppsDir, 0755); err != nil {
			Log.Fatal("[config/config] os.Mkdir() failed", err.Error())
		}
	}
	// the .nanofile needs to be parsed right away so that its config options are
	// available as soon as possible
	Nanofile = ParseNanofile()
	// derive service endpoints from the parsed .nanofile IP
	ServerURI = Nanofile.IP + SERVER_PORT
	ServerURL = "http://" + ServerURI
	MistURI = Nanofile.IP + MIST_PORT
	LogtapURI = Nanofile.IP + LOGTAP_PORT
	// set the 'App' first so it can be used in subsequent configurations; the 'App'
	// is set to the name of the cwd; this can be overridden from a .nanofile
	AppDir = filepath.ToSlash(filepath.Join(AppsDir, Nanofile.Name))
}
// ParseConfig resolves path to an absolute location, reads the file
// there, and unmarshals its YAML contents into v.
func ParseConfig(path string, v interface{}) error {
	abs, err := filepath.Abs(path)
	if err != nil {
		return err
	}
	raw, err := ioutil.ReadFile(abs)
	if err != nil {
		return err
	}
	return yaml.Unmarshal(raw, v)
}
// writeConfig
func writeConfig(path string, v interface{}) error {
// take a config objects path and create (and truncate) the file, preparing it
// to receive new configurations
f, err := os.Create(path)
if err != nil {
Fatal("[config/config] os.Create() failed", err.Error())
}
defer f.Close()
// marshal the config object
b, err := yaml.Marshal(v)
if err != nil {
Fatal("[config/config] yaml.Marshal() failed", err.Error())
}
// mutex.Lock()
// write it back to the file
if _, err := f.Write(b); err != nil {
return err
}
// mutex.Unlock()
return nil
}
// bumping to 0.16.14
//
package config
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"sync"
"github.com/ghodss/yaml"
"github.com/mitchellh/go-homedir"
"github.com/nanobox-io/nanobox-golang-stylish"
)
// Build/runtime constants. Ports are in ":port" form so they can be
// appended directly to an IP address.
const (
	OS          = runtime.GOOS   // target operating system
	ARCH        = runtime.GOARCH // target architecture
	LOGTAP_PORT = ":6361"
	MIST_PORT   = ":1445"
	SERVER_PORT = ":1757"
	VERSION     = "0.16.14"
)
type (
	// exiter matches os.Exit's signature so tests can swap out Exit.
	exiter func(int)
)
// Package-level state shared across the CLI; populated by init() and by
// parsing the .nanofile.
var (
	err   error // reusable package-level error (NOTE(review): locals in init shadow this — confirm it is still needed)
	mutex = &sync.Mutex{}
	// resolved filesystem paths
	AppDir     string // the path to the application (~/.nanobox/apps/<app>)
	AppsDir    string // ~/.nanobox/apps
	CWDir      string // the current working directory
	EnginesDir string // ~/.nanobox/engines
	Home       string // the users home directory (~)
	IP         string // the guest vm's private network ip (generated from app name)
	Root       string // nanobox's root directory path (~/.nanobox)
	UpdateFile string // the path to the .update file (~/.nanobox/.update)
	// parsed configuration files
	Nanofile NanofileConfig // parsed nanofile options
	VMfile   VMfileConfig   // parsed VMfile options
	// derived service endpoints
	ServerURI string // nanobox-server host:port combo (IP:1757)
	ServerURL string // nanobox-server host:port combo (IP:1757) (http)
	MistURI   string // mist's host:port combo (IP:1445)
	LogtapURI string // logtap's host:port combo (IP:6361)
	// flags
	Background bool   // don't suspend the vm on exit
	Devmode    bool   // run nanobox in devmode
	Force      bool   // force a command to run (effects vary per command)
	Verbose    bool   // run cli with log level "debug"
	Silent     bool   // silence all output
	LogLevel   string // current log level (defaulted in init)
	// Exit is swappable in tests (see exiter)
	Exit exiter = os.Exit
)
// init wires up every package-level path/config variable. Ordering is
// significant: the working directory and home directory are resolved
// first, the ~/.nanobox tree is created next, and the .nanofile is
// parsed last so its options are available to everything that follows.
func init() {
	// default log level
	LogLevel = "info"
	// set the current working directory first, as it's used in other steps of the
	// configuration process
	if p, err := os.Getwd(); err != nil {
		Log.Fatal("[config/config] os.Getwd() failed", err.Error())
	} else {
		CWDir = filepath.ToSlash(p)
	}
	// set Home based off the users homedir (~)
	if p, err := homedir.Dir(); err != nil {
		Log.Fatal("[config/config] homedir.Dir() failed", err.Error())
	} else {
		Home = filepath.ToSlash(p)
	}
	// set nanobox's root directory;
	Root = filepath.ToSlash(filepath.Join(Home, ".nanobox"))
	// check for a ~/.nanobox dir and create one if it's not found
	if _, err := os.Stat(Root); err != nil {
		fmt.Printf(stylish.Bullet("Creating %s directory", Root))
		if err := os.Mkdir(Root, 0755); err != nil {
			Log.Fatal("[config/config] os.Mkdir() failed", err.Error())
		}
	}
	// check for a ~/.nanobox/.update file and create one if it's not found
	UpdateFile = filepath.ToSlash(filepath.Join(Root, ".update"))
	if _, err := os.Stat(UpdateFile); err != nil {
		f, err := os.Create(UpdateFile)
		if err != nil {
			Log.Fatal("[config/config] os.Create() failed", err.Error())
		}
		// NOTE(review): this defer only runs when init returns, not at the
		// end of this if-block; harmless here but worth confirming.
		defer f.Close()
	}
	// check for a ~/.nanobox/engines dir and create one if it's not found
	EnginesDir = filepath.ToSlash(filepath.Join(Root, "engines"))
	if _, err := os.Stat(EnginesDir); err != nil {
		if err := os.Mkdir(EnginesDir, 0755); err != nil {
			Log.Fatal("[config/config] os.Mkdir() failed", err.Error())
		}
	}
	// check for a ~/.nanobox/apps dir and create one if it's not found
	AppsDir = filepath.ToSlash(filepath.Join(Root, "apps"))
	if _, err := os.Stat(AppsDir); err != nil {
		if err := os.Mkdir(AppsDir, 0755); err != nil {
			Log.Fatal("[config/config] os.Mkdir() failed", err.Error())
		}
	}
	// the .nanofile needs to be parsed right away so that its config options are
	// available as soon as possible
	Nanofile = ParseNanofile()
	// derive service endpoints from the parsed .nanofile IP
	ServerURI = Nanofile.IP + SERVER_PORT
	ServerURL = "http://" + ServerURI
	MistURI = Nanofile.IP + MIST_PORT
	LogtapURI = Nanofile.IP + LOGTAP_PORT
	// set the 'App' first so it can be used in subsequent configurations; the 'App'
	// is set to the name of the cwd; this can be overridden from a .nanofile
	AppDir = filepath.ToSlash(filepath.Join(AppsDir, Nanofile.Name))
}
// ParseConfig resolves path to an absolute location, reads the file
// there, and unmarshals its YAML contents into v.
func ParseConfig(path string, v interface{}) error {
	abs, err := filepath.Abs(path)
	if err != nil {
		return err
	}
	raw, err := ioutil.ReadFile(abs)
	if err != nil {
		return err
	}
	return yaml.Unmarshal(raw, v)
}
// writeConfig
func writeConfig(path string, v interface{}) error {
// take a config objects path and create (and truncate) the file, preparing it
// to receive new configurations
f, err := os.Create(path)
if err != nil {
Fatal("[config/config] os.Create() failed", err.Error())
}
defer f.Close()
// marshal the config object
b, err := yaml.Marshal(v)
if err != nil {
Fatal("[config/config] yaml.Marshal() failed", err.Error())
}
// mutex.Lock()
// write it back to the file
if _, err := f.Write(b); err != nil {
return err
}
// mutex.Unlock()
return nil
}
|
// Copyright 2016 The goscope Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gui
import (
"fmt"
"image"
"image/color"
"image/png"
"io/ioutil"
"math"
"os"
"path/filepath"
"testing"
"github.com/zagrodzki/goscope/dummy"
"github.com/zagrodzki/goscope/scope"
)
// TODO: more tests for specific functions
// isOn returns true if pixel x,y is of a different color than white,
// i.e. the pixel is part of a plotted trace.
func isOn(img image.Image, x, y int) bool {
	return img.At(x, y) != colorWhite
}
// evaluatePlot checks whether the tested plot:
// 1) has the same bounds as the true plot
// 2) is entirely contained in the true plot
// 3) contains at least one pixel in every column
// 4) contains at least minPointCount pixels
func evaluatePlot(truePlot, testPlot image.Image, minPointCount int) (bool, string) {
	if truePlot.Bounds() != testPlot.Bounds() {
		return false, fmt.Sprintf("plot bounds: got %v, expected %v", testPlot.Bounds(), truePlot.Bounds())
	}
	b := truePlot.Bounds()
	pointCount := 0
	for x := b.Min.X; x < b.Max.X; x++ {
		// track condition 3: the doc promised a per-column check, but the
		// code never performed it before.
		col := false
		for y := b.Min.Y; y < b.Max.Y; y++ {
			testOn := isOn(testPlot, x, y)
			col = col || testOn
			if testOn {
				pointCount++
			}
			// condition 2: every lit test pixel must also be lit in truth
			if testOn && !isOn(truePlot, x, y) {
				return false, "test plot is not contained in true plot"
			}
		}
		if !col {
			return false, fmt.Sprintf("image column %v does not contain any point", x)
		}
	}
	if pointCount < minPointCount {
		return false, fmt.Sprintf("too few plot points: got %v, expected at least %v", pointCount, minPointCount)
	}
	return true, ""
}
// testPlot evaluates a plot generated from the samples against a true
// plot stored in a file. minPointCount is the minimum number of pixels
// of the tested plot.
func testPlot(t *testing.T, plotFile string, samples []scope.Sample, minPointCount int) {
	file, err := os.Open(plotFile)
	if err != nil {
		t.Fatalf("Cannot open file: %v", err)
	}
	// the file handle was previously leaked; release it when done
	defer file.Close()
	img, err := png.Decode(file)
	if err != nil {
		t.Fatalf("Cannot decode file: %v", err)
	}
	plot := Plot{image.NewRGBA(image.Rect(0, 0, 800, 600))}
	plot.Fill(colorWhite)
	plotBounds := plot.Bounds()
	plot.DrawSamples(samples, TracePos{0.5, 0.25}, plotBounds.Min, plotBounds.Max, colorBlack)
	eval, msg := evaluatePlot(img, plot, minPointCount)
	if !eval {
		// t.Error, not t.Errorf: msg is not a format string (go vet printf)
		t.Error(msg)
	}
}
// TestSin plots two full periods of a sine wave and compares against the
// stored reference plot.
func TestSin(t *testing.T) {
	const numSamples = 1000
	step := 4 * math.Pi / float64(numSamples-1)
	samples := make([]scope.Sample, numSamples)
	for i := range samples {
		samples[i] = scope.Sample(math.Sin(float64(i) * step))
	}
	testPlot(t, "sin-gp.png", samples, 2000)
}
// TestZero plots a flat zero trace and compares against the stored
// reference plot.
func TestZero(t *testing.T) {
	numSamples := 1000
	// make() already zero-initializes the slice; the explicit fill loop
	// was a no-op and has been removed.
	samples := make([]scope.Sample, numSamples)
	testPlot(t, "zero-gp.png", samples, 800)
}
// TestSquare plots two periods of a square wave (quarters alternate
// between +1 and -1) and compares against the stored reference plot.
func TestSquare(t *testing.T) {
	numSamples := 1000
	quarter := numSamples / 4
	samples := make([]scope.Sample, numSamples)
	for i := range samples {
		if (i/quarter)%2 == 0 {
			samples[i] = 1
		} else {
			samples[i] = -1
		}
	}
	testPlot(t, "square-gp.png", samples, 2000)
}
// TestTriangle plots a rising/falling/rising triangle wave across three
// equal thirds and compares against the stored reference plot.
func TestTriangle(t *testing.T) {
	numSamples := 999
	third := numSamples / 3
	step := 2.0 / float64(third-1)
	samples := make([]scope.Sample, numSamples)
	for i := 0; i < third; i++ {
		rising := scope.Sample(-1.0 + float64(i)*step)
		samples[i] = rising
		samples[i+third] = scope.Sample(1.0 - float64(i)*step)
		samples[i+2*third] = rising
	}
	testPlot(t, "triangle-gp.png", samples, 1000)
}
// TestPlotToPng renders the dummy device with default (empty) trace
// positions and colors into a PNG in a temp dir, checking only that no
// step fails.
func TestPlotToPng(t *testing.T) {
	dev, err := dummy.Open("")
	if err != nil {
		t.Fatalf("Cannot open the device: %v", err)
	}
	dir, err := ioutil.TempDir("", "TestPlotToPng")
	if err != nil {
		t.Fatalf("Cannot create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)
	err = PlotToPng(dev, 800, 600,
		make(map[scope.ChanID]TracePos),
		make(map[scope.ChanID]color.RGBA),
		filepath.Join(dir, "plot.png"))
	if err != nil {
		t.Fatalf("Cannot plot to file: %v", err)
	}
}
// TestPlotToPngWithCustomParameters renders the dummy device with
// explicit per-channel trace positions and colors into a PNG in a temp
// dir, checking only that no step fails.
func TestPlotToPngWithCustomParameters(t *testing.T) {
	dev, err := dummy.Open("")
	if err != nil {
		t.Fatalf("Cannot open the device: %v", err)
	}
	dir, err := ioutil.TempDir("", "TestPlotToPngWithCustomScales")
	if err != nil {
		t.Fatalf("Cannot create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)
	// gofmt -s: element types are implied by the map type and were redundant
	tracePos := map[scope.ChanID]TracePos{
		"square":   {0.1, 5},
		"triangle": {0.8, 2},
	}
	cols := map[scope.ChanID]color.RGBA{
		"random":   {255, 0, 0, 255},
		"sin":      {255, 0, 255, 255},
		"square":   {0, 255, 0, 255},
		"triangle": {0, 0, 255, 255},
	}
	err = PlotToPng(dev, 800, 600, tracePos, cols, filepath.Join(dir, "plot.png"))
	if err != nil {
		t.Fatalf("Cannot plot to file: %v", err)
	}
}
// BenchmarkCreatePlot measures repeated rendering of the dummy device
// into a fixed 800x600 plot. Device setup happens before ResetTimer so
// only DrawFromDevice is timed.
func BenchmarkCreatePlot(b *testing.B) {
	dev, err := dummy.Open("")
	if err != nil {
		b.Fatalf("Cannot open the device: %v", err)
	}
	plot := Plot{image.NewRGBA(image.Rect(0, 0, 800, 600))}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err = plot.DrawFromDevice(dev,
			make(map[scope.ChanID]TracePos),
			make(map[scope.ChanID]color.RGBA))
		if err != nil {
			b.Fatalf("Cannot create plot: %v", err)
		}
	}
}
// tested plot must contain at least one pixel in every column
// Copyright 2016 The goscope Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gui
import (
"fmt"
"image"
"image/color"
"image/png"
"io/ioutil"
"math"
"os"
"path/filepath"
"testing"
"github.com/zagrodzki/goscope/dummy"
"github.com/zagrodzki/goscope/scope"
)
// TODO: more tests for specific functions
// isOn returns true if pixel x,y is of a different color than white,
// i.e. the pixel is part of a plotted trace.
func isOn(img image.Image, x, y int) bool {
	return img.At(x, y) != colorWhite
}
// evaluatePlot checks whether the tested plot:
// 1) has the same bounds as the true plot
// 2) is entirely contained in the true plot
// 3) contains at least one pixel in every column
// 4) contains at least minPointCount pixels
func evaluatePlot(truePlot, testPlot image.Image, minPointCount int) (bool, string) {
	if truePlot.Bounds() != testPlot.Bounds() {
		return false, fmt.Sprintf("plot bounds: got %v, expected %v", testPlot.Bounds(), truePlot.Bounds())
	}
	bounds := truePlot.Bounds()
	total := 0
	for x := bounds.Min.X; x < bounds.Max.X; x++ {
		columnPoints := 0
		for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
			if !isOn(testPlot, x, y) {
				continue
			}
			columnPoints++
			// a lit test pixel must also be lit in the reference plot
			if !isOn(truePlot, x, y) {
				return false, "test plot is not contained in true plot"
			}
		}
		if columnPoints == 0 {
			return false, fmt.Sprintf("image column %v does not contain any point", x)
		}
		total += columnPoints
	}
	if total < minPointCount {
		return false, fmt.Sprintf("too few plot points: got %v, expected at least %v", total, minPointCount)
	}
	return true, ""
}
// testPlot evaluates a plot generated from the samples against a true
// plot stored in a file. minPointCount is the minimum number of pixels
// of the tested plot.
func testPlot(t *testing.T, plotFile string, samples []scope.Sample, minPointCount int) {
	file, err := os.Open(plotFile)
	if err != nil {
		t.Fatalf("Cannot open file: %v", err)
	}
	// the file handle was previously leaked; release it when done
	defer file.Close()
	img, err := png.Decode(file)
	if err != nil {
		t.Fatalf("Cannot decode file: %v", err)
	}
	plot := Plot{image.NewRGBA(image.Rect(0, 0, 800, 600))}
	plot.Fill(colorWhite)
	plotBounds := plot.Bounds()
	plot.DrawSamples(samples, TracePos{0.5, 0.25}, plotBounds.Min, plotBounds.Max, colorBlack)
	eval, msg := evaluatePlot(img, plot, minPointCount)
	if !eval {
		// t.Error, not t.Errorf: msg is not a format string (go vet printf)
		t.Error(msg)
	}
}
// TestSin plots two full periods of a sine wave and compares against the
// stored reference plot.
func TestSin(t *testing.T) {
	const numSamples = 1000
	step := 4 * math.Pi / float64(numSamples-1)
	samples := make([]scope.Sample, numSamples)
	for i := range samples {
		samples[i] = scope.Sample(math.Sin(float64(i) * step))
	}
	testPlot(t, "sin-gp.png", samples, 2000)
}
// TestZero plots a flat zero trace and compares against the stored
// reference plot.
func TestZero(t *testing.T) {
	numSamples := 1000
	// make() already zero-initializes the slice; the explicit fill loop
	// was a no-op and has been removed.
	samples := make([]scope.Sample, numSamples)
	testPlot(t, "zero-gp.png", samples, 800)
}
// TestSquare plots two periods of a square wave (quarters alternate
// between +1 and -1) and compares against the stored reference plot.
func TestSquare(t *testing.T) {
	numSamples := 1000
	quarter := numSamples / 4
	samples := make([]scope.Sample, numSamples)
	for i := range samples {
		if (i/quarter)%2 == 0 {
			samples[i] = 1
		} else {
			samples[i] = -1
		}
	}
	testPlot(t, "square-gp.png", samples, 2000)
}
// TestTriangle plots a rising/falling/rising triangle wave across three
// equal thirds and compares against the stored reference plot.
func TestTriangle(t *testing.T) {
	numSamples := 999
	third := numSamples / 3
	step := 2.0 / float64(third-1)
	samples := make([]scope.Sample, numSamples)
	for i := 0; i < third; i++ {
		rising := scope.Sample(-1.0 + float64(i)*step)
		samples[i] = rising
		samples[i+third] = scope.Sample(1.0 - float64(i)*step)
		samples[i+2*third] = rising
	}
	testPlot(t, "triangle-gp.png", samples, 1000)
}
// TestPlotToPng renders the dummy device with default (empty) trace
// positions and colors into a PNG in a temp dir, checking only that no
// step fails.
func TestPlotToPng(t *testing.T) {
	dev, err := dummy.Open("")
	if err != nil {
		t.Fatalf("Cannot open the device: %v", err)
	}
	dir, err := ioutil.TempDir("", "TestPlotToPng")
	if err != nil {
		t.Fatalf("Cannot create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)
	err = PlotToPng(dev, 800, 600,
		make(map[scope.ChanID]TracePos),
		make(map[scope.ChanID]color.RGBA),
		filepath.Join(dir, "plot.png"))
	if err != nil {
		t.Fatalf("Cannot plot to file: %v", err)
	}
}
// TestPlotToPngWithCustomParameters renders the dummy device with
// explicit per-channel trace positions and colors into a PNG in a temp
// dir, checking only that no step fails.
func TestPlotToPngWithCustomParameters(t *testing.T) {
	dev, err := dummy.Open("")
	if err != nil {
		t.Fatalf("Cannot open the device: %v", err)
	}
	dir, err := ioutil.TempDir("", "TestPlotToPngWithCustomScales")
	if err != nil {
		t.Fatalf("Cannot create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)
	// gofmt -s: element types are implied by the map type and were redundant
	tracePos := map[scope.ChanID]TracePos{
		"square":   {0.1, 5},
		"triangle": {0.8, 2},
	}
	cols := map[scope.ChanID]color.RGBA{
		"random":   {255, 0, 0, 255},
		"sin":      {255, 0, 255, 255},
		"square":   {0, 255, 0, 255},
		"triangle": {0, 0, 255, 255},
	}
	err = PlotToPng(dev, 800, 600, tracePos, cols, filepath.Join(dir, "plot.png"))
	if err != nil {
		t.Fatalf("Cannot plot to file: %v", err)
	}
}
// BenchmarkCreatePlot measures repeated rendering of the dummy device
// into a fixed 800x600 plot. Device setup happens before ResetTimer so
// only DrawFromDevice is timed.
func BenchmarkCreatePlot(b *testing.B) {
	dev, err := dummy.Open("")
	if err != nil {
		b.Fatalf("Cannot open the device: %v", err)
	}
	plot := Plot{image.NewRGBA(image.Rect(0, 0, 800, 600))}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err = plot.DrawFromDevice(dev,
			make(map[scope.ChanID]TracePos),
			make(map[scope.ChanID]color.RGBA))
		if err != nil {
			b.Fatalf("Cannot create plot: %v", err)
		}
	}
}
|
package xormrediscache
import (
"bytes"
"encoding/gob"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/go-xorm/core"
"hash/crc32"
"log"
"reflect"
// "strconv"
"time"
"unsafe"
)
// Sentinel expirations: DEFAULT_EXPIRATION defers to the cacher's
// configured default; FOREVER_EXPIRATION stores without a TTL.
const (
	DEFAULT_EXPIRATION = time.Duration(0)
	FOREVER_EXPIRATION = time.Duration(-1)
)
// RedisCacher wraps the Redis client to meet the Cache interface.
type RedisCacher struct {
	pool              *redis.Pool   // connection pool shared by all operations
	defaultExpiration time.Duration // TTL used when DEFAULT_EXPIRATION is requested
}
// NewRedisCacher creates a RedisCacher backed by a redigo connection
// pool. Dialed connections authenticate with AUTH when password is
// non-empty, otherwise they are verified with PING.
// until redigo supports sharding/clustering, only one host will be in hostList
func NewRedisCacher(host string, password string, defaultExpiration time.Duration) *RedisCacher {
	var pool = &redis.Pool{
		MaxIdle:     5,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redis.Conn, error) {
			// the redis protocol should probably be made sett-able
			c, err := redis.Dial("tcp", host)
			if err != nil {
				return nil, err
			}
			if len(password) > 0 {
				if _, err := c.Do("AUTH", password); err != nil {
					c.Close()
					return nil, err
				}
			} else {
				// check with PING
				if _, err := c.Do("PING"); err != nil {
					c.Close()
					return nil, err
				}
			}
			return c, err
		},
		// custom connection test method
		TestOnBorrow: func(c redis.Conn, t time.Time) error {
			if _, err := c.Do("PING"); err != nil {
				return err
			}
			return nil
		},
	}
	return &RedisCacher{pool, defaultExpiration}
}
// exists reports whether key is present in redis; errors are treated as
// "not present".
func exists(conn redis.Conn, key string) bool {
	found, _ := redis.Bool(conn.Do("EXISTS", key))
	return found
}
// getBeanKey builds the cache key for a single bean of a table.
func (c *RedisCacher) getBeanKey(tableName string, id string) string {
	return "xorm:bean:" + tableName + ":" + id
}
// getSqlKey builds the cache key for a SQL statement, hashing the SQL
// with CRC32 to keep the key short.
func (c *RedisCacher) getSqlKey(tableName string, sql string) string {
	checksum := crc32.ChecksumIEEE([]byte(sql))
	return fmt.Sprintf("xorm:sql:%s:%d", tableName, checksum)
}
// Flush deletes every xorm-prefixed key from redis.
func (c *RedisCacher) Flush() error {
	// "xorm:*" is a glob pattern, but delObject treats its argument as a
	// literal key (EXISTS/DEL do not expand patterns), so it could never
	// match anything. delObjects expands the pattern via KEYS first.
	return c.delObjects("xorm:*")
}
// getObject fetches and deserializes the value stored under key,
// returning nil when the key is missing or the entry cannot be decoded.
func (c *RedisCacher) getObject(key string) interface{} {
	conn := c.pool.Get()
	defer conn.Close()
	raw, err := conn.Do("GET", key)
	if raw == nil {
		return nil
	}
	item, err := redis.Bytes(raw, err)
	if err != nil {
		log.Fatalf("[xorm/redis_cacher] redis.Bytes failed: %s", err)
		return nil
	}
	value, err := deserialize(item)
	if err != nil {
		// previously this error was silently discarded; treat an
		// undecodable entry as a cache miss instead.
		log.Printf("[xorm/redis_cacher] deserialize failed: %s", err)
		return nil
	}
	return value
}
// GetIds returns the cached id list for a table+SQL pair, or nil on miss.
func (c *RedisCacher) GetIds(tableName, sql string) interface{} {
	key := c.getSqlKey(tableName, sql)
	log.Printf("[xorm/redis_cacher] GetIds|tableName:%s|sql:%s|key:%s", tableName, sql, key)
	return c.getObject(key)
}
// GetBean returns the cached bean for a table+id pair, or nil on miss.
func (c *RedisCacher) GetBean(tableName string, id string) interface{} {
	key := c.getBeanKey(tableName, id)
	log.Printf("[xorm/redis_cacher] GetBean|tableName:%s|id:%s|key:%s", tableName, id, key)
	return c.getObject(key)
}
// putObject serializes value and stores it under key with the cacher's
// default expiration.
func (c *RedisCacher) putObject(key string, value interface{}) {
	// obtain the connection explicitly so it is returned to the pool;
	// c.pool.Get().Do leaked one connection per call because the
	// anonymous connection was never closed.
	conn := c.pool.Get()
	defer conn.Close()
	c.invoke(conn.Do, key, value, c.defaultExpiration)
}
// PutIds caches the id list produced by a table+SQL pair.
func (c *RedisCacher) PutIds(tableName, sql string, ids interface{}) {
	sqlKey := c.getSqlKey(tableName, sql)
	log.Printf("[xorm/redis_cacher] PutIds|tableName:%s|sql:%s|key:%s|obj:%s|type:%v", tableName, sql, sqlKey, ids, reflect.TypeOf(ids))
	c.putObject(sqlKey, ids)
}
// PutBean caches a single bean under its table+id key.
func (c *RedisCacher) PutBean(tableName string, id string, obj interface{}) {
	beanKey := c.getBeanKey(tableName, id)
	log.Printf("[xorm/redis_cacher] PutBean|tableName:%s|id:%s|key:%s|type:%v", tableName, id, beanKey, reflect.TypeOf(obj))
	c.putObject(beanKey, obj)
}
// delObject deletes a single literal key, returning core.ErrCacheMiss
// when the key does not exist. The key is NOT treated as a glob pattern.
func (c *RedisCacher) delObject(key string) error {
	log.Printf("[xorm/redis_cacher] delObject key:[%s]", key)
	conn := c.pool.Get()
	defer conn.Close()
	if !exists(conn, key) {
		log.Printf("[xorm/redis_cacher] delObject: %v", core.ErrCacheMiss)
		return core.ErrCacheMiss
	}
	_, err := conn.Do("DEL", key)
	return err
}
// delObjects deletes every key matching the given glob pattern.
// NOTE(review): KEYS scans the entire keyspace and blocks redis; SCAN
// would be safer on large datasets. Individual DEL errors are ignored.
func (c *RedisCacher) delObjects(key string) error {
	log.Printf("[xorm/redis_cacher] delObjects key:[%s]", key)
	conn := c.pool.Get()
	defer conn.Close()
	keys, err := conn.Do("KEYS", key)
	log.Printf("[xorm/redis_cacher] delObjects keys: %v", keys)
	if err == nil {
		for _, key := range keys.([]interface{}) {
			conn.Do("DEL", key)
		}
	}
	return err
}
// DelIds removes the cached id list for a table+SQL pair.
func (c *RedisCacher) DelIds(tableName, sql string) {
	key := c.getSqlKey(tableName, sql)
	c.delObject(key)
}
// DelBean removes the cached bean for a table+id pair.
func (c *RedisCacher) DelBean(tableName string, id string) {
	key := c.getBeanKey(tableName, id)
	c.delObject(key)
}
// ClearIds removes every cached id list belonging to a table.
func (c *RedisCacher) ClearIds(tableName string) {
	pattern := c.getSqlKey(tableName, "*")
	c.delObjects(pattern)
}
// ClearBeans removes every cached bean belonging to a table.
func (c *RedisCacher) ClearBeans(tableName string) {
	pattern := c.getBeanKey(tableName, "*")
	c.delObjects(pattern)
}
// invoke serializes value and stores it via f ("SETEX" with a TTL when
// expires resolves to a positive duration, plain "SET" otherwise).
// f is the Do method of a caller-managed connection.
func (c *RedisCacher) invoke(f func(string, ...interface{}) (interface{}, error),
	key string, value interface{}, expires time.Duration) error {
	// resolve the sentinel expirations
	switch expires {
	case DEFAULT_EXPIRATION:
		expires = c.defaultExpiration
	case FOREVER_EXPIRATION:
		expires = time.Duration(0)
	}
	b, err := serialize(value)
	if err != nil {
		return err
	}
	conn := c.pool.Get()
	defer conn.Close()
	if expires > 0 {
		_, err := f("SETEX", key, int32(expires/time.Second), b)
		return err
	}
	// else-after-return removed: the happy path stays left-aligned
	_, err = f("SET", key, b)
	return err
}
// serialize gob-encodes value wrapped in an interface so the concrete
// type name travels with the bytes. Struct values must be passed by
// pointer; the concrete type is registered with gob first.
func serialize(value interface{}) ([]byte, error) {
	err := RegisterGobConcreteType(value)
	if err != nil {
		return nil, err
	}
	if reflect.TypeOf(value).Kind() == reflect.Struct {
		return nil, fmt.Errorf("serialize func only take pointer of a struct")
	}
	var b bytes.Buffer
	encoder := gob.NewEncoder(&b)
	log.Printf("[xorm/redis_cacher] serialize type:%v", reflect.TypeOf(value))
	// encode a pointer to the interface so the decoder can recover the
	// dynamic type
	err = encoder.Encode(&value)
	if err != nil {
		// NOTE(review): log.Fatalf exits the whole process from library
		// code — the return below is unreachable; confirm this is intended.
		log.Fatalf("[xorm/redis_cacher] gob encoding '%s' failed: %s|value:%v", value, err, value)
		return nil, err
	}
	return b.Bytes(), nil
}
// deserialize gob-decodes byt back into the value produced by serialize.
// For struct values it uses unsafe pointer surgery on the interface's
// data word to return a pointer to the decoded struct.
func deserialize(byt []byte) (ptr interface{}, err error) {
	b := bytes.NewBuffer(byt)
	decoder := gob.NewDecoder(b)
	var p interface{}
	err = decoder.Decode(&p)
	if err != nil {
		// NOTE(review): log.Fatal exits the process, so the error return is
		// unreachable in practice — confirm this is intended.
		log.Fatal("[xorm/redis_cacher] decode:", err)
		return
	}
	v := reflect.ValueOf(p)
	log.Printf("[xorm/redis_cacher] deserialize type:%v", v.Type())
	if v.Kind() == reflect.Struct {
		// !nashtsai! TODO following implementation will new an instance and make a copy,
		// hence performance degradation
		// extract the interface's data word and reinterpret it as a pointer
		// to the concrete struct (unsafe: relies on interface memory layout)
		var pp interface{} = &p
		datas := reflect.ValueOf(pp).Elem().InterfaceData()
		sp := reflect.NewAt(v.Type(),
			unsafe.Pointer(datas[1])).Interface()
		ptr = sp
		vv := reflect.ValueOf(ptr)
		log.Printf("[xorm/redis_cacher] deserialize convert ptr type:%v | CanAddr:%t", vv.Type(), vv.CanAddr())
		// --
	} else {
		ptr = p
	}
	return
}
// RegisterGobConcreteType registers the concrete type of value with gob
// so it can round-trip inside an interface. Primitive kinds are already
// known to gob and are skipped; unsupported kinds yield an error.
func RegisterGobConcreteType(value interface{}) error {
	t := reflect.TypeOf(value)
	log.Printf("[xorm/redis_cacher] RegisterGobConcreteType:%v", t)
	switch t.Kind() {
	case reflect.Ptr:
		// register the pointed-to value rather than the pointer itself
		elem := reflect.ValueOf(value).Elem()
		gob.Register(elem.Interface())
	case reflect.Struct, reflect.Map, reflect.Slice:
		gob.Register(value)
	case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
		reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
		// primitives are pre-registered by gob; nothing to do
	default:
		return fmt.Errorf("unhandled type: %v", t)
	}
	return nil
}
// fixed typo
package xormrediscache
import (
"bytes"
"encoding/gob"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/go-xorm/core"
"hash/crc32"
"log"
"reflect"
// "strconv"
"time"
"unsafe"
)
// Sentinel expirations: DEFAULT_EXPIRATION defers to the cacher's
// configured default; FOREVER_EXPIRATION stores without a TTL.
const (
	DEFAULT_EXPIRATION = time.Duration(0)
	FOREVER_EXPIRATION = time.Duration(-1)
)
// RedisCacher wraps the Redis client to meet the Cache interface.
type RedisCacher struct {
	pool              *redis.Pool   // connection pool shared by all operations
	defaultExpiration time.Duration // TTL used when DEFAULT_EXPIRATION is requested
}
// NewRedisCacher creates a RedisCacher backed by a redigo connection
// pool. Dialed connections authenticate with AUTH when password is
// non-empty, otherwise they are verified with PING.
// until redigo supports sharding/clustering, only one host will be in hostList
func NewRedisCacher(host string, password string, defaultExpiration time.Duration) *RedisCacher {
	var pool = &redis.Pool{
		MaxIdle:     5,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redis.Conn, error) {
			// the redis protocol should probably be made sett-able
			c, err := redis.Dial("tcp", host)
			if err != nil {
				return nil, err
			}
			if len(password) > 0 {
				if _, err := c.Do("AUTH", password); err != nil {
					c.Close()
					return nil, err
				}
			} else {
				// check with PING
				if _, err := c.Do("PING"); err != nil {
					c.Close()
					return nil, err
				}
			}
			return c, err
		},
		// custom connection test method
		TestOnBorrow: func(c redis.Conn, t time.Time) error {
			if _, err := c.Do("PING"); err != nil {
				return err
			}
			return nil
		},
	}
	return &RedisCacher{pool, defaultExpiration}
}
// exists reports whether key is present in redis; errors are treated as
// "not present".
func exists(conn redis.Conn, key string) bool {
	found, _ := redis.Bool(conn.Do("EXISTS", key))
	return found
}
// getBeanKey builds the cache key for a single bean of a table.
func (c *RedisCacher) getBeanKey(tableName string, id string) string {
	return "xorm:bean:" + tableName + ":" + id
}
// getSqlKey builds the cache key for a SQL statement, hashing the SQL
// with CRC32 to keep the key short.
func (c *RedisCacher) getSqlKey(tableName string, sql string) string {
	checksum := crc32.ChecksumIEEE([]byte(sql))
	return fmt.Sprintf("xorm:sql:%s:%d", tableName, checksum)
}
// Flush deletes every xorm-prefixed key from redis.
func (c *RedisCacher) Flush() error {
	// "xorm:*" is a glob pattern, but delObject treats its argument as a
	// literal key (EXISTS/DEL do not expand patterns), so it could never
	// match anything. delObjects expands the pattern via KEYS first.
	return c.delObjects("xorm:*")
}
// getObject fetches and deserializes the value stored under key,
// returning nil when the key is missing or the entry cannot be decoded.
func (c *RedisCacher) getObject(key string) interface{} {
	conn := c.pool.Get()
	defer conn.Close()
	raw, err := conn.Do("GET", key)
	if raw == nil {
		return nil
	}
	item, err := redis.Bytes(raw, err)
	if err != nil {
		log.Fatalf("[xorm/redis_cacher] redis.Bytes failed: %s", err)
		return nil
	}
	value, err := deserialize(item)
	if err != nil {
		// previously this error was silently discarded; treat an
		// undecodable entry as a cache miss instead.
		log.Printf("[xorm/redis_cacher] deserialize failed: %s", err)
		return nil
	}
	return value
}
// GetIds returns the cached id list for a table+SQL pair, or nil on miss.
func (c *RedisCacher) GetIds(tableName, sql string) interface{} {
	sqlKey := c.getSqlKey(tableName, sql)
	log.Printf("[xorm/redis_cacher] GetIds|tableName:%s|sql:%s|key:%s", tableName, sql, sqlKey)
	return c.getObject(sqlKey)
}
// GetBean returns the cached bean for a table+id pair, or nil on miss.
func (c *RedisCacher) GetBean(tableName string, id string) interface{} {
	beanKey := c.getBeanKey(tableName, id)
	log.Printf("[xorm/redis_cacher] GetBean|tableName:%s|id:%s|key:%s", tableName, id, beanKey)
	return c.getObject(beanKey)
}
// putObject serializes value and stores it under key with the cacher's
// default expiration.
func (c *RedisCacher) putObject(key string, value interface{}) {
	// obtain the connection explicitly so it is returned to the pool;
	// c.pool.Get().Do leaked one connection per call because the
	// anonymous connection was never closed.
	conn := c.pool.Get()
	defer conn.Close()
	c.invoke(conn.Do, key, value, c.defaultExpiration)
}
// PutIds caches the id list produced by a table+SQL pair.
func (c *RedisCacher) PutIds(tableName, sql string, ids interface{}) {
	sqlKey := c.getSqlKey(tableName, sql)
	log.Printf("[xorm/redis_cacher] PutIds|tableName:%s|sql:%s|key:%s|obj:%s|type:%v", tableName, sql, sqlKey, ids, reflect.TypeOf(ids))
	c.putObject(sqlKey, ids)
}
// PutBean caches a single bean under its table+id key.
func (c *RedisCacher) PutBean(tableName string, id string, obj interface{}) {
	beanKey := c.getBeanKey(tableName, id)
	log.Printf("[xorm/redis_cacher] PutBean|tableName:%s|id:%s|key:%s|type:%v", tableName, id, beanKey, reflect.TypeOf(obj))
	c.putObject(beanKey, obj)
}
// delObject deletes a single literal key, returning core.ErrCacheMiss
// when the key does not exist. The key is NOT treated as a glob pattern.
func (c *RedisCacher) delObject(key string) error {
	log.Printf("[xorm/redis_cacher] delObject key:[%s]", key)
	conn := c.pool.Get()
	defer conn.Close()
	if !exists(conn, key) {
		log.Printf("[xorm/redis_cacher] delObject: %v", core.ErrCacheMiss)
		return core.ErrCacheMiss
	}
	_, err := conn.Do("DEL", key)
	return err
}
// delObjects removes every key matching the given pattern (for example
// "xorm:table:*"). Deletion is best-effort: a failed DEL is logged and
// the remaining keys are still attempted.
//
// NOTE(review): KEYS blocks the Redis server while it scans the keyspace;
// SCAN would be friendlier on large datasets — confirm before changing.
func (c *RedisCacher) delObjects(key string) error {
	log.Printf("[xorm/redis_cacher] delObjects key:[%s]", key)
	conn := c.pool.Get()
	defer conn.Close()
	keys, err := conn.Do("KEYS", key)
	log.Printf("[xorm/redis_cacher] delObjects keys: %v", keys)
	if err != nil {
		return err
	}
	// Guard the type assertion: a non-array reply previously panicked.
	replies, ok := keys.([]interface{})
	if !ok {
		return fmt.Errorf("unexpected KEYS reply type: %T", keys)
	}
	for _, k := range replies {
		if _, err := conn.Do("DEL", k); err != nil {
			log.Printf("[xorm/redis_cacher] delObjects DEL failed: %v", err)
		}
	}
	return nil
}
// DelIds invalidates the cached id list for the given table/SQL pair.
func (c *RedisCacher) DelIds(tableName, sql string) {
	key := c.getSqlKey(tableName, sql)
	c.delObject(key)
}
// DelBean invalidates the cached bean for the given table/id pair.
func (c *RedisCacher) DelBean(tableName string, id string) {
	key := c.getBeanKey(tableName, id)
	c.delObject(key)
}
// ClearIds invalidates every cached id list belonging to tableName.
func (c *RedisCacher) ClearIds(tableName string) {
	pattern := c.getSqlKey(tableName, "*")
	c.delObjects(pattern)
}
// ClearBeans invalidates every cached bean belonging to tableName.
func (c *RedisCacher) ClearBeans(tableName string) {
	pattern := c.getBeanKey(tableName, "*")
	c.delObjects(pattern)
}
// invoke serializes value and writes it to Redis through f, honoring the
// requested expiration. f is typically a pooled connection's Do method.
// DEFAULT_EXPIRATION resolves to the cacher's configured default and
// FOREVER_EXPIRATION stores the key without a TTL.
func (c *RedisCacher) invoke(f func(string, ...interface{}) (interface{}, error),
	key string, value interface{}, expires time.Duration) error {
	switch expires {
	case DEFAULT_EXPIRATION:
		expires = c.defaultExpiration
	case FOREVER_EXPIRATION:
		expires = time.Duration(0)
	}
	payload, err := serialize(value)
	if err != nil {
		return err
	}
	conn := c.pool.Get()
	defer conn.Close()
	// No TTL requested: plain SET.
	if expires <= 0 {
		_, err = f("SET", key, payload)
		return err
	}
	// SETEX takes the TTL in whole seconds.
	_, err = f("SETEX", key, int32(expires/time.Second), payload)
	return err
}
// serialize gob-encodes value (wrapped in an interface{} envelope) for
// storage in Redis. Bare structs are rejected: deserialize recovers a
// pointer, so callers must pass a pointer to the struct instead.
func serialize(value interface{}) ([]byte, error) {
	err := RegisterGobConcreteType(value)
	if err != nil {
		return nil, err
	}
	if reflect.TypeOf(value).Kind() == reflect.Struct {
		return nil, fmt.Errorf("serialize func only take pointer of a struct")
	}
	var b bytes.Buffer
	encoder := gob.NewEncoder(&b)
	log.Printf("[xorm/redis_cacher] serialize type:%v", reflect.TypeOf(value))
	err = encoder.Encode(&value)
	if err != nil {
		// BUG FIX: this was log.Fatalf, which calls os.Exit and made the
		// return below unreachable; an encode failure should surface as an
		// error, not kill the process.
		log.Printf("[xorm/redis_cacher] gob encoding '%s' failed: %s|value:%v", value, err, value)
		return nil, err
	}
	return b.Bytes(), nil
}
func deserialize(byt []byte) (ptr interface{}, err error) {
b := bytes.NewBuffer(byt)
decoder := gob.NewDecoder(b)
var p interface{}
err = decoder.Decode(&p)
if err != nil {
log.Fatal("[xorm/redis_cacher] decode:", err)
return
}
v := reflect.ValueOf(p)
log.Printf("[xorm/redis_cacher] deserialize type:%v", v.Type())
if v.Kind() == reflect.Struct {
// !nashtsai! TODO following implementation will new an instance and make a copy,
// hence performance degradation
var pp interface{} = &p
datas := reflect.ValueOf(pp).Elem().InterfaceData()
sp := reflect.NewAt(v.Type(),
unsafe.Pointer(datas[1])).Interface()
ptr = sp
vv := reflect.ValueOf(ptr)
log.Printf("[xorm/redis_cacher] deserialize convert ptr type:%v | CanAddr:%t", vv.Type(), vv.CanAddr())
// --
} else {
ptr = p
}
return
}
// RegisterGobConcreteType makes value's concrete type known to gob so it
// can be encoded inside an interface{} envelope. Pointers are registered
// as their element type, composite kinds are registered directly,
// primitives need no registration, and anything else is rejected.
func RegisterGobConcreteType(value interface{}) error {
	t := reflect.TypeOf(value)
	log.Printf("[xorm/redis_cacher] RegisterGobConcreteType:%v", t)
	switch t.Kind() {
	case reflect.Ptr:
		// Register the pointee, since that is what gets encoded.
		elem := reflect.ValueOf(value).Elem().Interface()
		gob.Register(elem)
	case reflect.Struct, reflect.Map, reflect.Slice:
		gob.Register(value)
	case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64, reflect.Bool, reflect.Float32, reflect.Float64,
		reflect.Complex64, reflect.Complex128:
		// Primitive kinds are already known to gob; nothing to do.
	default:
		return fmt.Errorf("unhandled type: %v", t)
	}
	return nil
}
|
package consul
import (
	"crypto/tls"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"os"
	"time"

	"github.com/hashicorp/memberlist"
	"github.com/hashicorp/raft"
	"github.com/hashicorp/serf/serf"
)
const (
DefaultDC = "dc1"
DefaultLANSerfPort = 8301
DefaultWANSerfPort = 8302
)
var (
DefaultRPCAddr = &net.TCPAddr{IP: net.ParseIP("0.0.0.0"), Port: 8300}
)
// ProtocolVersionMap is the mapping of Consul protocol versions
// to Serf protocol versions. We mask the Serf protocols using
// our own protocol version.
var protocolVersionMap map[uint8]uint8
// init seeds the Consul-to-Serf protocol version mapping: Consul protocol
// version 1 corresponds to Serf protocol version 4.
func init() {
	protocolVersionMap = map[uint8]uint8{
		1: 4,
	}
}
// Config is used to configure the server
type Config struct {
// Bootstrap mode is used to bring up the first Consul server.
// It is required so that it can elect a leader without any
// other nodes being present
Bootstrap bool
// Datacenter is the datacenter this Consul server represents
Datacenter string
// DataDir is the directory to store our state in
DataDir string
// Node name is the name we use to advertise. Defaults to hostname.
NodeName string
// RaftConfig is the configuration used for Raft in the local DC
RaftConfig *raft.Config
// RPCAddr is the RPC address used by Consul. This should be reachable
// by the WAN and LAN
RPCAddr *net.TCPAddr
// RPCAdvertise is the address that is advertised to other nodes for
// the RPC endpoint. This can differ from the RPC address, if for example
// the RPCAddr is unspecified "0.0.0.0:8300", but this address must be
// reachable
RPCAdvertise *net.TCPAddr
// SerfLANConfig is the configuration for the intra-dc serf
SerfLANConfig *serf.Config
// SerfWANConfig is the configuration for the cross-dc serf
SerfWANConfig *serf.Config
// ReconcileInterval controls how often we reconcile the strongly
// consistent store with the Serf info. This is used to handle nodes
// that are force removed, as well as intermittent unavailability during
// leader election.
ReconcileInterval time.Duration
// LogOutput is the location to write logs to. If this is not set,
// logs will go to stderr.
LogOutput io.Writer
// ProtocolVersion is the protocol version to speak. This must be between
// ProtocolVersionMin and ProtocolVersionMax.
ProtocolVersion uint8
// VerifyIncoming is used to verify the authenticity of incoming connections.
// This means that TCP requests are forbidden, only allowing for TLS. TLS connections
// must match a provided certificate authority. This can be used to force client auth.
VerifyIncoming bool
// VerifyOutgoing is used to verify the authenticity of outgoing connections.
// This means that TLS requests are used, and TCP requests are not made. TLS connections
// must match a provided certificate authority. This is used to verify authenticity of
// server nodes.
VerifyOutgoing bool
// CAFile is a path to a certificate authority file. This is used with VerifyIncoming
// or VerifyOutgoing to verify the TLS connection.
CAFile string
// CertFile is used to provide a TLS certificate that is used for serving TLS connections.
// Must be provided to serve TLS connections.
CertFile string
// KeyFile is used to provide a TLS key that is used for serving TLS connections.
// Must be provided to serve TLS connections.
KeyFile string
// ServerUp callback can be used to trigger a notification that
// a Consul server is now up and known about.
ServerUp func()
}
// CheckVersion is used to check if the ProtocolVersion is valid: it must
// lie within [ProtocolVersionMin, ProtocolVersionMax].
func (c *Config) CheckVersion() error {
	switch {
	case c.ProtocolVersion < ProtocolVersionMin:
		return fmt.Errorf("Protocol version '%d' too low. Must be in range: [%d, %d]",
			c.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
	case c.ProtocolVersion > ProtocolVersionMax:
		return fmt.Errorf("Protocol version '%d' too high. Must be in range: [%d, %d]",
			c.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
	default:
		return nil
	}
}
// CACertificate is used to open and parse a CA file. The file is expected
// to be PEM-encoded. It returns (nil, nil) when no CA file is configured.
func (c *Config) CACertificate() (*x509.Certificate, error) {
	if c.CAFile == "" {
		return nil, nil
	}
	// Read the file
	data, err := ioutil.ReadFile(c.CAFile)
	if err != nil {
		return nil, fmt.Errorf("Failed to read CA file: %v", err)
	}
	// BUG FIX: CA files on disk are PEM-armored, not raw DER. Feeding the
	// PEM bytes straight into x509.ParseCertificate always failed; decode
	// the first PEM block and parse its DER payload instead.
	block, _ := pem.Decode(data)
	if block == nil {
		return nil, fmt.Errorf("Failed to decode CA PEM!")
	}
	// Parse the certificate
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse CA file: %v", err)
	}
	return cert, nil
}
// KeyPair is used to open and parse a certificate and key file. It returns
// (nil, nil) when either path is unset, meaning TLS serving is simply not
// configured.
func (c *Config) KeyPair() (*tls.Certificate, error) {
	if c.CertFile == "" || c.KeyFile == "" {
		return nil, nil
	}
	cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile)
	if err != nil {
		return nil, fmt.Errorf("Failed to load cert/key pair: %v", err)
	}
	return &cert, nil
}
// OutgoingTLSConfig generates a TLS configuration for outgoing requests.
// Server verification is skipped unless VerifyOutgoing is set, in which
// case a CA certificate is mandatory.
func (c *Config) OutgoingTLSConfig() (*tls.Config, error) {
	// Create the tlsConfig
	tlsConfig := &tls.Config{
		RootCAs:            x509.NewCertPool(),
		InsecureSkipVerify: !c.VerifyOutgoing,
	}
	// Parse the CA cert if any
	ca, err := c.CACertificate()
	if err != nil {
		return nil, err
	} else if ca != nil {
		tlsConfig.RootCAs.AddCert(ca)
	}
	// Ensure we have a CA if VerifyOutgoing is set; verification without a
	// trust anchor could never succeed.
	if c.VerifyOutgoing && ca == nil {
		return nil, fmt.Errorf("VerifyOutgoing set, and no CA certificate provided!")
	}
	// Add cert/key for client-side authentication, when configured.
	cert, err := c.KeyPair()
	if err != nil {
		return nil, err
	} else if cert != nil {
		tlsConfig.Certificates = []tls.Certificate{*cert}
	}
	return tlsConfig, nil
}
// IncomingTLSConfig generates a TLS configuration for incoming requests.
// When VerifyIncoming is set, client certificates are required and must
// chain to the configured CA.
func (c *Config) IncomingTLSConfig() (*tls.Config, error) {
	// Create the tlsConfig
	tlsConfig := &tls.Config{
		ClientCAs:  x509.NewCertPool(),
		ClientAuth: tls.NoClientCert,
	}
	// Parse the CA cert if any
	ca, err := c.CACertificate()
	if err != nil {
		return nil, err
	} else if ca != nil {
		tlsConfig.ClientCAs.AddCert(ca)
	}
	// Add cert/key
	cert, err := c.KeyPair()
	if err != nil {
		return nil, err
	} else if cert != nil {
		tlsConfig.Certificates = []tls.Certificate{*cert}
	}
	// Check if we require verification: client auth needs both a trust
	// anchor and a serving certificate.
	if c.VerifyIncoming {
		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
		if ca == nil {
			return nil, fmt.Errorf("VerifyIncoming set, and no CA certificate provided!")
		}
		if cert == nil {
			return nil, fmt.Errorf("VerifyIncoming set, and no Cert/Key pair provided!")
		}
	}
	// BUG FIX: previously returned (nil, nil), discarding the config that
	// was just built — every caller received a nil *tls.Config.
	return tlsConfig, nil
}
// DefaultConfig is used to return a sane default configuration. It panics
// only if the local hostname cannot be determined, since NodeName has no
// usable fallback.
func DefaultConfig() *Config {
	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}
	conf := &Config{
		Datacenter:        DefaultDC,
		NodeName:          hostname,
		RPCAddr:           DefaultRPCAddr,
		RaftConfig:        raft.DefaultConfig(),
		SerfLANConfig:     serf.DefaultConfig(),
		SerfWANConfig:     serf.DefaultConfig(),
		ReconcileInterval: 60 * time.Second,
		ProtocolVersion:   ProtocolVersionMax,
	}
	// Increase our reap interval to 3 days instead of 24h.
	conf.SerfLANConfig.ReconnectTimeout = 3 * 24 * time.Hour
	conf.SerfWANConfig.ReconnectTimeout = 3 * 24 * time.Hour
	// WAN Serf should use the WAN timing, since we are using it
	// to communicate between DC's
	conf.SerfWANConfig.MemberlistConfig = memberlist.DefaultWANConfig()
	// Ensure we don't have port conflicts between the two Serf instances
	conf.SerfLANConfig.MemberlistConfig.BindPort = DefaultLANSerfPort
	conf.SerfWANConfig.MemberlistConfig.BindPort = DefaultWANSerfPort
	// Disable shutdown on removal so a removed server keeps running
	conf.RaftConfig.ShutdownOnRemove = false
	return conf
}
consul: Fix decoding of certificate
package consul
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"github.com/hashicorp/memberlist"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
"io"
"io/ioutil"
"net"
"os"
"time"
)
const (
DefaultDC = "dc1"
DefaultLANSerfPort = 8301
DefaultWANSerfPort = 8302
)
var (
DefaultRPCAddr = &net.TCPAddr{IP: net.ParseIP("0.0.0.0"), Port: 8300}
)
// ProtocolVersionMap is the mapping of Consul protocol versions
// to Serf protocol versions. We mask the Serf protocols using
// our own protocol version.
var protocolVersionMap map[uint8]uint8
func init() {
protocolVersionMap = map[uint8]uint8{
1: 4,
}
}
// Config is used to configure the server
type Config struct {
// Bootstrap mode is used to bring up the first Consul server.
// It is required so that it can elect a leader without any
// other nodes being present
Bootstrap bool
// Datacenter is the datacenter this Consul server represents
Datacenter string
// DataDir is the directory to store our state in
DataDir string
// Node name is the name we use to advertise. Defaults to hostname.
NodeName string
// RaftConfig is the configuration used for Raft in the local DC
RaftConfig *raft.Config
// RPCAddr is the RPC address used by Consul. This should be reachable
// by the WAN and LAN
RPCAddr *net.TCPAddr
// RPCAdvertise is the address that is advertised to other nodes for
// the RPC endpoint. This can differ from the RPC address, if for example
// the RPCAddr is unspecified "0.0.0.0:8300", but this address must be
// reachable
RPCAdvertise *net.TCPAddr
// SerfLANConfig is the configuration for the intra-dc serf
SerfLANConfig *serf.Config
// SerfWANConfig is the configuration for the cross-dc serf
SerfWANConfig *serf.Config
// ReconcileInterval controls how often we reconcile the strongly
// consistent store with the Serf info. This is used to handle nodes
// that are force removed, as well as intermittent unavailability during
// leader election.
ReconcileInterval time.Duration
// LogOutput is the location to write logs to. If this is not set,
// logs will go to stderr.
LogOutput io.Writer
// ProtocolVersion is the protocol version to speak. This must be between
// ProtocolVersionMin and ProtocolVersionMax.
ProtocolVersion uint8
// VerifyIncoming is used to verify the authenticity of incoming connections.
// This means that TCP requests are forbidden, only allowing for TLS. TLS connections
// must match a provided certificate authority. This can be used to force client auth.
VerifyIncoming bool
// VerifyOutgoing is used to verify the authenticity of outgoing connections.
// This means that TLS requests are used, and TCP requests are not made. TLS connections
// must match a provided certificate authority. This is used to verify authenticity of
// server nodes.
VerifyOutgoing bool
// CAFile is a path to a certificate authority file. This is used with VerifyIncoming
// or VerifyOutgoing to verify the TLS connection.
CAFile string
// CertFile is used to provide a TLS certificate that is used for serving TLS connections.
// Must be provided to serve TLS connections.
CertFile string
// KeyFile is used to provide a TLS key that is used for serving TLS connections.
// Must be provided to serve TLS connections.
KeyFile string
// ServerUp callback can be used to trigger a notification that
// a Consul server is now up and known about.
ServerUp func()
}
// CheckVersion is used to check if the ProtocolVersion is valid
func (c *Config) CheckVersion() error {
if c.ProtocolVersion < ProtocolVersionMin {
return fmt.Errorf("Protocol version '%d' too low. Must be in range: [%d, %d]",
c.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
} else if c.ProtocolVersion > ProtocolVersionMax {
return fmt.Errorf("Protocol version '%d' too high. Must be in range: [%d, %d]",
c.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
}
return nil
}
// CACertificate is used to open and parse a CA file. The file must be
// PEM-encoded; the first PEM block's DER payload is parsed. Returns
// (nil, nil) when no CA file is configured.
func (c *Config) CACertificate() (*x509.Certificate, error) {
	if c.CAFile == "" {
		return nil, nil
	}
	// Read the file
	data, err := ioutil.ReadFile(c.CAFile)
	if err != nil {
		return nil, fmt.Errorf("Failed to read CA file: %v", err)
	}
	// Decode from the PEM format; ParseCertificate expects raw DER, so the
	// PEM armor must be stripped first.
	block, _ := pem.Decode(data)
	if block == nil {
		return nil, fmt.Errorf("Failed to decode CA PEM!")
	}
	// Parse the certificate
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse CA file: %v", err)
	}
	return cert, nil
}
// KeyPair is used to open and parse a certificate and key file
func (c *Config) KeyPair() (*tls.Certificate, error) {
if c.CertFile == "" || c.KeyFile == "" {
return nil, nil
}
cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile)
if err != nil {
return nil, fmt.Errorf("Failed to load cert/key pair: %v", err)
}
return &cert, err
}
// OutgoingTLSConfig generates a TLS configuration for outgoing requests
func (c *Config) OutgoingTLSConfig() (*tls.Config, error) {
// Create the tlsConfig
tlsConfig := &tls.Config{
RootCAs: x509.NewCertPool(),
InsecureSkipVerify: !c.VerifyOutgoing,
}
// Parse the CA cert if any
ca, err := c.CACertificate()
if err != nil {
return nil, err
} else if ca != nil {
tlsConfig.RootCAs.AddCert(ca)
}
// Ensure we have a CA if VerifyOutgoing is set
if c.VerifyOutgoing && ca == nil {
return nil, fmt.Errorf("VerifyOutgoing set, and no CA certificate provided!")
}
// Add cert/key
cert, err := c.KeyPair()
if err != nil {
return nil, err
} else if cert != nil {
tlsConfig.Certificates = []tls.Certificate{*cert}
}
return tlsConfig, nil
}
// IncomingTLSConfig generates a TLS configuration for incoming requests.
// When VerifyIncoming is set, clients must present a certificate that
// chains to the configured CA.
func (c *Config) IncomingTLSConfig() (*tls.Config, error) {
	// Create the tlsConfig
	tlsConfig := &tls.Config{
		ClientCAs:  x509.NewCertPool(),
		ClientAuth: tls.NoClientCert,
	}
	// Parse the CA cert if any
	ca, err := c.CACertificate()
	if err != nil {
		return nil, err
	} else if ca != nil {
		tlsConfig.ClientCAs.AddCert(ca)
	}
	// Add cert/key
	cert, err := c.KeyPair()
	if err != nil {
		return nil, err
	} else if cert != nil {
		tlsConfig.Certificates = []tls.Certificate{*cert}
	}
	// Check if we require verification; client auth needs both a trust
	// anchor and a serving certificate.
	if c.VerifyIncoming {
		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
		if ca == nil {
			return nil, fmt.Errorf("VerifyIncoming set, and no CA certificate provided!")
		}
		if cert == nil {
			return nil, fmt.Errorf("VerifyIncoming set, and no Cert/Key pair provided!")
		}
	}
	return tlsConfig, nil
}
// DefaultConfig is used to return a sane default configuration
func DefaultConfig() *Config {
hostname, err := os.Hostname()
if err != nil {
panic(err)
}
conf := &Config{
Datacenter: DefaultDC,
NodeName: hostname,
RPCAddr: DefaultRPCAddr,
RaftConfig: raft.DefaultConfig(),
SerfLANConfig: serf.DefaultConfig(),
SerfWANConfig: serf.DefaultConfig(),
ReconcileInterval: 60 * time.Second,
ProtocolVersion: ProtocolVersionMax,
}
// Increase our reap interval to 3 days instead of 24h.
conf.SerfLANConfig.ReconnectTimeout = 3 * 24 * time.Hour
conf.SerfWANConfig.ReconnectTimeout = 3 * 24 * time.Hour
// WAN Serf should use the WAN timing, since we are using it
// to communicate between DC's
conf.SerfWANConfig.MemberlistConfig = memberlist.DefaultWANConfig()
// Ensure we don't have port conflicts
conf.SerfLANConfig.MemberlistConfig.BindPort = DefaultLANSerfPort
conf.SerfWANConfig.MemberlistConfig.BindPort = DefaultWANSerfPort
// Disable shutdown on removal
conf.RaftConfig.ShutdownOnRemove = false
return conf
}
|
package consul
import (
"github.com/armon/go-metrics"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
"net"
"strconv"
"time"
)
const (
SerfCheckID = "serfHealth"
SerfCheckName = "Serf Health Status"
ConsulServiceID = "consul"
ConsulServiceName = "consul"
newLeaderEvent = "consul:new-leader"
)
// monitorLeadership is used to monitor if we acquire or lose our role
// as the leader in the Raft cluster. There is some work the leader is
// expected to do, so we must react to changes
func (s *Server) monitorLeadership() {
	leaderCh := s.raft.LeaderCh()
	// stopCh is non-nil exactly while a leaderLoop goroutine is running.
	var stopCh chan struct{}
	for {
		select {
		case isLeader := <-leaderCh:
			if isLeader {
				stopCh = make(chan struct{})
				go s.leaderLoop(stopCh)
				s.logger.Printf("[INFO] consul: cluster leadership acquired")
			} else if stopCh != nil {
				// Closing stopCh signals leaderLoop to exit.
				close(stopCh)
				stopCh = nil
				s.logger.Printf("[INFO] consul: cluster leadership lost")
			}
		case <-s.shutdownCh:
			return
		}
	}
}
// leaderLoop runs as long as we are the leader to run various
// maintence activities. It retries the barrier+reconcile sequence on
// failure and then reconciles periodically and on Serf events until
// stopCh is closed or the server shuts down.
func (s *Server) leaderLoop(stopCh chan struct{}) {
	// Fire a user event indicating a new leader
	payload := []byte(s.config.NodeName)
	if err := s.serfLAN.UserEvent(newLeaderEvent, payload, false); err != nil {
		s.logger.Printf("[WARN] consul: failed to broadcast new leader event: %v", err)
	}
	// Reconcile channel is only used once initial reconcile
	// has succeeded
	var reconcileCh chan serf.Member
RECONCILE:
	// Setup a reconciliation timer; reconcileCh is reset to nil so that
	// member events are ignored until the full reconcile below succeeds.
	reconcileCh = nil
	interval := time.After(s.config.ReconcileInterval)
	// Apply a raft barrier to ensure our FSM is caught up
	start := time.Now()
	barrier := s.raft.Barrier(0)
	if err := barrier.Error(); err != nil {
		s.logger.Printf("[ERR] consul: failed to wait for barrier: %v", err)
		goto WAIT
	}
	metrics.MeasureSince([]string{"consul", "leader", "barrier"}, start)
	// Reconcile any missing data
	if err := s.reconcile(); err != nil {
		s.logger.Printf("[ERR] consul: failed to reconcile: %v", err)
		goto WAIT
	}
	// Initial reconcile worked, now we can process the channel
	// updates
	reconcileCh = s.reconcileCh
WAIT:
	// Periodically reconcile as long as we are the leader,
	// or when Serf events arrive. A receive from a nil reconcileCh blocks
	// forever, which is what disables the member case before the first
	// successful reconcile.
	for {
		select {
		case <-stopCh:
			return
		case <-s.shutdownCh:
			return
		case <-interval:
			goto RECONCILE
		case member := <-reconcileCh:
			s.reconcileMember(member)
		}
	}
}
// reconcile is used to reconcile the differences between Serf
// membership and what is reflected in our strongly consistent store.
// Mainly we need to ensure all live nodes are registered, all failed
// nodes are marked as such, and all left nodes are de-registered.
func (s *Server) reconcile() (err error) {
	defer metrics.MeasureSince([]string{"consul", "leader", "reconcile"}, time.Now())
	members := s.serfLAN.Members()
	// Track every member name seen so reaped nodes can be detected below.
	knownMembers := make(map[string]struct{})
	for _, member := range members {
		if err := s.reconcileMember(member); err != nil {
			return err
		}
		knownMembers[member.Name] = struct{}{}
	}
	// Reconcile any members that have been reaped while we were not the leader
	return s.reconcileReaped(knownMembers)
}
// reconcileReaped is used to reconcile nodes that have failed and been reaped
// from Serf but remain in the catalog. This is done by looking for SerfCheckID
// in a crticial state that does not correspond to a known Serf member. We generate
// a "reap" event to cause the node to be cleaned up.
func (s *Server) reconcileReaped(known map[string]struct{}) error {
	state := s.fsm.State()
	_, critical := state.ChecksInState(structs.HealthCritical)
	for _, check := range critical {
		// Ignore any non serf checks
		if check.CheckID != SerfCheckID {
			continue
		}
		// Check if this node is "known" by serf
		if _, ok := known[check.Node]; ok {
			continue
		}
		// Create a fake member so the normal deregistration path can be
		// reused for this node.
		member := serf.Member{
			Name: check.Node,
			Tags: map[string]string{
				"dc":   s.config.Datacenter,
				"role": "node",
			},
		}
		// Get the node services, look for ConsulServiceID to tell whether
		// this node was a server.
		// NOTE(review): assumes services is non-nil here — confirm the
		// NodeServices contract for unknown nodes.
		_, services := state.NodeServices(check.Node)
		serverPort := 0
		for _, service := range services.Services {
			if service.ID == ConsulServiceID {
				serverPort = service.Port
				break
			}
		}
		// Create the appropriate tags if this was a server node, so the
		// reap handler also removes it from the Raft peer set.
		if serverPort > 0 {
			member.Tags["role"] = "consul"
			member.Tags["port"] = strconv.FormatUint(uint64(serverPort), 10)
		}
		// Attempt to reap this member
		if err := s.handleReapMember(member); err != nil {
			return err
		}
	}
	return nil
}
// reconcileMember is used to do an async reconcile of a single
// serf member, dispatching on the member's Serf status.
func (s *Server) reconcileMember(member serf.Member) error {
	// Check if this is a member we should handle (same datacenter only)
	if !s.shouldHandleMember(member) {
		s.logger.Printf("[WARN] consul: skipping reconcile of node %v", member)
		return nil
	}
	defer metrics.MeasureSince([]string{"consul", "leader", "reconcileMember"}, time.Now())
	var err error
	switch member.Status {
	case serf.StatusAlive:
		err = s.handleAliveMember(member)
	case serf.StatusFailed:
		err = s.handleFailedMember(member)
	case serf.StatusLeft:
		err = s.handleLeftMember(member)
	case StatusReap:
		// StatusReap is a Consul-specific status defined elsewhere in this
		// package for members synthesized by reconcileReaped.
		err = s.handleReapMember(member)
	}
	if err != nil {
		s.logger.Printf("[ERR] consul: failed to reconcile member: %v: %v",
			member, err)
		return err
	}
	return nil
}
// shouldHandleMember checks if this is a Consul pool member in our own
// datacenter — either a client node or a server node.
func (s *Server) shouldHandleMember(member serf.Member) bool {
	if valid, dc := isConsulNode(member); valid && dc == s.config.Datacenter {
		return true
	}
	valid, parts := isConsulServer(member)
	return valid && parts.Datacenter == s.config.Datacenter
}
// handleAliveMember is used to ensure the node
// is registered, with a passing health check. Registration is skipped
// when the catalog already reflects the node, its consul service (for
// servers), and a passing serf check.
//
// NOTE(review): the upstream follow-up commit adds an Output message to
// the serf health check — confirm whether structs.HealthCheck carries an
// Output field before mirroring that here.
func (s *Server) handleAliveMember(member serf.Member) error {
	state := s.fsm.State()
	// Register consul service if a server
	var service *structs.NodeService
	if valid, parts := isConsulServer(member); valid {
		service = &structs.NodeService{
			ID:      ConsulServiceID,
			Service: ConsulServiceName,
			Port:    parts.Port,
		}
		// Attempt to join the consul server to the Raft peer set
		if err := s.joinConsulServer(member, parts); err != nil {
			return err
		}
	}
	// Check if the node exists with the same address; if so we may be able
	// to skip the catalog write entirely.
	_, found, addr := state.GetNode(member.Name)
	if found && addr == member.Addr.String() {
		// Check if the associated service is available
		if service != nil {
			match := false
			_, services := state.NodeServices(member.Name)
			if services != nil {
				for id, _ := range services.Services {
					if id == service.ID {
						match = true
					}
				}
			}
			if !match {
				goto AFTER_CHECK
			}
		}
		// Check if the serfCheck is in the passing state; if it is, the
		// catalog is already up to date and no write is needed.
		_, checks := state.NodeChecks(member.Name)
		for _, check := range checks {
			if check.CheckID == SerfCheckID && check.Status == structs.HealthPassing {
				return nil
			}
		}
	}
AFTER_CHECK:
	s.logger.Printf("[INFO] consul: member '%s' joined, marking health alive", member.Name)
	// Register with the catalog
	req := structs.RegisterRequest{
		Datacenter: s.config.Datacenter,
		Node:       member.Name,
		Address:    member.Addr.String(),
		Service:    service,
		Check: &structs.HealthCheck{
			Node:    member.Name,
			CheckID: SerfCheckID,
			Name:    SerfCheckName,
			Status:  structs.HealthPassing,
		},
	}
	var out struct{}
	return s.endpoints.Catalog.Register(&req, &out)
}
// handleFailedMember is used to mark the node's status
// as being critical, along with all checks as unknown. The catalog write
// is skipped when the serf check is already critical for this address.
//
// NOTE(review): the upstream follow-up commit adds an Output message to
// the serf health check — confirm structs.HealthCheck has an Output field
// before mirroring that here.
func (s *Server) handleFailedMember(member serf.Member) error {
	state := s.fsm.State()
	// Check if the node exists
	_, found, addr := state.GetNode(member.Name)
	if found && addr == member.Addr.String() {
		// Check if the serfCheck is in the critical state; if so the
		// catalog already reflects the failure.
		_, checks := state.NodeChecks(member.Name)
		for _, check := range checks {
			if check.CheckID == SerfCheckID && check.Status == structs.HealthCritical {
				return nil
			}
		}
	}
	s.logger.Printf("[INFO] consul: member '%s' failed, marking health critical", member.Name)
	// Register with the catalog
	req := structs.RegisterRequest{
		Datacenter: s.config.Datacenter,
		Node:       member.Name,
		Address:    member.Addr.String(),
		Check: &structs.HealthCheck{
			Node:    member.Name,
			CheckID: SerfCheckID,
			Name:    SerfCheckName,
			Status:  structs.HealthCritical,
		},
	}
	var out struct{}
	return s.endpoints.Catalog.Register(&req, &out)
}
// handleLeftMember is used to handle members that gracefully
// left. They are deregistered if necessary.
func (s *Server) handleLeftMember(member serf.Member) error {
	return s.handleDeregisterMember("left", member)
}
// handleReapMember is used to handle members that have been
// reaped after a prolonged failure. They are deregistered.
func (s *Server) handleReapMember(member serf.Member) error {
	return s.handleDeregisterMember("reaped", member)
}
// handleDeregisterMember is used to deregister a member for a given
// reason ("left" or "reaped"); reason only affects the log message.
func (s *Server) handleDeregisterMember(reason string, member serf.Member) error {
	state := s.fsm.State()
	// Check if the node does not exists; nothing to do if it is already
	// absent from the catalog.
	_, found, _ := state.GetNode(member.Name)
	if !found {
		return nil
	}
	s.logger.Printf("[INFO] consul: member '%s' %s, deregistering", member.Name, reason)
	// Remove from Raft peers if this was a server
	if valid, parts := isConsulServer(member); valid {
		if err := s.removeConsulServer(member, parts.Port); err != nil {
			return err
		}
	}
	// Deregister the node from the catalog
	req := structs.DeregisterRequest{
		Datacenter: s.config.Datacenter,
		Node:       member.Name,
	}
	var out struct{}
	return s.endpoints.Catalog.Deregister(&req, &out)
}
// joinConsulServer is used to try to join another consul server to the
// Raft peer set. It refuses to add a second bootstrap-mode node, since
// two bootstrap nodes could each elect themselves leader.
func (s *Server) joinConsulServer(m serf.Member, parts *serverParts) error {
	// Do not join ourself
	if m.Name == s.config.NodeName {
		return nil
	}
	// Check for possibility of multiple bootstrap nodes
	if parts.Bootstrap {
		members := s.serfLAN.Members()
		for _, member := range members {
			valid, p := isConsulServer(member)
			if valid && member.Name != m.Name && p.Bootstrap {
				s.logger.Printf("[ERR] consul: '%v' and '%v' are both in bootstrap mode. Only one node should be in bootstrap mode, not adding Raft peer.", m.Name, member.Name)
				return nil
			}
		}
	}
	// Attempt to add as a peer; ErrKnownPeer means it is already in the
	// peer set and is not a failure.
	var addr net.Addr = &net.TCPAddr{IP: m.Addr, Port: parts.Port}
	future := s.raft.AddPeer(addr)
	if err := future.Error(); err != nil && err != raft.ErrKnownPeer {
		s.logger.Printf("[ERR] consul: failed to add raft peer: %v", err)
		return err
	}
	return nil
}
// removeConsulServer is used to try to remove a consul server that has
// left from the Raft peer set. ErrUnknownPeer (already removed) is not
// treated as a failure.
func (s *Server) removeConsulServer(m serf.Member, port int) error {
	// Do not remove ourself
	if m.Name == s.config.NodeName {
		return nil
	}
	// Attempt to remove as peer
	peer := &net.TCPAddr{IP: m.Addr, Port: port}
	future := s.raft.RemovePeer(peer)
	if err := future.Error(); err != nil && err != raft.ErrUnknownPeer {
		s.logger.Printf("[ERR] consul: failed to remove raft peer '%v': %v",
			peer, err)
		return err
	}
	return nil
}
consul: Provide output for serfHealth check. Fixes #176.
package consul
import (
"github.com/armon/go-metrics"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
"net"
"strconv"
"time"
)
const (
SerfCheckID = "serfHealth"
SerfCheckName = "Serf Health Status"
SerfCheckAliveOutput = "Agent alive and reachable"
SerfCheckFailedOutput = "Agent not live or unreachable"
ConsulServiceID = "consul"
ConsulServiceName = "consul"
newLeaderEvent = "consul:new-leader"
)
// monitorLeadership is used to monitor if we acquire or lose our role
// as the leader in the Raft cluster. There is some work the leader is
// expected to do, so we must react to changes
func (s *Server) monitorLeadership() {
leaderCh := s.raft.LeaderCh()
var stopCh chan struct{}
for {
select {
case isLeader := <-leaderCh:
if isLeader {
stopCh = make(chan struct{})
go s.leaderLoop(stopCh)
s.logger.Printf("[INFO] consul: cluster leadership acquired")
} else if stopCh != nil {
close(stopCh)
stopCh = nil
s.logger.Printf("[INFO] consul: cluster leadership lost")
}
case <-s.shutdownCh:
return
}
}
}
// leaderLoop runs as long as we are the leader to run various
// maintence activities
func (s *Server) leaderLoop(stopCh chan struct{}) {
// Fire a user event indicating a new leader
payload := []byte(s.config.NodeName)
if err := s.serfLAN.UserEvent(newLeaderEvent, payload, false); err != nil {
s.logger.Printf("[WARN] consul: failed to broadcast new leader event: %v", err)
}
// Reconcile channel is only used once initial reconcile
// has succeeded
var reconcileCh chan serf.Member
RECONCILE:
// Setup a reconciliation timer
reconcileCh = nil
interval := time.After(s.config.ReconcileInterval)
// Apply a raft barrier to ensure our FSM is caught up
start := time.Now()
barrier := s.raft.Barrier(0)
if err := barrier.Error(); err != nil {
s.logger.Printf("[ERR] consul: failed to wait for barrier: %v", err)
goto WAIT
}
metrics.MeasureSince([]string{"consul", "leader", "barrier"}, start)
// Reconcile any missing data
if err := s.reconcile(); err != nil {
s.logger.Printf("[ERR] consul: failed to reconcile: %v", err)
goto WAIT
}
// Initial reconcile worked, now we can process the channel
// updates
reconcileCh = s.reconcileCh
WAIT:
// Periodically reconcile as long as we are the leader,
// or when Serf events arrive
for {
select {
case <-stopCh:
return
case <-s.shutdownCh:
return
case <-interval:
goto RECONCILE
case member := <-reconcileCh:
s.reconcileMember(member)
}
}
}
// reconcile is used to reconcile the differences between Serf
// membership and what is reflected in our strongly consistent store.
// Mainly we need to ensure all live nodes are registered, all failed
// nodes are marked as such, and all left nodes are de-registered.
func (s *Server) reconcile() (err error) {
defer metrics.MeasureSince([]string{"consul", "leader", "reconcile"}, time.Now())
members := s.serfLAN.Members()
knownMembers := make(map[string]struct{})
for _, member := range members {
if err := s.reconcileMember(member); err != nil {
return err
}
knownMembers[member.Name] = struct{}{}
}
// Reconcile any members that have been reaped while we were not the leader
return s.reconcileReaped(knownMembers)
}
// reconcileReaped is used to reconcile nodes that have failed and been reaped
// from Serf but remain in the catalog. This is done by looking for SerfCheckID
// in a critical state that does not correspond to a known Serf member. We generate
// a "reap" event to cause the node to be cleaned up.
func (s *Server) reconcileReaped(known map[string]struct{}) error {
	state := s.fsm.State()
	_, critical := state.ChecksInState(structs.HealthCritical)
	for _, check := range critical {
		// Ignore any non serf checks
		if check.CheckID != SerfCheckID {
			continue
		}
		// Check if this node is "known" by serf
		if _, ok := known[check.Node]; ok {
			continue
		}
		// Create a fake member: the reap handler only needs the node name
		// plus datacenter/role tags to deregister it.
		member := serf.Member{
			Name: check.Node,
			Tags: map[string]string{
				"dc":   s.config.Datacenter,
				"role": "node",
			},
		}
		// Get the node services, look for ConsulServiceID
		// NOTE(review): assumes NodeServices never returns a nil services
		// struct for a node with a registered check — confirm.
		_, services := state.NodeServices(check.Node)
		serverPort := 0
		for _, service := range services.Services {
			if service.ID == ConsulServiceID {
				serverPort = service.Port
				break
			}
		}
		// Create the appropriate tags if this was a server node, so the
		// reap handler can also remove it from the Raft peer set.
		if serverPort > 0 {
			member.Tags["role"] = "consul"
			member.Tags["port"] = strconv.FormatUint(uint64(serverPort), 10)
		}
		// Attempt to reap this member
		if err := s.handleReapMember(member); err != nil {
			return err
		}
	}
	return nil
}
// reconcileMember is used to do an async reconcile of a single
// serf member
func (s *Server) reconcileMember(member serf.Member) error {
	// Ignore members that belong to other datacenters or pools.
	if !s.shouldHandleMember(member) {
		s.logger.Printf("[WARN] consul: skipping reconcile of node %v", member)
		return nil
	}
	defer metrics.MeasureSince([]string{"consul", "leader", "reconcileMember"}, time.Now())

	// Dispatch on the member's Serf status.
	var err error
	switch member.Status {
	case serf.StatusAlive:
		err = s.handleAliveMember(member)
	case serf.StatusFailed:
		err = s.handleFailedMember(member)
	case serf.StatusLeft:
		err = s.handleLeftMember(member)
	case StatusReap:
		err = s.handleReapMember(member)
	}
	if err == nil {
		return nil
	}
	s.logger.Printf("[ERR] consul: failed to reconcile member: %v: %v",
		member, err)
	return err
}
// shouldHandleMember checks if this is a Consul pool member
func (s *Server) shouldHandleMember(member serf.Member) bool {
	// Accept plain Consul nodes from our own datacenter.
	if valid, dc := isConsulNode(member); valid && dc == s.config.Datacenter {
		return true
	}
	// Accept Consul servers from our own datacenter.
	valid, parts := isConsulServer(member)
	return valid && parts.Datacenter == s.config.Datacenter
}
// handleAliveMember is used to ensure the node
// is registered, with a passing health check.
func (s *Server) handleAliveMember(member serf.Member) error {
	state := s.fsm.State()
	// Register consul service if a server
	var service *structs.NodeService
	if valid, parts := isConsulServer(member); valid {
		service = &structs.NodeService{
			ID:      ConsulServiceID,
			Service: ConsulServiceName,
			Port:    parts.Port,
		}
		// Attempt to join the consul server
		if err := s.joinConsulServer(member, parts); err != nil {
			return err
		}
	}
	// Check if the node exists at the expected address; if so we may be
	// able to skip the catalog registration entirely.
	_, found, addr := state.GetNode(member.Name)
	if found && addr == member.Addr.String() {
		// Check if the associated service is available
		if service != nil {
			match := false
			_, services := state.NodeServices(member.Name)
			if services != nil {
				// gofmt -s form: the blank value identifier was redundant,
				// and we can stop scanning on the first match.
				for id := range services.Services {
					if id == service.ID {
						match = true
						break
					}
				}
			}
			if !match {
				goto AFTER_CHECK
			}
		}
		// Check if the serfCheck is in the passing state; if so, the
		// catalog is already up to date and no registration is needed.
		_, checks := state.NodeChecks(member.Name)
		for _, check := range checks {
			if check.CheckID == SerfCheckID && check.Status == structs.HealthPassing {
				return nil
			}
		}
	}
AFTER_CHECK:
	s.logger.Printf("[INFO] consul: member '%s' joined, marking health alive", member.Name)
	// Register with the catalog, setting the serf health check to passing.
	req := structs.RegisterRequest{
		Datacenter: s.config.Datacenter,
		Node:       member.Name,
		Address:    member.Addr.String(),
		Service:    service,
		Check: &structs.HealthCheck{
			Node:    member.Name,
			CheckID: SerfCheckID,
			Name:    SerfCheckName,
			Status:  structs.HealthPassing,
			Output:  SerfCheckAliveOutput,
		},
	}
	var out struct{}
	return s.endpoints.Catalog.Register(&req, &out)
}
// handleFailedMember is used to mark the node's status
// as being critical, along with all checks as unknown.
func (s *Server) handleFailedMember(member serf.Member) error {
	state := s.fsm.State()

	// If the node is registered at this address and the serf health check
	// is already critical, the catalog is up to date.
	if _, found, addr := state.GetNode(member.Name); found && addr == member.Addr.String() {
		_, checks := state.NodeChecks(member.Name)
		for _, check := range checks {
			if check.CheckID == SerfCheckID && check.Status == structs.HealthCritical {
				return nil
			}
		}
	}
	s.logger.Printf("[INFO] consul: member '%s' failed, marking health critical", member.Name)

	// Re-register the node with a critical serf health check.
	req := structs.RegisterRequest{
		Datacenter: s.config.Datacenter,
		Node:       member.Name,
		Address:    member.Addr.String(),
		Check: &structs.HealthCheck{
			Node:    member.Name,
			CheckID: SerfCheckID,
			Name:    SerfCheckName,
			Status:  structs.HealthCritical,
			Output:  SerfCheckFailedOutput,
		},
	}
	var out struct{}
	return s.endpoints.Catalog.Register(&req, &out)
}
// handleLeftMember is used to handle members that gracefully
// left. They are deregistered if necessary.
// The "left" string is only used as the log reason.
func (s *Server) handleLeftMember(member serf.Member) error {
	return s.handleDeregisterMember("left", member)
}
// handleReapMember is used to handle members that have been
// reaped after a prolonged failure. They are deregistered.
// The "reaped" string is only used as the log reason.
func (s *Server) handleReapMember(member serf.Member) error {
	return s.handleDeregisterMember("reaped", member)
}
// handleDeregisterMember is used to deregister a member of a given reason
func (s *Server) handleDeregisterMember(reason string, member serf.Member) error {
	// Nothing to do when the catalog has no record of this node.
	if _, found, _ := s.fsm.State().GetNode(member.Name); !found {
		return nil
	}
	s.logger.Printf("[INFO] consul: member '%s' %s, deregistering", member.Name, reason)

	// A server must also be removed from the Raft peer set.
	if valid, parts := isConsulServer(member); valid {
		if err := s.removeConsulServer(member, parts.Port); err != nil {
			return err
		}
	}

	// Deregister the node from the catalog.
	req := structs.DeregisterRequest{
		Datacenter: s.config.Datacenter,
		Node:       member.Name,
	}
	var out struct{}
	return s.endpoints.Catalog.Deregister(&req, &out)
}
// joinConsulServer is used to try to join another consul server
func (s *Server) joinConsulServer(m serf.Member, parts *serverParts) error {
	// Do not join ourself
	if m.Name == s.config.NodeName {
		return nil
	}
	// Check for possibility of multiple bootstrap nodes: per the error
	// message below, only one node may be in bootstrap mode, so refuse
	// to add the peer if another bootstrap server is already known.
	if parts.Bootstrap {
		members := s.serfLAN.Members()
		for _, member := range members {
			valid, p := isConsulServer(member)
			if valid && member.Name != m.Name && p.Bootstrap {
				s.logger.Printf("[ERR] consul: '%v' and '%v' are both in bootstrap mode. Only one node should be in bootstrap mode, not adding Raft peer.", m.Name, member.Name)
				return nil
			}
		}
	}
	// Attempt to add as a peer; raft.ErrKnownPeer means the server is
	// already in the peer set, which is not a failure.
	var addr net.Addr = &net.TCPAddr{IP: m.Addr, Port: parts.Port}
	future := s.raft.AddPeer(addr)
	if err := future.Error(); err != nil && err != raft.ErrKnownPeer {
		s.logger.Printf("[ERR] consul: failed to add raft peer: %v", err)
		return err
	}
	return nil
}
// removeConsulServer is used to try to remove a consul server that has left
func (s *Server) removeConsulServer(m serf.Member, port int) error {
	// Never remove ourself from the peer set.
	if m.Name == s.config.NodeName {
		return nil
	}

	// Ask Raft to drop the peer; an already-unknown peer is not an error.
	peer := &net.TCPAddr{IP: m.Addr, Port: port}
	err := s.raft.RemovePeer(peer).Error()
	if err != nil && err != raft.ErrUnknownPeer {
		s.logger.Printf("[ERR] consul: failed to remove raft peer '%v': %v",
			peer, err)
		return err
	}
	return nil
}
|
// Copyright (c) 2017 Timo Savola. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"encoding/binary"
"math"
)
// Code represents the source or destination of a packet. It is specific to a
// program instance.
type Code int16
const (
CodeServices Code = -1
)
type Domain uint8
const (
DomainCall Domain = iota
DomainInfo
DomainFlow
DomainData
)
// IsStream returns true if the packet is stream-specific (flow or data).
// DomainFlow (2) and DomainData (3) are the only domain values with bit 1
// set, so a single mask test suffices.
func (dom Domain) IsStream() bool {
	return dom&2 != 0
}
// String returns a human-readable name for the domain.
func (dom Domain) String() string {
	names := [...]string{
		DomainCall: "call",
		DomainInfo: "info",
		DomainFlow: "flow",
		DomainData: "data",
	}
	// Domain is unsigned, so a simple upper-bound check suffices.
	if int(dom) < len(names) {
		return names[dom]
	}
	return dom.invalidString()
}
const (
Alignment = 8
// Packet header
OffsetSize = 0
OffsetCode = 4
OffsetDomain = 6
OffsetIndex = 7
HeaderSize = 8
// Services packet header
OffsetServicesCount = HeaderSize + 0
ServicesHeaderSize = HeaderSize + 2
// Flow packet header
FlowHeaderSize = HeaderSize
// Data packet header
OffsetDataID = HeaderSize + 0
OffsetDataNote = HeaderSize + 4
DataHeaderSize = HeaderSize + 8
)
const (
flowOffsetID = 0
flowOffsetIncrement = 4
flowSize = 8
)
// Align packet length up to a multiple of packet alignment.
// Adding Alignment-1 and clearing the low bits rounds up to the next
// multiple of Alignment (a power of two).
func Align(length int) int {
	return (length + (Alignment - 1)) &^ (Alignment - 1)
}
// Buf holds a packet of at least HeaderSize bytes.
type Buf []byte
// Make allocates a zeroed packet buffer of packetSize bytes and stamps
// the header with the given code and domain.
func Make(code Code, domain Domain, packetSize int) Buf {
	buf := make(Buf, packetSize)
	buf[OffsetDomain] = byte(domain)
	buf.SetCode(code)
	return buf
}
func MakeCall(code Code, contentSize int) Buf {
return Make(code, DomainCall, HeaderSize+contentSize)
}
func MakeInfo(code Code, contentSize int) Buf {
return Make(code, DomainInfo, HeaderSize+contentSize)
}
func MakeFlow(code Code, id int32, increment int32) Buf {
b := MakeFlows(code, 1)
b.Set(0, id, increment)
return Buf(b)
}
func MakeFlowEOF(code Code, id int32) Buf {
return MakeFlow(code, id, 0)
}
func MakeDataEOF(code Code, id int32) Buf {
return Buf(MakeData(code, id, 0))
}
// MustBeCall panicks if b is not in the call domain. The value is passed
// through.
func MustBeCall(b Buf) Buf {
if len(b) < HeaderSize || b.Domain() != DomainCall {
panic("not a call packet")
}
return b
}
// MustBeInfo panicks if b is not in the info domain. The value is passed
// through.
func MustBeInfo(b Buf) Buf {
if len(b) < HeaderSize || b.Domain() != DomainInfo {
panic("not an info packet")
}
return b
}
// SetSize encodes the current slice length into the packet header.
// It panics with the offending length if it does not fit in the
// 32-bit size field.
func (b Buf) SetSize() {
	// Compare in uint64: with n declared as int, the untyped constant
	// math.MaxUint32 overflows int on 32-bit platforms and the original
	// comparison failed to compile there.
	if uint64(len(b)) > math.MaxUint32 {
		panic(len(b))
	}
	binary.LittleEndian.PutUint32(b[OffsetSize:], uint32(len(b)))
}
// EncodedSize decodes the packet header field.
func (b Buf) EncodedSize() int {
return int(binary.LittleEndian.Uint32(b[OffsetSize:]))
}
func (b Buf) Code() Code {
return Code(binary.LittleEndian.Uint16(b[OffsetCode:]))
}
func (b Buf) SetCode(code Code) {
binary.LittleEndian.PutUint16(b[OffsetCode:], uint16(code))
}
func (b Buf) Domain() Domain {
return Domain(b[OffsetDomain] & 15)
}
func (b Buf) Index() uint8 {
return b[OffsetIndex]
}
func (b Buf) SetIndex(i uint8) {
b[OffsetIndex] = i
}
// Content of a received packet, or buffer for initializing sent packet.
func (b Buf) Content() []byte {
return b[HeaderSize:]
}
// Split a packet into two parts. The headerSize parameter determines how many
// bytes are initialized in the second part: the header is copied from the
// first part. The length of the first part is given as the prefixLen
// parameter. If the buffer is too short for the second part, the length of
// the second buffer will be zero.
func (b Buf) Split(headerSize, prefixLen int) (prefix, unused Buf) {
	// The prefix capacity is rounded up to the alignment boundary, but
	// never past the end of the buffer.
	prefixCap := Align(prefixLen)
	if prefixCap > len(b) {
		prefixCap = len(b)
	}
	prefix = b[:prefixLen:prefixCap]
	unused = b[prefixCap:]
	if len(unused) < headerSize {
		// Too short for a second packet: return a zero-length remainder
		// as documented. (The previous "unused[0:]" was a no-op that
		// leaked a short, headerless tail.)
		unused = unused[:0]
		return
	}
	// Seed the second part with a copy of the first part's header.
	copy(unused, prefix[:headerSize])
	return
}
// FlowBuf holds a flow packet of at least FlowHeaderSize bytes.
type FlowBuf Buf
func MakeFlows(code Code, count int) FlowBuf {
return FlowBuf(Make(code, DomainFlow, FlowHeaderSize+count*flowSize))
}
// MustBeFlow panicks if b is not in the flow domain. The value is passed
// through.
func MustBeFlow(b Buf) FlowBuf {
if len(b) < FlowHeaderSize || b.Domain() != DomainFlow {
panic("not a flow packet")
}
return FlowBuf(b)
}
func (b FlowBuf) Code() Code {
return Buf(b).Code()
}
func (b FlowBuf) Num() int {
return (len(b) - FlowHeaderSize) / flowSize
}
// Get decodes the stream id and flow increment of the i'th entry.
func (b FlowBuf) Get(i int) (id, increment int32) {
	off := FlowHeaderSize + i*flowSize
	id = int32(binary.LittleEndian.Uint32(b[off+flowOffsetID:]))
	increment = int32(binary.LittleEndian.Uint32(b[off+flowOffsetIncrement:]))
	return
}
// Set encodes the stream id and flow increment of the i'th entry.
func (b FlowBuf) Set(i int, id, increment int32) {
	off := FlowHeaderSize + i*flowSize
	binary.LittleEndian.PutUint32(b[off+flowOffsetID:], uint32(id))
	binary.LittleEndian.PutUint32(b[off+flowOffsetIncrement:], uint32(increment))
}
// DataBuf holds a data packet of at least DataHeaderSize bytes.
type DataBuf Buf
func MakeData(code Code, id int32, dataSize int) DataBuf {
b := Make(code, DomainData, DataHeaderSize+dataSize)
binary.LittleEndian.PutUint32(b[OffsetDataID:], uint32(id))
return DataBuf(b)
}
// MustBeData panicks if b is not in the data domain. The value is passed
// through.
func MustBeData(b Buf) DataBuf {
if len(b) < DataHeaderSize || b.Domain() != DomainData {
panic("not a data packet")
}
return DataBuf(b)
}
func (b DataBuf) Code() Code {
return Buf(b).Code()
}
func (b DataBuf) ID() int32 {
return int32(binary.LittleEndian.Uint32(b[OffsetDataID:]))
}
// Note is a value associated with a data packet. Each service interface
// specifies its semantics separately.
func (b DataBuf) Note() int32 {
return int32(binary.LittleEndian.Uint32(b[OffsetDataNote:]))
}
// SetNote value. It defaults to zero.
func (b DataBuf) SetNote(value int32) {
binary.LittleEndian.PutUint32(b[OffsetDataNote:], uint32(value))
}
func (b DataBuf) Data() []byte {
return b[DataHeaderSize:]
}
func (b DataBuf) DataLen() int {
return len(b) - DataHeaderSize
}
func (b DataBuf) EOF() bool {
return b.DataLen() == 0
}
func (b DataBuf) Split(dataLen int) (prefix Buf, unused DataBuf) {
prefix, unusedBuf := Buf(b).Split(DataHeaderSize, DataHeaderSize+dataLen)
unused = DataBuf(unusedBuf)
return
}
packet: remove Domain.IsStream() method
Now that the service registry takes care of unexpected stream packet
handling, only service implementations which actually do streaming need to
look at such packets, and they need to do so in more detail anyway.
// Copyright (c) 2017 Timo Savola. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"encoding/binary"
"math"
)
// Code represents the source or destination of a packet. It is specific to a
// program instance.
type Code int16
const (
CodeServices Code = -1
)
type Domain uint8
const (
DomainCall Domain = iota
DomainInfo
DomainFlow
DomainData
)
func (dom Domain) String() string {
switch dom {
case DomainCall:
return "call"
case DomainInfo:
return "info"
case DomainFlow:
return "flow"
case DomainData:
return "data"
}
return dom.invalidString()
}
const (
Alignment = 8
// Packet header
OffsetSize = 0
OffsetCode = 4
OffsetDomain = 6
OffsetIndex = 7
HeaderSize = 8
// Services packet header
OffsetServicesCount = HeaderSize + 0
ServicesHeaderSize = HeaderSize + 2
// Flow packet header
FlowHeaderSize = HeaderSize
// Data packet header
OffsetDataID = HeaderSize + 0
OffsetDataNote = HeaderSize + 4
DataHeaderSize = HeaderSize + 8
)
const (
flowOffsetID = 0
flowOffsetIncrement = 4
flowSize = 8
)
// Align packet length up to a multiple of packet alignment.
func Align(length int) int {
return (length + (Alignment - 1)) &^ (Alignment - 1)
}
// Buf holds a packet of at least HeaderSize bytes.
type Buf []byte
func Make(code Code, domain Domain, packetSize int) Buf {
b := Buf(make([]byte, packetSize))
b.SetCode(code)
b[OffsetDomain] = byte(domain)
return b
}
func MakeCall(code Code, contentSize int) Buf {
return Make(code, DomainCall, HeaderSize+contentSize)
}
func MakeInfo(code Code, contentSize int) Buf {
return Make(code, DomainInfo, HeaderSize+contentSize)
}
func MakeFlow(code Code, id int32, increment int32) Buf {
b := MakeFlows(code, 1)
b.Set(0, id, increment)
return Buf(b)
}
func MakeFlowEOF(code Code, id int32) Buf {
return MakeFlow(code, id, 0)
}
func MakeDataEOF(code Code, id int32) Buf {
return Buf(MakeData(code, id, 0))
}
// MustBeCall panicks if b is not in the call domain. The value is passed
// through.
func MustBeCall(b Buf) Buf {
if len(b) < HeaderSize || b.Domain() != DomainCall {
panic("not a call packet")
}
return b
}
// MustBeInfo panicks if b is not in the info domain. The value is passed
// through.
func MustBeInfo(b Buf) Buf {
if len(b) < HeaderSize || b.Domain() != DomainInfo {
panic("not an info packet")
}
return b
}
// SetSize encodes the current slice length into the packet header.
// It panics with the offending length if it does not fit in the
// 32-bit size field.
func (b Buf) SetSize() {
	// Compare in uint64: with n declared as int, the untyped constant
	// math.MaxUint32 overflows int on 32-bit platforms and the original
	// comparison failed to compile there.
	if uint64(len(b)) > math.MaxUint32 {
		panic(len(b))
	}
	binary.LittleEndian.PutUint32(b[OffsetSize:], uint32(len(b)))
}
// EncodedSize decodes the packet header field.
func (b Buf) EncodedSize() int {
return int(binary.LittleEndian.Uint32(b[OffsetSize:]))
}
func (b Buf) Code() Code {
return Code(binary.LittleEndian.Uint16(b[OffsetCode:]))
}
func (b Buf) SetCode(code Code) {
binary.LittleEndian.PutUint16(b[OffsetCode:], uint16(code))
}
func (b Buf) Domain() Domain {
return Domain(b[OffsetDomain] & 15)
}
func (b Buf) Index() uint8 {
return b[OffsetIndex]
}
func (b Buf) SetIndex(i uint8) {
b[OffsetIndex] = i
}
// Content of a received packet, or buffer for initializing sent packet.
func (b Buf) Content() []byte {
return b[HeaderSize:]
}
// Split a packet into two parts. The headerSize parameter determines how many
// bytes are initialized in the second part: the header is copied from the
// first part. The length of the first part is given as the prefixLen
// parameter. If the buffer is too short for the second part, the length of
// the second buffer will be zero.
func (b Buf) Split(headerSize, prefixLen int) (prefix, unused Buf) {
	// The prefix capacity is rounded up to the alignment boundary, but
	// never past the end of the buffer.
	prefixCap := Align(prefixLen)
	if prefixCap > len(b) {
		prefixCap = len(b)
	}
	prefix = b[:prefixLen:prefixCap]
	unused = b[prefixCap:]
	if len(unused) < headerSize {
		// Too short for a second packet: return a zero-length remainder
		// as documented. (The previous "unused[0:]" was a no-op that
		// leaked a short, headerless tail.)
		unused = unused[:0]
		return
	}
	// Seed the second part with a copy of the first part's header.
	copy(unused, prefix[:headerSize])
	return
}
// FlowBuf holds a flow packet of at least FlowHeaderSize bytes.
type FlowBuf Buf
func MakeFlows(code Code, count int) FlowBuf {
return FlowBuf(Make(code, DomainFlow, FlowHeaderSize+count*flowSize))
}
// MustBeFlow panicks if b is not in the flow domain. The value is passed
// through.
func MustBeFlow(b Buf) FlowBuf {
if len(b) < FlowHeaderSize || b.Domain() != DomainFlow {
panic("not a flow packet")
}
return FlowBuf(b)
}
func (b FlowBuf) Code() Code {
return Buf(b).Code()
}
func (b FlowBuf) Num() int {
return (len(b) - FlowHeaderSize) / flowSize
}
func (b FlowBuf) Get(i int) (id, increment int32) {
flow := b[FlowHeaderSize+i*flowSize:]
id = int32(binary.LittleEndian.Uint32(flow[flowOffsetID:]))
increment = int32(binary.LittleEndian.Uint32(flow[flowOffsetIncrement:]))
return
}
func (b FlowBuf) Set(i int, id, increment int32) {
flow := b[FlowHeaderSize+i*flowSize:]
binary.LittleEndian.PutUint32(flow[flowOffsetID:], uint32(id))
binary.LittleEndian.PutUint32(flow[flowOffsetIncrement:], uint32(increment))
}
// DataBuf holds a data packet of at least DataHeaderSize bytes.
type DataBuf Buf
func MakeData(code Code, id int32, dataSize int) DataBuf {
b := Make(code, DomainData, DataHeaderSize+dataSize)
binary.LittleEndian.PutUint32(b[OffsetDataID:], uint32(id))
return DataBuf(b)
}
// MustBeData panicks if b is not in the data domain. The value is passed
// through.
func MustBeData(b Buf) DataBuf {
if len(b) < DataHeaderSize || b.Domain() != DomainData {
panic("not a data packet")
}
return DataBuf(b)
}
func (b DataBuf) Code() Code {
return Buf(b).Code()
}
func (b DataBuf) ID() int32 {
return int32(binary.LittleEndian.Uint32(b[OffsetDataID:]))
}
// Note is a value associated with a data packet. Each service interface
// specifies its semantics separately.
func (b DataBuf) Note() int32 {
return int32(binary.LittleEndian.Uint32(b[OffsetDataNote:]))
}
// SetNote value. It defaults to zero.
func (b DataBuf) SetNote(value int32) {
binary.LittleEndian.PutUint32(b[OffsetDataNote:], uint32(value))
}
func (b DataBuf) Data() []byte {
return b[DataHeaderSize:]
}
func (b DataBuf) DataLen() int {
return len(b) - DataHeaderSize
}
func (b DataBuf) EOF() bool {
return b.DataLen() == 0
}
func (b DataBuf) Split(dataLen int) (prefix Buf, unused DataBuf) {
prefix, unusedBuf := Buf(b).Split(DataHeaderSize, DataHeaderSize+dataLen)
unused = DataBuf(unusedBuf)
return
}
|
package services
import (
"os"
"encoding/json"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/vito/cmdtest/matchers"
. "github.com/pivotal-cf-experimental/cf-test-helpers/generator"
. "github.com/pivotal-cf-experimental/cf-test-helpers/cf"
. "github.com/pivotal-cf-experimental/cf-acceptance-tests/helpers"
)
// ServicesResponse models the JSON body returned by the Cloud Controller
// "/v2/services" endpoint: a list of service resources.
type ServicesResponse struct {
	Resources []ServiceResponse
}
// ServiceResponse is a single service resource: its label and, when the
// request uses inline-relations-depth=1, the nested service plans.
type ServiceResponse struct {
	Entity struct {
		Label        string
		ServicePlans []ServicePlanResponse `json:"service_plans"`
	}
}
// ServicePlanResponse is a single service-plan resource. Metadata.Url is
// the plan's API path, used to flip its Public flag via MakePlanPublic.
type ServicePlanResponse struct {
	Entity struct {
		Name   string
		Public bool
	}
	Metadata struct {
		Url string
	}
}
// Acceptance test for the full service-broker lifecycle: push a broker app
// as admin, register it, publish its plans, update its catalog, and delete
// it — checking the marketplace after each step.
var _ = Describe("Service Broker Lifecycle", func() {
	var appName string
	BeforeEach(func() {
		appName = RandomName()
		// Broker management requires admin; log in as admin for the test body.
		Cf("login", "-u", os.Getenv("ADMIN_USER"), "-p", os.Getenv("ADMIN_PASSWORD"), "-o", os.Getenv("CF_ORG"), "-s", os.Getenv("CF_SPACE"))
		Expect(Cf("push", appName, "-p", serviceBrokerPath)).To(Say("App started"))
		// NOTE(review): json.Marshal error is discarded — confirm config is always marshalable.
		configJSON, _ := json.Marshal(ServiceBrokerConfig)
		Expect(Cf("set-env", appName, "CONFIG", string(configJSON))).To(ExitWithTimeout(0, 2*time.Second))
		Expect(Cf("restart", appName)).To(Say("App started"))
	})
	AfterEach(func() {
		// Clean up the broker and app, then restore the normal test user.
		Expect(Cf("delete-service-broker", appName, "-f")).To(ExitWithTimeout(0, 2*time.Second))
		Expect(Cf("delete", appName, "-f")).To(ExitWithTimeout(0, 2*time.Second))
		Cf("login", "-u", os.Getenv("CF_USER"), "-p", os.Getenv("CF_USER_PASSWORD"), "-o", os.Getenv("CF_ORG"), "-s", os.Getenv("CF_SPACE"))
	})
	It("confirms correct behavior in the lifecycle of a service broker", func() {
		defer Recover() // Catches panic thrown by Require expectations
		// Adding the service broker
		Require(Cf("create-service-broker", appName, "username", "password", AppUri(appName, ""))).To(ExitWithTimeout(0, 30*time.Second))
		Expect(Cf("service-brokers")).To(Say(appName))
		// Confirming the plans are not yet public
		session := Cf("marketplace")
		Expect(session).NotTo(Say(ServiceBrokerConfig.FirstBrokerServiceLabel))
		Expect(session).NotTo(Say(ServiceBrokerConfig.FirstBrokerPlanName))
		// Making the plans public
		session = Cf("curl", "/v2/services?inline-relations-depth=1")
		structure := ServicesResponse{}
		// NOTE(review): json.Unmarshal error is discarded — a bad response would be masked.
		json.Unmarshal(session.FullOutput(), &structure)
		for _, service := range structure.Resources {
			if service.Entity.Label == ServiceBrokerConfig.FirstBrokerServiceLabel {
				for _, plan := range service.Entity.ServicePlans {
					if plan.Entity.Name == ServiceBrokerConfig.FirstBrokerPlanName {
						MakePlanPublic(plan.Metadata.Url)
						break
					}
				}
			}
		}
		// Confirming plans show up in the marketplace
		session = Cf("marketplace")
		Expect(session).To(Say(ServiceBrokerConfig.FirstBrokerServiceLabel))
		Expect(session).To(Say(ServiceBrokerConfig.FirstBrokerPlanName))
		// Changing the catalog on the broker
		Eventually(Curling(AppUri(appName,"/v2/catalog"), "-X", "POST", "-i")).Should(Say("HTTP/1.1 200 OK"))
		Require(Cf("update-service-broker", appName, "username", "password", AppUri(appName, ""))).To(ExitWithTimeout(0, 30*time.Second))
		// Confirming the changes to the broker show up in the marketplace
		session = Cf("marketplace")
		Expect(session).NotTo(Say(ServiceBrokerConfig.FirstBrokerServiceLabel))
		Expect(session).NotTo(Say(ServiceBrokerConfig.FirstBrokerPlanName))
		Expect(session).To(Say(ServiceBrokerConfig.SecondBrokerServiceLabel))
		Expect(session).To(Say(ServiceBrokerConfig.SecondBrokerPlanName))
		// Deleting the service broker and confirming the plans no longer display
		Require(Cf("delete-service-broker", appName, "-f")).To(ExitWithTimeout(0, 2*time.Second))
		session = Cf("marketplace")
		Expect(session).NotTo(Say(ServiceBrokerConfig.FirstBrokerServiceLabel))
		Expect(session).NotTo(Say(ServiceBrokerConfig.FirstBrokerPlanName))
		Expect(session).NotTo(Say(ServiceBrokerConfig.SecondBrokerServiceLabel))
		Expect(session).NotTo(Say(ServiceBrokerConfig.SecondBrokerPlanName))
	})
})
Marking the service test as pending, again
package services
import (
"os"
"encoding/json"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/vito/cmdtest/matchers"
. "github.com/pivotal-cf-experimental/cf-test-helpers/generator"
. "github.com/pivotal-cf-experimental/cf-test-helpers/cf"
. "github.com/pivotal-cf-experimental/cf-acceptance-tests/helpers"
)
type ServicesResponse struct {
Resources []ServiceResponse
}
type ServiceResponse struct {
Entity struct {
Label string
ServicePlans []ServicePlanResponse `json:"service_plans"`
}
}
type ServicePlanResponse struct {
Entity struct {
Name string
Public bool
}
Metadata struct {
Url string
}
}
var _ = PDescribe("Service Broker Lifecycle", func() {
var appName string
BeforeEach(func() {
appName = RandomName()
Cf("login", "-u", os.Getenv("ADMIN_USER"), "-p", os.Getenv("ADMIN_PASSWORD"), "-o", os.Getenv("CF_ORG"), "-s", os.Getenv("CF_SPACE"))
Expect(Cf("push", appName, "-p", serviceBrokerPath)).To(Say("App started"))
configJSON, _ := json.Marshal(ServiceBrokerConfig)
Expect(Cf("set-env", appName, "CONFIG", string(configJSON))).To(ExitWithTimeout(0, 2*time.Second))
Expect(Cf("restart", appName)).To(Say("App started"))
})
AfterEach(func() {
Expect(Cf("delete-service-broker", appName, "-f")).To(ExitWithTimeout(0, 2*time.Second))
Expect(Cf("delete", appName, "-f")).To(ExitWithTimeout(0, 2*time.Second))
Cf("login", "-u", os.Getenv("CF_USER"), "-p", os.Getenv("CF_USER_PASSWORD"), "-o", os.Getenv("CF_ORG"), "-s", os.Getenv("CF_SPACE"))
})
It("confirms correct behavior in the lifecycle of a service broker", func() {
defer Recover() // Catches panic thrown by Require expectations
// Adding the service broker
Require(Cf("create-service-broker", appName, "username", "password", AppUri(appName, ""))).To(ExitWithTimeout(0, 30*time.Second))
Expect(Cf("service-brokers")).To(Say(appName))
// Confirming the plans are not yet public
session := Cf("marketplace")
Expect(session).NotTo(Say(ServiceBrokerConfig.FirstBrokerServiceLabel))
Expect(session).NotTo(Say(ServiceBrokerConfig.FirstBrokerPlanName))
// Making the plans public
session = Cf("curl", "/v2/services?inline-relations-depth=1")
structure := ServicesResponse{}
json.Unmarshal(session.FullOutput(), &structure)
for _, service := range structure.Resources {
if service.Entity.Label == ServiceBrokerConfig.FirstBrokerServiceLabel {
for _, plan := range service.Entity.ServicePlans {
if plan.Entity.Name == ServiceBrokerConfig.FirstBrokerPlanName {
MakePlanPublic(plan.Metadata.Url)
break
}
}
}
}
// Confirming plans show up in the marketplace
session = Cf("marketplace")
Expect(session).To(Say(ServiceBrokerConfig.FirstBrokerServiceLabel))
Expect(session).To(Say(ServiceBrokerConfig.FirstBrokerPlanName))
// Changing the catalog on the broker
Eventually(Curling(AppUri(appName,"/v2/catalog"), "-X", "POST", "-i")).Should(Say("HTTP/1.1 200 OK"))
Require(Cf("update-service-broker", appName, "username", "password", AppUri(appName, ""))).To(ExitWithTimeout(0, 30*time.Second))
// Confirming the changes to the broker show up in the marketplace
session = Cf("marketplace")
Expect(session).NotTo(Say(ServiceBrokerConfig.FirstBrokerServiceLabel))
Expect(session).NotTo(Say(ServiceBrokerConfig.FirstBrokerPlanName))
Expect(session).To(Say(ServiceBrokerConfig.SecondBrokerServiceLabel))
Expect(session).To(Say(ServiceBrokerConfig.SecondBrokerPlanName))
// Deleting the service broker and confirming the plans no longer display
Require(Cf("delete-service-broker", appName, "-f")).To(ExitWithTimeout(0, 2*time.Second))
session = Cf("marketplace")
Expect(session).NotTo(Say(ServiceBrokerConfig.FirstBrokerServiceLabel))
Expect(session).NotTo(Say(ServiceBrokerConfig.FirstBrokerPlanName))
Expect(session).NotTo(Say(ServiceBrokerConfig.SecondBrokerServiceLabel))
Expect(session).NotTo(Say(ServiceBrokerConfig.SecondBrokerPlanName))
})
})
|
/*
Package arrow provides C-style date formatting and parsing, along with other date goodies.
See the github project page at http://github.com/bmuller/arrow for more info.
*/
package arrow
import (
"strconv"
"strings"
"time"
)
type Arrow struct {
time.Time
}
// Like time's constants, but with Day and Week
const (
Nanosecond time.Duration = 1
Microsecond = 1000 * Nanosecond
Millisecond = 1000 * Microsecond
Second = 1000 * Millisecond
Minute = 60 * Second
Hour = 60 * Minute
Day = 24 * Hour
Week = 7 * Day
)
func New(t time.Time) Arrow {
return Arrow{t}
}
func UTC() Arrow {
return New(time.Now().UTC())
}
func Now() Arrow {
return New(time.Now())
}
func Yesterday() Arrow {
return Now().Yesterday()
}
func Tomorrow() Arrow {
return Now().Tomorrow()
}
func NextMinute() Arrow {
return Now().AddMinutes(1).AtBeginningOfMinute()
}
func NextHour() Arrow {
return Now().AddHours(1).AtBeginningOfHour()
}
func NextDay() Arrow {
return Now().AddDays(1).AtBeginningOfDay()
}
func SleepUntil(t Arrow) {
time.Sleep(t.Sub(Now()))
}
// Get the current time in the given timezone.
// The timezone parameter should correspond to a file in the IANA Time Zone database,
// such as "America/New_York". "UTC" and "Local" are also acceptable. If the timezone
// given isn't valid, then no change to the timezone is made.
func InTimezone(timezone string) Arrow {
return Now().InTimezone(timezone)
}
func (a Arrow) Before(b Arrow) bool {
return a.Time.Before(b.Time)
}
func (a Arrow) After(b Arrow) bool {
return a.Time.After(b.Time)
}
func (a Arrow) Equal(b Arrow) bool {
return a.Time.Equal(b.Time)
}
// Return an array of Arrow's from this one up to the given one,
// by duration. For instance, Now().UpTo(Tomorrow(), Hour)
// will return an array of Arrow's from now until tomorrow by
// hour (inclusive of a and b).
func (a Arrow) UpTo(b Arrow, by time.Duration) []Arrow {
	// Normalize so we always walk forward in time.
	lo, hi := a, b
	if lo.After(hi) {
		lo, hi = hi, lo
	}
	var steps []Arrow
	for !lo.After(hi) {
		steps = append(steps, lo)
		lo = lo.Add(by)
	}
	return steps
}
func (a Arrow) Yesterday() Arrow {
return a.AddDays(-1)
}
func (a Arrow) Tomorrow() Arrow {
return a.AddDays(1)
}
func (a Arrow) UTC() Arrow {
return New(a.Time.UTC())
}
func (a Arrow) Sub(b Arrow) time.Duration {
return a.Time.Sub(b.Time)
}
// Add any duration parseable by time.ParseDuration (e.g. "1h30m").
// If the string does not parse, the Arrow is returned unchanged —
// the parse error is silently dropped.
func (a Arrow) AddDuration(duration string) Arrow {
	if pduration, err := time.ParseDuration(duration); err == nil {
		return a.Add(pduration)
	}
	return a
}
func (a Arrow) Add(d time.Duration) Arrow {
return New(a.Time.Add(d))
}
// The timezone parameter should correspond to a file in the IANA Time Zone database,
// such as "America/New_York". "UTC" and "Local" are also acceptable. If the timezone
// given isn't valid, then no change to the timezone is made.
func (a Arrow) InTimezone(timezone string) Arrow {
if location, err := time.LoadLocation(timezone); err == nil {
return New(a.In(location))
}
return a
}
func (a Arrow) AddDays(days int) Arrow {
return New(a.AddDate(0, 0, days))
}
func (a Arrow) AddHours(hours int) Arrow {
year, month, day := a.Time.Date()
hour, min, sec := a.Time.Clock()
d := time.Date(year, month, day, hour+hours, min, sec, a.Nanosecond(), a.Location())
return New(d)
}
func (a Arrow) AddMinutes(minutes int) Arrow {
year, month, day := a.Time.Date()
hour, min, sec := a.Time.Clock()
d := time.Date(year, month, day, hour, min+minutes, sec, a.Nanosecond(), a.Location())
return New(d)
}
func (a Arrow) AddSeconds(seconds int) Arrow {
year, month, day := a.Time.Date()
hour, min, sec := a.Time.Clock()
d := time.Date(year, month, day, hour, min, sec+seconds, a.Nanosecond(), a.Location())
return New(d)
}
func (a Arrow) AtBeginningOfMinute() Arrow {
return New(a.Truncate(Minute))
}
func (a Arrow) AtBeginningOfHour() Arrow {
return New(a.Truncate(Hour))
}
// AtBeginningOfDay returns the Arrow at midnight (00:00:00.0) of the same
// calendar day, in the same location.
//
// time.Date is used instead of subtracting a.Hour() hours of absolute
// duration: the old approach missed midnight on DST-transition days and
// in zones with non-whole-hour UTC offsets (Truncate works on absolute
// time, not wall-clock time).
func (a Arrow) AtBeginningOfDay() Arrow {
	year, month, day := a.Date()
	return New(time.Date(year, month, day, 0, 0, 0, 0, a.Location()))
}
func (a Arrow) AtBeginningOfWeek() Arrow {
days := time.Duration(-1*int(a.Weekday())) * Day
return a.AtBeginningOfDay().Add(days)
}
func (a Arrow) AtBeginningOfMonth() Arrow {
days := time.Duration(-1*int(a.Day())+1) * Day
return a.AtBeginningOfDay().Add(days)
}
func (a Arrow) AtBeginningOfYear() Arrow {
days := time.Duration(-1*int(a.YearDay())+1) * Day
return a.AtBeginningOfDay().Add(days)
}
// Add any durations parseable by time.ParseDuration
func (a Arrow) AddDurations(durations ...string) Arrow {
for _, duration := range durations {
a = a.AddDuration(duration)
}
return a
}
func formatConvert(format string) string {
// create mapping from strftime to time in Go
strftimeMapping := map[string]string{
"%a": "Mon",
"%A": "Monday",
"%b": "Jan",
"%B": "January",
"%c": "", // locale not supported
"%C": "06",
"%d": "02",
"%D": "01/02/06",
"%e": "_2",
"%E": "", // modifiers not supported
"%F": "2006-01-02",
"%G": "%G", // special case, see below
"%g": "%g", // special case, see below
"%h": "Jan",
"%H": "15",
"%I": "03",
"%j": "%j", // special case, see below
"%k": "%k", // special case, see below
"%l": "_3",
"%m": "01",
"%M": "04",
"%n": "\n",
"%O": "", // modifiers not supported
"%p": "PM",
"%P": "pm",
"%r": "03:04:05 PM",
"%R": "15:04",
"%s": "%s", // special case, see below
"%S": "05",
"%t": "\t",
"%T": "15:04:05",
"%u": "%u", // special case, see below
"%U": "%U", // special case, see below
"%V": "%V", // special case, see below
"%w": "%w", // special case, see below
"%W": "%W", // special case, see below
"%x": "%x", // locale not supported
"%X": "%X", // locale not supported
"%y": "06",
"%Y": "2006",
"%z": "-0700",
"%Z": "MST",
"%+": "Mon Jan _2 15:04:05 MST 2006",
"%%": "%%", // special case, see below
}
for fmt, conv := range strftimeMapping {
format = strings.Replace(format, fmt, conv, -1)
}
return format
}
// Parse the time using the same format string types as strftime
// See http://man7.org/linux/man-pages/man3/strftime.3.html for more info.
func CParse(layout, value string) (Arrow, error) {
t, e := time.Parse(formatConvert(layout), value)
return New(t), e
}
// Parse the time using the same format string types as strftime,
// within the given location.
// See http://man7.org/linux/man-pages/man3/strftime.3.html for more info.
func CParseInLocation(layout, value string, loc *time.Location) (Arrow, error) {
t, e := time.ParseInLocation(formatConvert(layout), value, loc)
return New(t), e
}
// Format the time using the same format string types as strftime.
// See http://man7.org/linux/man-pages/man3/strftime.3.html for more info.
// The %X verbs that formatConvert leaves in place are substituted here
// using the concrete time value.
func (a Arrow) CFormat(format string) string {
	format = a.Format(formatConvert(format))
	// %G/%g use the ISO 8601 week-based year, which can differ from the
	// calendar year around January 1st.
	isoYear, _ := a.ISOWeek()
	syear := strconv.Itoa(isoYear)
	sweekday := strconv.Itoa(int(a.Weekday()))
	if a.Year() > 999 {
		format = strings.Replace(format, "%G", syear, -1)
		// Last two digits, so week-based years longer than four digits
		// are still handled correctly (was syear[2:4]).
		format = strings.Replace(format, "%g", syear[len(syear)-2:], -1)
	}
	format = strings.Replace(format, "%j", strconv.Itoa(a.YearDay()), -1)
	if a.Hour() < 10 {
		// %k is the 24-hour clock with single digits space-padded.
		format = strings.Replace(format, "%k", " "+strconv.Itoa(a.Hour()), -1)
	}
	format = strings.Replace(format, "%s", strconv.FormatInt(a.Unix(), 10), -1)
	// %u is the ISO weekday (Mon=1..Sun=7); Go numbers Sunday as 0.
	if a.Weekday() == time.Sunday {
		format = strings.Replace(format, "%u", "7", -1)
	} else {
		format = strings.Replace(format, "%u", sweekday, -1)
	}
	// %U was previously replaced twice; the second (ISO-week) pass was
	// dead code because the first pass consumed every %U.
	format = strings.Replace(format, "%U", weekNumber(a, time.Sunday), -1)
	format = strings.Replace(format, "%w", sweekday, -1)
	format = strings.Replace(format, "%W", weekNumber(a, time.Monday), -1)
	return strings.Replace(format, "%%", "%", -1)
}
// Used for %U and %W:
// %U: The week number of the current year as a decimal number, range
// 00 to 53, starting with the first Sunday as the first day of week 01.
//
// %W: The week number of the current year as a decimal number, range
// 00 to 53, starting with the first Monday as the first day of week 01.
func weekNumber(a Arrow, firstday time.Weekday) string {
	// Walk forward from Jan 1 to the year's first `firstday`. This was
	// previously hard-coded to time.Sunday, which made %W (Monday-based
	// weeks) report Sunday-based numbers.
	dayone := a.AtBeginningOfYear()
	for dayone.Weekday() != firstday {
		dayone = dayone.AddDays(1)
	}
	// Days before the year's first `firstday` fall in week 0; anchoring
	// one week earlier lets integer division yield the 0-based number.
	// NOTE(review): strftime zero-pads to two digits ("00".."53"); this
	// returns an unpadded number — confirm whether padding is wanted.
	week := int(a.Sub(dayone.AddDays(-7)) / Week)
	return strconv.Itoa(week)
}
Added a Unix constructor function that builds an Arrow from Unix seconds and nanoseconds.
/*
Package arrow provides C-style date formating and parsing, along with other date goodies.
See the github project page at http://github.com/bmuller/arrow for more info.
*/
package arrow
import (
"strconv"
"strings"
"time"
)
type Arrow struct {
time.Time
}
// Like time's constants, but with Day and Week
const (
Nanosecond time.Duration = 1
Microsecond = 1000 * Nanosecond
Millisecond = 1000 * Microsecond
Second = 1000 * Millisecond
Minute = 60 * Second
Hour = 60 * Minute
Day = 24 * Hour
Week = 7 * Day
)
func New(t time.Time) Arrow {
return Arrow{t}
}
func UTC() Arrow {
return New(time.Now().UTC())
}
func Unix(sec int64, nsec int64) Arrow {
return New(time.Unix(sec, nsec))
}
func Now() Arrow {
return New(time.Now())
}
func Yesterday() Arrow {
return Now().Yesterday()
}
func Tomorrow() Arrow {
return Now().Tomorrow()
}
func NextMinute() Arrow {
return Now().AddMinutes(1).AtBeginningOfMinute()
}
func NextHour() Arrow {
return Now().AddHours(1).AtBeginningOfHour()
}
func NextDay() Arrow {
return Now().AddDays(1).AtBeginningOfDay()
}
func SleepUntil(t Arrow) {
time.Sleep(t.Sub(Now()))
}
// Get the current time in the given timezone.
// The timezone parameter should correspond to a file in the IANA Time Zone database,
// such as "America/New_York". "UTC" and "Local" are also acceptable. If the timezone
// given isn't valid, then no change to the timezone is made.
func InTimezone(timezone string) Arrow {
return Now().InTimezone(timezone)
}
func (a Arrow) Before(b Arrow) bool {
return a.Time.Before(b.Time)
}
func (a Arrow) After(b Arrow) bool {
return a.Time.After(b.Time)
}
func (a Arrow) Equal(b Arrow) bool {
return a.Time.Equal(b.Time)
}
// Return an array of Arrow's from this one up to the given one,
// by duration. For instance, Now().UpTo(Tomorrow(), Hour)
// will return an array of Arrow's from now until tomorrow by
// hour (inclusive of a and b).
func (a Arrow) UpTo(b Arrow, by time.Duration) []Arrow {
	// Guard: a zero or negative step could never advance `a` past `b`,
	// so the loop below would append forever. Return no steps instead.
	if by <= 0 {
		return nil
	}
	var result []Arrow
	// Normalize so we always walk forward from the earlier endpoint.
	if a.After(b) {
		a, b = b, a
	}
	for a.Before(b) || a.Equal(b) {
		result = append(result, a)
		a = a.Add(by)
	}
	return result
}
func (a Arrow) Yesterday() Arrow {
return a.AddDays(-1)
}
func (a Arrow) Tomorrow() Arrow {
return a.AddDays(1)
}
func (a Arrow) UTC() Arrow {
return New(a.Time.UTC())
}
func (a Arrow) Sub(b Arrow) time.Duration {
return a.Time.Sub(b.Time)
}
// Add any duration parseable by time.ParseDuration
func (a Arrow) AddDuration(duration string) Arrow {
if pduration, err := time.ParseDuration(duration); err == nil {
return a.Add(pduration)
}
return a
}
func (a Arrow) Add(d time.Duration) Arrow {
return New(a.Time.Add(d))
}
// The timezone parameter should correspond to a file in the IANA Time Zone database,
// such as "America/New_York". "UTC" and "Local" are also acceptable. If the timezone
// given isn't valid, then no change to the timezone is made.
func (a Arrow) InTimezone(timezone string) Arrow {
if location, err := time.LoadLocation(timezone); err == nil {
return New(a.In(location))
}
return a
}
func (a Arrow) AddDays(days int) Arrow {
return New(a.AddDate(0, 0, days))
}
func (a Arrow) AddHours(hours int) Arrow {
year, month, day := a.Time.Date()
hour, min, sec := a.Time.Clock()
d := time.Date(year, month, day, hour+hours, min, sec, a.Nanosecond(), a.Location())
return New(d)
}
func (a Arrow) AddMinutes(minutes int) Arrow {
year, month, day := a.Time.Date()
hour, min, sec := a.Time.Clock()
d := time.Date(year, month, day, hour, min+minutes, sec, a.Nanosecond(), a.Location())
return New(d)
}
func (a Arrow) AddSeconds(seconds int) Arrow {
year, month, day := a.Time.Date()
hour, min, sec := a.Time.Clock()
d := time.Date(year, month, day, hour, min, sec+seconds, a.Nanosecond(), a.Location())
return New(d)
}
func (a Arrow) AtBeginningOfMinute() Arrow {
return New(a.Truncate(Minute))
}
func (a Arrow) AtBeginningOfHour() Arrow {
return New(a.Truncate(Hour))
}
func (a Arrow) AtBeginningOfDay() Arrow {
d := time.Duration(-a.Hour()) * Hour
return a.AtBeginningOfHour().Add(d)
}
func (a Arrow) AtBeginningOfWeek() Arrow {
days := time.Duration(-1*int(a.Weekday())) * Day
return a.AtBeginningOfDay().Add(days)
}
func (a Arrow) AtBeginningOfMonth() Arrow {
days := time.Duration(-1*int(a.Day())+1) * Day
return a.AtBeginningOfDay().Add(days)
}
func (a Arrow) AtBeginningOfYear() Arrow {
days := time.Duration(-1*int(a.YearDay())+1) * Day
return a.AtBeginningOfDay().Add(days)
}
// Add any durations parseable by time.ParseDuration
func (a Arrow) AddDurations(durations ...string) Arrow {
for _, duration := range durations {
a = a.AddDuration(duration)
}
return a
}
// formatConvert translates a strftime-style format string into a Go
// time layout string (reference time "Mon Jan _2 15:04:05 MST 2006").
// Verbs mapped to themselves (e.g. "%j") have no Go layout equivalent
// and are substituted later by CFormat, which has the concrete time
// value; locale- and modifier-based verbs map to the empty string.
func formatConvert(format string) string {
	// create mapping from strftime to time in Go
	strftimeMapping := map[string]string{
		"%a": "Mon",
		"%A": "Monday",
		"%b": "Jan",
		"%B": "January",
		"%c": "", // locale not supported
		// NOTE(review): strftime's %C is the century (year/100); "06"
		// formats the two-digit year instead — confirm intent.
		"%C": "06",
		"%d": "02",
		"%D": "01/02/06",
		"%e": "_2",
		"%E": "", // modifiers not supported
		"%F": "2006-01-02",
		"%G": "%G", // special case, see below
		"%g": "%g", // special case, see below
		"%h": "Jan",
		"%H": "15",
		"%I": "03",
		"%j": "%j", // special case, see below
		"%k": "%k", // special case, see below
		"%l": "_3",
		"%m": "01",
		"%M": "04",
		"%n": "\n",
		"%O": "", // modifiers not supported
		"%p": "PM",
		"%P": "pm",
		"%r": "03:04:05 PM",
		"%R": "15:04",
		"%s": "%s", // special case, see below
		"%S": "05",
		"%t": "\t",
		"%T": "15:04:05",
		"%u": "%u", // special case, see below
		"%U": "%U", // special case, see below
		"%V": "%V", // special case, see below
		"%w": "%w", // special case, see below
		"%W": "%W", // special case, see below
		"%x": "%x", // locale not supported
		"%X": "%X", // locale not supported
		"%y": "06",
		"%Y": "2006",
		"%z": "-0700",
		"%Z": "MST",
		"%+": "Mon Jan _2 15:04:05 MST 2006",
		"%%": "%%", // special case, see below
	}
	// Map iteration order is random, but that is harmless here: the only
	// replacement values containing '%' are the identity self-mappings,
	// and no key can match inside another mapping's value.
	for fmt, conv := range strftimeMapping {
		format = strings.Replace(format, fmt, conv, -1)
	}
	return format
}
// Parse the time using the same format string types as strftime
// See http://man7.org/linux/man-pages/man3/strftime.3.html for more info.
func CParse(layout, value string) (Arrow, error) {
	goLayout := formatConvert(layout)
	parsed, parseErr := time.Parse(goLayout, value)
	return New(parsed), parseErr
}
// Parse the time using the same format string types as strftime,
// within the given location.
// See http://man7.org/linux/man-pages/man3/strftime.3.html for more info.
func CParseInLocation(layout, value string, loc *time.Location) (Arrow, error) {
t, e := time.ParseInLocation(formatConvert(layout), value, loc)
return New(t), e
}
// Format the time using the same format string types as strftime.
// See http://man7.org/linux/man-pages/man3/strftime.3.html for more info.
// The %X verbs that formatConvert leaves in place are substituted here
// using the concrete time value.
func (a Arrow) CFormat(format string) string {
	format = a.Format(formatConvert(format))
	// %G/%g use the ISO 8601 week-based year, which can differ from the
	// calendar year around January 1st.
	isoYear, _ := a.ISOWeek()
	syear := strconv.Itoa(isoYear)
	sweekday := strconv.Itoa(int(a.Weekday()))
	if a.Year() > 999 {
		format = strings.Replace(format, "%G", syear, -1)
		// Last two digits, so week-based years longer than four digits
		// are still handled correctly (was syear[2:4]).
		format = strings.Replace(format, "%g", syear[len(syear)-2:], -1)
	}
	format = strings.Replace(format, "%j", strconv.Itoa(a.YearDay()), -1)
	if a.Hour() < 10 {
		// %k is the 24-hour clock with single digits space-padded.
		format = strings.Replace(format, "%k", " "+strconv.Itoa(a.Hour()), -1)
	}
	format = strings.Replace(format, "%s", strconv.FormatInt(a.Unix(), 10), -1)
	// %u is the ISO weekday (Mon=1..Sun=7); Go numbers Sunday as 0.
	if a.Weekday() == time.Sunday {
		format = strings.Replace(format, "%u", "7", -1)
	} else {
		format = strings.Replace(format, "%u", sweekday, -1)
	}
	// %U was previously replaced twice; the second (ISO-week) pass was
	// dead code because the first pass consumed every %U.
	format = strings.Replace(format, "%U", weekNumber(a, time.Sunday), -1)
	format = strings.Replace(format, "%w", sweekday, -1)
	format = strings.Replace(format, "%W", weekNumber(a, time.Monday), -1)
	return strings.Replace(format, "%%", "%", -1)
}
// Used for %U and %W:
// %U: The week number of the current year as a decimal number, range
// 00 to 53, starting with the first Sunday as the first day of week 01.
//
// %W: The week number of the current year as a decimal number, range
// 00 to 53, starting with the first Monday as the first day of week 01.
func weekNumber(a Arrow, firstday time.Weekday) string {
	// Walk forward from Jan 1 to the year's first `firstday`. This was
	// previously hard-coded to time.Sunday, which made %W (Monday-based
	// weeks) report Sunday-based numbers.
	dayone := a.AtBeginningOfYear()
	for dayone.Weekday() != firstday {
		dayone = dayone.AddDays(1)
	}
	// Days before the year's first `firstday` fall in week 0; anchoring
	// one week earlier lets integer division yield the 0-based number.
	// NOTE(review): strftime zero-pads to two digits ("00".."53"); this
	// returns an unpadded number — confirm whether padding is wanted.
	week := int(a.Sub(dayone.AddDays(-7)) / Week)
	return strconv.Itoa(week)
}
|
package parser
import (
"../configuration"
"os/exec"
"strings"
"regexp"
"fmt"
"os"
)
type Tag struct {
Label string
Value string
}
var Tags []Tag
/**
* @brief Take the command string as input and return
* the list of commands to be executed, after
* having replaced the tags (like "$main", or
* "$path") to their corresponding values
*
* @param label The command string (ie : "compile" or
* "run")
*
* @return { An array of shell command objects }
*/
func GetCommandArrFromInput(label string) []*exec.Cmd {
//read config files
conf.Init()
//look for command in schemes
content := conf.GetScheme(label)
//create a slice that will contain the final
// list of commands
var commands []*exec.Cmd
for _, cmd := range content {
cmd = ParseTags(cmd)
cmd = ParseDollarParams(cmd)
arr := SplitCommand(cmd)
//logging
//fmt.Println(len(arr), arr)
//create the command
tmp := exec.Command(arr[0], arr[1:]...)
//append it to the slice
commands = append(commands, tmp)
}
return commands
}
func InitTags() {
//declare each Tag
Tags = []Tag {
Tag {
Label : "$main",
Value : conf.GetMainPath(),
},
Tag {
Label : "$path",
Value : conf.GetProjectRoot(),
},
Tag {
Label : "$name",
Value : conf.GetName(),
},
}
}
//replace tags in command
func ParseTags(command string) string {
InitTags()
for _, tag := range Tags {
command = strings.Replace(command, tag.Label, tag.Value, -1)
}
return command
}
// ParseDollarParams replaces the positional placeholders $1..$9 in the
// command with the corresponding command-line arguments (like a shell).
// $N maps to os.Args[N+1] (os.Args[0] is the binary; os.Args[1] is
// presumably the scheme label — verify against the caller). A missing
// positional parameter now expands to the empty string, as in a shell,
// instead of panicking on an out-of-range os.Args index.
func ParseDollarParams(command string) string {
	for i := 1; i <= 9; i++ {
		sel := fmt.Sprintf("$%d", i)
		if strings.Contains(command, sel) {
			value := ""
			if i+1 < len(os.Args) {
				value = os.Args[i+1]
			}
			command = strings.Replace(command, sel, value, -1)
		}
	}
	return command
}
// SplitCommand tokenizes a command line the way a shell would: bare
// words split on whitespace, while double- or single-quoted runs stay
// one argument with the surrounding quotes removed.
func SplitCommand(command string) []string {
	//split into array using regexp (to let quoted string be 1 arg, as in shell)
	delimeter := "[^\\s\"']+|\"([^\"]*)\"|'([^']*)'"
	reg := regexp.MustCompile(delimeter)
	arr := reg.FindAllString(command, -1)
	for i, arg := range arr {
		// Strip the surrounding quotes so the argument is exported
		// without them. Single quotes were matched by the regexp but
		// previously never stripped; handle them like double quotes.
		if len(arg) >= 2 {
			first, last := arg[0], arg[len(arg)-1]
			if (first == '"' && last == '"') || (first == '\'' && last == '\'') {
				arr[i] = arg[1 : len(arg)-1]
			}
		}
	}
	// Debug fmt.Println(arr) removed: it polluted stdout on every call.
	return arr
}
Strip the surrounding quotes from quoted arguments when splitting commands.
package parser
import (
"../configuration"
"os/exec"
"strings"
"regexp"
"fmt"
"os"
)
type Tag struct {
Label string
Value string
}
var Tags []Tag
/**
* @brief Take the command string as input and return
* the list of commands to be executed, after
* having replaced the tags (like "$main", or
* "$path") to their corresponding values
*
* @param label The command string (ie : "compile" or
* "run")
*
* @return { An array of shell command objects }
*/
func GetCommandArrFromInput(label string) []*exec.Cmd {
//read config files
conf.Init()
//look for command in schemes
content := conf.GetScheme(label)
//create a slice that will contain the final
// list of commands
var commands []*exec.Cmd
for _, cmd := range content {
cmd = ParseTags(cmd)
cmd = ParseDollarParams(cmd)
arr := SplitCommand(cmd)
//logging
//fmt.Println(len(arr), arr)
//create the command
tmp := exec.Command(arr[0], arr[1:]...)
//append it to the slice
commands = append(commands, tmp)
}
return commands
}
func InitTags() {
//declare each Tag
Tags = []Tag {
Tag {
Label : "$main",
Value : conf.GetMainPath(),
},
Tag {
Label : "$path",
Value : conf.GetProjectRoot(),
},
Tag {
Label : "$name",
Value : conf.GetName(),
},
}
}
//replace tags in command
func ParseTags(command string) string {
InitTags()
for _, tag := range Tags {
command = strings.Replace(command, tag.Label, tag.Value, -1)
}
return command
}
// ParseDollarParams replaces the positional placeholders $1..$9 in the
// command with the corresponding command-line arguments (like a shell).
// $N maps to os.Args[N+1] (os.Args[0] is the binary; os.Args[1] is
// presumably the scheme label — verify against the caller). A missing
// positional parameter now expands to the empty string, as in a shell,
// instead of panicking on an out-of-range os.Args index.
func ParseDollarParams(command string) string {
	for i := 1; i <= 9; i++ {
		sel := fmt.Sprintf("$%d", i)
		if strings.Contains(command, sel) {
			value := ""
			if i+1 < len(os.Args) {
				value = os.Args[i+1]
			}
			command = strings.Replace(command, sel, value, -1)
		}
	}
	return command
}
// SplitCommand tokenizes a command line the way a shell would: bare
// words split on whitespace, while double- or single-quoted runs stay
// one argument with the surrounding quotes removed.
func SplitCommand(command string) []string {
	//split into array using regexp (to let quoted string be 1 arg, as in shell)
	delimeter := "[^\\s\"']+|\"([^\"]*)\"|'([^']*)'"
	reg := regexp.MustCompile(delimeter)
	arr := reg.FindAllString(command, -1)
	for i, arg := range arr {
		//delete extremities quotes
		// ex : git, commit, -m, "Message to be displayed"
		// => git, commit, -m, Message to be displayed
		//
		// it avoids extra quotes when the argument is exported
		// into other files or services (such as git)
		//
		// Single quotes were matched by the regexp but previously never
		// stripped; handle them like double quotes.
		if len(arg) >= 2 {
			first, last := arg[0], arg[len(arg)-1]
			if (first == '"' && last == '"') || (first == '\'' && last == '\'') {
				arr[i] = arg[1 : len(arg)-1]
			}
		}
	}
	// Debug fmt.Println(arr) removed: it polluted stdout on every call.
	return arr
}
package parser
import "fmt"
func ParseInput() {
fmt.Println("parsing some files")
}
Changed the parsing method to handle the new files.
package parser
import "fmt"
func ParseInput() {
fmt.Println("parsing some new files")
}
|
package parser
import (
"fmt"
"plaid/lexer"
"strconv"
"strings"
)
// SyntaxError combines a source code location with the resulting error message
type SyntaxError struct {
loc lexer.Loc
msg string
}
func (se SyntaxError) Error() string {
return fmt.Sprintf("%s %s", se.loc, se.msg)
}
func makeSyntaxError(tok lexer.Token, msg string, deference bool) error {
if tok.Type == lexer.Error && deference {
msg = tok.Lexeme
}
return SyntaxError{tok.Loc, msg}
}
// Precedence describes the relative binding powers of different operators
type Precedence int
// The staticly defined precedence levels
const (
Lowest Precedence = iota * 10
Assign
Comparison
Sum
Product
Prefix
Postfix
Dispatch
)
// PrefixParseFunc describes the parsing function for any construct where the
// binding operator comes before the expression it binds to.
type PrefixParseFunc func(p *Parser) (Expr, error)
// PostfixParseFunc describes the parsing function for any construct where the
// binding operator comes after the expression it binds to.
type PostfixParseFunc func(p *Parser, left Expr) (Expr, error)
// Parser contains methods for generating an abstract syntax tree from a
// sequence of Tokens
type Parser struct {
lexer *lexer.Lexer
precedenceTable map[lexer.Type]Precedence
prefixParseFuncs map[lexer.Type]PrefixParseFunc
postfixParseFuncs map[lexer.Type]PostfixParseFunc
}
func (p *Parser) peekTokenIsNot(first lexer.Type, rest ...lexer.Type) bool {
peek := p.lexer.Peek().Type
if first == peek {
return false
}
for _, other := range rest {
if other == peek {
return false
}
}
return true
}
func (p *Parser) expectNextToken(which lexer.Type, otherwise string) (lexer.Token, error) {
if p.peekTokenIsNot(which) {
peek := p.lexer.Peek()
return peek, makeSyntaxError(peek, otherwise, false)
}
return p.lexer.Next(), nil
}
func (p *Parser) registerPrecedence(typ lexer.Type, level Precedence) {
p.precedenceTable[typ] = level
}
func (p *Parser) registerPrefix(typ lexer.Type, fn PrefixParseFunc) {
p.prefixParseFuncs[typ] = fn
}
func (p *Parser) registerPostfix(typ lexer.Type, fn PostfixParseFunc, level Precedence) {
p.registerPrecedence(typ, level)
p.postfixParseFuncs[typ] = fn
}
func (p *Parser) peekPrecedence() Precedence {
prec, exists := p.precedenceTable[p.lexer.Peek().Type]
if exists {
return prec
}
return Lowest
}
// Parse initializers a parser and defines the grammar precedence levels
func Parse(source string) (Program, error) {
p := makeParser(source)
loadGrammar(p)
return parseProgram(p)
}
func makeParser(source string) *Parser {
s := lexer.Scan(source)
l := lexer.Lex(s)
p := &Parser{
l,
make(map[lexer.Type]Precedence),
make(map[lexer.Type]PrefixParseFunc),
make(map[lexer.Type]PostfixParseFunc),
}
return p
}
func loadGrammar(p *Parser) {
p.registerPrefix(lexer.Fn, parseFunction)
p.registerPrefix(lexer.ParenL, parseGroup)
p.registerPrefix(lexer.Plus, parsePrefix)
p.registerPrefix(lexer.Dash, parsePrefix)
p.registerPrefix(lexer.Self, parseSelf)
p.registerPrefix(lexer.Ident, parseIdent)
p.registerPrefix(lexer.Number, parseNumber)
p.registerPrefix(lexer.String, parseString)
p.registerPrefix(lexer.Boolean, parseBoolean)
p.registerPostfix(lexer.BracketL, parseSubscript, Dispatch)
p.registerPostfix(lexer.ParenL, parseDispatch, Dispatch)
p.registerPostfix(lexer.Assign, parseAssign, Assign)
p.registerPostfix(lexer.LT, parseInfix, Comparison)
p.registerPostfix(lexer.LTEquals, parseInfix, Comparison)
p.registerPostfix(lexer.GT, parseInfix, Comparison)
p.registerPostfix(lexer.GTEquals, parseInfix, Comparison)
p.registerPostfix(lexer.Plus, parseInfix, Sum)
p.registerPostfix(lexer.Dash, parseInfix, Sum)
p.registerPostfix(lexer.Star, parseInfix, Product)
p.registerPostfix(lexer.Slash, parseInfix, Product)
}
func parseProgram(p *Parser) (Program, error) {
stmts := []Stmt{}
for p.peekTokenIsNot(lexer.Error, lexer.EOF) {
stmt, err := parseStmt(p)
if err != nil {
return Program{}, err
}
stmts = append(stmts, stmt)
}
return Program{stmts}, nil
}
func parseStmt(p *Parser) (Stmt, error) {
switch p.lexer.Peek().Type {
case lexer.If:
return parseIfStmt(p)
case lexer.Let:
return parseDeclarationStmt(p)
case lexer.Return:
return parseReturnStmt(p)
default:
return parseExprStmt(p)
}
}
func parseStmtBlock(p *Parser) (StmtBlock, error) {
left, err := p.expectNextToken(lexer.BraceL, "expected left brace")
if err != nil {
return StmtBlock{}, err
}
stmts := []Stmt{}
for p.peekTokenIsNot(lexer.BraceR, lexer.EOF, lexer.Error) {
var stmt Stmt
stmt, err = parseStmt(p)
if err != nil {
return StmtBlock{}, err
}
stmts = append(stmts, stmt)
}
right, err := p.expectNextToken(lexer.BraceR, "expected right brace")
if err != nil {
return StmtBlock{}, err
}
return StmtBlock{left, stmts, right}, nil
}
func parseIfStmt(p *Parser) (Stmt, error) {
tok, err := p.expectNextToken(lexer.If, "expected IF keyword")
if err != nil {
return nil, err
}
var cond Expr
if cond, err = parseExpr(p, Lowest); err != nil {
return nil, err
}
var clause StmtBlock
if clause, err = parseStmtBlock(p); err != nil {
return nil, err
}
_, err = p.expectNextToken(lexer.Semi, "expected semicolon")
if err != nil {
return nil, err
}
return IfStmt{tok, cond, clause}, nil
}
func parseDeclarationStmt(p *Parser) (Stmt, error) {
tok, err := p.expectNextToken(lexer.Let, "expected LET keyword")
if err != nil {
return nil, err
}
var expr Expr
if expr, err = parseIdent(p); err != nil {
return nil, err
}
name := expr.(IdentExpr)
_, err = p.expectNextToken(lexer.Assign, "expected :=")
if err != nil {
return nil, err
}
if expr, err = parseExpr(p, Lowest); err != nil {
return nil, err
}
_, err = p.expectNextToken(lexer.Semi, "expected semicolon")
if err != nil {
return nil, err
}
return DeclarationStmt{tok, name, expr}, nil
}
func parseReturnStmt(p *Parser) (Stmt, error) {
tok, err := p.expectNextToken(lexer.Return, "expected RETURN keyword")
if err != nil {
return nil, err
}
var expr Expr
if p.peekTokenIsNot(lexer.Semi, lexer.EOF, lexer.Error) {
expr, err = parseExpr(p, Lowest)
if err != nil {
return nil, err
}
}
_, err = p.expectNextToken(lexer.Semi, "expected semicolon")
if err != nil {
return nil, err
}
return ReturnStmt{tok, expr}, nil
}
func parseExprStmt(p *Parser) (Stmt, error) {
expr, err := parseExpr(p, Lowest)
if err != nil {
return nil, err
}
var stmt Stmt
switch expr.(type) {
case DispatchExpr:
stmt = ExprStmt{expr}
case AssignExpr:
stmt = ExprStmt{expr}
default:
return nil, SyntaxError{expr.Start(), "expected start of statement"}
}
_, err = p.expectNextToken(lexer.Semi, "expected semicolon")
if err != nil {
return nil, err
}
return stmt, nil
}
func parseTypeNote(p *Parser) (TypeNote, error) {
var child TypeNote
var err error
switch p.lexer.Peek().Type {
case lexer.Ident:
child, err = parseTypeNoteIdent(p)
case lexer.BracketL:
child, err = parseTypeNoteList(p)
case lexer.ParenL:
child, err = parseTypeNoteTuple(p)
default:
return nil, makeSyntaxError(p.lexer.Peek(), "unexpected symbol", true)
}
if err != nil {
return nil, err
}
for p.lexer.Peek().Type == lexer.Question {
child, _ = parseTypeNoteOptional(p, child)
}
return child, nil
}
func parseTypeNoteIdent(p *Parser) (TypeNote, error) {
var tok lexer.Token
var err error
if tok, err = p.expectNextToken(lexer.Ident, "expected identifier"); err != nil {
return nil, err
}
switch tok.Lexeme {
case "Void":
return TypeNoteVoid{tok}, nil
default:
return TypeNoteIdent{tok, tok.Lexeme}, nil
}
}
func parseTypeNoteList(p *Parser) (TypeNote, error) {
tok, err := p.expectNextToken(lexer.BracketL, "expected left bracket")
if err != nil {
return nil, err
}
child, err := parseTypeNote(p)
if err != nil {
return nil, err
}
_, err = p.expectNextToken(lexer.BracketR, "expected right bracket")
if err != nil {
return nil, err
}
return TypeNoteList{tok, child}, nil
}
func parseTypeNoteOptional(p *Parser, child TypeNote) (TypeNote, error) {
tok, err := p.expectNextToken(lexer.Question, "expected question mark")
if err != nil {
return nil, err
}
return TypeNoteOptional{tok, child}, nil
}
func parseTypeNoteTuple(p *Parser) (TypeNote, error) {
tok, err := p.expectNextToken(lexer.ParenL, "expected left paren")
if err != nil {
return nil, err
}
params := []TypeNote{}
for p.peekTokenIsNot(lexer.ParenR, lexer.Error, lexer.EOF) {
var sig TypeNote
sig, err = parseTypeNote(p)
if err != nil {
return nil, err
}
params = append(params, sig)
if p.peekTokenIsNot(lexer.Comma) {
break
} else {
p.lexer.Next()
}
}
_, err = p.expectNextToken(lexer.ParenR, "expected right paren")
if err != nil {
return nil, err
}
tuple := TypeNoteTuple{tok, params}
if p.peekTokenIsNot(lexer.Arrow) {
return tuple, nil
}
return parseTypeNoteFunction(p, tuple)
}
func parseTypeNoteFunction(p *Parser, tuple TypeNoteTuple) (TypeNote, error) {
_, err := p.expectNextToken(lexer.Arrow, "expected arrow")
if err != nil {
return nil, err
}
ret, err := parseTypeNote(p)
if err != nil {
return nil, err
}
return TypeNoteFunction{tuple, ret}, nil
}
// parseExpr implements Pratt-style expression parsing: consume one
// prefix construct, then repeatedly attach postfix/infix constructs
// while the next token binds more tightly than `level`.
func parseExpr(p *Parser, level Precedence) (Expr, error) {
	prefix, exists := p.prefixParseFuncs[p.lexer.Peek().Type]
	if !exists {
		return nil, makeSyntaxError(p.lexer.Peek(), "unexpected symbol", true)
	}
	left, err := prefix(p)
	if err != nil {
		return nil, err
	}
	// A token only has a precedence above Lowest if it was registered
	// via registerPostfix, which also installs its parse func — so the
	// map lookup below should not yield nil (assumes precedences are
	// never registered independently; confirm if that changes).
	for p.peekTokenIsNot(lexer.EOF) && level < p.peekPrecedence() {
		infix := p.postfixParseFuncs[p.lexer.Peek().Type]
		left, err = infix(p, left)
		if err != nil {
			return nil, err
		}
	}
	return left, nil
}
func parseFunction(p *Parser) (Expr, error) {
tok, err := p.expectNextToken(lexer.Fn, "expected FN keyword")
if err != nil {
return nil, err
}
params, ret, err := parseFunctionSignature(p)
if err != nil {
return nil, err
}
block, err := parseStmtBlock(p)
if err != nil {
return nil, err
}
return FunctionExpr{tok, params, ret, block}, nil
}
func parseFunctionSignature(p *Parser) ([]FunctionParam, TypeNote, error) {
var params []FunctionParam
var ret TypeNote
var err error
if params, err = parseFunctionParams(p); err != nil {
return nil, nil, err
}
if ret, err = parseFunctionReturnSig(p); err != nil {
return nil, nil, err
}
return params, ret, nil
}
func parseFunctionParams(p *Parser) ([]FunctionParam, error) {
_, err := p.expectNextToken(lexer.ParenL, "expected left paren")
if err != nil {
return nil, err
}
params := []FunctionParam{}
for p.peekTokenIsNot(lexer.ParenR, lexer.EOF, lexer.Error) {
var param FunctionParam
param, err = parseFunctionParam(p)
if err != nil {
return nil, err
}
params = append(params, param)
if p.peekTokenIsNot(lexer.Comma) {
break
} else {
p.lexer.Next()
}
}
_, err = p.expectNextToken(lexer.ParenR, "expected right paren")
if err != nil {
return nil, err
}
return params, nil
}
func parseFunctionParam(p *Parser) (FunctionParam, error) {
ident, err := parseIdent(p)
if err != nil {
return FunctionParam{}, err
}
_, err = p.expectNextToken(lexer.Colon, "expected colon between parameter name and type")
if err != nil {
return FunctionParam{}, err
}
var sig TypeNote
sig, err = parseTypeNote(p)
if err != nil {
return FunctionParam{}, err
}
return FunctionParam{ident.(IdentExpr), sig}, nil
}
func parseFunctionReturnSig(p *Parser) (TypeNote, error) {
_, err := p.expectNextToken(lexer.Colon, "expected colon between parameters and return type")
if err != nil {
return nil, err
}
ret, err := parseTypeNote(p)
if err != nil {
return nil, err
}
return ret, err
}
func parseInfix(p *Parser, left Expr) (Expr, error) {
level := p.peekPrecedence()
tok := p.lexer.Next()
oper := tok.Lexeme
right, err := parseExpr(p, level)
if err != nil {
return nil, err
}
return BinaryExpr{oper, tok, left, right}, nil
}
func parseSubscript(p *Parser, left Expr) (Expr, error) {
_, err := p.expectNextToken(lexer.BracketL, "expect left bracket")
if err != nil {
return nil, err
}
if p.lexer.Peek().Type == lexer.BracketR {
err = makeSyntaxError(p.lexer.Peek(), "expected index expression", false)
return nil, err
}
index, err := parseExpr(p, Lowest)
if err != nil {
return nil, err
}
_, err = p.expectNextToken(lexer.BracketR, "expect right bracket")
if err != nil {
return nil, err
}
return SubscriptExpr{left, index}, nil
}
// parseDispatch parses a call expression `left(arg, ...)` where `left`
// has already been consumed and the next token is the opening paren.
func parseDispatch(p *Parser, left Expr) (Expr, error) {
	_, err := p.expectNextToken(lexer.ParenL, "expected left paren")
	if err != nil {
		return nil, err
	}
	var args []Expr
	// Stop on EOF/Error too, matching the sibling loops in
	// parseFunctionParams and parseTypeNoteTuple, so an unterminated
	// argument list falls through to the precise ParenR error below.
	for p.peekTokenIsNot(lexer.ParenR, lexer.EOF, lexer.Error) {
		var arg Expr
		arg, err = parseExpr(p, Lowest)
		if err != nil {
			return nil, err
		}
		args = append(args, arg)
		if p.peekTokenIsNot(lexer.Comma) {
			break
		}
		p.lexer.Next()
	}
	_, err = p.expectNextToken(lexer.ParenR, "expected right paren")
	if err != nil {
		return nil, err
	}
	return DispatchExpr{left, args}, nil
}
func parseAssign(p *Parser, left Expr) (Expr, error) {
leftIdent, ok := left.(IdentExpr)
if ok == false {
return nil, SyntaxError{left.Start(), "left hand must be an identifier"}
}
level := p.peekPrecedence()
tok := p.lexer.Next()
right, err := parseExpr(p, level-1)
if err != nil {
return nil, err
}
return AssignExpr{tok, leftIdent, right}, nil
}
func parsePostfix(p *Parser, left Expr) (Expr, error) {
tok := p.lexer.Next()
oper := tok.Lexeme
return UnaryExpr{oper, tok, left}, nil
}
func parsePrefix(p *Parser) (Expr, error) {
tok := p.lexer.Next()
oper := tok.Lexeme
right, err := parseExpr(p, Prefix)
if err != nil {
return nil, err
}
return UnaryExpr{oper, tok, right}, nil
}
func parseGroup(p *Parser) (Expr, error) {
_, err := p.expectNextToken(lexer.ParenL, "expected left paren")
if err != nil {
return nil, err
}
expr, err := parseExpr(p, Lowest)
if err != nil {
return nil, err
}
_, err = p.expectNextToken(lexer.ParenR, "expected right paren")
if err != nil {
return nil, err
}
return expr, nil
}
func parseSelf(p *Parser) (Expr, error) {
tok, err := p.expectNextToken(lexer.Self, "expected self")
if err != nil {
return nil, err
}
return SelfExpr{tok}, nil
}
func parseIdent(p *Parser) (Expr, error) {
tok, err := p.expectNextToken(lexer.Ident, "expected identifier")
if err != nil {
return nil, err
}
return IdentExpr{tok, tok.Lexeme}, nil
}
func parseNumber(p *Parser) (Expr, error) {
tok, err := p.expectNextToken(lexer.Number, "expected number literal")
if err != nil {
return nil, err
}
return evalNumber(tok)
}
// evalNumber converts a Number token's lexeme to a NumberExpr. Only
// non-negative base-10 integers are accepted. The bit size is capped at
// IntSize-1 so the parsed value always fits the signed int field;
// previously ParseUint(..., 64) followed by int(val) could silently
// wrap to a negative number for very large literals.
func evalNumber(tok lexer.Token) (NumberExpr, error) {
	val, err := strconv.ParseUint(tok.Lexeme, 10, strconv.IntSize-1)
	if err != nil {
		return NumberExpr{}, makeSyntaxError(tok, "malformed number literal", false)
	}
	return NumberExpr{tok, int(val)}, nil
}
func parseString(p *Parser) (Expr, error) {
tok, err := p.expectNextToken(lexer.String, "expected string literal")
if err != nil {
return nil, err
}
return evalString(tok)
}
func evalString(tok lexer.Token) (StringExpr, error) {
dblQuote := "\""
remSuffix := strings.TrimSuffix(tok.Lexeme, dblQuote)
remBoth := strings.TrimPrefix(remSuffix, dblQuote)
return StringExpr{tok, remBoth}, nil
}
func parseBoolean(p *Parser) (Expr, error) {
tok, err := p.expectNextToken(lexer.Boolean, "expected boolean literal")
if err != nil {
return nil, err
}
return evalBoolean(tok)
}
// evalBoolean converts a Boolean token's lexeme into a BooleanExpr,
// rejecting anything other than the exact lexemes "true" and "false".
func evalBoolean(tok lexer.Token) (BooleanExpr, error) {
	switch tok.Lexeme {
	case "true":
		return BooleanExpr{tok, true}, nil
	case "false":
		return BooleanExpr{tok, false}, nil
	default:
		return BooleanExpr{}, makeSyntaxError(tok, "malformed boolean literal", false)
	}
}
Add more loop exit conditions (EOF and Error tokens) when parsing dispatch expressions.
package parser
import (
"fmt"
"plaid/lexer"
"strconv"
"strings"
)
// SyntaxError combines a source code location with the resulting error message.
type SyntaxError struct {
	loc lexer.Loc
	msg string
}

// Error satisfies the error interface, rendering "<location> <message>".
func (se SyntaxError) Error() string {
	return fmt.Sprintf("%s %s", se.loc, se.msg)
}

// makeSyntaxError builds a SyntaxError at tok's location. When tok is a
// lexer Error token and deference is true, the lexer's own message (carried
// in the token's Lexeme) replaces msg.
func makeSyntaxError(tok lexer.Token, msg string, deference bool) error {
	if tok.Type == lexer.Error && deference {
		msg = tok.Lexeme
	}
	return SyntaxError{tok.Loc, msg}
}
// Precedence describes the relative binding powers of different operators
type Precedence int

// The statically defined precedence levels, weakest (Lowest) to strongest
// (Dispatch). Levels are spaced 10 apart so new intermediate levels could be
// inserted without renumbering.
const (
	Lowest Precedence = iota * 10
	Assign
	Comparison
	Sum
	Product
	Prefix
	Postfix
	Dispatch
)

// PrefixParseFunc describes the parsing function for any construct where the
// binding operator comes before the expression it binds to.
type PrefixParseFunc func(p *Parser) (Expr, error)

// PostfixParseFunc describes the parsing function for any construct where the
// binding operator comes after the expression it binds to.
type PostfixParseFunc func(p *Parser, left Expr) (Expr, error)

// Parser contains methods for generating an abstract syntax tree from a
// sequence of Tokens
type Parser struct {
	lexer             *lexer.Lexer
	precedenceTable   map[lexer.Type]Precedence // operator token type -> binding power
	prefixParseFuncs  map[lexer.Type]PrefixParseFunc
	postfixParseFuncs map[lexer.Type]PostfixParseFunc
}
// peekTokenIsNot reports whether the next token's type matches none of the
// given candidate types.
func (p *Parser) peekTokenIsNot(first lexer.Type, rest ...lexer.Type) bool {
	next := p.lexer.Peek().Type
	for _, candidate := range append([]lexer.Type{first}, rest...) {
		if next == candidate {
			return false
		}
	}
	return true
}
// expectNextToken consumes and returns the next token if it has the given
// type. Otherwise the token is left unconsumed and returned alongside a
// SyntaxError carrying the `otherwise` message.
func (p *Parser) expectNextToken(which lexer.Type, otherwise string) (lexer.Token, error) {
	if p.peekTokenIsNot(which) {
		peek := p.lexer.Peek()
		return peek, makeSyntaxError(peek, otherwise, false)
	}
	return p.lexer.Next(), nil
}
// registerPrecedence assigns a binding power to the given token type.
func (p *Parser) registerPrecedence(typ lexer.Type, level Precedence) {
	p.precedenceTable[typ] = level
}

// registerPrefix installs the parse function used when typ begins an expression.
func (p *Parser) registerPrefix(typ lexer.Type, fn PrefixParseFunc) {
	p.prefixParseFuncs[typ] = fn
}

// registerPostfix installs the parse function used when typ follows an
// expression, recording its precedence in the same step.
func (p *Parser) registerPostfix(typ lexer.Type, fn PostfixParseFunc, level Precedence) {
	p.registerPrecedence(typ, level)
	p.postfixParseFuncs[typ] = fn
}

// peekPrecedence returns the binding power of the next token, defaulting to
// Lowest for tokens with no registered precedence.
func (p *Parser) peekPrecedence() Precedence {
	prec, exists := p.precedenceTable[p.lexer.Peek().Type]
	if exists {
		return prec
	}
	return Lowest
}
// Parse initializes a parser, defines the grammar precedence levels, and
// parses the whole source into a Program.
func Parse(source string) (Program, error) {
	p := makeParser(source)
	loadGrammar(p)
	return parseProgram(p)
}

// makeParser wires a lexer to a Parser with empty grammar tables.
func makeParser(source string) *Parser {
	s := lexer.Scan(source)
	l := lexer.Lex(s)
	p := &Parser{
		l,
		make(map[lexer.Type]Precedence),
		make(map[lexer.Type]PrefixParseFunc),
		make(map[lexer.Type]PostfixParseFunc),
	}
	return p
}

// loadGrammar registers every prefix and postfix parse function together
// with each operator's precedence level.
func loadGrammar(p *Parser) {
	p.registerPrefix(lexer.Fn, parseFunction)
	p.registerPrefix(lexer.ParenL, parseGroup)
	p.registerPrefix(lexer.Plus, parsePrefix)
	p.registerPrefix(lexer.Dash, parsePrefix)
	p.registerPrefix(lexer.Self, parseSelf)
	p.registerPrefix(lexer.Ident, parseIdent)
	p.registerPrefix(lexer.Number, parseNumber)
	p.registerPrefix(lexer.String, parseString)
	p.registerPrefix(lexer.Boolean, parseBoolean)
	p.registerPostfix(lexer.BracketL, parseSubscript, Dispatch)
	p.registerPostfix(lexer.ParenL, parseDispatch, Dispatch)
	p.registerPostfix(lexer.Assign, parseAssign, Assign)
	p.registerPostfix(lexer.LT, parseInfix, Comparison)
	p.registerPostfix(lexer.LTEquals, parseInfix, Comparison)
	p.registerPostfix(lexer.GT, parseInfix, Comparison)
	p.registerPostfix(lexer.GTEquals, parseInfix, Comparison)
	p.registerPostfix(lexer.Plus, parseInfix, Sum)
	p.registerPostfix(lexer.Dash, parseInfix, Sum)
	p.registerPostfix(lexer.Star, parseInfix, Product)
	p.registerPostfix(lexer.Slash, parseInfix, Product)
}
// parseProgram parses statements until the lexer reports an error or EOF.
func parseProgram(p *Parser) (Program, error) {
	stmts := []Stmt{}
	for p.peekTokenIsNot(lexer.Error, lexer.EOF) {
		stmt, err := parseStmt(p)
		if err != nil {
			return Program{}, err
		}
		stmts = append(stmts, stmt)
	}
	return Program{stmts}, nil
}

// parseStmt dispatches on the next token's type to the matching statement
// parser, treating anything unrecognized as an expression statement.
func parseStmt(p *Parser) (Stmt, error) {
	switch p.lexer.Peek().Type {
	case lexer.If:
		return parseIfStmt(p)
	case lexer.Let:
		return parseDeclarationStmt(p)
	case lexer.Return:
		return parseReturnStmt(p)
	default:
		return parseExprStmt(p)
	}
}
// parseStmtBlock parses a brace-delimited list of statements, recording both
// brace tokens. The loop also exits on Error and EOF so an unterminated
// block fails at the right-brace check instead of looping forever.
func parseStmtBlock(p *Parser) (StmtBlock, error) {
	left, err := p.expectNextToken(lexer.BraceL, "expected left brace")
	if err != nil {
		return StmtBlock{}, err
	}
	stmts := []Stmt{}
	for p.peekTokenIsNot(lexer.BraceR, lexer.EOF, lexer.Error) {
		var stmt Stmt
		stmt, err = parseStmt(p)
		if err != nil {
			return StmtBlock{}, err
		}
		stmts = append(stmts, stmt)
	}
	right, err := p.expectNextToken(lexer.BraceR, "expected right brace")
	if err != nil {
		return StmtBlock{}, err
	}
	return StmtBlock{left, stmts, right}, nil
}

// parseIfStmt parses an if-statement: condition expression, clause block,
// and a mandatory trailing semicolon.
func parseIfStmt(p *Parser) (Stmt, error) {
	tok, err := p.expectNextToken(lexer.If, "expected IF keyword")
	if err != nil {
		return nil, err
	}
	var cond Expr
	if cond, err = parseExpr(p, Lowest); err != nil {
		return nil, err
	}
	var clause StmtBlock
	if clause, err = parseStmtBlock(p); err != nil {
		return nil, err
	}
	_, err = p.expectNextToken(lexer.Semi, "expected semicolon")
	if err != nil {
		return nil, err
	}
	return IfStmt{tok, cond, clause}, nil
}
// parseDeclarationStmt parses `let <ident> := <expr>;`.
func parseDeclarationStmt(p *Parser) (Stmt, error) {
	tok, err := p.expectNextToken(lexer.Let, "expected LET keyword")
	if err != nil {
		return nil, err
	}
	var expr Expr
	if expr, err = parseIdent(p); err != nil {
		return nil, err
	}
	// parseIdent only ever returns an IdentExpr, so this assertion is safe.
	name := expr.(IdentExpr)
	_, err = p.expectNextToken(lexer.Assign, "expected :=")
	if err != nil {
		return nil, err
	}
	if expr, err = parseExpr(p, Lowest); err != nil {
		return nil, err
	}
	_, err = p.expectNextToken(lexer.Semi, "expected semicolon")
	if err != nil {
		return nil, err
	}
	return DeclarationStmt{tok, name, expr}, nil
}

// parseReturnStmt parses a return statement whose expression is optional: a
// bare return leaves expr nil.
func parseReturnStmt(p *Parser) (Stmt, error) {
	tok, err := p.expectNextToken(lexer.Return, "expected RETURN keyword")
	if err != nil {
		return nil, err
	}
	var expr Expr
	if p.peekTokenIsNot(lexer.Semi, lexer.EOF, lexer.Error) {
		expr, err = parseExpr(p, Lowest)
		if err != nil {
			return nil, err
		}
	}
	_, err = p.expectNextToken(lexer.Semi, "expected semicolon")
	if err != nil {
		return nil, err
	}
	return ReturnStmt{tok, expr}, nil
}
// parseExprStmt parses an expression used in statement position, followed by
// a terminating semicolon. Only dispatch (call) and assignment expressions
// are legal as statements; anything else is a syntax error.
func parseExprStmt(p *Parser) (Stmt, error) {
	expr, err := parseExpr(p, Lowest)
	if err != nil {
		return nil, err
	}
	var stmt Stmt
	// The two permitted cases had identical bodies; combine them.
	switch expr.(type) {
	case DispatchExpr, AssignExpr:
		stmt = ExprStmt{expr}
	default:
		return nil, SyntaxError{expr.Start(), "expected start of statement"}
	}
	_, err = p.expectNextToken(lexer.Semi, "expected semicolon")
	if err != nil {
		return nil, err
	}
	return stmt, nil
}
// parseTypeNote parses a type annotation: an identifier, list, or tuple
// (possibly function) type, followed by any number of Question tokens that
// wrap it in optional markers.
func parseTypeNote(p *Parser) (TypeNote, error) {
	var child TypeNote
	var err error
	switch p.lexer.Peek().Type {
	case lexer.Ident:
		child, err = parseTypeNoteIdent(p)
	case lexer.BracketL:
		child, err = parseTypeNoteList(p)
	case lexer.ParenL:
		child, err = parseTypeNoteTuple(p)
	default:
		return nil, makeSyntaxError(p.lexer.Peek(), "unexpected symbol", true)
	}
	if err != nil {
		return nil, err
	}
	for p.lexer.Peek().Type == lexer.Question {
		// The error is deliberately ignored: the loop condition guarantees
		// the next token is a Question, which is parseTypeNoteOptional's
		// only failure mode.
		child, _ = parseTypeNoteOptional(p, child)
	}
	return child, nil
}
// parseTypeNoteIdent parses a named type, special-casing the name "Void".
func parseTypeNoteIdent(p *Parser) (TypeNote, error) {
	var tok lexer.Token
	var err error
	if tok, err = p.expectNextToken(lexer.Ident, "expected identifier"); err != nil {
		return nil, err
	}
	switch tok.Lexeme {
	case "Void":
		return TypeNoteVoid{tok}, nil
	default:
		return TypeNoteIdent{tok, tok.Lexeme}, nil
	}
}

// parseTypeNoteList parses a bracket-delimited element type.
func parseTypeNoteList(p *Parser) (TypeNote, error) {
	tok, err := p.expectNextToken(lexer.BracketL, "expected left bracket")
	if err != nil {
		return nil, err
	}
	child, err := parseTypeNote(p)
	if err != nil {
		return nil, err
	}
	_, err = p.expectNextToken(lexer.BracketR, "expected right bracket")
	if err != nil {
		return nil, err
	}
	return TypeNoteList{tok, child}, nil
}

// parseTypeNoteOptional wraps an already-parsed type when a trailing
// question mark follows it.
func parseTypeNoteOptional(p *Parser, child TypeNote) (TypeNote, error) {
	tok, err := p.expectNextToken(lexer.Question, "expected question mark")
	if err != nil {
		return nil, err
	}
	return TypeNoteOptional{tok, child}, nil
}
// parseTypeNoteTuple parses a paren-delimited, comma-separated list of
// types. When an Arrow token follows, the tuple is reinterpreted as the
// parameter list of a function type.
func parseTypeNoteTuple(p *Parser) (TypeNote, error) {
	tok, err := p.expectNextToken(lexer.ParenL, "expected left paren")
	if err != nil {
		return nil, err
	}
	params := []TypeNote{}
	for p.peekTokenIsNot(lexer.ParenR, lexer.Error, lexer.EOF) {
		var sig TypeNote
		sig, err = parseTypeNote(p)
		if err != nil {
			return nil, err
		}
		params = append(params, sig)
		// Stop after the last element; a comma means more elements follow.
		if p.peekTokenIsNot(lexer.Comma) {
			break
		} else {
			p.lexer.Next()
		}
	}
	_, err = p.expectNextToken(lexer.ParenR, "expected right paren")
	if err != nil {
		return nil, err
	}
	tuple := TypeNoteTuple{tok, params}
	if p.peekTokenIsNot(lexer.Arrow) {
		return tuple, nil
	}
	return parseTypeNoteFunction(p, tuple)
}

// parseTypeNoteFunction parses the arrow-plus-return-type suffix that turns
// a tuple into a function type.
func parseTypeNoteFunction(p *Parser, tuple TypeNoteTuple) (TypeNote, error) {
	_, err := p.expectNextToken(lexer.Arrow, "expected arrow")
	if err != nil {
		return nil, err
	}
	ret, err := parseTypeNote(p)
	if err != nil {
		return nil, err
	}
	return TypeNoteFunction{tuple, ret}, nil
}
// parseExpr parses an expression via Pratt parsing: a prefix parse func
// produces the leftmost operand, then postfix parse funcs extend it while
// the next operator binds more tightly than `level`.
func parseExpr(p *Parser, level Precedence) (Expr, error) {
	prefix, exists := p.prefixParseFuncs[p.lexer.Peek().Type]
	if !exists { // idiomatic form of `exists == false`
		return nil, makeSyntaxError(p.lexer.Peek(), "unexpected symbol", true)
	}
	left, err := prefix(p)
	if err != nil {
		return nil, err
	}
	for p.peekTokenIsNot(lexer.EOF) && level < p.peekPrecedence() {
		// Any token with a precedence above Lowest was registered together
		// with a postfix parse func in registerPostfix, so this lookup
		// cannot yield nil here.
		postfix := p.postfixParseFuncs[p.lexer.Peek().Type]
		left, err = postfix(p, left)
		if err != nil {
			return nil, err
		}
	}
	return left, nil
}
// parseFunction parses a function literal: the FN keyword, a signature, and
// a statement block body.
func parseFunction(p *Parser) (Expr, error) {
	tok, err := p.expectNextToken(lexer.Fn, "expected FN keyword")
	if err != nil {
		return nil, err
	}
	params, ret, err := parseFunctionSignature(p)
	if err != nil {
		return nil, err
	}
	block, err := parseStmtBlock(p)
	if err != nil {
		return nil, err
	}
	return FunctionExpr{tok, params, ret, block}, nil
}

// parseFunctionSignature parses a parameter list followed by a return type
// annotation.
func parseFunctionSignature(p *Parser) ([]FunctionParam, TypeNote, error) {
	var params []FunctionParam
	var ret TypeNote
	var err error
	if params, err = parseFunctionParams(p); err != nil {
		return nil, nil, err
	}
	if ret, err = parseFunctionReturnSig(p); err != nil {
		return nil, nil, err
	}
	return params, ret, nil
}
// parseFunctionParams parses a paren-delimited, comma-separated parameter
// list.
func parseFunctionParams(p *Parser) ([]FunctionParam, error) {
	_, err := p.expectNextToken(lexer.ParenL, "expected left paren")
	if err != nil {
		return nil, err
	}
	params := []FunctionParam{}
	for p.peekTokenIsNot(lexer.ParenR, lexer.EOF, lexer.Error) {
		var param FunctionParam
		param, err = parseFunctionParam(p)
		if err != nil {
			return nil, err
		}
		params = append(params, param)
		// Stop after the last parameter; a comma means more follow.
		if p.peekTokenIsNot(lexer.Comma) {
			break
		} else {
			p.lexer.Next()
		}
	}
	_, err = p.expectNextToken(lexer.ParenR, "expected right paren")
	if err != nil {
		return nil, err
	}
	return params, nil
}

// parseFunctionParam parses a single `name : type` parameter.
func parseFunctionParam(p *Parser) (FunctionParam, error) {
	ident, err := parseIdent(p)
	if err != nil {
		return FunctionParam{}, err
	}
	_, err = p.expectNextToken(lexer.Colon, "expected colon between parameter name and type")
	if err != nil {
		return FunctionParam{}, err
	}
	var sig TypeNote
	sig, err = parseTypeNote(p)
	if err != nil {
		return FunctionParam{}, err
	}
	// parseIdent only ever returns an IdentExpr, so this assertion is safe.
	return FunctionParam{ident.(IdentExpr), sig}, nil
}
// parseFunctionReturnSig parses the colon-plus-type return annotation that
// follows a function's parameter list.
func parseFunctionReturnSig(p *Parser) (TypeNote, error) {
	_, err := p.expectNextToken(lexer.Colon, "expected colon between parameters and return type")
	if err != nil {
		return nil, err
	}
	ret, err := parseTypeNote(p)
	if err != nil {
		return nil, err
	}
	// The original returned `ret, err` where err is provably nil; return
	// nil explicitly for clarity.
	return ret, nil
}
// parseInfix parses the right operand of a binary operator at the
// operator's own precedence, so equal-precedence operators associate left.
func parseInfix(p *Parser, left Expr) (Expr, error) {
	level := p.peekPrecedence()
	tok := p.lexer.Next()
	oper := tok.Lexeme
	right, err := parseExpr(p, level)
	if err != nil {
		return nil, err
	}
	return BinaryExpr{oper, tok, left, right}, nil
}

// parseSubscript parses a bracketed index following an expression,
// rejecting an empty index (an immediate right bracket).
func parseSubscript(p *Parser, left Expr) (Expr, error) {
	_, err := p.expectNextToken(lexer.BracketL, "expect left bracket")
	if err != nil {
		return nil, err
	}
	if p.lexer.Peek().Type == lexer.BracketR {
		err = makeSyntaxError(p.lexer.Peek(), "expected index expression", false)
		return nil, err
	}
	index, err := parseExpr(p, Lowest)
	if err != nil {
		return nil, err
	}
	_, err = p.expectNextToken(lexer.BracketR, "expect right bracket")
	if err != nil {
		return nil, err
	}
	return SubscriptExpr{left, index}, nil
}
// parseDispatch parses a call's paren-delimited argument list following a
// callee expression. The loop also exits on Error and EOF tokens so a
// malformed argument list fails at the right-paren check instead of
// looping forever.
func parseDispatch(p *Parser, left Expr) (Expr, error) {
	_, err := p.expectNextToken(lexer.ParenL, "expected left paren")
	if err != nil {
		return nil, err
	}
	var args []Expr
	for p.peekTokenIsNot(lexer.ParenR, lexer.Error, lexer.EOF) {
		var arg Expr
		arg, err = parseExpr(p, Lowest)
		if err != nil {
			return nil, err
		}
		args = append(args, arg)
		// Stop after the last argument; a comma means more follow.
		if p.peekTokenIsNot(lexer.Comma) {
			break
		}
		p.lexer.Next()
	}
	_, err = p.expectNextToken(lexer.ParenR, "expected right paren")
	if err != nil {
		return nil, err
	}
	return DispatchExpr{left, args}, nil
}
// parseAssign parses an assignment whose left side must already have parsed
// as an identifier. The right side is parsed one level below the operator's
// own precedence, making assignment right-associative.
func parseAssign(p *Parser, left Expr) (Expr, error) {
	leftIdent, ok := left.(IdentExpr)
	if !ok { // idiomatic form of `ok == false`
		return nil, SyntaxError{left.Start(), "left hand must be an identifier"}
	}
	level := p.peekPrecedence()
	tok := p.lexer.Next()
	right, err := parseExpr(p, level-1)
	if err != nil {
		return nil, err
	}
	return AssignExpr{tok, leftIdent, right}, nil
}
// parsePostfix builds a unary expression from an operator that follows its
// operand.
func parsePostfix(p *Parser, left Expr) (Expr, error) {
	tok := p.lexer.Next()
	oper := tok.Lexeme
	return UnaryExpr{oper, tok, left}, nil
}

// parsePrefix builds a unary expression from an operator that precedes its
// operand, parsing the operand at Prefix precedence.
func parsePrefix(p *Parser) (Expr, error) {
	tok := p.lexer.Next()
	oper := tok.Lexeme
	right, err := parseExpr(p, Prefix)
	if err != nil {
		return nil, err
	}
	return UnaryExpr{oper, tok, right}, nil
}
// parseGroup parses a parenthesized expression, returning the inner
// expression unchanged.
func parseGroup(p *Parser) (Expr, error) {
	_, err := p.expectNextToken(lexer.ParenL, "expected left paren")
	if err != nil {
		return nil, err
	}
	expr, err := parseExpr(p, Lowest)
	if err != nil {
		return nil, err
	}
	_, err = p.expectNextToken(lexer.ParenR, "expected right paren")
	if err != nil {
		return nil, err
	}
	return expr, nil
}

// parseSelf parses the `self` keyword expression.
func parseSelf(p *Parser) (Expr, error) {
	tok, err := p.expectNextToken(lexer.Self, "expected self")
	if err != nil {
		return nil, err
	}
	return SelfExpr{tok}, nil
}

// parseIdent parses an identifier expression.
func parseIdent(p *Parser) (Expr, error) {
	tok, err := p.expectNextToken(lexer.Ident, "expected identifier")
	if err != nil {
		return nil, err
	}
	return IdentExpr{tok, tok.Lexeme}, nil
}

// parseNumber parses a number literal token and evaluates its value.
func parseNumber(p *Parser) (Expr, error) {
	tok, err := p.expectNextToken(lexer.Number, "expected number literal")
	if err != nil {
		return nil, err
	}
	return evalNumber(tok)
}

// evalNumber converts a number token's lexeme to an int. Only unsigned
// base-10 values are accepted; anything else is a syntax error.
func evalNumber(tok lexer.Token) (NumberExpr, error) {
	val, err := strconv.ParseUint(tok.Lexeme, 10, 64)
	if err != nil {
		return NumberExpr{}, makeSyntaxError(tok, "malformed number literal", false)
	}
	// NOTE(review): values above the max int wrap on this conversion.
	return NumberExpr{tok, int(val)}, nil
}

// parseString parses a string literal token and evaluates its value.
func parseString(p *Parser) (Expr, error) {
	tok, err := p.expectNextToken(lexer.String, "expected string literal")
	if err != nil {
		return nil, err
	}
	return evalString(tok)
}

// evalString strips the surrounding double quotes from a string token's
// lexeme; it never fails.
func evalString(tok lexer.Token) (StringExpr, error) {
	dblQuote := "\""
	remSuffix := strings.TrimSuffix(tok.Lexeme, dblQuote)
	remBoth := strings.TrimPrefix(remSuffix, dblQuote)
	return StringExpr{tok, remBoth}, nil
}

// parseBoolean parses a boolean literal token and evaluates its value.
func parseBoolean(p *Parser) (Expr, error) {
	tok, err := p.expectNextToken(lexer.Boolean, "expected boolean literal")
	if err != nil {
		return nil, err
	}
	return evalBoolean(tok)
}

// evalBoolean maps the lexemes "true" and "false" to their boolean values,
// rejecting anything else as malformed.
func evalBoolean(tok lexer.Token) (BooleanExpr, error) {
	if tok.Lexeme == "true" {
		return BooleanExpr{tok, true}, nil
	} else if tok.Lexeme == "false" {
		return BooleanExpr{tok, false}, nil
	}
	return BooleanExpr{}, makeSyntaxError(tok, "malformed boolean literal", false)
}
|
package parser
import (
"errors"
"fmt"
"regexp"
"strings"
"github.com/gorilla/css/scanner"
"github.com/aymerick/douceur/css"
)
const (
importantSuffixRegexp = `(?i)\s*!important\s*$`
)
var (
importantRegexp *regexp.Regexp
)
// Parser represents a CSS parser
type Parser struct {
scan *scanner.Scanner // Tokenizer
// Tokens parsed but not consumed yet
tokens []*scanner.Token
// Rule embedding level
embedLevel int
}
func init() {
importantRegexp = regexp.MustCompile(importantSuffixRegexp)
}
// NewParser instantiates a new parser
func NewParser(txt string) *Parser {
	return &Parser{
		scan: scanner.New(txt),
	}
}
// Parse parses a whole stylesheet
func Parse(text string) (*css.Stylesheet, error) {
	// The intermediate result/err variables added nothing: forward the call.
	return NewParser(text).ParseStylesheet()
}

// ParseDeclarations parses CSS declarations
func ParseDeclarations(text string) ([]*css.Declaration, error) {
	return NewParser(text).ParseDeclarations()
}
// ParseStylesheet parses a stylesheet
func (parser *Parser) ParseStylesheet() (*css.Stylesheet, error) {
result := css.NewStylesheet()
// Parse BOM
if _, err := parser.parseBOM(); err != nil {
return result, err
}
// Parse list of rules
rules, err := parser.ParseRules()
if err != nil {
return result, err
}
result.Rules = rules
return result, nil
}
// ParseRules parses a list of rules
func (parser *Parser) ParseRules() ([]*css.Rule, error) {
result := []*css.Rule{}
inBlock := false
if parser.tokenChar("{") {
// parsing a block of rules
inBlock = true
parser.embedLevel++
parser.shiftToken()
}
for parser.tokenParsable() {
if parser.tokenIgnorable() {
parser.shiftToken()
} else if parser.tokenChar("}") {
if !inBlock {
errMsg := fmt.Sprintf("Unexpected } character: %s", parser.nextToken().String())
return result, errors.New(errMsg)
}
parser.shiftToken()
parser.embedLevel--
// finished
break
} else {
rule, err := parser.ParseRule()
if err != nil {
return result, err
}
rule.EmbedLevel = parser.embedLevel
result = append(result, rule)
}
}
return result, parser.err()
}
// ParseRule parses a rule
func (parser *Parser) ParseRule() (*css.Rule, error) {
if parser.tokenAtKeyword() {
return parser.parseAtRule()
}
return parser.parseQualifiedRule()
}
// ParseDeclarations parses a list of declarations
func (parser *Parser) ParseDeclarations() ([]*css.Declaration, error) {
result := []*css.Declaration{}
if parser.tokenChar("{") {
parser.shiftToken()
}
for parser.tokenParsable() {
if parser.tokenIgnorable() {
parser.shiftToken()
} else if parser.tokenChar("}") {
// end of block
parser.shiftToken()
break
} else {
declaration, err := parser.ParseDeclaration()
if err != nil {
return result, err
}
result = append(result, declaration)
}
}
return result, parser.err()
}
// ParseDeclaration parses a declaration
func (parser *Parser) ParseDeclaration() (*css.Declaration, error) {
result := css.NewDeclaration()
curValue := ""
for parser.tokenParsable() {
if parser.tokenChar(":") {
result.Property = strings.TrimSpace(curValue)
curValue = ""
parser.shiftToken()
} else if parser.tokenChar(";") || parser.tokenChar("}") {
if result.Property == "" {
errMsg := fmt.Sprintf("Unexpected ; character: %s", parser.nextToken().String())
return result, errors.New(errMsg)
}
if importantRegexp.MatchString(curValue) {
result.Important = true
curValue = importantRegexp.ReplaceAllString(curValue, "")
}
result.Value = strings.TrimSpace(curValue)
if parser.tokenChar(";") {
parser.shiftToken()
}
// finished
break
} else {
token := parser.shiftToken()
curValue += token.Value
}
}
// log.Printf("[parsed] Declaration: %s", result.String())
return result, parser.err()
}
// Parse an At Rule
func (parser *Parser) parseAtRule() (*css.Rule, error) {
// parse rule name (eg: "@import")
token := parser.shiftToken()
result := css.NewRule(css.AtRule)
result.Name = token.Value
for parser.tokenParsable() {
if parser.tokenChar(";") {
parser.shiftToken()
// finished
break
} else if parser.tokenChar("{") {
if result.EmbedsRules() {
// parse rules block
rules, err := parser.ParseRules()
if err != nil {
return result, err
}
result.Rules = rules
} else {
// parse declarations block
declarations, err := parser.ParseDeclarations()
if err != nil {
return result, err
}
result.Declarations = declarations
}
// finished
break
} else {
// parse prelude
prelude, err := parser.parsePrelude()
if err != nil {
return result, err
}
result.Prelude = prelude
}
}
// log.Printf("[parsed] Rule: %s", result.String())
return result, parser.err()
}
// Parse a Qualified Rule
func (parser *Parser) parseQualifiedRule() (*css.Rule, error) {
result := css.NewRule(css.QualifiedRule)
for parser.tokenParsable() {
if parser.tokenChar("{") {
if result.Prelude == "" {
errMsg := fmt.Sprintf("Unexpected { character: %s", parser.nextToken().String())
return result, errors.New(errMsg)
}
// parse declarations block
declarations, err := parser.ParseDeclarations()
if err != nil {
return result, err
}
result.Declarations = declarations
// finished
break
} else {
// parse prelude
prelude, err := parser.parsePrelude()
if err != nil {
return result, err
}
result.Prelude = prelude
}
}
result.Selectors = strings.Split(result.Prelude, ",")
for i, sel := range result.Selectors {
result.Selectors[i] = strings.TrimSpace(sel)
}
// log.Printf("[parsed] Rule: %s", result.String())
return result, parser.err()
}
// Parse Rule prelude
func (parser *Parser) parsePrelude() (string, error) {
result := ""
for parser.tokenParsable() && !parser.tokenEndOfPrelude() {
token := parser.shiftToken()
result += token.Value
}
result = strings.TrimSpace(result)
// log.Printf("[parsed] prelude: %s", result)
return result, parser.err()
}
// Parse BOM
func (parser *Parser) parseBOM() (bool, error) {
if parser.nextToken().Type == scanner.TokenBOM {
parser.shiftToken()
return true, nil
}
return false, parser.err()
}
// Returns next token without removing it from tokens buffer
func (parser *Parser) nextToken() *scanner.Token {
if len(parser.tokens) == 0 {
// fetch next token
nextToken := parser.scan.Next()
// log.Printf("[token] %s => %v", nextToken.Type.String(), nextToken.Value)
// queue it
parser.tokens = append(parser.tokens, nextToken)
}
return parser.tokens[0]
}
// Returns next token and remove it from the tokens buffer
func (parser *Parser) shiftToken() *scanner.Token {
var result *scanner.Token
result, parser.tokens = parser.tokens[0], parser.tokens[1:]
return result
}
// Returns tokenizer error, or nil if no error
func (parser *Parser) err() error {
if parser.tokenError() {
token := parser.nextToken()
return fmt.Errorf("Tokenizer error: %s", token.String())
}
return nil
}
// Returns true if next token is Error
func (parser *Parser) tokenError() bool {
return parser.nextToken().Type == scanner.TokenError
}
// Returns true if next token is EOF
func (parser *Parser) tokenEOF() bool {
return parser.nextToken().Type == scanner.TokenEOF
}
// Returns true if next token is a whitespace
func (parser *Parser) tokenWS() bool {
return parser.nextToken().Type == scanner.TokenS
}
// Returns true if next token is a comment
func (parser *Parser) tokenComment() bool {
return parser.nextToken().Type == scanner.TokenComment
}
// Returns true if next token is a CDO or a CDC
func (parser *Parser) tokenCDOorCDC() bool {
switch parser.nextToken().Type {
case scanner.TokenCDO, scanner.TokenCDC:
return true
default:
return false
}
}
// Returns true if next token is ignorable
func (parser *Parser) tokenIgnorable() bool {
return parser.tokenWS() || parser.tokenComment() || parser.tokenCDOorCDC()
}
// Returns true if next token is parsable
func (parser *Parser) tokenParsable() bool {
return !parser.tokenEOF() && !parser.tokenError()
}
// Returns true if next token is an At Rule keyword
func (parser *Parser) tokenAtKeyword() bool {
return parser.nextToken().Type == scanner.TokenAtKeyword
}
// Returns true if next token is given character
func (parser *Parser) tokenChar(value string) bool {
token := parser.nextToken()
return (token.Type == scanner.TokenChar) && (token.Value == value)
}
// Returns true if next token marks the end of a prelude
func (parser *Parser) tokenEndOfPrelude() bool {
return parser.tokenChar(";") || parser.tokenChar("{")
}
make returns more concise
* spelling
package parser
import (
"errors"
"fmt"
"regexp"
"strings"
"github.com/gorilla/css/scanner"
"github.com/aymerick/douceur/css"
)
const (
	// importantSuffixRegexp matches a trailing "!important" (any case) with
	// surrounding whitespace.
	importantSuffixRegexp = `(?i)\s*!important\s*$`
)

var (
	// importantRegexp is compiled once in init from importantSuffixRegexp.
	importantRegexp *regexp.Regexp
)

// Parser represents a CSS parser
type Parser struct {
	scan *scanner.Scanner // Tokenizer
	// Tokens parsed but not consumed yet
	tokens []*scanner.Token
	// Rule embedding level
	embedLevel int
}

func init() {
	importantRegexp = regexp.MustCompile(importantSuffixRegexp)
}

// NewParser instantiates a new parser
func NewParser(txt string) *Parser {
	return &Parser{
		scan: scanner.New(txt),
	}
}
// Parse parses a whole stylesheet
func Parse(text string) (*css.Stylesheet, error) {
	return NewParser(text).ParseStylesheet()
}

// ParseDeclarations parses CSS declarations
func ParseDeclarations(text string) ([]*css.Declaration, error) {
	return NewParser(text).ParseDeclarations()
}

// ParseStylesheet parses a stylesheet: an optional BOM followed by a list
// of rules.
func (parser *Parser) ParseStylesheet() (*css.Stylesheet, error) {
	result := css.NewStylesheet()
	// Parse BOM
	if _, err := parser.parseBOM(); err != nil {
		return result, err
	}
	// Parse list of rules
	rules, err := parser.ParseRules()
	if err != nil {
		return result, err
	}
	result.Rules = rules
	return result, nil
}
// ParseRules parses a list of rules, optionally wrapped in a brace block;
// a closing "}" outside a block is an error.
func (parser *Parser) ParseRules() ([]*css.Rule, error) {
	result := []*css.Rule{}
	inBlock := false
	if parser.tokenChar("{") {
		// parsing a block of rules
		inBlock = true
		parser.embedLevel++
		parser.shiftToken()
	}
	for parser.tokenParsable() {
		if parser.tokenIgnorable() {
			parser.shiftToken()
		} else if parser.tokenChar("}") {
			if !inBlock {
				errMsg := fmt.Sprintf("Unexpected } character: %s", parser.nextToken().String())
				return result, errors.New(errMsg)
			}
			parser.shiftToken()
			parser.embedLevel--
			// finished
			break
		} else {
			rule, err := parser.ParseRule()
			if err != nil {
				return result, err
			}
			rule.EmbedLevel = parser.embedLevel
			result = append(result, rule)
		}
	}
	return result, parser.err()
}

// ParseRule parses a rule: at-rules start with an at-keyword token, anything
// else is a qualified rule.
func (parser *Parser) ParseRule() (*css.Rule, error) {
	if parser.tokenAtKeyword() {
		return parser.parseAtRule()
	}
	return parser.parseQualifiedRule()
}
// ParseDeclarations parses a list of declarations, optionally wrapped in a
// brace block.
func (parser *Parser) ParseDeclarations() ([]*css.Declaration, error) {
	result := []*css.Declaration{}
	if parser.tokenChar("{") {
		parser.shiftToken()
	}
	for parser.tokenParsable() {
		if parser.tokenIgnorable() {
			parser.shiftToken()
		} else if parser.tokenChar("}") {
			// end of block
			parser.shiftToken()
			break
		} else {
			declaration, err := parser.ParseDeclaration()
			if err != nil {
				return result, err
			}
			result = append(result, declaration)
		}
	}
	return result, parser.err()
}
// ParseDeclaration parses a single `property: value` declaration, handling
// a trailing "!important" flag. It consumes a terminating ";" but leaves a
// closing "}" for the caller.
func (parser *Parser) ParseDeclaration() (*css.Declaration, error) {
	result := css.NewDeclaration()
	curValue := ""
	for parser.tokenParsable() {
		if parser.tokenChar(":") {
			result.Property = strings.TrimSpace(curValue)
			curValue = ""
			parser.shiftToken()
		} else if parser.tokenChar(";") || parser.tokenChar("}") {
			if result.Property == "" {
				// Bug fix: the message hardcoded ";" even though this branch
				// is also reached for "}" — report the actual character.
				errMsg := fmt.Sprintf("Unexpected %s character: %s", parser.nextToken().Value, parser.nextToken().String())
				return result, errors.New(errMsg)
			}
			if importantRegexp.MatchString(curValue) {
				result.Important = true
				curValue = importantRegexp.ReplaceAllString(curValue, "")
			}
			result.Value = strings.TrimSpace(curValue)
			if parser.tokenChar(";") {
				parser.shiftToken()
			}
			// finished
			break
		} else {
			token := parser.shiftToken()
			curValue += token.Value
		}
	}
	// log.Printf("[parsed] Declaration: %s", result.String())
	return result, parser.err()
}
// Parse an At Rule. The rule ends at a ";" or after its brace block;
// whether the block holds nested rules or declarations is decided by
// result.EmbedsRules().
func (parser *Parser) parseAtRule() (*css.Rule, error) {
	// parse rule name (eg: "@import")
	token := parser.shiftToken()
	result := css.NewRule(css.AtRule)
	result.Name = token.Value
	for parser.tokenParsable() {
		if parser.tokenChar(";") {
			parser.shiftToken()
			// finished
			break
		} else if parser.tokenChar("{") {
			if result.EmbedsRules() {
				// parse rules block
				rules, err := parser.ParseRules()
				if err != nil {
					return result, err
				}
				result.Rules = rules
			} else {
				// parse declarations block
				declarations, err := parser.ParseDeclarations()
				if err != nil {
					return result, err
				}
				result.Declarations = declarations
			}
			// finished
			break
		} else {
			// parse prelude
			prelude, err := parser.parsePrelude()
			if err != nil {
				return result, err
			}
			result.Prelude = prelude
		}
	}
	// log.Printf("[parsed] Rule: %s", result.String())
	return result, parser.err()
}

// Parse a Qualified Rule: a selector prelude followed by a declarations
// block. The prelude is split on commas into trimmed Selectors.
func (parser *Parser) parseQualifiedRule() (*css.Rule, error) {
	result := css.NewRule(css.QualifiedRule)
	for parser.tokenParsable() {
		if parser.tokenChar("{") {
			if result.Prelude == "" {
				errMsg := fmt.Sprintf("Unexpected { character: %s", parser.nextToken().String())
				return result, errors.New(errMsg)
			}
			// parse declarations block
			declarations, err := parser.ParseDeclarations()
			if err != nil {
				return result, err
			}
			result.Declarations = declarations
			// finished
			break
		} else {
			// parse prelude
			prelude, err := parser.parsePrelude()
			if err != nil {
				return result, err
			}
			result.Prelude = prelude
		}
	}
	result.Selectors = strings.Split(result.Prelude, ",")
	for i, sel := range result.Selectors {
		result.Selectors[i] = strings.TrimSpace(sel)
	}
	// log.Printf("[parsed] Rule: %s", result.String())
	return result, parser.err()
}
// Parse Rule prelude: accumulates token text until a ";" or "{" is next,
// then trims surrounding whitespace.
func (parser *Parser) parsePrelude() (string, error) {
	result := ""
	for parser.tokenParsable() && !parser.tokenEndOfPrelude() {
		token := parser.shiftToken()
		result += token.Value
	}
	result = strings.TrimSpace(result)
	// log.Printf("[parsed] prelude: %s", result)
	return result, parser.err()
}

// Parse BOM: consumes a leading byte-order-mark token if present, reporting
// whether one was found.
func (parser *Parser) parseBOM() (bool, error) {
	if parser.nextToken().Type == scanner.TokenBOM {
		parser.shiftToken()
		return true, nil
	}
	return false, parser.err()
}
// Returns next token without removing it from tokens buffer, fetching from
// the scanner when the buffer is empty.
func (parser *Parser) nextToken() *scanner.Token {
	if len(parser.tokens) == 0 {
		// fetch next token
		nextToken := parser.scan.Next()
		// log.Printf("[token] %s => %v", nextToken.Type.String(), nextToken.Value)
		// queue it
		parser.tokens = append(parser.tokens, nextToken)
	}
	return parser.tokens[0]
}
// Returns next token and removes it from the tokens buffer. Routing through
// nextToken guarantees the buffer is non-empty first; the original indexed
// parser.tokens[0] directly and would panic on an empty buffer.
func (parser *Parser) shiftToken() *scanner.Token {
	result := parser.nextToken()
	parser.tokens = parser.tokens[1:]
	return result
}
// Returns tokenizer error, or nil if no error
func (parser *Parser) err() error {
	if parser.tokenError() {
		token := parser.nextToken()
		return fmt.Errorf("Tokenizer error: %s", token.String())
	}
	return nil
}

// Returns true if next token is Error
func (parser *Parser) tokenError() bool {
	return parser.nextToken().Type == scanner.TokenError
}

// Returns true if next token is EOF
func (parser *Parser) tokenEOF() bool {
	return parser.nextToken().Type == scanner.TokenEOF
}

// Returns true if next token is a whitespace
func (parser *Parser) tokenWS() bool {
	return parser.nextToken().Type == scanner.TokenS
}

// Returns true if next token is a comment
func (parser *Parser) tokenComment() bool {
	return parser.nextToken().Type == scanner.TokenComment
}
// Returns true if next token is a CDO or a CDC
func (parser *Parser) tokenCDOorCDC() bool {
	typ := parser.nextToken().Type
	return typ == scanner.TokenCDO || typ == scanner.TokenCDC
}
// Returns true if next token is ignorable (whitespace, comment, or CDO/CDC)
func (parser *Parser) tokenIgnorable() bool {
	return parser.tokenWS() || parser.tokenComment() || parser.tokenCDOorCDC()
}

// Returns true if next token is parsable (neither EOF nor a tokenizer error)
func (parser *Parser) tokenParsable() bool {
	return !parser.tokenEOF() && !parser.tokenError()
}

// Returns true if next token is an At Rule keyword
func (parser *Parser) tokenAtKeyword() bool {
	return parser.nextToken().Type == scanner.TokenAtKeyword
}

// Returns true if next token is given character
func (parser *Parser) tokenChar(value string) bool {
	token := parser.nextToken()
	return (token.Type == scanner.TokenChar) && (token.Value == value)
}

// Returns true if next token marks the end of a prelude (";" or "{")
func (parser *Parser) tokenEndOfPrelude() bool {
	return parser.tokenChar(";") || parser.tokenChar("{")
}
|
package parser
import "github.com/hansrodtang/semver"
type parser struct {
items chan item // channel of scanned items.
result node
ibuf []item
pos int
}
func (p *parser) run() (node, error) {
return handleRange(p), nil
}
func (p *parser) next() item {
if p.pos >= len(p.ibuf) {
i := <-p.items
p.ibuf = append(p.ibuf, i)
p.pos++
return i
}
i := p.ibuf[p.pos]
p.pos++
return i
}
func (p *parser) backup() {
p.pos--
}
func Parse(input string) (node, error) {
_, ch := lex(input)
p := &parser{ch, nil, []item{}, 0}
return p.run()
}
// handleOperator parses a single comparison out of the item stream.
// It returns the zero nodeComparison when the next item ends the
// current comparison (EOF, set separator, or range separator).
func handleOperator(p *parser) nodeComparison {
	var nc nodeComparison
	// Every branch below returns, so the original for-loop could only
	// ever run one iteration; a plain switch expresses the same
	// control flow without the dead loop.
	i := p.next()
	switch i.typ {
	case itemEOF:
		p.backup()
		return nc
	case itemSet:
		return nc
	case itemRange:
		return nc
	case itemVersion:
		// Bare version: implicit equality comparison.
		// NOTE(review): the semver.New error is discarded; this assumes
		// the lexer only emits valid versions — confirm.
		ver, _ := semver.New(i.val)
		nc = nodeComparison{eq, ver}
		return nc
	default:
		// Operator item: the following item carries the version operand.
		v := p.next()
		ver, _ := semver.New(v.val)
		nc = nodeComparison{comparators[i.val], ver}
		return nc
	}
}
// handleSet parses one set of comparisons, stopping at EOF (which is
// pushed back for the caller) or at a range separator. Set separators
// between comparisons are skipped.
func handleSet(p *parser) nodeSet {
	var set nodeSet
	for {
		switch i := p.next(); i.typ {
		case itemSet:
			// Separator between comparisons: keep scanning.
			// (The original `break` only exited the switch, which is
			// exactly this behavior.)
		case itemEOF:
			p.backup()
			return set
		case itemRange:
			return set
		default:
			p.backup()
			set.comparisons = append(set.comparisons, handleOperator(p))
		}
	}
}
// handleRange parses the whole expression: a sequence of sets joined
// by range separators, terminated by EOF.
func handleRange(p *parser) node {
	var rng nodeRange
	for {
		if i := p.next(); i.typ == itemEOF {
			return rng
		}
		// Anything else starts a set; push the item back and parse it.
		p.backup()
		rng.sets = append(rng.sets, handleSet(p))
	}
}
Removed cases that the lexer should already prevent. These should be
replaced with explicit itemError checks at a later point.
package parser
import "github.com/hansrodtang/semver"
// parser consumes items produced by the lexer and assembles the range
// tree; ibuf lets backup rewind the stream.
type parser struct {
	items chan item // channel of scanned items.
	result node     // NOTE(review): never written in this chunk — possibly vestigial; confirm.
	ibuf []item     // replay buffer of items already received
	pos int         // index into ibuf of the next item to return
}
// run drives the parse and returns the resulting tree. The error
// result is currently always nil.
func (p *parser) run() (node, error) {
	return handleRange(p), nil
}
// next returns the next item: from the replay buffer if backup was
// called, otherwise freshly pulled from the lexer channel (and
// recorded so it can be replayed later).
func (p *parser) next() item {
	if p.pos >= len(p.ibuf) {
		i := <-p.items
		p.ibuf = append(p.ibuf, i)
		p.pos++
		return i
	}
	i := p.ibuf[p.pos]
	p.pos++
	return i
}
// backup rewinds the stream by one item so next replays it from ibuf.
func (p *parser) backup() {
	p.pos--
}
// Parse lexes and parses input as a semver range expression and
// returns the root node of the resulting tree.
func Parse(input string) (node, error) {
	_, ch := lex(input)
	p := &parser{ch, nil, []item{}, 0}
	return p.run()
}
// handleOperator parses a single comparison out of the item stream:
// either a bare version (implicit equality) or an operator followed
// by its version operand.
func handleOperator(p *parser) nodeComparison {
	var nc nodeComparison
	// Every branch below returns, so the original for-loop could only
	// ever run one iteration; a plain switch expresses the same
	// control flow without the dead loop.
	i := p.next()
	switch i.typ {
	case itemVersion:
		// NOTE(review): the semver.New error is discarded; this assumes
		// the lexer only emits valid versions — confirm.
		ver, _ := semver.New(i.val)
		nc = nodeComparison{eq, ver}
		return nc
	default:
		// Operator item: the following item carries the version operand.
		v := p.next()
		ver, _ := semver.New(v.val)
		nc = nodeComparison{comparators[i.val], ver}
		return nc
	}
}
// handleSet parses one set of comparisons, stopping at EOF (pushed
// back for the caller) or at a range separator. Set separators
// between comparisons are skipped.
func handleSet(p *parser) nodeSet {
	var nc nodeComparison
	var set nodeSet
	for {
		i := p.next()
		switch i.typ {
		case itemSet:
			// Separator: `break` only exits the switch, so scanning
			// continues — that is the intended behavior here.
			break
		case itemEOF:
			p.backup()
			return set
		case itemRange:
			return set
		default:
			p.backup()
			nc = handleOperator(p)
			set.comparisons = append(set.comparisons, nc)
		}
	}
}
// handleRange parses the whole expression: a sequence of sets joined
// by range separators, terminated by EOF.
func handleRange(p *parser) node {
	var ns nodeSet
	var rng nodeRange
	for {
		i := p.next()
		switch i.typ {
		case itemEOF:
			return rng
		default:
			// Anything else starts a set; push back and parse it.
			p.backup()
			ns = handleSet(p)
			rng.sets = append(rng.sets, ns)
		}
	}
}
|
package parser
import (
"fmt"
"misc/calc/ast"
"misc/calc/scanner"
"misc/calc/token"
"strconv"
)
// ParseExpr parses a standalone expression string by wrapping it in
// an anonymous file.
func ParseExpr(expr string) ast.Node {
	return ParseFile(token.NewFile("", expr), expr)
}
// ParseFile parses str into an AST rooted at an *ast.File. f must be
// the token.File created for str; on a size mismatch the failure is
// only printed to stdout and nil is returned.
func ParseFile(f *token.File, str string) *ast.File {
	if f.Size() != len(str) {
		fmt.Println("File size does not match string length.")
		return nil
	}
	root := ast.NewFile(token.Pos(1), token.Pos(len(str)+1))
	p := new(parser)
	p.init(f, str)
	p.topScope = root.Scope
	p.curScope = root.Scope
	// Collect top-level nodes until parse returns nil (EOF or an
	// unrecoverable error — both look the same here).
	for n := p.parse(); n != nil; n = p.parse() {
		root.Nodes = append(root.Nodes, n)
		p.next()
	}
	//if p.file.NumErrors() > 0 {
	//	p.file.PrintErrors()
	//	return nil
	//}
	// A scope imbalance indicates a parser bug: some error path did
	// not restore curScope.
	if p.topScope != p.curScope {
		panic("Imbalanced scope!")
	}
	return root
}
// parser holds the scanner, the current token triple (tok, pos, lit),
// and the scope stack used for identifier resolution.
type parser struct {
	file *token.File
	scan *scanner.Scanner
	topScope *ast.Scope // file-level scope
	curScope *ast.Scope // innermost open scope (changes during defines)
	tok token.Token     // current token type
	pos token.Pos       // current token position (file-base adjusted)
	lit string          // current token literal text
}
// init prepares the parser for file/expr and primes the first token.
func (p *parser) init(file *token.File, expr string) {
	p.file = file
	p.scan = new(scanner.Scanner)
	p.scan.Init(file, expr)
	p.next()
}
// perror pairs an error with the position where it occurred.
// NOTE(review): not referenced anywhere in this chunk — possibly
// unused; confirm before removing.
type perror struct {
	pos token.Pos
	msg error
}
/*
var closeError = errors.New("Unexpected ')'")
var eofError = errors.New("Reached end of file")
var openError = errors.New("Opening '(' with no closing bracket.")
*/
// next advances to the next token, offsetting the scanner position by
// the file's base so positions are unique across files.
func (p *parser) next() {
	p.tok, p.pos, p.lit = p.scan.Scan()
	p.pos += p.file.Base()
	//fmt.Println("tok:", p.tok)
	//fmt.Println("pos:", p.pos)
	//fmt.Println("lit:", p.lit)
}
// parse parses one top-level node. Comments are skipped; nil is
// returned at EOF or (after recording an error) on a token that
// cannot start a node.
func (p *parser) parse() ast.Node {
	// Skip a run of comments iteratively instead of recursing once
	// per comment.
	for p.tok == token.COMMENT {
		p.next()
	}
	switch p.tok {
	case token.IDENT:
		return p.parseIdentifier()
	case token.NUMBER:
		return p.parseNumber()
	case token.LPAREN:
		return p.parseExpression()
	case token.EOF:
		return nil
	default:
		p.file.AddError(p.pos, "Unexpected token outside of expression: ", p.lit)
		return nil
	}
}
// parseComparisonExpression parses (op a b) where op is one of
// < <= > >= == !=. lp is the position of the opening '('.
// Returns nil (after recording an error) when ')' is missing.
func (p *parser) parseComparisonExpression(lp token.Pos) *ast.CompExpr {
	ce := new(ast.CompExpr)
	ce.LParen = lp
	ce.CompLit = p.lit // operator literal text
	p.next()
	ce.A = p.parseSubExpression()
	ce.B = p.parseSubExpression()
	if ce.A == nil || ce.B == nil { // doesn't seem right...
		p.file.AddError(p.pos, "Some kind of conditional error")
	}
	//p.expect(token.RPAREN)
	if p.tok != token.RPAREN {
		p.file.AddError(p.pos, "Expected ')', got:", p.lit)
		return nil
	}
	ce.RParen = p.pos
	return ce
}
// parseDefineExpression parses a definition of the form
// (define name expr...) or (define (name args...) expr...).
// lparen is the position of the opening '('. Returns nil after
// recording an error on malformed input.
func (p *parser) parseDefineExpression(lparen token.Pos) *ast.DefineExpr {
	d := new(ast.DefineExpr)
	d.LParen = lparen
	d.Args = make([]string, 0) // TODO: remove?
	tmp := p.curScope
	d.Scope = ast.NewScope(p.curScope)
	p.curScope = d.Scope
	d.Impl = make([]ast.Node, 0)
	p.next()
	switch p.tok {
	case token.LPAREN:
		e := p.parseIdentifierList()
		if e == nil || len(e.Nodes) == 0 {
			// parseIdentifierList returns nil on malformed input and
			// may legally hold zero nodes; either case previously
			// panicked below on e.Nodes[0].
			p.file.AddError(p.pos, "Expected identifier(s) but got: ", p.lit)
			p.curScope = tmp // restore scope so ParseFile doesn't panic
			return nil
		}
		l := e.Nodes
		d.Name = l[0].(*ast.Identifier).Lit
		l = l[1:]
		for _, v := range l {
			d.Args = append(d.Args, v.(*ast.Identifier).Lit) //TODO: remove?
			d.Scope.Insert(v.(*ast.Identifier).Lit, nil)
			p.curScope.Insert(v.(*ast.Identifier).Lit, d)
		}
	case token.IDENT:
		d.Name = p.parseIdentifier().Lit
		p.next()
	default:
		p.file.AddError(p.pos, "Expected identifier(s) but got: ", p.lit)
		p.curScope = tmp // restore scope on the error path
		return nil
	}
	tmp.Insert(d.Name, d)
	for p.tok != token.RPAREN {
		if p.tok != token.LPAREN {
			p.file.AddError(p.pos, "Expected expression but got: ", p.lit)
			p.curScope = tmp // restore scope on the error path
			return nil
		}
		d.Impl = append(d.Impl, p.parseExpression())
		p.next()
	}
	if len(d.Impl) < 1 {
		p.file.AddError(p.pos, "Expected list of expressions but got: ", p.lit)
		p.curScope = tmp
		return nil
	}
	if p.tok != token.RPAREN {
		p.file.AddError(p.pos, "Expected closing paren but got: ", p.lit)
		p.curScope = tmp
		return nil
	}
	p.curScope = tmp
	return d
}
// parseExpression dispatches on the token following '(' to the
// matching expression parser, forwarding the '(' position so nodes
// record their full extent.
// NOTE(review): an unhandled token type falls out of the switch and
// returns nil without recording an error — confirm that is intended.
func (p *parser) parseExpression() ast.Node {
	lparen := p.pos
	p.next()
	switch p.tok {
	case token.LPAREN:
		p.file.AddError(p.pos, "Parse: First element of an expression may not "+
			"be another expression!")
		return nil
	case token.RPAREN:
		p.file.AddError(p.pos, "Parse: Empty expression not allowed.")
		return nil
	case token.LT, token.LTE, token.GT, token.GTE, token.EQ, token.NEQ:
		return p.parseComparisonExpression(lparen)
	case token.ADD, token.SUB, token.MUL, token.DIV, token.MOD:
		return p.parseMathExpression(lparen)
	case token.DEFINE:
		return p.parseDefineExpression(lparen)
	case token.IDENT:
		return p.parseUserExpression(lparen)
	case token.IF:
		return p.parseIfExpression(lparen)
	case token.PRINT:
		return p.parsePrintExpression(lparen)
	case token.SET:
		return p.parseSetExpression(lparen)
	}
	return nil
}
// parseIdentifier builds an Identifier node from the current token.
// It does not advance the parser.
func (p *parser) parseIdentifier() *ast.Identifier {
	return &ast.Identifier{p.pos, p.lit}
}
// parseIdentifierList parses (ident ident ...), returning nil (after
// recording an error) when anything other than an identifier appears
// before ')'. On success the parser is advanced past the ')'.
func (p *parser) parseIdentifierList() *ast.Expression {
	e := new(ast.Expression)
	e.LParen = p.pos
	e.Nodes = make([]ast.Node, 0)
	p.next()
	for p.tok == token.IDENT {
		e.Nodes = append(e.Nodes, p.parseIdentifier())
		p.next()
	}
	if p.tok != token.RPAREN {
		p.file.AddError(p.pos, "Expected identifier or rparen, got: ", p.lit)
		return nil
	}
	e.RParen = p.pos
	p.next()
	return e
}
// parseIfExpression parses (if comp then [else]); lparen is the '('
// position. When ')' directly follows the then-branch there is no
// else. Returns nil on a missing ')' or when comp/then failed —
// NOTE(review): in the no-else early return that nil check is
// skipped; looks like an oversight, confirm.
func (p *parser) parseIfExpression(lparen token.Pos) *ast.IfExpr {
	ie := new(ast.IfExpr)
	ie.LParen, ie.Else = lparen, nil
	p.next()
	ie.Comp = p.parseSubExpression()
	ie.Then = p.parseSubExpression()
	if p.tok == token.RPAREN {
		// No else branch present.
		ie.Else = nil
		ie.RParen = p.pos
		return ie
	}
	ie.Else = p.parseSubExpression()
	if p.tok != token.RPAREN {
		p.file.AddError(p.pos, "Expected closing paren, got: ", p.lit)
		return nil
	}
	ie.RParen = p.pos
	if ie.Comp == nil || ie.Then == nil {
		return nil
	}
	return ie
}
// parseMathExpression parses (op expr expr...) where op is one of
// + - * / %. lp is the position of the opening '('. Returns nil
// (after recording an error) with fewer than two operands.
func (p *parser) parseMathExpression(lp token.Pos) *ast.MathExpr {
	me := new(ast.MathExpr)
	// Record the '(' position: lp was previously accepted but never
	// stored, losing the node's start position.
	// NOTE(review): assumes ast.MathExpr has an LParen field like its
	// sibling expression nodes (it already has RParen) — confirm.
	me.LParen = lp
	me.OpLit = p.lit
	p.next()
	for p.tok != token.RPAREN && p.tok != token.EOF {
		me.ExprList = append(me.ExprList, p.parseSubExpression())
	}
	if len(me.ExprList) < 2 {
		p.file.AddError(p.pos, "Math expressions must have at least 2 arguments")
		return nil
	}
	//me.ExprList = p.parseExpressionList()
	me.RParen = p.pos
	return me
}
// parseNumber converts the current literal into a Number node. The
// base is inferred by strconv (0x hex, leading 0 octal, else
// decimal). On conversion failure an error is recorded and the node
// carries value 0 (ParseInt's zero result).
func (p *parser) parseNumber() *ast.Number {
	i, err := strconv.ParseInt(p.lit, 0, 64)
	if err != nil {
		p.file.AddError(p.pos, "Parse:", err)
	}
	return &ast.Number{p.pos, p.lit, int(i)}
}
// parsePrintExpression parses (print expr...); lparen is the '('
// position. The argument list may be empty.
func (p *parser) parsePrintExpression(lparen token.Pos) *ast.PrintExpr {
	pe := new(ast.PrintExpr)
	pe.LParen = lparen
	pe.Nodes = make([]ast.Node, 0)
	p.next()
	// Also stop on EOF: the scanner returns EOF forever, so on an
	// unclosed print expression the original loop never terminated
	// (parseSubExpression kept recording errors at EOF).
	for p.tok != token.RPAREN && p.tok != token.EOF {
		pe.Nodes = append(pe.Nodes, p.parseSubExpression())
	}
	if p.tok != token.RPAREN {
		p.file.AddError(p.pos, "Unknown token:", p.lit, "Expected: ')'")
	}
	pe.RParen = p.pos
	return pe
}
// parseSetExpression parses (set ident value); lparen is the '('
// position. The binding is inserted into the current scope.
// NOTE(review): unlike the sibling parsers, a missing ')' records an
// error but still returns the node — confirm whether intended.
func (p *parser) parseSetExpression(lparen token.Pos) *ast.SetExpr {
	se := new(ast.SetExpr)
	se.LParen = lparen
	// eventually expand this for multiple assignment
	p.next()
	if p.tok != token.IDENT {
		p.file.AddError(p.pos, "First argument to set must be an identifier")
		return nil
	}
	se.Name = p.parseIdentifier().Lit
	p.next()
	se.Value = p.parseSubExpression()
	if p.tok != token.RPAREN {
		p.file.AddError(p.pos, "Unknown token:", p.lit, "Expected: ')'")
	}
	se.RParen = p.pos
	p.curScope.Insert(se.Name, se)
	return se
}
// parseSubExpression parses one operand inside an expression: an
// in-scope identifier, a nested parenthesized expression, or a
// number. Leading comments are skipped. The parser is advanced past
// the operand; nil is returned (with an error recorded) for an
// undeclared identifier or unexpected token.
func (p *parser) parseSubExpression() ast.Node {
	for p.tok == token.COMMENT {
		p.next()
	}
	var n ast.Node
	switch p.tok {
	case token.IDENT:
		i := p.parseIdentifier()
		if p.curScope.Lookup(i.Lit) == nil {
			p.file.AddError(p.pos, "Undeclared identifier - ", i.Lit)
			p.next()
			return nil
		}
		n = i
	case token.LPAREN:
		n = p.parseExpression()
	case token.NUMBER:
		n = p.parseNumber()
	default:
		p.file.AddError(p.pos, "Unexpected token: ", p.lit)
	}
	p.next()
	return n
}
// parseUserExpression parses a call to a user-defined function:
// (name arg...). lp is the '(' position (currently unused, mirroring
// the sibling helpers). Returns nil if name is not in scope.
func (p *parser) parseUserExpression(lp token.Pos) *ast.UserExpr {
	if p.curScope.Lookup(p.lit) == nil {
		p.file.AddError(p.pos, "Undeclared variable: ", p.lit)
		return nil
	}
	ue := new(ast.UserExpr)
	ue.Name = p.lit
	p.next()
	// Also stop on EOF: on an unclosed call the scanner yields EOF
	// forever and the original loop spun indefinitely.
	for p.tok != token.RPAREN && p.tok != token.EOF {
		e := p.parseSubExpression()
		if e != nil {
			ue.Nodes = append(ue.Nodes, e)
		}
	}
	if p.tok != token.RPAREN {
		p.file.AddError(p.pos, "Unknown token:", p.lit, "Expected: ')'")
	}
	ue.RParen = p.pos
	return ue
}
Fix identifier error messages
package parser
import (
"fmt"
"misc/calc/ast"
"misc/calc/scanner"
"misc/calc/token"
"strconv"
)
func ParseExpr(expr string) ast.Node {
f := token.NewFile("", expr)
return ParseFile(f, expr)
}
func ParseFile(f *token.File, str string) *ast.File {
if f.Size() != len(str) {
fmt.Println("File size does not match string length.")
return nil
}
root := ast.NewFile(token.Pos(1), token.Pos(len(str)+1))
p := new(parser)
p.init(f, str)
p.topScope = root.Scope
p.curScope = root.Scope
for n := p.parse(); n != nil; n = p.parse() {
root.Nodes = append(root.Nodes, n)
p.next()
}
//if p.file.NumErrors() > 0 {
// p.file.PrintErrors()
// return nil
//}
if p.topScope != p.curScope {
panic("Imbalanced scope!")
}
return root
}
type parser struct {
file *token.File
scan *scanner.Scanner
topScope *ast.Scope
curScope *ast.Scope
tok token.Token
pos token.Pos
lit string
}
func (p *parser) init(file *token.File, expr string) {
p.file = file
p.scan = new(scanner.Scanner)
p.scan.Init(file, expr)
p.next()
}
type perror struct {
pos token.Pos
msg error
}
/*
var closeError = errors.New("Unexpected ')'")
var eofError = errors.New("Reached end of file")
var openError = errors.New("Opening '(' with no closing bracket.")
*/
func (p *parser) next() {
p.tok, p.pos, p.lit = p.scan.Scan()
p.pos += p.file.Base()
//fmt.Println("tok:", p.tok)
//fmt.Println("pos:", p.pos)
//fmt.Println("lit:", p.lit)
}
func (p *parser) parse() ast.Node {
var n ast.Node = nil
switch p.tok {
case token.IDENT:
n = p.parseIdentifier()
case token.NUMBER:
n = p.parseNumber()
case token.LPAREN:
n = p.parseExpression()
case token.COMMENT:
// consume comment and move on
p.next()
return p.parse()
case token.EOF:
return nil
default:
p.file.AddError(p.pos, "Unexpected token outside of expression: ", p.lit)
return nil
}
return n
}
func (p *parser) parseComparisonExpression(lp token.Pos) *ast.CompExpr {
ce := new(ast.CompExpr)
ce.LParen = lp
ce.CompLit = p.lit
p.next()
ce.A = p.parseSubExpression()
ce.B = p.parseSubExpression()
if ce.A == nil || ce.B == nil { // doesn't seem right...
p.file.AddError(p.pos, "Some kind of conditional error")
}
//p.expect(token.RPAREN)
if p.tok != token.RPAREN {
p.file.AddError(p.pos, "Expected ')', got:", p.lit)
return nil
}
ce.RParen = p.pos
return ce
}
// parseDefineExpression parses a definition of the form
// (define name expr...) or (define (name args...) expr...).
// lparen is the position of the opening '('. Returns nil after
// recording an error on malformed input.
func (p *parser) parseDefineExpression(lparen token.Pos) *ast.DefineExpr {
	d := new(ast.DefineExpr)
	d.LParen = lparen
	d.Args = make([]string, 0) // TODO: remove?
	tmp := p.curScope
	d.Scope = ast.NewScope(p.curScope)
	p.curScope = d.Scope
	d.Impl = make([]ast.Node, 0)
	p.next()
	switch p.tok {
	case token.LPAREN:
		e := p.parseIdentifierList()
		if e == nil || len(e.Nodes) == 0 {
			// parseIdentifierList returns nil on malformed input and
			// may legally hold zero nodes; either case previously
			// panicked below on e.Nodes[0].
			p.file.AddError(p.pos, "Expected identifier(s) but got: ", p.lit)
			p.curScope = tmp // restore scope so ParseFile doesn't panic
			return nil
		}
		l := e.Nodes
		d.Name = l[0].(*ast.Identifier).Lit
		l = l[1:]
		for _, v := range l {
			d.Args = append(d.Args, v.(*ast.Identifier).Lit) //TODO: remove?
			d.Scope.Insert(v.(*ast.Identifier).Lit, nil)
			p.curScope.Insert(v.(*ast.Identifier).Lit, d)
		}
	case token.IDENT:
		d.Name = p.parseIdentifier().Lit
		p.next()
	default:
		p.file.AddError(p.pos, "Expected identifier(s) but got: ", p.lit)
		p.curScope = tmp // restore scope on the error path
		return nil
	}
	tmp.Insert(d.Name, d)
	for p.tok != token.RPAREN {
		if p.tok != token.LPAREN {
			p.file.AddError(p.pos, "Expected expression but got: ", p.lit)
			p.curScope = tmp // restore scope on the error path
			return nil
		}
		d.Impl = append(d.Impl, p.parseExpression())
		p.next()
	}
	if len(d.Impl) < 1 {
		p.file.AddError(p.pos, "Expected list of expressions but got: ", p.lit)
		p.curScope = tmp
		return nil
	}
	if p.tok != token.RPAREN {
		p.file.AddError(p.pos, "Expected closing paren but got: ", p.lit)
		p.curScope = tmp
		return nil
	}
	p.curScope = tmp
	return d
}
func (p *parser) parseExpression() ast.Node {
lparen := p.pos
p.next()
switch p.tok {
case token.LPAREN:
p.file.AddError(p.pos, "Parse: First element of an expression may not "+
"be another expression!")
return nil
case token.RPAREN:
p.file.AddError(p.pos, "Parse: Empty expression not allowed.")
return nil
case token.LT, token.LTE, token.GT, token.GTE, token.EQ, token.NEQ:
return p.parseComparisonExpression(lparen)
case token.ADD, token.SUB, token.MUL, token.DIV, token.MOD:
return p.parseMathExpression(lparen)
case token.DEFINE:
return p.parseDefineExpression(lparen)
case token.IDENT:
return p.parseUserExpression(lparen)
case token.IF:
return p.parseIfExpression(lparen)
case token.PRINT:
return p.parsePrintExpression(lparen)
case token.SET:
return p.parseSetExpression(lparen)
}
return nil
}
func (p *parser) parseIdentifier() *ast.Identifier {
return &ast.Identifier{p.pos, p.lit}
}
func (p *parser) parseIdentifierList() *ast.Expression {
e := new(ast.Expression)
e.LParen = p.pos
e.Nodes = make([]ast.Node, 0)
p.next()
for p.tok == token.IDENT {
e.Nodes = append(e.Nodes, p.parseIdentifier())
p.next()
}
if p.tok != token.RPAREN {
p.file.AddError(p.pos, "Expected identifier or rparen, got: ", p.lit)
return nil
}
e.RParen = p.pos
p.next()
return e
}
func (p *parser) parseIfExpression(lparen token.Pos) *ast.IfExpr {
ie := new(ast.IfExpr)
ie.LParen, ie.Else = lparen, nil
p.next()
ie.Comp = p.parseSubExpression()
ie.Then = p.parseSubExpression()
if p.tok == token.RPAREN {
ie.Else = nil
ie.RParen = p.pos
return ie
}
ie.Else = p.parseSubExpression()
if p.tok != token.RPAREN {
p.file.AddError(p.pos, "Expected closing paren, got: ", p.lit)
return nil
}
ie.RParen = p.pos
if ie.Comp == nil || ie.Then == nil {
return nil
}
return ie
}
// parseMathExpression parses (op expr expr...) where op is one of
// + - * / %. lp is the position of the opening '('. Returns nil
// (after recording an error) with fewer than two operands.
func (p *parser) parseMathExpression(lp token.Pos) *ast.MathExpr {
	me := new(ast.MathExpr)
	// Record the '(' position: lp was previously accepted but never
	// stored, losing the node's start position.
	// NOTE(review): assumes ast.MathExpr has an LParen field like its
	// sibling expression nodes (it already has RParen) — confirm.
	me.LParen = lp
	me.OpLit = p.lit
	p.next()
	for p.tok != token.RPAREN && p.tok != token.EOF {
		me.ExprList = append(me.ExprList, p.parseSubExpression())
	}
	if len(me.ExprList) < 2 {
		p.file.AddError(p.pos, "Math expressions must have at least 2 arguments")
		return nil
	}
	//me.ExprList = p.parseExpressionList()
	me.RParen = p.pos
	return me
}
func (p *parser) parseNumber() *ast.Number {
i, err := strconv.ParseInt(p.lit, 0, 64)
if err != nil {
p.file.AddError(p.pos, "Parse:", err)
}
return &ast.Number{p.pos, p.lit, int(i)}
}
// parsePrintExpression parses (print expr...); lparen is the '('
// position. The argument list may be empty.
func (p *parser) parsePrintExpression(lparen token.Pos) *ast.PrintExpr {
	pe := new(ast.PrintExpr)
	pe.LParen = lparen
	pe.Nodes = make([]ast.Node, 0)
	p.next()
	// Also stop on EOF: the scanner returns EOF forever, so on an
	// unclosed print expression the original loop never terminated
	// (parseSubExpression kept recording errors at EOF).
	for p.tok != token.RPAREN && p.tok != token.EOF {
		pe.Nodes = append(pe.Nodes, p.parseSubExpression())
	}
	if p.tok != token.RPAREN {
		p.file.AddError(p.pos, "Unknown token:", p.lit, "Expected: ')'")
	}
	pe.RParen = p.pos
	return pe
}
func (p *parser) parseSetExpression(lparen token.Pos) *ast.SetExpr {
se := new(ast.SetExpr)
se.LParen = lparen
// eventually expand this for multiple assignment
p.next()
if p.tok != token.IDENT {
p.file.AddError(p.pos, "First argument to set must be an identifier")
return nil
}
se.Name = p.parseIdentifier().Lit
p.next()
se.Value = p.parseSubExpression()
if p.tok != token.RPAREN {
p.file.AddError(p.pos, "Unknown token:", p.lit, "Expected: ')'")
}
se.RParen = p.pos
p.curScope.Insert(se.Name, se)
return se
}
func (p *parser) parseSubExpression() ast.Node {
for p.tok == token.COMMENT {
p.next()
}
var n ast.Node
switch p.tok {
case token.IDENT:
i := p.parseIdentifier()
if p.curScope.Lookup(i.Lit) == nil {
p.file.AddError(p.pos, "Undeclared identifier: ", i.Lit)
p.next()
return nil
}
n = i
case token.LPAREN:
n = p.parseExpression()
case token.NUMBER:
n = p.parseNumber()
default:
p.file.AddError(p.pos, "Unexpected token: ", p.lit)
}
p.next()
return n
}
// parseUserExpression parses a call to a user-defined function:
// (name arg...). lp is the '(' position (currently unused, mirroring
// the sibling helpers). Returns nil if name is not in scope.
func (p *parser) parseUserExpression(lp token.Pos) *ast.UserExpr {
	if p.curScope.Lookup(p.lit) == nil {
		p.file.AddError(p.pos, "Undeclared identifier: ", p.lit)
		return nil
	}
	ue := new(ast.UserExpr)
	ue.Name = p.lit
	p.next()
	// Also stop on EOF: on an unclosed call the scanner yields EOF
	// forever and the original loop spun indefinitely.
	for p.tok != token.RPAREN && p.tok != token.EOF {
		e := p.parseSubExpression()
		if e != nil {
			ue.Nodes = append(ue.Nodes, e)
		}
	}
	if p.tok != token.RPAREN {
		p.file.AddError(p.pos, "Unknown token:", p.lit, "Expected: ')'")
	}
	ue.RParen = p.pos
	return ue
}
|
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package parser
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
"sync"
"github.com/mvdan/sh/ast"
"github.com/mvdan/sh/token"
)
// Mode controls the parser behaviour via a set of flags.
type Mode uint
const (
ParseComments Mode = 1 << iota // add comments to the AST
PosixConformant // match the POSIX standard where it differs from bash
)
var parserFree = sync.Pool{
New: func() interface{} {
return &parser{helperBuf: new(bytes.Buffer)}
},
}
// Parse reads and parses a shell program with an optional name. It
// returns the parsed program if no issues were encountered. Otherwise,
// an error is returned.
func Parse(src []byte, name string, mode Mode) (*ast.File, error) {
	p := parserFree.Get().(*parser)
	p.reset()
	alloc := &struct {
		f ast.File
		l [16]int
	}{}
	p.f = &alloc.f
	p.f.Name = name
	p.f.Lines = alloc.l[:1]
	p.src, p.mode = src, mode
	p.next()
	p.f.Stmts = p.stmts()
	// Copy the results out BEFORE returning p to the pool: another
	// goroutine may Get and reset p immediately after Put, so reading
	// p.f/p.err after Put is a data race.
	f, err := p.f, p.err
	parserFree.Put(p)
	return f, err
}
type parser struct {
src []byte
f *ast.File
mode Mode
spaced, newLine bool
stopNewline, forbidNested bool
err error
tok token.Token
val string
pos token.Pos
npos int
quote quoteState
// list of pending heredoc bodies
heredocs []*ast.Redirect
hdocStop []byte
helperBuf *bytes.Buffer
litBatch []ast.Lit
wpsBatch []ast.WordPart
stmtBatch []ast.Stmt
}
func (p *parser) lit(pos token.Pos, val string) *ast.Lit {
if len(p.litBatch) == 0 {
p.litBatch = make([]ast.Lit, 32)
}
l := &p.litBatch[0]
l.ValuePos = pos
l.Value = val
p.litBatch = p.litBatch[1:]
return l
}
func (p *parser) wps(wp ast.WordPart) []ast.WordPart {
if len(p.wpsBatch) == 0 {
p.wpsBatch = make([]ast.WordPart, 32)
}
wps := p.wpsBatch[:1]
p.wpsBatch = p.wpsBatch[1:]
wps[0] = wp
return wps
}
func (p *parser) stmt(pos token.Pos) *ast.Stmt {
if len(p.stmtBatch) == 0 {
p.stmtBatch = make([]ast.Stmt, 16)
}
s := &p.stmtBatch[0]
s.Position = pos
p.stmtBatch = p.stmtBatch[1:]
return s
}
type quoteState int
const (
noState quoteState = 1 << iota
subCmd
subCmdBckquo
sglQuotes
dblQuotes
hdocBody
hdocBodyTabs
arithmExpr
arithmExprCmd
arithmExprBrack
testRegexp
switchCase
paramExpName
paramExpInd
paramExpRepl
paramExpExp
allRegTokens = noState | subCmd | subCmdBckquo | switchCase
allArithmExpr = arithmExpr | arithmExprCmd | arithmExprBrack
allRbrack = arithmExprBrack | paramExpInd
allHdoc = hdocBody | hdocBodyTabs
)
func (p *parser) bash() bool { return p.mode&PosixConformant == 0 }
// reset returns the parser to a clean state so a pooled instance can
// be reused for a new source.
func (p *parser) reset() {
	p.spaced, p.newLine = false, false
	p.stopNewline, p.forbidNested = false, false
	p.err = nil
	p.npos = 0
	p.tok, p.quote = token.ILLEGAL, noState
	// Truncate, don't alias: the previous p.heredocs[:] was a no-op
	// that carried pending heredocs from one parse into the next.
	p.heredocs = p.heredocs[:0]
}
func (p *parser) unquotedWordBytes(w ast.Word) ([]byte, bool) {
p.helperBuf.Reset()
didUnquote := false
for _, wp := range w.Parts {
if p.unquotedWordPart(p.helperBuf, wp) {
didUnquote = true
}
}
return p.helperBuf.Bytes(), didUnquote
}
func (p *parser) unquotedWordPart(b *bytes.Buffer, wp ast.WordPart) bool {
switch x := wp.(type) {
case *ast.Lit:
if x.Value[0] == '\\' {
b.WriteString(x.Value[1:])
return true
}
b.WriteString(x.Value)
return false
case *ast.SglQuoted:
b.WriteString(x.Value)
return true
case *ast.Quoted:
for _, wp2 := range x.Parts {
p.unquotedWordPart(b, wp2)
}
return true
default:
// catch-all for unusual cases such as ParamExp
b.Write(p.src[wp.Pos()-1 : wp.End()-1])
return false
}
}
func (p *parser) doHeredocs() {
p.tok = token.ILLEGAL
old := p.quote
hdocs := p.heredocs
p.heredocs = p.heredocs[:0]
for i, r := range hdocs {
if r.Op == token.DHEREDOC {
p.quote = hdocBodyTabs
} else {
p.quote = hdocBody
}
var quoted bool
p.hdocStop, quoted = p.unquotedWordBytes(r.Word)
if i > 0 && p.npos < len(p.src) && p.src[p.npos] == '\n' {
p.npos++
p.f.Lines = append(p.f.Lines, p.npos)
}
if !quoted {
p.next()
r.Hdoc = ast.Word{Parts: p.wordParts()}
continue
}
r.Hdoc = p.hdocLitWord()
}
p.quote = old
}
// got consumes the current token and reports true when it equals tok.
func (p *parser) got(tok token.Token) bool {
	if p.tok != tok {
		return false
	}
	p.next()
	return true
}
// gotRsrv consumes the current token when it is the literal reserved
// word val.
func (p *parser) gotRsrv(val string) bool {
	if p.tok != token.LITWORD || p.val != val {
		return false
	}
	p.next()
	return true
}
// gotSameLine consumes tok only when it appears on the same line as
// the previous token.
func (p *parser) gotSameLine(tok token.Token) bool {
	if p.newLine || p.tok != tok {
		return false
	}
	p.next()
	return true
}
// readableStr returns s quoted when it starts with a lowercase letter
// (i.e. looks like a word), so error messages read naturally;
// operator tokens such as & or } are returned as-is.
func readableStr(s string) string {
	// Guard: s[0] on an empty string would panic.
	if s == "" {
		return s
	}
	// don't quote tokens like & or }
	if s[0] >= 'a' && s[0] <= 'z' {
		return strconv.Quote(s)
	}
	return s
}
func (p *parser) followErr(pos token.Pos, left, right string) {
leftStr := readableStr(left)
p.posErr(pos, "%s must be followed by %s", leftStr, right)
}
func (p *parser) follow(lpos token.Pos, left string, tok token.Token) token.Pos {
pos := p.pos
if !p.got(tok) {
p.followErr(lpos, left, tok.String())
}
return pos
}
func (p *parser) followRsrv(lpos token.Pos, left, val string) token.Pos {
pos := p.pos
if !p.gotRsrv(val) {
p.followErr(lpos, left, fmt.Sprintf(`%q`, val))
}
return pos
}
func (p *parser) followStmts(left string, lpos token.Pos, stops ...string) []*ast.Stmt {
if p.gotSameLine(token.SEMICOLON) {
return nil
}
sts := p.stmts(stops...)
if len(sts) < 1 && !p.newLine {
p.followErr(lpos, left, "a statement list")
}
return sts
}
func (p *parser) followWordTok(tok token.Token, pos token.Pos) ast.Word {
w := p.word()
if w.Parts == nil {
p.followErr(pos, tok.String(), "a word")
}
return w
}
func (p *parser) followWord(s string, pos token.Pos) ast.Word {
w := p.word()
if w.Parts == nil {
p.followErr(pos, s, "a word")
}
return w
}
func (p *parser) stmtEnd(n ast.Node, start, end string) token.Pos {
pos := p.pos
if !p.gotRsrv(end) {
p.posErr(n.Pos(), `%s statement must end with %q`, start, end)
}
return pos
}
func (p *parser) quoteErr(lpos token.Pos, quote token.Token) {
p.posErr(lpos, `reached %s without closing quote %s`, p.tok, quote)
}
func (p *parser) matchingErr(lpos token.Pos, left, right token.Token) {
p.posErr(lpos, `reached %s without matching %s with %s`, p.tok, left, right)
}
func (p *parser) matched(lpos token.Pos, left, right token.Token) token.Pos {
pos := p.pos
if !p.got(right) {
p.matchingErr(lpos, left, right)
}
return pos
}
func (p *parser) errPass(err error) {
if p.err == nil {
if p.quote == arithmExpr {
if err == io.EOF {
p.tok = token.EOF
} else {
p.err = err
}
return
}
if err != io.EOF {
p.err = err
}
p.tok = token.EOF
}
}
// ParseError represents an error found when parsing a source file.
type ParseError struct {
token.Position
Filename, Text string
}
// Error formats the error as "file:line:col: text", omitting the
// file prefix when no filename is set.
func (e *ParseError) Error() string {
	var prefix string
	if e.Filename != "" {
		prefix = e.Filename + ":"
	}
	return fmt.Sprintf("%s%d:%d: %s", prefix, e.Line, e.Column, e.Text)
}
func (p *parser) posErr(pos token.Pos, format string, a ...interface{}) {
p.errPass(&ParseError{
Position: p.f.Position(pos),
Filename: p.f.Name,
Text: fmt.Sprintf(format, a...),
})
}
func (p *parser) curErr(format string, a ...interface{}) {
p.posErr(p.pos, format, a...)
}
func (p *parser) stmts(stops ...string) (sts []*ast.Stmt) {
p.got(token.STOPPED)
if p.forbidNested {
p.curErr("nested statements not allowed in this word")
}
q := p.quote
gotEnd := true
for p.tok != token.EOF {
switch p.tok {
case token.LITWORD:
for _, stop := range stops {
if p.val == stop {
return
}
}
case token.RPAREN:
if q == subCmd {
return
}
case token.BQUOTE:
if q == subCmdBckquo {
return
}
case token.DSEMICOLON, token.SEMIFALL, token.DSEMIFALL:
if q == switchCase {
return
}
p.curErr("%s can only be used in a case clause", p.tok)
}
if !p.newLine && !gotEnd {
p.curErr("statements must be separated by &, ; or a newline")
}
if p.tok == token.EOF {
break
}
if s, end := p.getStmt(true); s == nil {
p.invalidStmtStart()
} else {
sts = append(sts, s)
gotEnd = end
}
p.got(token.STOPPED)
}
return
}
// invalidStmtStart records a token-specific error for tokens that
// cannot begin a statement.
func (p *parser) invalidStmtStart() {
	switch p.tok {
	case token.SEMICOLON, token.AND, token.OR, token.LAND, token.LOR:
		p.curErr("%s can only immediately follow a statement", p.tok)
	case token.RPAREN:
		p.curErr("%s can only be used to close a subshell", p.tok)
	default:
		p.curErr("%s is not a valid start for a statement", p.tok)
	}
}
func (p *parser) word() ast.Word {
if p.tok == token.LITWORD {
w := ast.Word{Parts: p.wps(p.lit(p.pos, p.val))}
p.next()
return w
}
return ast.Word{Parts: p.wordParts()}
}
func (p *parser) gotLit(l *ast.Lit) bool {
l.ValuePos = p.pos
if p.tok == token.LIT || p.tok == token.LITWORD {
l.Value = p.val
p.next()
return true
}
return false
}
func (p *parser) wordParts() (wps []ast.WordPart) {
for {
lastLit := p.tok == token.LIT
n := p.wordPart()
if n == nil {
return
}
wps = append(wps, n)
if p.spaced {
return
}
if p.quote&allHdoc != 0 && p.hdocStop == nil {
// TODO: is this is a hack around a bug?
if p.tok == token.LIT && !lastLit {
wps = append(wps, p.lit(p.pos, p.val))
}
return
}
}
}
func (p *parser) wordPart() ast.WordPart {
switch p.tok {
case token.LIT, token.LITWORD:
l := p.lit(p.pos, p.val)
p.next()
return l
case token.DOLLBR:
return p.paramExp()
case token.DOLLDP, token.DOLLBK:
left := p.tok
ar := &ast.ArithmExp{Token: p.tok, Left: p.pos}
oldQuote := p.quote
if ar.Token == token.DOLLBK {
// treat deprecated $[ as $((
ar.Token = token.DOLLDP
p.quote = arithmExprBrack
} else {
p.quote = arithmExpr
}
if p.err != nil {
return nil
}
p.next()
ar.X = p.arithmExpr(ar.Token, ar.Left, 0, false)
hasEnd := p.peekArithmEnd(p.tok)
oldTok := p.tok
oldErr := p.err
if p.quote == arithmExpr && !hasEnd {
// TODO: this will probably break if there is
// extra lingering state, such as pending
// heredocs
p.quote = oldQuote
p.err = nil
p.tok, p.pos = token.DOLLPR, ar.Left
p.npos = int(ar.Left) + 1
wp := p.wordPart()
if p.err != nil {
if oldErr != nil {
// if retrying fails, report the
// arithmetic expr error as that's got
// higher precedence
p.err = oldErr
} else if !hasEnd {
// if retrying fails and the
// arithmetic expression wasn't
// closed, report that properly
p.err = nil
p.tok = oldTok
goto arithmClose
}
}
return wp
}
if oldErr != nil {
// not retrying, so recover error
p.err = oldErr
p.tok = token.EOF
}
arithmClose:
if left == token.DOLLBK {
if p.tok != token.RBRACK {
p.matchingErr(ar.Left, left, token.RBRACK)
}
p.quote = oldQuote
ar.Right = p.pos
p.next()
} else {
ar.Right = p.arithmEnd(left, ar.Left, oldQuote)
}
return ar
case token.DOLLPR:
cs := &ast.CmdSubst{Left: p.pos}
old := p.quote
p.quote = subCmd
p.next()
cs.Stmts = p.stmts()
p.quote = old
cs.Right = p.matched(cs.Left, token.LPAREN, token.RPAREN)
return cs
case token.DOLLAR:
var b byte
if p.npos >= len(p.src) {
p.errPass(io.EOF)
} else {
b = p.src[p.npos]
}
if p.tok == token.EOF || wordBreak(b) || b == '"' || b == '\'' || b == '`' {
l := p.lit(p.pos, "$")
p.next()
return l
}
pe := &ast.ParamExp{Dollar: p.pos, Short: true}
if b == '#' || b == '$' || b == '?' {
p.npos++
p.pos++
p.tok, p.val = token.LIT, string(b)
} else {
old := p.quote
if p.quote&allHdoc != 0 {
p.quote = noState
}
p.next()
p.quote = old
}
p.gotLit(&pe.Param)
return pe
case token.CMDIN, token.CMDOUT:
ps := &ast.ProcSubst{Op: p.tok, OpPos: p.pos}
old := p.quote
p.quote = subCmd
p.next()
ps.Stmts = p.stmts()
p.quote = old
ps.Rparen = p.matched(ps.OpPos, ps.Op, token.RPAREN)
return ps
case token.SQUOTE:
if p.quote == sglQuotes {
return nil
}
sq := &ast.SglQuoted{Quote: p.pos}
bs, found := p.readUntil('\'')
rem := bs
for {
i := bytes.IndexByte(rem, '\n')
if i < 0 {
p.npos += len(rem)
break
}
p.npos += i + 1
p.f.Lines = append(p.f.Lines, p.npos)
rem = rem[i+1:]
}
p.npos++
if !found {
p.posErr(sq.Pos(), `reached EOF without closing quote %s`, token.SQUOTE)
}
sq.Value = string(bs)
p.next()
return sq
case token.DQUOTE:
if p.quote == dblQuotes {
return nil
}
fallthrough
case token.DOLLSQ, token.DOLLDQ:
q := &ast.Quoted{Quote: p.tok, QuotePos: p.pos}
stop := q.Quote
old := p.quote
switch q.Quote {
case token.DOLLSQ:
stop = token.SQUOTE
p.quote = sglQuotes
case token.DOLLDQ:
stop = token.DQUOTE
p.quote = dblQuotes
case token.SQUOTE:
p.quote = sglQuotes
case token.DQUOTE:
p.quote = dblQuotes
}
p.next()
q.Parts = p.wordParts()
p.quote = old
if !p.got(stop) {
p.quoteErr(q.Pos(), stop)
}
return q
case token.BQUOTE:
if p.quote == subCmdBckquo {
return nil
}
cs := &ast.CmdSubst{Backquotes: true, Left: p.pos}
old := p.quote
p.quote = subCmdBckquo
p.next()
cs.Stmts = p.stmts()
p.quote = old
cs.Right = p.pos
if !p.got(token.BQUOTE) {
p.quoteErr(cs.Pos(), token.BQUOTE)
}
return cs
}
return nil
}
func arithmOpLevel(tok token.Token) int {
switch tok {
case token.COMMA:
return 0
case token.ADDASSGN, token.SUBASSGN, token.MULASSGN, token.QUOASSGN,
token.REMASSGN, token.ANDASSGN, token.ORASSGN, token.XORASSGN,
token.SHLASSGN, token.SHRASSGN:
return 1
case token.ASSIGN:
return 2
case token.QUEST, token.COLON:
return 3
case token.LOR:
return 4
case token.LAND:
return 5
case token.AND, token.OR, token.XOR:
return 5
case token.EQL, token.NEQ:
return 6
case token.LSS, token.GTR, token.LEQ, token.GEQ:
return 7
case token.SHL, token.SHR:
return 8
case token.ADD, token.SUB:
return 9
case token.MUL, token.QUO, token.REM:
return 10
case token.POW:
return 11
}
return -1
}
func (p *parser) arithmExpr(ftok token.Token, fpos token.Pos, level int, compact bool) ast.ArithmExpr {
if p.tok == token.EOF || p.peekArithmEnd(p.tok) {
return nil
}
var left ast.ArithmExpr
if level > 11 {
left = p.arithmExprBase(ftok, fpos, compact)
} else {
left = p.arithmExpr(ftok, fpos, level+1, compact)
}
if compact && p.spaced {
return left
}
newLevel := arithmOpLevel(p.tok)
if p.quote == arithmExpr && p.tok == token.SEMICOLON {
p.curErr("not a valid arithmetic operator: %v", p.tok)
newLevel = 0
} else if p.tok == token.LIT || p.tok == token.LITWORD {
p.curErr("not a valid arithmetic operator: %s", p.val)
newLevel = 0
}
if newLevel < 0 || newLevel < level {
return left
}
b := &ast.BinaryExpr{
OpPos: p.pos,
Op: p.tok,
X: left,
}
if p.next(); compact && p.spaced {
p.followErr(b.OpPos, b.Op.String(), "an expression")
}
if b.Y = p.arithmExpr(b.Op, b.OpPos, newLevel, compact); b.Y == nil {
p.followErr(b.OpPos, b.Op.String(), "an expression")
}
return b
}
func (p *parser) arithmExprBase(ftok token.Token, fpos token.Pos, compact bool) ast.ArithmExpr {
if p.tok == token.INC || p.tok == token.DEC || p.tok == token.NOT {
pre := &ast.UnaryExpr{OpPos: p.pos, Op: p.tok}
p.next()
pre.X = p.arithmExprBase(pre.Op, pre.OpPos, compact)
return pre
}
var x ast.ArithmExpr
switch p.tok {
case token.LPAREN:
pe := &ast.ParenExpr{Lparen: p.pos}
p.next()
if pe.X = p.arithmExpr(token.LPAREN, pe.Lparen, 0, false); pe.X == nil {
p.posErr(pe.Lparen, "parentheses must enclose an expression")
}
pe.Rparen = p.matched(pe.Lparen, token.LPAREN, token.RPAREN)
x = pe
case token.ADD, token.SUB:
ue := &ast.UnaryExpr{OpPos: p.pos, Op: p.tok}
if p.next(); compact && p.spaced {
p.followErr(ue.OpPos, ue.Op.String(), "an expression")
}
if ue.X = p.arithmExpr(ue.Op, ue.OpPos, 0, compact); ue.X == nil {
p.followErr(ue.OpPos, ue.Op.String(), "an expression")
}
x = ue
default:
w := p.followWordTok(ftok, fpos)
x = &w
}
if compact && p.spaced {
return x
}
if p.tok == token.INC || p.tok == token.DEC {
u := &ast.UnaryExpr{
Post: true,
OpPos: p.pos,
Op: p.tok,
X: x,
}
p.next()
return u
}
return x
}
// gotParamLit consumes the current token into l if it can act as a
// parameter name inside ${ }, reporting whether it did. l.ValuePos is
// set either way.
func (p *parser) gotParamLit(l *ast.Lit) bool {
	l.ValuePos = p.pos
	var val string
	switch p.tok {
	case token.LIT, token.LITWORD:
		val = p.val
	case token.DOLLAR:
		val = "$"
	case token.QUEST:
		val = "?"
	default:
		return false
	}
	l.Value = val
	p.next()
	return true
}
// paramExp parses a "${...}" parameter expansion, handling length
// (${#a}), indexing (${a[i]}), replacement (${a/x/y}) and the other
// expansion operators.
func (p *parser) paramExp() *ast.ParamExp {
	pe := &ast.ParamExp{Dollar: p.pos}
	old := p.quote
	p.quote = paramExpName
	p.next()
	pe.Length = p.got(token.HASH)
	if !p.gotParamLit(&pe.Param) && !pe.Length {
		p.posErr(pe.Dollar, "parameter expansion requires a literal")
	}
	if p.tok == token.RBRACE {
		// simple ${a} or ${#a}
		p.quote = old
		p.next()
		return pe
	}
	if p.tok == token.LBRACK {
		// array index: ${a[i]}
		lpos := p.pos
		p.quote = paramExpInd
		p.next()
		pe.Ind = &ast.Index{Word: p.word()}
		p.quote = paramExpName
		p.matched(lpos, token.LBRACK, token.RBRACK)
	}
	if p.tok == token.RBRACE {
		p.quote = old
		p.next()
		return pe
	}
	if pe.Length {
		p.curErr(`can only get length of a simple parameter`)
	}
	if p.tok == token.QUO || p.tok == token.DQUO {
		// replacement: ${a/x/y} (DQUO means replace-all, ${a//x/y})
		pe.Repl = &ast.Replace{All: p.tok == token.DQUO}
		p.quote = paramExpRepl
		p.next()
		pe.Repl.Orig = p.word()
		if p.tok == token.QUO {
			p.quote = paramExpExp
			p.next()
			pe.Repl.With = p.word()
		}
	} else {
		// any other expansion operator followed by a word
		pe.Exp = &ast.Expansion{Op: p.tok}
		p.quote = paramExpExp
		p.next()
		pe.Exp.Word = p.word()
	}
	p.quote = old
	p.matched(pe.Dollar, token.DOLLBR, token.RBRACE)
	return pe
}
// peekArithmEnd reports whether tok begins the "))" that closes an
// arithmetic expression: a right paren with another ')' immediately
// following in the source.
func (p *parser) peekArithmEnd(tok token.Token) bool {
	if tok != token.RPAREN {
		return false
	}
	return p.npos < len(p.src) && p.src[p.npos] == ')'
}
// arithmEnd expects the "))" closing an arithmetic expression that
// began with ltok at lpos, restores the previous quote state, and
// returns the position of the closing token.
func (p *parser) arithmEnd(ltok token.Token, lpos token.Pos, old quoteState) token.Pos {
	if p.peekArithmEnd(p.tok) {
		// skip the second ')' byte
		p.npos++
	} else {
		p.matchingErr(lpos, ltok, token.DRPAREN)
	}
	p.quote = old
	pos := p.pos
	p.next()
	return pos
}
// stopToken reports whether tok terminates a statement, such as a
// semicolon, a pipe-chain operator or end of file.
func stopToken(tok token.Token) bool {
	switch tok {
	case token.EOF, token.SEMICOLON, token.AND, token.OR,
		token.LAND, token.LOR, token.PIPEALL, token.DSEMICOLON,
		token.SEMIFALL, token.DSEMIFALL:
		return true
	}
	return false
}
// validIdent reports whether s is a valid shell name: a non-empty
// sequence of ASCII letters, digits and underscores that does not
// start with a digit.
func validIdent(s string) bool {
	// the original accepted the empty string; reject it explicitly so
	// a stray leading "=" can never form a nameless assignment
	if s == "" {
		return false
	}
	for i, c := range s {
		switch {
		case 'a' <= c && c <= 'z':
		case 'A' <= c && c <= 'Z':
		case c == '_':
		case i > 0 && '0' <= c && c <= '9':
		default:
			return false
		}
	}
	return true
}
// getAssign tries to parse the current literal as an assignment such
// as a=b, a+=b or a=(b c), returning nil and false if it is not one.
func (p *parser) getAssign() (*ast.Assign, bool) {
	i := strings.Index(p.val, "=")
	if i <= 0 {
		return nil, false
	}
	if p.val[i-1] == '+' {
		// "+=" append assignment; the name ends before the '+'
		i--
	}
	if !validIdent(p.val[:i]) {
		return nil, false
	}
	as := &ast.Assign{}
	as.Name = p.lit(p.pos, p.val[:i])
	if p.val[i] == '+' {
		as.Append = true
		i++
	}
	// any text after the "=" within the same token starts the value
	start := p.lit(p.pos+1, p.val[i+1:])
	if start.Value != "" {
		start.ValuePos += token.Pos(i)
		as.Value.Parts = append(as.Value.Parts, start)
	}
	p.next()
	if p.spaced {
		return as, true
	}
	if start.Value == "" && p.tok == token.LPAREN {
		// array literal: a=(b c d)
		ae := &ast.ArrayExpr{Lparen: p.pos}
		p.next()
		for p.tok != token.EOF && p.tok != token.RPAREN {
			if w := p.word(); w.Parts == nil {
				p.curErr("array elements must be words")
			} else {
				ae.List = append(ae.List, w)
			}
		}
		ae.Rparen = p.matched(ae.Lparen, token.LPAREN, token.RPAREN)
		as.Value.Parts = append(as.Value.Parts, ae)
	} else if !p.newLine && !stopToken(p.tok) {
		// the value continues with more word parts
		if w := p.word(); start.Value == "" {
			as.Value = w
		} else {
			as.Value.Parts = append(as.Value.Parts, w.Parts...)
		}
	}
	return as, true
}
// peekRedir reports whether the current token starts a redirection,
// including the "foo>bar" case where a literal is immediately
// followed by a redirect operator in the source.
func (p *parser) peekRedir() bool {
	switch p.tok {
	case token.LITWORD:
		return p.npos < len(p.src) && (p.src[p.npos] == '>' || p.src[p.npos] == '<')
	case token.GTR, token.SHR, token.LSS, token.DPLIN, token.DPLOUT,
		token.RDRINOUT, token.SHL, token.DHEREDOC, token.WHEREDOC,
		token.RDRALL, token.APPALL:
		return true
	}
	return false
}
// doRedirect parses one redirection, with an optional leading fd
// number, and appends it to s.Redirs.
func (p *parser) doRedirect(s *ast.Stmt) {
	r := &ast.Redirect{}
	var l ast.Lit
	if p.gotLit(&l) {
		// e.g. the "2" in "2>err"
		r.N = &l
	}
	r.Op, r.OpPos = p.tok, p.pos
	p.next()
	switch r.Op {
	case token.SHL, token.DHEREDOC:
		// heredoc: queue the redirect so its body is read at the next
		// newline (see doHeredocs)
		p.stopNewline = true
		p.forbidNested = true
		if p.newLine {
			p.curErr("heredoc stop word must be on the same line")
		}
		r.Word = p.followWordTok(r.Op, r.OpPos)
		p.forbidNested = false
		p.heredocs = append(p.heredocs, r)
		p.got(token.STOPPED)
	default:
		if p.newLine {
			p.curErr("redirect word must be on the same line")
		}
		r.Word = p.followWordTok(r.Op, r.OpPos)
	}
	s.Redirs = append(s.Redirs, r)
}
// getStmt parses a full statement: negation, assignment/redirect
// prefixes, the command pipe, and any trailing &&/||/& operators.
// gotEnd reports whether a terminator was consumed; readEnd controls
// whether a trailing ";" may be consumed at all.
func (p *parser) getStmt(readEnd bool) (s *ast.Stmt, gotEnd bool) {
	s = p.stmt(p.pos)
	if p.gotRsrv("!") {
		s.Negated = true
	}
preLoop:
	for {
		// gather leading assignments and redirects
		switch p.tok {
		case token.LIT, token.LITWORD:
			if as, ok := p.getAssign(); ok {
				s.Assigns = append(s.Assigns, as)
			} else if p.npos < len(p.src) && (p.src[p.npos] == '>' || p.src[p.npos] == '<') {
				p.doRedirect(s)
			} else {
				break preLoop
			}
		case token.GTR, token.SHR, token.LSS, token.DPLIN, token.DPLOUT,
			token.RDRINOUT, token.SHL, token.DHEREDOC,
			token.WHEREDOC, token.RDRALL, token.APPALL:
			p.doRedirect(s)
		default:
			break preLoop
		}
		// a statement may consist of assignments/redirects alone
		switch {
		case p.newLine, p.tok == token.EOF:
			return
		case p.tok == token.SEMICOLON:
			if readEnd {
				p.next()
				gotEnd = true
			}
			return
		}
	}
	if s = p.gotStmtPipe(s); s == nil {
		return
	}
	switch p.tok {
	case token.LAND, token.LOR:
		// "&&" / "||" chain into a binary command
		b := &ast.BinaryCmd{OpPos: p.pos, Op: p.tok, X: s}
		p.next()
		p.got(token.STOPPED)
		if b.Y, _ = p.getStmt(false); b.Y == nil {
			p.followErr(b.OpPos, b.Op.String(), "a statement")
		}
		s = p.stmt(s.Position)
		s.Cmd = b
		if readEnd && p.gotSameLine(token.SEMICOLON) {
			gotEnd = true
		}
	case token.AND:
		// "&": background the statement
		p.next()
		s.Background = true
		gotEnd = true
	case token.SEMICOLON:
		if !p.newLine && readEnd {
			p.next()
			gotEnd = true
		}
	}
	return
}
// gotStmtPipe parses the command for s, dispatching on the first
// token or reserved word, plus trailing redirects and "|"/"|&" pipes.
// It returns nil if no command was found.
func (p *parser) gotStmtPipe(s *ast.Stmt) *ast.Stmt {
	switch p.tok {
	case token.LPAREN:
		s.Cmd = p.subshell()
	case token.DLPAREN:
		s.Cmd = p.arithmExpCmd()
	case token.LITWORD:
		switch {
		case p.val == "}":
			p.curErr("%s can only be used to close a block", p.val)
		case p.val == "{":
			s.Cmd = p.block()
		case p.val == "if":
			s.Cmd = p.ifClause()
		case p.val == "while":
			s.Cmd = p.whileClause()
		case p.val == "until":
			s.Cmd = p.untilClause()
		case p.val == "for":
			s.Cmd = p.forClause()
		case p.val == "case":
			s.Cmd = p.caseClause()
		case p.bash() && p.val == "[[":
			s.Cmd = p.testClause()
		case p.bash() && (p.val == "declare" || p.val == "local"):
			s.Cmd = p.declClause()
		case p.bash() && p.val == "eval":
			s.Cmd = p.evalClause()
		case p.bash() && p.val == "let":
			s.Cmd = p.letClause()
		case p.bash() && p.val == "function":
			s.Cmd = p.bashFuncDecl()
		default:
			// plain word: either a "foo()" function declaration or the
			// start of a call expression
			name := ast.Lit{ValuePos: p.pos, Value: p.val}
			p.next()
			if p.gotSameLine(token.LPAREN) {
				p.follow(name.ValuePos, "foo(", token.RPAREN)
				s.Cmd = p.funcDecl(name, name.ValuePos)
			} else {
				s.Cmd = p.callExpr(s, ast.Word{
					Parts: p.wps(&name),
				})
			}
		}
	case token.LIT, token.DOLLBR, token.DOLLDP, token.DOLLPR, token.DOLLAR,
		token.CMDIN, token.CMDOUT, token.SQUOTE, token.DOLLSQ,
		token.DQUOTE, token.DOLLDQ, token.BQUOTE, token.DOLLBK:
		// word made of multiple parts; cannot be a function name
		w := ast.Word{Parts: p.wordParts()}
		if p.gotSameLine(token.LPAREN) && p.err == nil {
			rawName := string(p.src[w.Pos()-1 : w.End()-1])
			p.posErr(w.Pos(), "invalid func name: %q", rawName)
		}
		s.Cmd = p.callExpr(s, w)
	}
	for !p.newLine && p.peekRedir() {
		p.doRedirect(s)
	}
	if s.Cmd == nil && len(s.Redirs) == 0 && !s.Negated && len(s.Assigns) == 0 {
		return nil
	}
	if p.tok == token.OR || p.tok == token.PIPEALL {
		// "|" or "|&": wrap into a binary pipe command
		b := &ast.BinaryCmd{OpPos: p.pos, Op: p.tok, X: s}
		p.next()
		p.got(token.STOPPED)
		if b.Y = p.gotStmtPipe(p.stmt(p.pos)); b.Y == nil {
			p.followErr(b.OpPos, b.Op.String(), "a statement")
		}
		s = p.stmt(s.Position)
		s.Cmd = b
	}
	return s
}
// subshell parses a "( ... )" subshell command, starting at the
// opening paren.
func (p *parser) subshell() *ast.Subshell {
	sub := &ast.Subshell{Lparen: p.pos}
	prevQuote := p.quote
	p.quote = subCmd
	p.next()
	sub.Stmts = p.stmts()
	p.quote = prevQuote
	sub.Rparen = p.matched(sub.Lparen, token.LPAREN, token.RPAREN)
	return sub
}
// arithmExpCmd parses a "(( ... ))" arithmetic command.
func (p *parser) arithmExpCmd() *ast.ArithmExp {
	cmd := &ast.ArithmExp{Token: p.tok, Left: p.pos}
	prevQuote := p.quote
	p.quote = arithmExprCmd
	p.next()
	cmd.X = p.arithmExpr(cmd.Token, cmd.Left, 0, false)
	cmd.Right = p.arithmEnd(cmd.Token, cmd.Left, prevQuote)
	return cmd
}
// block parses a "{ ...; }" command block, starting at the opening
// brace keyword.
func (p *parser) block() *ast.Block {
	blk := &ast.Block{Lbrace: p.pos}
	p.next()
	blk.Stmts = p.stmts("}")
	blk.Rbrace = p.pos
	if !p.gotRsrv("}") {
		p.matchingErr(blk.Lbrace, token.LBRACE, token.RBRACE)
	}
	return blk
}
// ifClause parses "if <cond>; then ...; [elif ...;] [else ...;] fi".
func (p *parser) ifClause() *ast.IfClause {
	ic := &ast.IfClause{If: p.pos}
	p.next()
	ic.CondStmts = p.followStmts("if", ic.If, "then")
	ic.Then = p.followRsrv(ic.If, "if <cond>", "then")
	ic.ThenStmts = p.followStmts("then", ic.Then, "fi", "elif", "else")
	// record the position before gotRsrv consumes "elif", so Elif
	// points at the keyword itself
	elifPos := p.pos
	for p.gotRsrv("elif") {
		elf := &ast.Elif{Elif: elifPos}
		elf.CondStmts = p.followStmts("elif", elf.Elif, "then")
		elf.Then = p.followRsrv(elf.Elif, "elif <cond>", "then")
		elf.ThenStmts = p.followStmts("then", elf.Then, "fi", "elif", "else")
		ic.Elifs = append(ic.Elifs, elf)
		elifPos = p.pos
	}
	if elsePos := p.pos; p.gotRsrv("else") {
		ic.Else = elsePos
		ic.ElseStmts = p.followStmts("else", ic.Else, "fi")
	}
	ic.Fi = p.stmtEnd(ic, "if", "fi")
	return ic
}
// whileClause parses "while <cond>; do <stmts>; done".
func (p *parser) whileClause() *ast.WhileClause {
	cl := &ast.WhileClause{While: p.pos}
	p.next()
	cl.CondStmts = p.followStmts("while", cl.While, "do")
	cl.Do = p.followRsrv(cl.While, "while <cond>", "do")
	cl.DoStmts = p.followStmts("do", cl.Do, "done")
	cl.Done = p.stmtEnd(cl, "while", "done")
	return cl
}
// untilClause parses "until <cond>; do <stmts>; done".
func (p *parser) untilClause() *ast.UntilClause {
	cl := &ast.UntilClause{Until: p.pos}
	p.next()
	cl.CondStmts = p.followStmts("until", cl.Until, "do")
	cl.Do = p.followRsrv(cl.Until, "until <cond>", "do")
	cl.DoStmts = p.followStmts("do", cl.Do, "done")
	cl.Done = p.stmtEnd(cl, "until", "done")
	return cl
}
// forClause parses "for <loop header>; do <stmts>; done".
func (p *parser) forClause() *ast.ForClause {
	cl := &ast.ForClause{For: p.pos}
	p.next()
	cl.Loop = p.loop(cl.For)
	cl.Do = p.followRsrv(cl.For, "for foo [in words]", "do")
	cl.DoStmts = p.followStmts("do", cl.Do, "done")
	cl.Done = p.stmtEnd(cl, "for", "done")
	return cl
}
// loop parses the header of a for clause: either a C-style
// ((init; cond; post)) loop or a "name [in words]" word iteration.
func (p *parser) loop(forPos token.Pos) ast.Loop {
	if p.tok == token.DLPAREN {
		cl := &ast.CStyleLoop{Lparen: p.pos}
		old := p.quote
		p.quote = arithmExprCmd
		p.next()
		cl.Init = p.arithmExpr(token.DLPAREN, cl.Lparen, 0, false)
		scPos := p.pos
		p.follow(p.pos, "expression", token.SEMICOLON)
		cl.Cond = p.arithmExpr(token.SEMICOLON, scPos, 0, false)
		scPos = p.pos
		p.follow(p.pos, "expression", token.SEMICOLON)
		cl.Post = p.arithmExpr(token.SEMICOLON, scPos, 0, false)
		cl.Rparen = p.arithmEnd(token.DLPAREN, cl.Lparen, old)
		p.gotSameLine(token.SEMICOLON)
		return cl
	}
	wi := &ast.WordIter{}
	if !p.gotLit(&wi.Name) {
		p.followErr(forPos, "for", "a literal")
	}
	if p.gotRsrv("in") {
		// explicit word list until the end of the line or ";"
		for !p.newLine && p.tok != token.EOF && p.tok != token.SEMICOLON {
			if w := p.word(); w.Parts == nil {
				p.curErr("word list can only contain words")
			} else {
				wi.List = append(wi.List, w)
			}
		}
		p.gotSameLine(token.SEMICOLON)
	} else if !p.newLine && !p.got(token.SEMICOLON) {
		p.followErr(forPos, "for foo", `"in", ; or a newline`)
	}
	return wi
}
// caseClause parses "case <word> in <patterns>... esac".
func (p *parser) caseClause() *ast.CaseClause {
	cl := &ast.CaseClause{Case: p.pos}
	p.next()
	cl.Word = p.followWord("case", cl.Case)
	p.followRsrv(cl.Case, "case x", "in")
	cl.List = p.patLists()
	cl.Esac = p.stmtEnd(cl, "case", "esac")
	return cl
}
// patLists parses the pattern lists of a case clause, stopping before
// (but not consuming) "esac".
func (p *parser) patLists() (pls []*ast.PatternList) {
	for p.tok != token.EOF && !(p.tok == token.LITWORD && p.val == "esac") {
		pl := &ast.PatternList{}
		p.got(token.LPAREN)
		// one or more "|"-separated patterns, closed by ")"
		for p.tok != token.EOF {
			if w := p.word(); w.Parts == nil {
				p.curErr("case patterns must consist of words")
			} else {
				pl.Patterns = append(pl.Patterns, w)
			}
			if p.tok == token.RPAREN {
				break
			}
			if !p.got(token.OR) {
				p.curErr("case patterns must be separated with |")
			}
		}
		old := p.quote
		p.quote = switchCase
		p.next()
		pl.Stmts = p.stmts("esac")
		p.quote = old
		pl.OpPos = p.pos
		if p.tok != token.DSEMICOLON && p.tok != token.SEMIFALL && p.tok != token.DSEMIFALL {
			// the final list may omit its terminator; default to ";;"
			pl.Op = token.DSEMICOLON
			pls = append(pls, pl)
			break
		}
		pl.Op = p.tok
		p.next()
		pls = append(pls, pl)
		p.got(token.STOPPED)
	}
	return
}
// testClause parses a bash "[[ ... ]]" test command.
func (p *parser) testClause() *ast.TestClause {
	tc := &ast.TestClause{Left: p.pos}
	p.next()
	if p.tok == token.EOF || p.gotRsrv("]]") {
		p.posErr(tc.Left, `test clause requires at least one expression`)
	}
	tc.X = p.testExpr(token.DLBRCK, tc.Left)
	tc.Right = p.pos
	if !p.gotRsrv("]]") {
		p.matchingErr(tc.Left, token.DLBRCK, token.DRBRCK)
	}
	return tc
}
// testExpr parses an expression inside [[ ]], handling unary and
// binary test operators, negation and parentheses. It returns nil
// when the closing "]]" (or a right paren) is reached.
func (p *parser) testExpr(ftok token.Token, fpos token.Pos) ast.ArithmExpr {
	if p.tok == token.EOF || (p.tok == token.LITWORD && p.val == "]]") {
		return nil
	}
	if p.tok == token.LITWORD {
		// promote literals such as "-f" to unary operator tokens
		if op := testUnaryOp(p.val); op != token.ILLEGAL {
			p.tok = op
		}
	}
	if p.tok == token.NOT {
		u := &ast.UnaryExpr{OpPos: p.pos, Op: p.tok}
		p.next()
		u.X = p.testExpr(u.Op, u.OpPos)
		return u
	}
	var left ast.ArithmExpr
	switch p.tok {
	case token.TEXISTS, token.TREGFILE, token.TDIRECT, token.TCHARSP,
		token.TBLCKSP, token.TNMPIPE, token.TSOCKET, token.TSMBLINK,
		token.TSGIDSET, token.TSUIDSET, token.TREAD, token.TWRITE,
		token.TEXEC, token.TNOEMPTY, token.TFDTERM, token.TEMPSTR,
		token.TNEMPSTR, token.TOPTSET, token.TVARSET, token.TNRFVAR:
		// unary file/string test, e.g. [[ -f foo ]]
		u := &ast.UnaryExpr{OpPos: p.pos, Op: p.tok}
		p.next()
		w := p.followWordTok(ftok, fpos)
		u.X = &w
		left = u
	case token.LPAREN:
		pe := &ast.ParenExpr{Lparen: p.pos}
		p.next()
		if pe.X = p.testExpr(token.LPAREN, pe.Lparen); pe.X == nil {
			p.posErr(pe.Lparen, "parentheses must enclose an expression")
		}
		pe.Rparen = p.matched(pe.Lparen, token.LPAREN, token.RPAREN)
		left = pe
	case token.RPAREN:
		return nil
	default:
		w := p.followWordTok(ftok, fpos)
		left = &w
	}
	if p.tok == token.EOF || (p.tok == token.LITWORD && p.val == "]]") {
		return left
	}
	// a binary operator must follow the left operand
	switch p.tok {
	case token.LAND, token.LOR, token.LSS, token.GTR:
	case token.LITWORD:
		if p.tok = testBinaryOp(p.val); p.tok == token.ILLEGAL {
			p.curErr("not a valid test operator: %s", p.val)
		}
	case token.RPAREN:
		return left
	default:
		p.curErr("not a valid test operator: %v", p.tok)
	}
	b := &ast.BinaryExpr{
		OpPos: p.pos,
		Op:    p.tok,
		X:     left,
	}
	if p.tok == token.TREMATCH {
		// the right side of =~ is lexed as a regular expression
		old := p.quote
		p.quote = testRegexp
		p.next()
		p.quote = old
	} else {
		p.next()
	}
	if b.Y = p.testExpr(b.Op, b.OpPos); b.Y == nil {
		p.followErr(b.OpPos, b.Op.String(), "an expression")
	}
	return b
}
// testUnaryOp translates a literal unary test operator such as "-f"
// into its token, or token.ILLEGAL if val is not one. All operators
// besides "!" are a dash followed by a single letter.
func testUnaryOp(val string) token.Token {
	if val == "!" {
		return token.NOT
	}
	if len(val) != 2 || val[0] != '-' {
		return token.ILLEGAL
	}
	switch val[1] {
	case 'e', 'a':
		return token.TEXISTS
	case 'f':
		return token.TREGFILE
	case 'd':
		return token.TDIRECT
	case 'c':
		return token.TCHARSP
	case 'b':
		return token.TBLCKSP
	case 'p':
		return token.TNMPIPE
	case 'S':
		return token.TSOCKET
	case 'L', 'h':
		return token.TSMBLINK
	case 'g':
		return token.TSGIDSET
	case 'u':
		return token.TSUIDSET
	case 'r':
		return token.TREAD
	case 'w':
		return token.TWRITE
	case 'x':
		return token.TEXEC
	case 's':
		return token.TNOEMPTY
	case 't':
		return token.TFDTERM
	case 'z':
		return token.TEMPSTR
	case 'n':
		return token.TNEMPSTR
	case 'o':
		return token.TOPTSET
	case 'v':
		return token.TVARSET
	case 'R':
		return token.TNRFVAR
	default:
		return token.ILLEGAL
	}
}
// testBinaryOp translates a literal binary test operator such as "=="
// or "-eq" into its token, or token.ILLEGAL if val is not one.
func testBinaryOp(val string) token.Token {
	// symbolic comparison operators
	switch val {
	case "=":
		return token.ASSIGN
	case "==":
		return token.EQL
	case "=~":
		return token.TREMATCH
	case "!=":
		return token.NEQ
	}
	// dash-flag operators
	switch val {
	case "-nt":
		return token.TNEWER
	case "-ot":
		return token.TOLDER
	case "-ef":
		return token.TDEVIND
	case "-eq":
		return token.TEQL
	case "-ne":
		return token.TNEQ
	case "-le":
		return token.TLEQ
	case "-ge":
		return token.TGEQ
	case "-lt":
		return token.TLSS
	case "-gt":
		return token.TGTR
	}
	return token.ILLEGAL
}
// declClause parses a bash "declare"/"local" builtin with its option
// words and assignments.
func (p *parser) declClause() *ast.DeclClause {
	ds := &ast.DeclClause{Declare: p.pos, Local: p.val == "local"}
	p.next()
	// leading "-x"-style option words
	for p.tok == token.LITWORD && p.val[0] == '-' {
		ds.Opts = append(ds.Opts, p.word())
	}
	for !p.newLine && !stopToken(p.tok) && !p.peekRedir() {
		if as, ok := p.getAssign(); ok {
			ds.Assigns = append(ds.Assigns, as)
		} else if w := p.word(); w.Parts == nil {
			p.followErr(p.pos, "declare", "words")
		} else {
			// a bare word becomes an assignment with no name
			ds.Assigns = append(ds.Assigns, &ast.Assign{Value: w})
		}
	}
	return ds
}
// evalClause parses a bash "eval" builtin and its single statement.
func (p *parser) evalClause() *ast.EvalClause {
	cl := &ast.EvalClause{Eval: p.pos}
	p.next()
	cl.Stmt, _ = p.getStmt(false)
	return cl
}
// letClause parses a bash "let" builtin: one or more space-separated
// arithmetic expressions, parsed in compact mode.
func (p *parser) letClause() *ast.LetClause {
	lc := &ast.LetClause{Let: p.pos}
	old := p.quote
	p.quote = arithmExprCmd
	p.next()
	p.stopNewline = true
	for !p.newLine && !stopToken(p.tok) && p.tok != token.STOPPED && !p.peekRedir() {
		// compact=true so each whitespace-delimited group is one expr
		x := p.arithmExpr(token.LET, lc.Let, 0, true)
		if x == nil {
			p.followErr(p.pos, "let", "arithmetic expressions")
		}
		lc.Exprs = append(lc.Exprs, x)
	}
	if len(lc.Exprs) == 0 {
		p.posErr(lc.Let, "let clause requires at least one expression")
	}
	p.stopNewline = false
	p.quote = old
	p.got(token.STOPPED)
	return lc
}
// bashFuncDecl parses a bash-style "function foo { ... }" declaration,
// with an optional "()" after the name.
func (p *parser) bashFuncDecl() *ast.FuncDecl {
	fpos := p.pos
	p.next()
	if p.tok != token.LITWORD {
		// a multi-part word cannot be a function name
		if w := p.followWord("function", fpos); p.err == nil {
			rawName := string(p.src[w.Pos()-1 : w.End()-1])
			p.posErr(w.Pos(), "invalid func name: %q", rawName)
		}
	}
	name := ast.Lit{ValuePos: p.pos, Value: p.val}
	p.next()
	if p.gotSameLine(token.LPAREN) {
		p.follow(name.ValuePos, "foo(", token.RPAREN)
	}
	return p.funcDecl(name, fpos)
}
// callExpr parses the remaining words and redirects of a simple
// command whose first word is w; redirects are attached to s.
func (p *parser) callExpr(s *ast.Stmt, w ast.Word) *ast.CallExpr {
	// allocate the node together with room for its first few args,
	// in a single allocation
	alloc := &struct {
		ce ast.CallExpr
		ws [4]ast.Word
	}{}
	ce := &alloc.ce
	ce.Args = alloc.ws[:1]
	ce.Args[0] = w
	for !p.newLine {
		switch p.tok {
		case token.EOF, token.SEMICOLON, token.AND, token.OR,
			token.LAND, token.LOR, token.PIPEALL,
			token.DSEMICOLON, token.SEMIFALL, token.DSEMIFALL:
			return ce
		case token.STOPPED:
			p.next()
		case token.LITWORD:
			if p.npos < len(p.src) && (p.src[p.npos] == '>' || p.src[p.npos] == '<') {
				// "foo>bar": the literal belongs to a redirect
				p.doRedirect(s)
				continue
			}
			ce.Args = append(ce.Args, ast.Word{
				Parts: p.wps(p.lit(p.pos, p.val)),
			})
			p.next()
		case token.BQUOTE:
			if p.quote == subCmdBckquo {
				// closing backquote of the enclosing substitution
				return ce
			}
			fallthrough
		case token.LIT, token.DOLLBR, token.DOLLDP, token.DOLLPR,
			token.DOLLAR, token.CMDIN, token.CMDOUT, token.SQUOTE,
			token.DOLLSQ, token.DQUOTE, token.DOLLDQ, token.DOLLBK:
			ce.Args = append(ce.Args, ast.Word{Parts: p.wordParts()})
		case token.GTR, token.SHR, token.LSS, token.DPLIN, token.DPLOUT,
			token.RDRINOUT, token.SHL, token.DHEREDOC,
			token.WHEREDOC, token.RDRALL, token.APPALL:
			p.doRedirect(s)
		case token.RPAREN:
			if p.quote == subCmd {
				return ce
			}
			fallthrough
		default:
			p.curErr("a command can only contain words and redirects")
		}
	}
	return ce
}
// funcDecl builds a function declaration node for name, starting at
// pos, and parses its body statement.
func (p *parser) funcDecl(name ast.Lit, pos token.Pos) *ast.FuncDecl {
	fd := &ast.FuncDecl{
		Position:  pos,
		BashStyle: pos != name.ValuePos, // "function foo" starts before the name
		Name:      name,
	}
	body, _ := p.getStmt(false)
	if body == nil {
		p.followErr(fd.Pos(), "foo()", "a statement")
	}
	fd.Body = body
	return fd
}
parser: avoid first multi-part word alloc
Here we don't know in advance how many parts a word will have, but we
can still avoid the first slice allocation, which is a net win either way.
name old time/op new time/op delta
Parse/Space+Comment-4 804ns ± 1% 816ns ± 1% +1.47% (p=0.002 n=6+6)
Parse/LongLit-4 919ns ± 1% 908ns ± 1% -1.20% (p=0.004 n=6+6)
Parse/Cmds-4 6.20µs ± 1% 6.09µs ± 1% -1.65% (p=0.004 n=5+6)
Parse/Quoted-4 1.15µs ± 1% 1.07µs ± 2% -6.77% (p=0.002 n=6+6)
Parse/NestedStmts-4 5.09µs ± 1% 4.88µs ± 1% -4.12% (p=0.002 n=6+6)
Parse/Assign+Clauses-4 4.70µs ± 1% 4.64µs ± 1% -1.20% (p=0.002 n=6+6)
Parse/Binary-4 3.16µs ± 1% 3.15µs ± 1% ~ (p=0.255 n=6+6)
Parse/Redirect-4 3.22µs ± 1% 3.12µs ± 1% -3.09% (p=0.004 n=5+6)
name old allocs/op new allocs/op delta
Parse/Space+Comment-4 5.00 ± 0% 5.00 ± 0% ~ (all samples are equal)
Parse/LongLit-4 5.00 ± 0% 5.00 ± 0% ~ (all samples are equal)
Parse/Cmds-4 68.0 ± 0% 68.0 ± 0% ~ (all samples are equal)
Parse/Quoted-4 10.0 ± 0% 8.0 ± 0% -20.00% (p=0.002 n=6+6)
Parse/NestedStmts-4 55.0 ± 0% 51.0 ± 0% -7.27% (p=0.002 n=6+6)
Parse/Assign+Clauses-4 51.0 ± 0% 51.0 ± 0% ~ (all samples are equal)
Parse/Binary-4 34.0 ± 0% 34.0 ± 0% ~ (all samples are equal)
Parse/Redirect-4 27.0 ± 0% 26.0 ± 0% -3.70% (p=0.002 n=6+6)
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package parser
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
"sync"
"github.com/mvdan/sh/ast"
"github.com/mvdan/sh/token"
)
// Mode controls the parser behaviour via a set of flags.
type Mode uint
// Parser mode flags; combine with bitwise or.
const (
	ParseComments Mode = 1 << iota // add comments to the AST
	PosixConformant // match the POSIX standard where it differs from bash
)
// parserFree pools parser instances so that repeated Parse calls can
// reuse a parser and its scratch buffer instead of reallocating them.
var parserFree = sync.Pool{
	New: func() interface{} {
		return &parser{helperBuf: new(bytes.Buffer)}
	},
}
// Parse reads and parses a shell program with an optional name. It
// returns the parsed program if no issues were encountered. Otherwise,
// an error is returned.
func Parse(src []byte, name string, mode Mode) (*ast.File, error) {
	p := parserFree.Get().(*parser)
	p.reset()
	// allocate the file together with its first line offsets in one go
	alloc := &struct {
		f ast.File
		l [16]int
	}{}
	p.f = &alloc.f
	p.f.Name = name
	p.f.Lines = alloc.l[:1]
	p.src, p.mode = src, mode
	p.next()
	p.f.Stmts = p.stmts()
	// Capture the results before returning the parser to the pool;
	// reading p.f or p.err after Put would race with a concurrent
	// Parse call that gets handed the same parser.
	f, err := p.f, p.err
	parserFree.Put(p)
	return f, err
}
// parser holds the state for one parse of a shell program. Instances
// are recycled through parserFree; see reset.
type parser struct {
	src []byte // input being parsed
	f *ast.File // file node being built
	mode Mode
	spaced, newLine bool // whether the current token followed a space / newline
	stopNewline, forbidNested bool // lexer behaviour flags, used around heredocs and let
	err error // first error encountered, if any
	tok token.Token // current token
	val string // literal value of the current token
	pos token.Pos // position of the current token
	npos int // byte offset of the next unread character in src
	quote quoteState // current syntactic context
	// list of pending heredoc bodies
	heredocs []*ast.Redirect
	hdocStop []byte // stop word of the heredoc body being read
	helperBuf *bytes.Buffer // reusable scratch buffer
	litBatch []ast.Lit // batch allocators amortizing small node allocations
	wpsBatch []ast.WordPart
	stmtBatch []ast.Stmt
}
// lit returns a literal node carved out of a batch allocation,
// amortizing many small ast.Lit allocations into one.
func (p *parser) lit(pos token.Pos, val string) *ast.Lit {
	if len(p.litBatch) == 0 {
		// batch exhausted; grab a fresh one
		p.litBatch = make([]ast.Lit, 32)
	}
	l := &p.litBatch[0]
	p.litBatch = p.litBatch[1:]
	l.ValuePos = pos
	l.Value = val
	return l
}
// wps returns a one-element word part slice taken from a batch
// allocation. The slice is capped at length 1 so that a later append
// reallocates instead of overwriting the neighbouring batch slots.
func (p *parser) wps(wp ast.WordPart) []ast.WordPart {
	if len(p.wpsBatch) == 0 {
		p.wpsBatch = make([]ast.WordPart, 32)
	}
	wps := p.wpsBatch[:1:1]
	p.wpsBatch = p.wpsBatch[1:]
	wps[0] = wp
	return wps
}
// stmt returns a statement node carved out of a batch allocation,
// positioned at pos.
func (p *parser) stmt(pos token.Pos) *ast.Stmt {
	if len(p.stmtBatch) == 0 {
		// batch exhausted; grab a fresh one
		p.stmtBatch = make([]ast.Stmt, 16)
	}
	s := &p.stmtBatch[0]
	p.stmtBatch = p.stmtBatch[1:]
	s.Position = pos
	return s
}
// quoteState tracks the syntactic context the lexer and parser are
// currently inside of, as a bitmask.
type quoteState int
const (
	noState quoteState = 1 << iota
	subCmd // $( ) or ( )
	subCmdBckquo // ` `
	sglQuotes // ' '
	dblQuotes // " "
	hdocBody // heredoc body
	hdocBodyTabs // <<- heredoc body
	arithmExpr // $(( ))
	arithmExprCmd // (( ))
	arithmExprBrack // deprecated $[ ]
	testRegexp // right side of =~ inside [[ ]]
	switchCase // case pattern statement list
	paramExpName // name inside ${ }
	paramExpInd // index inside ${a[ ]}
	paramExpRepl // pattern inside ${a/ / }
	paramExpExp // expansion word inside ${ }
	// combined masks for quick membership checks
	allRegTokens = noState | subCmd | subCmdBckquo | switchCase
	allArithmExpr = arithmExpr | arithmExprCmd | arithmExprBrack
	allRbrack = arithmExprBrack | paramExpInd
	allHdoc = hdocBody | hdocBodyTabs
)
// bash reports whether bash extensions are enabled, i.e. the mode is
// not PosixConformant.
func (p *parser) bash() bool { return p.mode&PosixConformant == 0 }
// reset returns the parser to a clean state so a pooled instance can
// be reused for a new input.
func (p *parser) reset() {
	p.spaced, p.newLine = false, false
	p.stopNewline, p.forbidNested = false, false
	p.err = nil
	p.npos = 0
	p.tok, p.quote = token.ILLEGAL, noState
	// was `p.heredocs[:]`, a no-op that leaked pending heredocs from
	// the previous use of this pooled parser; truncate instead
	p.heredocs = p.heredocs[:0]
}
// unquotedWordBytes renders w with its quoting removed, reporting
// whether any part was in fact quoted or escaped. The returned bytes
// alias p.helperBuf and are only valid until its next use.
func (p *parser) unquotedWordBytes(w ast.Word) ([]byte, bool) {
	p.helperBuf.Reset()
	unquoted := false
	for _, part := range w.Parts {
		if p.unquotedWordPart(p.helperBuf, part) {
			unquoted = true
		}
	}
	return p.helperBuf.Bytes(), unquoted
}
// unquotedWordPart writes the unquoted form of wp to b and reports
// whether the part was quoted or escaped in the source.
func (p *parser) unquotedWordPart(b *bytes.Buffer, wp ast.WordPart) bool {
	switch x := wp.(type) {
	case *ast.Lit:
		if x.Value[0] == '\\' {
			// drop the leading backslash escape
			b.WriteString(x.Value[1:])
			return true
		}
		b.WriteString(x.Value)
		return false
	case *ast.SglQuoted:
		b.WriteString(x.Value)
		return true
	case *ast.Quoted:
		for _, wp2 := range x.Parts {
			p.unquotedWordPart(b, wp2)
		}
		return true
	default:
		// catch-all for unusual cases such as ParamExp
		b.Write(p.src[wp.Pos()-1 : wp.End()-1])
		return false
	}
}
// doHeredocs reads the bodies of the heredocs queued up by redirects
// on the current line, in order.
func (p *parser) doHeredocs() {
	p.tok = token.ILLEGAL
	old := p.quote
	hdocs := p.heredocs
	p.heredocs = p.heredocs[:0]
	for i, r := range hdocs {
		if r.Op == token.DHEREDOC {
			// <<- bodies have leading tabs stripped
			p.quote = hdocBodyTabs
		} else {
			p.quote = hdocBody
		}
		// a quoted stop word means the body is taken literally
		var quoted bool
		p.hdocStop, quoted = p.unquotedWordBytes(r.Word)
		if i > 0 && p.npos < len(p.src) && p.src[p.npos] == '\n' {
			p.npos++
			p.f.Lines = append(p.f.Lines, p.npos)
		}
		if !quoted {
			p.next()
			r.Hdoc = ast.Word{Parts: p.wordParts()}
			continue
		}
		r.Hdoc = p.hdocLitWord()
	}
	p.quote = old
}
// got consumes the current token if it equals tok, reporting whether
// it did.
func (p *parser) got(tok token.Token) bool {
	if p.tok != tok {
		return false
	}
	p.next()
	return true
}
// gotRsrv consumes the current token if it is the literal word val,
// reporting whether it did.
func (p *parser) gotRsrv(val string) bool {
	if p.tok != token.LITWORD || p.val != val {
		return false
	}
	p.next()
	return true
}
// gotSameLine consumes the current token if it equals tok and no
// newline preceded it, reporting whether it did.
func (p *parser) gotSameLine(tok token.Token) bool {
	if p.newLine || p.tok != tok {
		return false
	}
	p.next()
	return true
}
// readableStr returns s quoted when it starts like a word (such as a
// keyword), and unchanged for punctuation tokens like & or }.
func readableStr(s string) string {
	if c := s[0]; 'a' <= c && c <= 'z' {
		return strconv.Quote(s)
	}
	return s
}
// followErr reports at pos that left must be followed by right.
func (p *parser) followErr(pos token.Pos, left, right string) {
	p.posErr(pos, "%s must be followed by %s", readableStr(left), right)
}
// follow expects tok after left (at lpos), returning its position and
// reporting an error if something else follows.
func (p *parser) follow(lpos token.Pos, left string, tok token.Token) token.Pos {
	pos := p.pos
	if p.got(tok) {
		return pos
	}
	p.followErr(lpos, left, tok.String())
	return pos
}
// followRsrv expects the reserved word val after left (at lpos),
// returning its position and reporting an error otherwise.
func (p *parser) followRsrv(lpos token.Pos, left, val string) token.Pos {
	pos := p.pos
	if p.gotRsrv(val) {
		return pos
	}
	p.followErr(lpos, left, fmt.Sprintf(`%q`, val))
	return pos
}
// followStmts parses the statement list that must follow left (at
// lpos), stopping at any of the stop words. A same-line ";" yields an
// empty list without error.
func (p *parser) followStmts(left string, lpos token.Pos, stops ...string) []*ast.Stmt {
	if p.gotSameLine(token.SEMICOLON) {
		return nil
	}
	sts := p.stmts(stops...)
	if len(sts) == 0 && !p.newLine {
		p.followErr(lpos, left, "a statement list")
	}
	return sts
}
// followWordTok parses the word that must follow the token tok at pos,
// reporting an error if none is found.
func (p *parser) followWordTok(tok token.Token, pos token.Pos) ast.Word {
	wd := p.word()
	if wd.Parts == nil {
		p.followErr(pos, tok.String(), "a word")
	}
	return wd
}
// followWord parses the word that must follow the construct s at pos,
// reporting an error if none is found.
func (p *parser) followWord(s string, pos token.Pos) ast.Word {
	wd := p.word()
	if wd.Parts == nil {
		p.followErr(pos, s, "a word")
	}
	return wd
}
// stmtEnd expects the reserved word end closing the start construct of
// node n, returning its position and reporting an error otherwise.
func (p *parser) stmtEnd(n ast.Node, start, end string) token.Pos {
	pos := p.pos
	if p.gotRsrv(end) {
		return pos
	}
	p.posErr(n.Pos(), `%s statement must end with %q`, start, end)
	return pos
}
// quoteErr reports an unclosed quote that was opened at lpos.
func (p *parser) quoteErr(lpos token.Pos, quote token.Token) {
	p.posErr(lpos, `reached %s without closing quote %s`, p.tok, quote)
}
// matchingErr reports a left token opened at lpos that was never
// matched by its right counterpart.
func (p *parser) matchingErr(lpos token.Pos, left, right token.Token) {
	p.posErr(lpos, `reached %s without matching %s with %s`, p.tok, left, right)
}
// matched expects the right token closing a left token opened at lpos,
// returning its position and reporting an error otherwise.
func (p *parser) matched(lpos token.Pos, left, right token.Token) token.Pos {
	pos := p.pos
	if p.got(right) {
		return pos
	}
	p.matchingErr(lpos, left, right)
	return pos
}
// errPass records err as the parse error if one isn't set already, and
// normally moves the parser to EOF. Inside a $(( )) expression an EOF
// is swallowed so wordPart can retry the input as a $( ) command
// substitution.
func (p *parser) errPass(err error) {
	if p.err == nil {
		if p.quote == arithmExpr {
			if err == io.EOF {
				p.tok = token.EOF
			} else {
				p.err = err
			}
			return
		}
		if err != io.EOF {
			p.err = err
		}
		p.tok = token.EOF
	}
}
// ParseError represents an error found when parsing a source file.
type ParseError struct {
	token.Position
	Filename, Text string // Filename may be empty; Text is the message
}
// Error formats the error as "file:line:col: text"; the "file:"
// prefix is omitted when Filename is empty.
func (e *ParseError) Error() string {
	prefix := ""
	if e.Filename != "" {
		prefix = e.Filename + ":"
	}
	return fmt.Sprintf("%s%d:%d: %s", prefix, e.Line, e.Column, e.Text)
}
// posErr records a formatted parse error at pos via errPass.
func (p *parser) posErr(pos token.Pos, format string, a ...interface{}) {
	p.errPass(&ParseError{
		Position: p.f.Position(pos),
		Filename: p.f.Name,
		Text:     fmt.Sprintf(format, a...),
	})
}
// curErr records a parse error at the current token's position.
func (p *parser) curErr(format string, a ...interface{}) {
	p.posErr(p.pos, format, a...)
}
// stmts parses statements until EOF, one of the stop words, or the
// token that closes the current quote context.
func (p *parser) stmts(stops ...string) (sts []*ast.Stmt) {
	p.got(token.STOPPED)
	if p.forbidNested {
		p.curErr("nested statements not allowed in this word")
	}
	q := p.quote
	gotEnd := true
	for p.tok != token.EOF {
		switch p.tok {
		case token.LITWORD:
			for _, stop := range stops {
				if p.val == stop {
					return
				}
			}
		case token.RPAREN:
			if q == subCmd {
				return
			}
		case token.BQUOTE:
			if q == subCmdBckquo {
				return
			}
		case token.DSEMICOLON, token.SEMIFALL, token.DSEMIFALL:
			if q == switchCase {
				return
			}
			p.curErr("%s can only be used in a case clause", p.tok)
		}
		// the previous statement must have been terminated
		if !p.newLine && !gotEnd {
			p.curErr("statements must be separated by &, ; or a newline")
		}
		if p.tok == token.EOF {
			break
		}
		if s, end := p.getStmt(true); s == nil {
			p.invalidStmtStart()
		} else {
			sts = append(sts, s)
			gotEnd = end
		}
		p.got(token.STOPPED)
	}
	return
}
// invalidStmtStart reports an error for a token that cannot begin a
// statement, with a message tailored to the offending token.
func (p *parser) invalidStmtStart() {
	switch p.tok {
	case token.SEMICOLON, token.AND, token.OR, token.LAND, token.LOR:
		p.curErr("%s can only immediately follow a statement", p.tok)
	case token.RPAREN:
		p.curErr("%s can only be used to close a subshell", p.tok)
	default:
		p.curErr("%s is not a valid start for a statement", p.tok)
	}
}
// word parses a full word, using the single-literal fast path when the
// current token is a plain word.
func (p *parser) word() ast.Word {
	if p.tok != token.LITWORD {
		return ast.Word{Parts: p.wordParts()}
	}
	w := ast.Word{Parts: p.wps(p.lit(p.pos, p.val))}
	p.next()
	return w
}
// gotLit consumes the current token into l if it is a literal,
// reporting whether it did. l.ValuePos is set either way.
func (p *parser) gotLit(l *ast.Lit) bool {
	l.ValuePos = p.pos
	if p.tok != token.LIT && p.tok != token.LITWORD {
		return false
	}
	l.Value = p.val
	p.next()
	return true
}
// wordParts parses consecutive word parts until whitespace or a token
// that cannot be part of a word ends it.
func (p *parser) wordParts() (wps []ast.WordPart) {
	for {
		lastLit := p.tok == token.LIT
		n := p.wordPart()
		if n == nil {
			return
		}
		if wps == nil {
			// first part: take the slice from the batch allocator
			wps = p.wps(n)
		} else {
			wps = append(wps, n)
		}
		if p.spaced {
			return
		}
		if p.quote&allHdoc != 0 && p.hdocStop == nil {
			// TODO: is this is a hack around a bug?
			if p.tok == token.LIT && !lastLit {
				wps = append(wps, p.lit(p.pos, p.val))
			}
			return
		}
	}
}
// wordPart parses a single word part: a literal, a parameter or
// command expansion, or a quoted string. It returns nil when the
// current token cannot start a part.
func (p *parser) wordPart() ast.WordPart {
	switch p.tok {
	case token.LIT, token.LITWORD:
		l := p.lit(p.pos, p.val)
		p.next()
		return l
	case token.DOLLBR:
		return p.paramExp()
	case token.DOLLDP, token.DOLLBK:
		left := p.tok
		ar := &ast.ArithmExp{Token: p.tok, Left: p.pos}
		oldQuote := p.quote
		if ar.Token == token.DOLLBK {
			// treat deprecated $[ as $((
			ar.Token = token.DOLLDP
			p.quote = arithmExprBrack
		} else {
			p.quote = arithmExpr
		}
		if p.err != nil {
			return nil
		}
		p.next()
		ar.X = p.arithmExpr(ar.Token, ar.Left, 0, false)
		hasEnd := p.peekArithmEnd(p.tok)
		oldTok := p.tok
		oldErr := p.err
		if p.quote == arithmExpr && !hasEnd {
			// "$((" did not turn out to be arithmetic; rewind and
			// retry the same input as a "$(" command substitution.
			// TODO: this will probably break if there is
			// extra lingering state, such as pending
			// heredocs
			p.quote = oldQuote
			p.err = nil
			p.tok, p.pos = token.DOLLPR, ar.Left
			p.npos = int(ar.Left) + 1
			wp := p.wordPart()
			if p.err != nil {
				if oldErr != nil {
					// if retrying fails, report the
					// arithmetic expr error as that's got
					// higher precedence
					p.err = oldErr
				} else if !hasEnd {
					// if retrying fails and the
					// arithmetic expression wasn't
					// closed, report that properly
					p.err = nil
					p.tok = oldTok
					goto arithmClose
				}
			}
			return wp
		}
		if oldErr != nil {
			// not retrying, so recover error
			p.err = oldErr
			p.tok = token.EOF
		}
	arithmClose:
		if left == token.DOLLBK {
			if p.tok != token.RBRACK {
				p.matchingErr(ar.Left, left, token.RBRACK)
			}
			p.quote = oldQuote
			ar.Right = p.pos
			p.next()
		} else {
			ar.Right = p.arithmEnd(left, ar.Left, oldQuote)
		}
		return ar
	case token.DOLLPR:
		// "$(" command substitution
		cs := &ast.CmdSubst{Left: p.pos}
		old := p.quote
		p.quote = subCmd
		p.next()
		cs.Stmts = p.stmts()
		p.quote = old
		cs.Right = p.matched(cs.Left, token.LPAREN, token.RPAREN)
		return cs
	case token.DOLLAR:
		// peek at the byte after "$" to decide what it starts
		var b byte
		if p.npos >= len(p.src) {
			p.errPass(io.EOF)
		} else {
			b = p.src[p.npos]
		}
		if p.tok == token.EOF || wordBreak(b) || b == '"' || b == '\'' || b == '`' {
			// lone "$" acts as a plain literal
			l := p.lit(p.pos, "$")
			p.next()
			return l
		}
		pe := &ast.ParamExp{Dollar: p.pos, Short: true}
		if b == '#' || b == '$' || b == '?' {
			// special single-character parameters like $# and $?
			p.npos++
			p.pos++
			p.tok, p.val = token.LIT, string(b)
		} else {
			old := p.quote
			if p.quote&allHdoc != 0 {
				p.quote = noState
			}
			p.next()
			p.quote = old
		}
		p.gotLit(&pe.Param)
		return pe
	case token.CMDIN, token.CMDOUT:
		// "<(" or ">(" process substitution
		ps := &ast.ProcSubst{Op: p.tok, OpPos: p.pos}
		old := p.quote
		p.quote = subCmd
		p.next()
		ps.Stmts = p.stmts()
		p.quote = old
		ps.Rparen = p.matched(ps.OpPos, ps.Op, token.RPAREN)
		return ps
	case token.SQUOTE:
		if p.quote == sglQuotes {
			return nil
		}
		sq := &ast.SglQuoted{Quote: p.pos}
		bs, found := p.readUntil('\'')
		// account for any newlines within the quoted bytes
		rem := bs
		for {
			i := bytes.IndexByte(rem, '\n')
			if i < 0 {
				p.npos += len(rem)
				break
			}
			p.npos += i + 1
			p.f.Lines = append(p.f.Lines, p.npos)
			rem = rem[i+1:]
		}
		p.npos++
		if !found {
			p.posErr(sq.Pos(), `reached EOF without closing quote %s`, token.SQUOTE)
		}
		sq.Value = string(bs)
		p.next()
		return sq
	case token.DQUOTE:
		if p.quote == dblQuotes {
			return nil
		}
		fallthrough
	case token.DOLLSQ, token.DOLLDQ:
		q := &ast.Quoted{Quote: p.tok, QuotePos: p.pos}
		stop := q.Quote
		old := p.quote
		switch q.Quote {
		case token.DOLLSQ:
			stop = token.SQUOTE
			p.quote = sglQuotes
		case token.DOLLDQ:
			stop = token.DQUOTE
			p.quote = dblQuotes
		case token.SQUOTE:
			p.quote = sglQuotes
		case token.DQUOTE:
			p.quote = dblQuotes
		}
		p.next()
		q.Parts = p.wordParts()
		p.quote = old
		if !p.got(stop) {
			p.quoteErr(q.Pos(), stop)
		}
		return q
	case token.BQUOTE:
		if p.quote == subCmdBckquo {
			return nil
		}
		// legacy `...` command substitution
		cs := &ast.CmdSubst{Backquotes: true, Left: p.pos}
		old := p.quote
		p.quote = subCmdBckquo
		p.next()
		cs.Stmts = p.stmts()
		p.quote = old
		cs.Right = p.pos
		if !p.got(token.BQUOTE) {
			p.quoteErr(cs.Pos(), token.BQUOTE)
		}
		return cs
	}
	return nil
}
// arithmOpLevel returns the precedence level of a binary arithmetic
// operator token, from 0 (comma, loosest) to 11 (power, tightest), or
// -1 if tok is not a binary operator.
func arithmOpLevel(tok token.Token) int {
	switch tok {
	case token.COMMA:
		return 0
	case token.ADDASSGN, token.SUBASSGN, token.MULASSGN, token.QUOASSGN,
		token.REMASSGN, token.ANDASSGN, token.ORASSGN, token.XORASSGN,
		token.SHLASSGN, token.SHRASSGN:
		return 1
	case token.ASSIGN:
		return 2
	case token.QUEST, token.COLON:
		return 3
	case token.LOR:
		return 4
	case token.LAND:
		return 5
	case token.AND, token.OR, token.XOR:
		// NOTE(review): bitwise &, | and ^ share level 5 with &&, while
		// C and bash give each a distinct, tighter level — confirm this
		// is intentional.
		return 5
	case token.EQL, token.NEQ:
		return 6
	case token.LSS, token.GTR, token.LEQ, token.GEQ:
		return 7
	case token.SHL, token.SHR:
		return 8
	case token.ADD, token.SUB:
		return 9
	case token.MUL, token.QUO, token.REM:
		return 10
	case token.POW:
		return 11
	}
	return -1
}
// arithmExpr parses an arithmetic expression using precedence climbing:
// level selects the binary-operator precedence handled by this call (see
// arithmOpLevel); levels above 11 delegate to arithmExprBase for an
// operand. ftok/fpos are the preceding token and its position, used for
// error messages. In compact mode (used by let clauses) whitespace ends
// the expression.
func (p *parser) arithmExpr(ftok token.Token, fpos token.Pos, level int, compact bool) ast.ArithmExpr {
	if p.tok == token.EOF || p.peekArithmEnd(p.tok) {
		return nil
	}
	var left ast.ArithmExpr
	if level > 11 {
		// Past the highest binary precedence; parse an operand.
		left = p.arithmExprBase(ftok, fpos, compact)
	} else {
		left = p.arithmExpr(ftok, fpos, level+1, compact)
	}
	if compact && p.spaced {
		// A space terminates a compact expression.
		return left
	}
	newLevel := arithmOpLevel(p.tok)
	if p.quote == arithmExpr && p.tok == token.SEMICOLON {
		p.curErr("not a valid arithmetic operator: %v", p.tok)
		newLevel = 0
	} else if p.tok == token.LIT || p.tok == token.LITWORD {
		p.curErr("not a valid arithmetic operator: %s", p.val)
		newLevel = 0
	}
	if newLevel < 0 || newLevel < level {
		// Not an operator, or one that binds less tightly than this
		// level; let an outer recursion consume it.
		return left
	}
	b := &ast.BinaryExpr{
		OpPos: p.pos,
		Op:    p.tok,
		X:     left,
	}
	if p.next(); compact && p.spaced {
		p.followErr(b.OpPos, b.Op.String(), "an expression")
	}
	if b.Y = p.arithmExpr(b.Op, b.OpPos, newLevel, compact); b.Y == nil {
		p.followErr(b.OpPos, b.Op.String(), "an expression")
	}
	return b
}
// arithmExprBase parses an arithmetic operand: a prefix ++/--/!, a
// parenthesized expression, a unary +/- expression, or a plain word,
// optionally followed by a postfix ++/--.
func (p *parser) arithmExprBase(ftok token.Token, fpos token.Pos, compact bool) ast.ArithmExpr {
	if p.tok == token.INC || p.tok == token.DEC || p.tok == token.NOT {
		pre := &ast.UnaryExpr{OpPos: p.pos, Op: p.tok}
		p.next()
		pre.X = p.arithmExprBase(pre.Op, pre.OpPos, compact)
		return pre
	}
	var x ast.ArithmExpr
	switch p.tok {
	case token.LPAREN:
		pe := &ast.ParenExpr{Lparen: p.pos}
		p.next()
		if pe.X = p.arithmExpr(token.LPAREN, pe.Lparen, 0, false); pe.X == nil {
			p.posErr(pe.Lparen, "parentheses must enclose an expression")
		}
		pe.Rparen = p.matched(pe.Lparen, token.LPAREN, token.RPAREN)
		x = pe
	case token.ADD, token.SUB:
		ue := &ast.UnaryExpr{OpPos: p.pos, Op: p.tok}
		if p.next(); compact && p.spaced {
			// In compact mode the operand must follow without a space.
			p.followErr(ue.OpPos, ue.Op.String(), "an expression")
		}
		if ue.X = p.arithmExpr(ue.Op, ue.OpPos, 0, compact); ue.X == nil {
			p.followErr(ue.OpPos, ue.Op.String(), "an expression")
		}
		x = ue
	default:
		w := p.followWordTok(ftok, fpos)
		x = &w
	}
	if compact && p.spaced {
		return x
	}
	if p.tok == token.INC || p.tok == token.DEC {
		// Postfix increment/decrement wraps the operand just parsed.
		u := &ast.UnaryExpr{
			Post:  true,
			OpPos: p.pos,
			Op:    p.tok,
			X:     x,
		}
		p.next()
		return u
	}
	return x
}
// gotParamLit consumes the current token as a parameter name — a plain
// literal, or the special "$" or "?" parameters — filling in l and
// reporting whether one was found. Nothing is consumed on failure.
func (p *parser) gotParamLit(l *ast.Lit) bool {
	l.ValuePos = p.pos
	switch p.tok {
	case token.LIT, token.LITWORD:
		l.Value = p.val
	case token.DOLLAR:
		l.Value = "$"
	case token.QUEST:
		l.Value = "?"
	default:
		return false
	}
	p.next()
	return true
}
// paramExp parses a ${...} parameter expansion, entered just after the
// opening token. It handles length (${#x}), indexing (${x[i]}), pattern
// replacement (${x/a/b}, ${x//a/b}) and the remaining expansion
// operators, restoring the previous quote state before returning.
func (p *parser) paramExp() *ast.ParamExp {
	pe := &ast.ParamExp{Dollar: p.pos}
	old := p.quote
	p.quote = paramExpName
	p.next()
	pe.Length = p.got(token.HASH)
	if !p.gotParamLit(&pe.Param) && !pe.Length {
		p.posErr(pe.Dollar, "parameter expansion requires a literal")
	}
	if p.tok == token.RBRACE {
		// Simple ${x} or ${#x}; nothing else follows.
		p.quote = old
		p.next()
		return pe
	}
	if p.tok == token.LBRACK {
		// Array index: ${x[expr]}.
		lpos := p.pos
		p.quote = paramExpInd
		p.next()
		pe.Ind = &ast.Index{Word: p.word()}
		p.quote = paramExpName
		p.matched(lpos, token.LBRACK, token.RBRACK)
	}
	if p.tok == token.RBRACE {
		p.quote = old
		p.next()
		return pe
	}
	if pe.Length {
		// ${#x} cannot be combined with any further operator.
		p.curErr(`can only get length of a simple parameter`)
	}
	if p.tok == token.QUO || p.tok == token.DQUO {
		// / or // introduces a pattern replacement.
		pe.Repl = &ast.Replace{All: p.tok == token.DQUO}
		p.quote = paramExpRepl
		p.next()
		pe.Repl.Orig = p.word()
		if p.tok == token.QUO {
			p.quote = paramExpExp
			p.next()
			pe.Repl.With = p.word()
		}
	} else {
		// Any other operator (e.g. :-, :=, %, #) with its word.
		pe.Exp = &ast.Expansion{Op: p.tok}
		p.quote = paramExpExp
		p.next()
		pe.Exp.Word = p.word()
	}
	p.quote = old
	p.matched(pe.Dollar, token.DOLLBR, token.RBRACE)
	return pe
}
// peekArithmEnd reports whether tok is the first ')' of the "))" that
// closes an arithmetic expression; the second ')' is the next unread
// byte in p.src.
func (p *parser) peekArithmEnd(tok token.Token) bool {
	return tok == token.RPAREN && p.npos < len(p.src) && p.src[p.npos] == ')'
}
// arithmEnd consumes the closing "))" of an arithmetic expression,
// restores the previous quote state old, and returns the position of the
// closing token. ltok/lpos identify the opening token for the mismatch
// error.
func (p *parser) arithmEnd(ltok token.Token, lpos token.Pos, old quoteState) token.Pos {
	if p.peekArithmEnd(p.tok) {
		// Skip the second ')' byte directly; only the first was lexed.
		p.npos++
	} else {
		p.matchingErr(lpos, ltok, token.DRPAREN)
	}
	p.quote = old
	pos := p.pos
	p.next()
	return pos
}
// stopToken reports whether tok ends a statement: end of input, a
// command separator, a background/logic operator, a pipe-all, or one of
// the case-clause terminators.
func stopToken(tok token.Token) bool {
	switch tok {
	case token.EOF, token.SEMICOLON, token.AND, token.OR, token.LAND,
		token.LOR, token.PIPEALL, token.DSEMICOLON, token.SEMIFALL,
		token.DSEMIFALL:
		return true
	}
	return false
}
// validIdent reports whether s is a valid shell variable name: ASCII
// letters and underscores anywhere, digits anywhere except the first
// position. The empty string is trivially valid; callers guard against
// it separately.
func validIdent(s string) bool {
	for i, r := range s {
		ok := r == '_' ||
			('a' <= r && r <= 'z') ||
			('A' <= r && r <= 'Z') ||
			(i > 0 && '0' <= r && r <= '9')
		if !ok {
			return false
		}
	}
	return true
}
// getAssign attempts to parse the current literal token as an assignment
// such as foo=bar, foo+=bar or foo=(a b). It returns false without
// consuming anything if the text before '=' is not a valid identifier.
func (p *parser) getAssign() (*ast.Assign, bool) {
	i := strings.Index(p.val, "=")
	if i <= 0 {
		return nil, false
	}
	if p.val[i-1] == '+' {
		// "+=" append form; let the name end before the '+'.
		i--
	}
	if !validIdent(p.val[:i]) {
		return nil, false
	}
	as := &ast.Assign{}
	as.Name = p.lit(p.pos, p.val[:i])
	if p.val[i] == '+' {
		as.Append = true
		i++
	}
	// Whatever follows '=' within the same token starts the value.
	start := p.lit(p.pos+1, p.val[i+1:])
	if start.Value != "" {
		start.ValuePos += token.Pos(i)
		as.Value.Parts = append(as.Value.Parts, start)
	}
	p.next()
	if p.spaced {
		// A space ends the assignment; the value is complete.
		return as, true
	}
	if start.Value == "" && p.tok == token.LPAREN {
		// Array literal: foo=(a b c).
		ae := &ast.ArrayExpr{Lparen: p.pos}
		p.next()
		for p.tok != token.EOF && p.tok != token.RPAREN {
			if w := p.word(); w.Parts == nil {
				p.curErr("array elements must be words")
			} else {
				ae.List = append(ae.List, w)
			}
		}
		ae.Rparen = p.matched(ae.Lparen, token.LPAREN, token.RPAREN)
		as.Value.Parts = append(as.Value.Parts, ae)
	} else if !p.newLine && !stopToken(p.tok) {
		// The value continues in the following word parts.
		if w := p.word(); start.Value == "" {
			as.Value = w
		} else {
			as.Value.Parts = append(as.Value.Parts, w.Parts...)
		}
	}
	return as, true
}
// peekRedir reports whether the current token starts a redirection:
// either a redirect operator itself, or a literal (an fd number)
// directly followed by '>' or '<' in the raw source.
func (p *parser) peekRedir() bool {
	switch p.tok {
	case token.LITWORD:
		return p.npos < len(p.src) && (p.src[p.npos] == '>' || p.src[p.npos] == '<')
	case token.GTR, token.SHR, token.LSS, token.DPLIN, token.DPLOUT,
		token.RDRINOUT, token.SHL, token.DHEREDOC, token.WHEREDOC,
		token.RDRALL, token.APPALL:
		return true
	}
	return false
}
// doRedirect parses one redirection (including heredocs) and appends it
// to s.Redirs. An optional literal before the operator (e.g. the "2" in
// 2>) becomes the redirect's N field.
func (p *parser) doRedirect(s *ast.Stmt) {
	r := &ast.Redirect{}
	var l ast.Lit
	if p.gotLit(&l) {
		r.N = &l
	}
	r.Op, r.OpPos = p.tok, p.pos
	p.next()
	switch r.Op {
	case token.SHL, token.DHEREDOC:
		// Heredoc: record it so its body can be read at the next
		// newline; nested substitutions are forbidden in the stop word.
		p.stopNewline = true
		p.forbidNested = true
		if p.newLine {
			p.curErr("heredoc stop word must be on the same line")
		}
		r.Word = p.followWordTok(r.Op, r.OpPos)
		p.forbidNested = false
		p.heredocs = append(p.heredocs, r)
		p.got(token.STOPPED)
	default:
		if p.newLine {
			p.curErr("redirect word must be on the same line")
		}
		r.Word = p.followWordTok(r.Op, r.OpPos)
	}
	s.Redirs = append(s.Redirs, r)
}
// getStmt parses one full statement: optional "!" negation, leading
// assignments and redirects, the command pipeline, and a trailing
// && / || / & / ; operator. If readEnd is true a trailing ';' is
// consumed; gotEnd reports whether a statement terminator was seen.
func (p *parser) getStmt(readEnd bool) (s *ast.Stmt, gotEnd bool) {
	s = p.stmt(p.pos)
	if p.gotRsrv("!") {
		s.Negated = true
	}
preLoop:
	for {
		// Gather any number of leading assignments and redirects.
		switch p.tok {
		case token.LIT, token.LITWORD:
			if as, ok := p.getAssign(); ok {
				s.Assigns = append(s.Assigns, as)
			} else if p.npos < len(p.src) && (p.src[p.npos] == '>' || p.src[p.npos] == '<') {
				// A literal immediately followed by '>' or '<' is an
				// fd number prefixing a redirect, e.g. 2>err.
				p.doRedirect(s)
			} else {
				break preLoop
			}
		case token.GTR, token.SHR, token.LSS, token.DPLIN, token.DPLOUT,
			token.RDRINOUT, token.SHL, token.DHEREDOC,
			token.WHEREDOC, token.RDRALL, token.APPALL:
			p.doRedirect(s)
		default:
			break preLoop
		}
		switch {
		case p.newLine, p.tok == token.EOF:
			return
		case p.tok == token.SEMICOLON:
			if readEnd {
				p.next()
				gotEnd = true
			}
			return
		}
	}
	if s = p.gotStmtPipe(s); s == nil {
		return
	}
	switch p.tok {
	case token.LAND, token.LOR:
		// && and || chain two statements into a BinaryCmd.
		b := &ast.BinaryCmd{OpPos: p.pos, Op: p.tok, X: s}
		p.next()
		p.got(token.STOPPED)
		if b.Y, _ = p.getStmt(false); b.Y == nil {
			p.followErr(b.OpPos, b.Op.String(), "a statement")
		}
		s = p.stmt(s.Position)
		s.Cmd = b
		if readEnd && p.gotSameLine(token.SEMICOLON) {
			gotEnd = true
		}
	case token.AND:
		p.next()
		s.Background = true
		gotEnd = true
	case token.SEMICOLON:
		if !p.newLine && readEnd {
			p.next()
			gotEnd = true
		}
	}
	return
}
// gotStmtPipe parses the command part of a statement into s and chains
// it with | or |& pipelines. It returns nil if nothing at all (command,
// redirect, assignment or negation) was parsed.
func (p *parser) gotStmtPipe(s *ast.Stmt) *ast.Stmt {
	switch p.tok {
	case token.LPAREN:
		s.Cmd = p.subshell()
	case token.DLPAREN:
		s.Cmd = p.arithmExpCmd()
	case token.LITWORD:
		// Reserved words are dispatched by value; anything else is a
		// plain command or a POSIX function declaration.
		switch {
		case p.val == "}":
			p.curErr("%s can only be used to close a block", p.val)
		case p.val == "{":
			s.Cmd = p.block()
		case p.val == "if":
			s.Cmd = p.ifClause()
		case p.val == "while":
			s.Cmd = p.whileClause()
		case p.val == "until":
			s.Cmd = p.untilClause()
		case p.val == "for":
			s.Cmd = p.forClause()
		case p.val == "case":
			s.Cmd = p.caseClause()
		case p.bash() && p.val == "[[":
			s.Cmd = p.testClause()
		case p.bash() && (p.val == "declare" || p.val == "local"):
			s.Cmd = p.declClause()
		case p.bash() && p.val == "eval":
			s.Cmd = p.evalClause()
		case p.bash() && p.val == "let":
			s.Cmd = p.letClause()
		case p.bash() && p.val == "function":
			s.Cmd = p.bashFuncDecl()
		default:
			name := ast.Lit{ValuePos: p.pos, Value: p.val}
			p.next()
			if p.gotSameLine(token.LPAREN) {
				// foo() — a POSIX-style function declaration.
				p.follow(name.ValuePos, "foo(", token.RPAREN)
				s.Cmd = p.funcDecl(name, name.ValuePos)
			} else {
				s.Cmd = p.callExpr(s, ast.Word{
					Parts: p.wps(&name),
				})
			}
		}
	case token.LIT, token.DOLLBR, token.DOLLDP, token.DOLLPR, token.DOLLAR,
		token.CMDIN, token.CMDOUT, token.SQUOTE, token.DOLLSQ,
		token.DQUOTE, token.DOLLDQ, token.BQUOTE, token.DOLLBK:
		w := ast.Word{Parts: p.wordParts()}
		if p.gotSameLine(token.LPAREN) && p.err == nil {
			// Function names must be plain literal words.
			rawName := string(p.src[w.Pos()-1 : w.End()-1])
			p.posErr(w.Pos(), "invalid func name: %q", rawName)
		}
		s.Cmd = p.callExpr(s, w)
	}
	for !p.newLine && p.peekRedir() {
		p.doRedirect(s)
	}
	if s.Cmd == nil && len(s.Redirs) == 0 && !s.Negated && len(s.Assigns) == 0 {
		return nil
	}
	if p.tok == token.OR || p.tok == token.PIPEALL {
		// Pipeline: parse the right-hand side recursively.
		b := &ast.BinaryCmd{OpPos: p.pos, Op: p.tok, X: s}
		p.next()
		p.got(token.STOPPED)
		if b.Y = p.gotStmtPipe(p.stmt(p.pos)); b.Y == nil {
			p.followErr(b.OpPos, b.Op.String(), "a statement")
		}
		s = p.stmt(s.Position)
		s.Cmd = b
	}
	return s
}
// subshell parses a ( ... ) subshell, entered on the '(' token, saving
// and restoring the quote state around the contained statements.
func (p *parser) subshell() *ast.Subshell {
	s := &ast.Subshell{Lparen: p.pos}
	old := p.quote
	p.quote = subCmd
	p.next()
	s.Stmts = p.stmts()
	p.quote = old
	s.Rparen = p.matched(s.Lparen, token.LPAREN, token.RPAREN)
	return s
}
// arithmExpCmd parses a (( ... )) arithmetic command, entered on the
// '((' token; arithmEnd restores the previous quote state.
func (p *parser) arithmExpCmd() *ast.ArithmExp {
	ar := &ast.ArithmExp{Token: p.tok, Left: p.pos}
	old := p.quote
	p.quote = arithmExprCmd
	p.next()
	ar.X = p.arithmExpr(ar.Token, ar.Left, 0, false)
	ar.Right = p.arithmEnd(ar.Token, ar.Left, old)
	return ar
}
// block parses a { ...; } block, entered on the '{' reserved word.
func (p *parser) block() *ast.Block {
	b := &ast.Block{Lbrace: p.pos}
	p.next()
	b.Stmts = p.stmts("}")
	b.Rbrace = p.pos
	if !p.gotRsrv("}") {
		p.matchingErr(b.Lbrace, token.LBRACE, token.RBRACE)
	}
	return b
}
// ifClause parses an if/elif/else/fi clause, entered on "if".
func (p *parser) ifClause() *ast.IfClause {
	ic := &ast.IfClause{If: p.pos}
	p.next()
	ic.CondStmts = p.followStmts("if", ic.If, "then")
	ic.Then = p.followRsrv(ic.If, "if <cond>", "then")
	ic.ThenStmts = p.followStmts("then", ic.Then, "fi", "elif", "else")
	elifPos := p.pos
	for p.gotRsrv("elif") {
		elf := &ast.Elif{Elif: elifPos}
		elf.CondStmts = p.followStmts("elif", elf.Elif, "then")
		elf.Then = p.followRsrv(elf.Elif, "elif <cond>", "then")
		elf.ThenStmts = p.followStmts("then", elf.Then, "fi", "elif", "else")
		ic.Elifs = append(ic.Elifs, elf)
		// Remember the position in case another elif follows.
		elifPos = p.pos
	}
	if elsePos := p.pos; p.gotRsrv("else") {
		ic.Else = elsePos
		ic.ElseStmts = p.followStmts("else", ic.Else, "fi")
	}
	ic.Fi = p.stmtEnd(ic, "if", "fi")
	return ic
}
// whileClause parses a while/do/done clause, entered on "while".
func (p *parser) whileClause() *ast.WhileClause {
	wc := &ast.WhileClause{While: p.pos}
	p.next()
	wc.CondStmts = p.followStmts("while", wc.While, "do")
	wc.Do = p.followRsrv(wc.While, "while <cond>", "do")
	wc.DoStmts = p.followStmts("do", wc.Do, "done")
	wc.Done = p.stmtEnd(wc, "while", "done")
	return wc
}
// untilClause parses an until/do/done clause, entered on "until".
func (p *parser) untilClause() *ast.UntilClause {
	uc := &ast.UntilClause{Until: p.pos}
	p.next()
	uc.CondStmts = p.followStmts("until", uc.Until, "do")
	uc.Do = p.followRsrv(uc.Until, "until <cond>", "do")
	uc.DoStmts = p.followStmts("do", uc.Do, "done")
	uc.Done = p.stmtEnd(uc, "until", "done")
	return uc
}
// forClause parses a for/do/done clause, entered on "for"; the loop
// header itself is handled by loop.
func (p *parser) forClause() *ast.ForClause {
	fc := &ast.ForClause{For: p.pos}
	p.next()
	fc.Loop = p.loop(fc.For)
	fc.Do = p.followRsrv(fc.For, "for foo [in words]", "do")
	fc.DoStmts = p.followStmts("do", fc.Do, "done")
	fc.Done = p.stmtEnd(fc, "for", "done")
	return fc
}
// loop parses the header of a for clause: either a C-style
// ((init; cond; post)) loop or a "name [in words]" word iterator.
// forPos is the position of the "for" keyword, for error messages.
func (p *parser) loop(forPos token.Pos) ast.Loop {
	if p.tok == token.DLPAREN {
		cl := &ast.CStyleLoop{Lparen: p.pos}
		old := p.quote
		p.quote = arithmExprCmd
		p.next()
		cl.Init = p.arithmExpr(token.DLPAREN, cl.Lparen, 0, false)
		scPos := p.pos
		p.follow(p.pos, "expression", token.SEMICOLON)
		cl.Cond = p.arithmExpr(token.SEMICOLON, scPos, 0, false)
		scPos = p.pos
		p.follow(p.pos, "expression", token.SEMICOLON)
		cl.Post = p.arithmExpr(token.SEMICOLON, scPos, 0, false)
		cl.Rparen = p.arithmEnd(token.DLPAREN, cl.Lparen, old)
		// An optional ';' may separate the header from "do".
		p.gotSameLine(token.SEMICOLON)
		return cl
	}
	wi := &ast.WordIter{}
	if !p.gotLit(&wi.Name) {
		p.followErr(forPos, "for", "a literal")
	}
	if p.gotRsrv("in") {
		// Collect the word list up to the end of the line or a ';'.
		for !p.newLine && p.tok != token.EOF && p.tok != token.SEMICOLON {
			if w := p.word(); w.Parts == nil {
				p.curErr("word list can only contain words")
			} else {
				wi.List = append(wi.List, w)
			}
		}
		p.gotSameLine(token.SEMICOLON)
	} else if !p.newLine && !p.got(token.SEMICOLON) {
		p.followErr(forPos, "for foo", `"in", ; or a newline`)
	}
	return wi
}
// caseClause parses a case/in/esac clause, entered on "case"; the
// pattern lists are handled by patLists.
func (p *parser) caseClause() *ast.CaseClause {
	cc := &ast.CaseClause{Case: p.pos}
	p.next()
	cc.Word = p.followWord("case", cc.Case)
	p.followRsrv(cc.Case, "case x", "in")
	cc.List = p.patLists()
	cc.Esac = p.stmtEnd(cc, "case", "esac")
	return cc
}
// patLists parses the pattern lists of a case clause up to "esac". Each
// entry has one or more |-separated patterns, a statement list, and a
// terminating ;; / ;& / ;;& operator (defaulting to ;; when omitted on
// the last entry).
func (p *parser) patLists() (pls []*ast.PatternList) {
	for p.tok != token.EOF && !(p.tok == token.LITWORD && p.val == "esac") {
		pl := &ast.PatternList{}
		// The opening '(' before the patterns is optional.
		p.got(token.LPAREN)
		for p.tok != token.EOF {
			if w := p.word(); w.Parts == nil {
				p.curErr("case patterns must consist of words")
			} else {
				pl.Patterns = append(pl.Patterns, w)
			}
			if p.tok == token.RPAREN {
				break
			}
			if !p.got(token.OR) {
				p.curErr("case patterns must be separated with |")
			}
		}
		old := p.quote
		p.quote = switchCase
		p.next()
		pl.Stmts = p.stmts("esac")
		p.quote = old
		pl.OpPos = p.pos
		if p.tok != token.DSEMICOLON && p.tok != token.SEMIFALL && p.tok != token.DSEMIFALL {
			// The final pattern list may omit its operator; assume ;;.
			pl.Op = token.DSEMICOLON
			pls = append(pls, pl)
			break
		}
		pl.Op = p.tok
		p.next()
		pls = append(pls, pl)
		p.got(token.STOPPED)
	}
	return
}
// testClause parses a [[ ... ]] test clause, entered on "[[".
func (p *parser) testClause() *ast.TestClause {
	tc := &ast.TestClause{Left: p.pos}
	p.next()
	if p.tok == token.EOF || p.gotRsrv("]]") {
		p.posErr(tc.Left, `test clause requires at least one expression`)
	}
	tc.X = p.testExpr(token.DLBRCK, tc.Left)
	tc.Right = p.pos
	if !p.gotRsrv("]]") {
		p.matchingErr(tc.Left, token.DLBRCK, token.DRBRCK)
	}
	return tc
}
// testExpr parses an expression inside [[ ... ]]: negation, unary
// file/string tests, parenthesized expressions, and binary operators
// (both symbolic and word-like, e.g. -eq). ftok/fpos identify the
// preceding token for error reporting. nil is returned at "]]" or ')'.
func (p *parser) testExpr(ftok token.Token, fpos token.Pos) ast.ArithmExpr {
	if p.tok == token.EOF || (p.tok == token.LITWORD && p.val == "]]") {
		return nil
	}
	if p.tok == token.LITWORD {
		// Recognize word-like unary operators such as -f or -z.
		if op := testUnaryOp(p.val); op != token.ILLEGAL {
			p.tok = op
		}
	}
	if p.tok == token.NOT {
		u := &ast.UnaryExpr{OpPos: p.pos, Op: p.tok}
		p.next()
		u.X = p.testExpr(u.Op, u.OpPos)
		return u
	}
	var left ast.ArithmExpr
	switch p.tok {
	case token.TEXISTS, token.TREGFILE, token.TDIRECT, token.TCHARSP,
		token.TBLCKSP, token.TNMPIPE, token.TSOCKET, token.TSMBLINK,
		token.TSGIDSET, token.TSUIDSET, token.TREAD, token.TWRITE,
		token.TEXEC, token.TNOEMPTY, token.TFDTERM, token.TEMPSTR,
		token.TNEMPSTR, token.TOPTSET, token.TVARSET, token.TNRFVAR:
		// A unary test operator followed by its word operand.
		u := &ast.UnaryExpr{OpPos: p.pos, Op: p.tok}
		p.next()
		w := p.followWordTok(ftok, fpos)
		u.X = &w
		left = u
	case token.LPAREN:
		pe := &ast.ParenExpr{Lparen: p.pos}
		p.next()
		if pe.X = p.testExpr(token.LPAREN, pe.Lparen); pe.X == nil {
			p.posErr(pe.Lparen, "parentheses must enclose an expression")
		}
		pe.Rparen = p.matched(pe.Lparen, token.LPAREN, token.RPAREN)
		left = pe
	case token.RPAREN:
		return nil
	default:
		w := p.followWordTok(ftok, fpos)
		left = &w
	}
	if p.tok == token.EOF || (p.tok == token.LITWORD && p.val == "]]") {
		return left
	}
	// A binary operator may follow; otherwise return just the left side.
	switch p.tok {
	case token.LAND, token.LOR, token.LSS, token.GTR:
	case token.LITWORD:
		if p.tok = testBinaryOp(p.val); p.tok == token.ILLEGAL {
			p.curErr("not a valid test operator: %s", p.val)
		}
	case token.RPAREN:
		return left
	default:
		p.curErr("not a valid test operator: %v", p.tok)
	}
	b := &ast.BinaryExpr{
		OpPos: p.pos,
		Op:    p.tok,
		X:     left,
	}
	if p.tok == token.TREMATCH {
		// The right side of =~ is lexed as a regular expression.
		old := p.quote
		p.quote = testRegexp
		p.next()
		p.quote = old
	} else {
		p.next()
	}
	if b.Y = p.testExpr(b.Op, b.OpPos); b.Y == nil {
		p.followErr(b.OpPos, b.Op.String(), "an expression")
	}
	return b
}
// testUnaryOp translates a word into its unary test operator token, or
// token.ILLEGAL if the word is not one. Covers negation plus the
// file, string and variable test flags of [[ ... ]].
func testUnaryOp(s string) token.Token {
	switch s {
	case "!":
		return token.NOT
	case "-b":
		return token.TBLCKSP
	case "-c":
		return token.TCHARSP
	case "-d":
		return token.TDIRECT
	case "-e", "-a":
		return token.TEXISTS
	case "-f":
		return token.TREGFILE
	case "-g":
		return token.TSGIDSET
	case "-h", "-L":
		return token.TSMBLINK
	case "-n":
		return token.TNEMPSTR
	case "-o":
		return token.TOPTSET
	case "-p":
		return token.TNMPIPE
	case "-r":
		return token.TREAD
	case "-R":
		return token.TNRFVAR
	case "-s":
		return token.TNOEMPTY
	case "-S":
		return token.TSOCKET
	case "-t":
		return token.TFDTERM
	case "-u":
		return token.TSUIDSET
	case "-v":
		return token.TVARSET
	case "-w":
		return token.TWRITE
	case "-x":
		return token.TEXEC
	case "-z":
		return token.TEMPSTR
	}
	return token.ILLEGAL
}
// testBinaryOp translates a word into its binary test operator token,
// or token.ILLEGAL if the word is not one. Covers string comparison,
// regex match, arithmetic comparison and file-time/identity tests.
func testBinaryOp(s string) token.Token {
	switch s {
	case "=":
		return token.ASSIGN
	case "==":
		return token.EQL
	case "!=":
		return token.NEQ
	case "=~":
		return token.TREMATCH
	case "-eq":
		return token.TEQL
	case "-ne":
		return token.TNEQ
	case "-lt":
		return token.TLSS
	case "-le":
		return token.TLEQ
	case "-gt":
		return token.TGTR
	case "-ge":
		return token.TGEQ
	case "-nt":
		return token.TNEWER
	case "-ot":
		return token.TOLDER
	case "-ef":
		return token.TDEVIND
	}
	return token.ILLEGAL
}
// declClause parses a bash declare/local clause: leading -option words,
// then assignments or bare words until the end of the statement.
func (p *parser) declClause() *ast.DeclClause {
	ds := &ast.DeclClause{Declare: p.pos, Local: p.val == "local"}
	p.next()
	for p.tok == token.LITWORD && p.val[0] == '-' {
		ds.Opts = append(ds.Opts, p.word())
	}
	for !p.newLine && !stopToken(p.tok) && !p.peekRedir() {
		if as, ok := p.getAssign(); ok {
			ds.Assigns = append(ds.Assigns, as)
		} else if w := p.word(); w.Parts == nil {
			p.followErr(p.pos, "declare", "words")
		} else {
			// A bare word becomes an assignment with only a value.
			ds.Assigns = append(ds.Assigns, &ast.Assign{Value: w})
		}
	}
	return ds
}
// evalClause parses a bash eval clause and its single statement.
func (p *parser) evalClause() *ast.EvalClause {
	ec := &ast.EvalClause{Eval: p.pos}
	p.next()
	ec.Stmt, _ = p.getStmt(false)
	return ec
}
// letClause parses a bash let clause: one or more space-separated
// arithmetic expressions, parsed in compact mode so a space ends each
// expression.
func (p *parser) letClause() *ast.LetClause {
	lc := &ast.LetClause{Let: p.pos}
	old := p.quote
	p.quote = arithmExprCmd
	p.next()
	p.stopNewline = true
	for !p.newLine && !stopToken(p.tok) && p.tok != token.STOPPED && !p.peekRedir() {
		x := p.arithmExpr(token.LET, lc.Let, 0, true)
		if x == nil {
			p.followErr(p.pos, "let", "arithmetic expressions")
		}
		lc.Exprs = append(lc.Exprs, x)
	}
	if len(lc.Exprs) == 0 {
		p.posErr(lc.Let, "let clause requires at least one expression")
	}
	p.stopNewline = false
	p.quote = old
	p.got(token.STOPPED)
	return lc
}
// bashFuncDecl parses a bash-style "function name [()]" declaration,
// entered on the "function" reserved word.
func (p *parser) bashFuncDecl() *ast.FuncDecl {
	fpos := p.pos
	p.next()
	if p.tok != token.LITWORD {
		// The function name must be a plain literal word.
		if w := p.followWord("function", fpos); p.err == nil {
			rawName := string(p.src[w.Pos()-1 : w.End()-1])
			p.posErr(w.Pos(), "invalid func name: %q", rawName)
		}
	}
	name := ast.Lit{ValuePos: p.pos, Value: p.val}
	p.next()
	if p.gotSameLine(token.LPAREN) {
		// The "()" after the name is optional in bash.
		p.follow(name.ValuePos, "foo(", token.RPAREN)
	}
	return p.funcDecl(name, fpos)
}
// callExpr parses the arguments and inline redirects of a simple
// command whose first word w has already been read. The CallExpr and
// its first few args share a single allocation to reduce garbage.
func (p *parser) callExpr(s *ast.Stmt, w ast.Word) *ast.CallExpr {
	alloc := &struct {
		ce ast.CallExpr
		ws [4]ast.Word
	}{}
	ce := &alloc.ce
	ce.Args = alloc.ws[:1]
	ce.Args[0] = w
	for !p.newLine {
		switch p.tok {
		case token.EOF, token.SEMICOLON, token.AND, token.OR,
			token.LAND, token.LOR, token.PIPEALL,
			token.DSEMICOLON, token.SEMIFALL, token.DSEMIFALL:
			// Statement terminators end the argument list.
			return ce
		case token.STOPPED:
			p.next()
		case token.LITWORD:
			if p.npos < len(p.src) && (p.src[p.npos] == '>' || p.src[p.npos] == '<') {
				// An fd number directly before '>' or '<': a redirect.
				p.doRedirect(s)
				continue
			}
			ce.Args = append(ce.Args, ast.Word{
				Parts: p.wps(p.lit(p.pos, p.val)),
			})
			p.next()
		case token.BQUOTE:
			if p.quote == subCmdBckquo {
				// A backquote here closes the enclosing substitution.
				return ce
			}
			fallthrough
		case token.LIT, token.DOLLBR, token.DOLLDP, token.DOLLPR,
			token.DOLLAR, token.CMDIN, token.CMDOUT, token.SQUOTE,
			token.DOLLSQ, token.DQUOTE, token.DOLLDQ, token.DOLLBK:
			ce.Args = append(ce.Args, ast.Word{Parts: p.wordParts()})
		case token.GTR, token.SHR, token.LSS, token.DPLIN, token.DPLOUT,
			token.RDRINOUT, token.SHL, token.DHEREDOC,
			token.WHEREDOC, token.RDRALL, token.APPALL:
			p.doRedirect(s)
		case token.RPAREN:
			if p.quote == subCmd {
				// A ')' closes the enclosing subshell or substitution.
				return ce
			}
			fallthrough
		default:
			p.curErr("a command can only contain words and redirects")
		}
	}
	return ce
}
// funcDecl finishes a function declaration with the given name by
// reading its body statement. pos is where the declaration starts; it
// differs from the name's position only for bash-style "function foo"
// declarations, which is how BashStyle is derived.
func (p *parser) funcDecl(name ast.Lit, pos token.Pos) *ast.FuncDecl {
	fd := &ast.FuncDecl{
		Position:  pos,
		BashStyle: pos != name.ValuePos,
		Name:      name,
	}
	if fd.Body, _ = p.getStmt(false); fd.Body == nil {
		p.followErr(fd.Pos(), "foo()", "a statement")
	}
	return fd
}
|
// Package mode implements the Mode client MQTT API.
// The interface is the MqttClient struct, which supports the MQTT
// subset that is required for our devices; configuration is done
// through the MqttDelegate.
package mode
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"net"
"os"
"sync"
"time"
packet "github.com/moderepo/device-sdk-go/v3/mqtt_packet"
)
const (
	// mqttConnectTimeout bounds how long the dialer waits to establish
	// the TCP (and TLS) connection.
	mqttConnectTimeout = time.Second * 10
	// connResponseDeadline presumably bounds the wait for the server's
	// connect response (CONNACK) — confirm at its use site, which is
	// outside this file section.
	connResponseDeadline = time.Second * 10
)

// mqttDialer is the shared dialer used to open MQTT connections.
var mqttDialer = &net.Dialer{Timeout: mqttConnectTimeout}
// QoS level of message delivery. This is used in sending events to MODE.
type QOSLevel int

const (
	// QoS 0 - message delivery is not guaranteed.
	QOSAtMostOnce QOSLevel = iota
	// QoS 1 - message is delivered at least once, but duplicates may happen.
	QOSAtLeastOnce
	// QoS 2 - message is always delivered exactly once. This is currently not
	// supported.
	QOSExactlyOnce
)

// packetSendType selects which of the connection's two send paths a
// packet takes.
type packetSendType int

const (
	// Packets are written out to the stream immediately (or after the
	// the previous packet has finished sending)
	directPacketSendType packetSendType = iota
	// Packets are queued and written when processed
	queuedPacketSendType
	unhandledPacketSendType
)

// NetworkStatus tracks the lifecycle of the connection to the server.
type NetworkStatus int

const (
	// There is currently an active connection to the server
	ConnectedNetworkStatus NetworkStatus = iota
	// We have successfully disconnected from the server
	DisconnectedNetworkStatus
	// If we have had requests time out, we set to timing out. We should
	// reconnect.
	TimingOutNetworkStatus
	// Not yet connected state
	DefaultNetworkStatus
)
type (
	// MqttSubData to send in the channel to the client when we receive data
	// published for our subscription.
	MqttSubData struct {
		Topic       string
		Data        []byte
		ReceiveTime time.Time
	}

	// MqttResponse is result of an MQTT Request. Not all of these members will
	// be valid. For example, only PUBACK's will have PacketIds, and PUBLISH'es
	// will send subscription data. In some cases, there will be multiple
	// errors, so the Errs slice will be populated rather than Err.
	MqttResponse struct {
		PacketID uint16
		Err      error
		Errs     []error
	}

	// MqttAuthDelegate methods provide the security and authentication
	// information to start a connection to the MqttServer
	MqttAuthDelegate interface {
		// Returns the tls usage and configuration. If useTLS is false, a nil
		// tlsConfig should be returned.
		TLSUsageAndConfiguration() (useTLS bool, tlsConfig *tls.Config)
		// Returns authentication information
		AuthInfo() (username string, password string)
	}

	// MqttReceiverDelegate methods allow the MqttClient to communicate
	// information and events back to the user.
	MqttReceiverDelegate interface {
		// SetReceiveChannels will be called by the MqttClient. The MqttClient
		// will create the channels with the buffer size returned by
		// GetReceiveQueueSize(). The implementor of the delegate will use
		// these channels to receive information from the server, such as
		// queued responses and subscriptions:
		// subRecvCh: Data published from our subscriptions.
		// queueAckCh: API requests that are queued will receive MqttPublishID's
		//             which will be ACK'ed. The MqttQueueResult will have the
		//             MqttPublishID and the result
		// pingAckCh: True if our ping received an ACK or false if timeout
		// Note: These channels will be closed when the connection is closed (from
		// a Disconnect), so the user should stop listening to these channels when
		// OnClose() is called.
		SetReceiveChannels(subRecvCh <-chan MqttSubData,
			queueAckCh <-chan MqttResponse,
			pingAckCh <-chan MqttResponse)
		// Hook so we can clean up on closing of connections
		OnClose()
	}

	// MqttConfigDelegate methods allow the MqttClient to configure itself
	// according to the requirements of the user
	MqttConfigDelegate interface {
		// Buffer size of the incoming queues to the delegate. This is the
		// size of the three receive channels
		GetReceiveQueueSize() uint16
		// Buffer size of the outgoing queue to the server. This cannot be
		// changed after a connection is created
		GetSendQueueSize() uint16
	}

	// MqttErrorDelegate is an optional delegate which allows the MqttClient
	// a method of signaling errors that are not able to be communicated
	// through the normal channels. See handling errors in the documentation.
	MqttErrorDelegate interface {
		// The buffer size of the error channel
		GetErrorChannelSize() uint16
		// Provides the delegate the channel to receive errors
		SetErrorChannel(errCh chan error)
	}

	// MqttDelegate is the combined required interfaces that must be implemented
	// to use the MqttClient. This is a convenience that the user can use to
	// allow a single struct to implement all the required interfaces
	MqttDelegate interface {
		MqttAuthDelegate
		MqttReceiverDelegate
		MqttConfigDelegate
	}

	// MqttClient provides the public API to MQTT. We handle
	// connect, disconnect, ping, publish, and subscribe.
	// Connect, disconnect, and subscribe will block and wait for the
	// response. Ping and publish will return after the packet has been
	// sent and the response will be sent on a channel that is provided
	// by the delegate. For ping, since MQTT does not provide a mechanism
	// to distinguish between different ping requests, we do not provide
	// an API to distinguish them either. For publish, the function returns
	// a packet ID. This packet ID will be returned to the caller in the
	// channel.
	MqttClient struct {
		mqttHost      string
		mqttPort      int
		authDelegate  MqttAuthDelegate
		recvDelegate  MqttReceiverDelegate
		confDelegate  MqttConfigDelegate
		errorDelegate MqttErrorDelegate
		// conn is guarded by connMtx; nil when disconnected.
		conn         *mqttConn
		wgSend       sync.WaitGroup
		stopWriterCh chan struct{}
		wgRecv       sync.WaitGroup
		// lastErrors is guarded by errorMtx.
		lastErrors        []error
		delegateSubRecvCh chan MqttSubData
		connMtx           sync.Mutex
		errorMtx          sync.Mutex
	}

	// Delegate for the mqttConn to call back to the MqttClient
	mqttReceiver interface {
		// Called by the connection when receiving publishes
		handlePubReceive(pkt *packet.PublishPacket, receiveTime time.Time)
		// Called by the connection when there is an error
		appendError(error)
	}

	// packetSendData pairs an outgoing packet with the channel on which
	// its response should be delivered.
	packetSendData struct {
		pkt      packet.Packet
		resultCh chan<- MqttResponse
	}

	// Internal structure used by the client to communicate with the mqttd
	// server. This is a thin wrapper over the mqtt_packet package.
	mqttConn struct {
		// delegate to handle receiving asynchronous events from the server.
		// This is more explicit than a channel, but it has the drawback
		// of just being a functional call. So, the implementation should
		// consist of routing (which it is, since it is just a callback
		// into the MqttClient). If circumstances change, we can
		// revisit this decision.
		Receiver mqttReceiver
		conn     net.Conn
		stream   *packet.Stream
		// Sequential packet ID. Used to match to acks our actions (pub)
		// excluding connects and pings. We also don't have packet ID's for
		// receiving pubs from our subscriptions because we didn't initiate them.
		lastPacketID uint16
		status       NetworkStatus
		// Updated on every send and receive so we know if we can avoid
		// sending pings
		lastActivity time.Time
		// Channel to write to the server stream
		directSendPktCh chan packetSendData
		queuedSendPktCh chan packetSendData
		// Channels to respond to clients.
		connRespCh  chan MqttResponse
		subRespCh   chan MqttResponse
		unsubRespCh chan MqttResponse
		pingRespCh  chan MqttResponse
		queueAckCh  chan MqttResponse
		// This is optional and may be nil
		errCh             chan error
		mutex             sync.Mutex
		statusMutex       sync.RWMutex
		lastActivityMutex sync.RWMutex
	}
)
// WithMqttAuthDelegate returns an option that sets the client's
// authentication delegate.
func WithMqttAuthDelegate(authDelegate MqttAuthDelegate) func(*MqttClient) {
	return func(c *MqttClient) {
		c.authDelegate = authDelegate
	}
}
// WithMqttReceiverDelegate returns an option that sets the client's
// receiver delegate.
func WithMqttReceiverDelegate(recvDelegate MqttReceiverDelegate) func(*MqttClient) {
	return func(c *MqttClient) {
		c.recvDelegate = recvDelegate
	}
}
// WithMqttConfigDelegate returns an option that sets the client's
// configuration delegate.
func WithMqttConfigDelegate(confDelegate MqttConfigDelegate) func(*MqttClient) {
	return func(c *MqttClient) {
		c.confDelegate = confDelegate
	}
}
// WithMqttErrorDelegate returns an option that sets the client's
// optional error delegate.
func WithMqttErrorDelegate(errorDelegate MqttErrorDelegate) func(*MqttClient) {
	return func(c *MqttClient) {
		c.errorDelegate = errorDelegate
	}
}
// WithMqttDelegate returns an option that sets the auth, receiver and
// config delegates from a single combined MqttDelegate. Note that the
// error delegate is not part of MqttDelegate and must be set separately
// with WithMqttErrorDelegate.
func WithMqttDelegate(delegate MqttDelegate) func(*MqttClient) {
	return func(c *MqttClient) {
		c.authDelegate = delegate
		c.recvDelegate = delegate
		c.confDelegate = delegate
	}
}
// NewMqttClient will create client and open a stream. A client is invalid if
// not connected, and you need to create a new client to reconnect.
// It returns nil unless the auth, receiver and config delegates have all
// been supplied through the option functions.
func NewMqttClient(mqttHost string, mqttPort int,
	dels ...func(*MqttClient)) *MqttClient {
	c := &MqttClient{
		mqttHost:   mqttHost,
		mqttPort:   mqttPort,
		lastErrors: make([]error, 0, 5),
	}
	for _, apply := range dels {
		apply(c)
	}
	// All three required delegates must be present; otherwise the
	// client would be unusable, so signal misconfiguration with nil.
	if c.authDelegate == nil || c.recvDelegate == nil ||
		c.confDelegate == nil {
		return nil
	}
	return c
}
// IsConnected will return true if we have a successfully CONNACK'ed response.
func (client *MqttClient) IsConnected() bool {
	return client.getConn() != nil && client.getStatus() == ConnectedNetworkStatus
}
// setConn replaces the client's connection under the connection mutex.
func (client *MqttClient) setConn(c *mqttConn) {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	client.conn = c
}
// getConn returns the current connection (nil when disconnected) under
// the connection mutex.
func (client *MqttClient) getConn() *mqttConn {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	return client.conn
}
// GetLastActivity will return the time of the last send or receive.
// NOTE(review): this dereferences client.conn without a nil check, so
// calling it while disconnected panics — confirm callers only use it on
// a connected client.
func (client *MqttClient) GetLastActivity() time.Time {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	return client.conn.GetLastActivity()
}
// sendPacket forwards p to the underlying connection and returns the
// channel on which the response will arrive. Assumes a live connection:
// client.conn must not be nil.
func (client *MqttClient) sendPacket(ctx context.Context,
	p packet.Packet) (chan MqttResponse, error) {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	return client.conn.sendPacket(ctx, p)
}
// setStatus updates the connection's network status; client.conn must
// not be nil.
func (client *MqttClient) setStatus(status NetworkStatus) {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	client.conn.setStatus(status)
}
// getStatus returns the connection's network status; client.conn must
// not be nil.
func (client *MqttClient) getStatus() NetworkStatus {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	return client.conn.getStatus()
}
// getPacketID returns the next packet ID from the connection, used to
// match requests to their acks; client.conn must not be nil.
func (client *MqttClient) getPacketID() uint16 {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	return client.conn.getPacketID()
}
// queuePacket hands p to the connection's queued send path, returning
// the packet ID assigned to it; client.conn must not be nil.
func (client *MqttClient) queuePacket(ctx context.Context,
	p packet.Packet) (uint16, error) {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	return client.conn.queuePacket(ctx, p)
}
// sendQueueingError forwards a queueing error to the connection;
// client.conn must not be nil.
func (client *MqttClient) sendQueueingError(err error) {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	client.conn.sendQueueingError(err)
}
// Connect will initiate a connection to the server. It will block until we
// receive a CONNACK from the server.
func (client *MqttClient) Connect(ctx context.Context) error {
	if client.getConn() != nil {
		return errors.New("Cannot connect when already connected")
	}
	if err := client.createMqttConnection(); err != nil {
		return err
	}
	// Build the CONNECT packet from the auth delegate's credentials.
	user, pwd := client.authDelegate.AuthInfo()
	p := packet.NewConnectPacket()
	p.Version = packet.Version311
	p.Username = user
	p.Password = pwd
	p.CleanSession = true
	respChan, err := client.sendPacket(ctx, p)
	if err != nil {
		logError("[MQTT] failed to send packet: %s", err.Error())
		return err
	}
	// wgRecv is presumably released inside receivePacket — confirm there.
	client.wgRecv.Add(1)
	resp := client.receivePacket(ctx, respChan)
	if resp.Err != nil {
		client.setStatus(DisconnectedNetworkStatus)
		return resp.Err
	}
	// If we made it here, we consider ourselves connected
	client.setStatus(ConnectedNetworkStatus)
	return nil
}
// Disconnect will end this connection with the server. We will block until
// the server closes the connection.
// Note: We might want to wait, but order is important here.
func (client *MqttClient) Disconnect(ctx context.Context) error {
	if client.getConn() == nil {
		// nothing to disconnect. Return
		return nil
	}
	// The connection is torn down even if sending DISCONNECT fails.
	defer func() {
		client.shutdownConnection()
	}()
	// Maybe we want to add a Connecting/Disconnecting status?
	client.setStatus(DefaultNetworkStatus)
	p := packet.NewDisconnectPacket()
	_, err := client.sendPacket(ctx, p)
	if err != nil {
		logError("[MQTT] failed to send packet: %s", err.Error())
		return err
	}
	return nil
}
// Subscribe will subscribe to the topics in subs by sending a subscribe
// request. This is a synchronous call so it will block until
// a response is received from the server. It will return a slice of errors
// which will be in the same order as the subscriptions in Subscriptions().
func (client *MqttClient) Subscribe(ctx context.Context,
	subs []string) []error {
	p := packet.NewSubscribePacket()
	// Size the subscription list for the actual request instead of the
	// previous fixed guess of 10.
	p.Subscriptions = make([]packet.Subscription, 0, len(subs))
	p.PacketID = client.getPacketID()
	for _, sub := range subs {
		// We have no protection to keep you from subscribing to the same
		// topic multiple times. Maybe we should? Maybe the server would send
		// us 2 messages then for each topic?
		logInfo("[MQTT] subscribing to topic %s", sub)
		p.Subscriptions = append(p.Subscriptions, packet.Subscription{
			Topic: sub,
			// MODE only supports QoS0 for subscriptions
			QOS: packet.QOSAtMostOnce,
		})
	}
	respChan, err := client.sendPacket(ctx, p)
	if err != nil {
		logError("[MQTT] failed to send packet: %s", err.Error())
		return []error{err}
	}
	// wgRecv is presumably released inside receivePacket — confirm there.
	client.wgRecv.Add(1)
	resp := client.receivePacket(ctx, respChan)
	if resp.Errs != nil {
		return resp.Errs
	}
	return nil
}
// Unsubscribe will send an unsubscribe request for the topics in subs.
// This is a synchronous call.
func (client *MqttClient) Unsubscribe(ctx context.Context,
	subs []string) []error {
	pkt := packet.NewUnsubscribePacket()
	pkt.Topics = subs
	pkt.PacketID = client.getPacketID()
	respChan, sendErr := client.sendPacket(ctx, pkt)
	if sendErr != nil {
		logError("[MQTT] failed to send packet: %s", sendErr.Error())
		return []error{sendErr}
	}
	// Balanced by the wgRecv.Done() inside receivePacket.
	client.wgRecv.Add(1)
	if resp := client.receivePacket(ctx, respChan); resp.Errs != nil {
		return resp.Errs
	}
	return nil
}
// Ping sends an MQTT PINGREQ event to the server. This is an asynchronous
// call, so we will always return success if we were able to queue the message
// for delivery. Results will be sent on the delegate's pingAckCh
func (client *MqttClient) Ping(ctx context.Context) error {
	if _, err := client.queuePacket(ctx, packet.NewPingreqPacket()); err != nil {
		logError("[MQTT] failed to send packet: %s", err.Error())
		return err
	}
	return nil
}
// PingAndWait sends an MQTT PINGREQ event to the server and waits for the
// response. If this method is used instead of the asynchronous Ping, user
// should not be listening on the pingAckCh channel since this function may
// timeout waiting for the response an error will be returned.
func (client *MqttClient) PingAndWait(ctx context.Context) error {
	respChan, sendErr := client.sendPacket(ctx, packet.NewPingreqPacket())
	if sendErr != nil {
		logError("[MQTT] failed to send packet: %s", sendErr.Error())
		return sendErr
	}
	// Balanced by the wgRecv.Done() inside receivePacket.
	client.wgRecv.Add(1)
	return client.receivePacket(ctx, respChan).Err
}
// Publish sends an MQTT Publish event to subscribers on the specified
// topic. This is an asynchronous call, so we will always return a packet
// ID as long as the request is able to be queued. After queueing, the
// any subsequent errors or results will be written to the delegate's
// queueAckCh.
// For QOSAtMostOnce, there will only be an error returned if the request was
// unable to be queued. We receive no ACK from the server.
// For QOSAtLeastOnce, we will receive an ACK if we were successful.
// For any other QOS levels (QOSExactlyOnce), they are not supported and an
// error is returned immediately and the request will not be sent.
func (client *MqttClient) Publish(ctx context.Context, qos QOSLevel,
	topic string, data []byte) (uint16, error) {
	// Packet ID 0 means "assign a fresh ID" (see publishWithID/queuePacket).
	return client.publishWithID(ctx, qos, topic, data, 0)
}
// Republish publishes with a caller-supplied packet ID instead of assigning
// a fresh one — presumably for retrying an earlier Publish under the same ID;
// confirm intended use with callers. Otherwise identical to Publish.
func (client *MqttClient) Republish(ctx context.Context, qos QOSLevel,
	topic string, data []byte, packetID uint16) (uint16, error) {
	return client.publishWithID(ctx, qos, topic, data, packetID)
}
// publishWithID builds a PUBLISH packet for topic/data at the requested QoS
// and queues it for delivery. QoS 2 (and anything else unrecognized) is
// rejected before anything is queued.
func (client *MqttClient) publishWithID(ctx context.Context, qos QOSLevel,
	topic string, data []byte, packetID uint16) (uint16, error) {
	// Map the public QoS level onto the packet-level constant.
	var qosByte byte
	switch qos {
	case QOSAtMostOnce:
		qosByte = packet.QOSAtMostOnce
	case QOSAtLeastOnce:
		qosByte = packet.QOSAtLeastOnce
	default:
		return 0, errors.New("unsupported qos level")
	}
	pub := packet.NewPublishPacket()
	pub.PacketID = packetID
	pub.Message = packet.Message{
		Topic:   topic,
		QOS:     qosByte,
		Payload: data,
	}
	return client.queuePacket(ctx, pub)
}
// Each connect, we need to create a new mqttConnection.
// Wires up the delegate channels, dials the server, and starts the writer
// and reader goroutines tracked by wgSend/wgRecv.
func (client *MqttClient) createMqttConnection() error {
	// The three receive channels are created here with the delegate-chosen
	// buffer size and handed to the receiver delegate before connecting.
	receiveQueueSize := client.confDelegate.GetReceiveQueueSize()
	subRecvCh := make(chan MqttSubData, receiveQueueSize)
	queueAckCh := make(chan MqttResponse, receiveQueueSize)
	pingAckCh := make(chan MqttResponse, receiveQueueSize)
	client.recvDelegate.SetReceiveChannels(subRecvCh, queueAckCh, pingAckCh)
	client.delegateSubRecvCh = subRecvCh
	useTLS, tlsConfig := client.authDelegate.TLSUsageAndConfiguration()
	sendQueueSize := client.confDelegate.GetSendQueueSize()
	conn := newMqttConn(tlsConfig, client.mqttHost, client.mqttPort, useTLS,
		queueAckCh, pingAckCh, sendQueueSize)
	if conn == nil {
		return errors.New("Unable to create a socket to the server")
	}
	// The error channel is optional; only wired up when a delegate exists.
	if client.errorDelegate != nil {
		errCh := make(chan error, client.errorDelegate.GetErrorChannelSize())
		client.errorDelegate.SetErrorChannel(errCh)
		conn.errCh = errCh
	}
	client.setConn(conn)
	conn.Receiver = client
	// We want to pass our WaitGroup's to the connection reader and writer, so
	// we don't put these in the mqttConn's constructor.
	client.stopWriterCh = make(chan struct{})
	client.wgSend.Add(1)
	go conn.runPacketWriter(client.stopWriterCh, &client.wgSend)
	client.wgRecv.Add(1)
	go conn.runPacketReader(&client.wgRecv)
	return nil
}
// Shutting down gracefully is tricky. But, we try our best to drain the channels
// and avoid any panics.
func (client *MqttClient) shutdownConnection() {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	// We skip encapsulation of the connection class so the steps are clear
	// 1. Send close to the packetWriter goroutine
	client.stopWriterCh <- struct{}{}
	// 2. Wait until the done channel has been read, since there are some queued
	// writes that might be sent before the done channel has been read.
	client.wgSend.Wait()
	// 3. Close the writer channels (no more sends can occur: the writer
	// goroutine has exited and we hold connMtx).
	close(client.conn.directSendPktCh)
	close(client.conn.queuedSendPktCh)
	// 4. Close the connection with the server. This will break the
	// packet reader out of its loop. Set to disconnected here so their
	// reader knows that it was not an error
	client.conn.setStatus(DisconnectedNetworkStatus)
	client.conn.conn.Close()
	// 5. Wait for the packet reader to finish
	client.wgRecv.Wait()
	// 6. Notify the client that we are disconnecting
	client.recvDelegate.OnClose()
	// 7. Close the channels for handling responses
	// NOTE(review): unsubRespCh is never closed here, unlike its siblings.
	// Harmless (it will be garbage collected) but confirm it's intentional.
	close(client.conn.connRespCh)
	close(client.conn.subRespCh)
	// 8. Close the channels to the client readers.
	close(client.conn.queueAckCh)
	close(client.conn.pingRespCh)
	close(client.delegateSubRecvCh)
	if client.conn.errCh != nil {
		close(client.conn.errCh)
	}
	// Mark the client as disconnected; getConn() now returns nil.
	client.conn = nil
}
// Helper function called by the synchronous API to handle processing
// of the responses. For the asynchronous API, the caller might do something
// similar, but also handling packet ID's.
func (client *MqttClient) receivePacket(ctx context.Context,
	respChan chan MqttResponse) MqttResponse {
	// Balances the wgRecv.Add(1) performed by the caller before invoking us.
	defer client.wgRecv.Done()
	// Block on the return channel or timeout
	select {
	case <-ctx.Done():
		err := ctx.Err()
		logError("[MQTT] timeout waiting for reply packet %s", err)
		return MqttResponse{Err: err}
	case response := <-respChan:
		return response
	}
}
// Implementation of the delegate for mqttConn to handle publish from the
// server on topics that we've subscribed to. We only unpack it and send it
// to the caller
func (client *MqttClient) handlePubReceive(p *packet.PublishPacket,
	receiveTime time.Time) {
	logInfo("[MQTT] received message for topic %s", p.Message.Topic)
	pubData := MqttSubData{
		Topic:       p.Message.Topic,
		Data:        p.Message.Payload,
		ReceiveTime: receiveTime,
	}
	// If we have successfully queued the pub to the user.
	// Non-blocking send: this runs on the packet reader goroutine, which
	// must not stall on a slow consumer.
	select {
	case client.delegateSubRecvCh <- pubData:
	default:
		logError("Caller could not receive publish data. SubRecCh full?")
		client.sendQueueingError(nil)
		return
	}
	if p.Message.QOS != packet.QOSAtMostOnce {
		ackPkt := packet.NewPubackPacket()
		ackPkt.PacketID = p.PacketID
		// For everything except QOS2: if we have queued the message to the
		// user, we consider that successfully published to us, so we attempt
		// to send an ACK back to the server.
		ctx, cancel := context.WithTimeout(context.Background(),
			connResponseDeadline)
		defer cancel()
		if _, err := client.queuePacket(ctx, ackPkt); err != nil {
			logError("[MQTT] Queueing error on handlePubReceive %+v", err)
			client.sendQueueingError(err)
		}
	}
}
// TakeRemainingErrors returns the accumulated errors and resets the
// accumulator. If there is no error delegate or the error delegate's error
// channel is full, we "queue" errors in a slice that can be fetched here.
func (client *MqttClient) TakeRemainingErrors() []error {
	client.errorMtx.Lock()
	defer client.errorMtx.Unlock()
	taken := client.lastErrors
	client.lastErrors = make([]error, 0, 5)
	return taken
}
// appendError records err in the overflow slice fetched by
// TakeRemainingErrors, guarded by errorMtx.
func (client *MqttClient) appendError(err error) {
	client.errorMtx.Lock()
	defer client.errorMtx.Unlock()
	client.lastErrors = append(client.lastErrors, err)
}
// newMqttConn dials the server (TLS or plain TCP, per useTLS) and builds the
// mqttConn with all of its channels. Returns nil if the dial fails.
func newMqttConn(tlsConfig *tls.Config, mqttHost string,
	mqttPort int, useTLS bool, queueAckCh chan MqttResponse,
	pingAckCh chan MqttResponse, outgoingQueueSize uint16) *mqttConn {
	addr := fmt.Sprintf("%s:%d", mqttHost, mqttPort)
	var conn net.Conn
	var err error
	if useTLS {
		if conn, err = tls.DialWithDialer(mqttDialer, "tcp", addr,
			tlsConfig); err != nil {
			logError("MQTT TLS dialer failed: %s", err.Error())
			return nil
		}
	} else {
		if conn, err = mqttDialer.Dial("tcp", addr); err != nil {
			logError("MQTT dialer failed: %s", err.Error())
			return nil
		}
	}
	return &mqttConn{
		conn:   conn,
		stream: packet.NewStream(conn, conn),
		status: DefaultNetworkStatus,
		// The two channels that we write packets to send. The packetWriter
		// will listen on these and write out to the stream.
		// blocking, non-buffered
		directSendPktCh: make(chan packetSendData),
		// blocking, queue size specified by the delegate. If it is insufficient,
		// we will lose packets.
		queuedSendPktCh: make(chan packetSendData, outgoingQueueSize),
		// These are synchronous requests, so we send the packet, then wait for the response. But,
		// we create a buffer of 1, so if the response is received before we start listening
		// to this channel, it won't be dropped
		connRespCh:  make(chan MqttResponse, 1),
		subRespCh:   make(chan MqttResponse, 1),
		unsubRespCh: make(chan MqttResponse, 1),
		// These are passed to us by the client, with a buffer sized specified
		// by the delegate, so it is the delegate's responsibility to set the
		// size appropriately or we will start losing responses.
		pingRespCh: pingAckCh,
		queueAckCh: queueAckCh,
	}
}
// setStatus updates the connection's network status under statusMutex.
func (conn *mqttConn) setStatus(status NetworkStatus) {
	conn.statusMutex.Lock()
	defer conn.statusMutex.Unlock()
	conn.status = status
}
// getStatus returns the connection's network status. statusMutex is an
// RWMutex, so take the read lock: concurrent readers no longer serialize,
// consistent with how GetLastActivity uses lastActivityMutex.
func (conn *mqttConn) getStatus() NetworkStatus {
	conn.statusMutex.RLock()
	defer conn.statusMutex.RUnlock()
	return conn.status
}
// setLastActivity records the time of the most recent server activity
// under lastActivityMutex.
func (conn *mqttConn) setLastActivity(t time.Time) {
	conn.lastActivityMutex.Lock()
	defer conn.lastActivityMutex.Unlock()
	conn.lastActivity = t
}
// GetLastActivity returns the time of the most recent server activity,
// taken under the read lock.
func (conn *mqttConn) GetLastActivity() time.Time {
	conn.lastActivityMutex.RLock()
	defer conn.lastActivityMutex.RUnlock()
	return conn.lastActivity
}
// We only queue pings (PINGREQ) and publishes (PUBLISH). Theoretically, we
// could queue subscribes (SUBSCRIBE) since they have packet ID's like
// publishes. But, for simplicity, those are synchronous, since, in practice,
// those are either on startup, or, at least, on rare occasions.
// Returns the packet ID assigned (0 for non-PUBLISH packets) or an error if
// the connection is unusable or the queue stays full until ctx expires.
func (conn *mqttConn) queuePacket(ctx context.Context,
	p packet.Packet) (uint16, error) {
	if conn == nil {
		return 0, errors.New("Connection unstable. Unable to send")
	}
	// Read the status once instead of twice — each getStatus call takes the
	// status lock.
	if status := conn.getStatus(); status == DisconnectedNetworkStatus ||
		status == TimingOutNetworkStatus {
		return 0, errors.New("Connection unstable. Unable to send")
	}
	packetID := uint16(0)
	if p.Type() == packet.PUBLISH {
		pubPkt := p.(*packet.PublishPacket)
		if pubPkt.PacketID != 0 {
			// Caller supplied an ID (e.g. Republish); keep it.
			packetID = pubPkt.PacketID
		} else {
			// Assign a fresh ID. (packetID is already a uint16; the old
			// redundant conversion is gone.)
			packetID = conn.getPacketID()
			pubPkt.PacketID = packetID
		}
	}
	pktSendData := packetSendData{pkt: p,
		resultCh: conn.getResponseChannel(p.Type()),
	}
	select {
	// Put writing to the channel in a select because the buffer might be full
	case conn.queuedSendPktCh <- pktSendData:
		// Successfully sent and we don't know when we'll get a response back,
		// so just don't do anything.
	case <-ctx.Done():
		logError("Exceeded timeout sending %s for id %d", p.Type(), packetID)
		return 0, fmt.Errorf("Send Queue full %s for id %d", p.Type(), packetID)
	}
	return packetID, nil
}
// Called by the client to send packets to the server.
// Synchronous path: hands the packet to the writer goroutine, waits for the
// write result, and returns the channel the eventual server response will
// arrive on (nil for DISCONNECT, which has no response).
func (conn *mqttConn) sendPacket(ctx context.Context,
	p packet.Packet) (chan MqttResponse, error) {
	// Nil-receiver check: callers may invoke this through a nil *mqttConn.
	if conn == nil || conn.getStatus() == DisconnectedNetworkStatus ||
		conn.getStatus() == TimingOutNetworkStatus {
		return nil, errors.New("Connection unstable. Unable to send")
	}
	// Unbuffered: the writer's send of the result rendezvouses with our
	// receive below. Closed on return so the writer can't leak a send.
	resultCh := make(chan MqttResponse)
	defer close(resultCh)
	select {
	case conn.directSendPktCh <- packetSendData{
		pkt:      p,
		resultCh: resultCh,
	}:
	case <-ctx.Done():
		logError("Exceeded timeout sending %s", p.Type())
		return nil, errors.New("Timeout Error")
	}
	// Wait for the result, and then give that result back to the caller, who
	// will handle the error
	result := <-resultCh
	return conn.getResponseChannel(p.Type()), result.Err
}
// runPacketWriter is the single goroutine that writes packets to the stream.
// It drains both send channels until stopWriterCh fires; wg.Done() on exit
// lets shutdownConnection know it is safe to close the send channels.
func (conn *mqttConn) runPacketWriter(stopWriterCh chan struct{},
	wg *sync.WaitGroup) {
	defer func() {
		logInfo("[MQTT] packet writer is exiting")
		wg.Done()
	}()
	exitLoop := false
	for !exitLoop {
		// We read two channels for writing out packets. The directSend
		// channel received synchronous, which has an unbuffered queue.
		// The queuedSend can back up because the client already has an ID to
		// check for the response.
		// NOTE: Writes are sent on a bufio.Writer, so writes are almost
		// guaranteed to succeed, even if they never reach the server. See the
		// note in writePacket for more information.
		select {
		case pktSendData := <-conn.directSendPktCh:
			// Synchronous path: sendPacket is blocked receiving on resultCh,
			// so this send always rendezvouses.
			resultCh := pktSendData.resultCh
			resultCh <- MqttResponse{Err: conn.writePacket(pktSendData.pkt)}
		case pktSendData := <-conn.queuedSendPktCh:
			pktID := uint16(0)
			if pktSendData.pkt.Type() == packet.PUBLISH {
				pubPkt := pktSendData.pkt.(*packet.PublishPacket)
				pktID = pubPkt.PacketID
			}
			// Get the packet ID, if any, for errors. This is a long lived channel, so
			// it's possible to be full.
			resultCh := pktSendData.resultCh
			err := conn.writePacket(pktSendData.pkt)
			if err != nil {
				// If there was an error sending, we can notify the caller
				// immediately. Non-blocking: fall back to the error path when
				// the response channel is full.
				logError("[MQTT] Error occurred on runPacketWriter %v", err)
				select {
				case resultCh <- MqttResponse{
					PacketID: pktID,
					Err:      err,
				}:
				default:
					logError("[MQTT] Queueing error on runPacketWriter %+v", err)
					conn.sendQueueingError(err)
				}
			}
		case <-stopWriterCh:
			exitLoop = true
		}
	}
}
// createResponseForPacket unpacks a server packet into the MqttResponse that
// will be routed to the matching response channel. If this gets complicated,
// we can handle each type separately.
func (conn *mqttConn) createResponseForPacket(p packet.Packet) MqttResponse {
	switch p.Type() {
	case packet.PINGRESP:
		// successful ping response is just nil errors
		return MqttResponse{Err: nil}
	case packet.PUBACK:
		pubAck := p.(*packet.PubackPacket)
		return MqttResponse{PacketID: pubAck.PacketID, Err: nil}
	case packet.CONNACK:
		connAck := p.(*packet.ConnackPacket)
		var err error
		if connAck.ReturnCode != packet.ConnectionAccepted {
			err = connAck.ReturnCode
		}
		return MqttResponse{Err: err}
	case packet.UNSUBACK:
		unsubAck := p.(*packet.UnsubackPacket)
		// There's only a packet ID.
		return MqttResponse{
			PacketID: unsubAck.PacketID}
	case packet.SUBACK:
		subAck := p.(*packet.SubackPacket)
		resp := MqttResponse{
			// We have do asynchronous SUBSCRIPTIONS, so our packets won't
			// have packet ID's. But, if we ever do them, this is one place that
			// we won't have to change our code.
			PacketID: subAck.PacketID,
		}
		for i, code := range subAck.ReturnCodes {
			if code == packet.QOSFailure {
				err := errors.New("subscription rejected")
				if i == 0 {
					// If someone just checks Err of the response, at least
					// they'll know that there was a failure.
					resp.Err = err
				}
				// Record every rejected subscription. (Previously only the
				// first return code's failure was appended, so rejections of
				// later topics were silently dropped and Err was never set.)
				resp.Errs = append(resp.Errs, err)
			}
		}
		return resp
	default:
		// Include the %s verb: the original call had no format verb, so the
		// type was never printed.
		logError("Unhandled packet type for response: %s", p.Type())
		return MqttResponse{}
	}
}
// runPacketReader loops reading packets from the server until the connection
// closes or a fatal read error occurs. Publishes go to the Receiver delegate;
// everything else is unpacked and routed to its response channel. wg.Done()
// on exit is the signal that disconnect is complete.
func (conn *mqttConn) runPacketReader(wg *sync.WaitGroup) {
	defer func() {
		logInfo("[MQTT] packet reader is exiting")
		wg.Done()
	}()
	for {
		// Set a deadline for reads to prevent blocking forever. We'll handle
		// this error and continue looping, if appropriate
		if err := conn.conn.SetReadDeadline(time.Now().Add(connResponseDeadline)); err != nil {
			logError("[MQTT] failed to set read deadline: %s", err.Error())
			break
		}
		p, err := conn.stream.Read()
		if err != nil {
			// Disconnect "responses" are EOF
			if err == io.EOF || conn.getStatus() == DisconnectedNetworkStatus {
				// Server disconnected. This happens for 2 reasons:
				// 1. We initiated a disconnect
				// 2. we don't ping, so the server assumed we're done
				conn.setStatus(DisconnectedNetworkStatus)
				logInfo("[MQTT] net.Conn disconnected: %s", err.Error())
			} else {
				// I/O errors usually arrive as *net.OpError. Use errors.As
				// rather than a bare type assertion: the old
				// `err.(*net.OpError)` panicked on any other error type.
				var opError *net.OpError
				if errors.As(err, &opError) && os.IsTimeout(opError.Err) {
					// No problem - read deadline just exceeded
					continue
				}
				logError("[MQTT] failed to read packet: %s", err.Error())
			}
			// The signal to the caller that disconnect was complete is the
			// exiting of this function (and wg.Done())
			break
		}
		// Wait until here to set last activity, since disconnects and timeouts
		// should be included as activity.
		conn.setLastActivity(time.Now())
		if p.Type() == packet.PUBLISH {
			// Incoming publish, received from our subscription.
			pubPkt := p.(*packet.PublishPacket)
			conn.Receiver.handlePubReceive(pubPkt, time.Now())
		} else {
			// Everything besides publish and disconnect, so unpackage the
			// packet data and send it to the appropriate channel
			respData := conn.createResponseForPacket(p)
			respCh := conn.getResponseChannel(p.Type())
			select {
			case respCh <- respData:
			default:
				logInfo("[MQTT] Queueing error as nil (p.Type: %s / respData: %+v)", p.Type(), respData)
				conn.sendQueueingError(nil)
			}
		}
	}
}
// writePacket writes p to the buffered stream and flushes it, with a write
// deadline set before both the write and the flush.
// XXX - I've used a SetWriteDeadline() for this, even on Flush, but I've
// never gotten the write's to timeout. I think it's because the underlying
// stream is buffered. It still doesn't quite make sense, because Flush() on
// the buffered stream still forces a Write on the unbuffered Writer. My
// guess is that it's the nature of TCP. If there's no failure, even the
// lack of an ACK on write won't result in timing out. But, in any case, we
// will still have a response timeout on the round trip, which might be
// sufficient.
func (conn *mqttConn) writePacket(p packet.Packet) error {
	if err := conn.conn.SetWriteDeadline(time.Now().Add(connResponseDeadline)); err != nil {
		logError("[MQTT] failed to set write deadline: %s", err.Error())
		return err
	}
	if err := conn.stream.Write(p); err != nil {
		logError("[MQTT] failed to send %s packet: %s", p.Type(), err.Error())
		return err
	}
	// Refresh the deadline: the flush below is what actually hits the wire.
	if err := conn.conn.SetWriteDeadline(time.Now().Add(connResponseDeadline)); err != nil {
		logError("[MQTT] failed to set write deadline: %s", err.Error())
		return err
	}
	if err := conn.stream.Flush(); err != nil {
		logError("[MQTT] failed to flush %s packet: %s", p.Type(), err.Error())
		return err
	}
	// Do not call setLastActivity here.
	// Write will succeed without error even if the packet is lost somewhere
	// before reaching the server.
	// The only way to know the actual network activity is to watch the packets
	// from the server.
	return nil
}
// getPacketID returns the next packet ID, wrapping from 65535 back to 1
// (0 is reserved to mean "no ID assigned").
func (conn *mqttConn) getPacketID() uint16 {
	conn.mutex.Lock()
	defer conn.mutex.Unlock()
	// If we were strictly incrementing, we could use atomic.AddUint32(), but
	// we're also wrapping around, so we still need the mutex.
	conn.lastPacketID++
	if conn.lastPacketID == 0 {
		conn.lastPacketID = 1
	}
	return conn.lastPacketID
}
// Sanity check to verify that we are queue'ing or non-queue'ing the correct
// types of packets. ACK's are not queued, of course, but since we can use this
// function to make route returns, we handle ACK's in this function too.
func (conn *mqttConn) verifyPacketType(pktType packet.Type) packetSendType {
	switch pktType {
	// Synchronous request/response types go through the direct channel.
	case packet.CONNECT, packet.CONNACK, packet.DISCONNECT, packet.SUBSCRIBE,
		packet.SUBACK:
		return directPacketSendType
	// Asynchronous (fire-and-forget with later ACK) types are queued.
	case packet.PUBLISH, packet.PUBACK, packet.PINGREQ, packet.PINGRESP:
		return queuedPacketSendType
	default:
		logError("[MQTT] Unhandled packet type: %s", pktType)
		return unhandledPacketSendType
	}
}
// getResponseChannel maps a packet type (request or its ACK) to the channel
// its response is delivered on. DISCONNECT has no response, so it maps to nil.
func (conn *mqttConn) getResponseChannel(pktType packet.Type) chan MqttResponse {
	switch pktType {
	case packet.CONNECT, packet.CONNACK:
		return conn.connRespCh
	case packet.SUBSCRIBE, packet.SUBACK:
		return conn.subRespCh
	case packet.UNSUBSCRIBE, packet.UNSUBACK:
		// While using the same channel as subscribe probably wouldn't be a
		// problem, it's just safer to use a separate channel for unsubs.
		return conn.unsubRespCh
	case packet.PUBLISH, packet.PUBACK:
		return conn.queueAckCh
	case packet.PINGREQ, packet.PINGRESP:
		return conn.pingRespCh
	case packet.DISCONNECT:
		return nil
	default:
		logError("[MQTT] Unhandled packet type: %s", pktType)
		return nil
	}
}
// sendQueueingError reports an error that could not be delivered through the
// normal response channels. It is sent to the delegate's error channel when
// one exists and has room; otherwise it is stashed via appendError for later
// retrieval through TakeRemainingErrors.
func (conn *mqttConn) sendQueueingError(err error) {
	if err == nil {
		err = errors.New("Channel full. Sending on error channel")
	}
	// No delegate channel configured: stash the error and bail early.
	if conn.errCh == nil {
		logInfo("No Error delegate channel. Check TakeRemainingErrors")
		conn.Receiver.appendError(err)
		return
	}
	select {
	case conn.errCh <- err:
		logInfo("Error Queued to delegate %d/%d", len(conn.errCh),
			cap(conn.errCh))
	default:
		logInfo("Error delegate channel full. Check TakeRemainingErrors")
		conn.Receiver.appendError(err)
	}
}
// Package-level loggers: informational output goes to stdout and errors to
// stderr, each tagged so MODE SDK log lines are easy to identify.
var (
	infoLogger  = log.New(os.Stdout, "[MODE - INFO] ", log.LstdFlags)
	errorLogger = log.New(os.Stderr, "[MODE - ERROR] ", log.LstdFlags)
)
// logInfo writes a printf-style message to the package's info logger.
func logInfo(format string, values ...interface{}) {
	infoLogger.Printf(format+"\n", values...)
}
// logError writes a printf-style message to the package's error logger.
func logError(format string, values ...interface{}) {
	errorLogger.Printf(format+"\n", values...)
}
fix: guard IsConnected's connection access with the connection mutex
// Package mode implements the MODE client MQTT API.
// The interface is the MqttClient struct, which supports the MQTT
// subset required by our devices; configuration is supplied through the
// MqttDelegate.
package mode
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"net"
"os"
"sync"
"time"
packet "github.com/moderepo/device-sdk-go/v3/mqtt_packet"
)
const (
	// Dial timeout for establishing the TCP/TLS connection.
	mqttConnectTimeout = time.Second * 10
	// Read/write deadline on the socket and the budget for waiting on
	// server responses.
	connResponseDeadline = time.Second * 10
)

// Shared dialer so every connection uses the same connect timeout.
var mqttDialer = &net.Dialer{Timeout: mqttConnectTimeout}
// QOSLevel is the QoS level of message delivery. This is used in sending
// events to MODE.
type QOSLevel int

const (
	// QoS 0 - message delivery is not guaranteed.
	QOSAtMostOnce QOSLevel = iota
	// QoS 1 - message is delivered at least once, but duplicates may happen.
	QOSAtLeastOnce
	// QoS 2 - message is always delivered exactly once. This is currently not
	// supported.
	QOSExactlyOnce
)
// packetSendType classifies how a packet is handed to the writer goroutine.
type packetSendType int

const (
	// Packets are written out to the stream immediately (or after the
	// the previous packet has finished sending)
	directPacketSendType packetSendType = iota
	// Packets are queued and written when processed
	queuedPacketSendType
	// Packet types this client never sends.
	unhandledPacketSendType
)
// NetworkStatus describes the state of the connection to the MQTT server.
type NetworkStatus int

const (
	// There is currently an active connection to the server
	ConnectedNetworkStatus NetworkStatus = iota
	// We have successfully disconnected from the server
	DisconnectedNetworkStatus
	// If we have had requests time out, we set to timing out. We should
	// reconnect.
	TimingOutNetworkStatus
	// Not yet connected state
	DefaultNetworkStatus
)
type (
	// MqttSubData to send in the channel to the client when we receive data
	// published for our subscription.
	MqttSubData struct {
		Topic       string
		Data        []byte
		ReceiveTime time.Time
	}
	// MqttResponse is result of an MQTT Request. Not all of these members will
	// be valid. For example, only PUBACK's will have PacketIds, and PUBLISH'es
	// will send subscription data. In some cases, there will be multiple
	// errors, so the Errs slice will be populated rather than the Err.
	MqttResponse struct {
		PacketID uint16
		Err      error
		Errs     []error
	}
	// MqttAuthDelegate methods provide the security and authentication
	// information to start a connection to the MqttServer
	MqttAuthDelegate interface {
		// Returns the tls usage and configuration. If useTLS is false, a nil
		// tlsConfig should be returned.
		TLSUsageAndConfiguration() (useTLS bool, tlsConfig *tls.Config)
		// Returns authentication information
		AuthInfo() (username string, password string)
	}
	// MqttReceiverDelegate methods allow the MqttClient to communicate
	// information and events back to the user.
	MqttReceiverDelegate interface {
		// SetReceiveChannels will be called by the MqttClient. The MqttClient
		// will create the channels with the buffer size returned by
		// GetReceieveQueueSize(). The implementor of the delegate will use
		// these channels to receive information from the server, such as
		// queued responses and subscriptions:
		// subRecvCh: Data published from our subscriptions.
		// queueAckCh: API requests that are queued will receive MqttPublishID's
		// which will be ACK'ed. The MqttQueueResult will have the
		// MqttPublishID and the result
		// pingAckCh: True if our ping received an ACK or false if timeout
		// Note: These channels will be closed when the connection is closed (from
		// a Disconnect), so the user should stop listening to these channels when
		// OnClose() is called.
		SetReceiveChannels(subRecvCh <-chan MqttSubData,
			queueAckCh <-chan MqttResponse,
			pingAckCh <-chan MqttResponse)
		// Hook so we can clean up on closing of connections
		OnClose()
	}
	// MqttConfigDelegate methods allow the MqttClient to configure itself
	// according to the requirements of the user
	MqttConfigDelegate interface {
		// Buffer size of the incoming queues to the delegate. This is the
		// size of the three receive channels
		GetReceiveQueueSize() uint16
		// Buffer size of the outgoing queue to the server. This cannot be
		// changed after a connection is created
		GetSendQueueSize() uint16
	}
	// MqttErrorDelegate is an optional delegate which allows the MqttClient
	// a method of signaling errors that are not able to be communicated
	// through the normal channels. See handling errors in the documentation.
	MqttErrorDelegate interface {
		// The buffer size of the error channel
		GetErrorChannelSize() uint16
		// Provides the delegate the channel to receive errors
		SetErrorChannel(errCh chan error)
	}
	// MqttDelegate is the combined required interfaces that must be implemented
	// to use the MqttClient. This is a convenience that the user can use to
	// allow a single struct to implement all the required interfaces
	MqttDelegate interface {
		MqttAuthDelegate
		MqttReceiverDelegate
		MqttConfigDelegate
	}
	// MqttClient provides the public API to MQTT. We handle
	// connect, disconnect, ping, publish, and subscribe.
	// Connect, disconnect, and subscribe will block and wait for the
	// response. Ping and publish will return after the packet has been
	// sent and the response will be sent on a channel that is provided
	// by the delegate. For ping, since MQTT does not provide a mechanism
	// to distinguish between different ping requests, we do not provide
	// an API to distinguish them either. For publish, the function returns
	// a packet ID. This packet ID will be returned to the caller in the
	// channel.
	MqttClient struct {
		mqttHost string
		mqttPort int
		authDelegate MqttAuthDelegate
		recvDelegate MqttReceiverDelegate
		confDelegate MqttConfigDelegate
		// Optional; may be nil.
		errorDelegate MqttErrorDelegate
		// Active connection; nil when disconnected. Guarded by connMtx.
		conn *mqttConn
		// wgSend/wgRecv track the writer and reader goroutines so that
		// shutdownConnection can wait for them to exit.
		wgSend       sync.WaitGroup
		stopWriterCh chan struct{}
		wgRecv       sync.WaitGroup
		// Overflow errors fetched via TakeRemainingErrors. Guarded by errorMtx.
		lastErrors []error
		delegateSubRecvCh chan MqttSubData
		connMtx  sync.Mutex
		errorMtx sync.Mutex
	}
	// Delegate for the mqqtConn to call back to the MqttClient
	mqttReceiver interface {
		// Called by the connection when receiving publishes
		handlePubReceive(pkt *packet.PublishPacket, receiveTime time.Time)
		// Called by the connection when there is an error
		appendError(error)
	}
	// packetSendData pairs an outgoing packet with the channel its write
	// result / response should be delivered on.
	packetSendData struct {
		pkt      packet.Packet
		resultCh chan<- MqttResponse
	}
	// Internal structure used by the client to communicate with the mqttd
	// server. This is a thin wrapper over the mqtt_packet package.
	mqttConn struct {
		// delegate to handle receiving asynchronous events from the server.
		// This is more explicit than a channel, but it has the drawback
		// of just being a functional call. So, the implementation should
		// consist of routing (which it is, since it is just a callback
		// into the MqttClient). If circumstances change, we can
		// revisit this decision.
		Receiver mqttReceiver
		conn     net.Conn
		stream   *packet.Stream
		// Sequential packet ID. Used to match to acks our actions (pub)
		// excluding connects and pings. We also don't have packet ID's for
		// receiving pubs from our subscriptions because we didn't initiate them.
		lastPacketID uint16
		status       NetworkStatus
		// Updated on every send and receive so we know if we can avoid
		// sending pings
		lastActivity time.Time
		// Channel to write to the server stream
		directSendPktCh chan packetSendData
		queuedSendPktCh chan packetSendData
		// Channels to respond to clients.
		connRespCh  chan MqttResponse
		subRespCh   chan MqttResponse
		unsubRespCh chan MqttResponse
		pingRespCh  chan MqttResponse
		queueAckCh  chan MqttResponse
		// This is optional and may be nil
		errCh chan error
		// mutex guards lastPacketID; the others guard their namesakes.
		mutex             sync.Mutex
		statusMutex       sync.RWMutex
		lastActivityMutex sync.RWMutex
	}
)
// WithMqttAuthDelegate is a NewMqttClient option setting the delegate that
// supplies TLS configuration and credentials.
func WithMqttAuthDelegate(authDelegate MqttAuthDelegate) func(*MqttClient) {
	return func(c *MqttClient) {
		c.authDelegate = authDelegate
	}
}
// WithMqttReceiverDelegate is a NewMqttClient option setting the delegate
// that receives subscription data and response channels.
func WithMqttReceiverDelegate(recvDelegate MqttReceiverDelegate) func(*MqttClient) {
	return func(c *MqttClient) {
		c.recvDelegate = recvDelegate
	}
}
// WithMqttConfigDelegate is a NewMqttClient option setting the delegate that
// supplies queue sizes.
func WithMqttConfigDelegate(confDelegate MqttConfigDelegate) func(*MqttClient) {
	return func(c *MqttClient) {
		c.confDelegate = confDelegate
	}
}
// WithMqttErrorDelegate is a NewMqttClient option setting the optional
// delegate that receives out-of-band errors.
func WithMqttErrorDelegate(errorDelegate MqttErrorDelegate) func(*MqttClient) {
	return func(c *MqttClient) {
		c.errorDelegate = errorDelegate
	}
}
// WithMqttDelegate is a NewMqttClient option setting the three required
// delegates from one combined MqttDelegate. Note it does NOT set the
// optional error delegate; use WithMqttErrorDelegate for that.
func WithMqttDelegate(delegate MqttDelegate) func(*MqttClient) {
	return func(c *MqttClient) {
		c.authDelegate = delegate
		c.recvDelegate = delegate
		c.confDelegate = delegate
	}
}
// NewMqttClient will create client and open a stream. A client is invalid if
// not connected, and you need to create a new client to reconnect.
// Returns nil unless all three required delegates (auth, receiver, config)
// were supplied via the option functions.
func NewMqttClient(mqttHost string, mqttPort int,
	dels ...func(*MqttClient)) *MqttClient {
	client := &MqttClient{
		mqttHost:   mqttHost,
		mqttPort:   mqttPort,
		lastErrors: make([]error, 0, 5),
	}
	for _, apply := range dels {
		apply(client)
	}
	// Guard clause: any missing required delegate makes the client unusable.
	if client.authDelegate == nil || client.recvDelegate == nil ||
		client.confDelegate == nil {
		return nil
	}
	return client
}
// IsConnected will return true if we have a successfully CONNACK'ed response.
// Reads conn under connMtx so it can't race with connect/disconnect.
func (client *MqttClient) IsConnected() bool {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	return client.conn != nil && client.conn.getStatus() == ConnectedNetworkStatus
}
// setConn installs the active connection under connMtx.
func (client *MqttClient) setConn(c *mqttConn) {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	client.conn = c
}
// getConn returns the active connection (nil when disconnected) under connMtx.
func (client *MqttClient) getConn() *mqttConn {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	return client.conn
}
// GetLastActivity will return the time since the last send or
// receive. Returns the zero time.Time when there is no active connection;
// previously this dereferenced a nil conn and panicked if called after
// Disconnect (which sets conn to nil) or before Connect.
func (client *MqttClient) GetLastActivity() time.Time {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	if client.conn == nil {
		return time.Time{}
	}
	return client.conn.GetLastActivity()
}
// sendPacket forwards to the connection's synchronous sendPacket while
// holding connMtx. A nil conn is safe: mqttConn.sendPacket checks for a nil
// receiver. NOTE(review): the mutex is held for the entire send round trip,
// serializing all other client calls behind it — confirm that's intended.
func (client *MqttClient) sendPacket(ctx context.Context,
	p packet.Packet) (chan MqttResponse, error) {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	return client.conn.sendPacket(ctx, p)
}
// setStatus forwards to the connection's setStatus under connMtx.
// NOTE(review): panics if conn is nil — confirm callers only invoke this
// while a connection exists.
func (client *MqttClient) setStatus(status NetworkStatus) {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	client.conn.setStatus(status)
}
// getStatus forwards to the connection's getStatus under connMtx.
// NOTE(review): panics if conn is nil — confirm callers only invoke this
// while a connection exists.
func (client *MqttClient) getStatus() NetworkStatus {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	return client.conn.getStatus()
}
// getPacketID forwards to the connection's packet-ID generator under connMtx.
func (client *MqttClient) getPacketID() uint16 {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	return client.conn.getPacketID()
}
// queuePacket forwards to the connection's asynchronous queuePacket while
// holding connMtx. A nil conn is safe: mqttConn.queuePacket checks for a
// nil receiver.
func (client *MqttClient) queuePacket(ctx context.Context,
	p packet.Packet) (uint16, error) {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	return client.conn.queuePacket(ctx, p)
}
// sendQueueingError forwards err to the connection's error-reporting path
// while holding connMtx. NOTE(review): dereferences conn without a nil
// check — panics if called after shutdownConnection; confirm callers.
func (client *MqttClient) sendQueueingError(err error) {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	client.conn.sendQueueingError(err)
}
// Connect will initiate a connection to the server. It will block until we
// receive a CONNACK from the server.
func (client *MqttClient) Connect(ctx context.Context) error {
	if client.getConn() != nil {
		return errors.New("Cannot connect when already connected")
	}
	// Dial the socket and start the reader/writer goroutines.
	if err := client.createMqttConnection(); err != nil {
		return err
	}
	// NOTE(review): if anything below fails, client.conn stays set, so a
	// retried Connect() returns "already connected" — confirm callers invoke
	// Disconnect() on error.
	user, pwd := client.authDelegate.AuthInfo()
	p := packet.NewConnectPacket()
	p.Version = packet.Version311
	p.Username = user
	p.Password = pwd
	// Ask the broker to discard any state from a previous session.
	p.CleanSession = true
	respChan, err := client.sendPacket(ctx, p)
	if err != nil {
		logError("[MQTT] failed to send packet: %s", err.Error())
		return err
	}
	// receivePacket calls wgRecv.Done, so it must be paired with this Add.
	client.wgRecv.Add(1)
	resp := client.receivePacket(ctx, respChan)
	if resp.Err != nil {
		client.setStatus(DisconnectedNetworkStatus)
		return resp.Err
	}
	// If we made it here, we consider ourselves connected
	client.setStatus(ConnectedNetworkStatus)
	return nil
}
// Disconnect will end this connection with the server. We will block until
// the server closes the connection.
// Note: We might want to wait, but order is important here.
func (client *MqttClient) Disconnect(ctx context.Context) error {
	if client.getConn() == nil {
		// nothing to disconnect. Return
		return nil
	}
	// Tear down goroutines/channels even if sending DISCONNECT fails.
	defer func() {
		client.shutdownConnection()
	}()
	// Maybe we want to add a Connecting/Disconnecting status?
	client.setStatus(DefaultNetworkStatus)
	p := packet.NewDisconnectPacket()
	// DISCONNECT has no response packet (its response channel is nil), so
	// sendPacket returns once the writer has written it out.
	_, err := client.sendPacket(ctx, p)
	if err != nil {
		logError("[MQTT] failed to send packet: %s", err.Error())
		return err
	}
	return nil
}
// Subscribe will subscribe to the topics in subs by sending a subscribe
// request. This is a synchronous call so it will block until
// a response is received from the server. It will return a slice of errors
// which will be in the same order as the subscriptions in Subscriptions().
func (client *MqttClient) Subscribe(ctx context.Context,
	subs []string) []error {
	p := packet.NewSubscribePacket()
	// Size to the request instead of the previous fixed guess of 10, so we
	// never reallocate mid-loop nor over-allocate for small requests.
	p.Subscriptions = make([]packet.Subscription, 0, len(subs))
	p.PacketID = client.getPacketID()
	for _, sub := range subs {
		// We have no protection to keep you from subscribing to the same
		// topic multiple times. Maybe we should? Maybe the server would send
		// us 2 messages then for each topic?
		logInfo("[MQTT] subscribing to topic %s", sub)
		p.Subscriptions = append(p.Subscriptions, packet.Subscription{
			Topic: sub,
			// MODE only supports QoS0 for subscriptions
			QOS: packet.QOSAtMostOnce,
		})
	}
	respChan, err := client.sendPacket(ctx, p)
	if err != nil {
		logError("[MQTT] failed to send packet: %s", err.Error())
		return []error{err}
	}
	// receivePacket calls wgRecv.Done, so pair it with an Add here.
	client.wgRecv.Add(1)
	resp := client.receivePacket(ctx, respChan)
	if resp.Errs != nil {
		return resp.Errs
	}
	return nil
}
// Unsubscribe sends an unsubscribe request for the topics in subs and blocks
// until the server's UNSUBACK arrives (synchronous call).
func (client *MqttClient) Unsubscribe(ctx context.Context,
	subs []string) []error {
	pkt := packet.NewUnsubscribePacket()
	pkt.PacketID = client.getPacketID()
	pkt.Topics = subs
	respChan, sendErr := client.sendPacket(ctx, pkt)
	if sendErr != nil {
		logError("[MQTT] failed to send packet: %s", sendErr.Error())
		return []error{sendErr}
	}
	// receivePacket calls wgRecv.Done, so pair it with an Add here.
	client.wgRecv.Add(1)
	if resp := client.receivePacket(ctx, respChan); resp.Errs != nil {
		return resp.Errs
	}
	return nil
}
// Ping sends an MQTT PINGREQ event to the server. This call is asynchronous:
// it succeeds as soon as the message is queued for delivery, and the result
// is delivered later on the delegate's pingAckCh.
func (client *MqttClient) Ping(ctx context.Context) error {
	if _, err := client.queuePacket(ctx, packet.NewPingreqPacket()); err != nil {
		logError("[MQTT] failed to send packet: %s", err.Error())
		return err
	}
	return nil
}
// PingAndWait sends an MQTT PINGREQ event to the server and waits for the
// response. If this method is used instead of the asynchronous Ping, the user
// should not be listening on the pingAckCh channel. If this function times
// out waiting for the response, an error is returned.
func (client *MqttClient) PingAndWait(ctx context.Context) error {
	p := packet.NewPingreqPacket()
	respChan, err := client.sendPacket(ctx, p)
	if err != nil {
		logError("[MQTT] failed to send packet: %s", err.Error())
		return err
	}
	// receivePacket calls wgRecv.Done, so pair it with an Add here.
	client.wgRecv.Add(1)
	resp := client.receivePacket(ctx, respChan)
	return resp.Err
}
// Publish sends an MQTT Publish event to subscribers on the specified
// topic. This is an asynchronous call, so we will always return a packet
// ID as long as the request is able to be queued. After queueing, any
// subsequent errors or results will be written to the delegate's
// queueAckCh.
// For QOSAtMostOnce, there will only be an error returned if the request was
// unable to be queued. We receive no ACK from the server.
// For QOSAtLeastOnce, we will receive an ACK if we were successful.
// For any other QOS levels (QOSExactlyOnce), they are not supported and an
// error is returned immediately and the request will not be sent.
func (client *MqttClient) Publish(ctx context.Context, qos QOSLevel,
	topic string, data []byte) (uint16, error) {
	// A zero packet ID asks publishWithID to allocate a fresh one.
	return client.publishWithID(ctx, qos, topic, data, 0)
}
// Republish resends a previously published message, reusing packetID so the
// server can correlate it with the original attempt.
func (client *MqttClient) Republish(ctx context.Context, qos QOSLevel,
	topic string, data []byte, packetID uint16) (uint16, error) {
	return client.publishWithID(ctx, qos, topic, data, packetID)
}
// publishWithID builds a PUBLISH packet for topic/data at the requested QoS
// and queues it. A packetID of 0 lets the connection assign the next free ID.
func (client *MqttClient) publishWithID(ctx context.Context, qos QOSLevel,
	topic string, data []byte, packetID uint16) (uint16, error) {
	var level byte
	switch qos {
	case QOSAtMostOnce:
		level = packet.QOSAtMostOnce
	case QOSAtLeastOnce:
		level = packet.QOSAtLeastOnce
	default:
		// QOSExactlyOnce (and anything else) is not supported.
		return 0, errors.New("unsupported qos level")
	}
	pub := packet.NewPublishPacket()
	pub.PacketID = packetID
	pub.Message = packet.Message{
		Topic:   topic,
		QOS:     level,
		Payload: data,
	}
	return client.queuePacket(ctx, pub)
}
// Each connect, we need to create a new mqttConnection. This dials the
// broker, wires the delegate channels, and starts the writer and reader
// goroutines for the new connection.
func (client *MqttClient) createMqttConnection() error {
	// Buffered channels that carry subscription data and ACKs to the receive
	// delegate; their capacity is chosen by the config delegate.
	receiveQueueSize := client.confDelegate.GetReceiveQueueSize()
	subRecvCh := make(chan MqttSubData, receiveQueueSize)
	queueAckCh := make(chan MqttResponse, receiveQueueSize)
	pingAckCh := make(chan MqttResponse, receiveQueueSize)
	client.recvDelegate.SetReceiveChannels(subRecvCh, queueAckCh, pingAckCh)
	client.delegateSubRecvCh = subRecvCh
	useTLS, tlsConfig := client.authDelegate.TLSUsageAndConfiguration()
	sendQueueSize := client.confDelegate.GetSendQueueSize()
	conn := newMqttConn(tlsConfig, client.mqttHost, client.mqttPort, useTLS,
		queueAckCh, pingAckCh, sendQueueSize)
	if conn == nil {
		return errors.New("Unable to create a socket to the server")
	}
	// The error delegate (and therefore the error channel) is optional.
	if client.errorDelegate != nil {
		errCh := make(chan error, client.errorDelegate.GetErrorChannelSize())
		client.errorDelegate.SetErrorChannel(errCh)
		conn.errCh = errCh
	}
	client.setConn(conn)
	conn.Receiver = client
	// We want to pass our WaitGroup's to the connection reader and writer, so
	// we don't put these in the mqttConn's constructor.
	client.stopWriterCh = make(chan struct{})
	client.wgSend.Add(1)
	go conn.runPacketWriter(client.stopWriterCh, &client.wgSend)
	client.wgRecv.Add(1)
	go conn.runPacketReader(&client.wgRecv)
	return nil
}
// Shutting down gracefully is tricky. But, we try our best to drain the channels
// and avoid any panics. The numbered steps below are order-dependent: the
// writer must stop before its channels close, and the reader must finish
// before the response channels close.
func (client *MqttClient) shutdownConnection() {
	client.connMtx.Lock()
	defer client.connMtx.Unlock()
	// We skip encapsulation of the connection class so the steps are clear
	// 1. Send close to the packetWriter goroutine
	client.stopWriterCh <- struct{}{}
	// 2. Wait until the done channel has been read, since there are some queued
	// writes that might be sent before the done channel has been read.
	client.wgSend.Wait()
	// 3. Close the writer's input channels
	close(client.conn.directSendPktCh)
	close(client.conn.queuedSendPktCh)
	// 4. Close the connection with the server. This will break the
	// packet reader out of its loop. Set to disconnected here so the
	// reader knows that it was not an error
	client.conn.setStatus(DisconnectedNetworkStatus)
	client.conn.conn.Close()
	// 5. Wait for the packet reader to finish
	client.wgRecv.Wait()
	// 6. Notify the client that we are disconnecting
	client.recvDelegate.OnClose()
	// 7. Close the channels for handling responses
	close(client.conn.connRespCh)
	close(client.conn.subRespCh)
	// 8. Close the channels to the client readers.
	close(client.conn.queueAckCh)
	close(client.conn.pingRespCh)
	close(client.delegateSubRecvCh)
	if client.conn.errCh != nil {
		close(client.conn.errCh)
	}
	client.conn = nil
}
// receivePacket waits on respChan for the reply to a synchronous request, or
// gives up when ctx expires. It marks wgRecv done exactly once. The
// asynchronous API does something similar on the caller's side, additionally
// tracking packet IDs.
func (client *MqttClient) receivePacket(ctx context.Context,
	respChan chan MqttResponse) MqttResponse {
	defer client.wgRecv.Done()
	// Block on the reply channel or the context, whichever fires first.
	select {
	case resp := <-respChan:
		return resp
	case <-ctx.Done():
		err := ctx.Err()
		logError("[MQTT] timeout waiting for reply packet %s", err)
		return MqttResponse{Err: err}
	}
}
// Implementation of the delegate for mqttConn to handle publish from the
// server on topics that we've subscribed to. We only unpack it and send it
// to the caller.
func (client *MqttClient) handlePubReceive(p *packet.PublishPacket,
	receiveTime time.Time) {
	logInfo("[MQTT] received message for topic %s", p.Message.Topic)
	pubData := MqttSubData{
		Topic: p.Message.Topic,
		Data: p.Message.Payload,
		ReceiveTime: receiveTime,
	}
	// Non-blocking send to the user: a full delegate channel drops the
	// message and reports through the error path rather than stalling the
	// packet reader goroutine.
	select {
	case client.delegateSubRecvCh <- pubData:
	default:
		logError("Caller could not receive publish data. SubRecCh full?")
		client.sendQueueingError(nil)
		return
	}
	if p.Message.QOS != packet.QOSAtMostOnce {
		ackPkt := packet.NewPubackPacket()
		ackPkt.PacketID = p.PacketID
		// For everything except QOS2: if we have queued the message to the
		// user, we consider that successfully published to us, so we attempt
		// to send an ACK back to the server.
		ctx, cancel := context.WithTimeout(context.Background(),
			connResponseDeadline)
		defer cancel()
		if _, err := client.queuePacket(ctx, ackPkt); err != nil {
			logError("[MQTT] Queueing error on handlePubReceive %+v", err)
			client.sendQueueingError(err)
		}
	}
}
// TakeRemainingErrors returns the accumulated errors and resets the backlog.
// Errors land in this backlog when there is no error delegate, or when the
// delegate's error channel is full.
func (client *MqttClient) TakeRemainingErrors() []error {
	client.errorMtx.Lock()
	defer client.errorMtx.Unlock()
	taken := client.lastErrors
	client.lastErrors = make([]error, 0, 5)
	return taken
}
// appendError stashes err in the backlog drained by TakeRemainingErrors.
func (client *MqttClient) appendError(err error) {
	client.errorMtx.Lock()
	client.lastErrors = append(client.lastErrors, err)
	client.errorMtx.Unlock()
}
// newMqttConn dials the broker (plain TCP or TLS) and builds the mqttConn
// with all of its send/response channels. Returns nil if the dial fails.
func newMqttConn(tlsConfig *tls.Config, mqttHost string,
	mqttPort int, useTLS bool, queueAckCh chan MqttResponse,
	pingAckCh chan MqttResponse, outgoingQueueSize uint16) *mqttConn {
	addr := fmt.Sprintf("%s:%d", mqttHost, mqttPort)
	var conn net.Conn
	var err error
	if useTLS {
		if conn, err = tls.DialWithDialer(mqttDialer, "tcp", addr,
			tlsConfig); err != nil {
			logError("MQTT TLS dialer failed: %s", err.Error())
			return nil
		}
	} else {
		if conn, err = mqttDialer.Dial("tcp", addr); err != nil {
			logError("MQTT dialer failed: %s", err.Error())
			return nil
		}
	}
	return &mqttConn{
		conn: conn,
		stream: packet.NewStream(conn, conn),
		status: DefaultNetworkStatus,
		// The two channels that we write packets to send. The packetWriter
		// will listen on these and write out to the stream.
		// blocking, non-buffered
		directSendPktCh: make(chan packetSendData),
		// blocking, queue size specified by the delegate. If it is insufficient,
		// we will lose packets.
		queuedSendPktCh: make(chan packetSendData, outgoingQueueSize),
		// These are synchronous requests, so we send the packet, then wait for the response. But,
		// we create a buffer of 1, so if the response is received before we start listening
		// to this channel, it won't be dropped
		connRespCh: make(chan MqttResponse, 1),
		subRespCh: make(chan MqttResponse, 1),
		unsubRespCh: make(chan MqttResponse, 1),
		// These are passed to us by the client, with a buffer sized specified
		// by the delegate, so it is the delegate's responsibility to set the
		// size appropriately or we will start losing responses.
		pingRespCh: pingAckCh,
		queueAckCh: queueAckCh,
	}
}
// setStatus records the connection state under the status write lock.
func (conn *mqttConn) setStatus(status NetworkStatus) {
	conn.statusMutex.Lock()
	conn.status = status
	conn.statusMutex.Unlock()
}
// getStatus reads the connection state under the status read lock.
func (conn *mqttConn) getStatus() NetworkStatus {
	conn.statusMutex.RLock()
	s := conn.status
	conn.statusMutex.RUnlock()
	return s
}
// setLastActivity records the time of the most recent inbound packet.
func (conn *mqttConn) setLastActivity(t time.Time) {
	conn.lastActivityMutex.Lock()
	conn.lastActivity = t
	conn.lastActivityMutex.Unlock()
}
// GetLastActivity returns the time of the most recent inbound packet.
func (conn *mqttConn) GetLastActivity() time.Time {
	conn.lastActivityMutex.RLock()
	t := conn.lastActivity
	conn.lastActivityMutex.RUnlock()
	return t
}
// We only queue pings (PINGREQ) and publishes (PUBLISH). Theoretically, we
// could queue subscribes (SUBSCRIBE) since they have packet ID's like
// publishes. But, for simplicity, those are synchronous, since, in practice,
// those are either on startup, or, at least, on rare occasions.
//
// queuePacket places p on the buffered outgoing queue and returns the packet
// ID assigned to it (0 for packets without one). It fails if the connection
// is absent/unstable or ctx ends before the queue accepts the packet.
func (conn *mqttConn) queuePacket(ctx context.Context,
	p packet.Packet) (uint16, error) {
	if conn == nil || conn.getStatus() == DisconnectedNetworkStatus ||
		conn.getStatus() == TimingOutNetworkStatus {
		return 0, errors.New("Connection unstable. Unable to send")
	}
	packetID := uint16(0)
	if p.Type() == packet.PUBLISH {
		pubPkt := p.(*packet.PublishPacket)
		if pubPkt.PacketID != 0 {
			// Republish: keep the caller-supplied ID.
			packetID = pubPkt.PacketID
		} else {
			// Fresh publish: allocate the next ID.
			packetID = conn.getPacketID()
			pubPkt.PacketID = packetID
		}
	}
	pktSendData := packetSendData{pkt: p,
		resultCh: conn.getResponseChannel(p.Type()),
	}
	select {
	// Put writing to the channel in a select because the buffer might be full
	case conn.queuedSendPktCh <- pktSendData:
		// Successfully sent and we don't know when we'll get a response back,
		// so just don't do anything.
	case <-ctx.Done():
		logError("Exceeded timeout sending %s for id %d", p.Type(), packetID)
		// Report the actual context error: the previous message always
		// claimed "Send Queue full" even when ctx was simply cancelled.
		return 0, fmt.Errorf("unable to queue %s for id %d: %v", p.Type(), packetID, ctx.Err())
	}
	return packetID, nil
}
// Called by the client to send packets to the server. sendPacket hands p to
// the writer goroutine, waits for the write result, and returns the channel
// on which the eventual response packet will be delivered.
// NOTE(review): once the writer has accepted the packet, the receive from
// resultCh below does not honor ctx — if the writer goroutine exited without
// replying, this would block forever; confirm shutdown ordering makes that
// impossible.
func (conn *mqttConn) sendPacket(ctx context.Context,
	p packet.Packet) (chan MqttResponse, error) {
	if conn == nil || conn.getStatus() == DisconnectedNetworkStatus ||
		conn.getStatus() == TimingOutNetworkStatus {
		return nil, errors.New("Connection unstable. Unable to send")
	}
	// Unbuffered: the writer's reply is the synchronization point. Closed on
	// return, so it is only valid for this one exchange.
	resultCh := make(chan MqttResponse)
	defer close(resultCh)
	select {
	case conn.directSendPktCh <- packetSendData{
		pkt: p,
		resultCh: resultCh,
	}:
	case <-ctx.Done():
		logError("Exceeded timeout sending %s", p.Type())
		return nil, errors.New("Timeout Error")
	}
	// Wait for the result, and then give that result back to the caller, who
	// will handle the error
	result := <-resultCh
	return conn.getResponseChannel(p.Type()), result.Err
}
// runPacketWriter is the single goroutine that writes packets to the stream.
// It drains the direct (synchronous) and queued (asynchronous) channels until
// a signal arrives on stopWriterCh, then marks wg done.
func (conn *mqttConn) runPacketWriter(stopWriterCh chan struct{},
	wg *sync.WaitGroup) {
	defer func() {
		logInfo("[MQTT] packet writer is exiting")
		wg.Done()
	}()
	exitLoop := false
	for !exitLoop {
		// We read two channels for writing out packets. The directSend
		// channel received synchronous, which has an unbuffered queue.
		// The queuedSend can back up because the client already has an ID to
		// check for the response.
		// NOTE: Writes are sent on a bufio.Writer, so writes are almost
		// guaranteed to succeed, even if they never reach the server. See the
		// note in writePacket for more information.
		select {
		case pktSendData := <-conn.directSendPktCh:
			// Synchronous path: the sender is blocked on resultCh, so this
			// send cannot get stuck.
			resultCh := pktSendData.resultCh
			resultCh <- MqttResponse{Err: conn.writePacket(pktSendData.pkt)}
		case pktSendData := <-conn.queuedSendPktCh:
			// Get the packet ID, if any, for error reporting.
			pktID := uint16(0)
			if pktSendData.pkt.Type() == packet.PUBLISH {
				pubPkt := pktSendData.pkt.(*packet.PublishPacket)
				pktID = pubPkt.PacketID
			}
			// This is a long lived channel, so it's possible to be full.
			resultCh := pktSendData.resultCh
			err := conn.writePacket(pktSendData.pkt)
			if err != nil {
				// If there was an error sending, we can notify the caller
				// immediately.
				logError("[MQTT] Error occurred on runPacketWriter %v", err)
				select {
				case resultCh <- MqttResponse{
					PacketID: pktID,
					Err: err,
				}:
				default:
					logError("[MQTT] Queueing error on runPacketWriter %+v", err)
					conn.sendQueueingError(err)
				}
			}
		case <-stopWriterCh:
			exitLoop = true
		}
	}
}
// createResponseForPacket unpacks an inbound ACK-style packet into the
// MqttResponse that will be delivered on the channel matching its type. If
// this gets complicated, we can handle each type separately.
func (conn *mqttConn) createResponseForPacket(p packet.Packet) MqttResponse {
	switch p.Type() {
	case packet.PINGRESP:
		// successful ping response is just nil errors
		return MqttResponse{Err: nil}
	case packet.PUBACK:
		pubAck := p.(*packet.PubackPacket)
		return MqttResponse{PacketID: pubAck.PacketID, Err: nil}
	case packet.CONNACK:
		connAck := p.(*packet.ConnackPacket)
		var err error
		if connAck.ReturnCode != packet.ConnectionAccepted {
			err = connAck.ReturnCode
		}
		return MqttResponse{Err: err}
	case packet.UNSUBACK:
		unsubAck := p.(*packet.UnsubackPacket)
		// There's only a packet ID.
		return MqttResponse{
			PacketID: unsubAck.PacketID}
	case packet.SUBACK:
		subAck := p.(*packet.SubackPacket)
		resp := MqttResponse{
			// We don't do asynchronous SUBSCRIPTIONS, so our packets won't
			// have packet ID's. But, if we ever do them, this is one place that
			// we won't have to change our code.
			PacketID: subAck.PacketID,
		}
		for _, code := range subAck.ReturnCodes {
			if code == packet.QOSFailure {
				// Record every rejected subscription. Previously only a
				// failure at index 0 was reported and later failures were
				// silently dropped.
				resp.Errs = append(resp.Errs, errors.New("subscription rejected"))
			}
		}
		return resp
	default:
		// Include a format verb so the packet type actually appears in the
		// log message instead of being dropped by Printf.
		logError("Unhandled packet type for response: %s", p.Type())
		return MqttResponse{}
	}
}
// runPacketReader loops reading packets from the broker until the connection
// closes or an unrecoverable read error occurs. Inbound PUBLISHes go to the
// Receiver delegate; every other packet is unpacked and routed to the channel
// matching its type. Exiting (and wg.Done) is the disconnect-complete signal.
func (conn *mqttConn) runPacketReader(wg *sync.WaitGroup) {
	defer func() {
		logInfo("[MQTT] packet reader is exiting")
		wg.Done()
	}()
	for {
		// Set a deadline for reads to prevent blocking forever. We'll handle
		// this error and continue looping, if appropriate
		if err := conn.conn.SetReadDeadline(time.Now().Add(connResponseDeadline)); err != nil {
			logError("[MQTT] failed to set read deadline: %s", err.Error())
			break
		}
		p, err := conn.stream.Read()
		if err != nil {
			// Disconnect "responses" are EOF
			if err == io.EOF || conn.getStatus() == DisconnectedNetworkStatus {
				// Server disconnected. This happens for 2 reasons:
				// 1. We initiated a disconnect
				// 2. we don't ping, so the server assumed we're done
				conn.setStatus(DisconnectedNetworkStatus)
				logInfo("[MQTT] net.Conn disconnected: %s", err.Error())
			} else {
				// I/O errors usually surface as *net.OpError. Use a checked
				// type assertion: the previous unchecked assertion panicked
				// whenever a non-OpError (e.g. a protocol parse error) came
				// back from the stream.
				if opError, ok := err.(*net.OpError); ok {
					if os.IsTimeout(opError.Err) {
						// No problem - read deadline just exceeded
						continue
					}
				}
				logError("[MQTT] failed to read packet: %s", err.Error())
			}
			// The signal to the caller that disconnect was complete is the
			// exiting of this function (and wg.Done())
			break
		}
		// Wait until here to set last activity, since disconnects and timeouts
		// should be included as activity.
		conn.setLastActivity(time.Now())
		if p.Type() == packet.PUBLISH {
			// Incoming publish, received from our subscription.
			pubPkt := p.(*packet.PublishPacket)
			conn.Receiver.handlePubReceive(pubPkt, time.Now())
		} else {
			// Everything besides publish and disconnect, so unpackage the
			// packet data and send it to the appropriate channel
			respData := conn.createResponseForPacket(p)
			respCh := conn.getResponseChannel(p.Type())
			select {
			case respCh <- respData:
			default:
				logInfo("[MQTT] Queueing error as nil (p.Type: %s / respData: %+v)", p.Type(), respData)
				conn.sendQueueingError(nil)
			}
		}
	}
}
// writePacket writes p to the buffered stream and flushes it, refreshing the
// write deadline before each step.
func (conn *mqttConn) writePacket(p packet.Packet) error {
	// XXX - I've used a SetWriteDeadline() for this, even on Flush, but I've
	// never gotten the write's to timeout. I think it's because the underlying
	// stream is buffered. It still doesn't quite make sense, because Flush() on
	// the buffered stream still forces a Write on the unbuffered Writer. My
	// guess is that it's the nature of TCP. If there's no failure, even the
	// lack of an ACK on write won't result in timing out. But, in any case, we
	// will still have a response timeout on the round trip, which might be
	// sufficient.
	if err := conn.conn.SetWriteDeadline(time.Now().Add(connResponseDeadline)); err != nil {
		logError("[MQTT] failed to set write deadline: %s", err.Error())
		return err
	}
	if err := conn.stream.Write(p); err != nil {
		logError("[MQTT] failed to send %s packet: %s", p.Type(), err.Error())
		return err
	}
	if err := conn.conn.SetWriteDeadline(time.Now().Add(connResponseDeadline)); err != nil {
		logError("[MQTT] failed to set write deadline: %s", err.Error())
		return err
	}
	if err := conn.stream.Flush(); err != nil {
		logError("[MQTT] failed to flush %s packet: %s", p.Type(), err.Error())
		return err
	}
	// Do not call setLastActivity here.
	// Write will succeed without error even if the packet is lost somewhere
	// before reaching the server.
	// The only way to know the actual network activity is to watch the packets
	// from the server.
	return nil
}
// getPacketID returns the next packet ID, wrapping from 65535 back to 1 so
// that 0 (meaning "no ID") is never handed out.
func (conn *mqttConn) getPacketID() uint16 {
	conn.mutex.Lock()
	defer conn.mutex.Unlock()
	// A plain atomic increment would not handle the wrap-around to 1, so the
	// mutex stays.
	if conn.lastPacketID++; conn.lastPacketID == 0 {
		conn.lastPacketID = 1
	}
	return conn.lastPacketID
}
// Sanity check to verify that we are queue'ing or non-queue'ing the correct
// types of packets. ACK's are not queued, of course, but since we can use this
// function to make route returns, we handle ACK's in this function too.
func (conn *mqttConn) verifyPacketType(pktType packet.Type) packetSendType {
	switch pktType {
	case packet.CONNECT, packet.CONNACK, packet.DISCONNECT, packet.SUBSCRIBE,
		packet.SUBACK, packet.UNSUBSCRIBE, packet.UNSUBACK:
		// Unsubscribe uses the synchronous path (see Unsubscribe/sendPacket),
		// so UNSUBSCRIBE/UNSUBACK belong here; previously they fell through
		// to "unhandled".
		return directPacketSendType
	case packet.PUBLISH, packet.PUBACK, packet.PINGREQ, packet.PINGRESP:
		return queuedPacketSendType
	default:
		logError("[MQTT] Unhandled packet type: %s", pktType)
		return unhandledPacketSendType
	}
}
// getResponseChannel routes a packet type to the channel its response (or
// ACK) is delivered on. DISCONNECT has no response, so it maps to nil, as do
// unknown types.
func (conn *mqttConn) getResponseChannel(pktType packet.Type) chan MqttResponse {
	switch pktType {
	case packet.PINGREQ, packet.PINGRESP:
		return conn.pingRespCh
	case packet.PUBLISH, packet.PUBACK:
		return conn.queueAckCh
	case packet.CONNECT, packet.CONNACK:
		return conn.connRespCh
	case packet.SUBSCRIBE, packet.SUBACK:
		return conn.subRespCh
	case packet.UNSUBSCRIBE, packet.UNSUBACK:
		// Sharing subRespCh would probably work, but a dedicated channel for
		// unsubs is safer.
		return conn.unsubRespCh
	case packet.DISCONNECT:
		return nil
	default:
		logError("[MQTT] Unhandled packet type: %s", pktType)
		return nil
	}
}
// sendQueueingError delivers err to the error delegate's channel when one is
// configured and has room; otherwise it is appended to the client backlog
// drained by TakeRemainingErrors. A nil err is replaced by a generic
// channel-full error.
func (conn *mqttConn) sendQueueingError(err error) {
	if err == nil {
		err = errors.New("Channel full. Sending on error channel")
	}
	if conn.errCh == nil {
		logInfo("No Error delegate channel. Check TakeRemainingErrors")
		conn.Receiver.appendError(err)
		return
	}
	select {
	case conn.errCh <- err:
		logInfo("Error Queued to delegate %d/%d", len(conn.errCh),
			cap(conn.errCh))
	default:
		logInfo("Error delegate channel full. Check TakeRemainingErrors")
		conn.Receiver.appendError(err)
	}
}
// Package-level loggers: informational messages go to stdout, errors to
// stderr, both prefixed so MODE output is easy to grep.
var (
	infoLogger = log.New(os.Stdout, "[MODE - INFO] ", log.LstdFlags)
	errorLogger = log.New(os.Stderr, "[MODE - ERROR] ", log.LstdFlags)
)
// logInfo writes a printf-style informational message to the info logger.
func logInfo(format string, values ...interface{}) {
	msg := fmt.Sprintf(format, values...)
	infoLogger.Println(msg)
}
// logError writes a printf-style error message to the error logger.
func logError(format string, values ...interface{}) {
	msg := fmt.Sprintf(format, values...)
	errorLogger.Println(msg)
}
|
package dsc
import (
"database/sql"
"fmt"
"github.com/viant/toolbox"
"path"
"strings"
)
const ansiTableListSQL = "SELECT table_name AS name FROM information_schema.tables WHERE table_schema = ?"
const ansiSequenceSQL = "SELECT auto_increment AS seq_value FROM information_schema.tables WHERE table_name = '%v' AND table_schema = DATABASE()"
const ansiPrimaryKeySQL = "SELECT column_name AS name FROM information_schema.key_column_usage WHERE table_name = '%v' AND table_schema = '%v' AND constraint_name='PRIMARY'"
const defaultAutoincremetSQL = "SELECT 1 AS autoicrement FROM information_schema.COLUMNS WHERE T TABLE_SCHEMA = '%v' AND TABLE_NAME = '%v' AND COLUMN_NAME = '%v' AND EXTRA like '%auto_increment%'"
// Generic defaults.
const defaultSchemaSQL = "SELECT DATABASE() AS name"
const ansiSchemaListSQL = "SELECT schema_name AS name FROM information_schema.schemata"

// MySQL foreign-key enforcement toggles.
const mysqlDisableForeignCheck = "SET FOREIGN_KEY_CHECKS=0"
const mysqlEnableForeignCheck = "SET FOREIGN_KEY_CHECKS=1"

// SQLite catalog queries (sqlite_master / sqlite_sequence based).
const sqlLightTableSQL = "SELECT name FROM SQLITE_MASTER WHERE type='table' AND name NOT IN('sqlite_sequence') AND LENGTH(?) > 0"
const sqlLightSequenceSQL = "SELECT COALESCE(MAX(name), 0) + 1 FROM (SELECT seq AS name FROM SQLITE_SEQUENCE WHERE name = '%v')"
const sqlLightSchemaSQL = "PRAGMA database_list"
const sqlLightPkSQL = "pragma table_info(%v);"

// PostgreSQL catalog queries.
const pgCurrentSchemaSQL = "SELECT current_database() AS name"
const pgSchemaListSQL = "SELECT datname AS name FROM pg_catalog.pg_database"
const pgTableListSQL = "SELECT table_name AS name FROM information_schema.tables WHERE table_catalog = ? AND table_schema = 'public' "
const pgPrimaryKeySQL = `SELECT c.column_name AS name FROM information_schema.key_column_usage u
JOIN information_schema.columns c ON u.column_name = c.column_name AND u.table_name = c.table_name AND u.constraint_catalog = c.table_catalog
JOIN information_schema.table_constraints tc ON tc.constraint_name = u.constraint_name AND tc.table_name = c.table_name AND tc.constraint_catalog = c.table_catalog
WHERE u.table_name = c.table_name
AND tc.constraint_type = 'PRIMARY KEY'
AND c.table_name = '%v'
AND c.table_catalog = '%v'
ORDER BY u.ordinal_position
`
const pgAutoincrementSQL = `SELECT LIKE(column_default, 'nextval(%v') AS is_autoincrement FROM information_schema.key_column_usage u
JOIN information_schema.columns c ON u.column_name = c.column_name AND u.table_name = c.table_name AND u.constraint_catalog = c.table_catalog
JOIN information_schema.table_constraints tc ON tc.constraint_name = u.constraint_name AND tc.table_name = c.table_name AND tc.constraint_catalog = c.table_catalog
WHERE u.table_name = c.table_name
AND tc.constraint_type = 'PRIMARY KEY'
AND c.table_name = '%v'
AND c.table_catalog = '%v'
`

// Oracle catalog queries.
const oraTableSQL = `SELECT table_name AS "name" FROM all_tables WHERE owner = ?`
const oraSchemaSQL = `SELECT sys_context( 'userenv', 'current_schema' ) AS "name" FROM dual`
const oraSchemaListSQL = `SELECT USERNAME AS "name" FROM ALL_USERS`
const oraPrimaryKeySQL = `SELECT c.column_name AS "name"
FROM all_constraints p
JOIN all_cons_columns c ON p.constraint_name = c.constraint_name AND p.owner = c.owner
WHERE c.table_name = UPPER('%v')
AND p.owner = UPPER('%v')
AND p.constraint_type = 'P'
ORDER BY c.position`

// Microsoft SQL Server catalog queries.
const msSchemaSQL = "SELECT SCHEMA_NAME() AS name"
const msSequenceSQL = "SELECT current_value AS seq_value FROM sys.sequences WHERE name = '%v'"

// Column metadata queries (Vertica and generic information_schema).
const verticaTableInfo = `SELECT column_name,
data_type,
data_type_length,
numeric_precision,
numeric_scale,
is_nullable
FROM v_catalog.columns
WHERE table_schema = ? AND table_name = ?
ORDER BY ordinal_position`
const ansiTableInfo = ` SELECT
column_name,
data_type,
character_maximum_length AS data_type_length,
numeric_precision,
numeric_scale,
is_nullable
FROM information_schema.columns
WHERE table_schema = ? AND table_name = ?
ORDER BY ordinal_position`
//nameRecord maps the single "name" column returned by the catalog queries.
type nameRecord struct {
	Name string `TableColumn:"name"`
}
//sqlDatastoreDialect is a generic, information_schema-driven dialect
//implementation; concrete dialects configure it with their catalog SQL.
type sqlDatastoreDialect struct {
	tablesSQL string // query listing table names for a schema
	sequenceSQL string // query returning the next sequence/auto-increment value
	schemaSQL string // query returning the current schema name
	allSchemaSQL string // query listing all schemas
	keySQL string // query returning primary key column(s)
	disableForeignKeyCheck string // statement disabling FK enforcement
	enableForeignKeyCheck string // statement re-enabling FK enforcement
	autoIncrementSQL string // query detecting auto-increment columns
	tableInfoSQL string // query returning column metadata
	schemaResultsetIndex int // column index of the schema name in result rows
}
//CanCreateDatastore returns true if this dialect can create datastore
func (d sqlDatastoreDialect) CanCreateDatastore(manager Manager) bool {
	return true
}
//CanDropDatastore returns true if this dialect can drop datastore
func (d sqlDatastoreDialect) CanDropDatastore(manager Manager) bool {
	return true
}
//Init is a no-op for the generic dialect; dialects that need per-connection
//setup override it.
func (d sqlDatastoreDialect) Init(manager Manager, connection Connection) error {
	return nil
}
func hasColumnType(columns []*sql.ColumnType) bool {
if len(columns) == 0 {
return false
}
return columns[0].DatabaseTypeName() != ""
}
//GetColumns returns column metadata for datastore.tableName. It first asks
//the driver via a zero-row query; if the driver reports no column types it
//falls back to the dialect's information_schema query (d.tableInfoSQL).
func (d sqlDatastoreDialect) GetColumns(manager Manager, datastore, tableName string) ([]Column, error) {
	provider := manager.ConnectionProvider()
	connection, err := provider.Get()
	if err != nil {
		return nil, err
	}
	defer connection.Close()
	dbConnection, err := asSQLDb(connection.Unwrap((*sql.DB)(nil)))
	if err != nil {
		return nil, err
	}
	// NOTE(review): identifiers are concatenated into the SQL; datastore and
	// tableName must come from trusted sources.
	rows, err := dbConnection.Query("SELECT * FROM " + datastore + "." + tableName + " WHERE 1 = 0")
	if err != nil {
		return nil, err
	}
	// Release the zero-row cursor; previously it was never closed and leaked
	// a connection/statement per call.
	defer rows.Close()
	columns, err := rows.ColumnTypes()
	if err != nil {
		return nil, err
	}
	var result = make([]Column, 0)
	if !hasColumnType(columns) {
		// Driver gave no type metadata - fall back to information_schema.
		var tableColumns = []*TableColumn{}
		err := manager.ReadAll(&tableColumns, d.tableInfoSQL, []interface{}{datastore, tableName}, nil)
		if err == nil {
			for _, column := range tableColumns {
				// Strip any "(len)" suffix, e.g. VARCHAR(255) -> VARCHAR.
				if index := strings.Index(column.DataType, "("); index != -1 {
					column.DataType = string(column.DataType[:index])
				}
				column.DataType = strings.ToUpper(column.DataType)
				result = append(result, column)
			}
			return result, nil
		}
	}
	for _, column := range columns {
		result = append(result, column)
	}
	return result, nil
}
//EachTable invokes handler for every table in the current datastore,
//stopping at the first error.
func (d sqlDatastoreDialect) EachTable(manager Manager, handler func(table string) error) error {
	dbname, err := d.GetCurrentDatastore(manager)
	if err != nil {
		return err
	}
	tables, err := d.GetTables(manager, dbname)
	if err != nil {
		return err
	}
	for _, name := range tables {
		if handlerErr := handler(name); handlerErr != nil {
			return handlerErr
		}
	}
	return nil
}
//CreateDatastore creates a new datastore (database/schema), it takes manager and target datastore
func (d sqlDatastoreDialect) CreateDatastore(manager Manager, datastore string) error {
	_, err := manager.Execute("CREATE DATABASE " + datastore)
	return err
}
//DropDatastore drops a datastore (database/schema), it takes manager and datastore to be dropped
func (d sqlDatastoreDialect) DropDatastore(manager Manager, datastore string) error {
	_, err := manager.Execute("DROP DATABASE " + datastore)
	return err
}
//DropTable drops a table in datastore managed by manager.
//Note: the datastore parameter is currently unused; the table name alone is
//passed to DROP TABLE.
func (d sqlDatastoreDialect) DropTable(manager Manager, datastore string, table string) error {
	_, err := manager.Execute("DROP TABLE " + table)
	return err
}
//CreateTable creates table in datastore managed by manager.
//Note: the datastore parameter is currently unused; specification is the raw
//column/constraint list placed inside the parentheses.
func (d sqlDatastoreDialect) CreateTable(manager Manager, datastore string, table string, specification string) error {
	_, err := manager.Execute("CREATE TABLE " + table + "(" + specification + ")")
	return err
}
//GetTables returns the table names in the passed datastore, skipping empty
//rows.
func (d sqlDatastoreDialect) GetTables(manager Manager, datastore string) ([]string, error) {
	var records = make([]nameRecord, 0)
	if err := manager.ReadAll(&records, d.tablesSQL, []interface{}{datastore}, nil); err != nil {
		return nil, err
	}
	names := make([]string, 0, len(records))
	for _, record := range records {
		if record.Name != "" {
			names = append(names, record.Name)
		}
	}
	return names, nil
}
//normalizeName strips any directory prefix from name, so file-backed
//datastores (e.g. SQLite database paths) reduce to their base name. Backslash
//separators are normalized first: path.Split only understands "/", so
//Windows-style paths were previously returned unchanged despite the "\\"
//check.
func normalizeName(name string) string {
	if !strings.Contains(name, "/") && !strings.Contains(name, "\\") {
		return name
	}
	_, file := path.Split(strings.Replace(name, "\\", "/", -1))
	return file
}
//GetKeyName returns the primary key column name(s) for table in datastore,
//comma-joined; "" when the dialect has no key query or it fails.
func (d sqlDatastoreDialect) GetKeyName(manager Manager, datastore, table string) string {
	if d.keySQL == "" {
		return ""
	}
	var records = make([]map[string]interface{}, 0)
	if err := manager.ReadAll(&records, fmt.Sprintf(d.keySQL, table, datastore), []interface{}{}, nil); err != nil {
		return ""
	}
	keys := make([]string, 0, len(records))
	for _, record := range records {
		keys = append(keys, toolbox.AsString(record["name"]))
	}
	return strings.Join(keys, ",")
}
//GetDatastores returns the names of the datastores (databases/schemas)
//visible to manager.
func (d sqlDatastoreDialect) GetDatastores(manager Manager) ([]string, error) {
	var rows = make([][]interface{}, 0)
	if err := manager.ReadAll(&rows, d.allSchemaSQL, nil, nil); err != nil {
		// SQLite reports a missing database file this way; treat it as
		// "no datastores" rather than a hard failure.
		if strings.Contains(err.Error(), "unable to open database") {
			return []string{}, nil
		}
		return nil, err
	}
	names := make([]string, 0, len(rows))
	for _, row := range rows {
		names = append(names, normalizeName(toolbox.AsString(row[d.schemaResultsetIndex])))
	}
	return names, nil
}
//GetCurrentDatastore returns name of current schema
func (d sqlDatastoreDialect) GetCurrentDatastore(manager Manager) (string, error) {
	var result = make([]interface{}, 0)
	success, err := manager.ReadSingle(&result, d.schemaSQL, nil, nil)
	if err != nil || !success {
		// On "no row" (success == false) err is nil, so callers get ("", nil).
		return "", err
	}
	return normalizeName(toolbox.AsString(result[d.schemaResultsetIndex])), nil
}
// IsAutoincrement reports whether table's key column is auto-increment,
// using the dialect's autoIncrementSQL template.
func (d sqlDatastoreDialect) IsAutoincrement(manager Manager, datastore, table string) bool {
	if d.autoIncrementSQL == "" {
		return false
	}
	// NOTE(review): the datastore argument is immediately overwritten with the
	// connection's current datastore - confirm this is intentional.
	datastore, err := d.GetCurrentDatastore(manager)
	if err != nil {
		return false
	}
	var key = d.GetKeyName(manager, datastore, table)
	var result = make([]interface{}, 0)
	success, err := manager.ReadSingle(&result, fmt.Sprintf(d.autoIncrementSQL, datastore, table, key), nil, nil)
	if err != nil || !success {
		return false
	}
	if len(result) == 1 {
		return toolbox.AsInt(result[0]) == 1
	}
	return false
}
//GetSequence returns sequence value or error for passed in manager and table/sequence
// Strategy: try the dialect's native sequence query first; if that yields no
// positive value, fall back to MAX(key)+1 over the table itself.
func (d sqlDatastoreDialect) GetSequence(manager Manager, name string) (int64, error) {
	var result = make([]interface{}, 0)
	var sequenceError error
	if d.sequenceSQL != "" {
		var success bool
		success, sequenceError = manager.ReadSingle(&result, fmt.Sprintf(d.sequenceSQL, name), []interface{}{}, nil)
		if success && len(result) == 1 {
			var intResult = toolbox.AsInt(result[0])
			if intResult > 0 {
				return int64(intResult), nil
			}
		}
	}
	datastore, err := d.GetCurrentDatastore(manager)
	if err != nil {
		return 0, err
	}
	var key = d.GetKeyName(manager, datastore, name)
	if key != "" {
		success, err := manager.ReadSingle(&result, fmt.Sprintf("SELECT MAX(%v) AS seq_value FROM %v", key, name), []interface{}{}, nil)
		if err != nil || !success {
			return 0, err
		}
		if len(result) == 1 {
			return int64(toolbox.AsInt(result[0]) + 1), nil
		}
	}
	// Surface the earlier sequence-query error (nil when sequenceSQL is empty).
	return 0, sequenceError
}
//DisableForeignKeyCheck disables fk check
func (d sqlDatastoreDialect) DisableForeignKeyCheck(manager Manager, connection Connection) error {
	SQL := d.disableForeignKeyCheck
	if SQL == "" {
		// Dialect has no disable statement - nothing to do.
		return nil
	}
	if _, err := manager.ExecuteOnConnection(connection, SQL, nil); err != nil {
		return err
	}
	return nil
}
//EnableForeignKeyCheck enables fk check
func (d sqlDatastoreDialect) EnableForeignKeyCheck(manager Manager, connection Connection) error {
	SQL := d.enableForeignKeyCheck
	if SQL == "" {
		// Dialect has no enable statement - nothing to do.
		return nil
	}
	if _, err := manager.ExecuteOnConnection(connection, SQL, nil); err != nil {
		return err
	}
	return nil
}
// NormalizePlaceholders returns SQL unchanged: the generic dialect assumes the
// driver natively understands '?' placeholders.
func (d sqlDatastoreDialect) NormalizePlaceholders(SQL string) string {
	return SQL
}
//CanPersistBatch return true if datastore can persist in batch
func (d sqlDatastoreDialect) CanPersistBatch() bool {
	return false
}
//NewSQLDatastoreDialect creates a new default sql dialect
// The positional arguments populate sqlDatastoreDialect fields one-to-one;
// an empty string disables the corresponding feature.
func NewSQLDatastoreDialect(tablesSQL, sequenceSQL, schemaSQL, allSchemaSQL, keySQL, disableForeignKeyCheck, enableForeignKeyCheck, autoIncrementSQL, tableInfoSQL string, schmeaIndex int) DatastoreDialect {
	return &sqlDatastoreDialect{tablesSQL, sequenceSQL, schemaSQL, allSchemaSQL, keySQL, disableForeignKeyCheck, enableForeignKeyCheck, autoIncrementSQL, tableInfoSQL, schmeaIndex}
}
// mySQLDialect specializes the generic SQL dialect for MySQL.
type mySQLDialect struct {
	DatastoreDialect
}
//CanPersistBatch return true if datastore can persist in batch
func (d mySQLDialect) CanPersistBatch() bool {
	return true
}
// newMySQLDialect wires the generic SQL dialect with MySQL specific queries.
func newMySQLDialect() mySQLDialect {
	return mySQLDialect{DatastoreDialect: NewSQLDatastoreDialect(ansiTableListSQL, ansiSequenceSQL, defaultSchemaSQL, ansiSchemaListSQL, ansiPrimaryKeySQL, mysqlDisableForeignCheck, mysqlEnableForeignCheck, defaultAutoincremetSQL, ansiTableInfo, 0)}
}
// sqlLiteDialect specializes the generic SQL dialect for SQLite.
type sqlLiteDialect struct {
	DatastoreDialect
}
//CreateDatastore create a new datastore (database/schema), it takes manager and target datastore
// No-op: nothing to create explicitly for SQLite here.
func (d sqlLiteDialect) CreateDatastore(manager Manager, datastore string) error {
	return nil
}
//GetSequence returns sequence value or error for passed in manager and table/sequence
// Tries SQLITE_SEQUENCE first, then falls back to MAX(key)+1 over the table.
func (d sqlLiteDialect) GetSequence(manager Manager, name string) (int64, error) {
	var result = make([]interface{}, 0)
	success, sequenceError := manager.ReadSingle(&result, fmt.Sprintf(sqlLightSequenceSQL, name), []interface{}{}, nil)
	if success && len(result) == 1 {
		var intResult = toolbox.AsInt(result[0])
		if intResult > 0 {
			return int64(intResult), nil
		}
	}
	datastore, err := d.GetCurrentDatastore(manager)
	if err != nil {
		return 0, err
	}
	var key = d.GetKeyName(manager, datastore, name)
	if key != "" {
		success, err := manager.ReadSingle(&result, fmt.Sprintf("SELECT MAX(%v) AS seq_value FROM %v", key, name), []interface{}{}, nil)
		if err != nil || !success {
			return 0, err
		}
		if len(result) == 1 {
			return int64(toolbox.AsInt(result[0]) + 1), nil
		}
	}
	// Surface the earlier sequence-query error (may be nil).
	return 0, sequenceError
}
// DropDatastore drops every table in the SQLite datastore (there is no
// DROP DATABASE statement in SQLite).
func (d sqlLiteDialect) DropDatastore(manager Manager, datastore string) error {
	tables, err := d.GetTables(manager, datastore)
	if err != nil {
		return err
	}
	for _, table := range tables {
		if dropErr := d.DropTable(manager, datastore, table); dropErr != nil {
			return dropErr
		}
	}
	return nil
}
// GetKeyName returns the comma-joined primary-key column(s) of table using
// SQLite's "pragma table_info".
// NOTE(review): only pk == "1" is collected, so for composite keys only the
// first key column is reported - confirm this is intended.
func (d sqlLiteDialect) GetKeyName(manager Manager, datastore, table string) string {
	var records = make([]map[string]interface{}, 0)
	err := manager.ReadAll(&records, fmt.Sprintf(sqlLightPkSQL, table), []interface{}{}, nil)
	if err != nil {
		return ""
	}
	var result = make([]string, 0)
	for _, item := range records {
		if toolbox.AsString(item["pk"]) == "1" {
			result = append(result, toolbox.AsString(item["name"]))
		}
	}
	return strings.Join(result, ",")
}
// newSQLLiteDialect wires the generic SQL dialect with SQLite specific queries
// (schema name is column index 2 of "PRAGMA database_list").
func newSQLLiteDialect() *sqlLiteDialect {
	return &sqlLiteDialect{DatastoreDialect: NewSQLDatastoreDialect(sqlLightTableSQL, sqlLightSequenceSQL, sqlLightSchemaSQL, sqlLightSchemaSQL, sqlLightPkSQL, "", "", "", ansiTableInfo, 2)}
}
// pgDialect specializes the generic SQL dialect for PostgreSQL.
type pgDialect struct {
	DatastoreDialect
}
//CanPersistBatch return true if datastore can persist in batch
func (d pgDialect) CanPersistBatch() bool {
	return true
}
// newPgDialect wires the generic SQL dialect with PostgreSQL specific queries.
func newPgDialect() *pgDialect {
	return &pgDialect{DatastoreDialect: NewSQLDatastoreDialect(pgTableListSQL, "", pgCurrentSchemaSQL, pgSchemaListSQL, pgPrimaryKeySQL, "", "", pgAutoincrementSQL, ansiTableInfo, 0)}
}
// NormalizePlaceholders rewrites generic '?' placeholders into PostgreSQL's
// positional $1, $2, ... form.
// Uses strings.Builder instead of the original string concatenation in a loop,
// which was accidentally quadratic; output is identical.
// NOTE(review): '?' inside string literals is not distinguished - the input is
// scanned rune by rune.
func (d pgDialect) NormalizePlaceholders(SQL string) string {
	var normalized strings.Builder
	normalized.Grow(len(SQL))
	count := 1
	for _, r := range SQL {
		if r == '?' {
			normalized.WriteByte('$')
			normalized.WriteString(toolbox.AsString(count))
			count++
		} else {
			normalized.WriteRune(r)
		}
	}
	return normalized.String()
}
// IsAutoincrement reports whether table's primary key default is sequence
// backed (nextval(...)) in PostgreSQL.
func (d pgDialect) IsAutoincrement(manager Manager, datastore, table string) bool {
	// NOTE(review): the datastore argument is overwritten with the current
	// database - confirm this is intentional.
	datastore, err := d.GetCurrentDatastore(manager)
	if err != nil {
		return false
	}
	// pgAutoincrementSQL takes ("%", table, datastore); the leading "%" becomes
	// the wildcard inside LIKE(column_default, 'nextval(%').
	var SQL = fmt.Sprintf(pgAutoincrementSQL, "%", table, datastore)
	var result = make([]interface{}, 0)
	success, err := manager.ReadSingle(&result, SQL, nil, nil)
	if err != nil || !success {
		return false
	}
	if len(result) == 1 {
		return toolbox.AsBoolean(result[0])
	}
	return false
}
// DisableForeignKeyCheck disables constraint triggers table-by-table
// (PostgreSQL has no global FK switch).
func (d pgDialect) DisableForeignKeyCheck(manager Manager, connection Connection) error {
	return d.EachTable(manager, func(table string) error {
		_, err := manager.ExecuteOnConnection(connection, fmt.Sprintf("ALTER TABLE %v DISABLE TRIGGER ALL", table), nil)
		return err
	})
}
// EnableForeignKeyCheck re-enables constraint triggers table-by-table.
func (d pgDialect) EnableForeignKeyCheck(manager Manager, connection Connection) error {
	return d.EachTable(manager, func(table string) error {
		_, err := manager.ExecuteOnConnection(connection, fmt.Sprintf("ALTER TABLE %v ENABLE TRIGGER ALL", table), nil)
		return err
	})
}
// oraDialect specializes the generic SQL dialect for Oracle.
type oraDialect struct {
	DatastoreDialect
}
//CanPersistBatch return true if datastore can persist in batch
func (d oraDialect) CanPersistBatch() bool {
	return true
}
//CreateDatastore create a new datastore (database/schema), it takes manager and target datastore
// In Oracle a "datastore" is a user/schema: CREATE USER followed by GRANT.
// NOTE(review): datastore and password are interpolated directly into the DCL -
// only trusted input should reach this function.
func (d oraDialect) CreateDatastore(manager Manager, datastore string) error {
	var password, ok = manager.Config().Parameters["password"]
	if !ok {
		return fmt.Errorf("password was empty")
	}
	DCL := fmt.Sprintf("CREATE USER %v IDENTIFIED BY %v", datastore, password)
	if _, err := manager.Execute(DCL); err != nil {
		return err
	}
	DCL = fmt.Sprintf("GRANT CONNECT, RESOURCE, DBA TO %v", datastore)
	if _, err := manager.Execute(DCL); err != nil {
		return err
	}
	return nil
}
//DropDatastore drops a datastore (an Oracle user/schema) with CASCADE.
func (d oraDialect) DropDatastore(manager Manager, datastore string) error {
	_, err := manager.Execute(fmt.Sprintf("DROP USER %v CASCADE", datastore))
	return err
}
// NormalizePlaceholders rewrites generic '?' placeholders into Oracle's
// positional :1, :2, ... form.
// Uses strings.Builder instead of the original string concatenation in a loop,
// which was accidentally quadratic; output is identical.
func (d oraDialect) NormalizePlaceholders(SQL string) string {
	var normalized strings.Builder
	normalized.Grow(len(SQL))
	count := 1
	for _, r := range SQL {
		if r == '?' {
			normalized.WriteByte(':')
			normalized.WriteString(toolbox.AsString(count))
			count++
		} else {
			normalized.WriteRune(r)
		}
	}
	return normalized.String()
}
// Init applies optional "session" config parameters as ALTER SESSION settings
// on the supplied connection; missing/empty session config is a no-op.
func (d oraDialect) Init(manager Manager, connection Connection) error {
	config := manager.Config()
	if _, has := config.Parameters["session"]; !has {
		return nil
	}
	session := config.GetMap("session")
	if session == nil {
		return nil
	}
	for k, v := range session {
		_, err := manager.ExecuteOnConnection(connection, fmt.Sprintf("ALTER SESSION SET %v = '%v'", k, v), nil)
		if err != nil {
			return err
		}
	}
	return nil
}
// newOraDialect wires the generic SQL dialect with Oracle specific queries.
func newOraDialect() *oraDialect {
	return &oraDialect{DatastoreDialect: NewSQLDatastoreDialect(oraTableSQL, "", oraSchemaSQL, oraSchemaListSQL, oraPrimaryKeySQL, "", "", "", ansiTableInfo, 0)}
}
// odbcDialect specializes the generic SQL dialect for ODBC-connected stores
// (e.g. Vertica).
type odbcDialect struct {
	DatastoreDialect
}
// Init applies the optional SEARCH_PATH config parameter so unqualified table
// names resolve against the configured namespace.
// Fix: the statement now runs on the connection being initialized
// (ExecuteOnConnection) rather than on an arbitrary pooled connection via
// manager.Execute - the setting is per-session.
func (d *odbcDialect) Init(manager Manager, connection Connection) error {
	searchPath := manager.Config().Get("SEARCH_PATH")
	if searchPath == "" {
		return nil
	}
	SQL := fmt.Sprintf("SET SEARCH_PATH=%v", searchPath)
	if _, err := manager.ExecuteOnConnection(connection, SQL, nil); err != nil {
		return err
	}
	return nil
}
// newOdbcDialect wires the generic SQL dialect with ODBC/Vertica specific queries.
func newOdbcDialect() *odbcDialect {
	return &odbcDialect{DatastoreDialect: NewSQLDatastoreDialect(ansiTableListSQL, "", "", ansiSchemaListSQL, "", "", "", "", verticaTableInfo, 0)}
}
// msSQLDialect specializes the generic SQL dialect for Microsoft SQL Server.
type msSQLDialect struct {
	DatastoreDialect
}
// newMsSQLDialect wires the generic SQL dialect with SQL Server specific queries.
func newMsSQLDialect() *msSQLDialect {
	return &msSQLDialect{DatastoreDialect: NewSQLDatastoreDialect(ansiTableListSQL, msSequenceSQL, msSchemaSQL, ansiSchemaListSQL, "", "", "", "", ansiTableInfo, 0)}
}
Added search-namespace (SEARCH_PATH) option for Vertica.
package dsc
import (
"database/sql"
"fmt"
"github.com/viant/toolbox"
"path"
"strings"
)
// ansiTableListSQL lists table names for a given schema (information_schema based).
const ansiTableListSQL = "SELECT table_name AS name FROM information_schema.tables WHERE table_schema = ?"
// ansiSequenceSQL reads a table's auto_increment counter; '%v' is the table name.
const ansiSequenceSQL = "SELECT auto_increment AS seq_value FROM information_schema.tables WHERE table_name = '%v' AND table_schema = DATABASE()"
// ansiPrimaryKeySQL lists primary-key columns; substituted as (table, schema).
const ansiPrimaryKeySQL = "SELECT column_name AS name FROM information_schema.key_column_usage WHERE table_name = '%v' AND table_schema = '%v' AND constraint_name='PRIMARY'"
const defaultAutoincremetSQL = "SELECT 1 AS autoicrement FROM information_schema.COLUMNS WHERE T TABLE_SCHEMA = '%v' AND TABLE_NAME = '%v' AND COLUMN_NAME = '%v' AND EXTRA like '%auto_increment%'"
// defaultSchemaSQL returns the connection's current database name (MySQL).
const defaultSchemaSQL = "SELECT DATABASE() AS name"
// ansiSchemaListSQL lists all schemata.
const ansiSchemaListSQL = "SELECT schema_name AS name FROM information_schema.schemata"
// mysqlDisableForeignCheck / mysqlEnableForeignCheck toggle MySQL FK enforcement.
const mysqlDisableForeignCheck = "SET FOREIGN_KEY_CHECKS=0"
const mysqlEnableForeignCheck = "SET FOREIGN_KEY_CHECKS=1"
// sqlLightTableSQL lists user tables from SQLITE_MASTER (the '?' keeps the
// datastore parameter bound even though SQLite does not use it).
const sqlLightTableSQL = "SELECT name FROM SQLITE_MASTER WHERE type='table' AND name NOT IN('sqlite_sequence') AND LENGTH(?) > 0"
// sqlLightSequenceSQL reads the next sequence value from SQLITE_SEQUENCE;
// NOTE(review): seq is aliased AS name and aggregated with MAX - works, but reads oddly.
const sqlLightSequenceSQL = "SELECT COALESCE(MAX(name), 0) + 1 FROM (SELECT seq AS name FROM SQLITE_SEQUENCE WHERE name = '%v')"
// sqlLightSchemaSQL lists attached databases; column index 2 holds the file path.
const sqlLightSchemaSQL = "PRAGMA database_list"
// sqlLightPkSQL returns per-column metadata including the pk flag.
const sqlLightPkSQL = "pragma table_info(%v);"
// pgCurrentSchemaSQL / pgSchemaListSQL / pgTableListSQL are the PostgreSQL equivalents.
const pgCurrentSchemaSQL = "SELECT current_database() AS name"
const pgSchemaListSQL = "SELECT datname AS name FROM pg_catalog.pg_database"
const pgTableListSQL = "SELECT table_name AS name FROM information_schema.tables WHERE table_catalog = ? AND table_schema = 'public' "
// pgPrimaryKeySQL lists primary-key column names; substituted via
// fmt.Sprintf as ('%v' = table, '%v' = catalog).
const pgPrimaryKeySQL = `SELECT c.column_name AS name FROM information_schema.key_column_usage u
JOIN information_schema.columns c ON u.column_name = c.column_name AND u.table_name = c.table_name AND u.constraint_catalog = c.table_catalog
JOIN information_schema.table_constraints tc ON tc.constraint_name = u.constraint_name AND tc.table_name = c.table_name AND tc.constraint_catalog = c.table_catalog
WHERE u.table_name = c.table_name
AND tc.constraint_type = 'PRIMARY KEY'
AND c.table_name = '%v'
AND c.table_catalog = '%v'
ORDER BY u.ordinal_position
`
// pgAutoincrementSQL checks whether the primary key's column_default is
// sequence backed; substituted as ("%" wildcard, table, catalog).
const pgAutoincrementSQL = `SELECT LIKE(column_default, 'nextval(%v') AS is_autoincrement FROM information_schema.key_column_usage u
JOIN information_schema.columns c ON u.column_name = c.column_name AND u.table_name = c.table_name AND u.constraint_catalog = c.table_catalog
JOIN information_schema.table_constraints tc ON tc.constraint_name = u.constraint_name AND tc.table_name = c.table_name AND tc.constraint_catalog = c.table_catalog
WHERE u.table_name = c.table_name
AND tc.constraint_type = 'PRIMARY KEY'
AND c.table_name = '%v'
AND c.table_catalog = '%v'
`
// oraTableSQL / oraSchemaSQL / oraSchemaListSQL are the Oracle equivalents
// (quoted "name" keeps the alias lowercase).
const oraTableSQL = `SELECT table_name AS "name" FROM all_tables WHERE owner = ?`
const oraSchemaSQL = `SELECT sys_context( 'userenv', 'current_schema' ) AS "name" FROM dual`
const oraSchemaListSQL = `SELECT USERNAME AS "name" FROM ALL_USERS`
// oraPrimaryKeySQL lists primary-key columns; substituted as (table, owner).
const oraPrimaryKeySQL = `SELECT c.column_name AS "name"
FROM all_constraints p
JOIN all_cons_columns c ON p.constraint_name = c.constraint_name AND p.owner = c.owner
WHERE c.table_name = UPPER('%v')
AND p.owner = UPPER('%v')
AND p.constraint_type = 'P'
ORDER BY c.position`
// msSchemaSQL / msSequenceSQL are the SQL Server equivalents.
const msSchemaSQL = "SELECT SCHEMA_NAME() AS name"
const msSequenceSQL = "SELECT current_value AS seq_value FROM sys.sequences WHERE name = '%v'"
// verticaTableInfo returns column metadata from Vertica's catalog; bound as (schema, table).
const verticaTableInfo = `SELECT column_name,
data_type,
data_type_length,
numeric_precision,
numeric_scale,
is_nullable
FROM v_catalog.columns
WHERE table_schema = ? AND table_name = ?
ORDER BY ordinal_position`
// ansiTableInfo returns column metadata from information_schema; bound as (schema, table).
const ansiTableInfo = ` SELECT
column_name,
data_type,
character_maximum_length AS data_type_length,
numeric_precision,
numeric_scale,
is_nullable
FROM information_schema.columns
WHERE table_schema = ? AND table_name = ?
ORDER BY ordinal_position`
// nameRecord maps a single-column "name" result row.
type nameRecord struct {
	Name string `TableColumn:"name"`
}
// sqlDatastoreDialect is the generic ANSI-SQL dialect; concrete dialects embed
// it and customize behavior through these per-dialect SQL templates (an empty
// template disables the corresponding feature).
type sqlDatastoreDialect struct {
	tablesSQL              string
	sequenceSQL            string
	schemaSQL              string
	allSchemaSQL           string
	keySQL                 string
	disableForeignKeyCheck string
	enableForeignKeyCheck  string
	autoIncrementSQL       string
	tableInfoSQL           string
	// schemaResultsetIndex is the column index of the schema name in
	// allSchemaSQL/schemaSQL result rows.
	schemaResultsetIndex int
}
//CanCreateDatastore returns true if this dialect can create datastore
func (d sqlDatastoreDialect) CanCreateDatastore(manager Manager) bool {
	return true
}
//CanDropDatastore returns true if this dialect can drop datastore
func (d sqlDatastoreDialect) CanDropDatastore(manager Manager) bool {
	return true
}
// Init is a no-op for the generic dialect; concrete dialects override it.
func (d sqlDatastoreDialect) Init(manager Manager, connection Connection) error {
	return nil
}
func hasColumnType(columns []*sql.ColumnType) bool {
if len(columns) == 0 {
return false
}
return columns[0].DatabaseTypeName() != ""
}
// GetColumns returns column metadata for datastore.tableName. It probes the
// driver with an empty-result query; if the driver exposes no column type
// names it falls back to information-schema style metadata (d.tableInfoSQL).
// Fix: the *sql.Rows from Query is now closed, so the underlying connection
// is released (the original leaked it).
func (d sqlDatastoreDialect) GetColumns(manager Manager, datastore, tableName string) ([]Column, error) {
	provider := manager.ConnectionProvider()
	connection, err := provider.Get()
	if err != nil {
		return nil, err
	}
	defer connection.Close()
	dbConnection, err := asSQLDb(connection.Unwrap((*sql.DB)(nil)))
	if err != nil {
		return nil, err
	}
	// WHERE 1 = 0 returns no rows - only the result-set metadata is needed.
	rows, err := dbConnection.Query("SELECT * FROM " + datastore + "." + tableName + " WHERE 1 = 0")
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	columns, err := rows.ColumnTypes()
	if err != nil {
		return nil, err
	}
	var result = make([]Column, 0)
	if !hasColumnType(columns) {
		var tableColumns = []*TableColumn{}
		err := manager.ReadAll(&tableColumns, d.tableInfoSQL, []interface{}{datastore, tableName}, nil)
		if err == nil {
			for _, column := range tableColumns {
				// Strip any "(size)" suffix and normalize type-name casing.
				if index := strings.Index(column.DataType, "("); index != -1 {
					column.DataType = string(column.DataType[:index])
				}
				column.DataType = strings.ToUpper(column.DataType)
				result = append(result, column)
			}
			return result, nil
		}
	}
	for _, column := range columns {
		result = append(result, column)
	}
	return result, nil
}
// EachTable invokes handler for every table in the current datastore,
// stopping at the first handler error.
func (d sqlDatastoreDialect) EachTable(manager Manager, handler func(table string) error) error {
	dbname, err := d.GetCurrentDatastore(manager)
	if err != nil {
		return err
	}
	tables, err := d.GetTables(manager, dbname)
	if err != nil {
		return err
	}
	for _, table := range tables {
		if err := handler(table); err != nil {
			return err
		}
	}
	return err
}
//CreateDatastore create a new datastore (database/schema), it takes manager and target datastore
func (d sqlDatastoreDialect) CreateDatastore(manager Manager, datastore string) error {
	_, err := manager.Execute("CREATE DATABASE " + datastore)
	return err
}
//DropDatastore drops a datastore (database/schema), it takes manager and datastore to be dropped
func (d sqlDatastoreDialect) DropDatastore(manager Manager, datastore string) error {
	_, err := manager.Execute("DROP DATABASE " + datastore)
	return err
}
//DropTable drops a table in datastore managed by manager.
func (d sqlDatastoreDialect) DropTable(manager Manager, datastore string, table string) error {
	_, err := manager.Execute("DROP TABLE " + table)
	return err
}
//CreateTable creates table on in datastore managed by manager.
func (d sqlDatastoreDialect) CreateTable(manager Manager, datastore string, table string, specification string) error {
	_, err := manager.Execute("CREATE TABLE " + table + "(" + specification + ")")
	return err
}
//GetTables return tables names for passed in datastore managed by manager.
func (d sqlDatastoreDialect) GetTables(manager Manager, datastore string) ([]string, error) {
	var records []nameRecord
	if err := manager.ReadAll(&records, d.tablesSQL, []interface{}{datastore}, nil); err != nil {
		return nil, err
	}
	names := make([]string, 0, len(records))
	for _, record := range records {
		if record.Name != "" {
			names = append(names, record.Name)
		}
	}
	return names, nil
}
// normalizeName strips any directory portion from name, returning only the
// final path element; names without '/' or '\' separators come back unchanged.
func normalizeName(name string) string {
	if strings.ContainsAny(name, "/\\") {
		_, file := path.Split(name)
		return file
	}
	return name
}
//GetKeyName returns key name
// Returns the primary-key column name(s) for table as a comma-joined string,
// or "" when the dialect has no key query or the query fails.
func (d sqlDatastoreDialect) GetKeyName(manager Manager, datastore, table string) string {
	if d.keySQL == "" {
		return ""
	}
	// keySQL takes (table, datastore) - in that order.
	SQL := fmt.Sprintf(d.keySQL, table, datastore)
	var records = make([]map[string]interface{}, 0)
	err := manager.ReadAll(&records, SQL, []interface{}{}, nil)
	if err != nil {
		// Lookup failures are deliberately swallowed; callers treat "" as "no key".
		return ""
	}
	var result = make([]string, 0)
	for _, item := range records {
		result = append(result, toolbox.AsString(item["name"]))
	}
	return strings.Join(result, ",")
}
//GetDatastores returns name of datastores, takes manager as parameter
func (d sqlDatastoreDialect) GetDatastores(manager Manager) ([]string, error) {
	var rows = make([][]interface{}, 0)
	err := manager.ReadAll(&rows, d.allSchemaSQL, nil, nil)
	if err != nil {
		// SQLite reports a missing file as "unable to open database"; treat
		// that as "no datastores" rather than an error.
		if strings.Contains(err.Error(), "unable to open database") {
			return []string{}, nil
		}
		return nil, err
	}
	var result = make([]string, 0)
	for _, row := range rows {
		// schemaResultsetIndex selects the schema-name column; normalizeName
		// strips any file-path prefix.
		result = append(result, normalizeName(toolbox.AsString(row[d.schemaResultsetIndex])))
	}
	return result, nil
}
//GetCurrentDatastore returns name of current schema
func (d sqlDatastoreDialect) GetCurrentDatastore(manager Manager) (string, error) {
	var result = make([]interface{}, 0)
	success, err := manager.ReadSingle(&result, d.schemaSQL, nil, nil)
	if err != nil || !success {
		// On "no row" (success == false) err is nil, so callers get ("", nil).
		return "", err
	}
	return normalizeName(toolbox.AsString(result[d.schemaResultsetIndex])), nil
}
// IsAutoincrement reports whether table's key column is auto-increment,
// using the dialect's autoIncrementSQL template.
func (d sqlDatastoreDialect) IsAutoincrement(manager Manager, datastore, table string) bool {
	if d.autoIncrementSQL == "" {
		return false
	}
	// NOTE(review): the datastore argument is immediately overwritten with the
	// connection's current datastore - confirm this is intentional.
	datastore, err := d.GetCurrentDatastore(manager)
	if err != nil {
		return false
	}
	var key = d.GetKeyName(manager, datastore, table)
	var result = make([]interface{}, 0)
	success, err := manager.ReadSingle(&result, fmt.Sprintf(d.autoIncrementSQL, datastore, table, key), nil, nil)
	if err != nil || !success {
		return false
	}
	if len(result) == 1 {
		return toolbox.AsInt(result[0]) == 1
	}
	return false
}
//GetSequence returns sequence value or error for passed in manager and table/sequence
// Strategy: try the dialect's native sequence query first; if that yields no
// positive value, fall back to MAX(key)+1 over the table itself.
func (d sqlDatastoreDialect) GetSequence(manager Manager, name string) (int64, error) {
	var result = make([]interface{}, 0)
	var sequenceError error
	if d.sequenceSQL != "" {
		var success bool
		success, sequenceError = manager.ReadSingle(&result, fmt.Sprintf(d.sequenceSQL, name), []interface{}{}, nil)
		if success && len(result) == 1 {
			var intResult = toolbox.AsInt(result[0])
			if intResult > 0 {
				return int64(intResult), nil
			}
		}
	}
	datastore, err := d.GetCurrentDatastore(manager)
	if err != nil {
		return 0, err
	}
	var key = d.GetKeyName(manager, datastore, name)
	if key != "" {
		success, err := manager.ReadSingle(&result, fmt.Sprintf("SELECT MAX(%v) AS seq_value FROM %v", key, name), []interface{}{}, nil)
		if err != nil || !success {
			return 0, err
		}
		if len(result) == 1 {
			return int64(toolbox.AsInt(result[0]) + 1), nil
		}
	}
	// Surface the earlier sequence-query error (nil when sequenceSQL is empty).
	return 0, sequenceError
}
//DisableForeignKeyCheck disables fk check
func (d sqlDatastoreDialect) DisableForeignKeyCheck(manager Manager, connection Connection) error {
	if d.disableForeignKeyCheck == "" {
		return nil
	}
	_, err := manager.ExecuteOnConnection(connection, d.disableForeignKeyCheck, nil)
	return err
}
//EnableForeignKeyCheck enables fk check
func (d sqlDatastoreDialect) EnableForeignKeyCheck(manager Manager, connection Connection) error {
	if d.enableForeignKeyCheck == "" {
		return nil
	}
	_, err := manager.ExecuteOnConnection(connection, d.enableForeignKeyCheck, nil)
	return err
}
// NormalizePlaceholders returns SQL unchanged: the generic dialect assumes the
// driver natively understands '?' placeholders.
func (d sqlDatastoreDialect) NormalizePlaceholders(SQL string) string {
	return SQL
}
//CanPersistBatch return true if datastore can persist in batch
func (d sqlDatastoreDialect) CanPersistBatch() bool {
	return false
}
//NewSQLDatastoreDialect creates a new default sql dialect
// The positional arguments populate sqlDatastoreDialect fields one-to-one;
// an empty string disables the corresponding feature.
func NewSQLDatastoreDialect(tablesSQL, sequenceSQL, schemaSQL, allSchemaSQL, keySQL, disableForeignKeyCheck, enableForeignKeyCheck, autoIncrementSQL, tableInfoSQL string, schmeaIndex int) DatastoreDialect {
	return &sqlDatastoreDialect{tablesSQL, sequenceSQL, schemaSQL, allSchemaSQL, keySQL, disableForeignKeyCheck, enableForeignKeyCheck, autoIncrementSQL, tableInfoSQL, schmeaIndex}
}
// mySQLDialect specializes the generic SQL dialect for MySQL.
type mySQLDialect struct {
	DatastoreDialect
}
//CanPersistBatch return true if datastore can persist in batch
func (d mySQLDialect) CanPersistBatch() bool {
	return true
}
// newMySQLDialect wires the generic SQL dialect with MySQL specific queries.
func newMySQLDialect() mySQLDialect {
	return mySQLDialect{DatastoreDialect: NewSQLDatastoreDialect(ansiTableListSQL, ansiSequenceSQL, defaultSchemaSQL, ansiSchemaListSQL, ansiPrimaryKeySQL, mysqlDisableForeignCheck, mysqlEnableForeignCheck, defaultAutoincremetSQL, ansiTableInfo, 0)}
}
// sqlLiteDialect specializes the generic SQL dialect for SQLite.
type sqlLiteDialect struct {
	DatastoreDialect
}
//CreateDatastore create a new datastore (database/schema), it takes manager and target datastore
// No-op: nothing to create explicitly for SQLite here.
func (d sqlLiteDialect) CreateDatastore(manager Manager, datastore string) error {
	return nil
}
//GetSequence returns sequence value or error for passed in manager and table/sequence
// Tries SQLITE_SEQUENCE first, then falls back to MAX(key)+1 over the table.
func (d sqlLiteDialect) GetSequence(manager Manager, name string) (int64, error) {
	var result = make([]interface{}, 0)
	success, sequenceError := manager.ReadSingle(&result, fmt.Sprintf(sqlLightSequenceSQL, name), []interface{}{}, nil)
	if success && len(result) == 1 {
		var intResult = toolbox.AsInt(result[0])
		if intResult > 0 {
			return int64(intResult), nil
		}
	}
	datastore, err := d.GetCurrentDatastore(manager)
	if err != nil {
		return 0, err
	}
	var key = d.GetKeyName(manager, datastore, name)
	if key != "" {
		success, err := manager.ReadSingle(&result, fmt.Sprintf("SELECT MAX(%v) AS seq_value FROM %v", key, name), []interface{}{}, nil)
		if err != nil || !success {
			return 0, err
		}
		if len(result) == 1 {
			return int64(toolbox.AsInt(result[0]) + 1), nil
		}
	}
	// Surface the earlier sequence-query error (may be nil).
	return 0, sequenceError
}
// DropDatastore drops every table in the SQLite datastore (there is no
// DROP DATABASE statement in SQLite).
func (d sqlLiteDialect) DropDatastore(manager Manager, datastore string) error {
	tables, err := d.GetTables(manager, datastore)
	if err != nil {
		return err
	}
	for _, table := range tables {
		if dropErr := d.DropTable(manager, datastore, table); dropErr != nil {
			return dropErr
		}
	}
	return nil
}
// GetKeyName returns the comma-joined primary-key column(s) of table using
// SQLite's "pragma table_info".
// NOTE(review): only pk == "1" is collected, so for composite keys only the
// first key column is reported - confirm this is intended.
func (d sqlLiteDialect) GetKeyName(manager Manager, datastore, table string) string {
	var records = make([]map[string]interface{}, 0)
	err := manager.ReadAll(&records, fmt.Sprintf(sqlLightPkSQL, table), []interface{}{}, nil)
	if err != nil {
		return ""
	}
	var result = make([]string, 0)
	for _, item := range records {
		if toolbox.AsString(item["pk"]) == "1" {
			result = append(result, toolbox.AsString(item["name"]))
		}
	}
	return strings.Join(result, ",")
}
// newSQLLiteDialect wires the generic SQL dialect with SQLite specific queries
// (schema name is column index 2 of "PRAGMA database_list").
func newSQLLiteDialect() *sqlLiteDialect {
	return &sqlLiteDialect{DatastoreDialect: NewSQLDatastoreDialect(sqlLightTableSQL, sqlLightSequenceSQL, sqlLightSchemaSQL, sqlLightSchemaSQL, sqlLightPkSQL, "", "", "", ansiTableInfo, 2)}
}
// pgDialect specializes the generic SQL dialect for PostgreSQL.
type pgDialect struct {
	DatastoreDialect
}
//CanPersistBatch return true if datastore can persist in batch
func (d pgDialect) CanPersistBatch() bool {
	return true
}
// newPgDialect wires the generic SQL dialect with PostgreSQL specific queries.
func newPgDialect() *pgDialect {
	return &pgDialect{DatastoreDialect: NewSQLDatastoreDialect(pgTableListSQL, "", pgCurrentSchemaSQL, pgSchemaListSQL, pgPrimaryKeySQL, "", "", pgAutoincrementSQL, ansiTableInfo, 0)}
}
// NormalizePlaceholders rewrites generic '?' placeholders into PostgreSQL's
// positional $1, $2, ... form.
// Uses strings.Builder instead of the original string concatenation in a loop,
// which was accidentally quadratic; output is identical.
// NOTE(review): '?' inside string literals is not distinguished - the input is
// scanned rune by rune.
func (d pgDialect) NormalizePlaceholders(SQL string) string {
	var normalized strings.Builder
	normalized.Grow(len(SQL))
	count := 1
	for _, r := range SQL {
		if r == '?' {
			normalized.WriteByte('$')
			normalized.WriteString(toolbox.AsString(count))
			count++
		} else {
			normalized.WriteRune(r)
		}
	}
	return normalized.String()
}
// IsAutoincrement reports whether table's primary key default is sequence
// backed (nextval(...)) in PostgreSQL.
func (d pgDialect) IsAutoincrement(manager Manager, datastore, table string) bool {
	// NOTE(review): the datastore argument is overwritten with the current
	// database - confirm this is intentional.
	datastore, err := d.GetCurrentDatastore(manager)
	if err != nil {
		return false
	}
	// pgAutoincrementSQL takes ("%", table, datastore); the leading "%" becomes
	// the wildcard inside LIKE(column_default, 'nextval(%').
	var SQL = fmt.Sprintf(pgAutoincrementSQL, "%", table, datastore)
	var result = make([]interface{}, 0)
	success, err := manager.ReadSingle(&result, SQL, nil, nil)
	if err != nil || !success {
		return false
	}
	if len(result) == 1 {
		return toolbox.AsBoolean(result[0])
	}
	return false
}
// DisableForeignKeyCheck disables constraint triggers table-by-table
// (PostgreSQL has no global FK switch).
func (d pgDialect) DisableForeignKeyCheck(manager Manager, connection Connection) error {
	return d.EachTable(manager, func(table string) error {
		_, err := manager.ExecuteOnConnection(connection, fmt.Sprintf("ALTER TABLE %v DISABLE TRIGGER ALL", table), nil)
		return err
	})
}
// EnableForeignKeyCheck re-enables constraint triggers table-by-table.
func (d pgDialect) EnableForeignKeyCheck(manager Manager, connection Connection) error {
	return d.EachTable(manager, func(table string) error {
		_, err := manager.ExecuteOnConnection(connection, fmt.Sprintf("ALTER TABLE %v ENABLE TRIGGER ALL", table), nil)
		return err
	})
}
// oraDialect specializes the generic SQL dialect for Oracle.
type oraDialect struct {
	DatastoreDialect
}
//CanPersistBatch return true if datastore can persist in batch
func (d oraDialect) CanPersistBatch() bool {
	return true
}
//CreateDatastore create a new datastore (database/schema), it takes manager and target datastore
// In Oracle a "datastore" is a user/schema: CREATE USER followed by GRANT.
// NOTE(review): datastore and password are interpolated directly into the DCL -
// only trusted input should reach this function.
func (d oraDialect) CreateDatastore(manager Manager, datastore string) error {
	var password, ok = manager.Config().Parameters["password"]
	if !ok {
		return fmt.Errorf("password was empty")
	}
	DCL := fmt.Sprintf("CREATE USER %v IDENTIFIED BY %v", datastore, password)
	if _, err := manager.Execute(DCL); err != nil {
		return err
	}
	DCL = fmt.Sprintf("GRANT CONNECT, RESOURCE, DBA TO %v", datastore)
	if _, err := manager.Execute(DCL); err != nil {
		return err
	}
	return nil
}
//DropDatastore drops a datastore (an Oracle user/schema) with CASCADE.
func (d oraDialect) DropDatastore(manager Manager, datastore string) error {
	_, err := manager.Execute(fmt.Sprintf("DROP USER %v CASCADE", datastore))
	return err
}
// NormalizePlaceholders rewrites generic '?' placeholders into Oracle's
// positional :1, :2, ... form.
// Uses strings.Builder instead of the original string concatenation in a loop,
// which was accidentally quadratic; output is identical.
func (d oraDialect) NormalizePlaceholders(SQL string) string {
	var normalized strings.Builder
	normalized.Grow(len(SQL))
	count := 1
	for _, r := range SQL {
		if r == '?' {
			normalized.WriteByte(':')
			normalized.WriteString(toolbox.AsString(count))
			count++
		} else {
			normalized.WriteRune(r)
		}
	}
	return normalized.String()
}
// Init applies optional "session" config parameters as ALTER SESSION settings
// on the supplied connection; missing/empty session config is a no-op.
func (d oraDialect) Init(manager Manager, connection Connection) error {
	config := manager.Config()
	if _, has := config.Parameters["session"]; !has {
		return nil
	}
	session := config.GetMap("session")
	if session == nil {
		return nil
	}
	for k, v := range session {
		_, err := manager.ExecuteOnConnection(connection, fmt.Sprintf("ALTER SESSION SET %v = '%v'", k, v), nil)
		if err != nil {
			return err
		}
	}
	return nil
}
// newOraDialect wires the generic SQL dialect with Oracle specific queries.
func newOraDialect() *oraDialect {
	return &oraDialect{DatastoreDialect: NewSQLDatastoreDialect(oraTableSQL, "", oraSchemaSQL, oraSchemaListSQL, oraPrimaryKeySQL, "", "", "", ansiTableInfo, 0)}
}
// odbcDialect specializes the generic SQL dialect for ODBC-connected stores
// (e.g. Vertica).
type odbcDialect struct {
	DatastoreDialect
}
// Init applies the optional SEARCH_PATH config parameter on the connection
// being initialized, so unqualified names resolve against that namespace.
func (d *odbcDialect) Init(manager Manager, connection Connection) error {
	searchPath := manager.Config().Get("SEARCH_PATH")
	if searchPath != "" {
		var SQL = fmt.Sprintf("SET SEARCH_PATH=%v" ,searchPath)
		if _, err := manager.ExecuteOnConnection(connection, SQL, nil);err != nil {
			return err
		}
	}
	return nil
}
// newOdbcDialect wires the generic SQL dialect with ODBC/Vertica specific queries.
func newOdbcDialect() *odbcDialect {
	return &odbcDialect{DatastoreDialect: NewSQLDatastoreDialect(ansiTableListSQL, "", "", ansiSchemaListSQL, "", "", "", "", verticaTableInfo, 0)}
}
// msSQLDialect specializes the generic SQL dialect for Microsoft SQL Server.
type msSQLDialect struct {
	DatastoreDialect
}
// newMsSQLDialect wires the generic SQL dialect with SQL Server specific queries.
func newMsSQLDialect() *msSQLDialect {
	return &msSQLDialect{DatastoreDialect: NewSQLDatastoreDialect(ansiTableListSQL, msSequenceSQL, msSchemaSQL, ansiSchemaListSQL, "", "", "", "", ansiTableInfo, 0)}
}
|
package registry
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/dotcloud/docker/auth"
"github.com/dotcloud/docker/utils"
"github.com/shin-/cookiejar"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
)
// ErrAlreadyExists is returned when pushing an image the registry already has.
var ErrAlreadyExists = errors.New("Image already exists")
func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) {
for _, cookie := range c.Jar.Cookies(req.URL) {
req.AddCookie(cookie)
}
return c.Do(req)
}
// GetRemoteHistory retrieves the history of a given image from the Registry.
// Returns a list of the parents' json blobs (requested image included).
// Fixes: the error check no longer dereferences res when the transport failed,
// and the response body is closed on the non-200 path (it previously leaked).
func (r *Registry) GetRemoteHistory(imgId, registry string, token []string) ([]string, error) {
	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/ancestry", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
	res, err := r.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return nil, fmt.Errorf("Internal server error: %d trying to fetch remote history for %s", res.StatusCode, imgId)
	}
	jsonString, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, fmt.Errorf("Error while reading the http response: %s", err)
	}
	utils.Debugf("Ancestry: %s", jsonString)
	history := new([]string)
	if err := json.Unmarshal(jsonString, history); err != nil {
		return nil, err
	}
	return *history, nil
}
// LookupRemoteImage checks if an image exists in the Registry. A 307 redirect
// from the /json endpoint indicates the image is present.
// Fix: the response body is now closed on success (it previously leaked the
// connection).
func (r *Registry) LookupRemoteImage(imgId, registry string, authConfig *auth.AuthConfig) bool {
	rt := &http.Transport{Proxy: http.ProxyFromEnvironment}
	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/json", nil)
	if err != nil {
		return false
	}
	req.SetBasicAuth(authConfig.Username, authConfig.Password)
	res, err := rt.RoundTrip(req)
	if err != nil {
		return false
	}
	defer res.Body.Close()
	return res.StatusCode == 307
}
// getImagesInRepository lists the image checksums registered for repository on
// the index server; returns (nil, nil) when the repository does not exist yet.
func (r *Registry) getImagesInRepository(repository string, authConfig *auth.AuthConfig) ([]map[string]string, error) {
	u := auth.IndexServerAddress() + "/repositories/" + repository + "/images"
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}
	if authConfig != nil && len(authConfig.Username) > 0 {
		req.SetBasicAuth(authConfig.Username, authConfig.Password)
	}
	res, err := r.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	// Repository doesn't exist yet
	if res.StatusCode == 404 {
		return nil, nil
	}
	jsonData, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	imageList := []map[string]string{}
	if err := json.Unmarshal(jsonData, &imageList); err != nil {
		utils.Debugf("Body: %s (%s)\n", res.Body, u)
		return nil, err
	}
	return imageList, nil
}
// GetRemoteImageJson retrieves an image's json metadata from the Registry,
// authenticating with the supplied token list.
func (r *Registry) GetRemoteImageJson(imgId, registry string, token []string) ([]byte, error) {
	// Get the Json
	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/json", nil)
	if err != nil {
		return nil, fmt.Errorf("Failed to download json: %s", err)
	}
	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
	res, err := r.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("Failed to download json: %s", err)
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return nil, fmt.Errorf("HTTP code %d", res.StatusCode)
	}
	jsonString, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString)
	}
	return jsonString, nil
}
// GetRemoteImageLayer opens a download stream for an image layer together with
// its content length. The caller must close the returned ReadCloser.
func (r *Registry) GetRemoteImageLayer(imgId, registry string, token []string) (io.ReadCloser, int, error) {
	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/layer", nil)
	if err != nil {
		return nil, -1, fmt.Errorf("Error while getting from the server: %s\n", err)
	}
	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
	res, err := r.client.Do(req)
	if err != nil {
		return nil, -1, err
	}
	// A non-200 body is an error payload, not layer data; don't hand it to
	// the caller as if it were the layer.
	if res.StatusCode != 200 {
		res.Body.Close()
		return nil, -1, fmt.Errorf("HTTP code %d while downloading layer %s", res.StatusCode, imgId)
	}
	return res.Body, int(res.ContentLength), nil
}
// GetRemoteTags asks each registry endpoint in turn for the tag map of a
// repository, returning the first successful answer. A 404 from any endpoint
// is authoritative ("Repository not found").
func (r *Registry) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) {
	if strings.Count(repository, "/") == 0 {
		// This will be removed once the Registry supports auto-resolution on
		// the "library" namespace
		repository = "library/" + repository
	}
	for _, host := range registries {
		endpoint := fmt.Sprintf("https://%s/v1/repositories/%s/tags", host, repository)
		req, err := http.NewRequest("GET", endpoint, nil)
		if err != nil {
			return nil, err
		}
		req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
		res, err := r.client.Do(req)
		if err != nil {
			// Transport failure: res is nil here, so it must not be touched
			// (the original deferred res.Body.Close() before this check and
			// would panic). Try the next endpoint.
			continue
		}
		utils.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
		if res.StatusCode == 404 {
			res.Body.Close()
			return nil, fmt.Errorf("Repository not found")
		}
		if res.StatusCode != 200 {
			res.Body.Close()
			continue
		}
		// Close per iteration instead of deferring inside the loop.
		rawJson, err := ioutil.ReadAll(res.Body)
		res.Body.Close()
		if err != nil {
			return nil, err
		}
		result := make(map[string]string)
		if err := json.Unmarshal(rawJson, &result); err != nil {
			return nil, err
		}
		return result, nil
	}
	return nil, fmt.Errorf("Could not reach any registry endpoint")
}
// GetRepositoryData asks the index for everything it knows about a remote
// repository: per-image checksums, the registry endpoints serving it, and a
// session token (requested via the X-Docker-Token header).
func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) {
	utils.Debugf("Pulling repository %s from %s\r\n", remote, auth.IndexServerAddress())
	repositoryTarget := auth.IndexServerAddress() + "/repositories/" + remote + "/images"
	req, err := http.NewRequest("GET", repositoryTarget, nil)
	if err != nil {
		return nil, err
	}
	if r.authConfig != nil && len(r.authConfig.Username) > 0 {
		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
	}
	// Ask the index to issue a token we can replay against the registry.
	req.Header.Set("X-Docker-Token", "true")
	res, err := r.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.StatusCode == 401 {
		return nil, fmt.Errorf("Please login first (HTTP code %d)", res.StatusCode)
	}
	// TODO: Right now we're ignoring checksums in the response body.
	// In the future, we need to use them to check image validity.
	if res.StatusCode != 200 {
		return nil, fmt.Errorf("HTTP code: %d", res.StatusCode)
	}
	// Token is optional; endpoints are mandatory.
	var tokens []string
	if res.Header.Get("X-Docker-Token") != "" {
		tokens = res.Header["X-Docker-Token"]
	}
	var endpoints []string
	if res.Header.Get("X-Docker-Endpoints") != "" {
		endpoints = res.Header["X-Docker-Endpoints"]
	} else {
		return nil, fmt.Errorf("Index response didn't contain any endpoints")
	}
	checksumsJson, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	remoteChecksums := []*ImgData{}
	if err := json.Unmarshal(checksumsJson, &remoteChecksums); err != nil {
		return nil, err
	}
	// Forge a better object from the retrieved data: index the list by id.
	imgsData := make(map[string]*ImgData)
	for _, elem := range remoteChecksums {
		imgsData[elem.Id] = elem
	}
	return &RepositoryData{
		ImgList:   imgsData,
		Endpoints: endpoints,
		Tokens:    tokens,
	}, nil
}
// PushImageJsonRegistry uploads an image's json metadata to the registry,
// setting X-Docker-Checksum so the server can verify the payload. Returns
// ErrAlreadyExists when the registry reports the image is already present.
func (r *Registry) PushImageJsonRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error {
	registry = "https://" + registry + "/v1"
	// FIXME: try json with UTF8
	req, err := http.NewRequest("PUT", registry+"/images/"+imgData.Id+"/json", strings.NewReader(string(jsonRaw)))
	if err != nil {
		return err
	}
	req.Header.Add("Content-type", "application/json")
	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
	req.Header.Set("X-Docker-Checksum", imgData.Checksum)
	utils.Debugf("Setting checksum for %s: %s", imgData.Id, imgData.Checksum)
	res, err := doWithCookies(r.client, req)
	if err != nil {
		return fmt.Errorf("Failed to upload metadata: %s", err)
	}
	defer res.Body.Close()
	// Persist any session cookies the registry handed back for later calls.
	if len(res.Cookies()) > 0 {
		r.client.Jar.SetCookies(req.URL, res.Cookies())
	}
	if res.StatusCode != 200 {
		errBody, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err)
		}
		// The error body is expected to be json; "Image already exists" is a
		// benign, recognized case.
		var jsonBody map[string]string
		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
			errBody = []byte(err.Error())
		} else if jsonBody["error"] == "Image already exists" {
			return ErrAlreadyExists
		}
		return fmt.Errorf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody)
	}
	return nil
}
// PushImageLayerRegistry streams an image layer to the registry using chunked
// transfer encoding (the layer size is not known up front).
func (r *Registry) PushImageLayerRegistry(imgId string, layer io.Reader, registry string, token []string) error {
	registry = "https://" + registry + "/v1"
	req, err := http.NewRequest("PUT", registry+"/images/"+imgId+"/layer", layer)
	if err != nil {
		return err
	}
	// Unknown length: force chunked encoding.
	req.ContentLength = -1
	req.TransferEncoding = []string{"chunked"}
	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
	res, err := doWithCookies(r.client, req)
	if err != nil {
		return fmt.Errorf("Failed to upload layer: %s", err)
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		errBody, err := ioutil.ReadAll(res.Body)
		if err != nil {
			// Fixed copy-pasted message: this path uploads a layer, not metadata.
			return fmt.Errorf("HTTP code %d while uploading layer and error when trying to parse response body: %s", res.StatusCode, err)
		}
		return fmt.Errorf("Received HTTP code %d while uploading layer: %s", res.StatusCode, errBody)
	}
	return nil
}
// PushRegistryTag points a tag at the given revision on the registry.
// Remote has the format '<user>/<repo>'.
func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
	// The registry expects the revision wrapped in quotes, as a json string.
	body := "\"" + revision + "\""
	endpoint := "https://" + registry + "/v1/repositories/" + remote + "/tags/" + tag
	req, err := http.NewRequest("PUT", endpoint, strings.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Add("Content-type", "application/json")
	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
	req.ContentLength = int64(len(body))
	res, err := doWithCookies(r.client, req)
	if err != nil {
		return err
	}
	res.Body.Close()
	if res.StatusCode != 200 && res.StatusCode != 201 {
		return fmt.Errorf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote)
	}
	return nil
}
// PushImageJsonIndex uploads the image list for a repository to the index,
// following redirects manually so the json body can be re-sent each time.
// It returns the session tokens and registry endpoints from the response
// headers. When validate is set, the index is expected to answer 204.
func (r *Registry) PushImageJsonIndex(remote string, imgList []*ImgData, validate bool) (*RepositoryData, error) {
	imgListJson, err := json.Marshal(imgList)
	if err != nil {
		return nil, err
	}
	utils.Debugf("json sent: %s\n", imgListJson)

	// makeRequest builds a fresh PUT request for url; each redirect attempt
	// needs its own body reader since the previous one was consumed.
	makeRequest := func(url string) (*http.Request, error) {
		req, err := http.NewRequest("PUT", url, bytes.NewReader(imgListJson))
		if err != nil {
			return nil, err
		}
		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
		req.ContentLength = int64(len(imgListJson))
		req.Header.Set("X-Docker-Token", "true")
		return req, nil
	}

	req, err := makeRequest(auth.IndexServerAddress() + "/repositories/" + remote + "/")
	if err != nil {
		return nil, err
	}
	res, err := r.client.Do(req)
	if err != nil {
		return nil, err
	}
	// Redirect if necessary. Close each intermediate body immediately rather
	// than deferring inside the loop (deferred closes would pile up and leak
	// connections until the function returns).
	for res.StatusCode >= 300 && res.StatusCode < 400 {
		location := res.Header.Get("Location")
		res.Body.Close()
		utils.Debugf("Redirected to %s\n", location)
		req, err = makeRequest(location)
		if err != nil {
			return nil, err
		}
		res, err = r.client.Do(req)
		if err != nil {
			return nil, err
		}
	}
	defer res.Body.Close()
	if res.StatusCode != 200 && res.StatusCode != 201 {
		errBody, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return nil, err
		}
		return nil, fmt.Errorf("Error: Status %d trying to push repository %s: %s", res.StatusCode, remote, errBody)
	}
	var tokens []string
	if res.Header.Get("X-Docker-Token") != "" {
		tokens = res.Header["X-Docker-Token"]
		utils.Debugf("Auth token: %v", tokens)
	} else {
		return nil, fmt.Errorf("Index response didn't contain an access token")
	}
	var endpoints []string
	if res.Header.Get("X-Docker-Endpoints") != "" {
		endpoints = res.Header["X-Docker-Endpoints"]
	} else {
		return nil, fmt.Errorf("Index response didn't contain any endpoints")
	}
	if validate {
		if res.StatusCode != 204 {
			if errBody, err := ioutil.ReadAll(res.Body); err != nil {
				return nil, err
			} else {
				return nil, fmt.Errorf("Error: Status %d trying to push checksums %s: %s", res.StatusCode, remote, errBody)
			}
		}
	}
	return &RepositoryData{
		Tokens:    tokens,
		Endpoints: endpoints,
	}, nil
}
// SearchRepositories queries the index's /search endpoint for repositories
// matching term.
func (r *Registry) SearchRepositories(term string) (*SearchResults, error) {
	u := auth.IndexServerAddress() + "/search?q=" + url.QueryEscape(term)
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}
	res, err := r.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		// Fixed typo in the user-facing message ("Unexepected").
		return nil, fmt.Errorf("Unexpected status code %d", res.StatusCode)
	}
	rawData, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	result := new(SearchResults)
	err = json.Unmarshal(rawData, result)
	return result, err
}
// ResetClient replaces the auth configuration and clears any cookies stored
// by the previous session.
func (r *Registry) ResetClient(authConfig *auth.AuthConfig) {
	r.authConfig = authConfig
	r.client.Jar = cookiejar.NewCookieJar()
}

// GetAuthConfig returns a copy of the credentials with only the username and
// email populated (the password is deliberately omitted).
func (r *Registry) GetAuthConfig() *auth.AuthConfig {
	return &auth.AuthConfig{
		Username: r.authConfig.Username,
		Email:    r.authConfig.Email,
	}
}

// SearchResults is the payload returned by the index /search endpoint.
type SearchResults struct {
	Query      string              `json:"query"`
	NumResults int                 `json:"num_results"`
	Results    []map[string]string `json:"results"`
}

// RepositoryData aggregates what the index knows about a repository: image
// records by id, the registry endpoints serving it, and session tokens.
type RepositoryData struct {
	ImgList   map[string]*ImgData
	Endpoints []string
	Tokens    []string
}

// ImgData is the index's per-image record.
type ImgData struct {
	Id       string `json:"id"`
	Checksum string `json:"checksum,omitempty"`
	Tag      string `json:",omitempty"`
}

// Registry couples an HTTP client (with its cookie jar) with the credentials
// used to talk to the index and registry servers.
type Registry struct {
	client     *http.Client
	authConfig *auth.AuthConfig
}

// NewRegistry builds a Registry, loading auth credentials from the config
// file under root if present.
func NewRegistry(root string) *Registry {
	// If the auth file does not exist, keep going
	authConfig, _ := auth.LoadConfig(root)
	r := &Registry{
		authConfig: authConfig,
		client:     &http.Client{},
	}
	r.client.Jar = cookiejar.NewCookieJar()
	return r
}
Remove hijack from the API when it is not necessary
package registry
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/dotcloud/docker/auth"
"github.com/dotcloud/docker/utils"
"github.com/shin-/cookiejar"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
)
// ErrAlreadyExists is returned when the registry reports that an image with
// the same id is already present.
var ErrAlreadyExists error = errors.New("Image already exists")

// doWithCookies copies the client's stored cookies for the request URL onto
// the request before sending it.
func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) {
	for _, cookie := range c.Jar.Cookies(req.URL) {
		req.AddCookie(cookie)
	}
	return c.Do(req)
}
// GetRemoteHistory retrieves the ancestry of a given image from the Registry.
// Returns the list of parent image ids, requested image included.
func (r *Registry) GetRemoteHistory(imgId, registry string, token []string) ([]string, error) {
	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/ancestry", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
	res, err := r.client.Do(req)
	if err != nil {
		return nil, err
	}
	// Close the body on every path; the original leaked it when the status
	// was not 200.
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return nil, fmt.Errorf("Internal server error: %d trying to fetch remote history for %s", res.StatusCode, imgId)
	}
	jsonString, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, fmt.Errorf("Error while reading the http response: %s", err)
	}
	utils.Debugf("Ancestry: %s", jsonString)
	history := new([]string)
	if err := json.Unmarshal(jsonString, history); err != nil {
		return nil, err
	}
	return *history, nil
}
// LookupRemoteImage checks whether an image exists on the remote registry by
// probing its /images/<id>/json endpoint. Only a 307 answer counts as a hit
// (the v1 registry redirects for images that exist).
func (r *Registry) LookupRemoteImage(imgId, registry string, authConfig *auth.AuthConfig) bool {
	rt := &http.Transport{Proxy: http.ProxyFromEnvironment}
	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/json", nil)
	if err != nil {
		return false
	}
	req.SetBasicAuth(authConfig.Username, authConfig.Password)
	res, err := rt.RoundTrip(req)
	if err != nil {
		return false
	}
	// Close the response body so the underlying connection is not leaked
	// (the original never closed it).
	res.Body.Close()
	return res.StatusCode == 307
}
// getImagesInRepository lists the images the index knows for a repository.
// A 404 means the repository does not exist yet and yields (nil, nil).
func (r *Registry) getImagesInRepository(repository string, authConfig *auth.AuthConfig) ([]map[string]string, error) {
	u := auth.IndexServerAddress() + "/repositories/" + repository + "/images"
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}
	if authConfig != nil && len(authConfig.Username) > 0 {
		req.SetBasicAuth(authConfig.Username, authConfig.Password)
	}
	res, err := r.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	// Repository doesn't exist yet
	if res.StatusCode == 404 {
		return nil, nil
	}
	jsonData, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	imageList := []map[string]string{}
	if err := json.Unmarshal(jsonData, &imageList); err != nil {
		// Log the bytes we actually read; formatting res.Body (an already
		// consumed io.ReadCloser) with %s printed nothing useful.
		utils.Debugf("Body: %s (%s)\n", jsonData, u)
		return nil, err
	}
	return imageList, nil
}
// GetRemoteImageJson retrieves the raw json metadata for imgId from the given
// registry endpoint, authorizing with the supplied token. The layer itself is
// fetched separately by GetRemoteImageLayer (the old comment claiming this
// returned the layer was inaccurate).
func (r *Registry) GetRemoteImageJson(imgId, registry string, token []string) ([]byte, error) {
	// Get the Json
	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/json", nil)
	if err != nil {
		return nil, fmt.Errorf("Failed to download json: %s", err)
	}
	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
	res, err := r.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("Failed to download json: %s", err)
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return nil, fmt.Errorf("HTTP code %d", res.StatusCode)
	}
	jsonString, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString)
	}
	return jsonString, nil
}
// GetRemoteImageLayer opens a download stream for an image layer together with
// its content length. The caller must close the returned ReadCloser.
func (r *Registry) GetRemoteImageLayer(imgId, registry string, token []string) (io.ReadCloser, int, error) {
	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/layer", nil)
	if err != nil {
		return nil, -1, fmt.Errorf("Error while getting from the server: %s\n", err)
	}
	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
	res, err := r.client.Do(req)
	if err != nil {
		return nil, -1, err
	}
	// A non-200 body is an error payload, not layer data; don't hand it to
	// the caller as if it were the layer.
	if res.StatusCode != 200 {
		res.Body.Close()
		return nil, -1, fmt.Errorf("HTTP code %d while downloading layer %s", res.StatusCode, imgId)
	}
	return res.Body, int(res.ContentLength), nil
}
// GetRemoteTags asks each registry endpoint in turn for the tag map of a
// repository, returning the first successful answer. A 404 from any endpoint
// is authoritative ("Repository not found").
func (r *Registry) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) {
	if strings.Count(repository, "/") == 0 {
		// This will be removed once the Registry supports auto-resolution on
		// the "library" namespace
		repository = "library/" + repository
	}
	for _, host := range registries {
		endpoint := fmt.Sprintf("https://%s/v1/repositories/%s/tags", host, repository)
		req, err := http.NewRequest("GET", endpoint, nil)
		if err != nil {
			return nil, err
		}
		req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
		res, err := r.client.Do(req)
		if err != nil {
			// Transport failure: res is nil here, so it must not be touched
			// (the original deferred res.Body.Close() before this check and
			// would panic). Try the next endpoint.
			continue
		}
		utils.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
		if res.StatusCode == 404 {
			res.Body.Close()
			return nil, fmt.Errorf("Repository not found")
		}
		if res.StatusCode != 200 {
			res.Body.Close()
			continue
		}
		// Close per iteration instead of deferring inside the loop.
		rawJson, err := ioutil.ReadAll(res.Body)
		res.Body.Close()
		if err != nil {
			return nil, err
		}
		result := make(map[string]string)
		if err := json.Unmarshal(rawJson, &result); err != nil {
			return nil, err
		}
		return result, nil
	}
	return nil, fmt.Errorf("Could not reach any registry endpoint")
}
// GetRepositoryData asks the index for everything it knows about a remote
// repository: per-image checksums, the registry endpoints serving it, and a
// session token (requested via the X-Docker-Token header).
func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) {
	repositoryTarget := auth.IndexServerAddress() + "/repositories/" + remote + "/images"
	req, err := http.NewRequest("GET", repositoryTarget, nil)
	if err != nil {
		return nil, err
	}
	if r.authConfig != nil && len(r.authConfig.Username) > 0 {
		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
	}
	// Ask the index to issue a token we can replay against the registry.
	req.Header.Set("X-Docker-Token", "true")
	res, err := r.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.StatusCode == 401 {
		return nil, fmt.Errorf("Please login first (HTTP code %d)", res.StatusCode)
	}
	// TODO: Right now we're ignoring checksums in the response body.
	// In the future, we need to use them to check image validity.
	if res.StatusCode != 200 {
		return nil, fmt.Errorf("HTTP code: %d", res.StatusCode)
	}
	// Token is optional; endpoints are mandatory.
	var tokens []string
	if res.Header.Get("X-Docker-Token") != "" {
		tokens = res.Header["X-Docker-Token"]
	}
	var endpoints []string
	if res.Header.Get("X-Docker-Endpoints") != "" {
		endpoints = res.Header["X-Docker-Endpoints"]
	} else {
		return nil, fmt.Errorf("Index response didn't contain any endpoints")
	}
	checksumsJson, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	remoteChecksums := []*ImgData{}
	if err := json.Unmarshal(checksumsJson, &remoteChecksums); err != nil {
		return nil, err
	}
	// Forge a better object from the retrieved data: index the list by id.
	imgsData := make(map[string]*ImgData)
	for _, elem := range remoteChecksums {
		imgsData[elem.Id] = elem
	}
	return &RepositoryData{
		ImgList:   imgsData,
		Endpoints: endpoints,
		Tokens:    tokens,
	}, nil
}
// PushImageJsonRegistry uploads an image's json metadata to the registry,
// setting X-Docker-Checksum so the server can verify the payload. Returns
// ErrAlreadyExists when the registry reports the image is already present.
func (r *Registry) PushImageJsonRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error {
	registry = "https://" + registry + "/v1"
	// FIXME: try json with UTF8
	req, err := http.NewRequest("PUT", registry+"/images/"+imgData.Id+"/json", strings.NewReader(string(jsonRaw)))
	if err != nil {
		return err
	}
	req.Header.Add("Content-type", "application/json")
	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
	req.Header.Set("X-Docker-Checksum", imgData.Checksum)
	utils.Debugf("Setting checksum for %s: %s", imgData.Id, imgData.Checksum)
	res, err := doWithCookies(r.client, req)
	if err != nil {
		return fmt.Errorf("Failed to upload metadata: %s", err)
	}
	defer res.Body.Close()
	// Persist any session cookies the registry handed back for later calls.
	if len(res.Cookies()) > 0 {
		r.client.Jar.SetCookies(req.URL, res.Cookies())
	}
	if res.StatusCode != 200 {
		errBody, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err)
		}
		// The error body is expected to be json; "Image already exists" is a
		// benign, recognized case.
		var jsonBody map[string]string
		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
			errBody = []byte(err.Error())
		} else if jsonBody["error"] == "Image already exists" {
			return ErrAlreadyExists
		}
		return fmt.Errorf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody)
	}
	return nil
}
// PushImageLayerRegistry streams an image layer to the registry using chunked
// transfer encoding (the layer size is not known up front).
func (r *Registry) PushImageLayerRegistry(imgId string, layer io.Reader, registry string, token []string) error {
	registry = "https://" + registry + "/v1"
	req, err := http.NewRequest("PUT", registry+"/images/"+imgId+"/layer", layer)
	if err != nil {
		return err
	}
	// Unknown length: force chunked encoding.
	req.ContentLength = -1
	req.TransferEncoding = []string{"chunked"}
	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
	res, err := doWithCookies(r.client, req)
	if err != nil {
		return fmt.Errorf("Failed to upload layer: %s", err)
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		errBody, err := ioutil.ReadAll(res.Body)
		if err != nil {
			// Fixed copy-pasted message: this path uploads a layer, not metadata.
			return fmt.Errorf("HTTP code %d while uploading layer and error when trying to parse response body: %s", res.StatusCode, err)
		}
		return fmt.Errorf("Received HTTP code %d while uploading layer: %s", res.StatusCode, errBody)
	}
	return nil
}
// PushRegistryTag points a tag at the given revision on the registry.
// Remote has the format '<user>/<repo>'.
func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
	// The registry expects the revision wrapped in quotes, as a json string.
	body := "\"" + revision + "\""
	endpoint := "https://" + registry + "/v1/repositories/" + remote + "/tags/" + tag
	req, err := http.NewRequest("PUT", endpoint, strings.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Add("Content-type", "application/json")
	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
	req.ContentLength = int64(len(body))
	res, err := doWithCookies(r.client, req)
	if err != nil {
		return err
	}
	res.Body.Close()
	if res.StatusCode != 200 && res.StatusCode != 201 {
		return fmt.Errorf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote)
	}
	return nil
}
// PushImageJsonIndex uploads the image list for a repository to the index,
// following redirects manually so the json body can be re-sent each time.
// It returns the session tokens and registry endpoints from the response
// headers. When validate is set, the index is expected to answer 204.
func (r *Registry) PushImageJsonIndex(remote string, imgList []*ImgData, validate bool) (*RepositoryData, error) {
	imgListJson, err := json.Marshal(imgList)
	if err != nil {
		return nil, err
	}
	utils.Debugf("json sent: %s\n", imgListJson)

	// makeRequest builds a fresh PUT request for url; each redirect attempt
	// needs its own body reader since the previous one was consumed.
	makeRequest := func(url string) (*http.Request, error) {
		req, err := http.NewRequest("PUT", url, bytes.NewReader(imgListJson))
		if err != nil {
			return nil, err
		}
		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
		req.ContentLength = int64(len(imgListJson))
		req.Header.Set("X-Docker-Token", "true")
		return req, nil
	}

	req, err := makeRequest(auth.IndexServerAddress() + "/repositories/" + remote + "/")
	if err != nil {
		return nil, err
	}
	res, err := r.client.Do(req)
	if err != nil {
		return nil, err
	}
	// Redirect if necessary. Close each intermediate body immediately rather
	// than deferring inside the loop (deferred closes would pile up and leak
	// connections until the function returns).
	for res.StatusCode >= 300 && res.StatusCode < 400 {
		location := res.Header.Get("Location")
		res.Body.Close()
		utils.Debugf("Redirected to %s\n", location)
		req, err = makeRequest(location)
		if err != nil {
			return nil, err
		}
		res, err = r.client.Do(req)
		if err != nil {
			return nil, err
		}
	}
	defer res.Body.Close()
	if res.StatusCode != 200 && res.StatusCode != 201 {
		errBody, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return nil, err
		}
		return nil, fmt.Errorf("Error: Status %d trying to push repository %s: %s", res.StatusCode, remote, errBody)
	}
	var tokens []string
	if res.Header.Get("X-Docker-Token") != "" {
		tokens = res.Header["X-Docker-Token"]
		utils.Debugf("Auth token: %v", tokens)
	} else {
		return nil, fmt.Errorf("Index response didn't contain an access token")
	}
	var endpoints []string
	if res.Header.Get("X-Docker-Endpoints") != "" {
		endpoints = res.Header["X-Docker-Endpoints"]
	} else {
		return nil, fmt.Errorf("Index response didn't contain any endpoints")
	}
	if validate {
		if res.StatusCode != 204 {
			if errBody, err := ioutil.ReadAll(res.Body); err != nil {
				return nil, err
			} else {
				return nil, fmt.Errorf("Error: Status %d trying to push checksums %s: %s", res.StatusCode, remote, errBody)
			}
		}
	}
	return &RepositoryData{
		Tokens:    tokens,
		Endpoints: endpoints,
	}, nil
}
// SearchRepositories queries the index's /search endpoint for repositories
// matching term.
func (r *Registry) SearchRepositories(term string) (*SearchResults, error) {
	u := auth.IndexServerAddress() + "/search?q=" + url.QueryEscape(term)
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}
	res, err := r.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		// Fixed typo in the user-facing message ("Unexepected").
		return nil, fmt.Errorf("Unexpected status code %d", res.StatusCode)
	}
	rawData, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	result := new(SearchResults)
	err = json.Unmarshal(rawData, result)
	return result, err
}
// ResetClient replaces the auth configuration and clears any cookies stored
// by the previous session.
func (r *Registry) ResetClient(authConfig *auth.AuthConfig) {
	r.authConfig = authConfig
	r.client.Jar = cookiejar.NewCookieJar()
}

// GetAuthConfig returns a copy of the credentials with only the username and
// email populated (the password is deliberately omitted).
func (r *Registry) GetAuthConfig() *auth.AuthConfig {
	return &auth.AuthConfig{
		Username: r.authConfig.Username,
		Email:    r.authConfig.Email,
	}
}

// SearchResults is the payload returned by the index /search endpoint.
type SearchResults struct {
	Query      string              `json:"query"`
	NumResults int                 `json:"num_results"`
	Results    []map[string]string `json:"results"`
}

// RepositoryData aggregates what the index knows about a repository: image
// records by id, the registry endpoints serving it, and session tokens.
type RepositoryData struct {
	ImgList   map[string]*ImgData
	Endpoints []string
	Tokens    []string
}

// ImgData is the index's per-image record.
type ImgData struct {
	Id       string `json:"id"`
	Checksum string `json:"checksum,omitempty"`
	Tag      string `json:",omitempty"`
}

// Registry couples an HTTP client (with its cookie jar) with the credentials
// used to talk to the index and registry servers.
type Registry struct {
	client     *http.Client
	authConfig *auth.AuthConfig
}

// NewRegistry builds a Registry, loading auth credentials from the config
// file under root if present.
func NewRegistry(root string) *Registry {
	// If the auth file does not exist, keep going
	authConfig, _ := auth.LoadConfig(root)
	r := &Registry{
		authConfig: authConfig,
		client:     &http.Client{},
	}
	r.client.Jar = cookiejar.NewCookieJar()
	return r
}
|
package registry
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
"os"
"time"
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/formatters/logstash"
"github.com/bugsnag/bugsnag-go"
"github.com/docker/distribution/configuration"
ctxu "github.com/docker/distribution/context"
"github.com/docker/distribution/health"
"github.com/docker/distribution/registry/handlers"
"github.com/docker/distribution/registry/listener"
"github.com/docker/distribution/uuid"
"github.com/docker/distribution/version"
gorhandlers "github.com/gorilla/handlers"
"github.com/spf13/cobra"
"github.com/yvasiyarov/gorelic"
"golang.org/x/net/context"
)
// Cmd is a cobra command for running the registry. It resolves the
// configuration (CLI arg or REGISTRY_CONFIGURATION_PATH), optionally starts a
// debug HTTP listener, then builds and serves the registry.
var Cmd = &cobra.Command{
	Use:   "registry <config>",
	Short: "registry stores and distributes Docker images",
	Long:  "registry stores and distributes Docker images.",
	Run: func(cmd *cobra.Command, args []string) {
		if showVersion {
			version.PrintVersion()
			return
		}
		config, err := resolveConfiguration(args)
		if err != nil {
			fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
			cmd.Usage()
			os.Exit(1)
		}
		if config.HTTP.Debug.Addr != "" {
			// Serve http.DefaultServeMux on a separate listener for the life
			// of the process (debug handlers such as pprof are presumably
			// registered on the default mux — confirm in the build).
			go func(addr string) {
				log.Infof("debug server listening %v", addr)
				if err := http.ListenAndServe(addr, nil); err != nil {
					log.Fatalf("error listening on debug interface: %v", err)
				}
			}(config.HTTP.Debug.Addr)
		}
		registry, err := NewRegistry(context.Background(), config)
		if err != nil {
			log.Fatalln(err)
		}
		if err = registry.ListenAndServe(); err != nil {
			log.Fatalln(err)
		}
	},
}
// showVersion is set by the --version/-v flag; when true, Run prints the
// version and exits instead of starting a server.
var showVersion bool

func init() {
	Cmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit")
}
// A Registry represents a complete instance of the registry.
// TODO(aaronl): It might make sense for Registry to become an interface.
type Registry struct {
	config *configuration.Configuration // parsed server configuration
	app    *handlers.App                // request-routing application
	server *http.Server                 // HTTP server wrapping the handler chain
}

// NewRegistry creates a new registry from a context and configuration struct.
func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) {
	// Record the application version in the context so loggers derived from
	// it can include it in messages (see the "version" logger key below and
	// in configureLogging).
	ctx = ctxu.WithValue(ctx, "version", version.Version)
	var err error
	ctx, err = configureLogging(ctx, config)
	if err != nil {
		return nil, fmt.Errorf("error configuring logger: %v", err)
	}
	// inject a logger into the uuid library. warns us if there is a problem
	// with uuid generation under low entropy.
	uuid.Loggerf = ctxu.GetLogger(ctx).Warnf
	app := handlers.NewApp(ctx, config)
	// TODO(aaronl): The global scope of the health checks means NewRegistry
	// can only be called once per process.
	app.RegisterHealthChecks()
	// Handler chain, innermost first: app -> alive -> health -> panic
	// recovery -> request logging.
	handler := configureReporting(app)
	handler = alive("/", handler)
	handler = health.Handler(handler)
	handler = panicHandler(handler)
	handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler)
	server := &http.Server{
		Handler: handler,
	}
	return &Registry{
		app:    app,
		config: config,
		server: server,
	}, nil
}
// ListenAndServe runs the registry's HTTP server, optionally wrapping the
// listener with TLS (and client-certificate verification) when a certificate
// is configured. Blocks until the server stops.
func (registry *Registry) ListenAndServe() error {
	config := registry.config
	ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr)
	if err != nil {
		return err
	}
	if config.HTTP.TLS.Certificate != "" {
		// TLS >= 1.0 with an explicit cipher-suite whitelist, server
		// preference enabled.
		tlsConf := &tls.Config{
			ClientAuth:               tls.NoClientCert,
			NextProtos:               []string{"http/1.1"},
			Certificates:             make([]tls.Certificate, 1),
			MinVersion:               tls.VersionTLS10,
			PreferServerCipherSuites: true,
			CipherSuites: []uint16{
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
				tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
				tls.TLS_RSA_WITH_AES_128_CBC_SHA,
				tls.TLS_RSA_WITH_AES_256_CBC_SHA,
			},
		}
		tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key)
		if err != nil {
			return err
		}
		// When client CAs are configured, require and verify client certs.
		if len(config.HTTP.TLS.ClientCAs) != 0 {
			pool := x509.NewCertPool()
			for _, ca := range config.HTTP.TLS.ClientCAs {
				caPem, err := ioutil.ReadFile(ca)
				if err != nil {
					return err
				}
				if ok := pool.AppendCertsFromPEM(caPem); !ok {
					return fmt.Errorf("Could not add CA to pool")
				}
			}
			for _, subj := range pool.Subjects() {
				ctxu.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj))
			}
			tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
			tlsConf.ClientCAs = pool
		}
		ln = tls.NewListener(ln, tlsConf)
		ctxu.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr())
	} else {
		ctxu.GetLogger(registry.app).Infof("listening on %v", ln.Addr())
	}
	return registry.server.Serve(ln)
}
// configureReporting wraps app with Bugsnag and NewRelic reporting middleware
// when the corresponding keys are present in the configuration; otherwise it
// returns app unchanged.
func configureReporting(app *handlers.App) http.Handler {
	var handler http.Handler = app
	if app.Config.Reporting.Bugsnag.APIKey != "" {
		bugsnagConfig := bugsnag.Configuration{
			APIKey: app.Config.Reporting.Bugsnag.APIKey,
			// TODO(brianbland): provide the registry version here
			// AppVersion: "2.0",
		}
		if app.Config.Reporting.Bugsnag.ReleaseStage != "" {
			bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage
		}
		if app.Config.Reporting.Bugsnag.Endpoint != "" {
			bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint
		}
		bugsnag.Configure(bugsnagConfig)
		handler = bugsnag.Handler(handler)
	}
	if app.Config.Reporting.NewRelic.LicenseKey != "" {
		agent := gorelic.NewAgent()
		agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey
		if app.Config.Reporting.NewRelic.Name != "" {
			agent.NewrelicName = app.Config.Reporting.NewRelic.Name
		}
		agent.CollectHTTPStat = true
		agent.Verbose = app.Config.Reporting.NewRelic.Verbose
		agent.Run()
		handler = agent.WrapHTTPHandler(handler)
	}
	return handler
}
// configureLogging prepares the context with a logger using the
// configuration: it sets the global logrus level and formatter, then attaches
// a context logger that includes the "version" value plus any configured
// static fields.
func configureLogging(ctx ctxu.Context, config *configuration.Configuration) (context.Context, error) {
	if config.Log.Level == "" && config.Log.Formatter == "" {
		// If no config for logging is set, fallback to deprecated "Loglevel".
		log.SetLevel(logLevel(config.Loglevel))
		ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "version"))
		return ctx, nil
	}
	log.SetLevel(logLevel(config.Log.Level))
	formatter := config.Log.Formatter
	if formatter == "" {
		formatter = "text" // default formatter
	}
	switch formatter {
	case "json":
		log.SetFormatter(&log.JSONFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	case "text":
		log.SetFormatter(&log.TextFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	case "logstash":
		log.SetFormatter(&logstash.LogstashFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	default:
		// just let the library use default on empty string.
		if config.Log.Formatter != "" {
			return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter)
		}
	}
	if config.Log.Formatter != "" {
		log.Debugf("using %q logging formatter", config.Log.Formatter)
	}
	// log the application version with messages
	ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "version"))
	if len(config.Log.Fields) > 0 {
		// build up the static fields, if present.
		var fields []interface{}
		for k := range config.Log.Fields {
			fields = append(fields, k)
		}
		ctx = ctxu.WithValues(ctx, config.Log.Fields)
		ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, fields...))
	}
	return ctx, nil
}

// logLevel parses the configured level string, falling back to Info (with a
// warning) when the value is not a recognized logrus level.
func logLevel(level configuration.Loglevel) log.Level {
	l, err := log.ParseLevel(string(level))
	if err != nil {
		l = log.InfoLevel
		log.Warnf("error parsing level %q: %v, using %q ", level, err, l)
	}
	return l
}
// panicHandler wraps handler so that any panic raised while serving a request
// is forwarded to logrus via log.Panic, which logs the message to the
// pre-configured hooks (defined in config.yml) and then re-panics.
func panicHandler(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		defer func() {
			if v := recover(); v != nil {
				log.Panic(fmt.Sprintf("%v", v))
			}
		}()
		handler.ServeHTTP(rw, req)
	})
}
// alive simply wraps the handler with a route that always returns an http 200
// response when the path is matched. If the path is not matched, the request
// is passed to the provided handler. There is no guarantee of anything but
// that the server is up. Wrap with other handlers (such as health.Handler)
// for greater affect.
func alive(path string, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == path {
w.Header().Set("Cache-Control", "no-cache")
w.WriteHeader(http.StatusOK)
return
}
handler.ServeHTTP(w, r)
})
}
// resolveConfiguration locates and parses the registry configuration. The
// path comes from the first CLI argument, or, when no argument is given, from
// the REGISTRY_CONFIGURATION_PATH environment variable.
func resolveConfiguration(args []string) (*configuration.Configuration, error) {
	configurationPath := os.Getenv("REGISTRY_CONFIGURATION_PATH")
	if len(args) > 0 {
		configurationPath = args[0]
	}
	if configurationPath == "" {
		return nil, fmt.Errorf("configuration path unspecified")
	}
	fp, err := os.Open(configurationPath)
	if err != nil {
		return nil, err
	}
	defer fp.Close()
	config, err := configuration.Parse(fp)
	if err != nil {
		return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err)
	}
	return config, nil
}
Only use the distribution/context package in registry.go
This solves an issue from #909 where instance.id was not printed in logs,
because this file was using the background context from
golang.org/x/net/context instead of
github.com/docker/distribution/context.
It's cleaner to standardize on one package, so this commit removes the
import of golang.org/x/net/context entirely. The Context interfaces
defined in both packages are the same, so other code using
golang.org/x/net/context can still pass its context to NewRegistry.
Signed-off-by: Aaron Lehmann <8ecfc6017a87905413dcd7d63696a2a4c351b604@docker.com>
package registry
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
"os"
"time"
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/formatters/logstash"
"github.com/bugsnag/bugsnag-go"
"github.com/docker/distribution/configuration"
"github.com/docker/distribution/context"
"github.com/docker/distribution/health"
"github.com/docker/distribution/registry/handlers"
"github.com/docker/distribution/registry/listener"
"github.com/docker/distribution/uuid"
"github.com/docker/distribution/version"
gorhandlers "github.com/gorilla/handlers"
"github.com/spf13/cobra"
"github.com/yvasiyarov/gorelic"
)
// Cmd is a cobra command for running the registry. It resolves the
// configuration (CLI argument or REGISTRY_CONFIGURATION_PATH), optionally
// starts a debug HTTP server, and then runs the registry until it exits.
var Cmd = &cobra.Command{
	Use:   "registry <config>",
	Short: "registry stores and distributes Docker images",
	Long:  "registry stores and distributes Docker images.",
	Run: func(cmd *cobra.Command, args []string) {
		// -v/--version short-circuits everything else.
		if showVersion {
			version.PrintVersion()
			return
		}

		config, err := resolveConfiguration(args)
		if err != nil {
			fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
			cmd.Usage()
			os.Exit(1)
		}

		// Expose the default mux (pprof, expvar, ...) on a side channel
		// when a debug address is configured.
		if config.HTTP.Debug.Addr != "" {
			go func(addr string) {
				log.Infof("debug server listening %v", addr)
				if err := http.ListenAndServe(addr, nil); err != nil {
					log.Fatalf("error listening on debug interface: %v", err)
				}
			}(config.HTTP.Debug.Addr)
		}

		registry, err := NewRegistry(context.Background(), config)
		if err != nil {
			log.Fatalln(err)
		}

		// Blocks until the server stops; any serve error is fatal.
		if err = registry.ListenAndServe(); err != nil {
			log.Fatalln(err)
		}
	},
}
// showVersion is set by the persistent -v/--version flag; when true, Cmd
// prints the version and returns without starting the registry.
var showVersion bool

// init registers the --version/-v flag on Cmd.
func init() {
	Cmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit")
}
// A Registry represents a complete instance of the registry.
// TODO(aaronl): It might make sense for Registry to become an interface.
type Registry struct {
	config *configuration.Configuration // parsed registry configuration
	app    *handlers.App                // core HTTP application (routing, storage, auth)
	server *http.Server                 // server wrapping app with the middleware chain
}
// NewRegistry creates a new registry from a context and configuration struct.
// It configures logging on the context, builds the HTTP middleware chain
// around the handlers.App, and returns a Registry ready for ListenAndServe.
func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) {
	// Stash the version in the context so loggers derived from it can
	// attach it to every message (see configureLogging).
	ctx = context.WithValue(ctx, "version", version.Version)

	var err error
	ctx, err = configureLogging(ctx, config)
	if err != nil {
		return nil, fmt.Errorf("error configuring logger: %v", err)
	}

	// inject a logger into the uuid library. warns us if there is a problem
	// with uuid generation under low entropy.
	uuid.Loggerf = context.GetLogger(ctx).Warnf

	app := handlers.NewApp(ctx, config)
	// TODO(aaronl): The global scope of the health checks means NewRegistry
	// can only be called once per process.
	app.RegisterHealthChecks()

	// Middleware chain, innermost first: error reporting, "/" liveness
	// route, health endpoint, panic recovery, and combined access logging.
	handler := configureReporting(app)
	handler = alive("/", handler)
	handler = health.Handler(handler)
	handler = panicHandler(handler)
	handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler)

	server := &http.Server{
		Handler: handler,
	}

	return &Registry{
		app:    app,
		config: config,
		server: server,
	}, nil
}
// ListenAndServe runs the registry's HTTP server. When a TLS certificate is
// configured it wraps the listener in TLS (optionally requiring and verifying
// client certificates against the configured CAs); otherwise it serves plain
// HTTP. It blocks until the server stops.
func (registry *Registry) ListenAndServe() error {
	config := registry.config

	ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr)
	if err != nil {
		return err
	}

	if config.HTTP.TLS.Certificate != "" {
		// Restrict to TLS >= 1.0 and a fixed, server-preferred cipher list.
		tlsConf := &tls.Config{
			ClientAuth:               tls.NoClientCert,
			NextProtos:               []string{"http/1.1"},
			Certificates:             make([]tls.Certificate, 1),
			MinVersion:               tls.VersionTLS10,
			PreferServerCipherSuites: true,
			CipherSuites: []uint16{
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
				tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
				tls.TLS_RSA_WITH_AES_128_CBC_SHA,
				tls.TLS_RSA_WITH_AES_256_CBC_SHA,
			},
		}

		tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key)
		if err != nil {
			return err
		}

		// Client CAs being configured switches on mandatory mutual TLS.
		if len(config.HTTP.TLS.ClientCAs) != 0 {
			pool := x509.NewCertPool()

			for _, ca := range config.HTTP.TLS.ClientCAs {
				caPem, err := ioutil.ReadFile(ca)
				if err != nil {
					return err
				}

				if ok := pool.AppendCertsFromPEM(caPem); !ok {
					return fmt.Errorf("Could not add CA to pool")
				}
			}

			for _, subj := range pool.Subjects() {
				context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj))
			}

			tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
			tlsConf.ClientCAs = pool
		}

		ln = tls.NewListener(ln, tlsConf)
		context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr())
	} else {
		context.GetLogger(registry.app).Infof("listening on %v", ln.Addr())
	}

	return registry.server.Serve(ln)
}
// configureReporting wraps app with error/metrics reporting middleware based
// on the app's Reporting configuration: Bugsnag when an API key is set, and a
// NewRelic agent when a license key is set. With neither configured, the app
// is returned unwrapped.
func configureReporting(app *handlers.App) http.Handler {
	var handler http.Handler = app

	if app.Config.Reporting.Bugsnag.APIKey != "" {
		bugsnagConfig := bugsnag.Configuration{
			APIKey: app.Config.Reporting.Bugsnag.APIKey,
			// TODO(brianbland): provide the registry version here
			// AppVersion: "2.0",
		}
		if app.Config.Reporting.Bugsnag.ReleaseStage != "" {
			bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage
		}
		if app.Config.Reporting.Bugsnag.Endpoint != "" {
			bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint
		}
		// Configure sets global bugsnag state before wrapping the handler.
		bugsnag.Configure(bugsnagConfig)

		handler = bugsnag.Handler(handler)
	}

	if app.Config.Reporting.NewRelic.LicenseKey != "" {
		agent := gorelic.NewAgent()
		agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey
		if app.Config.Reporting.NewRelic.Name != "" {
			agent.NewrelicName = app.Config.Reporting.NewRelic.Name
		}
		agent.CollectHTTPStat = true
		agent.Verbose = app.Config.Reporting.NewRelic.Verbose
		// Run starts the agent's background reporting goroutine.
		agent.Run()

		handler = agent.WrapHTTPHandler(handler)
	}

	return handler
}
// configureLogging prepares the context with a logger using the
// configuration. It sets the global logrus level and formatter (json, text,
// or logstash), attaches the version to the context logger, and mixes any
// configured static fields into the context. It returns the enriched context
// or an error for an unsupported formatter.
func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) {
	if config.Log.Level == "" && config.Log.Formatter == "" {
		// If no config for logging is set, fallback to deprecated "Loglevel".
		log.SetLevel(logLevel(config.Loglevel))
		ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version"))
		return ctx, nil
	}

	log.SetLevel(logLevel(config.Log.Level))

	formatter := config.Log.Formatter
	if formatter == "" {
		formatter = "text" // default formatter
	}

	switch formatter {
	case "json":
		log.SetFormatter(&log.JSONFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	case "text":
		log.SetFormatter(&log.TextFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	case "logstash":
		log.SetFormatter(&logstash.LogstashFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	default:
		// just let the library use default on empty string.
		if config.Log.Formatter != "" {
			return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter)
		}
	}

	if config.Log.Formatter != "" {
		log.Debugf("using %q logging formatter", config.Log.Formatter)
	}

	// log the application version with messages
	ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version"))

	if len(config.Log.Fields) > 0 {
		// build up the static fields, if present. NOTE(review): map
		// iteration order is random, so the field order in log lines is
		// not deterministic — the values are, though.
		var fields []interface{}
		for k := range config.Log.Fields {
			fields = append(fields, k)
		}

		ctx = context.WithValues(ctx, config.Log.Fields)
		ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...))
	}

	return ctx, nil
}
// logLevel translates the configuration's Loglevel into a logrus level,
// falling back to Info (with a warning) when the value does not parse.
func logLevel(level configuration.Loglevel) log.Level {
	parsed, err := log.ParseLevel(string(level))
	if err == nil {
		return parsed
	}
	parsed = log.InfoLevel
	log.Warnf("error parsing level %q: %v, using %q ", level, err, parsed)
	return parsed
}
// panicHandler wraps an HTTP handler with panic recovery. Any panic raised
// while serving a request is recovered and re-raised via logrus.Panic, which
// first transmits the message to the log hooks configured in config.yml.
func panicHandler(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if err := recover(); err != nil {
				log.Panic(fmt.Sprintf("%v", err))
			}
		}()
		handler.ServeHTTP(w, r)
	})
}
// alive simply wraps the handler with a route that always returns an http 200
// response when the path is matched. If the path is not matched, the request
// is passed to the provided handler. There is no guarantee of anything but
// that the server is up. Wrap with other handlers (such as health.Handler)
// for greater effect.
func alive(path string, handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == path {
			// Liveness responses must never be cached.
			w.Header().Set("Cache-Control", "no-cache")
			w.WriteHeader(http.StatusOK)
			return
		}

		handler.ServeHTTP(w, r)
	})
}
// resolveConfiguration locates and parses the registry configuration. The
// path is taken from the first CLI argument when present, otherwise from the
// REGISTRY_CONFIGURATION_PATH environment variable. An error is returned when
// no path is available, or when the file cannot be opened or parsed.
func resolveConfiguration(args []string) (*configuration.Configuration, error) {
	var configurationPath string
	if len(args) > 0 {
		configurationPath = args[0]
	} else {
		// Query the environment once; an unset variable yields "" and is
		// caught by the emptiness check below.
		configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH")
	}

	if configurationPath == "" {
		return nil, fmt.Errorf("configuration path unspecified")
	}

	fp, err := os.Open(configurationPath)
	if err != nil {
		return nil, err
	}
	defer fp.Close()

	config, err := configuration.Parse(fp)
	if err != nil {
		return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err)
	}

	return config, nil
}
|
package aws
import (
"fmt"
"log"
"strings"
"time"
"github.com/awslabs/aws-sdk-go/aws"
"github.com/awslabs/aws-sdk-go/aws/awserr"
"github.com/awslabs/aws-sdk-go/service/iam"
"github.com/awslabs/aws-sdk-go/service/rds"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsDbInstance returns the schema and CRUD operations for the
// aws_db_instance resource.
//
// Fix: backup_retention_period previously carried `Default: 1` alongside
// Optional+Computed. A schema default on a computed attribute overrides the
// value reported by AWS and causes spurious diffs, so the default is removed
// and the value is left to be computed (Create still reads it explicitly).
func resourceAwsDbInstance() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsDbInstanceCreate,
		Read:   resourceAwsDbInstanceRead,
		Update: resourceAwsDbInstanceUpdate,
		Delete: resourceAwsDbInstanceDelete,

		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"username": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"password": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"engine": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"engine_version": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"storage_encrypted": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},

			"allocated_storage": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},

			"storage_type": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"identifier": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"instance_class": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"availability_zone": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			// Optional+Computed: do NOT add a Default here (see doc above).
			"backup_retention_period": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},

			"backup_window": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"iops": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
			},

			"license_model": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"maintenance_window": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"multi_az": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Computed: true,
			},

			"port": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"publicly_accessible": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},

			"vpc_security_group_ids": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},

			"security_group_names": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},

			"final_snapshot_identifier": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"db_subnet_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"parameter_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"address": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"endpoint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"status": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			// apply_immediately is used to determine when the update modifications
			// take place.
			// See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html
			"apply_immediately": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Computed: true,
			},

			"replicate_source_db": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"replicas": &schema.Schema{
				Type:     schema.TypeList,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},

			"tags": tagsSchema(),
		},
	}
}
// resourceAwsDbInstanceCreate creates either a read replica (when
// replicate_source_db is set) or a brand new RDS instance, then waits until
// the instance reaches the "available" state before reading it back into
// state.
func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn
	tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
	if v, ok := d.GetOk("replicate_source_db"); ok {
		// Replica path: most attributes are inherited from the source DB,
		// so only a small subset of options is forwarded.
		opts := rds.CreateDBInstanceReadReplicaInput{
			SourceDBInstanceIdentifier: aws.String(v.(string)),
			DBInstanceClass:            aws.String(d.Get("instance_class").(string)),
			DBInstanceIdentifier:       aws.String(d.Get("identifier").(string)),
			Tags:                       tags,
		}
		if attr, ok := d.GetOk("iops"); ok {
			opts.IOPS = aws.Long(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("port"); ok {
			opts.Port = aws.Long(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("availability_zone"); ok {
			opts.AvailabilityZone = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("publicly_accessible"); ok {
			opts.PubliclyAccessible = aws.Boolean(attr.(bool))
		}
		_, err := conn.CreateDBInstanceReadReplica(&opts)
		if err != nil {
			return fmt.Errorf("Error creating DB Instance: %s", err)
		}
	} else {
		opts := rds.CreateDBInstanceInput{
			AllocatedStorage:     aws.Long(int64(d.Get("allocated_storage").(int))),
			DBName:               aws.String(d.Get("name").(string)),
			DBInstanceClass:      aws.String(d.Get("instance_class").(string)),
			DBInstanceIdentifier: aws.String(d.Get("identifier").(string)),
			MasterUsername:       aws.String(d.Get("username").(string)),
			MasterUserPassword:   aws.String(d.Get("password").(string)),
			Engine:               aws.String(d.Get("engine").(string)),
			EngineVersion:        aws.String(d.Get("engine_version").(string)),
			StorageEncrypted:     aws.Boolean(d.Get("storage_encrypted").(bool)),
			Tags:                 tags,
		}

		// Plain Get (not GetOk) so an explicit 0 retention is honored.
		attr := d.Get("backup_retention_period")
		opts.BackupRetentionPeriod = aws.Long(int64(attr.(int)))
		if attr, ok := d.GetOk("multi_az"); ok {
			opts.MultiAZ = aws.Boolean(attr.(bool))
		}

		if attr, ok := d.GetOk("maintenance_window"); ok {
			opts.PreferredMaintenanceWindow = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("backup_window"); ok {
			opts.PreferredBackupWindow = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("license_model"); ok {
			opts.LicenseModel = aws.String(attr.(string))
		}
		if attr, ok := d.GetOk("parameter_group_name"); ok {
			opts.DBParameterGroupName = aws.String(attr.(string))
		}

		if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
			var s []*string
			for _, v := range attr.List() {
				s = append(s, aws.String(v.(string)))
			}
			opts.VPCSecurityGroupIDs = s
		}

		if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 {
			var s []*string
			for _, v := range attr.List() {
				s = append(s, aws.String(v.(string)))
			}
			opts.DBSecurityGroups = s
		}
		if attr, ok := d.GetOk("storage_type"); ok {
			opts.StorageType = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("db_subnet_group_name"); ok {
			opts.DBSubnetGroupName = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("iops"); ok {
			opts.IOPS = aws.Long(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("port"); ok {
			opts.Port = aws.Long(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("availability_zone"); ok {
			opts.AvailabilityZone = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("publicly_accessible"); ok {
			opts.PubliclyAccessible = aws.Boolean(attr.(bool))
		}

		log.Printf("[DEBUG] DB Instance create configuration: %#v", opts)
		var err error
		_, err = conn.CreateDBInstance(&opts)
		if err != nil {
			return fmt.Errorf("Error creating DB Instance: %s", err)
		}
	}

	d.SetId(d.Get("identifier").(string))

	log.Printf("[INFO] DB Instance ID: %s", d.Id())

	log.Println(
		"[INFO] Waiting for DB Instance to be available")

	// RDS instances can take a long time to come up; poll until available.
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "backing-up", "modifying"},
		Target:     "available",
		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
		Delay:      30 * time.Second, // Wait 30 secs before starting
	}

	// Wait, catching any errors
	_, err := stateConf.WaitForState()
	if err != nil {
		return err
	}

	return resourceAwsDbInstanceRead(d, meta)
}
// resourceAwsDbInstanceRead refreshes state from the live RDS instance. It
// clears the ID (removing the resource from state) when the instance no
// longer exists, and otherwise mirrors the instance's attributes, tags,
// security groups, and replica relationships into the resource data.
//
// Fix: when ListTagsForResource fails, resp is nil; the original code still
// dereferenced resp.TagList and panicked. The access is now guarded.
func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error {
	v, err := resourceAwsDbInstanceRetrieve(d, meta)

	if err != nil {
		return err
	}
	if v == nil {
		// Instance is gone; drop it from state.
		d.SetId("")
		return nil
	}

	d.Set("name", v.DBName)
	d.Set("username", v.MasterUsername)
	d.Set("engine", v.Engine)
	d.Set("engine_version", v.EngineVersion)
	d.Set("allocated_storage", v.AllocatedStorage)
	d.Set("storage_type", v.StorageType)
	d.Set("instance_class", v.DBInstanceClass)
	d.Set("availability_zone", v.AvailabilityZone)
	d.Set("backup_retention_period", v.BackupRetentionPeriod)
	d.Set("backup_window", v.PreferredBackupWindow)
	d.Set("license_model", v.LicenseModel)
	d.Set("maintenance_window", v.PreferredMaintenanceWindow)
	d.Set("multi_az", v.MultiAZ)
	if v.DBSubnetGroup != nil {
		d.Set("db_subnet_group_name", v.DBSubnetGroup.DBSubnetGroupName)
	}

	if len(v.DBParameterGroups) > 0 {
		d.Set("parameter_group_name", v.DBParameterGroups[0].DBParameterGroupName)
	}

	if v.Endpoint != nil {
		d.Set("port", v.Endpoint.Port)
		d.Set("address", v.Endpoint.Address)

		if v.Endpoint.Address != nil && v.Endpoint.Port != nil {
			d.Set("endpoint",
				fmt.Sprintf("%s:%d", *v.Endpoint.Address, *v.Endpoint.Port))
		}
	}

	d.Set("status", v.DBInstanceStatus)
	d.Set("storage_encrypted", v.StorageEncrypted)

	// list tags for resource
	// set tags
	conn := meta.(*AWSClient).rdsconn
	arn, err := buildRDSARN(d, meta)
	if err != nil {
		name := "<empty>"
		if v.DBName != nil && *v.DBName != "" {
			name = *v.DBName
		}
		// Tags are best-effort; an unresolvable ARN is logged, not fatal.
		log.Printf("[DEBUG] Error building ARN for DB Instance, not setting Tags for DB %s", name)
	} else {
		resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
			ResourceName: aws.String(arn),
		})

		if err != nil {
			log.Printf("[DEBUG] Error retreiving tags for ARN: %s", arn)
		}

		var dt []*rds.Tag
		// Guard: resp is nil when ListTagsForResource returned an error.
		if resp != nil && len(resp.TagList) > 0 {
			dt = resp.TagList
		}
		d.Set("tags", tagsToMapRDS(dt))
	}

	// Create an empty schema.Set to hold all vpc security group ids
	ids := &schema.Set{
		F: schema.HashString,
	}
	for _, v := range v.VPCSecurityGroups {
		ids.Add(*v.VPCSecurityGroupID)
	}
	d.Set("vpc_security_group_ids", ids)

	// Create an empty schema.Set to hold all security group names
	sgn := &schema.Set{
		F: schema.HashString,
	}
	for _, v := range v.DBSecurityGroups {
		sgn.Add(*v.DBSecurityGroupName)
	}

	d.Set("security_group_names", sgn)

	// replica things
	var replicas []string
	for _, v := range v.ReadReplicaDBInstanceIdentifiers {
		replicas = append(replicas, *v)
	}
	if err := d.Set("replicas", replicas); err != nil {
		return fmt.Errorf("[DEBUG] Error setting replicas attribute: %#v, error: %#v", replicas, err)
	}

	if v.ReadReplicaSourceDBInstanceIdentifier != nil {
		log.Printf("\n\n------\nread replica instance identifier: %#v", *v.ReadReplicaSourceDBInstanceIdentifier)
	} else {
		log.Printf("\n\n------\nno replica identifier")
	}
	d.Set("replicate_source_db", v.ReadReplicaSourceDBInstanceIdentifier)

	return nil
}
// resourceAwsDbInstanceDelete deletes the RDS instance, either skipping the
// final snapshot or naming it from final_snapshot_identifier, then waits
// until the instance has fully disappeared.
func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	log.Printf("[DEBUG] DB Instance destroy: %v", d.Id())

	opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())}

	// AWS requires exactly one of SkipFinalSnapshot or a snapshot name.
	finalSnapshot := d.Get("final_snapshot_identifier").(string)
	if finalSnapshot == "" {
		opts.SkipFinalSnapshot = aws.Boolean(true)
	} else {
		opts.FinalDBSnapshotIdentifier = aws.String(finalSnapshot)
	}

	log.Printf("[DEBUG] DB Instance destroy configuration: %v", opts)
	if _, err := conn.DeleteDBInstance(&opts); err != nil {
		return err
	}

	log.Println(
		"[INFO] Waiting for DB Instance to be destroyed")
	// Empty Target: we wait until the refresh func reports no instance.
	stateConf := &resource.StateChangeConf{
		Pending: []string{"creating", "backing-up",
			"modifying", "deleting", "available"},
		Target:     "",
		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
		Delay:      30 * time.Second, // Wait 30 secs before starting
	}
	if _, err := stateConf.WaitForState(); err != nil {
		return err
	}

	return nil
}
// resourceAwsDbInstanceUpdate applies changed attributes via
// ModifyDBInstance, promotes a read replica when replicate_source_db is
// cleared, and syncs tags. Partial state is enabled so successfully applied
// attributes are persisted even if a later step fails.
//
// Fix: the guard for security group names previously re-checked
// d.HasChange("vpc_security_group_ids") (copy-paste), so changes to
// "security_group_names" were never detected; it now checks the correct key.
func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
	log.Printf("\n\n-------- ENTER UPDATE -------\n\n")
	conn := meta.(*AWSClient).rdsconn
	d.Partial(true)

	req := &rds.ModifyDBInstanceInput{
		ApplyImmediately:     aws.Boolean(d.Get("apply_immediately").(bool)),
		DBInstanceIdentifier: aws.String(d.Id()),
	}
	d.SetPartial("apply_immediately")

	// Only issue ModifyDBInstance when at least one attribute changed.
	requestUpdate := false
	if d.HasChange("allocated_storage") {
		d.SetPartial("allocated_storage")
		req.AllocatedStorage = aws.Long(int64(d.Get("allocated_storage").(int)))
		requestUpdate = true
	}
	if d.HasChange("backup_retention_period") {
		d.SetPartial("backup_retention_period")
		req.BackupRetentionPeriod = aws.Long(int64(d.Get("backup_retention_period").(int)))
		requestUpdate = true
	}
	if d.HasChange("instance_class") {
		d.SetPartial("instance_class")
		req.DBInstanceClass = aws.String(d.Get("instance_class").(string))
		requestUpdate = true
	}
	if d.HasChange("parameter_group_name") {
		d.SetPartial("parameter_group_name")
		req.DBParameterGroupName = aws.String(d.Get("parameter_group_name").(string))
		requestUpdate = true
	}
	if d.HasChange("engine_version") {
		d.SetPartial("engine_version")
		req.EngineVersion = aws.String(d.Get("engine_version").(string))
		requestUpdate = true
	}
	if d.HasChange("iops") {
		d.SetPartial("iops")
		req.IOPS = aws.Long(int64(d.Get("iops").(int)))
		requestUpdate = true
	}
	if d.HasChange("backup_window") {
		d.SetPartial("backup_window")
		req.PreferredBackupWindow = aws.String(d.Get("backup_window").(string))
		requestUpdate = true
	}
	if d.HasChange("maintenance_window") {
		d.SetPartial("maintenance_window")
		req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string))
		requestUpdate = true
	}
	if d.HasChange("password") {
		d.SetPartial("password")
		req.MasterUserPassword = aws.String(d.Get("password").(string))
		requestUpdate = true
	}
	if d.HasChange("multi_az") {
		d.SetPartial("multi_az")
		req.MultiAZ = aws.Boolean(d.Get("multi_az").(bool))
		requestUpdate = true
	}
	if d.HasChange("storage_type") {
		d.SetPartial("storage_type")
		req.StorageType = aws.String(d.Get("storage_type").(string))
		requestUpdate = true
	}

	if d.HasChange("vpc_security_group_ids") {
		if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
			var s []*string
			for _, v := range attr.List() {
				s = append(s, aws.String(v.(string)))
			}
			req.VPCSecurityGroupIDs = s
		}
		requestUpdate = true
	}

	if d.HasChange("security_group_names") {
		if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 {
			var s []*string
			for _, v := range attr.List() {
				s = append(s, aws.String(v.(string)))
			}
			req.DBSecurityGroups = s
		}
		requestUpdate = true
	}

	log.Printf("[DEBUG] Send DB Instance Modification request: %#v", requestUpdate)
	if requestUpdate {
		log.Printf("[DEBUG] DB Instance Modification request: %#v", req)
		_, err := conn.ModifyDBInstance(req)
		if err != nil {
			return fmt.Errorf("Error modifying DB Instance %s: %s", d.Id(), err)
		}
	}

	// separate request to promote a database
	if d.HasChange("replicate_source_db") {
		if d.Get("replicate_source_db").(string) == "" {
			// promote this replica to a standalone instance
			opts := rds.PromoteReadReplicaInput{
				DBInstanceIdentifier: aws.String(d.Id()),
			}
			attr := d.Get("backup_retention_period")
			opts.BackupRetentionPeriod = aws.Long(int64(attr.(int)))
			if attr, ok := d.GetOk("backup_window"); ok {
				opts.PreferredBackupWindow = aws.String(attr.(string))
			}
			_, err := conn.PromoteReadReplica(&opts)
			if err != nil {
				return fmt.Errorf("Error promoting database: %#v", err)
			}
			d.Set("replicate_source_db", "")
		} else {
			return fmt.Errorf("cannot elect new source database for replication")
		}
	}

	if arn, err := buildRDSARN(d, meta); err == nil {
		if err := setTagsRDS(conn, d, arn); err != nil {
			return err
		} else {
			d.SetPartial("tags")
		}
	}
	d.Partial(false)
	log.Printf("\n\n-------- EXIT UPDATE -------\n\n")
	return resourceAwsDbInstanceRead(d, meta)
}
// resourceAwsDbInstanceRetrieve describes the DB instance matching the
// resource ID. It returns (nil, nil) when the instance does not exist (the
// DBInstanceNotFound error, or a describe result that does not contain
// exactly this instance), and a non-nil error for any other API failure.
//
// Fix: the mismatch branch contained a dead `if err != nil` (err is always
// nil at that point), so an empty result set fell through to
// resp.DBInstances[0] and panicked with an index out of range. The function
// now returns (nil, nil) whenever the result is not exactly our instance;
// the || short-circuit keeps the [0] dereference safe.
func resourceAwsDbInstanceRetrieve(
	d *schema.ResourceData, meta interface{}) (*rds.DBInstance, error) {
	conn := meta.(*AWSClient).rdsconn

	opts := rds.DescribeDBInstancesInput{
		DBInstanceIdentifier: aws.String(d.Id()),
	}

	log.Printf("[DEBUG] DB Instance describe configuration: %#v", opts)

	resp, err := conn.DescribeDBInstances(&opts)

	if err != nil {
		dbinstanceerr, ok := err.(awserr.Error)
		if ok && dbinstanceerr.Code() == "DBInstanceNotFound" {
			return nil, nil
		}
		return nil, fmt.Errorf("Error retrieving DB Instances: %s", err)
	}

	if len(resp.DBInstances) != 1 ||
		*resp.DBInstances[0].DBInstanceIdentifier != d.Id() {
		return nil, nil
	}

	return resp.DBInstances[0], nil
}
// resourceAwsDbInstanceStateRefreshFunc adapts resourceAwsDbInstanceRetrieve
// into a resource.StateRefreshFunc for StateChangeConf polling. A missing
// instance yields (nil, "", nil), which the waiter treats as "gone".
func resourceAwsDbInstanceStateRefreshFunc(
	d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		v, err := resourceAwsDbInstanceRetrieve(d, meta)

		if err != nil {
			log.Printf("Error on retrieving DB Instance when waiting: %s", err)
			return nil, "", err
		}

		if v == nil {
			return nil, "", nil
		}

		// The instance's status string drives the Pending/Target matching.
		return v, *v.DBInstanceStatus, nil
	}
}
// buildRDSARN constructs the ARN of this DB instance. The account ID is not
// directly available, so it is extracted from the ARN of the current IAM
// user (field 4 of the colon-separated ARN).
func buildRDSARN(d *schema.ResourceData, meta interface{}) (string, error) {
	iamconn := meta.(*AWSClient).iamconn
	region := meta.(*AWSClient).region
	// A zero value GetUserInput{} defers to the currently logged in user
	resp, err := iamconn.GetUser(&iam.GetUserInput{})
	if err != nil {
		return "", err
	}
	userARN := *resp.User.ARN
	accountID := strings.Split(userARN, ":")[4]
	arn := fmt.Sprintf("arn:aws:rds:%s:%s:db:%s", region, accountID, d.Id())
	return arn, nil
}
Remove the default value here.
package aws
import (
"fmt"
"log"
"strings"
"time"
"github.com/awslabs/aws-sdk-go/aws"
"github.com/awslabs/aws-sdk-go/aws/awserr"
"github.com/awslabs/aws-sdk-go/service/iam"
"github.com/awslabs/aws-sdk-go/service/rds"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsDbInstance returns the schema and CRUD operations for the
// aws_db_instance resource. Attributes marked Computed are populated from
// the live instance; ForceNew attributes recreate the instance on change.
func resourceAwsDbInstance() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsDbInstanceCreate,
		Read:   resourceAwsDbInstanceRead,
		Update: resourceAwsDbInstanceUpdate,
		Delete: resourceAwsDbInstanceDelete,

		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"username": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"password": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"engine": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"engine_version": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"storage_encrypted": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},

			"allocated_storage": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},

			"storage_type": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"identifier": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"instance_class": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"availability_zone": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			// Intentionally no Default: this attribute is Computed, and a
			// schema default would override the value reported by AWS.
			"backup_retention_period": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},

			"backup_window": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"iops": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
			},

			"license_model": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"maintenance_window": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"multi_az": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Computed: true,
			},

			"port": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"publicly_accessible": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},

			"vpc_security_group_ids": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},

			"security_group_names": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},

			"final_snapshot_identifier": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"db_subnet_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"parameter_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"address": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"endpoint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"status": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			// apply_immediately is used to determine when the update modifications
			// take place.
			// See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html
			"apply_immediately": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Computed: true,
			},

			"replicate_source_db": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"replicas": &schema.Schema{
				Type:     schema.TypeList,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},

			"tags": tagsSchema(),
		},
	}
}
// resourceAwsDbInstanceCreate creates either a read replica (when
// replicate_source_db is set) or a brand new RDS instance, then waits until
// the instance reaches the "available" state before reading it back into
// state.
func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn
	tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
	if v, ok := d.GetOk("replicate_source_db"); ok {
		// Replica path: most attributes are inherited from the source DB,
		// so only a small subset of options is forwarded.
		opts := rds.CreateDBInstanceReadReplicaInput{
			SourceDBInstanceIdentifier: aws.String(v.(string)),
			DBInstanceClass:            aws.String(d.Get("instance_class").(string)),
			DBInstanceIdentifier:       aws.String(d.Get("identifier").(string)),
			Tags:                       tags,
		}
		if attr, ok := d.GetOk("iops"); ok {
			opts.IOPS = aws.Long(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("port"); ok {
			opts.Port = aws.Long(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("availability_zone"); ok {
			opts.AvailabilityZone = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("publicly_accessible"); ok {
			opts.PubliclyAccessible = aws.Boolean(attr.(bool))
		}
		_, err := conn.CreateDBInstanceReadReplica(&opts)
		if err != nil {
			return fmt.Errorf("Error creating DB Instance: %s", err)
		}
	} else {
		opts := rds.CreateDBInstanceInput{
			AllocatedStorage:     aws.Long(int64(d.Get("allocated_storage").(int))),
			DBName:               aws.String(d.Get("name").(string)),
			DBInstanceClass:      aws.String(d.Get("instance_class").(string)),
			DBInstanceIdentifier: aws.String(d.Get("identifier").(string)),
			MasterUsername:       aws.String(d.Get("username").(string)),
			MasterUserPassword:   aws.String(d.Get("password").(string)),
			Engine:               aws.String(d.Get("engine").(string)),
			EngineVersion:        aws.String(d.Get("engine_version").(string)),
			StorageEncrypted:     aws.Boolean(d.Get("storage_encrypted").(bool)),
			Tags:                 tags,
		}

		// Plain Get (not GetOk) so an explicit 0 retention is honored.
		attr := d.Get("backup_retention_period")
		opts.BackupRetentionPeriod = aws.Long(int64(attr.(int)))
		if attr, ok := d.GetOk("multi_az"); ok {
			opts.MultiAZ = aws.Boolean(attr.(bool))
		}

		if attr, ok := d.GetOk("maintenance_window"); ok {
			opts.PreferredMaintenanceWindow = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("backup_window"); ok {
			opts.PreferredBackupWindow = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("license_model"); ok {
			opts.LicenseModel = aws.String(attr.(string))
		}
		if attr, ok := d.GetOk("parameter_group_name"); ok {
			opts.DBParameterGroupName = aws.String(attr.(string))
		}

		if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
			var s []*string
			for _, v := range attr.List() {
				s = append(s, aws.String(v.(string)))
			}
			opts.VPCSecurityGroupIDs = s
		}

		if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 {
			var s []*string
			for _, v := range attr.List() {
				s = append(s, aws.String(v.(string)))
			}
			opts.DBSecurityGroups = s
		}
		if attr, ok := d.GetOk("storage_type"); ok {
			opts.StorageType = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("db_subnet_group_name"); ok {
			opts.DBSubnetGroupName = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("iops"); ok {
			opts.IOPS = aws.Long(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("port"); ok {
			opts.Port = aws.Long(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("availability_zone"); ok {
			opts.AvailabilityZone = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("publicly_accessible"); ok {
			opts.PubliclyAccessible = aws.Boolean(attr.(bool))
		}

		log.Printf("[DEBUG] DB Instance create configuration: %#v", opts)
		var err error
		_, err = conn.CreateDBInstance(&opts)
		if err != nil {
			return fmt.Errorf("Error creating DB Instance: %s", err)
		}
	}

	d.SetId(d.Get("identifier").(string))

	log.Printf("[INFO] DB Instance ID: %s", d.Id())

	log.Println(
		"[INFO] Waiting for DB Instance to be available")

	// RDS instances can take a long time to come up; poll until available.
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "backing-up", "modifying"},
		Target:     "available",
		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
		Delay:      30 * time.Second, // Wait 30 secs before starting
	}

	// Wait, catching any errors
	_, err := stateConf.WaitForState()
	if err != nil {
		return err
	}

	return resourceAwsDbInstanceRead(d, meta)
}
// resourceAwsDbInstanceRead refreshes Terraform state from the live RDS
// instance. If the instance no longer exists, the ID is cleared so the
// resource is removed from state.
func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error {
	v, err := resourceAwsDbInstanceRetrieve(d, meta)
	if err != nil {
		return err
	}
	if v == nil {
		// Instance is gone; drop it from state.
		d.SetId("")
		return nil
	}
	d.Set("name", v.DBName)
	d.Set("username", v.MasterUsername)
	d.Set("engine", v.Engine)
	d.Set("engine_version", v.EngineVersion)
	d.Set("allocated_storage", v.AllocatedStorage)
	d.Set("storage_type", v.StorageType)
	d.Set("instance_class", v.DBInstanceClass)
	d.Set("availability_zone", v.AvailabilityZone)
	d.Set("backup_retention_period", v.BackupRetentionPeriod)
	d.Set("backup_window", v.PreferredBackupWindow)
	d.Set("license_model", v.LicenseModel)
	d.Set("maintenance_window", v.PreferredMaintenanceWindow)
	d.Set("multi_az", v.MultiAZ)
	if v.DBSubnetGroup != nil {
		d.Set("db_subnet_group_name", v.DBSubnetGroup.DBSubnetGroupName)
	}
	if len(v.DBParameterGroups) > 0 {
		// Only the first parameter group is tracked in state.
		d.Set("parameter_group_name", v.DBParameterGroups[0].DBParameterGroupName)
	}
	if v.Endpoint != nil {
		d.Set("port", v.Endpoint.Port)
		d.Set("address", v.Endpoint.Address)
		if v.Endpoint.Address != nil && v.Endpoint.Port != nil {
			d.Set("endpoint",
				fmt.Sprintf("%s:%d", *v.Endpoint.Address, *v.Endpoint.Port))
		}
	}
	d.Set("status", v.DBInstanceStatus)
	d.Set("storage_encrypted", v.StorageEncrypted)
	// list tags for resource
	// set tags
	conn := meta.(*AWSClient).rdsconn
	arn, err := buildRDSARN(d, meta)
	if err != nil {
		name := "<empty>"
		if v.DBName != nil && *v.DBName != "" {
			name = *v.DBName
		}
		log.Printf("[DEBUG] Error building ARN for DB Instance, not setting Tags for DB %s", name)
	} else {
		resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
			ResourceName: aws.String(arn),
		})
		if err != nil {
			log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
		} else {
			// BUG FIX: resp.TagList was previously dereferenced even when the
			// ListTagsForResource call failed (resp == nil), causing a panic.
			var dt []*rds.Tag
			if len(resp.TagList) > 0 {
				dt = resp.TagList
			}
			d.Set("tags", tagsToMapRDS(dt))
		}
	}
	// Create an empty schema.Set to hold all vpc security group ids
	ids := &schema.Set{
		F: schema.HashString,
	}
	for _, v := range v.VPCSecurityGroups {
		ids.Add(*v.VPCSecurityGroupID)
	}
	d.Set("vpc_security_group_ids", ids)
	// Create an empty schema.Set to hold all security group names
	sgn := &schema.Set{
		F: schema.HashString,
	}
	for _, v := range v.DBSecurityGroups {
		sgn.Add(*v.DBSecurityGroupName)
	}
	d.Set("security_group_names", sgn)
	// replica things
	var replicas []string
	for _, v := range v.ReadReplicaDBInstanceIdentifiers {
		replicas = append(replicas, *v)
	}
	if err := d.Set("replicas", replicas); err != nil {
		return fmt.Errorf("[DEBUG] Error setting replicas attribute: %#v, error: %#v", replicas, err)
	}
	if v.ReadReplicaSourceDBInstanceIdentifier != nil {
		log.Printf("\n\n------\nread replica instance identifier: %#v", *v.ReadReplicaSourceDBInstanceIdentifier)
	} else {
		log.Printf("\n\n------\nno replica identifier")
	}
	d.Set("replicate_source_db", v.ReadReplicaSourceDBInstanceIdentifier)
	return nil
}
// resourceAwsDbInstanceDelete deletes the RDS instance and blocks until AWS
// reports it gone.
func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn
	log.Printf("[DEBUG] DB Instance destroy: %v", d.Id())
	opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())}
	// A non-empty final_snapshot_identifier requests a final snapshot on
	// deletion; otherwise the snapshot step is skipped entirely.
	if snapshot := d.Get("final_snapshot_identifier").(string); snapshot != "" {
		opts.FinalDBSnapshotIdentifier = aws.String(snapshot)
	} else {
		opts.SkipFinalSnapshot = aws.Boolean(true)
	}
	log.Printf("[DEBUG] DB Instance destroy configuration: %v", opts)
	if _, err := conn.DeleteDBInstance(&opts); err != nil {
		return err
	}
	log.Println(
		"[INFO] Waiting for DB Instance to be destroyed")
	stateConf := &resource.StateChangeConf{
		Pending: []string{"creating", "backing-up",
			"modifying", "deleting", "available"},
		Target:     "",
		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
		Delay:      30 * time.Second, // Wait 30 secs before starting
	}
	_, err := stateConf.WaitForState()
	return err
}
// resourceAwsDbInstanceUpdate applies in-place modifications to an existing
// DB instance via ModifyDBInstance, promotes a read replica when
// replicate_source_db is cleared, and syncs resource tags.
func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
	log.Printf("\n\n-------- ENTER UPDATE -------\n\n")
	conn := meta.(*AWSClient).rdsconn
	d.Partial(true)
	req := &rds.ModifyDBInstanceInput{
		ApplyImmediately:     aws.Boolean(d.Get("apply_immediately").(bool)),
		DBInstanceIdentifier: aws.String(d.Id()),
	}
	d.SetPartial("apply_immediately")
	// Only issue the ModifyDBInstance call if at least one field changed.
	requestUpdate := false
	if d.HasChange("allocated_storage") {
		d.SetPartial("allocated_storage")
		req.AllocatedStorage = aws.Long(int64(d.Get("allocated_storage").(int)))
		requestUpdate = true
	}
	if d.HasChange("backup_retention_period") {
		d.SetPartial("backup_retention_period")
		req.BackupRetentionPeriod = aws.Long(int64(d.Get("backup_retention_period").(int)))
		requestUpdate = true
	}
	if d.HasChange("instance_class") {
		d.SetPartial("instance_class")
		req.DBInstanceClass = aws.String(d.Get("instance_class").(string))
		requestUpdate = true
	}
	if d.HasChange("parameter_group_name") {
		d.SetPartial("parameter_group_name")
		req.DBParameterGroupName = aws.String(d.Get("parameter_group_name").(string))
		requestUpdate = true
	}
	if d.HasChange("engine_version") {
		d.SetPartial("engine_version")
		req.EngineVersion = aws.String(d.Get("engine_version").(string))
		requestUpdate = true
	}
	if d.HasChange("iops") {
		d.SetPartial("iops")
		req.IOPS = aws.Long(int64(d.Get("iops").(int)))
		requestUpdate = true
	}
	if d.HasChange("backup_window") {
		d.SetPartial("backup_window")
		req.PreferredBackupWindow = aws.String(d.Get("backup_window").(string))
		requestUpdate = true
	}
	if d.HasChange("maintenance_window") {
		d.SetPartial("maintenance_window")
		req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string))
		requestUpdate = true
	}
	if d.HasChange("password") {
		d.SetPartial("password")
		req.MasterUserPassword = aws.String(d.Get("password").(string))
		requestUpdate = true
	}
	if d.HasChange("multi_az") {
		d.SetPartial("multi_az")
		req.MultiAZ = aws.Boolean(d.Get("multi_az").(bool))
		requestUpdate = true
	}
	if d.HasChange("storage_type") {
		d.SetPartial("storage_type")
		req.StorageType = aws.String(d.Get("storage_type").(string))
		requestUpdate = true
	}
	if d.HasChange("vpc_security_group_ids") {
		if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
			var s []*string
			for _, v := range attr.List() {
				s = append(s, aws.String(v.(string)))
			}
			req.VPCSecurityGroupIDs = s
		}
		requestUpdate = true
	}
	// BUG FIX: this block previously re-checked HasChange("vpc_security_group_ids"),
	// so a change to security_group_names alone was never sent to AWS.
	if d.HasChange("security_group_names") {
		if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 {
			var s []*string
			for _, v := range attr.List() {
				s = append(s, aws.String(v.(string)))
			}
			req.DBSecurityGroups = s
		}
		requestUpdate = true
	}
	log.Printf("[DEBUG] Send DB Instance Modification request: %#v", requestUpdate)
	if requestUpdate {
		log.Printf("[DEBUG] DB Instance Modification request: %#v", req)
		_, err := conn.ModifyDBInstance(req)
		if err != nil {
			return fmt.Errorf("Error modifying DB Instance %s: %s", d.Id(), err)
		}
	}
	// separate request to promote a database
	if d.HasChange("replicate_source_db") {
		if d.Get("replicate_source_db").(string) == "" {
			// promote the replica to a standalone instance
			opts := rds.PromoteReadReplicaInput{
				DBInstanceIdentifier: aws.String(d.Id()),
			}
			attr := d.Get("backup_retention_period")
			opts.BackupRetentionPeriod = aws.Long(int64(attr.(int)))
			if attr, ok := d.GetOk("backup_window"); ok {
				opts.PreferredBackupWindow = aws.String(attr.(string))
			}
			_, err := conn.PromoteReadReplica(&opts)
			if err != nil {
				return fmt.Errorf("Error promoting database: %#v", err)
			}
			d.Set("replicate_source_db", "")
		} else {
			// Re-pointing a replica at a different source is not supported.
			return fmt.Errorf("cannot elect new source database for replication")
		}
	}
	if arn, err := buildRDSARN(d, meta); err == nil {
		if err := setTagsRDS(conn, d, arn); err != nil {
			return err
		} else {
			d.SetPartial("tags")
		}
	}
	d.Partial(false)
	log.Printf("\n\n-------- EXIT UPDATE -------\n\n")
	return resourceAwsDbInstanceRead(d, meta)
}
// resourceAwsDbInstanceRetrieve fetches the DB instance matching the
// resource ID. It returns (nil, nil) when the instance does not exist, so
// callers can distinguish "gone" from an API error.
func resourceAwsDbInstanceRetrieve(
	d *schema.ResourceData, meta interface{}) (*rds.DBInstance, error) {
	conn := meta.(*AWSClient).rdsconn
	opts := rds.DescribeDBInstancesInput{
		DBInstanceIdentifier: aws.String(d.Id()),
	}
	log.Printf("[DEBUG] DB Instance describe configuration: %#v", opts)
	resp, err := conn.DescribeDBInstances(&opts)
	if err != nil {
		dbinstanceerr, ok := err.(awserr.Error)
		if ok && dbinstanceerr.Code() == "DBInstanceNotFound" {
			return nil, nil
		}
		return nil, fmt.Errorf("Error retrieving DB Instances: %s", err)
	}
	// BUG FIX: the mismatch branch previously contained a dead
	// `if err != nil` (err is always nil here) and then fell through to
	// resp.DBInstances[0], panicking when the result set was empty.
	if len(resp.DBInstances) != 1 ||
		*resp.DBInstances[0].DBInstanceIdentifier != d.Id() {
		return nil, nil
	}
	return resp.DBInstances[0], nil
}
// resourceAwsDbInstanceStateRefreshFunc builds a StateRefreshFunc that
// reports the current DB instance status for resource.StateChangeConf.
func resourceAwsDbInstanceStateRefreshFunc(
	d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		instance, err := resourceAwsDbInstanceRetrieve(d, meta)
		switch {
		case err != nil:
			log.Printf("Error on retrieving DB Instance when waiting: %s", err)
			return nil, "", err
		case instance == nil:
			// Instance no longer exists; an empty state ends a delete wait.
			return nil, "", nil
		default:
			return instance, *instance.DBInstanceStatus, nil
		}
	}
}
// buildRDSARN derives the ARN of this DB instance from the caller's IAM
// identity (which supplies the account ID) and the configured region.
func buildRDSARN(d *schema.ResourceData, meta interface{}) (string, error) {
	client := meta.(*AWSClient)
	// A zero value GetUserInput{} defers to the currently logged in user.
	resp, err := client.iamconn.GetUser(&iam.GetUserInput{})
	if err != nil {
		return "", err
	}
	// User ARNs look like arn:aws:iam::ACCOUNT_ID:user/NAME; the account ID
	// is the fifth colon-separated field.
	accountID := strings.Split(*resp.User.ARN, ":")[4]
	return fmt.Sprintf("arn:aws:rds:%s:%s:db:%s", client.region, accountID, d.Id()), nil
}
|
package main
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/schema"
"golang.org/x/net/context"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
"strings"
)
// resourceVirtualMachine describes the vSphere virtual machine resource:
// its CRUD entry points and its configuration schema.
func resourceVirtualMachine() *schema.Resource {
	return &schema.Resource{
		Create: resourceVirtualMachineCreate,
		Read:   resourceVirtualMachineRead,
		Delete: resourceVirtualMachineDelete,
		Schema: map[string]*schema.Schema{
			// Name of the new VM; changing it forces recreation.
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Inventory path (under <datacenter>/vm/) of the VM or template
			// to clone from.
			"image": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"datacenter": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"folder": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"host": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"resource_pool": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			// When true, the clone shares the image's disk via a snapshot.
			"linked_clone": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
				ForceNew: true,
			},
			"cpus": {
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},
			"memory": {
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},
			// Setting a domain enables Linux guest customization.
			"domain": {
				Type:     schema.TypeString,
				Optional: true,
			},
			"ip_address": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"subnet_mask": {
				Type:     schema.TypeString,
				Optional: true,
			},
			"gateway": {
				Type:     schema.TypeString,
				Optional: true,
			},
			// Extra VMX configuration key/value pairs.
			"configuration_parameters": {
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},
			"power_on": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  true,
			},
		},
	}
}
// resourceVirtualMachineCreate clones a new virtual machine from the VM or
// template named by "image", optionally applying Linux guest customization,
// and waits for the clone task (and, when powered on, the guest IP).
func resourceVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*vim25.Client)
	dc_name := d.Get("datacenter").(string)
	if dc_name == "" {
		// No datacenter configured: fall back to the connection's default
		// and record its name in state.
		finder := find.NewFinder(client, false)
		dc, err := finder.DefaultDatacenter(context.TODO())
		if err != nil {
			return fmt.Errorf("Error reading default datacenter: %s", err)
		}
		var dc_mo mo.Datacenter
		err = dc.Properties(context.TODO(), dc.Reference(), []string{"name"}, &dc_mo)
		if err != nil {
			return fmt.Errorf("Error reading datacenter name: %s", err)
		}
		dc_name = dc_mo.Name
		d.Set("datacenter", dc_name)
	}
	image_name := d.Get("image").(string)
	image_ref, err := object.NewSearchIndex(client).FindByInventoryPath(context.TODO(), fmt.Sprintf("%s/vm/%s", dc_name, image_name))
	if err != nil {
		return fmt.Errorf("Error reading vm: %s", err)
	}
	if image_ref == nil {
		return fmt.Errorf("Cannot find image %s", image_name)
	}
	image := image_ref.(*object.VirtualMachine)
	var image_mo mo.VirtualMachine
	err = image.Properties(context.TODO(), image.Reference(), []string{"parent", "config.template", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &image_mo)
	if err != nil {
		return fmt.Errorf("Error reading base VM properties: %s", err)
	}
	// Destination folder: the explicit "folder" setting, or the image's parent.
	var folder_ref object.Reference
	var folder *object.Folder
	if d.Get("folder").(string) != "" {
		folder_ref, err = object.NewSearchIndex(client).FindByInventoryPath(context.TODO(), fmt.Sprintf("%v/vm/%v", dc_name, d.Get("folder").(string)))
		if err != nil {
			return fmt.Errorf("Error reading folder: %s", err)
		}
		if folder_ref == nil {
			return fmt.Errorf("Cannot find folder %s", d.Get("folder").(string))
		}
		folder = folder_ref.(*object.Folder)
	} else {
		folder = object.NewFolder(client, *image_mo.Parent)
	}
	host_name := d.Get("host").(string)
	if host_name == "" {
		if image_mo.Config.Template == true {
			// A template has no runtime host to inherit from.
			return fmt.Errorf("Image is a template, 'host' is a required")
		} else {
			// Derive the host (or cluster) name from the owner of the
			// image's resource pool: "domain-s" identifiers are standalone
			// hosts, "domain-c" identifiers are clusters.
			var pool_mo mo.ResourcePool
			err = property.DefaultCollector(client).RetrieveOne(context.TODO(), *image_mo.ResourcePool, []string{"owner"}, &pool_mo)
			if err != nil {
				return fmt.Errorf("Error reading resource pool of base VM: %s", err)
			}
			if strings.Contains(pool_mo.Owner.Value, "domain-s") {
				var host_mo mo.ComputeResource
				err = property.DefaultCollector(client).RetrieveOne(context.TODO(), pool_mo.Owner, []string{"name"}, &host_mo)
				if err != nil {
					return fmt.Errorf("Error reading host of base VM: %s", err)
				}
				host_name = host_mo.Name
			} else if strings.Contains(pool_mo.Owner.Value, "domain-c") {
				var cluster_mo mo.ClusterComputeResource
				err = property.DefaultCollector(client).RetrieveOne(context.TODO(), pool_mo.Owner, []string{"name"}, &cluster_mo)
				if err != nil {
					return fmt.Errorf("Error reading cluster of base VM: %s", err)
				}
				host_name = cluster_mo.Name
			} else {
				return fmt.Errorf("Unknown compute resource format of base VM: %s", pool_mo.Owner.Value)
			}
		}
	}
	pool_name := d.Get("resource_pool").(string)
	pool_ref, err := object.NewSearchIndex(client).FindByInventoryPath(context.TODO(), fmt.Sprintf("%v/host/%v/Resources/%v", dc_name, host_name, pool_name))
	if err != nil {
		return fmt.Errorf("Error reading resource pool: %s", err)
	}
	if pool_ref == nil {
		return fmt.Errorf("Cannot find resource pool %s", pool_name)
	}
	var relocateSpec types.VirtualMachineRelocateSpec
	var pool_mor types.ManagedObjectReference
	pool_mor = pool_ref.Reference()
	relocateSpec.Pool = &pool_mor
	if d.Get("linked_clone").(bool) {
		// Linked clones share the parent disk through a delta backing.
		relocateSpec.DiskMoveType = "createNewChildDiskBacking"
	}
	var confSpec types.VirtualMachineConfigSpec
	if d.Get("cpus") != nil {
		confSpec.NumCPUs = d.Get("cpus").(int)
	}
	if d.Get("memory") != nil {
		confSpec.MemoryMB = int64(d.Get("memory").(int))
	}
	// Pass arbitrary extra configuration through to the VM.
	params := d.Get("configuration_parameters").(map[string]interface{})
	var ov []types.BaseOptionValue
	if len(params) > 0 {
		for k, v := range params {
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			ov = append(ov, &o)
		}
		confSpec.ExtraConfig = ov
	}
	cloneSpec := types.VirtualMachineCloneSpec{
		Location: relocateSpec,
		Config:   &confSpec,
		PowerOn:  d.Get("power_on").(bool),
	}
	if d.Get("linked_clone").(bool) {
		if image_mo.Snapshot == nil {
			return fmt.Errorf("`linked_clone=true`, but image VM has no snapshots")
		}
		cloneSpec.Snapshot = image_mo.Snapshot.CurrentSnapshot
	}
	domain := d.Get("domain").(string)
	ip_address := d.Get("ip_address").(string)
	if domain != "" {
		// Guest customization requires VMware tools and (here) a Linux guest.
		if image_mo.Guest.ToolsVersionStatus2 == "guestToolsNotInstalled" {
			return fmt.Errorf("VMware tools are not installed in base VM")
		}
		if !strings.Contains(image_mo.Config.GuestFullName, "Linux") && !strings.Contains(image_mo.Config.GuestFullName, "CentOS") {
			return fmt.Errorf("Guest customization is supported only for Linux. Base image OS is: %s", image_mo.Config.GuestFullName)
		}
		customizationSpec := types.CustomizationSpec{
			GlobalIPSettings: types.CustomizationGlobalIPSettings{},
			Identity: &types.CustomizationLinuxPrep{
				HostName: &types.CustomizationVirtualMachineName{},
				Domain:   domain,
			},
			NicSettingMap: []types.CustomizationAdapterMapping{
				{
					Adapter: types.CustomizationIPSettings{},
				},
			},
		}
		if ip_address != "" {
			mask := d.Get("subnet_mask").(string)
			if mask == "" {
				return fmt.Errorf("'subnet_mask' must be set, if static 'ip_address' is specified")
			}
			customizationSpec.NicSettingMap[0].Adapter.Ip = &types.CustomizationFixedIp{
				IpAddress: ip_address,
			}
			customizationSpec.NicSettingMap[0].Adapter.SubnetMask = d.Get("subnet_mask").(string)
			gateway := d.Get("gateway").(string)
			if gateway != "" {
				customizationSpec.NicSettingMap[0].Adapter.Gateway = []string{gateway}
			}
		} else {
			customizationSpec.NicSettingMap[0].Adapter.Ip = &types.CustomizationDhcpIpGenerator{}
		}
		cloneSpec.Customization = &customizationSpec
	} else if ip_address != "" {
		return fmt.Errorf("'domain' must be set, if static 'ip_address' is specified")
	}
	// BUG FIX: error messages below previously read "clonning".
	task, err := image.Clone(context.TODO(), folder, d.Get("name").(string), cloneSpec)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err)
	}
	info, err := task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err)
	}
	vm_mor := info.Result.(types.ManagedObjectReference)
	d.SetId(vm_mor.Value)
	vm := object.NewVirtualMachine(client, vm_mor)
	// workaround for https://github.com/vmware/govmomi/issues/218
	if ip_address == "" && d.Get("power_on").(bool) {
		ip, err := vm.WaitForIP(context.TODO())
		if err != nil {
			log.Printf("[ERROR] Cannot read ip address: %s", err)
		} else {
			d.Set("ip_address", ip)
		}
	}
	return nil
}
// resourceVirtualMachineRead refreshes Terraform state from the live VM.
// A VM whose properties cannot be read is treated as deleted and removed
// from state.
func resourceVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*vim25.Client)
	vm := object.NewVirtualMachine(client, types.ManagedObjectReference{Type: "VirtualMachine", Value: d.Id()})
	var vm_mo mo.VirtualMachine
	if err := vm.Properties(context.TODO(), vm.Reference(), []string{"summary"}, &vm_mo); err != nil {
		log.Printf("[INFO] Cannot read VM properties: %s", err)
		d.SetId("")
		return nil
	}
	summary := vm_mo.Summary
	d.Set("name", summary.Config.Name)
	d.Set("cpus", summary.Config.NumCpu)
	d.Set("memory", summary.Config.MemorySizeMB)
	poweredOn := summary.Runtime.PowerState == "poweredOn"
	d.Set("power_on", poweredOn)
	if poweredOn {
		// A powered-on guest should eventually report its IP via tools.
		if ip, err := vm.WaitForIP(context.TODO()); err != nil {
			log.Printf("[ERROR] Cannot read ip address: %s", err)
		} else {
			d.Set("ip_address", ip)
		}
	}
	return nil
}
// resourceVirtualMachineDelete powers the virtual machine off and then
// destroys it, waiting for the destroy task to complete.
func resourceVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*vim25.Client)
	vm_mor := types.ManagedObjectReference{Type: "VirtualMachine", Value: d.Id() }
	vm := object.NewVirtualMachine(client, vm_mor)
	task, err := vm.PowerOff(context.TODO())
	if err != nil {
		// NOTE(review): PowerOff on an already-powered-off VM likely errors
		// here and aborts the delete — confirm against govmomi behavior.
		return fmt.Errorf("Error powering vm off: %s", err)
	}
	// Power-off completion is best-effort: the task result is ignored.
	task.WaitForResult(context.TODO(), nil)
	task, err = vm.Destroy(context.TODO())
	if err != nil {
		return fmt.Errorf("Error deleting vm: %s", err)
	}
	_, err = task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return fmt.Errorf("Error deleting vm: %s", err)
	}
	return nil
}
Expose machine IP address as a default hostname for provisioners (#22)
package main
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/schema"
"golang.org/x/net/context"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
"strings"
)
// resourceVirtualMachine describes the vSphere virtual machine resource:
// its CRUD entry points and its configuration schema.
func resourceVirtualMachine() *schema.Resource {
	return &schema.Resource{
		Create: resourceVirtualMachineCreate,
		Read:   resourceVirtualMachineRead,
		Delete: resourceVirtualMachineDelete,
		Schema: map[string]*schema.Schema{
			// Name of the new VM; changing it forces recreation.
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Inventory path (under <datacenter>/vm/) of the VM or template
			// to clone from.
			"image": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"datacenter": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"folder": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"host": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"resource_pool": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			// When true, the clone shares the image's disk via a snapshot.
			"linked_clone": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
				ForceNew: true,
			},
			"cpus": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},
			"memory": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},
			// Setting a domain enables Linux guest customization.
			"domain": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},
			"ip_address": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"subnet_mask": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},
			"gateway": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},
			// Extra VMX configuration key/value pairs.
			"configuration_parameters": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},
			"power_on": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  true,
			},
		},
	}
}
// resourceVirtualMachineCreate clones a new virtual machine from the VM or
// template named by "image", optionally applying Linux guest customization,
// waits for the clone task and exposes the guest IP as the default
// connection host for provisioners.
func resourceVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*vim25.Client)
	dc_name := d.Get("datacenter").(string)
	if dc_name == "" {
		// No datacenter configured: fall back to the connection's default
		// and record its name in state.
		finder := find.NewFinder(client, false)
		dc, err := finder.DefaultDatacenter(context.TODO())
		if err != nil {
			return fmt.Errorf("Error reading default datacenter: %s", err)
		}
		var dc_mo mo.Datacenter
		err = dc.Properties(context.TODO(), dc.Reference(), []string{"name"}, &dc_mo)
		if err != nil {
			return fmt.Errorf("Error reading datacenter name: %s", err)
		}
		dc_name = dc_mo.Name
		d.Set("datacenter", dc_name)
	}
	image_name := d.Get("image").(string)
	image_ref, err := object.NewSearchIndex(client).FindByInventoryPath(context.TODO(), fmt.Sprintf("%s/vm/%s", dc_name, image_name))
	if err != nil {
		return fmt.Errorf("Error reading vm: %s", err)
	}
	if image_ref == nil {
		return fmt.Errorf("Cannot find image %s", image_name)
	}
	image := image_ref.(*object.VirtualMachine)
	var image_mo mo.VirtualMachine
	err = image.Properties(context.TODO(), image.Reference(), []string{"parent", "config.template", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &image_mo)
	if err != nil {
		return fmt.Errorf("Error reading base VM properties: %s", err)
	}
	// Destination folder: the explicit "folder" setting, or the image's parent.
	var folder_ref object.Reference
	var folder *object.Folder
	if d.Get("folder").(string) != "" {
		folder_ref, err = object.NewSearchIndex(client).FindByInventoryPath(context.TODO(), fmt.Sprintf("%v/vm/%v", dc_name, d.Get("folder").(string)))
		if err != nil {
			return fmt.Errorf("Error reading folder: %s", err)
		}
		if folder_ref == nil {
			return fmt.Errorf("Cannot find folder %s", d.Get("folder").(string))
		}
		folder = folder_ref.(*object.Folder)
	} else {
		folder = object.NewFolder(client, *image_mo.Parent)
	}
	host_name := d.Get("host").(string)
	if host_name == "" {
		if image_mo.Config.Template == true {
			// A template has no runtime host to inherit from.
			return fmt.Errorf("Image is a template, 'host' is a required")
		} else {
			// Derive the host (or cluster) name from the owner of the
			// image's resource pool: "domain-s" identifiers are standalone
			// hosts, "domain-c" identifiers are clusters.
			var pool_mo mo.ResourcePool
			err = property.DefaultCollector(client).RetrieveOne(context.TODO(), *image_mo.ResourcePool, []string{"owner"}, &pool_mo)
			if err != nil {
				return fmt.Errorf("Error reading resource pool of base VM: %s", err)
			}
			if strings.Contains(pool_mo.Owner.Value, "domain-s") {
				var host_mo mo.ComputeResource
				err = property.DefaultCollector(client).RetrieveOne(context.TODO(), pool_mo.Owner, []string{"name"}, &host_mo)
				if err != nil {
					return fmt.Errorf("Error reading host of base VM: %s", err)
				}
				host_name = host_mo.Name
			} else if strings.Contains(pool_mo.Owner.Value, "domain-c") {
				var cluster_mo mo.ClusterComputeResource
				err = property.DefaultCollector(client).RetrieveOne(context.TODO(), pool_mo.Owner, []string{"name"}, &cluster_mo)
				if err != nil {
					return fmt.Errorf("Error reading cluster of base VM: %s", err)
				}
				host_name = cluster_mo.Name
			} else {
				return fmt.Errorf("Unknown compute resource format of base VM: %s", pool_mo.Owner.Value)
			}
		}
	}
	pool_name := d.Get("resource_pool").(string)
	pool_ref, err := object.NewSearchIndex(client).FindByInventoryPath(context.TODO(), fmt.Sprintf("%v/host/%v/Resources/%v", dc_name, host_name, pool_name))
	if err != nil {
		return fmt.Errorf("Error reading resource pool: %s", err)
	}
	if pool_ref == nil {
		return fmt.Errorf("Cannot find resource pool %s", pool_name)
	}
	var relocateSpec types.VirtualMachineRelocateSpec
	var pool_mor types.ManagedObjectReference
	pool_mor = pool_ref.Reference()
	relocateSpec.Pool = &pool_mor
	if d.Get("linked_clone").(bool) {
		// Linked clones share the parent disk through a delta backing.
		relocateSpec.DiskMoveType = "createNewChildDiskBacking"
	}
	var confSpec types.VirtualMachineConfigSpec
	if d.Get("cpus") != nil {
		confSpec.NumCPUs = d.Get("cpus").(int)
	}
	if d.Get("memory") != nil {
		confSpec.MemoryMB = int64(d.Get("memory").(int))
	}
	// Pass arbitrary extra configuration through to the VM.
	params := d.Get("configuration_parameters").(map[string]interface{})
	var ov []types.BaseOptionValue
	if len(params) > 0 {
		for k, v := range params {
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			ov = append(ov, &o)
		}
		confSpec.ExtraConfig = ov
	}
	cloneSpec := types.VirtualMachineCloneSpec{
		Location: relocateSpec,
		Config:   &confSpec,
		PowerOn:  d.Get("power_on").(bool),
	}
	if d.Get("linked_clone").(bool) {
		if image_mo.Snapshot == nil {
			return fmt.Errorf("`linked_clone=true`, but image VM has no snapshots")
		}
		cloneSpec.Snapshot = image_mo.Snapshot.CurrentSnapshot
	}
	domain := d.Get("domain").(string)
	ip_address := d.Get("ip_address").(string)
	if domain != "" {
		// Guest customization requires VMware tools and (here) a Linux guest.
		if image_mo.Guest.ToolsVersionStatus2 == "guestToolsNotInstalled" {
			return fmt.Errorf("VMware tools are not installed in base VM")
		}
		if !strings.Contains(image_mo.Config.GuestFullName, "Linux") && !strings.Contains(image_mo.Config.GuestFullName, "CentOS") {
			return fmt.Errorf("Guest customization is supported only for Linux. Base image OS is: %s", image_mo.Config.GuestFullName)
		}
		customizationSpec := types.CustomizationSpec{
			GlobalIPSettings: types.CustomizationGlobalIPSettings{},
			Identity: &types.CustomizationLinuxPrep{
				HostName: &types.CustomizationVirtualMachineName{},
				Domain:   domain,
			},
			NicSettingMap: []types.CustomizationAdapterMapping{
				{
					Adapter: types.CustomizationIPSettings{},
				},
			},
		}
		if ip_address != "" {
			mask := d.Get("subnet_mask").(string)
			if mask == "" {
				return fmt.Errorf("'subnet_mask' must be set, if static 'ip_address' is specified")
			}
			customizationSpec.NicSettingMap[0].Adapter.Ip = &types.CustomizationFixedIp{
				IpAddress: ip_address,
			}
			customizationSpec.NicSettingMap[0].Adapter.SubnetMask = d.Get("subnet_mask").(string)
			gateway := d.Get("gateway").(string)
			if gateway != "" {
				customizationSpec.NicSettingMap[0].Adapter.Gateway = []string{gateway}
			}
		} else {
			customizationSpec.NicSettingMap[0].Adapter.Ip = &types.CustomizationDhcpIpGenerator{}
		}
		cloneSpec.Customization = &customizationSpec
	} else if ip_address != "" {
		return fmt.Errorf("'domain' must be set, if static 'ip_address' is specified")
	}
	// BUG FIX: error messages below previously read "clonning".
	task, err := image.Clone(context.TODO(), folder, d.Get("name").(string), cloneSpec)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err)
	}
	info, err := task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err)
	}
	vm_mor := info.Result.(types.ManagedObjectReference)
	d.SetId(vm_mor.Value)
	vm := object.NewVirtualMachine(client, vm_mor)
	// workaround for https://github.com/vmware/govmomi/issues/218
	if ip_address == "" && d.Get("power_on").(bool) {
		ip, err := vm.WaitForIP(context.TODO())
		if err != nil {
			log.Printf("[ERROR] Cannot read ip address: %s", err)
		} else {
			d.Set("ip_address", ip)
			// Default connection info so provisioners can reach the guest.
			d.SetConnInfo(map[string]string{
				"type": "ssh",
				"host": ip,
			})
		}
	}
	return nil
}
// resourceVirtualMachineRead refreshes Terraform state from the live VM.
// A VM whose properties cannot be read is treated as deleted and removed
// from state.
func resourceVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*vim25.Client)
	vm := object.NewVirtualMachine(client, types.ManagedObjectReference{Type: "VirtualMachine", Value: d.Id()})
	var vm_mo mo.VirtualMachine
	if err := vm.Properties(context.TODO(), vm.Reference(), []string{"summary"}, &vm_mo); err != nil {
		log.Printf("[INFO] Cannot read VM properties: %s", err)
		d.SetId("")
		return nil
	}
	summary := vm_mo.Summary
	d.Set("name", summary.Config.Name)
	d.Set("cpus", summary.Config.NumCpu)
	d.Set("memory", summary.Config.MemorySizeMB)
	poweredOn := summary.Runtime.PowerState == "poweredOn"
	d.Set("power_on", poweredOn)
	if poweredOn {
		// A powered-on guest should eventually report its IP via tools.
		if ip, err := vm.WaitForIP(context.TODO()); err != nil {
			log.Printf("[ERROR] Cannot read ip address: %s", err)
		} else {
			d.Set("ip_address", ip)
		}
	}
	return nil
}
// resourceVirtualMachineDelete powers the virtual machine off and then
// destroys it, waiting for the destroy task to complete.
func resourceVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*vim25.Client)
	vm_mor := types.ManagedObjectReference{Type: "VirtualMachine", Value: d.Id() }
	vm := object.NewVirtualMachine(client, vm_mor)
	task, err := vm.PowerOff(context.TODO())
	if err != nil {
		// NOTE(review): PowerOff on an already-powered-off VM likely errors
		// here and aborts the delete — confirm against govmomi behavior.
		return fmt.Errorf("Error powering vm off: %s", err)
	}
	// Power-off completion is best-effort: the task result is ignored.
	task.WaitForResult(context.TODO(), nil)
	task, err = vm.Destroy(context.TODO())
	if err != nil {
		return fmt.Errorf("Error deleting vm: %s", err)
	}
	_, err = task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return fmt.Errorf("Error deleting vm: %s", err)
	}
	return nil
}
|
package memsearch
// #include "procMems_windows.h"
// #cgo CFLAGS: -std=c99
import "C"
import (
"fmt"
"reflect"
"unsafe"
)
// memoryGrep reports whether buf occurs anywhere in the memory regions of
// the process identified by pid. It walks the region list returned by the
// C helper GetMemoryInformation and searches each region in turn.
func memoryGrep(pid uint, buf []byte) (bool, error) {
	minfo := C.GetMemoryInformation(C.DWORD(pid))
	defer C.MemoryInformation_Free(minfo)
	if minfo.error != 0 {
		return false, fmt.Errorf("GetMemoryInformation failed with error %d", minfo.error)
	}
	// Reinterpret the C region array as a Go slice without copying.
	cinfo := *(*[]C.MEMORY_BASIC_INFORMATION)(unsafe.Pointer(
		&reflect.SliceHeader{
			Data: uintptr(unsafe.Pointer(minfo.info)),
			Len:  int(minfo.length),
			Cap:  int(minfo.length)}))
	// BUG FIX: C.CString was previously allocated inside the loop, leaking
	// one C copy of buf per memory region. Allocate the needle once.
	// NOTE(review): even the single allocation is never freed — C.free
	// requires stdlib.h in the cgo preamble; confirm and release it there.
	cbuf := C.CString(string(buf))
	clen := C.int(len(buf))
	for _, v := range cinfo {
		if int(C.FindInRange(minfo.hndl, v, cbuf, clen)) != 0 {
			return true, nil
		}
	}
	return false, nil
}
Run memsearch in goroutines
package memsearch
// #include "procMems_windows.h"
// #cgo CFLAGS: -std=c99
import "C"
import (
"fmt"
"reflect"
"unsafe"
)
// MemoryGrep reports whether buf occurs anywhere in the memory regions of
// the process identified by pid. Each memory region is searched in its own
// goroutine; the first positive result wins.
func MemoryGrep(pid uint, buf []byte) (bool, error) {
	minfo := C.GetMemoryInformation(C.DWORD(pid))
	defer C.MemoryInformation_Free(minfo)
	if minfo.error != 0 {
		return false, fmt.Errorf("GetMemoryInformation failed with error %d", minfo.error)
	}
	// Reinterpret the C region array as a Go slice without copying.
	cinfo := *(*[]C.MEMORY_BASIC_INFORMATION)(unsafe.Pointer(
		&reflect.SliceHeader{
			Data: uintptr(unsafe.Pointer(minfo.info)),
			Len:  int(minfo.length),
			Cap:  int(minfo.length)}))
	cbuf := C.CString(string(buf))
	clen := C.int(len(buf))
	// BUG FIX: the results channel must be buffered. With an unbuffered
	// channel, an early `return true` left every remaining worker blocked
	// forever on its send, leaking one goroutine per unread region.
	// NOTE(review): workers may still call FindInRange after the deferred
	// MemoryInformation_Free runs on an early return — confirm the C side
	// tolerates this or join the workers before returning.
	results := make(chan bool, len(cinfo))
	for _, v := range cinfo {
		go func(v C.MEMORY_BASIC_INFORMATION) {
			results <- int(C.FindInRange(minfo.hndl, v, cbuf, clen)) != 0
		}(v)
	}
	// Collect exactly one result per region; the former trailing
	// `return false, nil` after an infinite for/select was unreachable.
	for range cinfo {
		if <-results {
			return true, nil
		}
	}
	return false, nil
}
|
package main
// gomig version number, split into major.minor.micro components.
const (
	GOMIG_MAJ_VERSION = 0
	GOMIG_MIN_VERSION = 4
	GOMIG_MIC_VERSION = 2
)
version: 0.4.3
package main
// gomig version number, split into major.minor.micro components.
const (
	GOMIG_MAJ_VERSION = 0
	GOMIG_MIN_VERSION = 4
	GOMIG_MIC_VERSION = 3
)
|
package main
// VERSION is the program's version string, reported to users.
const VERSION = "0.5.1"
:+1: Bump up the version
package main
// VERSION is the program's version string, reported to users.
const VERSION = "0.5.2-alpha1"
|
// Evaluate opcodes
package vm
import (
"errors"
"fmt"
"github.com/ncw/gpython/py"
)
// Stack operations
//
// The value stack grows towards higher indices: TOS is the last element
// of vm.stack. None of these helpers check for underflow; callers must
// know the current stack depth.
func (vm *Vm) STACK_LEVEL() int      { return len(vm.stack) }             // current depth of the stack
func (vm *Vm) EMPTY() bool           { return len(vm.stack) == 0 }        // true when nothing is on the stack
func (vm *Vm) TOP() py.Object        { return vm.stack[len(vm.stack)-1] } // TOS
func (vm *Vm) SECOND() py.Object     { return vm.stack[len(vm.stack)-2] } // one below TOS
func (vm *Vm) THIRD() py.Object      { return vm.stack[len(vm.stack)-3] } // two below TOS
func (vm *Vm) FOURTH() py.Object     { return vm.stack[len(vm.stack)-4] } // three below TOS
func (vm *Vm) PEEK(n int) py.Object  { return vm.stack[len(vm.stack)-n] } // n-th from the top (1 == TOS)
func (vm *Vm) SET_TOP(v py.Object)    { vm.stack[len(vm.stack)-1] = v }   // overwrite TOS
func (vm *Vm) SET_SECOND(v py.Object) { vm.stack[len(vm.stack)-2] = v }   // overwrite one below TOS
func (vm *Vm) SET_THIRD(v py.Object)  { vm.stack[len(vm.stack)-3] = v }   // overwrite two below TOS
func (vm *Vm) SET_FOURTH(v py.Object) { vm.stack[len(vm.stack)-4] = v }   // overwrite three below TOS
func (vm *Vm) SET_VALUE(n int, v py.Object) { vm.stack[len(vm.stack)-(n)] = (v) } // overwrite n-th from the top
func (vm *Vm) DROPN(n int) { vm.stack = vm.stack[:len(vm.stack)-n] }      // discard the top n entries
// POP removes and returns the top element of the vm stack.
func (vm *Vm) POP() py.Object {
	// FIXME what if empty?
	last := len(vm.stack) - 1
	obj := vm.stack[last]
	vm.stack = vm.stack[:last]
	return obj
}
// Push to top of vm stack
//
// append grows the stack as required; the backing array is preallocated
// with a small capacity when the Vm is created.
func (vm *Vm) PUSH(obj py.Object) {
	vm.stack = append(vm.stack, obj)
}
// Illegal instruction
func do_ILLEGAL(vm *Vm, arg int32) {
panic("Illegal opcode")
}
// Do nothing code. Used as a placeholder by the bytecode optimizer.
func do_NOP(vm *Vm, arg int32) {
}
// Removes the top-of-stack (TOS) item.
func do_POP_TOP(vm *Vm, arg int32) {
vm.DROPN(1)
}
// Swaps the two top-most stack items.
func do_ROT_TWO(vm *Vm, arg int32) {
	n := len(vm.stack)
	vm.stack[n-1], vm.stack[n-2] = vm.stack[n-2], vm.stack[n-1]
}
// Lifts second and third stack item one position up, moves top down
// to position three.
func do_ROT_THREE(vm *Vm, arg int32) {
	n := len(vm.stack)
	// (new TOS, new second, new third) = (old second, old third, old TOS)
	vm.stack[n-1], vm.stack[n-2], vm.stack[n-3] =
		vm.stack[n-2], vm.stack[n-3], vm.stack[n-1]
}
// Duplicates the reference on top of the stack.
func do_DUP_TOP(vm *Vm, arg int32) {
vm.PUSH(vm.TOP())
}
// Duplicates the top two reference on top of the stack.
func do_DUP_TOP_TWO(vm *Vm, arg int32) {
	n := len(vm.stack)
	// Push copies of SECOND then TOP, preserving their order.
	vm.stack = append(vm.stack, vm.stack[n-2], vm.stack[n-1])
}
// Unary Operations take the top of the stack, apply the operation,
// and push the result back on the stack.
// Implements TOS = +TOS.
func do_UNARY_POSITIVE(vm *Vm, arg int32) {
vm.NotImplemented("UNARY_POSITIVE", arg)
}
// Implements TOS = -TOS.
func do_UNARY_NEGATIVE(vm *Vm, arg int32) {
vm.NotImplemented("UNARY_NEGATIVE", arg)
}
// Implements TOS = not TOS.
func do_UNARY_NOT(vm *Vm, arg int32) {
vm.NotImplemented("UNARY_NOT", arg)
}
// Implements TOS = ~TOS.
func do_UNARY_INVERT(vm *Vm, arg int32) {
vm.NotImplemented("UNARY_INVERT", arg)
}
// Implements TOS = iter(TOS).
func do_GET_ITER(vm *Vm, arg int32) {
vm.NotImplemented("GET_ITER", arg)
}
// Binary operations remove the top of the stack (TOS) and the second
// top-most stack item (TOS1) from the stack. They perform the
// operation, and put the result back on the stack.
// Implements TOS = TOS1 ** TOS.
func do_BINARY_POWER(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_POWER", arg)
}
// Implements TOS = TOS1 * TOS.
func do_BINARY_MULTIPLY(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_MULTIPLY", arg)
}
// Implements TOS = TOS1 // TOS.
func do_BINARY_FLOOR_DIVIDE(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_FLOOR_DIVIDE", arg)
}
// Implements TOS = TOS1 / TOS when from __future__ import division is
// in effect.
func do_BINARY_TRUE_DIVIDE(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_TRUE_DIVIDE", arg)
}
// Implements TOS = TOS1 % TOS.
func do_BINARY_MODULO(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_MODULO", arg)
}
// Implements TOS = TOS1 + TOS.
func do_BINARY_ADD(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_ADD", arg)
}
// Implements TOS = TOS1 - TOS.
func do_BINARY_SUBTRACT(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_SUBTRACT", arg)
}
// Implements TOS = TOS1[TOS].
func do_BINARY_SUBSCR(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_SUBSCR", arg)
}
// Implements TOS = TOS1 << TOS.
func do_BINARY_LSHIFT(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_LSHIFT", arg)
}
// Implements TOS = TOS1 >> TOS.
func do_BINARY_RSHIFT(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_RSHIFT", arg)
}
// Implements TOS = TOS1 & TOS.
func do_BINARY_AND(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_AND", arg)
}
// Implements TOS = TOS1 ^ TOS.
func do_BINARY_XOR(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_XOR", arg)
}
// Implements TOS = TOS1 | TOS.
func do_BINARY_OR(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_OR", arg)
}
// In-place operations are like binary operations, in that they remove
// TOS and TOS1, and push the result back on the stack, but the
// operation is done in-place when TOS1 supports it, and the resulting
// TOS may be (but does not have to be) the original TOS1.
// Implements in-place TOS = TOS1 ** TOS.
func do_INPLACE_POWER(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_POWER", arg)
}
// Implements in-place TOS = TOS1 * TOS.
func do_INPLACE_MULTIPLY(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_MULTIPLY", arg)
}
// Implements in-place TOS = TOS1 // TOS.
func do_INPLACE_FLOOR_DIVIDE(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_FLOOR_DIVIDE", arg)
}
// Implements in-place TOS = TOS1 / TOS when from __future__ import
// division is in effect.
func do_INPLACE_TRUE_DIVIDE(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_TRUE_DIVIDE", arg)
}
// Implements in-place TOS = TOS1 % TOS.
func do_INPLACE_MODULO(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_MODULO", arg)
}
// Implements in-place TOS = TOS1 + TOS.
func do_INPLACE_ADD(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_ADD", arg)
}
// Implements in-place TOS = TOS1 - TOS.
func do_INPLACE_SUBTRACT(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_SUBTRACT", arg)
}
// Implements in-place TOS = TOS1 << TOS.
func do_INPLACE_LSHIFT(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_LSHIFT", arg)
}
// Implements in-place TOS = TOS1 >> TOS.
func do_INPLACE_RSHIFT(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_RSHIFT", arg)
}
// Implements in-place TOS = TOS1 & TOS.
func do_INPLACE_AND(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_AND", arg)
}
// Implements in-place TOS = TOS1 ^ TOS.
func do_INPLACE_XOR(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_XOR", arg)
}
// Implements in-place TOS = TOS1 | TOS.
func do_INPLACE_OR(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_OR", arg)
}
// Implements TOS1[TOS] = TOS2.
func do_STORE_SUBSCR(vm *Vm, arg int32) {
vm.NotImplemented("STORE_SUBSCR", arg)
}
// Implements del TOS1[TOS].
func do_DELETE_SUBSCR(vm *Vm, arg int32) {
vm.NotImplemented("DELETE_SUBSCR", arg)
}
// Miscellaneous opcodes.
// Implements the expression statement for the interactive mode. TOS
// is removed from the stack and printed. In non-interactive mode, an
// expression statement is terminated with POP_STACK.
func do_PRINT_EXPR(vm *Vm, arg int32) {
vm.NotImplemented("PRINT_EXPR", arg)
}
// Terminates a loop due to a break statement.
func do_BREAK_LOOP(vm *Vm, arg int32) {
vm.NotImplemented("BREAK_LOOP", arg)
}
// Continues a loop due to a continue statement. target is the address
// to jump to (which should be a FOR_ITER instruction).
func do_CONTINUE_LOOP(vm *Vm, target int32) {
vm.NotImplemented("CONTINUE_LOOP", target)
}
// Implements assignment with a starred target: Unpacks an iterable in
// TOS into individual values, where the total number of values can be
// smaller than the number of items in the iterable: one the new
// values will be a list of all leftover items.
//
// The low byte of counts is the number of values before the list
// value, the high byte of counts the number of values after it. The
// resulting values are put onto the stack right-to-left.
func do_UNPACK_EX(vm *Vm, counts int32) {
vm.NotImplemented("UNPACK_EX", counts)
}
// Calls set.add(TOS1[-i], TOS). Used to implement set comprehensions.
func do_SET_ADD(vm *Vm, i int32) {
vm.NotImplemented("SET_ADD", i)
}
// Calls list.append(TOS[-i], TOS). Used to implement list
// comprehensions. While the appended value is popped off, the list
// object remains on the stack so that it is available for further
// iterations of the loop.
func do_LIST_APPEND(vm *Vm, i int32) {
vm.NotImplemented("LIST_APPEND", i)
}
// Calls dict.setitem(TOS1[-i], TOS, TOS1). Used to implement dict comprehensions.
func do_MAP_ADD(vm *Vm, i int32) {
vm.NotImplemented("MAP_ADD", i)
}
// Returns with TOS to the caller of the function.
func do_RETURN_VALUE(vm *Vm, arg int32) {
vm.exit = true
}
// Pops TOS and delegates to it as a subiterator from a generator.
func do_YIELD_FROM(vm *Vm, arg int32) {
vm.NotImplemented("YIELD_FROM", arg)
}
// Pops TOS and yields it from a generator.
func do_YIELD_VALUE(vm *Vm, arg int32) {
vm.NotImplemented("YIELD_VALUE", arg)
}
// Loads all symbols not starting with '_' directly from the module
// TOS to the local namespace. The module is popped after loading all
// names. This opcode implements from module import *.
func do_IMPORT_STAR(vm *Vm, arg int32) {
vm.NotImplemented("IMPORT_STAR", arg)
}
// Removes one block from the block stack. Per frame, there is a stack
// of blocks, denoting nested loops, try statements, and such.
func do_POP_BLOCK(vm *Vm, arg int32) {
vm.NotImplemented("POP_BLOCK", arg)
}
// Removes one block from the block stack. The popped block must be an
// exception handler block, as implicitly created when entering an
// except handler. In addition to popping extraneous values from the
// frame stack, the last three popped values are used to restore the
// exception state.
func do_POP_EXCEPT(vm *Vm, arg int32) {
vm.NotImplemented("POP_EXCEPT", arg)
}
// Terminates a finally clause. The interpreter recalls whether the
// exception has to be re-raised, or whether the function returns, and
// continues with the outer-next block.
func do_END_FINALLY(vm *Vm, arg int32) {
vm.NotImplemented("END_FINALLY", arg)
}
// Creates a new class object. TOS is the methods dictionary, TOS1 the
// tuple of the names of the base classes, and TOS2 the class name.
func do_LOAD_BUILD_CLASS(vm *Vm, arg int32) {
vm.NotImplemented("LOAD_BUILD_CLASS", arg)
}
// This opcode performs several operations before a with block
// starts. First, it loads __exit__( ) from the context manager and
// pushes it onto the stack for later use by WITH_CLEANUP. Then,
// __enter__( ) is called, and a finally block pointing to delta is
// pushed. Finally, the result of calling the enter method is pushed
// onto the stack. The next opcode will either ignore it (POP_TOP), or
// store it in (a) variable(s) (STORE_FAST, STORE_NAME, or
// UNPACK_SEQUENCE).
func do_SETUP_WITH(vm *Vm, delta int32) {
vm.NotImplemented("SETUP_WITH", delta)
}
// Cleans up the stack when a with statement block exits. On top of
// the stack are 1–3 values indicating how/why the finally clause was
// entered:
//
// TOP = None
// (TOP, SECOND) = (WHY_{RETURN,CONTINUE}), retval
// TOP = WHY_*; no retval below it
// (TOP, SECOND, THIRD) = exc_info( )
// Under them is EXIT, the context manager’s __exit__( ) bound method.
//
// In the last case, EXIT(TOP, SECOND, THIRD) is called, otherwise
// EXIT(None, None, None).
//
// EXIT is removed from the stack, leaving the values above it in the
// same order. In addition, if the stack represents an exception, and
// the function call returns a ‘true’ value, this information is
// “zapped”, to prevent END_FINALLY from re-raising the
// exception. (But non-local gotos should still be resumed.)
func do_WITH_CLEANUP(vm *Vm, arg int32) {
vm.NotImplemented("WITH_CLEANUP", arg)
}
// All of the following opcodes expect arguments. An argument is two bytes, with the more significant byte last.
// Implements name = TOS. namei is the index of name in the attribute
// co_names of the code object. The compiler tries to use STORE_FAST
// or STORE_GLOBAL if possible.
func do_STORE_NAME(vm *Vm, namei int32) {
	// Pop TOS and bind it in the local namespace under the name held at
	// co.Names[namei] (stored as a py.String).
	vm.locals[string(vm.co.Names[namei].(py.String))] = vm.POP()
}
// Implements del name, where namei is the index into co_names
// attribute of the code object.
func do_DELETE_NAME(vm *Vm, namei int32) {
vm.NotImplemented("DELETE_NAME", namei)
}
// Unpacks TOS into count individual values, which are put onto the
// stack right-to-left.
func do_UNPACK_SEQUENCE(vm *Vm, count int32) {
vm.NotImplemented("UNPACK_SEQUENCE", count)
}
// Implements TOS.name = TOS1, where namei is the index of name in
// co_names.
func do_STORE_ATTR(vm *Vm, namei int32) {
vm.NotImplemented("STORE_ATTR", namei)
}
// Implements del TOS.name, using namei as index into co_names.
func do_DELETE_ATTR(vm *Vm, namei int32) {
vm.NotImplemented("DELETE_ATTR", namei)
}
// Works as STORE_NAME, but stores the name as a global.
func do_STORE_GLOBAL(vm *Vm, namei int32) {
vm.NotImplemented("STORE_GLOBAL", namei)
}
// Works as DELETE_NAME, but deletes a global name.
func do_DELETE_GLOBAL(vm *Vm, namei int32) {
vm.NotImplemented("DELETE_GLOBAL", namei)
}
// Pushes co_consts[consti] onto the stack.
//
// The constant object itself is pushed (no copy is made).
func do_LOAD_CONST(vm *Vm, consti int32) {
	vm.PUSH(vm.co.Consts[consti])
	// fmt.Printf("LOAD_CONST %v\n", vm.TOP())
}
// Pushes the value associated with co_names[namei] onto the stack.
//
// NOTE(review): this currently pushes the *name object itself*
// (co.Names[namei]), not the value bound to that name in
// locals/globals/builtins as CPython's LOAD_NAME does — presumably a
// work-in-progress placeholder; confirm before relying on it.
func do_LOAD_NAME(vm *Vm, namei int32) {
	vm.PUSH(vm.co.Names[namei])
}
// Creates a tuple consuming count items from the stack, and pushes
// the resulting tuple onto the stack.
func do_BUILD_TUPLE(vm *Vm, count int32) {
vm.NotImplemented("BUILD_TUPLE", count)
}
// Works as BUILD_TUPLE, but creates a set.
func do_BUILD_SET(vm *Vm, count int32) {
vm.NotImplemented("BUILD_SET", count)
}
// Works as BUILD_TUPLE, but creates a list.
func do_BUILD_LIST(vm *Vm, count int32) {
vm.NotImplemented("BUILD_LIST", count)
}
// Pushes a new dictionary object onto the stack. The dictionary is
// pre-sized to hold count entries.
func do_BUILD_MAP(vm *Vm, count int32) {
vm.NotImplemented("BUILD_MAP", count)
}
// Replaces TOS with getattr(TOS, co_names[namei]).
func do_LOAD_ATTR(vm *Vm, namei int32) {
vm.NotImplemented("LOAD_ATTR", namei)
}
// Performs a Boolean operation. The operation name can be found in
// cmp_op[opname].
func do_COMPARE_OP(vm *Vm, opname int32) {
vm.NotImplemented("COMPARE_OP", opname)
}
// Imports the module co_names[namei]. TOS and TOS1 are popped and
// provide the fromlist and level arguments of __import__( ). The
// module object is pushed onto the stack. The current namespace is
// not affected: for a proper import statement, a subsequent
// STORE_FAST instruction modifies the namespace.
func do_IMPORT_NAME(vm *Vm, namei int32) {
vm.NotImplemented("IMPORT_NAME", namei)
}
// Loads the attribute co_names[namei] from the module found in
// TOS. The resulting object is pushed onto the stack, to be
// subsequently stored by a STORE_FAST instruction.
func do_IMPORT_FROM(vm *Vm, namei int32) {
vm.NotImplemented("IMPORT_FROM", namei)
}
// Increments bytecode counter by delta.
func do_JUMP_FORWARD(vm *Vm, delta int32) {
vm.NotImplemented("JUMP_FORWARD", delta)
}
// If TOS is true, sets the bytecode counter to target. TOS is popped.
func do_POP_JUMP_IF_TRUE(vm *Vm, target int32) {
vm.NotImplemented("POP_JUMP_IF_TRUE", target)
}
// If TOS is false, sets the bytecode counter to target. TOS is popped.
func do_POP_JUMP_IF_FALSE(vm *Vm, target int32) {
vm.NotImplemented("POP_JUMP_IF_FALSE", target)
}
// If TOS is true, sets the bytecode counter to target and leaves TOS
// on the stack. Otherwise (TOS is false), TOS is popped.
func do_JUMP_IF_TRUE_OR_POP(vm *Vm, target int32) {
vm.NotImplemented("JUMP_IF_TRUE_OR_POP", target)
}
// If TOS is false, sets the bytecode counter to target and leaves TOS
// on the stack. Otherwise (TOS is true), TOS is popped.
func do_JUMP_IF_FALSE_OR_POP(vm *Vm, target int32) {
vm.NotImplemented("JUMP_IF_FALSE_OR_POP", target)
}
// Set bytecode counter to target.
func do_JUMP_ABSOLUTE(vm *Vm, target int32) {
vm.NotImplemented("JUMP_ABSOLUTE", target)
}
// TOS is an iterator. Call its next( ) method. If this yields a new
// value, push it on the stack (leaving the iterator below it). If the
// iterator indicates it is exhausted TOS is popped, and the bytecode
// counter is incremented by delta.
func do_FOR_ITER(vm *Vm, delta int32) {
vm.NotImplemented("FOR_ITER", delta)
}
// Loads the global named co_names[namei] onto the stack.
func do_LOAD_GLOBAL(vm *Vm, namei int32) {
vm.NotImplemented("LOAD_GLOBAL", namei)
}
// Pushes a block for a loop onto the block stack. The block spans
// from the current instruction with a size of delta bytes.
func do_SETUP_LOOP(vm *Vm, delta int32) {
vm.NotImplemented("SETUP_LOOP", delta)
}
// Pushes a try block from a try-except clause onto the block
// stack. delta points to the first except block.
func do_SETUP_EXCEPT(vm *Vm, delta int32) {
vm.NotImplemented("SETUP_EXCEPT", delta)
}
// Pushes a try block from a try-except clause onto the block
// stack. delta points to the finally block.
func do_SETUP_FINALLY(vm *Vm, delta int32) {
vm.NotImplemented("SETUP_FINALLY", delta)
}
// Store a key and value pair in a dictionary. Pops the key and value
// while leaving the dictionary on the stack.
func do_STORE_MAP(vm *Vm, arg int32) {
vm.NotImplemented("STORE_MAP", arg)
}
// Pushes a reference to the local co_varnames[var_num] onto the stack.
func do_LOAD_FAST(vm *Vm, var_num int32) {
vm.NotImplemented("LOAD_FAST", var_num)
}
// Stores TOS into the local co_varnames[var_num].
func do_STORE_FAST(vm *Vm, var_num int32) {
vm.NotImplemented("STORE_FAST", var_num)
}
// Deletes local co_varnames[var_num].
func do_DELETE_FAST(vm *Vm, var_num int32) {
vm.NotImplemented("DELETE_FAST", var_num)
}
// Pushes a reference to the cell contained in slot i of the cell and
// free variable storage. The name of the variable is co_cellvars[i]
// if i is less than the length of co_cellvars. Otherwise it is
// co_freevars[i - len(co_cellvars)].
func do_LOAD_CLOSURE(vm *Vm, i int32) {
vm.NotImplemented("LOAD_CLOSURE", i)
}
// Loads the cell contained in slot i of the cell and free variable
// storage. Pushes a reference to the object the cell contains on the
// stack.
func do_LOAD_DEREF(vm *Vm, i int32) {
vm.NotImplemented("LOAD_DEREF", i)
}
// Much like LOAD_DEREF but first checks the locals dictionary before
// consulting the cell. This is used for loading free variables in
// class bodies.
func do_LOAD_CLASSDEREF(vm *Vm, i int32) {
vm.NotImplemented("LOAD_CLASSDEREF", i)
}
// Stores TOS into the cell contained in slot i of the cell and free
// variable storage.
func do_STORE_DEREF(vm *Vm, i int32) {
vm.NotImplemented("STORE_DEREF", i)
}
// Empties the cell contained in slot i of the cell and free variable
// storage. Used by the del statement.
func do_DELETE_DEREF(vm *Vm, i int32) {
vm.NotImplemented("DELETE_DEREF", i)
}
// Raises an exception. argc indicates the number of parameters to the
// raise statement, ranging from 0 to 3. The handler will find the
// traceback as TOS2, the parameter as TOS1, and the exception as TOS.
func do_RAISE_VARARGS(vm *Vm, argc int32) {
vm.NotImplemented("RAISE_VARARGS", argc)
}
// Calls a function. The low byte of argc indicates the number of
// positional parameters, the high byte the number of keyword
// parameters. On the stack, the opcode finds the keyword parameters
// first. For each keyword argument, the value is on top of the
// key. Below the keyword parameters, the positional parameters are on
// the stack, with the right-most parameter on top. Below the
// parameters, the function object to call is on the stack. Pops all
// function arguments, and the function itself off the stack, and
// pushes the return value.
func do_CALL_FUNCTION(vm *Vm, argc int32) {
	// Decode the two counts packed into argc.
	nargs := int(argc & 0xFF)          // positional argument count
	nkwargs := int((argc >> 8) & 0xFF) // keyword (key, value) pair count
	// Keyword arguments occupy the top 2*nkwargs stack slots.
	p, q := len(vm.stack)-2*nkwargs, len(vm.stack)
	kwargs := vm.stack[p:q]
	// Positional arguments sit immediately below the keyword arguments.
	p, q = p-nargs, p
	args := py.Tuple(vm.stack[p:q])
	// The callable itself is just below the positional arguments.
	p, q = p-1, p
	fn := vm.stack[p]
	// Overwrite the callable's slot with the call result...
	vm.stack[p] = vm.call(fn, args, kwargs)
	// ...then truncate the stack so the result becomes the new TOS
	// (q == p+1 here, so the result slot is retained).
	vm.stack = vm.stack[:q]
}
// Pushes a new function object on the stack. TOS is the code
// associated with the function. The function object is defined to
// have argc default parameters, which are found below TOS.
//
// argc is packed: low byte = number of positional defaults, next byte =
// number of keyword-only defaults, next 15 bits = number of annotation
// entries (the annotation values plus the names tuple).
//
// FIXME these docs are slightly wrong.
func do_MAKE_FUNCTION(vm *Vm, argc int32) {
	posdefaults := argc & 0xff
	kwdefaults := (argc >> 8) & 0xff
	num_annotations := (argc >> 16) & 0x7fff
	// Stack layout (top down): qualified name, code object, then
	// optionally annotations, keyword-only defaults and positional
	// defaults.
	qualname := vm.POP()
	code := vm.POP()
	function := py.NewFunction(code.(*py.Code), vm.globals, qualname.(py.String))
	// FIXME share code with MAKE_CLOSURE
	// if opcode == MAKE_CLOSURE {
	//	function.Closure = vm.POP();
	// }
	if num_annotations > 0 {
		names := vm.POP().(py.Tuple) // names of args with annotations
		anns := py.NewStringDict()
		name_ix := int32(len(names))
		// num_annotations counts the values plus the names tuple itself.
		if num_annotations != name_ix+1 {
			panic("num_annotations wrong - corrupt bytecode?")
		}
		// Pop the annotation values right-to-left, pairing each with its
		// name from the tuple.
		for name_ix > 0 {
			name_ix--
			name := names[name_ix]
			value := vm.POP()
			anns[string(name.(py.String))] = value
		}
		function.Annotations = anns
	}
	if kwdefaults > 0 {
		defs := py.NewStringDict()
		// Each keyword-only default is a (name, value) pair with the
		// value on top.
		for kwdefaults--; kwdefaults >= 0; kwdefaults-- {
			v := vm.POP()   // default value
			key := vm.POP() // kw only arg name
			defs[string(key.(py.String))] = v
		}
		function.KwDefaults = defs
	}
	if posdefaults > 0 {
		defs := make(py.Tuple, posdefaults)
		// Popped in reverse so defs ends up in left-to-right order.
		for posdefaults--; posdefaults >= 0; posdefaults-- {
			defs[posdefaults] = vm.POP()
		}
		function.Defaults = defs
	}
	vm.PUSH(function)
}
// Creates a new function object, sets its func_closure slot, and
// pushes it on the stack. TOS is the code associated with the
// function, TOS1 the tuple containing cells for the closure’s free
// variables. The function also has argc default parameters, which are
// found below the cells.
func do_MAKE_CLOSURE(vm *Vm, argc int32) {
vm.NotImplemented("MAKE_CLOSURE", argc)
// see MAKE_FUNCTION
}
// Pushes a slice object on the stack. argc must be 2 or 3. If it is
// 2, slice(TOS1, TOS) is pushed; if it is 3, slice(TOS2, TOS1, TOS)
// is pushed. See the slice( ) built-in function for more information.
func do_BUILD_SLICE(vm *Vm, argc int32) {
vm.NotImplemented("BUILD_SLICE", argc)
}
// Prefixes any opcode which has an argument too big to fit into the
// default two bytes. ext holds two additional bytes which, taken
// together with the subsequent opcode's argument, comprise a
// four-byte argument, ext being the two most-significant bytes.
func do_EXTENDED_ARG(vm *Vm, ext int32) {
	// Stash the high 16 bits; the main loop in Run folds vm.ext into the
	// next opcode's argument and then clears vm.extended.
	vm.ext = ext
	vm.extended = true
}
// Calls a function. argc is interpreted as in CALL_FUNCTION. The top
// element on the stack contains the variable argument list, followed
// by keyword and positional arguments.
func do_CALL_FUNCTION_VAR(vm *Vm, argc int32) {
vm.NotImplemented("CALL_FUNCTION_VAR", argc)
}
// Calls a function. argc is interpreted as in CALL_FUNCTION. The top
// element on the stack contains the keyword arguments dictionary,
// followed by explicit keyword and positional arguments.
func do_CALL_FUNCTION_KW(vm *Vm, argc int32) {
vm.NotImplemented("CALL_FUNCTION_KW", argc)
}
// Calls a function. argc is interpreted as in CALL_FUNCTION. The top
// element on the stack contains the keyword arguments dictionary,
// followed by the variable-arguments tuple, followed by explicit
// keyword and positional arguments.
func do_CALL_FUNCTION_VAR_KW(vm *Vm, argc int32) {
vm.NotImplemented("CALL_FUNCTION_VAR_KW", argc)
}
// NotImplemented reports an opcode that has no handler yet. It only
// prints a diagnostic to stdout; execution continues with the next
// instruction.
func (vm *Vm) NotImplemented(name string, arg int32) {
	fmt.Printf("%s %d NOT IMPLEMENTED\n", name, arg)
}
// Poke the vm.Run into py
//
// Wires the interpreter entry point into the py package at startup —
// presumably so py can invoke the VM without importing this package
// (which would be circular); confirm against the py package.
func init() {
	py.VmRun = Run
}
// Run the virtual machine on the code object in the module
//
// FIXME figure out how we are going to signal exceptions!
//
// Any parameters are expected to have been decoded into locals
func Run(globals, locals py.StringDict, co *py.Code) (err error) {
	// Opcode handlers panic on fatal problems; convert any panic into a
	// normal Go error return.
	defer func() {
		if r := recover(); r != nil {
			switch x := r.(type) {
			case error:
				err = x
			case string:
				err = errors.New(x)
			default:
				err = fmt.Errorf("Unknown error '%v'", x)
			}
		}
	}()
	vm := &Vm{
		stack:   make([]py.Object, 0, 16),
		globals: globals,
		locals:  locals,
		co:      co,
	}
	ip := 0
	var opcode byte
	var arg int32
	code := co.Code
	for !vm.exit {
		opcode = code[ip]
		ip++
		if HAS_ARG(opcode) {
			// Opcode arguments are two bytes, little-endian.
			//
			// BUG FIX: the original computed int32(code[ip] << 8); the
			// shift was performed on a byte, so the high byte was always
			// zero and any argument >= 256 was silently truncated. The
			// operand must be widened to int32 before shifting.
			arg = int32(code[ip])
			ip++
			arg |= int32(code[ip]) << 8
			ip++
			// Fold in the two high bytes stashed by EXTENDED_ARG.
			if vm.extended {
				arg += vm.ext << 16
			}
			fmt.Printf("* %s(%d)\n", OpCodeToName[opcode], arg)
		} else {
			fmt.Printf("* %s\n", OpCodeToName[opcode])
		}
		vm.extended = false
		jumpTable[opcode](vm, arg)
	}
	return nil
}
Implement LOAD_FAST
// Evaluate opcodes
package vm
import (
"errors"
"fmt"
"github.com/ncw/gpython/py"
)
// Stack operations
func (vm *Vm) STACK_LEVEL() int { return len(vm.stack) }
func (vm *Vm) EMPTY() bool { return len(vm.stack) == 0 }
func (vm *Vm) TOP() py.Object { return vm.stack[len(vm.stack)-1] }
func (vm *Vm) SECOND() py.Object { return vm.stack[len(vm.stack)-2] }
func (vm *Vm) THIRD() py.Object { return vm.stack[len(vm.stack)-3] }
func (vm *Vm) FOURTH() py.Object { return vm.stack[len(vm.stack)-4] }
func (vm *Vm) PEEK(n int) py.Object { return vm.stack[len(vm.stack)-n] }
func (vm *Vm) SET_TOP(v py.Object) { vm.stack[len(vm.stack)-1] = v }
func (vm *Vm) SET_SECOND(v py.Object) { vm.stack[len(vm.stack)-2] = v }
func (vm *Vm) SET_THIRD(v py.Object) { vm.stack[len(vm.stack)-3] = v }
func (vm *Vm) SET_FOURTH(v py.Object) { vm.stack[len(vm.stack)-4] = v }
func (vm *Vm) SET_VALUE(n int, v py.Object) { vm.stack[len(vm.stack)-(n)] = (v) }
func (vm *Vm) DROPN(n int) { vm.stack = vm.stack[:len(vm.stack)-n] }
// POP removes and returns the top element of the vm stack.
func (vm *Vm) POP() py.Object {
	// FIXME what if empty?
	last := len(vm.stack) - 1
	obj := vm.stack[last]
	vm.stack = vm.stack[:last]
	return obj
}
// Push to top of vm stack
func (vm *Vm) PUSH(obj py.Object) {
vm.stack = append(vm.stack, obj)
}
// Illegal instruction
func do_ILLEGAL(vm *Vm, arg int32) {
panic("Illegal opcode")
}
// Do nothing code. Used as a placeholder by the bytecode optimizer.
func do_NOP(vm *Vm, arg int32) {
}
// Removes the top-of-stack (TOS) item.
func do_POP_TOP(vm *Vm, arg int32) {
vm.DROPN(1)
}
// Swaps the two top-most stack items.
func do_ROT_TWO(vm *Vm, arg int32) {
	n := len(vm.stack)
	vm.stack[n-1], vm.stack[n-2] = vm.stack[n-2], vm.stack[n-1]
}
// Lifts second and third stack item one position up, moves top down
// to position three.
func do_ROT_THREE(vm *Vm, arg int32) {
	n := len(vm.stack)
	// (new TOS, new second, new third) = (old second, old third, old TOS)
	vm.stack[n-1], vm.stack[n-2], vm.stack[n-3] =
		vm.stack[n-2], vm.stack[n-3], vm.stack[n-1]
}
// Duplicates the reference on top of the stack.
func do_DUP_TOP(vm *Vm, arg int32) {
vm.PUSH(vm.TOP())
}
// Duplicates the top two reference on top of the stack.
func do_DUP_TOP_TWO(vm *Vm, arg int32) {
	n := len(vm.stack)
	// Push copies of SECOND then TOP, preserving their order.
	vm.stack = append(vm.stack, vm.stack[n-2], vm.stack[n-1])
}
// Unary Operations take the top of the stack, apply the operation,
// and push the result back on the stack.
// Implements TOS = +TOS.
func do_UNARY_POSITIVE(vm *Vm, arg int32) {
vm.NotImplemented("UNARY_POSITIVE", arg)
}
// Implements TOS = -TOS.
func do_UNARY_NEGATIVE(vm *Vm, arg int32) {
vm.NotImplemented("UNARY_NEGATIVE", arg)
}
// Implements TOS = not TOS.
func do_UNARY_NOT(vm *Vm, arg int32) {
vm.NotImplemented("UNARY_NOT", arg)
}
// Implements TOS = ~TOS.
func do_UNARY_INVERT(vm *Vm, arg int32) {
vm.NotImplemented("UNARY_INVERT", arg)
}
// Implements TOS = iter(TOS).
func do_GET_ITER(vm *Vm, arg int32) {
vm.NotImplemented("GET_ITER", arg)
}
// Binary operations remove the top of the stack (TOS) and the second
// top-most stack item (TOS1) from the stack. They perform the
// operation, and put the result back on the stack.
// Implements TOS = TOS1 ** TOS.
func do_BINARY_POWER(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_POWER", arg)
}
// Implements TOS = TOS1 * TOS.
func do_BINARY_MULTIPLY(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_MULTIPLY", arg)
}
// Implements TOS = TOS1 // TOS.
func do_BINARY_FLOOR_DIVIDE(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_FLOOR_DIVIDE", arg)
}
// Implements TOS = TOS1 / TOS when from __future__ import division is
// in effect.
func do_BINARY_TRUE_DIVIDE(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_TRUE_DIVIDE", arg)
}
// Implements TOS = TOS1 % TOS.
func do_BINARY_MODULO(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_MODULO", arg)
}
// Implements TOS = TOS1 + TOS.
func do_BINARY_ADD(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_ADD", arg)
}
// Implements TOS = TOS1 - TOS.
func do_BINARY_SUBTRACT(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_SUBTRACT", arg)
}
// Implements TOS = TOS1[TOS].
func do_BINARY_SUBSCR(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_SUBSCR", arg)
}
// Implements TOS = TOS1 << TOS.
func do_BINARY_LSHIFT(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_LSHIFT", arg)
}
// Implements TOS = TOS1 >> TOS.
func do_BINARY_RSHIFT(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_RSHIFT", arg)
}
// Implements TOS = TOS1 & TOS.
func do_BINARY_AND(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_AND", arg)
}
// Implements TOS = TOS1 ^ TOS.
func do_BINARY_XOR(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_XOR", arg)
}
// Implements TOS = TOS1 | TOS.
func do_BINARY_OR(vm *Vm, arg int32) {
vm.NotImplemented("BINARY_OR", arg)
}
// In-place operations are like binary operations, in that they remove
// TOS and TOS1, and push the result back on the stack, but the
// operation is done in-place when TOS1 supports it, and the resulting
// TOS may be (but does not have to be) the original TOS1.
// Implements in-place TOS = TOS1 ** TOS.
func do_INPLACE_POWER(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_POWER", arg)
}
// Implements in-place TOS = TOS1 * TOS.
func do_INPLACE_MULTIPLY(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_MULTIPLY", arg)
}
// Implements in-place TOS = TOS1 // TOS.
func do_INPLACE_FLOOR_DIVIDE(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_FLOOR_DIVIDE", arg)
}
// Implements in-place TOS = TOS1 / TOS when from __future__ import
// division is in effect.
func do_INPLACE_TRUE_DIVIDE(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_TRUE_DIVIDE", arg)
}
// Implements in-place TOS = TOS1 % TOS.
func do_INPLACE_MODULO(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_MODULO", arg)
}
// Implements in-place TOS = TOS1 + TOS.
func do_INPLACE_ADD(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_ADD", arg)
}
// Implements in-place TOS = TOS1 - TOS.
func do_INPLACE_SUBTRACT(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_SUBTRACT", arg)
}
// Implements in-place TOS = TOS1 << TOS.
func do_INPLACE_LSHIFT(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_LSHIFT", arg)
}
// Implements in-place TOS = TOS1 >> TOS.
func do_INPLACE_RSHIFT(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_RSHIFT", arg)
}
// Implements in-place TOS = TOS1 & TOS.
func do_INPLACE_AND(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_AND", arg)
}
// Implements in-place TOS = TOS1 ^ TOS.
func do_INPLACE_XOR(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_XOR", arg)
}
// Implements in-place TOS = TOS1 | TOS.
func do_INPLACE_OR(vm *Vm, arg int32) {
vm.NotImplemented("INPLACE_OR", arg)
}
// Implements TOS1[TOS] = TOS2.
func do_STORE_SUBSCR(vm *Vm, arg int32) {
vm.NotImplemented("STORE_SUBSCR", arg)
}
// Implements del TOS1[TOS].
func do_DELETE_SUBSCR(vm *Vm, arg int32) {
vm.NotImplemented("DELETE_SUBSCR", arg)
}
// Miscellaneous opcodes.
//
// All handlers in this section are unimplemented stubs that only
// report themselves via vm.NotImplemented.

// Implements the expression statement for the interactive mode. TOS
// is removed from the stack and printed. In non-interactive mode, an
// expression statement is terminated with POP_STACK.
func do_PRINT_EXPR(vm *Vm, arg int32) {
	vm.NotImplemented("PRINT_EXPR", arg)
}

// Terminates a loop due to a break statement.
func do_BREAK_LOOP(vm *Vm, arg int32) {
	vm.NotImplemented("BREAK_LOOP", arg)
}

// Continues a loop due to a continue statement. target is the address
// to jump to (which should be a FOR_ITER instruction).
func do_CONTINUE_LOOP(vm *Vm, target int32) {
	vm.NotImplemented("CONTINUE_LOOP", target)
}

// Implements assignment with a starred target: Unpacks an iterable in
// TOS into individual values, where the total number of values can be
// smaller than the number of items in the iterable: one the new
// values will be a list of all leftover items.
//
// The low byte of counts is the number of values before the list
// value, the high byte of counts the number of values after it. The
// resulting values are put onto the stack right-to-left.
func do_UNPACK_EX(vm *Vm, counts int32) {
	vm.NotImplemented("UNPACK_EX", counts)
}

// Calls set.add(TOS1[-i], TOS). Used to implement set comprehensions.
func do_SET_ADD(vm *Vm, i int32) {
	vm.NotImplemented("SET_ADD", i)
}

// Calls list.append(TOS[-i], TOS). Used to implement list
// comprehensions. While the appended value is popped off, the list
// object remains on the stack so that it is available for further
// iterations of the loop.
func do_LIST_APPEND(vm *Vm, i int32) {
	vm.NotImplemented("LIST_APPEND", i)
}

// Calls dict.setitem(TOS1[-i], TOS, TOS1). Used to implement dict comprehensions.
func do_MAP_ADD(vm *Vm, i int32) {
	vm.NotImplemented("MAP_ADD", i)
}
// Returns with TOS to the caller of the function.
//
// Setting vm.exit stops the main interpreter loop in Run. Note that
// the return value itself is not yet propagated to the caller here.
func do_RETURN_VALUE(vm *Vm, arg int32) {
	vm.exit = true
}
// Generator, import and block-stack opcodes. All handlers in this
// section are unimplemented stubs that only report themselves via
// vm.NotImplemented.

// Pops TOS and delegates to it as a subiterator from a generator.
func do_YIELD_FROM(vm *Vm, arg int32) {
	vm.NotImplemented("YIELD_FROM", arg)
}

// Pops TOS and yields it from a generator.
func do_YIELD_VALUE(vm *Vm, arg int32) {
	vm.NotImplemented("YIELD_VALUE", arg)
}

// Loads all symbols not starting with '_' directly from the module
// TOS to the local namespace. The module is popped after loading all
// names. This opcode implements from module import *.
func do_IMPORT_STAR(vm *Vm, arg int32) {
	vm.NotImplemented("IMPORT_STAR", arg)
}

// Removes one block from the block stack. Per frame, there is a stack
// of blocks, denoting nested loops, try statements, and such.
func do_POP_BLOCK(vm *Vm, arg int32) {
	vm.NotImplemented("POP_BLOCK", arg)
}

// Removes one block from the block stack. The popped block must be an
// exception handler block, as implicitly created when entering an
// except handler. In addition to popping extraneous values from the
// frame stack, the last three popped values are used to restore the
// exception state.
func do_POP_EXCEPT(vm *Vm, arg int32) {
	vm.NotImplemented("POP_EXCEPT", arg)
}

// Terminates a finally clause. The interpreter recalls whether the
// exception has to be re-raised, or whether the function returns, and
// continues with the outer-next block.
func do_END_FINALLY(vm *Vm, arg int32) {
	vm.NotImplemented("END_FINALLY", arg)
}

// Creates a new class object. TOS is the methods dictionary, TOS1 the
// tuple of the names of the base classes, and TOS2 the class name.
func do_LOAD_BUILD_CLASS(vm *Vm, arg int32) {
	vm.NotImplemented("LOAD_BUILD_CLASS", arg)
}

// This opcode performs several operations before a with block
// starts. First, it loads __exit__( ) from the context manager and
// pushes it onto the stack for later use by WITH_CLEANUP. Then,
// __enter__( ) is called, and a finally block pointing to delta is
// pushed. Finally, the result of calling the enter method is pushed
// onto the stack. The next opcode will either ignore it (POP_TOP), or
// store it in (a) variable(s) (STORE_FAST, STORE_NAME, or
// UNPACK_SEQUENCE).
func do_SETUP_WITH(vm *Vm, delta int32) {
	vm.NotImplemented("SETUP_WITH", delta)
}

// Cleans up the stack when a with statement block exits. On top of
// the stack are 1–3 values indicating how/why the finally clause was
// entered:
//
// TOP = None
// (TOP, SECOND) = (WHY_{RETURN,CONTINUE}), retval
// TOP = WHY_*; no retval below it
// (TOP, SECOND, THIRD) = exc_info( )
// Under them is EXIT, the context manager’s __exit__( ) bound method.
//
// In the last case, EXIT(TOP, SECOND, THIRD) is called, otherwise
// EXIT(None, None, None).
//
// EXIT is removed from the stack, leaving the values above it in the
// same order. In addition, if the stack represents an exception, and
// the function call returns a ‘true’ value, this information is
// “zapped”, to prevent END_FINALLY from re-raising the
// exception. (But non-local gotos should still be resumed.)
func do_WITH_CLEANUP(vm *Vm, arg int32) {
	vm.NotImplemented("WITH_CLEANUP", arg)
}
// Implements name = TOS. namei is the index of name in the attribute
// co_names of the code object. The compiler tries to use STORE_FAST
// or STORE_GLOBAL if possible.
func do_STORE_NAME(vm *Vm, namei int32) {
vm.locals[string(vm.co.Names[namei].(py.String))] = vm.POP()
}
// Name, attribute and global store/delete opcodes. All handlers in
// this section are unimplemented stubs.

// Implements del name, where namei is the index into co_names
// attribute of the code object.
func do_DELETE_NAME(vm *Vm, namei int32) {
	vm.NotImplemented("DELETE_NAME", namei)
}

// Unpacks TOS into count individual values, which are put onto the
// stack right-to-left.
func do_UNPACK_SEQUENCE(vm *Vm, count int32) {
	vm.NotImplemented("UNPACK_SEQUENCE", count)
}

// Implements TOS.name = TOS1, where namei is the index of name in
// co_names.
func do_STORE_ATTR(vm *Vm, namei int32) {
	vm.NotImplemented("STORE_ATTR", namei)
}

// Implements del TOS.name, using namei as index into co_names.
func do_DELETE_ATTR(vm *Vm, namei int32) {
	vm.NotImplemented("DELETE_ATTR", namei)
}

// Works as STORE_NAME, but stores the name as a global.
func do_STORE_GLOBAL(vm *Vm, namei int32) {
	vm.NotImplemented("STORE_GLOBAL", namei)
}

// Works as DELETE_NAME, but deletes a global name.
func do_DELETE_GLOBAL(vm *Vm, namei int32) {
	vm.NotImplemented("DELETE_GLOBAL", namei)
}
// Pushes co_consts[consti] onto the stack.
func do_LOAD_CONST(vm *Vm, consti int32) {
	// Constants are looked up directly in the code object's table.
	constant := vm.co.Consts[consti]
	vm.PUSH(constant)
}
// Pushes the value associated with co_names[namei] onto the stack.
//
// BUG FIX: the previous implementation pushed the name object itself
// (vm.co.Names[namei]) rather than the value bound to that name.
// STORE_NAME binds names in vm.locals, so look the value up there
// first, then fall back to vm.globals. A missing name panics, which
// Run converts into an error return.
func do_LOAD_NAME(vm *Vm, namei int32) {
	name := string(vm.co.Names[namei].(py.String))
	if obj, ok := vm.locals[name]; ok {
		vm.PUSH(obj)
		return
	}
	if obj, ok := vm.globals[name]; ok {
		vm.PUSH(obj)
		return
	}
	// FIXME should raise a proper NameError once exceptions exist.
	panic(fmt.Sprintf("NameError: name '%s' is not defined", name))
}
// Container-construction and attribute opcodes. All handlers in this
// section are unimplemented stubs.

// Creates a tuple consuming count items from the stack, and pushes
// the resulting tuple onto the stack.
func do_BUILD_TUPLE(vm *Vm, count int32) {
	vm.NotImplemented("BUILD_TUPLE", count)
}

// Works as BUILD_TUPLE, but creates a set.
func do_BUILD_SET(vm *Vm, count int32) {
	vm.NotImplemented("BUILD_SET", count)
}

// Works as BUILD_TUPLE, but creates a list.
func do_BUILD_LIST(vm *Vm, count int32) {
	vm.NotImplemented("BUILD_LIST", count)
}

// Pushes a new dictionary object onto the stack. The dictionary is
// pre-sized to hold count entries.
func do_BUILD_MAP(vm *Vm, count int32) {
	vm.NotImplemented("BUILD_MAP", count)
}

// Replaces TOS with getattr(TOS, co_names[namei]).
func do_LOAD_ATTR(vm *Vm, namei int32) {
	vm.NotImplemented("LOAD_ATTR", namei)
}
// Comparison, import, jump and block-setup opcodes. All handlers in
// this section are unimplemented stubs.

// Performs a Boolean operation. The operation name can be found in
// cmp_op[opname].
func do_COMPARE_OP(vm *Vm, opname int32) {
	vm.NotImplemented("COMPARE_OP", opname)
}

// Imports the module co_names[namei]. TOS and TOS1 are popped and
// provide the fromlist and level arguments of __import__( ). The
// module object is pushed onto the stack. The current namespace is
// not affected: for a proper import statement, a subsequent
// STORE_FAST instruction modifies the namespace.
func do_IMPORT_NAME(vm *Vm, namei int32) {
	vm.NotImplemented("IMPORT_NAME", namei)
}

// Loads the attribute co_names[namei] from the module found in
// TOS. The resulting object is pushed onto the stack, to be
// subsequently stored by a STORE_FAST instruction.
func do_IMPORT_FROM(vm *Vm, namei int32) {
	vm.NotImplemented("IMPORT_FROM", namei)
}

// Increments bytecode counter by delta.
func do_JUMP_FORWARD(vm *Vm, delta int32) {
	vm.NotImplemented("JUMP_FORWARD", delta)
}

// If TOS is true, sets the bytecode counter to target. TOS is popped.
func do_POP_JUMP_IF_TRUE(vm *Vm, target int32) {
	vm.NotImplemented("POP_JUMP_IF_TRUE", target)
}

// If TOS is false, sets the bytecode counter to target. TOS is popped.
func do_POP_JUMP_IF_FALSE(vm *Vm, target int32) {
	vm.NotImplemented("POP_JUMP_IF_FALSE", target)
}

// If TOS is true, sets the bytecode counter to target and leaves TOS
// on the stack. Otherwise (TOS is false), TOS is popped.
func do_JUMP_IF_TRUE_OR_POP(vm *Vm, target int32) {
	vm.NotImplemented("JUMP_IF_TRUE_OR_POP", target)
}

// If TOS is false, sets the bytecode counter to target and leaves TOS
// on the stack. Otherwise (TOS is true), TOS is popped.
func do_JUMP_IF_FALSE_OR_POP(vm *Vm, target int32) {
	vm.NotImplemented("JUMP_IF_FALSE_OR_POP", target)
}

// Set bytecode counter to target.
func do_JUMP_ABSOLUTE(vm *Vm, target int32) {
	vm.NotImplemented("JUMP_ABSOLUTE", target)
}

// TOS is an iterator. Call its next( ) method. If this yields a new
// value, push it on the stack (leaving the iterator below it). If the
// iterator indicates it is exhausted TOS is popped, and the bytecode
// counter is incremented by delta.
func do_FOR_ITER(vm *Vm, delta int32) {
	vm.NotImplemented("FOR_ITER", delta)
}

// Loads the global named co_names[namei] onto the stack.
func do_LOAD_GLOBAL(vm *Vm, namei int32) {
	vm.NotImplemented("LOAD_GLOBAL", namei)
}

// Pushes a block for a loop onto the block stack. The block spans
// from the current instruction with a size of delta bytes.
func do_SETUP_LOOP(vm *Vm, delta int32) {
	vm.NotImplemented("SETUP_LOOP", delta)
}

// Pushes a try block from a try-except clause onto the block
// stack. delta points to the first except block.
func do_SETUP_EXCEPT(vm *Vm, delta int32) {
	vm.NotImplemented("SETUP_EXCEPT", delta)
}

// Pushes a try block from a try-except clause onto the block
// stack. delta points to the finally block.
func do_SETUP_FINALLY(vm *Vm, delta int32) {
	vm.NotImplemented("SETUP_FINALLY", delta)
}

// Store a key and value pair in a dictionary. Pops the key and value
// while leaving the dictionary on the stack.
func do_STORE_MAP(vm *Vm, arg int32) {
	vm.NotImplemented("STORE_MAP", arg)
}
// Pushes a reference to the local co_varnames[var_num] onto the stack.
func do_LOAD_FAST(vm *Vm, var_num int32) {
	// Fast locals are named in the code object's co_varnames table and
	// stored in the locals dictionary.
	name := string(vm.co.Varnames[var_num].(py.String))
	vm.PUSH(vm.locals[name])
}
// Fast-local, cell/free-variable and raise opcodes. All handlers in
// this section are unimplemented stubs.

// Stores TOS into the local co_varnames[var_num].
func do_STORE_FAST(vm *Vm, var_num int32) {
	vm.NotImplemented("STORE_FAST", var_num)
}

// Deletes local co_varnames[var_num].
func do_DELETE_FAST(vm *Vm, var_num int32) {
	vm.NotImplemented("DELETE_FAST", var_num)
}

// Pushes a reference to the cell contained in slot i of the cell and
// free variable storage. The name of the variable is co_cellvars[i]
// if i is less than the length of co_cellvars. Otherwise it is
// co_freevars[i - len(co_cellvars)].
func do_LOAD_CLOSURE(vm *Vm, i int32) {
	vm.NotImplemented("LOAD_CLOSURE", i)
}

// Loads the cell contained in slot i of the cell and free variable
// storage. Pushes a reference to the object the cell contains on the
// stack.
func do_LOAD_DEREF(vm *Vm, i int32) {
	vm.NotImplemented("LOAD_DEREF", i)
}

// Much like LOAD_DEREF but first checks the locals dictionary before
// consulting the cell. This is used for loading free variables in
// class bodies.
func do_LOAD_CLASSDEREF(vm *Vm, i int32) {
	vm.NotImplemented("LOAD_CLASSDEREF", i)
}

// Stores TOS into the cell contained in slot i of the cell and free
// variable storage.
func do_STORE_DEREF(vm *Vm, i int32) {
	vm.NotImplemented("STORE_DEREF", i)
}

// Empties the cell contained in slot i of the cell and free variable
// storage. Used by the del statement.
func do_DELETE_DEREF(vm *Vm, i int32) {
	vm.NotImplemented("DELETE_DEREF", i)
}

// Raises an exception. argc indicates the number of parameters to the
// raise statement, ranging from 0 to 3. The handler will find the
// traceback as TOS2, the parameter as TOS1, and the exception as TOS.
func do_RAISE_VARARGS(vm *Vm, argc int32) {
	vm.NotImplemented("RAISE_VARARGS", argc)
}
// Calls a function. The low byte of argc indicates the number of
// positional parameters, the high byte the number of keyword
// parameters. On the stack, the opcode finds the keyword parameters
// first. For each keyword argument, the value is on top of the
// key. Below the keyword parameters, the positional parameters are on
// the stack, with the right-most parameter on top. Below the
// parameters, the function object to call is on the stack. Pops all
// function arguments, and the function itself off the stack, and
// pushes the return value.
func do_CALL_FUNCTION(vm *Vm, argc int32) {
	// Decode the packed argument counts: low byte = positional args,
	// high byte = keyword args (each kwarg occupies two stack slots).
	nargs := int(argc & 0xFF)
	nkwargs := int((argc >> 8) & 0xFF)
	// Walk down the stack: kwargs are on top...
	p, q := len(vm.stack)-2*nkwargs, len(vm.stack)
	kwargs := vm.stack[p:q]
	// ...then the positional args...
	p, q = p-nargs, p
	args := py.Tuple(vm.stack[p:q])
	// ...and finally the function object itself.
	p, q = p-1, p
	fn := vm.stack[p]
	// Overwrite the function's slot with the return value, then
	// truncate the stack just above it (q here is the old p, i.e. the
	// index one past the result).
	vm.stack[p] = vm.call(fn, args, kwargs)
	// Drop the args off the stack
	vm.stack = vm.stack[:q]
}
// Pushes a new function object on the stack. TOS is the code
// associated with the function. The function object is defined to
// have argc default parameters, which are found below TOS.
//
// FIXME these docs are slightly wrong.
func do_MAKE_FUNCTION(vm *Vm, argc int32) {
	// argc packs three counts: low byte = positional defaults, next
	// byte = keyword-only defaults, remaining bits = annotations.
	posdefaults := argc & 0xff
	kwdefaults := (argc >> 8) & 0xff
	num_annotations := (argc >> 16) & 0x7fff
	// Stack layout (top down): qualified name, code object, then the
	// optional annotation/default groups popped below in order.
	qualname := vm.POP()
	code := vm.POP()
	function := py.NewFunction(code.(*py.Code), vm.globals, qualname.(py.String))
	// FIXME share code with MAKE_CLOSURE
	// if opcode == MAKE_CLOSURE {
	// 	function.Closure = vm.POP();
	// }
	if num_annotations > 0 {
		names := vm.POP().(py.Tuple) // names of args with annotations
		anns := py.NewStringDict()
		name_ix := int32(len(names))
		// num_annotations counts the names tuple itself plus one value
		// per annotated argument.
		if num_annotations != name_ix+1 {
			panic("num_annotations wrong - corrupt bytecode?")
		}
		// Pop annotation values in reverse name order.
		for name_ix > 0 {
			name_ix--
			name := names[name_ix]
			value := vm.POP()
			anns[string(name.(py.String))] = value
		}
		function.Annotations = anns
	}
	if kwdefaults > 0 {
		defs := py.NewStringDict()
		// Each keyword-only default is a (name, value) pair on the
		// stack with the value on top.
		for kwdefaults--; kwdefaults >= 0; kwdefaults-- {
			v := vm.POP()   // default value
			key := vm.POP() // kw only arg name
			defs[string(key.(py.String))] = v
		}
		function.KwDefaults = defs
	}
	if posdefaults > 0 {
		defs := make(py.Tuple, posdefaults)
		// Positional defaults are popped right-to-left into the tuple.
		for posdefaults--; posdefaults >= 0; posdefaults-- {
			defs[posdefaults] = vm.POP()
		}
		function.Defaults = defs
	}
	vm.PUSH(function)
}
// Closure and slice construction opcodes - unimplemented stubs.

// Creates a new function object, sets its func_closure slot, and
// pushes it on the stack. TOS is the code associated with the
// function, TOS1 the tuple containing cells for the closure’s free
// variables. The function also has argc default parameters, which are
// found below the cells.
func do_MAKE_CLOSURE(vm *Vm, argc int32) {
	vm.NotImplemented("MAKE_CLOSURE", argc)
	// see MAKE_FUNCTION
}

// Pushes a slice object on the stack. argc must be 2 or 3. If it is
// 2, slice(TOS1, TOS) is pushed; if it is 3, slice(TOS2, TOS1, TOS)
// is pushed. See the slice( ) built-in function for more information.
func do_BUILD_SLICE(vm *Vm, argc int32) {
	vm.NotImplemented("BUILD_SLICE", argc)
}
// Prefixes any opcode which has an argument too big to fit into the
// default two bytes. ext holds two additional bytes which, taken
// together with the subsequent opcode’s argument, comprise a
// four-byte argument, ext being the two most-significant bytes.
func do_EXTENDED_ARG(vm *Vm, ext int32) {
	// Stash the high bytes and flag them for the main loop in Run,
	// which combines them with the next opcode's argument.
	vm.ext = ext
	vm.extended = true
}
// Variadic call opcodes - unimplemented stubs.

// Calls a function. argc is interpreted as in CALL_FUNCTION. The top
// element on the stack contains the variable argument list, followed
// by keyword and positional arguments.
func do_CALL_FUNCTION_VAR(vm *Vm, argc int32) {
	vm.NotImplemented("CALL_FUNCTION_VAR", argc)
}

// Calls a function. argc is interpreted as in CALL_FUNCTION. The top
// element on the stack contains the keyword arguments dictionary,
// followed by explicit keyword and positional arguments.
func do_CALL_FUNCTION_KW(vm *Vm, argc int32) {
	vm.NotImplemented("CALL_FUNCTION_KW", argc)
}

// Calls a function. argc is interpreted as in CALL_FUNCTION. The top
// element on the stack contains the keyword arguments dictionary,
// followed by the variable-arguments tuple, followed by explicit
// keyword and positional arguments.
func do_CALL_FUNCTION_VAR_KW(vm *Vm, argc int32) {
	vm.NotImplemented("CALL_FUNCTION_VAR_KW", argc)
}
// NotImplemented reports an opcode that has no handler yet. It only
// logs to stdout; execution continues with the stack unchanged, so
// subsequent opcodes may misbehave.
func (vm *Vm) NotImplemented(name string, arg int32) {
	fmt.Printf("%s %d NOT IMPLEMENTED\n", name, arg)
}
// Poke the vm.Run into py so the py package can run code without
// importing this package (avoids an import cycle).
func init() {
	py.VmRun = Run
}
// Run the virtual machine on the code object in the module
//
// FIXME figure out how we are going to signal exceptions!
//
// Any parameters are expected to have been decoded into locals
func Run(globals, locals py.StringDict, co *py.Code) (err error) {
	// Convert panics raised during bytecode execution into an error
	// return so callers do not need their own recover.
	defer func() {
		if r := recover(); r != nil {
			switch x := r.(type) {
			case error:
				err = x
			case string:
				err = errors.New(x)
			default:
				err = fmt.Errorf("unknown error '%v'", x)
			}
		}
	}()
	_vm := Vm{
		stack:   make([]py.Object, 0, 16),
		globals: globals,
		locals:  locals,
		co:      co,
	}
	vm := &_vm
	ip := 0
	var opcode byte
	var arg int32
	code := co.Code
	for !vm.exit {
		opcode = code[ip]
		ip++
		if HAS_ARG(opcode) {
			// An argument is two bytes, with the more significant byte
			// last (little-endian).
			arg = int32(code[ip])
			ip++
			// BUG FIX: widen to int32 *before* shifting. The previous
			// code was int32(code[ip] << 8), which shifts within a byte
			// and always yields 0, dropping the high byte of every
			// argument >= 256.
			arg += int32(code[ip]) << 8
			ip++
			if vm.extended {
				arg += vm.ext << 16
			}
			fmt.Printf("* %s(%d)\n", OpCodeToName[opcode], arg)
		} else {
			fmt.Printf("* %s\n", OpCodeToName[opcode])
		}
		// The extended-arg prefix applies only to the next opcode;
		// clear it before dispatch so do_EXTENDED_ARG can set it anew.
		vm.extended = false
		jumpTable[opcode](vm, arg)
	}
	return nil
}
|
package web
import (
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"net/url"
"path/filepath"
"strconv"
"strings"
"text/template/parse"
"time"
"github.com/StackExchange/bosun/_third_party/github.com/MiniProfiler/go/miniprofiler"
"github.com/StackExchange/bosun/_third_party/github.com/StackExchange/scollector/collect"
"github.com/StackExchange/bosun/_third_party/github.com/StackExchange/scollector/metadata"
"github.com/StackExchange/bosun/_third_party/github.com/StackExchange/scollector/opentsdb"
"github.com/StackExchange/bosun/_third_party/github.com/gorilla/mux"
"github.com/StackExchange/bosun/conf"
"github.com/StackExchange/bosun/expr"
"github.com/StackExchange/bosun/sched"
)
// Package-level state shared by all handlers.
var (
	templates *template.Template // parsed index template, set in Listen
	router    = mux.NewRouter()  // routes for everything under /api/
	schedule  = sched.DefaultSched
)
// Configure the miniprofiler UI defaults before any handler runs.
func init() {
	miniprofiler.Position = "bottomleft"
	miniprofiler.StartHidden = true
}
// Listen parses the web templates, registers all API routes and static
// file handlers, starts the OpenTSDB relay on relayListen, and then
// serves HTTP on addr. It blocks until the server fails, returning the
// error from template parsing or http.ListenAndServe.
func Listen(addr, dir, host, relayListen string) error {
	var err error
	// Use filepath.Join for an OS-correct path instead of manual
	// string concatenation.
	templates, err = template.New("").ParseFiles(
		filepath.Join(dir, "templates/index.html"),
	)
	if err != nil {
		// Return the error instead of log.Fatal: this function already
		// has an error return, so let the caller decide how to die.
		return err
	}
	RelayHTTP(relayListen, host, JSON(PutMetadata))
	router.Handle("/api/action", JSON(Action))
	router.Handle("/api/alerts", JSON(Alerts))
	router.Handle("/api/alerts/details", JSON(AlertDetails))
	router.Handle("/api/config", miniprofiler.NewHandler(Config))
	router.Handle("/api/config_test", miniprofiler.NewHandler(ConfigTest))
	router.Handle("/api/egraph/{bs}.svg", JSON(ExprGraph))
	router.Handle("/api/expr", JSON(Expr))
	router.Handle("/api/graph", JSON(Graph))
	router.Handle("/api/health", JSON(HealthCheck))
	router.Handle("/api/metadata/get", JSON(GetMetadata))
	router.Handle("/api/metadata/put", JSON(PutMetadata))
	router.Handle("/api/metric", JSON(UniqueMetrics))
	router.Handle("/api/metric/{tagk}/{tagv}", JSON(MetricsByTagPair))
	router.Handle("/api/rule", JSON(Rule))
	router.Handle("/api/silence/clear", JSON(SilenceClear))
	router.Handle("/api/silence/get", JSON(SilenceGet))
	router.Handle("/api/silence/set", JSON(SilenceSet))
	router.Handle("/api/tagk/{metric}", JSON(TagKeysByMetric))
	router.Handle("/api/tagv/{tagk}", JSON(TagValuesByTagKey))
	router.Handle("/api/tagv/{tagk}/{metric}", JSON(TagValuesByMetricTagKey))
	router.Handle("/api/templates", JSON(Templates))
	router.HandleFunc("/api/put", Relay(host, JSON(PutMetadata)))
	http.Handle("/", miniprofiler.NewHandler(Index))
	http.Handle("/api/", router)
	fs := http.FileServer(http.Dir(dir))
	http.Handle("/partials/", fs)
	http.Handle("/static/", fs)
	static := http.FileServer(http.Dir(filepath.Join(dir, "static")))
	http.Handle("/favicon.ico", static)
	log.Println("bosun web listening on:", addr)
	log.Println("bosun web directory:", dir)
	return http.ListenAndServe(addr, nil)
}
// RelayHTTP starts (in a background goroutine) an HTTP server on addr
// that relays all requests to the OpenTSDB instance at dest, except
// metadata puts which are handled locally by metaHandler. The process
// dies (log.Fatal) if the relay listener fails.
func RelayHTTP(addr, dest string, metaHandler http.Handler) {
	mux := http.NewServeMux()
	mux.HandleFunc("/", Relay(dest, metaHandler))
	log.Println("OpenTSDB relay listening on:", addr)
	log.Println("OpenTSDB destination:", dest)
	go func() { log.Fatal(http.ListenAndServe(addr, mux)) }()
}
// client is the shared HTTP client used to forward relayed requests.
// Keep-alives are disabled and idle connections are periodically
// closed by timeoutTransport; each request has a one-minute deadline.
var client *http.Client = &http.Client{
	Transport: &timeoutTransport{
		Transport: &http.Transport{
			DisableKeepAlives: true,
		},
	},
	Timeout: time.Minute,
}
// timeoutTransport wraps http.Transport and closes idle connections
// whenever the Timeout instant has passed (see RoundTrip).
type timeoutTransport struct {
	*http.Transport
	Timeout time.Time // next time idle connections should be dropped
}
// RoundTrip closes idle connections every five minutes, then delegates
// to the wrapped transport.
// NOTE(review): t.Timeout is read and written without synchronization;
// concurrent requests could race here - confirm whether that matters
// for this relay's traffic.
func (t *timeoutTransport) RoundTrip(r *http.Request) (*http.Response, error) {
	if time.Now().After(t.Timeout) {
		t.Transport.CloseIdleConnections()
		t.Timeout = time.Now().Add(time.Minute * 5)
	}
	return t.Transport.RoundTrip(r)
}
// Relay returns a handler that forwards incoming requests to the
// OpenTSDB host dest, mirroring status code and body back to the
// caller. Metadata puts are diverted to metaHandler, and /api/put
// bodies are additionally decoded (gzip tolerated) so the datapoints
// can be indexed for search before being relayed.
func Relay(dest string, metaHandler http.Handler) func(http.ResponseWriter, *http.Request) {
	// clean sanitizes a value for use as an OpenTSDB tag value.
	clean := func(s string) string {
		return opentsdb.MustReplace(s, "_")
	}
	return func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/api/metadata/put" {
			metaHandler.ServeHTTP(w, r)
			return
		}
		// Read the whole body up front so it can be both inspected and
		// re-sent downstream. NOTE(review): the read error is ignored;
		// a truncated body would be relayed silently.
		orig, _ := ioutil.ReadAll(r.Body)
		if r.URL.Path == "/api/put" {
			var body []byte
			// Bodies may be gzipped; fall back to the raw bytes if not.
			if r, err := gzip.NewReader(bytes.NewReader(orig)); err == nil {
				body, _ = ioutil.ReadAll(r)
				r.Close()
			} else {
				body = orig
			}
			var dp opentsdb.DataPoint
			var mdp opentsdb.MultiDataPoint
			// Accept either a list of datapoints or a single one.
			if err := json.Unmarshal(body, &mdp); err == nil {
			} else if err = json.Unmarshal(body, &dp); err == nil {
				mdp = opentsdb.MultiDataPoint{&dp}
			}
			if len(mdp) > 0 {
				ra := strings.Split(r.RemoteAddr, ":")[0]
				tags := opentsdb.TagSet{"remote": clean(ra)}
				collect.Add("search.puts_relayed", tags, 1)
				collect.Add("search.datapoints_relayed", tags, int64(len(mdp)))
				schedule.Search.Index(mdp)
			}
		}
		// Rebuild the URL against the relay destination, preserving
		// path, query and fragment.
		durl := url.URL{
			Scheme: "http",
			Host:   dest,
		}
		durl.Path = r.URL.Path
		durl.RawQuery = r.URL.RawQuery
		durl.Fragment = r.URL.Fragment
		req, err := http.NewRequest(r.Method, durl.String(), bytes.NewReader(orig))
		if err != nil {
			log.Println("relay NewRequest err:", err)
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(err.Error()))
			return
		}
		req.Header = r.Header
		req.TransferEncoding = r.TransferEncoding
		req.ContentLength = r.ContentLength
		resp, err := client.Do(req)
		tags := opentsdb.TagSet{"path": clean(r.URL.Path), "remote": clean(strings.Split(r.RemoteAddr, ":")[0])}
		if err != nil {
			log.Println("relay Do err:", err)
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(err.Error()))
			collect.Add("relay.do_err", tags, 1)
			return
		}
		// Mirror the downstream status and body to the original caller.
		b, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		tags["status"] = strconv.Itoa(resp.StatusCode)
		collect.Add("relay.response", tags, 1)
		w.WriteHeader(resp.StatusCode)
		w.Write(b)
	}
}
// Index serves the single-page app shell (index.html). As a special
// case, /graph?png=... is rendered directly by the Graph handler so a
// PNG can be fetched without loading the UI.
func Index(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == "/graph" {
		r.ParseForm()
		if _, present := r.Form["png"]; present {
			if _, err := Graph(t, w, r); err != nil {
				serveError(w, err)
			}
			return
		}
	}
	err := templates.ExecuteTemplate(w, "index.html", struct {
		Includes template.HTML
	}{
		t.Includes(), // miniprofiler script/css includes for the page
	})
	if err != nil {
		serveError(w, err)
	}
}
// serveError writes err as a plain-text HTTP 500 response.
func serveError(w http.ResponseWriter, err error) {
	http.Error(w, err.Error(), http.StatusInternalServerError)
}
// JSON adapts a handler returning (value, error) into an http.Handler:
// errors become HTTP 500, a nil value means the handler wrote its own
// response, and otherwise the value is JSON-marshalled. If a
// "callback" form value is present the response is JSONP.
func JSON(h func(miniprofiler.Timer, http.ResponseWriter, *http.Request) (interface{}, error)) http.Handler {
	return miniprofiler.NewHandler(func(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) {
		d, err := h(t, w, r)
		if err != nil {
			serveError(w, err)
			return
		}
		if d == nil {
			// Handler produced its own response (e.g. a 204).
			return
		}
		b, err := json.Marshal(d)
		if err != nil {
			serveError(w, err)
			return
		}
		if cb := r.FormValue("callback"); cb != "" {
			// BUG FIX (correct jsonp content type): a JSONP response is
			// executable script, not JSON, and previously went out with
			// no Content-Type at all.
			w.Header().Add("Content-Type", "application/javascript")
			w.Write([]byte(cb + "("))
			w.Write(b)
			w.Write([]byte(")"))
			return
		}
		w.Header().Add("Content-Type", "application/json")
		w.Write(b)
	})
}
// Health is the payload returned by the /api/health endpoint.
type Health struct {
	// RuleCheck is true if last check happened within the check frequency window.
	RuleCheck bool
}
// HealthCheck reports whether the scheduler's last rule check started
// within one check-frequency window of now.
func HealthCheck(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	var h Health
	h.RuleCheck = schedule.CheckStart.After(time.Now().Add(-schedule.Conf.CheckFrequency))
	return h, nil
}
// PutMetadata decodes a JSON array of metadata entries from the request
// body and stores each one in the schedule. Responds 204 No Content on
// success; the nil result tells the JSON wrapper not to write a body.
func PutMetadata(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	d := json.NewDecoder(r.Body)
	var ms []metadata.Metasend
	if err := d.Decode(&ms); err != nil {
		return nil, err
	}
	for _, m := range ms {
		schedule.PutMetadata(metadata.Metakey{
			Metric: m.Metric,
			Tags:   m.Tags.Tags(),
			Name:   m.Name,
		}, m.Value)
	}
	w.WriteHeader(204)
	return nil, nil
}
// GetMetadata looks up metadata for the metric and tag set given in
// the request's tagk/tagv form values. The tagk and tagv values pair
// up positionally; an unmatched tagk is an error.
func GetMetadata(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	r.ParseForm()
	keys := r.Form["tagk"]
	vals := r.Form["tagv"]
	if len(keys) > len(vals) {
		return nil, fmt.Errorf("unpaired tagk/tagv")
	}
	tags := make(opentsdb.TagSet)
	for i, k := range keys {
		tags[k] = vals[i]
	}
	return schedule.GetMetadata(r.FormValue("metric"), tags), nil
}
// Alerts returns the whole schedule, which the JSON wrapper serializes
// as the alerts response.
func Alerts(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	return schedule, nil
}
// AlertDetails returns the scheduler state for each alert key listed
// in the request's "key" form values. Unknown keys are an error.
func AlertDetails(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	r.ParseForm()
	states := make(sched.States)
	for _, v := range r.Form["key"] {
		k := expr.AlertKey(v)
		s := schedule.Status(k)
		if s == nil {
			return nil, fmt.Errorf("unknown key: %v", v)
		}
		states[k] = s
	}
	return states, nil
}
// Action applies a user action (ack, close or forget) to each of the
// alert keys in the JSON request body. Per-key failures are collected
// into a MultiError; any failure makes the whole request an error.
func Action(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	var data struct {
		Type    string
		User    string
		Message string
		Keys    []string
	}
	j := json.NewDecoder(r.Body)
	if err := j.Decode(&data); err != nil {
		return nil, err
	}
	var at sched.ActionType
	switch data.Type {
	case "ack":
		at = sched.ActionAcknowledge
	case "close":
		at = sched.ActionClose
	case "forget":
		at = sched.ActionForget
	default:
		// BUG FIX: previously an unknown type fell through with the
		// zero ActionType and the action was silently applied.
		return nil, fmt.Errorf("unknown action type: %v", data.Type)
	}
	errs := make(MultiError)
	r.ParseForm()
	for _, key := range data.Keys {
		err := schedule.Action(data.User, data.Message, at, expr.AlertKey(key))
		if err != nil {
			errs[key] = err
		}
	}
	if len(errs) != 0 {
		return nil, errs
	}
	return nil, nil
}
// MultiError collects per-key errors and satisfies the error interface
// by printing the underlying map.
type MultiError map[string]error

func (m MultiError) Error() string {
	return fmt.Sprint(map[string]error(m))
}
// SilenceGet returns the schedule's current set of silences.
func SilenceGet(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	return schedule.Silence, nil
}
// silenceLayouts lists the accepted time formats for silence start and
// end values, tried in order (with and without seconds / zone).
var silenceLayouts = []string{
	"2006-01-02 15:04:05 MST",
	"2006-01-02 15:04:05 -0700",
	"2006-01-02 15:04 MST",
	"2006-01-02 15:04 -0700",
	"2006-01-02 15:04:05",
	"2006-01-02 15:04",
}
// SilenceSet creates (or edits) a silence from the JSON request body.
// "start" and "end" accept any layout in silenceLayouts; a missing
// start defaults to now (UTC) and a missing end is computed from
// "duration".
func SilenceSet(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	var start, end time.Time
	var err error
	var data map[string]string
	j := json.NewDecoder(r.Body)
	if err := j.Decode(&data); err != nil {
		return nil, err
	}
	if s := data["start"]; s != "" {
		// Try each accepted layout; stop at the first that parses.
		for _, layout := range silenceLayouts {
			start, err = time.Parse(layout, s)
			if err == nil {
				break
			}
		}
		// If every layout failed, start is still the zero time.
		if start.IsZero() {
			return nil, fmt.Errorf("unrecognized start time format: %s", s)
		}
	}
	if s := data["end"]; s != "" {
		for _, layout := range silenceLayouts {
			end, err = time.Parse(layout, s)
			if err == nil {
				break
			}
		}
		if end.IsZero() {
			return nil, fmt.Errorf("unrecognized end time format: %s", s)
		}
	}
	if start.IsZero() {
		start = time.Now().UTC()
	}
	if end.IsZero() {
		d, err := time.ParseDuration(data["duration"])
		if err != nil {
			return nil, err
		}
		end = start.Add(d)
	}
	return schedule.AddSilence(start, end, data["alert"], data["tags"], len(data["confirm"]) > 0, data["edit"])
}
// SilenceClear removes the silence whose "id" is given in the JSON
// request body.
func SilenceClear(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	var data map[string]string
	if err := json.NewDecoder(r.Body).Decode(&data); err != nil {
		return nil, err
	}
	err := schedule.ClearSilence(data["id"])
	return nil, err
}
// ConfigTest validates the posted config_text by parsing it; any parse
// error is written to the response body (an empty body means valid).
func ConfigTest(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) {
	_, err := conf.New("test", r.FormValue("config_text"))
	if err != nil {
		fmt.Fprint(w, err.Error())
	}
}
// Config writes the raw text of the currently loaded configuration.
func Config(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, schedule.Conf.RawText)
}
// nilFunc is a placeholder body for template functions: Templates only
// needs the parser to accept the names, never to execute them.
func nilFunc() {}

// builtins maps every function name that may appear in a bosun
// template to a no-op, so text/template/parse can parse template
// definitions without the real implementations.
var builtins = template.FuncMap{
	"and":      nilFunc,
	"call":     nilFunc,
	"html":     nilFunc,
	"index":    nilFunc,
	"js":       nilFunc,
	"len":      nilFunc,
	"not":      nilFunc,
	"or":       nilFunc,
	"print":    nilFunc,
	"printf":   nilFunc,
	"println":  nilFunc,
	"urlquery": nilFunc,
	"eq":       nilFunc,
	"ge":       nilFunc,
	"gt":       nilFunc,
	"le":       nilFunc,
	"lt":       nilFunc,
	"ne":       nilFunc,

	// HTML-specific funcs
	"html_template_attrescaper":     nilFunc,
	"html_template_commentescaper":  nilFunc,
	"html_template_cssescaper":      nilFunc,
	"html_template_cssvaluefilter":  nilFunc,
	"html_template_htmlnamefilter":  nilFunc,
	"html_template_htmlescaper":     nilFunc,
	"html_template_jsregexpescaper": nilFunc,
	"html_template_jsstrescaper":    nilFunc,
	"html_template_jsvalescaper":    nilFunc,
	"html_template_nospaceescaper":  nilFunc,
	"html_template_rcdataescaper":   nilFunc,
	"html_template_urlescaper":      nilFunc,
	"html_template_urlfilter":       nilFunc,
	"html_template_urlnormalizer":   nilFunc,

	// bosun-specific funcs
	"V":       nilFunc,
	"bytes":   nilFunc,
	"replace": nilFunc,
	"short":   nilFunc,
}
// Templates returns, for the UI, the full source of every notification
// template (with the definitions of any templates it includes appended)
// and of every alert (with the definitions of its macros prepended).
func Templates(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	templates := make(map[string]string)
	for name, template := range schedule.Conf.Templates {
		// incl tracks every template name reachable from this one.
		incl := map[string]bool{name: true}
		var parseSection func(*conf.Template) error
		// parseTemplate walks the parse tree of s and recursively
		// records any {{template "x"}} inclusions.
		parseTemplate := func(s string) error {
			trees, err := parse.Parse("", s, "", "", builtins)
			if err != nil {
				return err
			}
			for _, node := range trees[""].Root.Nodes {
				switch node := node.(type) {
				case *parse.TemplateNode:
					if incl[node.Name] {
						continue
					}
					incl[node.Name] = true
					if err := parseSection(schedule.Conf.Templates[node.Name]); err != nil {
						return err
					}
				}
			}
			return nil
		}
		parseSection = func(s *conf.Template) error {
			// BUG FIX: guard against a nil template (an inclusion of an
			// undefined name), which previously dereferenced nil.
			if s == nil {
				return nil
			}
			if s.Body != nil {
				if err := parseTemplate(s.Body.Tree.Root.String()); err != nil {
					return err
				}
			}
			if s.Subject != nil {
				if err := parseTemplate(s.Subject.Tree.Root.String()); err != nil {
					return err
				}
			}
			return nil
		}
		if err := parseSection(template); err != nil {
			return nil, err
		}
		delete(incl, name)
		templates[name] = template.Def
		for n := range incl {
			t := schedule.Conf.Templates[n]
			if t == nil {
				continue
			}
			templates[name] += "\n\n" + t.Def
		}
	}
	alerts := make(map[string]string)
	for name, alert := range schedule.Conf.Alerts {
		// add prepends the definition of every macro (transitively)
		// used by the alert.
		var add func([]string)
		add = func(macros []string) {
			for _, macro := range macros {
				m := schedule.Conf.Macros[macro]
				// BUG FIX: an unknown macro name previously caused a
				// nil-pointer dereference on m.Macros.
				if m == nil {
					continue
				}
				add(m.Macros)
				alerts[name] += m.Def + "\n\n"
			}
		}
		add(alert.Macros)
		alerts[name] += alert.Def
	}
	return struct {
		Templates map[string]string
		Alerts    map[string]string
	}{
		templates,
		alerts,
	}, nil
}
Correct jsonp content type
package web
import (
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"net/url"
"path/filepath"
"strconv"
"strings"
"text/template/parse"
"time"
"github.com/StackExchange/bosun/_third_party/github.com/MiniProfiler/go/miniprofiler"
"github.com/StackExchange/bosun/_third_party/github.com/StackExchange/scollector/collect"
"github.com/StackExchange/bosun/_third_party/github.com/StackExchange/scollector/metadata"
"github.com/StackExchange/bosun/_third_party/github.com/StackExchange/scollector/opentsdb"
"github.com/StackExchange/bosun/_third_party/github.com/gorilla/mux"
"github.com/StackExchange/bosun/conf"
"github.com/StackExchange/bosun/expr"
"github.com/StackExchange/bosun/sched"
)
// Package-level state shared by all handlers.
var (
	templates *template.Template // parsed index template, set in Listen
	router    = mux.NewRouter()  // routes for everything under /api/
	schedule  = sched.DefaultSched
)
// Configure the miniprofiler UI defaults before any handler runs.
func init() {
	miniprofiler.Position = "bottomleft"
	miniprofiler.StartHidden = true
}
// Listen parses the web templates, registers all API routes and static
// file handlers, starts the OpenTSDB relay on relayListen, and then
// serves HTTP on addr, blocking until the server fails.
func Listen(addr, dir, host, relayListen string) error {
	var err error
	templates, err = template.New("").ParseFiles(
		dir + "/templates/index.html",
	)
	if err != nil {
		// NOTE(review): log.Fatal here kills the process even though
		// this function returns an error - consider returning err.
		log.Fatal(err)
	}
	RelayHTTP(relayListen, host, JSON(PutMetadata))
	router.Handle("/api/action", JSON(Action))
	router.Handle("/api/alerts", JSON(Alerts))
	router.Handle("/api/alerts/details", JSON(AlertDetails))
	router.Handle("/api/config", miniprofiler.NewHandler(Config))
	router.Handle("/api/config_test", miniprofiler.NewHandler(ConfigTest))
	router.Handle("/api/egraph/{bs}.svg", JSON(ExprGraph))
	router.Handle("/api/expr", JSON(Expr))
	router.Handle("/api/graph", JSON(Graph))
	router.Handle("/api/health", JSON(HealthCheck))
	router.Handle("/api/metadata/get", JSON(GetMetadata))
	router.Handle("/api/metadata/put", JSON(PutMetadata))
	router.Handle("/api/metric", JSON(UniqueMetrics))
	router.Handle("/api/metric/{tagk}/{tagv}", JSON(MetricsByTagPair))
	router.Handle("/api/rule", JSON(Rule))
	router.Handle("/api/silence/clear", JSON(SilenceClear))
	router.Handle("/api/silence/get", JSON(SilenceGet))
	router.Handle("/api/silence/set", JSON(SilenceSet))
	router.Handle("/api/tagk/{metric}", JSON(TagKeysByMetric))
	router.Handle("/api/tagv/{tagk}", JSON(TagValuesByTagKey))
	router.Handle("/api/tagv/{tagk}/{metric}", JSON(TagValuesByMetricTagKey))
	router.Handle("/api/templates", JSON(Templates))
	router.HandleFunc("/api/put", Relay(host, JSON(PutMetadata)))
	http.Handle("/", miniprofiler.NewHandler(Index))
	http.Handle("/api/", router)
	fs := http.FileServer(http.Dir(dir))
	http.Handle("/partials/", fs)
	http.Handle("/static/", fs)
	static := http.FileServer(http.Dir(filepath.Join(dir, "static")))
	http.Handle("/favicon.ico", static)
	log.Println("bosun web listening on:", addr)
	log.Println("bosun web directory:", dir)
	return http.ListenAndServe(addr, nil)
}
// RelayHTTP starts (in a background goroutine) an HTTP server on addr that
// relays all traffic to the OpenTSDB instance at dest, diverting metadata puts
// to metaHandler. The goroutine log.Fatals if the listener fails.
func RelayHTTP(addr, dest string, metaHandler http.Handler) {
	relayMux := http.NewServeMux()
	relayMux.HandleFunc("/", Relay(dest, metaHandler))
	log.Println("OpenTSDB relay listening on:", addr)
	log.Println("OpenTSDB destination:", dest)
	go func() {
		log.Fatal(http.ListenAndServe(addr, relayMux))
	}()
}
// client is the shared HTTP client used for relaying requests. Keep-alives are
// disabled and each request has a one-minute overall timeout.
var client *http.Client = &http.Client{
	Transport: &timeoutTransport{
		Transport: &http.Transport{
			DisableKeepAlives: true,
		},
	},
	Timeout: time.Minute,
}
// timeoutTransport wraps an http.Transport and periodically closes idle
// connections: whenever a request is made after the Timeout deadline has
// passed, idle connections are dropped and the deadline is pushed out
// five minutes.
type timeoutTransport struct {
	*http.Transport
	// Timeout is the next instant at which idle connections will be closed.
	Timeout time.Time
}

// RoundTrip implements http.RoundTripper, closing idle connections when the
// deadline has elapsed before delegating to the wrapped Transport.
// NOTE(review): Timeout is read and written without synchronization while the
// transport is shared by the package-level client; presumably concurrent
// requests can race on this field — TODO confirm whether this needs a lock.
func (t *timeoutTransport) RoundTrip(r *http.Request) (*http.Response, error) {
	if time.Now().After(t.Timeout) {
		t.Transport.CloseIdleConnections()
		t.Timeout = time.Now().Add(time.Minute * 5)
	}
	return t.Transport.RoundTrip(r)
}
// Relay returns an HTTP handler func that proxies requests to the OpenTSDB
// instance at dest. Metadata puts are diverted to metaHandler; datapoint puts
// to /api/put are additionally decoded (gunzipped when compressed) and fed to
// the local search index before being forwarded.
func Relay(dest string, metaHandler http.Handler) func(http.ResponseWriter, *http.Request) {
	// clean sanitizes a string into a valid OpenTSDB tag value.
	clean := func(s string) string {
		return opentsdb.MustReplace(s, "_")
	}
	return func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/api/metadata/put" {
			metaHandler.ServeHTTP(w, r)
			return
		}
		// Read the full body up front so it can be inspected locally and
		// re-sent to the destination. The read error is deliberately ignored;
		// a truncated body is simply forwarded as-is.
		orig, _ := ioutil.ReadAll(r.Body)
		if r.URL.Path == "/api/put" {
			var body []byte
			// Put bodies may be gzip-compressed; fall back to the raw bytes
			// when gzip decoding fails.
			if r, err := gzip.NewReader(bytes.NewReader(orig)); err == nil {
				body, _ = ioutil.ReadAll(r)
				r.Close()
			} else {
				body = orig
			}
			// Accept either a JSON array of datapoints or a single datapoint
			// object, normalizing to a MultiDataPoint.
			var dp opentsdb.DataPoint
			var mdp opentsdb.MultiDataPoint
			if err := json.Unmarshal(body, &mdp); err == nil {
			} else if err = json.Unmarshal(body, &dp); err == nil {
				mdp = opentsdb.MultiDataPoint{&dp}
			}
			if len(mdp) > 0 {
				// Record relay metrics tagged by sender address and index the
				// relayed datapoints for local search.
				ra := strings.Split(r.RemoteAddr, ":")[0]
				tags := opentsdb.TagSet{"remote": clean(ra)}
				collect.Add("search.puts_relayed", tags, 1)
				collect.Add("search.datapoints_relayed", tags, int64(len(mdp)))
				schedule.Search.Index(mdp)
			}
		}
		// Rebuild the URL against the relay destination, preserving path,
		// query string, and fragment.
		durl := url.URL{
			Scheme: "http",
			Host:   dest,
		}
		durl.Path = r.URL.Path
		durl.RawQuery = r.URL.RawQuery
		durl.Fragment = r.URL.Fragment
		req, err := http.NewRequest(r.Method, durl.String(), bytes.NewReader(orig))
		if err != nil {
			log.Println("relay NewRequest err:", err)
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(err.Error()))
			return
		}
		// Forward the original headers and length metadata unchanged.
		req.Header = r.Header
		req.TransferEncoding = r.TransferEncoding
		req.ContentLength = r.ContentLength
		resp, err := client.Do(req)
		tags := opentsdb.TagSet{"path": clean(r.URL.Path), "remote": clean(strings.Split(r.RemoteAddr, ":")[0])}
		if err != nil {
			log.Println("relay Do err:", err)
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(err.Error()))
			collect.Add("relay.do_err", tags, 1)
			return
		}
		// Mirror the destination's status and body back to the caller.
		b, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		tags["status"] = strconv.Itoa(resp.StatusCode)
		collect.Add("relay.response", tags, 1)
		w.WriteHeader(resp.StatusCode)
		w.Write(b)
	}
}
// Index serves the single-page app's index template. As a special case,
// /graph?png=... is rendered directly as a PNG via the Graph handler.
func Index(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == "/graph" {
		r.ParseForm()
		if _, present := r.Form["png"]; present {
			if _, err := Graph(t, w, r); err != nil {
				serveError(w, err)
			}
			return
		}
	}
	// Render index.html, injecting the miniprofiler include snippet.
	err := templates.ExecuteTemplate(w, "index.html", struct {
		Includes template.HTML
	}{
		t.Includes(),
	})
	if err != nil {
		serveError(w, err)
	}
}
func serveError(w http.ResponseWriter, err error) {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
// JSON wraps a handler returning (value, error) into an http.Handler that
// serializes the value as JSON (or JSONP when a "callback" form value is
// present). A nil value with nil error writes nothing, letting the handler
// manage the response itself.
func JSON(h func(miniprofiler.Timer, http.ResponseWriter, *http.Request) (interface{}, error)) http.Handler {
	return miniprofiler.NewHandler(func(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) {
		d, err := h(t, w, r)
		if err != nil {
			serveError(w, err)
			return
		}
		// nil result means the handler already wrote its own response.
		if d == nil {
			return
		}
		b, err := json.Marshal(d)
		if err != nil {
			serveError(w, err)
			return
		}
		// JSONP: wrap the payload in the requested callback function.
		if cb := r.FormValue("callback"); cb != "" {
			w.Header().Add("Content-Type", "application/javascript")
			w.Write([]byte(cb + "("))
			w.Write(b)
			w.Write([]byte(")"))
			return
		}
		w.Header().Add("Content-Type", "application/json")
		w.Write(b)
	})
}
// Health is the payload returned by the /api/health endpoint.
type Health struct {
	// RuleCheck is true if last check happened within the check frequency window.
	RuleCheck bool
}
// HealthCheck reports whether the scheduler's last rule check started within
// the configured check frequency window.
func HealthCheck(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	var h Health
	h.RuleCheck = schedule.CheckStart.After(time.Now().Add(-schedule.Conf.CheckFrequency))
	return h, nil
}
// PutMetadata decodes a JSON array of metadata entries from the request body
// and stores each in the scheduler, replying with 204 No Content on success.
func PutMetadata(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	d := json.NewDecoder(r.Body)
	var ms []metadata.Metasend
	if err := d.Decode(&ms); err != nil {
		return nil, err
	}
	for _, m := range ms {
		schedule.PutMetadata(metadata.Metakey{
			Metric: m.Metric,
			Tags:   m.Tags.Tags(),
			Name:   m.Name,
		}, m.Value)
	}
	// 204: success with no response body (JSON wrapper writes nothing for nil).
	w.WriteHeader(204)
	return nil, nil
}
// GetMetadata returns stored metadata for the metric and tag set described by
// the request's "metric", "tagk", and "tagv" form values. Every tagk must have
// a matching tagv at the same position.
func GetMetadata(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	r.ParseForm()
	tagks := r.Form["tagk"]
	tagvs := r.Form["tagv"]
	if len(tagvs) < len(tagks) {
		return nil, fmt.Errorf("unpaired tagk/tagv")
	}
	tags := make(opentsdb.TagSet)
	for i, k := range tagks {
		tags[k] = tagvs[i]
	}
	return schedule.GetMetadata(r.FormValue("metric"), tags), nil
}
// Alerts returns the entire scheduler state for JSON serialization.
func Alerts(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	return schedule, nil
}
// AlertDetails returns the scheduler status for each alert key named in the
// request's repeated "key" form values, erroring on any unknown key.
func AlertDetails(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	r.ParseForm()
	states := make(sched.States)
	for _, v := range r.Form["key"] {
		k := expr.AlertKey(v)
		s := schedule.Status(k)
		if s == nil {
			return nil, fmt.Errorf("unknown key: %v", v)
		}
		states[k] = s
	}
	return states, nil
}
// Action applies a user action (ack/close/forget) to each alert key in the
// JSON request body, collecting per-key failures into a MultiError.
func Action(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	var data struct {
		Type    string
		User    string
		Message string
		Keys    []string
	}
	j := json.NewDecoder(r.Body)
	if err := j.Decode(&data); err != nil {
		return nil, err
	}
	// NOTE(review): an unrecognized Type leaves at as the zero ActionType —
	// presumably that maps to one of the defined actions; confirm whether an
	// explicit error for unknown types is wanted.
	var at sched.ActionType
	switch data.Type {
	case "ack":
		at = sched.ActionAcknowledge
	case "close":
		at = sched.ActionClose
	case "forget":
		at = sched.ActionForget
	}
	errs := make(MultiError)
	// NOTE(review): ParseForm here consumes the (already-read) body; its
	// result is unused — confirm whether this call is still needed.
	r.ParseForm()
	for _, key := range data.Keys {
		err := schedule.Action(data.User, data.Message, at, expr.AlertKey(key))
		if err != nil {
			errs[key] = err
		}
	}
	if len(errs) != 0 {
		return nil, errs
	}
	return nil, nil
}
type MultiError map[string]error
func (m MultiError) Error() string {
return fmt.Sprint(map[string]error(m))
}
// SilenceGet returns the scheduler's current set of silences.
func SilenceGet(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	return schedule.Silence, nil
}
// silenceLayouts lists the accepted time formats for silence start/end values,
// tried in order from most to least specific.
var silenceLayouts = []string{
	"2006-01-02 15:04:05 MST",
	"2006-01-02 15:04:05 -0700",
	"2006-01-02 15:04 MST",
	"2006-01-02 15:04 -0700",
	"2006-01-02 15:04:05",
	"2006-01-02 15:04",
}
// SilenceSet creates or edits a silence from the JSON request body. The start
// defaults to now and the end to start+duration; explicit times are parsed
// against silenceLayouts.
func SilenceSet(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	var start, end time.Time
	var err error
	var data map[string]string
	j := json.NewDecoder(r.Body)
	if err := j.Decode(&data); err != nil {
		return nil, err
	}
	if s := data["start"]; s != "" {
		// Try each accepted layout until one parses.
		for _, layout := range silenceLayouts {
			start, err = time.Parse(layout, s)
			if err == nil {
				break
			}
		}
		// A still-zero start means every layout failed.
		if start.IsZero() {
			return nil, fmt.Errorf("unrecognized start time format: %s", s)
		}
	}
	if s := data["end"]; s != "" {
		for _, layout := range silenceLayouts {
			end, err = time.Parse(layout, s)
			if err == nil {
				break
			}
		}
		if end.IsZero() {
			return nil, fmt.Errorf("unrecognized end time format: %s", s)
		}
	}
	// Defaults: start now, end computed from the "duration" field.
	if start.IsZero() {
		start = time.Now().UTC()
	}
	if end.IsZero() {
		d, err := time.ParseDuration(data["duration"])
		if err != nil {
			return nil, err
		}
		end = start.Add(d)
	}
	return schedule.AddSilence(start, end, data["alert"], data["tags"], len(data["confirm"]) > 0, data["edit"])
}
// SilenceClear removes the silence whose "id" is supplied in the JSON request
// body.
func SilenceClear(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	var data map[string]string
	if err := json.NewDecoder(r.Body).Decode(&data); err != nil {
		return nil, err
	}
	return nil, schedule.ClearSilence(data["id"])
}
// ConfigTest validates the "config_text" form value by attempting to parse it
// as a bosun config, writing any parse error as the plain-text response body.
func ConfigTest(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) {
	_, err := conf.New("test", r.FormValue("config_text"))
	if err != nil {
		fmt.Fprint(w, err.Error())
	}
}
// Config writes the raw text of the currently loaded configuration.
func Config(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, schedule.Conf.RawText)
}
func nilFunc() {}
// builtins maps every function name templates may reference to a no-op, so
// template bodies can be parsed (in Templates) without executing them.
var builtins = template.FuncMap{
	"and":      nilFunc,
	"call":     nilFunc,
	"html":     nilFunc,
	"index":    nilFunc,
	"js":       nilFunc,
	"len":      nilFunc,
	"not":      nilFunc,
	"or":       nilFunc,
	"print":    nilFunc,
	"printf":   nilFunc,
	"println":  nilFunc,
	"urlquery": nilFunc,
	"eq":       nilFunc,
	"ge":       nilFunc,
	"gt":       nilFunc,
	"le":       nilFunc,
	"lt":       nilFunc,
	"ne":       nilFunc,

	// HTML-specific funcs
	"html_template_attrescaper":     nilFunc,
	"html_template_commentescaper":  nilFunc,
	"html_template_cssescaper":      nilFunc,
	"html_template_cssvaluefilter":  nilFunc,
	"html_template_htmlnamefilter":  nilFunc,
	"html_template_htmlescaper":     nilFunc,
	"html_template_jsregexpescaper": nilFunc,
	"html_template_jsstrescaper":    nilFunc,
	"html_template_jsvalescaper":    nilFunc,
	"html_template_nospaceescaper":  nilFunc,
	"html_template_rcdataescaper":   nilFunc,
	"html_template_urlescaper":      nilFunc,
	"html_template_urlfilter":       nilFunc,
	"html_template_urlnormalizer":   nilFunc,

	// bosun-specific funcs
	"V":       nilFunc,
	"bytes":   nilFunc,
	"replace": nilFunc,
	"short":   nilFunc,
}
// Templates returns, for every configured template and alert, its definition
// text with all transitively referenced templates/macros appended, so the UI
// can show each item's complete source.
func Templates(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	templates := make(map[string]string)
	for name, template := range schedule.Conf.Templates {
		// incl tracks which template names have been pulled in already,
		// seeded with self to avoid re-including it.
		incl := map[string]bool{name: true}
		var parseSection func(*conf.Template) error
		// parseTemplate walks a template body's parse tree and recursively
		// resolves {{template "name"}} references.
		parseTemplate := func(s string) error {
			trees, err := parse.Parse("", s, "", "", builtins)
			if err != nil {
				return err
			}
			for _, node := range trees[""].Root.Nodes {
				switch node := node.(type) {
				case *parse.TemplateNode:
					if incl[node.Name] {
						continue
					}
					incl[node.Name] = true
					if err := parseSection(schedule.Conf.Templates[node.Name]); err != nil {
						return err
					}
				}
			}
			return nil
		}
		// parseSection scans both the body and subject of a template, when present.
		parseSection = func(s *conf.Template) error {
			if s.Body != nil {
				if err := parseTemplate(s.Body.Tree.Root.String()); err != nil {
					return err
				}
			}
			if s.Subject != nil {
				if err := parseTemplate(s.Subject.Tree.Root.String()); err != nil {
					return err
				}
			}
			return nil
		}
		if err := parseSection(template); err != nil {
			return nil, err
		}
		// Emit this template's own definition followed by each dependency's.
		delete(incl, name)
		templates[name] = template.Def
		for n := range incl {
			t := schedule.Conf.Templates[n]
			if t == nil {
				continue
			}
			templates[name] += "\n\n" + t.Def
		}
	}
	alerts := make(map[string]string)
	for name, alert := range schedule.Conf.Alerts {
		// add recursively prepends macro definitions, depth-first.
		// NOTE(review): an unknown macro name would make m nil and m.Macros
		// panic — presumably config validation guarantees macros exist; confirm.
		var add func([]string)
		add = func(macros []string) {
			for _, macro := range macros {
				m := schedule.Conf.Macros[macro]
				add(m.Macros)
				alerts[name] += m.Def + "\n\n"
			}
		}
		add(alert.Macros)
		alerts[name] += alert.Def
	}
	return struct {
		Templates map[string]string
		Alerts    map[string]string
	}{
		templates,
		alerts,
	}, nil
}
|
package wfe
import (
"context"
"crypto"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net"
"net/http"
"net/mail"
"net/url"
"os"
"path"
"sort"
"strconv"
"strings"
"time"
"unicode"
"gopkg.in/square/go-jose.v2"
"github.com/jmhodges/clock"
"github.com/letsencrypt/pebble/acme"
"github.com/letsencrypt/pebble/ca"
"github.com/letsencrypt/pebble/core"
"github.com/letsencrypt/pebble/db"
"github.com/letsencrypt/pebble/va"
)
// Endpoint paths and protocol limits for the Pebble ACME test server.
const (
	// Note: We deliberately pick endpoint paths that differ from Boulder to
	// exercise clients processing of the /directory response
	directoryPath     = "/dir"
	noncePath         = "/nonce-plz"
	newAccountPath    = "/sign-me-up"
	acctPath          = "/my-account/"
	newOrderPath      = "/order-plz"
	orderPath         = "/my-order/"
	orderFinalizePath = "/finalize-order/"
	authzPath         = "/authZ/"
	challengePath     = "/chalZ/"
	certPath          = "/certZ/"
	revokeCertPath    = "/revoke-cert"
	rootCertPath      = "/root"
	keyRolloverPath   = "/rollover-account-key"

	// How long do pending authorizations last before expiring?
	pendingAuthzExpire = time.Hour

	// How many contacts is an account allowed to have?
	maxContactsPerAcct = 2

	// badNonceEnvVar defines the environment variable name used to provide
	// a percentage value for how often good nonces should be rejected as if they
	// were bad. This can be used to exercise client nonce handling/retries.
	// To have the WFE not reject any good nonces, run Pebble like:
	//   PEBBLE_WFE_NONCEREJECT=0 pebble
	// To have the WFE reject 15% of good nonces, run Pebble like:
	//   PEBBLE_WFE_NONCEREJECT=15 pebble
	badNonceEnvVar = "PEBBLE_WFE_NONCEREJECT"

	// By default when no PEBBLE_WFE_NONCEREJECT is set, what percentage of good
	// nonces are rejected?
	defaultNonceReject = 15

	// POST requests with a JWS body must have the following Content-Type header
	expectedJWSContentType = "application/jose+json"

	// RFC 1034 says DNS labels have a max of 63 octets, and names have a max of 255
	// octets: https://tools.ietf.org/html/rfc1035#page-10. Since two of those octets
	// are taken up by the leading length byte and the trailing root period the actual
	// max length becomes 253.
	maxDNSIdentifierLength = 253

	// Invalid revocation reason codes.
	// The full list of codes can be found in Section 8.5.3.1 of ITU-T X.509
	// http://www.itu.int/rec/T-REC-X.509-201210-I/en
	unusedRevocationReason       = 7
	aACompromiseRevocationReason = 10
)
// requestEvent captures per-request metadata (client address, endpoint,
// method, user agent) for logging purposes.
type requestEvent struct {
	ClientAddr string `json:",omitempty"`
	Endpoint   string `json:",omitempty"`
	Method     string `json:",omitempty"`
	UserAgent  string `json:",omitempty"`
}
// wfeHandlerFunc adapts an ordinary function to the wfeHandler interface.
type wfeHandlerFunc func(context.Context, *requestEvent, http.ResponseWriter, *http.Request)

// ServeHTTP invokes f with a fresh placeholder context.
func (f wfeHandlerFunc) ServeHTTP(e *requestEvent, w http.ResponseWriter, r *http.Request) {
	ctx := context.TODO()
	f(ctx, e, w, r)
}
// wfeHandler is an http.Handler-like interface that additionally receives the
// per-request event record.
type wfeHandler interface {
	ServeHTTP(e *requestEvent, w http.ResponseWriter, r *http.Request)
}
// topHandler bridges net/http to a wfeHandler, building the requestEvent from
// the incoming request before delegating.
type topHandler struct {
	wfe wfeHandler
}

// ServeHTTP implements http.Handler.
func (th *topHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// TODO(@cpu): consider restoring X-Forwarded-For handling for ClientAddr
	rEvent := &requestEvent{
		ClientAddr: r.RemoteAddr,
		Method:     r.Method,
		UserAgent:  r.Header.Get("User-Agent"),
	}
	th.wfe.ServeHTTP(rEvent, w, r)
}
// WebFrontEndImpl is Pebble's ACME web front end: it owns the HTTP handlers,
// nonce store, and references to the database, VA, and CA.
type WebFrontEndImpl struct {
	log   *log.Logger
	db    *db.MemoryStore
	nonce *nonceMap
	// nonceErrPercent is the percentage (0-100) of good nonces deliberately
	// rejected to exercise client retry logic.
	nonceErrPercent int
	clk             clock.Clock
	va              *va.VAImpl
	ca              *ca.CAImpl
	// strict enables extra request validation (e.g. Content-Type checks).
	strict bool
}
const ToSURL = "data:text/plain,Do%20what%20thou%20wilt"
// New constructs a WebFrontEndImpl wired to the given logger, clock, store,
// VA, and CA. The percentage of good nonces to reject is read from the
// PEBBLE_WFE_NONCEREJECT environment variable, defaulting to
// defaultNonceReject and clamped into a sensible range.
func New(
	log *log.Logger,
	clk clock.Clock,
	db *db.MemoryStore,
	va *va.VAImpl,
	ca *ca.CAImpl,
	strict bool) WebFrontEndImpl {
	// Start from the default and override it only when the environment
	// variable parses as a base-10 integer.
	nonceErrPercent := defaultNonceReject
	if parsed, err := strconv.ParseInt(os.Getenv(badNonceEnvVar), 10, 0); err == nil {
		nonceErrPercent = int(parsed)
	}
	// Clip out-of-range values (99 rather than 100 so some nonces succeed).
	switch {
	case nonceErrPercent < 0:
		nonceErrPercent = 0
	case nonceErrPercent > 100:
		nonceErrPercent = 99
	}
	log.Printf("Configured to reject %d%% of good nonces", nonceErrPercent)
	return WebFrontEndImpl{
		log:             log,
		db:              db,
		nonce:           newNonceMap(),
		nonceErrPercent: nonceErrPercent,
		clk:             clk,
		va:              va,
		ca:              ca,
		strict:          strict,
	}
}
// HandleFunc registers handler on mux at pattern, restricted to the given
// HTTP methods (GET implies HEAD). The wrapper also stamps a fresh
// Replay-Nonce header, no-cache headers, and a per-request timeout context
// on every request.
func (wfe *WebFrontEndImpl) HandleFunc(
	mux *http.ServeMux,
	pattern string,
	handler wfeHandlerFunc,
	methods ...string) {

	methodsMap := make(map[string]bool)
	for _, m := range methods {
		methodsMap[m] = true
	}
	if methodsMap["GET"] && !methodsMap["HEAD"] {
		// Allow HEAD for any resource that allows GET
		methods = append(methods, "HEAD")
		methodsMap["HEAD"] = true
	}
	// Precompute the Allow header value for 405 responses.
	methodsStr := strings.Join(methods, ", ")
	defaultHandler := http.StripPrefix(pattern,
		&topHandler{
			wfe: wfeHandlerFunc(func(ctx context.Context, logEvent *requestEvent, response http.ResponseWriter, request *http.Request) {
				// Every response carries a fresh nonce per ACME.
				response.Header().Set("Replay-Nonce", wfe.nonce.createNonce())
				logEvent.Endpoint = pattern
				if request.URL != nil {
					logEvent.Endpoint = path.Join(logEvent.Endpoint, request.URL.Path)
				}
				addNoCacheHeader(response)
				if !methodsMap[request.Method] {
					response.Header().Set("Allow", methodsStr)
					wfe.sendError(acme.MethodNotAllowed(), response)
					return
				}
				wfe.log.Printf("%s %s -> calling handler()\n", request.Method, logEvent.Endpoint)
				// TODO(@cpu): Configurable request timeout
				timeout := 1 * time.Minute
				ctx, cancel := context.WithTimeout(ctx, timeout)
				handler(ctx, logEvent, response, request)
				cancel()
			},
			)})
	mux.Handle(pattern, defaultHandler)
}
// sendError writes prob as an RFC 7807 problem document with the problem's
// HTTP status code, substituting a stock document if marshalling fails.
func (wfe *WebFrontEndImpl) sendError(prob *acme.ProblemDetails, response http.ResponseWriter) {
	problemDoc, err := marshalIndent(prob)
	if err != nil {
		problemDoc = []byte("{\"detail\": \"Problem marshalling error message.\"}")
	}
	response.Header().Set("Content-Type", "application/problem+json; charset=utf-8")
	response.WriteHeader(prob.HTTPStatus)
	response.Write(problemDoc)
}
// RootCert serves the CA's root certificate as a PEM chain, replying
// 503 Service Unavailable when the root is not yet available.
func (wfe *WebFrontEndImpl) RootCert(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {

	root := wfe.ca.GetRootCert()
	if root == nil {
		response.WriteHeader(http.StatusServiceUnavailable)
		return
	}
	response.Header().Set("Content-Type", "application/pem-certificate-chain; charset=utf-8")
	response.WriteHeader(http.StatusOK)
	_, _ = response.Write(root.PEM())
}
// Handler builds and returns the ServeMux with every ACME endpoint registered
// under its path and allowed methods.
func (wfe *WebFrontEndImpl) Handler() http.Handler {
	m := http.NewServeMux()
	wfe.HandleFunc(m, directoryPath, wfe.Directory, "GET")
	// Note for noncePath: "GET" also implies "HEAD"
	wfe.HandleFunc(m, noncePath, wfe.Nonce, "GET")
	wfe.HandleFunc(m, newAccountPath, wfe.NewAccount, "POST")
	wfe.HandleFunc(m, newOrderPath, wfe.NewOrder, "POST")
	wfe.HandleFunc(m, orderPath, wfe.Order, "GET")
	wfe.HandleFunc(m, orderFinalizePath, wfe.FinalizeOrder, "POST")
	wfe.HandleFunc(m, authzPath, wfe.Authz, "GET", "POST")
	wfe.HandleFunc(m, challengePath, wfe.Challenge, "GET", "POST")
	wfe.HandleFunc(m, certPath, wfe.Certificate, "GET")
	wfe.HandleFunc(m, acctPath, wfe.UpdateAccount, "POST")
	wfe.HandleFunc(m, keyRolloverPath, wfe.KeyRollover, "POST")
	wfe.HandleFunc(m, revokeCertPath, wfe.RevokeCert, "POST")
	wfe.HandleFunc(m, rootCertPath, wfe.RootCert, "GET")
	return m
}
// Directory serves the ACME directory object: a JSON map of resource names to
// absolute endpoint URLs, relativized against the request's host/protocol.
func (wfe *WebFrontEndImpl) Directory(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {

	directoryEndpoints := map[string]string{
		"newNonce":   noncePath,
		"newAccount": newAccountPath,
		"newOrder":   newOrderPath,
		"revokeCert": revokeCertPath,
		"keyChange":  keyRolloverPath,
	}
	response.Header().Set("Content-Type", "application/json; charset=utf-8")
	relDir, err := wfe.relativeDirectory(request, directoryEndpoints)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("unable to create directory"), response)
		return
	}
	response.Write(relDir)
}
// relativeDirectory converts a map of endpoint paths into a JSON directory
// document whose values are absolute URLs for the requesting client, plus the
// "meta" entry carrying the terms-of-service URL.
func (wfe *WebFrontEndImpl) relativeDirectory(request *http.Request, directory map[string]string) ([]byte, error) {
	// Create an empty map sized equal to the provided directory to store the
	// relative-ized result
	relativeDir := make(map[string]interface{}, len(directory))
	for k, v := range directory {
		relativeDir[k] = wfe.relativeEndpoint(request, v)
	}
	relativeDir["meta"] = map[string]string{
		"termsOfService": ToSURL,
	}
	directoryJSON, err := marshalIndent(relativeDir)
	// This should never happen since we are just marshalling known strings
	if err != nil {
		return nil, err
	}
	return directoryJSON, nil
}
// relativeEndpoint builds an absolute URL for endpoint using the request's
// host and protocol, honoring the X-Forwarded-Proto header from upstream
// proxies.
func (wfe *WebFrontEndImpl) relativeEndpoint(request *http.Request, endpoint string) string {
	// Protocol precedence: proxy-specified X-Forwarded-Proto, then https when
	// the request arrived over TLS, then plain http.
	proto := "http"
	if request.TLS != nil {
		proto = "https"
	}
	if forwarded := request.Header.Get("X-Forwarded-Proto"); forwarded != "" {
		proto = forwarded
	}
	// Fall back to "localhost" so an empty Host doesn't produce results like
	// `http:///acme/new-authz`.
	host := request.Host
	if host == "" {
		host = "localhost"
	}
	resultURL := url.URL{Scheme: proto, Host: host, Path: endpoint}
	return resultURL.String()
}
// Nonce replies 204 No Content; the Replay-Nonce header itself is added by the
// wrapper installed in HandleFunc, so no body is needed here.
func (wfe *WebFrontEndImpl) Nonce(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	response.WriteHeader(http.StatusNoContent)
}
// parseJWS parses body as a JWS, rejecting forms ACME v2 forbids: an
// unprotected "header" field, the JSON "signatures" array, and anything other
// than exactly one signature.
func (wfe *WebFrontEndImpl) parseJWS(body string) (*jose.JSONWebSignature, error) {
	// Parse the raw JWS JSON to check that:
	// * the unprotected Header field is not being used.
	// * the "signatures" member isn't present, just "signature".
	//
	// This must be done prior to `jose.parseSigned` since it will strip away
	// these headers.
	var unprotected struct {
		Header     map[string]string
		Signatures []interface{}
	}
	if err := json.Unmarshal([]byte(body), &unprotected); err != nil {
		return nil, errors.New("Parse error reading JWS")
	}
	// ACME v2 never uses values from the unprotected JWS header. Reject JWS that
	// include unprotected headers.
	if unprotected.Header != nil {
		return nil, errors.New(
			"JWS \"header\" field not allowed. All headers must be in \"protected\" field")
	}
	// ACME v2 never uses the "signatures" array of JSON serialized JWS, just the
	// mandatory "signature" field. Reject JWS that include the "signatures" array.
	if len(unprotected.Signatures) > 0 {
		return nil, errors.New(
			"JWS \"signatures\" field not allowed. Only the \"signature\" field should contain a signature")
	}
	parsedJWS, err := jose.ParseSigned(body)
	if err != nil {
		return nil, errors.New("Parse error reading JWS")
	}
	// Exactly one signature is required.
	if len(parsedJWS.Signatures) > 1 {
		return nil, errors.New("Too many signatures in POST body")
	}
	if len(parsedJWS.Signatures) == 0 {
		return nil, errors.New("POST JWS not signed")
	}
	return parsedJWS, nil
}
// jwsAuthType represents whether a given POST request is authenticated using
// a JWS with an embedded JWK (new-account, possibly revoke-cert) or an
// embedded Key ID or an unsupported/unknown auth type.
type jwsAuthType int

const (
	embeddedJWK jwsAuthType = iota
	embeddedKeyID
	invalidAuthType
)
// checkJWSAuthType examines a JWS' protected headers to determine if the
// request being authenticated by the JWS is identified using an embedded JWK
// or an embedded key ID. If neither is present, or mutually exclusive
// authentication types are specified at the same time, a problem is returned.
func checkJWSAuthType(jws *jose.JSONWebSignature) (jwsAuthType, *acme.ProblemDetails) {
	// checkJWSAuthType is called after parseJWS() which defends against the
	// incorrect number of signatures, so Signatures[0] is safe.
	header := jws.Signatures[0].Header
	hasKeyID := header.KeyID != ""
	hasJWK := header.JSONWebKey != nil
	switch {
	case hasKeyID && hasJWK:
		// There must not be a Key ID *and* an embedded JWK.
		return invalidAuthType, acme.MalformedProblem("jwk and kid header fields are mutually exclusive")
	case hasKeyID:
		return embeddedKeyID, nil
	case hasJWK:
		return embeddedJWK, nil
	}
	return invalidAuthType, nil
}
// extractJWK returns the JSONWebKey embedded in a JWS header, rejecting
// missing or invalid keys and JWS that also carry a key ID.
func (wfe *WebFrontEndImpl) extractJWK(_ *http.Request, jws *jose.JSONWebSignature) (*jose.JSONWebKey, *acme.ProblemDetails) {
	header := jws.Signatures[0].Header
	key := header.JSONWebKey
	if key == nil {
		return nil, acme.MalformedProblem("No JWK in JWS header")
	}
	if !key.Valid() {
		return nil, acme.MalformedProblem("Invalid JWK in JWS header")
	}
	// An embedded JWK and a key ID must never appear together.
	if header.KeyID != "" {
		return nil, acme.MalformedProblem("jwk and kid header fields are mutually exclusive.")
	}
	return key, nil
}
// lookupJWK returns the account key referenced by the "kid" (key id) field in
// a JWS header. The kid must be a full account URL under this WFE's account
// path, and the account must exist; a JWS that also embeds a JWK is rejected.
func (wfe *WebFrontEndImpl) lookupJWK(request *http.Request, jws *jose.JSONWebSignature) (*jose.JSONWebKey, *acme.ProblemDetails) {
	header := jws.Signatures[0].Header
	accountURL := header.KeyID
	prefix := wfe.relativeEndpoint(request, acctPath)
	if !strings.HasPrefix(accountURL, prefix) {
		return nil, acme.MalformedProblem("Key ID (kid) in JWS header missing expected URL prefix")
	}
	// The account ID is whatever follows the account path prefix.
	accountID := strings.TrimPrefix(accountURL, prefix)
	if accountID == "" {
		return nil, acme.MalformedProblem("No key ID (kid) in JWS header")
	}
	account := wfe.db.GetAccountByID(accountID)
	if account == nil {
		return nil, acme.AccountDoesNotExistProblem(fmt.Sprintf(
			"Account %s not found.", accountURL))
	}
	// A key ID and an embedded JWK must never appear together.
	if header.JSONWebKey != nil {
		return nil, acme.MalformedProblem("jwk and kid header fields are mutually exclusive.")
	}
	return account.Key, nil
}
// validPOST checks the headers of a POST request: in strict mode the
// Content-Type must be application/jose+json; Content-Length is always
// required; and a Replay-Nonce header is always forbidden (nonces belong in
// the signed JWS body).
func (wfe *WebFrontEndImpl) validPOST(request *http.Request) *acme.ProblemDetails {
	if wfe.strict {
		// Section 6.2 says to reject JWS requests without the expected Content-Type
		// using a status code of http.UnsupportedMediaType
		if _, present := request.Header["Content-Type"]; !present {
			return acme.UnsupportedMediaTypeProblem(
				`missing Content-Type header on POST. ` +
					`Content-Type must be "application/jose+json"`)
		}
		if contentType := request.Header.Get("Content-Type"); contentType != expectedJWSContentType {
			return acme.UnsupportedMediaTypeProblem(
				`Invalid Content-Type header on POST. ` +
					`Content-Type must be "application/jose+json"`)
		}
	}
	if _, present := request.Header["Content-Length"]; !present {
		return acme.MalformedProblem("missing Content-Length header on POST")
	}
	// Per 6.4.1 "Replay-Nonce" clients should not send a Replay-Nonce header in
	// the HTTP request, it needs to be part of the signed JWS request body
	if _, present := request.Header["Replay-Nonce"]; present {
		return acme.MalformedProblem("HTTP requests should NOT contain Replay-Nonce header. Use JWS nonce field")
	}
	return nil
}
// keyExtractor is a function that returns a JSONWebKey based on input from a
// user-provided JSONWebSignature, for instance by extracting it from the input,
// or by looking it up in a database based on the input.
type keyExtractor func(*http.Request, *jose.JSONWebSignature) (*jose.JSONWebKey, *acme.ProblemDetails)
// NOTE: Unlike `verifyPOST` from the Boulder WFE this version does not
// presently handle the `regCheck` parameter or do any lookups for existing
// accounts.
//
// verifyPOST validates the POST headers, reads and parses the JWS body,
// extracts the signing key via kx, and verifies the JWS. On success it returns
// the verified payload, the JWS "url" header value, and the key used.
func (wfe *WebFrontEndImpl) verifyPOST(
	ctx context.Context,
	logEvent *requestEvent,
	request *http.Request,
	kx keyExtractor) ([]byte, string, *jose.JSONWebKey, *acme.ProblemDetails) {

	if prob := wfe.validPOST(request); prob != nil {
		return nil, "", nil, prob
	}
	if request.Body == nil {
		return nil, "", nil, acme.MalformedProblem("no body on POST")
	}
	bodyBytes, err := ioutil.ReadAll(request.Body)
	if err != nil {
		return nil, "", nil, acme.InternalErrorProblem("unable to read request body")
	}
	body := string(bodyBytes)
	parsedJWS, err := wfe.parseJWS(body)
	if err != nil {
		return nil, "", nil, acme.MalformedProblem(err.Error())
	}
	// kx decides whether the key comes embedded in the JWS or from the
	// account referenced by its key ID.
	pubKey, prob := kx(request, parsedJWS)
	if prob != nil {
		return nil, "", nil, prob
	}
	return wfe.verifyJWS(pubKey, parsedJWS, request)
}
// verifyJWSSignatureAndAlgorithm checks the parsed JWS against the given
// public key and returns the verified payload. Algorithm acceptability checks
// are still to be implemented.
func (wfe *WebFrontEndImpl) verifyJWSSignatureAndAlgorithm(
	pubKey *jose.JSONWebKey,
	parsedJWS *jose.JSONWebSignature) ([]byte, error) {
	// TODO(@cpu): `checkAlgorithm()`
	payload, err := parsedJWS.Verify(pubKey)
	if err != nil {
		return nil, err
	}
	return []byte(payload), nil
}
// extractJWSURL extracts the "url" protected header parameter from a parsed
// JWS. The second return value indicates whether a non-empty value was found.
func (wfe *WebFrontEndImpl) extractJWSURL(
	parsedJWS *jose.JSONWebSignature) (string, bool) {
	headerURL, ok := parsedJWS.Signatures[0].Header.ExtraHeaders[jose.HeaderKey("url")].(string)
	if !ok || len(headerURL) == 0 {
		return "", false
	}
	return headerURL, true
}
// verifyJWS verifies the JWS signature with pubKey and enforces the ACME
// anti-replay requirements: a valid "url" header matching the request URL and
// a valid nonce. A configured percentage of good nonces is deliberately
// rejected to exercise client retries.
func (wfe *WebFrontEndImpl) verifyJWS(
	pubKey *jose.JSONWebKey,
	parsedJWS *jose.JSONWebSignature,
	request *http.Request) ([]byte, string, *jose.JSONWebKey, *acme.ProblemDetails) {

	payload, err := wfe.verifyJWSSignatureAndAlgorithm(pubKey, parsedJWS)
	if err != nil {
		return nil, "", nil, acme.MalformedProblem("JWS verification error")
	}
	headerURL, ok := wfe.extractJWSURL(parsedJWS)
	if !ok {
		return nil, "", nil, acme.MalformedProblem("JWS header parameter 'url' required.")
	}
	nonce := parsedJWS.Signatures[0].Header.Nonce
	if len(nonce) == 0 {
		return nil, "", nil, acme.BadNonceProblem("JWS has no anti-replay nonce")
	}
	// Roll a random number between 0 and 100.
	nonceRoll := rand.Intn(100)
	// If the nonce is not valid OR if the nonceRoll was less than the
	// nonceErrPercent, fail with an error
	if !wfe.nonce.validNonce(nonce) || nonceRoll < wfe.nonceErrPercent {
		return nil, "", nil, acme.BadNonceProblem(fmt.Sprintf(
			"JWS has an invalid anti-replay nonce: %s", nonce))
	}
	expectedURL := url.URL{
		// NOTE(@cpu): ACME **REQUIRES** HTTPS and Pebble is hardcoded to offer the
		// API over HTTPS.
		Scheme: "https",
		Host:   request.Host,
		Path:   request.RequestURI,
	}
	if expectedURL.String() != headerURL {
		return nil, "", nil, acme.MalformedProblem(fmt.Sprintf(
			"JWS header parameter 'url' incorrect. Expected %q, got %q",
			expectedURL.String(), headerURL))
	}
	return payload, headerURL, pubKey, nil
}
// isASCII reports whether every character in str falls within the ASCII
// character set. The empty string is ASCII.
func isASCII(str string) bool {
	// A string is ASCII exactly when no rune exceeds unicode.MaxASCII.
	return strings.IndexFunc(str, func(r rune) bool {
		return r > unicode.MaxASCII
	}) == -1
}
// verifyContacts validates an account's contact URLs: at most
// maxContactsPerAcct entries, each a mailto: URL with a non-empty,
// ASCII-only, parseable email address. A nil/empty contact list is valid.
func (wfe *WebFrontEndImpl) verifyContacts(acct acme.Account) *acme.ProblemDetails {
	contacts := acct.Contact
	// Providing no Contacts is perfectly acceptable.
	// (len on a nil slice is 0, so the previous explicit nil check was
	// redundant — staticcheck S1009.)
	if len(contacts) == 0 {
		return nil
	}
	if len(contacts) > maxContactsPerAcct {
		return acme.MalformedProblem(fmt.Sprintf(
			"too many contacts provided: %d > %d", len(contacts), maxContactsPerAcct))
	}
	for _, c := range contacts {
		parsed, err := url.Parse(c)
		if err != nil {
			return acme.InvalidContactProblem(fmt.Sprintf("contact %q is invalid", c))
		}
		if parsed.Scheme != "mailto" {
			return acme.UnsupportedContactProblem(fmt.Sprintf(
				"contact method %q is not supported", parsed.Scheme))
		}
		email := parsed.Opaque
		// An empty or omitted Contact array should be used instead of an empty contact
		if email == "" {
			return acme.InvalidContactProblem("empty contact email")
		}
		if !isASCII(email) {
			return acme.InvalidContactProblem(fmt.Sprintf(
				"contact email %q contains non-ASCII characters", email))
		}
		// NOTE(@cpu): ParseAddress may allow invalid emails since it supports RFC 5322
		// display names. This is sufficient for Pebble because we don't intend to
		// use the emails for anything and check this as a best effort for client
		// developers to test invalid contact problems.
		_, err = mail.ParseAddress(email)
		if err != nil {
			return acme.InvalidContactProblem(fmt.Sprintf(
				"contact email %q is invalid", email))
		}
	}
	return nil
}
// UpdateAccount handles POSTs to the account URL: it authenticates the request
// against the account key, then applies a contact update or a deactivation.
// Requests carrying neither simply echo the existing account.
func (wfe *WebFrontEndImpl) UpdateAccount(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {

	body, _, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// updateAcctReq is the ACME account information submitted by the client
	var updateAcctReq struct {
		Contact []string `json:"contact"`
		Status  string   `json:"status,omitempty"`
	}
	err := json.Unmarshal(body, &updateAcctReq)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling account update JSON body"), response)
		return
	}
	existingAcct, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// if this update contains no contacts or deactivated status,
	// simply return the existing account and return early.
	if updateAcctReq.Contact == nil && updateAcctReq.Status != acme.StatusDeactivated {
		err = wfe.writeJsonResponse(response, http.StatusOK, existingAcct)
		if err != nil {
			wfe.sendError(acme.InternalErrorProblem("Error marshalling account"), response)
			return
		}
		return
	}
	// Create a new account object with the existing data
	newAcct := &core.Account{
		Account: acme.Account{
			Contact: existingAcct.Contact,
			Status:  existingAcct.Status,
			Orders:  existingAcct.Orders,
		},
		Key: existingAcct.Key,
		ID:  existingAcct.ID,
	}
	switch {
	case updateAcctReq.Status == acme.StatusDeactivated:
		// Deactivation takes precedence over any contact change.
		newAcct.Status = updateAcctReq.Status
	case updateAcctReq.Status != "" && updateAcctReq.Status != newAcct.Status:
		// Any other status transition is rejected.
		wfe.sendError(
			acme.MalformedProblem(fmt.Sprintf(
				"Invalid account status: %q", updateAcctReq.Status)), response)
		return
	case updateAcctReq.Contact != nil:
		newAcct.Contact = updateAcctReq.Contact
		// Verify that the contact information provided is supported & valid
		prob = wfe.verifyContacts(newAcct.Account)
		if prob != nil {
			wfe.sendError(prob, response)
			return
		}
	}
	err = wfe.db.UpdateAccountByID(existingAcct.ID, newAcct)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error storing updated account"), response)
		return
	}
	err = wfe.writeJsonResponse(response, http.StatusOK, newAcct)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling account"), response)
		return
	}
}
// verifyKeyRollover validates the inner JWS payload of an ACME key-change
// request. The payload must name the account URL of existingAcct, carry the
// account's current key as OldKey, and the proposed newKey must differ from
// the old one. Returns nil when all checks pass, or a problem describing
// the first failure.
func (wfe *WebFrontEndImpl) verifyKeyRollover(
	innerPayload []byte,
	existingAcct *core.Account,
	newKey *jose.JSONWebKey,
	request *http.Request) *acme.ProblemDetails {
	var innerContent struct {
		Account string
		OldKey  jose.JSONWebKey
	}
	err := json.Unmarshal(innerPayload, &innerContent)
	if err != nil {
		return acme.MalformedProblem("Error unmarshaling key roll-over inner JWS body")
	}

	// Check account ID: the payload's account value must be an absolute URL
	// under this server's account path.
	prefix := wfe.relativeEndpoint(request, acctPath)
	if !strings.HasPrefix(innerContent.Account, prefix) {
		return acme.MalformedProblem(fmt.Sprintf("Key ID (account) in inner JWS body missing expected URL prefix (provided account value: %q)", innerContent.Account))
	}
	// Strip the prefix to recover the bare account ID.
	accountID := strings.TrimPrefix(innerContent.Account, prefix)
	if accountID == "" {
		return acme.MalformedProblem(fmt.Sprintf("No key ID (account) in inner JWS body (provided account value: %q)", innerContent.Account))
	}
	if accountID != existingAcct.ID {
		return acme.MalformedProblem(fmt.Sprintf("Key roll-over inner JWS body contains wrong account ID (provided account value: %q)", innerContent.Account))
	}

	// Verify inner key: OldKey must match the account's current key.
	if !keyDigestEquals(innerContent.OldKey, *existingAcct.Key) {
		return acme.MalformedProblem("Key roll-over inner JWS body JSON contains wrong old key")
	}

	// Check for same key: rolling over to the identical key is rejected.
	if keyDigestEquals(innerContent.OldKey, newKey) {
		return acme.MalformedProblem("New and old key are identical")
	}

	return nil
}
// KeyRollover implements the ACME account key-change endpoint. The outer
// JWS is signed by the account's current key (lookupJWK) and its payload is
// an inner JWS signed by the proposed new key. The inner JWS must verify
// with the embedded new key, carry the same 'url' header as the outer JWS,
// and pass the payload checks in verifyKeyRollover before the account key
// is swapped in the db.
func (wfe *WebFrontEndImpl) KeyRollover(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// Extract and parse outer JWS, and retrieve account
	body, outerHeaderURL, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	existingAcct, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// Extract inner JWS: the outer payload is itself a serialized JWS.
	parsedInnerJWS, err := wfe.parseJWS(string(body))
	if err != nil {
		wfe.sendError(acme.MalformedProblem(err.Error()), response)
		return
	}

	// The inner JWS embeds the proposed new key directly.
	newPubKey, prob := wfe.extractJWK(request, parsedInnerJWS)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// The inner JWS must verify under the new key it embeds.
	innerPayload, err := wfe.verifyJWSSignatureAndAlgorithm(newPubKey, parsedInnerJWS)
	if err != nil {
		wfe.sendError(acme.MalformedProblem("Inner JWS verification error"), response)
		return
	}

	// Both JWS must target the same URL.
	innerHeaderURL, ok := wfe.extractJWSURL(parsedInnerJWS)
	if !ok {
		wfe.sendError(acme.MalformedProblem("Inner JWS header parameter 'url' required."), response)
		return
	}
	if innerHeaderURL != outerHeaderURL {
		wfe.sendError(acme.MalformedProblem("JWS header parameter 'url' differs for inner and outer JWS."), response)
		return
	}

	// Validate the inner payload (account URL, old key match, key change).
	prob = wfe.verifyKeyRollover(innerPayload, existingAcct, newPubKey, request)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// Ok, now change account key
	err = wfe.db.ChangeAccountKey(existingAcct, newPubKey)
	if err != nil {
		// If another account already uses the new key, answer 409 with a
		// Location header pointing at the conflicting account.
		if existingAccountError, ok := err.(*db.ExistingAccountError); ok {
			acctURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", acctPath, existingAccountError.MatchingAccount.ID))
			response.Header().Set("Location", acctURL)
			response.WriteHeader(http.StatusConflict)
		} else {
			wfe.sendError(acme.InternalErrorProblem(fmt.Sprintf("Error rolling over account key (%s)", err.Error())), response)
		}
		return
	}

	response.WriteHeader(http.StatusOK)
}
// NewAccount handles the ACME new-account endpoint. If an account already
// exists for the JWS key it is returned with a Location header and 200 OK.
// Otherwise a new account is created, unless the request set
// onlyReturnExisting (error) or did not agree to the terms of service
// (error with a terms-of-service Link header).
func (wfe *WebFrontEndImpl) NewAccount(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// We use extractJWK rather than lookupJWK here because the account is not yet
	// created, so the user provides the full key in a JWS header rather than
	// referring to an existing key.
	body, _, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.extractJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// newAcctReq is the ACME account information submitted by the client
	var newAcctReq struct {
		Contact            []string `json:"contact"`
		ToSAgreed          bool     `json:"termsOfServiceAgreed"`
		OnlyReturnExisting bool     `json:"onlyReturnExisting"`
	}
	err := json.Unmarshal(body, &newAcctReq)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling body JSON"), response)
		return
	}

	// Lookup existing account to exit early if it exists
	existingAcct, _ := wfe.db.GetAccountByKey(key)
	if existingAcct != nil {
		// If there is an existing account then return a Location header pointing to
		// the account and a 200 OK response
		acctURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", acctPath, existingAcct.ID))
		response.Header().Set("Location", acctURL)
		_ = wfe.writeJsonResponse(response, http.StatusOK, existingAcct)
		return
	} else if existingAcct == nil && newAcctReq.OnlyReturnExisting {
		// If there *isn't* an existing account and the created account request
		// contained OnlyReturnExisting then this is an error - return now before
		// creating a new account with the key
		wfe.sendError(acme.AccountDoesNotExistProblem(
			"unable to find existing account for only-return-existing request"), response)
		return
	}

	// Creating an account requires agreeing to the terms of service.
	// (Idiomatic negation rather than comparing a bool against false.)
	if !newAcctReq.ToSAgreed {
		response.Header().Add("Link", link(ToSURL, "terms-of-service"))
		wfe.sendError(
			acme.AgreementRequiredProblem(
				"Provided account did not agree to the terms of service"),
			response)
		return
	}

	// Create a new account object with the provided contact
	newAcct := core.Account{
		Account: acme.Account{
			Contact: newAcctReq.Contact,
			// New accounts are valid to start.
			Status: acme.StatusValid,
		},
		Key: key,
	}

	// Verify that the contact information provided is supported & valid
	prob = wfe.verifyContacts(newAcct.Account)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// Store the new account; AddAccount returns the resulting account count.
	count, err := wfe.db.AddAccount(&newAcct)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error saving account"), response)
		return
	}
	wfe.log.Printf("There are now %d accounts in memory\n", count)

	// Answer 201 Created with a Location header for the new account URL.
	acctURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", acctPath, newAcct.ID))
	response.Header().Add("Location", acctURL)

	err = wfe.writeJsonResponse(response, http.StatusCreated, newAcct)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling account"), response)
		return
	}
}
// isDNSCharacter reports whether ch may appear in a DNS identifier:
// ASCII letters, digits, '.', '-', or the wildcard character '*'.
// Ported from Boulder's `policy/pa.go` implementation.
func isDNSCharacter(ch byte) bool {
	switch {
	case ch >= 'a' && ch <= 'z':
		return true
	case ch >= 'A' && ch <= 'Z':
		return true
	case ch >= '0' && ch <= '9':
		return true
	default:
		return ch == '.' || ch == '-' || ch == '*'
	}
}
/* TODO(@cpu): Pebble's validation of domain names is still pretty weak
* compared to Boulder. We should consider adding:
* 1) Checks for the # of labels, and the size of each label
* 2) Checks against the Public Suffix List
* 3) Checks against a configured domain blocklist
* 4) Checks for malformed IDN, RLDH, etc
*/
// verifyOrder checks that a new order is considered well formed. Light
// validation is done on the order identifiers: all must be DNS-type,
// non-empty, within the length limit, made of legal DNS characters, not a
// bare IP address, not ending in a period, and contain at most one
// wildcard, which must be the leftmost label.
func (wfe *WebFrontEndImpl) verifyOrder(order *core.Order) *acme.ProblemDetails {
	// Shouldn't happen - defensive check. This must run BEFORE taking the
	// read lock: calling RLock on a nil order would panic, defeating the
	// check entirely (the original code locked first).
	if order == nil {
		return acme.InternalErrorProblem("Order is nil")
	}

	// Lock the order for reading
	order.RLock()
	defer order.RUnlock()

	idents := order.Identifiers
	if len(idents) == 0 {
		return acme.MalformedProblem("Order did not specify any identifiers")
	}

	// Check that all of the identifiers in the new-order are DNS type
	for _, ident := range idents {
		if ident.Type != acme.IdentifierDNS {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included non-DNS type identifier: type %q, value %q",
				ident.Type, ident.Value))
		}

		rawDomain := ident.Value
		if rawDomain == "" {
			// Plain string literal: fmt.Sprintf with no format arguments
			// was a `go vet` violation and did nothing.
			return acme.MalformedProblem(
				"Order included DNS identifier with empty value")
		}

		for _, ch := range []byte(rawDomain) {
			if !isDNSCharacter(ch) {
				return acme.MalformedProblem(fmt.Sprintf(
					"Order included DNS identifier with a value containing an illegal character: %q",
					ch))
			}
		}

		if len(rawDomain) > maxDNSIdentifierLength {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included DNS identifier that was longer than %d characters",
				maxDNSIdentifierLength))
		}

		if ip := net.ParseIP(rawDomain); ip != nil {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included a DNS identifier with an IP address value: %q\n",
				rawDomain))
		}

		if strings.HasSuffix(rawDomain, ".") {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included a DNS identifier with a value ending in a period: %q\n",
				rawDomain))
		}

		// If there is a wildcard character in the ident value there should be
		// only *one* instance. Count once and reuse the result.
		wildcards := strings.Count(rawDomain, "*")
		if wildcards > 1 {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included DNS type identifier with illegal wildcard value: "+
					"too many wildcards %q",
				rawDomain))
		} else if wildcards == 1 {
			// If there is one wildcard character it should be the only character in
			// the leftmost label.
			if !strings.HasPrefix(rawDomain, "*.") {
				return acme.MalformedProblem(fmt.Sprintf(
					"Order included DNS type identifier with illegal wildcard value: "+
						"wildcard isn't leftmost prefix %q",
					rawDomain))
			}
		}
	}
	return nil
}
// makeAuthorizations populates an order with new authz's. The request parameter
// is required to make the authz URL's absolute based on the request host.
// One pending authorization (with challenges) is created and saved for each
// name on the order, then the order's authorization fields are replaced
// under the write lock.
func (wfe *WebFrontEndImpl) makeAuthorizations(order *core.Order, request *http.Request) error {
	var auths []string
	var authObs []*core.Authorization

	// Lock the order for reading
	order.RLock()
	// Create one authz for each name in the order's parsed CSR
	for _, name := range order.Names {
		now := wfe.clk.Now().UTC()
		expires := now.Add(pendingAuthzExpire)
		ident := acme.Identifier{
			Type:  acme.IdentifierDNS,
			Value: name,
		}
		authz := &core.Authorization{
			ID:          newToken(),
			ExpiresDate: expires,
			Order:       order,
			Authorization: acme.Authorization{
				Status:     acme.StatusPending,
				Identifier: ident,
				Expires:    expires.UTC().Format(time.RFC3339),
			},
		}
		authz.URL = wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", authzPath, authz.ID))
		// Create the challenges for this authz
		if err := wfe.makeChallenges(authz, request); err != nil {
			// Release the read lock before returning - the original code
			// returned here while still holding it, leaking the lock.
			order.RUnlock()
			return err
		}
		// Save the authorization in memory
		count, err := wfe.db.AddAuthorization(authz)
		if err != nil {
			// Same lock-leak fix as above.
			order.RUnlock()
			return err
		}
		wfe.log.Printf("There are now %d authorizations in the db\n", count)
		authzURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", authzPath, authz.ID))
		auths = append(auths, authzURL)
		authObs = append(authObs, authz)
	}
	// Unlock the order from reading
	order.RUnlock()

	// Lock the order for writing & update the order's authorizations
	order.Lock()
	order.Authorizations = auths
	order.AuthorizationObjects = authObs
	order.Unlock()
	return nil
}
// makeChallenge creates a single pending challenge of the requested type
// for authz, stores it in the in-memory database, and returns it. The
// request is used to build an absolute challenge URL.
func (wfe *WebFrontEndImpl) makeChallenge(
	chalType string,
	authz *core.Authorization,
	request *http.Request) (*core.Challenge, error) {
	// Mint the ID up front so the challenge URL can reference it.
	chalID := newToken()
	challenge := &core.Challenge{
		ID: chalID,
		Challenge: acme.Challenge{
			Type:   chalType,
			Token:  newToken(),
			URL:    wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", challengePath, chalID)),
			Status: acme.StatusPending,
		},
		Authz: authz,
	}
	// Persist the new challenge in the in-memory database.
	if _, err := wfe.db.AddChallenge(challenge); err != nil {
		return nil, err
	}
	return challenge, nil
}
// makeChallenges populates an authz with new challenges. The request parameter
// is required to make the challenge URL's absolute based on the request host.
// Wildcard identifiers get only a DNS-01 challenge; all other identifiers
// get one challenge per enabled type.
func (wfe *WebFrontEndImpl) makeChallenges(authz *core.Authorization, request *http.Request) error {
	var chals []*core.Challenge

	// Authorizations for a wildcard identifier only get a DNS-01 challenges to
	// match Boulder/Let's Encrypt wildcard issuance policy
	if strings.HasPrefix(authz.Identifier.Value, "*.") {
		chal, err := wfe.makeChallenge(acme.ChallengeDNS01, authz, request)
		if err != nil {
			return err
		}
		chals = []*core.Challenge{chal}
	} else {
		// Non-wildcard authorizations get all of the enabled challenge types
		enabledChallenges := []string{acme.ChallengeHTTP01, acme.ChallengeTLSALPN01, acme.ChallengeDNS01}
		for _, chalType := range enabledChallenges {
			chal, err := wfe.makeChallenge(chalType, authz, request)
			if err != nil {
				return err
			}
			chals = append(chals, chal)
		}
	}

	// Lock the authorization for writing to update the challenges.
	// The authz stores the embedded acme.Challenge pointers, not the
	// core.Challenge wrappers.
	authz.Lock()
	authz.Challenges = nil
	for _, c := range chals {
		authz.Challenges = append(authz.Challenges, &c.Challenge)
	}
	authz.Unlock()
	return nil
}
// NewOrder creates a new Order request and populates its authorizations.
// The submitted order contributes only its Identifiers, NotBefore and
// NotAfter; the server assigns ID, status, expiry (one day out) and
// per-name authorizations before storing and returning the order.
func (wfe *WebFrontEndImpl) NewOrder(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// Authenticate the POST against an existing account key.
	body, _, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	existingReg, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// Unpack the order request body
	var newOrder acme.Order
	err := json.Unmarshal(body, &newOrder)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling body JSON: "+err.Error()), response)
		return
	}

	// New orders expire one day from now.
	expires := time.Now().AddDate(0, 0, 1)
	order := &core.Order{
		ID:        newToken(),
		AccountID: existingReg.ID,
		Order: acme.Order{
			Status:  acme.StatusPending,
			Expires: expires.UTC().Format(time.RFC3339),
			// Only the Identifiers, NotBefore and NotAfter from the submitted order
			// are carried forward
			Identifiers: newOrder.Identifiers,
			NotBefore:   newOrder.NotBefore,
			NotAfter:    newOrder.NotAfter,
		},
		ExpiresDate: expires,
	}

	// Verify the details of the order before creating authorizations
	if err := wfe.verifyOrder(order); err != nil {
		wfe.sendError(err, response)
		return
	}

	// Collect all of the DNS identifier values up into a []string
	var orderNames []string
	for _, ident := range order.Identifiers {
		orderNames = append(orderNames, ident.Value)
	}

	// Store the unique lower version of the names on the order object
	order.Names = uniqueLowerNames(orderNames)

	// Create the authorizations for the order
	err = wfe.makeAuthorizations(order, request)
	if err != nil {
		wfe.sendError(
			acme.InternalErrorProblem("Error creating authorizations for order"), response)
		return
	}

	// Add the order to the in-memory DB
	count, err := wfe.db.AddOrder(order)
	if err != nil {
		wfe.sendError(
			acme.InternalErrorProblem("Error saving order"), response)
		return
	}
	wfe.log.Printf("Added order %q to the db\n", order.ID)
	wfe.log.Printf("There are now %d orders in the db\n", count)

	// Get the stored order back from the DB. The memorystore will set the order's
	// status for us.
	storedOrder := wfe.db.GetOrderByID(order.ID)

	// Answer 201 Created with a Location header for the new order URL.
	orderURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", orderPath, storedOrder.ID))
	response.Header().Add("Location", orderURL)

	orderResp := wfe.orderForDisplay(storedOrder, request)
	err = wfe.writeJsonResponse(response, http.StatusCreated, orderResp)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling order"), response)
		return
	}
}
// orderForDisplay preps a *core.Order for display by populating some fields
// based on the http.request provided and returning a *acme.Order ready to be
// rendered to JSON for display to an API client. The finalize URL is always
// populated; the certificate URL only when the order has an issued cert.
func (wfe *WebFrontEndImpl) orderForDisplay(
	order *core.Order,
	request *http.Request) acme.Order {
	// Lock the order for reading
	order.RLock()
	defer order.RUnlock()

	// Copy the initial OrderRequest from the internal order object to mutate and
	// use as the result.
	// NOTE(review): this is a shallow copy - the Authorizations and
	// Identifiers slices below share backing arrays with the stored order,
	// so the shuffles mutate the stored slices too. Confirm this is
	// intentional before relying on stored ordering.
	result := order.Order

	// Randomize the order of the order authorization URLs as well as the order's
	// identifiers. ACME draft Section 7.4 "Applying for Certificate Issuance"
	// says:
	//   Clients SHOULD NOT make any assumptions about the sort order of
	//   "identifiers" or "authorizations" elements in the returned order
	//   object.
	rand.Shuffle(len(result.Authorizations), func(i, j int) {
		result.Authorizations[i], result.Authorizations[j] = result.Authorizations[j], result.Authorizations[i]
	})
	rand.Shuffle(len(result.Identifiers), func(i, j int) {
		result.Identifiers[i], result.Identifiers[j] = result.Identifiers[j], result.Identifiers[i]
	})

	// Populate a finalization URL for this order
	result.Finalize = wfe.relativeEndpoint(request,
		fmt.Sprintf("%s%s", orderFinalizePath, order.ID))

	// If the order has a cert ID then set the certificate URL by constructing
	// a relative path based on the HTTP request & the cert ID
	if order.CertificateObject != nil {
		result.Certificate = wfe.relativeEndpoint(
			request,
			certPath+order.CertificateObject.ID)
	}

	return result
}
// Order retrieves the details of an existing order. The order ID is taken
// from the request path; unknown IDs yield a 404.
func (wfe *WebFrontEndImpl) Order(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	orderID := strings.TrimPrefix(request.URL.Path, orderPath)
	existingOrder := wfe.db.GetOrderByID(orderID)
	if existingOrder == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}

	// Render the order as JSON with request-relative URLs populated.
	displayOrder := wfe.orderForDisplay(existingOrder, request)
	if err := wfe.writeJsonResponse(response, http.StatusOK, displayOrder); err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling order"), response)
		return
	}
}
// FinalizeOrder handles a POST to an order's finalize URL. The order must
// belong to the authenticated account, be in the "ready" status, and be
// unexpired. The POST body carries a base64url-encoded CSR whose names
// must match the order's names exactly; on success the CSR is handed to
// the CA asynchronously and the processing order is returned.
func (wfe *WebFrontEndImpl) FinalizeOrder(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// Verify the POST request
	body, _, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// Find the account corresponding to the key that authenticated the POST request
	existingAcct, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// Find the order specified by the order ID
	orderID := strings.TrimPrefix(request.URL.Path, orderFinalizePath)
	existingOrder := wfe.db.GetOrderByID(orderID)
	if existingOrder == nil {
		response.WriteHeader(http.StatusNotFound)
		wfe.sendError(acme.NotFoundProblem(fmt.Sprintf(
			"No order %q found for account ID %q", orderID, existingAcct.ID)), response)
		return
	}

	// Lock the order for reading the properties we need to check
	existingOrder.RLock()
	orderAccountID := existingOrder.AccountID
	orderStatus := existingOrder.Status
	orderExpires := existingOrder.ExpiresDate
	orderNames := existingOrder.Names
	// And then immediately unlock it again - we don't defer() here because
	// `maybeIssue` will also acquire a read lock and we call that before
	// returning
	existingOrder.RUnlock()

	// If the order doesn't belong to the account that authenticted the POST
	// request then pretend it doesn't exist.
	if orderAccountID != existingAcct.ID {
		response.WriteHeader(http.StatusNotFound)
		wfe.sendError(acme.NotFoundProblem(fmt.Sprintf(
			"No order %q found for account ID %q", orderID, existingAcct.ID)), response)
		return
	}

	// The existing order must be in a ready status to finalize it
	if orderStatus != acme.StatusReady {
		wfe.sendError(acme.MalformedProblem(fmt.Sprintf(
			"Order's status (%q) was not %s", orderStatus, acme.StatusReady)), response)
		return
	}

	// The existing order must not be expired
	if orderExpires.Before(wfe.clk.Now()) {
		wfe.sendError(acme.NotFoundProblem(fmt.Sprintf(
			"Order %q expired %s", orderID, orderExpires)), response)
		return
	}

	// The finalize POST body is expected to be the bytes from a base64 raw url
	// encoded CSR
	var finalizeMessage struct {
		CSR string
	}
	err := json.Unmarshal(body, &finalizeMessage)
	if err != nil {
		wfe.sendError(acme.MalformedProblem(fmt.Sprintf(
			"Error unmarshaling finalize order request body: %s", err.Error())), response)
		return
	}

	csrBytes, err := base64.RawURLEncoding.DecodeString(finalizeMessage.CSR)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error decoding Base64url-encoded CSR: "+err.Error()), response)
		return
	}

	parsedCSR, err := x509.ParseCertificateRequest(csrBytes)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error parsing Base64url-encoded CSR: "+err.Error()), response)
		return
	}

	// Check that the CSR has the same number of names as the initial order contained
	csrNames := uniqueLowerNames(parsedCSR.DNSNames)
	if len(csrNames) != len(orderNames) {
		wfe.sendError(acme.UnauthorizedProblem(
			"Order includes different number of names than CSR specifies"), response)
		return
	}

	// Check that the CSR's names match the order names exactly
	for i, name := range orderNames {
		if name != csrNames[i] {
			wfe.sendError(acme.UnauthorizedProblem(
				fmt.Sprintf("CSR is missing Order domain %q", name)), response)
			return
		}
	}

	// Lock and update the order with the parsed CSR and the began processing
	// state.
	existingOrder.Lock()
	existingOrder.ParsedCSR = parsedCSR
	existingOrder.BeganProcessing = true
	existingOrder.Unlock()

	// Ask the CA to complete the order in a separate goroutine.
	wfe.log.Printf("Order %s is fully authorized. Processing finalization", orderID)
	go wfe.ca.CompleteOrder(existingOrder)

	// Set the existingOrder to processing before displaying to the user.
	// This must be done under the write lock: the CA goroutine launched
	// above operates on the same order concurrently, so the original
	// unlocked write was a data race.
	existingOrder.Lock()
	existingOrder.Status = acme.StatusProcessing
	existingOrder.Unlock()

	// Prepare the order for display as JSON
	orderReq := wfe.orderForDisplay(existingOrder, request)
	orderURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", orderPath, existingOrder.ID))
	response.Header().Add("Location", orderURL)
	err = wfe.writeJsonResponse(response, http.StatusOK, orderReq)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling order"), response)
		return
	}
}
// prepAuthorizationForDisplay prepares the provided acme.Authorization for
// display to an ACME client: wildcard identifiers are normalized to the
// base domain plus the Wildcard flag, non-pending authorizations only show
// the challenges that decided them, and challenge order is randomized.
func prepAuthorizationForDisplay(authz acme.Authorization) acme.Authorization {
	// Work on a copy so the caller's value is left untouched.
	display := authz

	// A wildcard identifier is shown without its "*." prefix, with the
	// Wildcard field set instead.
	if value := display.Identifier.Value; strings.HasPrefix(value, "*.") {
		display.Identifier.Value = strings.TrimPrefix(value, "*.")
		display.Wildcard = true
	}

	// Once the authz has left the pending state, only the challenges that
	// contributed to the outcome are shown: any with an error, or any that
	// are valid.
	if display.Status != acme.StatusPending {
		var decisive []*acme.Challenge
		for _, chal := range display.Challenges {
			if chal.Error != nil || chal.Status == acme.StatusValid {
				decisive = append(decisive, chal)
			}
		}
		display.Challenges = decisive
	}

	// Randomize the order of the challenges in the returned authorization.
	// Clients should not make any assumptions about the sort order.
	rand.Shuffle(len(display.Challenges), func(i, j int) {
		display.Challenges[i], display.Challenges[j] = display.Challenges[j], display.Challenges[i]
	})
	return display
}
// Authz serves an authorization object by ID. A GET returns the current
// state; a POST (authenticated as the owning account) may deactivate the
// authorization by sending {"status": "deactivated"}.
func (wfe *WebFrontEndImpl) Authz(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	authzID := strings.TrimPrefix(request.URL.Path, authzPath)
	authz := wfe.db.GetAuthorizationByID(authzID)
	if authz == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}

	if request.Method == "POST" {
		// Authenticate the POST and resolve the signing account.
		body, _, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
		if prob != nil {
			wfe.sendError(prob, response)
			return
		}
		existingAcct, prob := wfe.getAcctByKey(key)
		if prob != nil {
			wfe.sendError(prob, response)
			return
		}

		// Only the account that owns the authorization may update it.
		if authz.Order.AccountID != existingAcct.ID {
			wfe.sendError(acme.UnauthorizedProblem(
				"Account does not own authorization"), response)
			return
		}

		var deactivateRequest struct {
			Status string
		}
		err := json.Unmarshal(body, &deactivateRequest)
		if err != nil {
			wfe.sendError(acme.MalformedProblem(
				fmt.Sprintf("Malformed authorization update: %s",
					err.Error())), response)
			return
		}

		// Deactivation is the only supported authorization update.
		if deactivateRequest.Status != "deactivated" {
			wfe.sendError(acme.MalformedProblem(
				fmt.Sprintf("Malformed authorization update, status must be \"deactivated\" not %q",
					deactivateRequest.Status)), response)
			return
		}

		// Take the write lock for the status update - the authorization is
		// shared state guarded by an RWMutex elsewhere in this file, and
		// the original unlocked write here was a data race.
		authz.Lock()
		authz.Status = acme.StatusDeactivated
		authz.Unlock()
	}

	err := wfe.writeJsonResponse(
		response,
		http.StatusOK,
		prepAuthorizationForDisplay(authz.Authorization))
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling authz"), response)
		return
	}
}
// Challenge dispatches challenge requests: POSTs update a challenge
// (trigger validation), everything else reads the challenge state.
func (wfe *WebFrontEndImpl) Challenge(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	switch request.Method {
	case "POST":
		wfe.updateChallenge(ctx, logEvent, response, request)
	default:
		wfe.getChallenge(ctx, logEvent, response, request)
	}
}
// getChallenge writes the JSON representation of the challenge whose ID is
// in the request path, or 404 if it is unknown.
func (wfe *WebFrontEndImpl) getChallenge(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	challengeID := strings.TrimPrefix(request.URL.Path, challengePath)
	challenge := wfe.db.GetChallengeByID(challengeID)
	if challenge == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}

	// Hold the read lock while the challenge is serialized.
	challenge.RLock()
	defer challenge.RUnlock()
	if err := wfe.writeJsonResponse(response, http.StatusOK, challenge.Challenge); err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling challenge"), response)
		return
	}
}
// getAcctByKey finds an account by its public key, returning a problem
// pointer when the lookup errors, no account matches, or the matching
// account has been deactivated.
func (wfe *WebFrontEndImpl) getAcctByKey(key crypto.PublicKey) (*core.Account, *acme.ProblemDetails) {
	// Look up the account registered for this public key.
	acct, err := wfe.db.GetAccountByKey(key)
	switch {
	case err != nil:
		return nil, acme.AccountDoesNotExistProblem("Error while retrieving key ID from public key")
	case acct == nil:
		return nil, acme.AccountDoesNotExistProblem(
			"URL in JWS 'kid' field does not correspond to an account")
	case acct.Status == acme.StatusDeactivated:
		// Deactivated accounts may not authenticate requests.
		return nil, acme.UnauthorizedProblem("Account has been deactivated")
	}
	return acct, nil
}
// validateChallengeUpdate checks that a challenge may be responded to:
// only pending challenges can be updated. On success it returns the
// challenge's associated authorization.
//
// NOTE(review): the acct parameter is unused in this function - account
// ownership appears to be checked elsewhere. Confirm before removing it.
func (wfe *WebFrontEndImpl) validateChallengeUpdate(
	chal *core.Challenge,
	acct *core.Account) (*core.Authorization, *acme.ProblemDetails) {
	// Lock the challenge for reading to do validation
	chal.RLock()
	defer chal.RUnlock()

	// Check that the existing challenge is Pending
	if chal.Status != acme.StatusPending {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Cannot update challenge with status %s, only status %s",
				chal.Status, acme.StatusPending))
	}

	return chal.Authz, nil
}
// validateAuthzForChallenge checks an authz is:
// 1) for a supported identifier type
// 2) not expired
// 3) associated to an order
// The associated order is returned when no problems are found to avoid needing
// another RLock() for the caller to get the order pointer later.
func (wfe *WebFrontEndImpl) validateAuthzForChallenge(authz *core.Authorization) (*core.Order, *acme.ProblemDetails) {
	// Lock the authz for reading
	authz.RLock()
	defer authz.RUnlock()

	// Only DNS identifiers are supported.
	ident := authz.Identifier
	if ident.Type != acme.IdentifierDNS {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Authorization identifier was type %s, only %s is supported",
				ident.Type, acme.IdentifierDNS))
	}

	// Reject expired authorizations.
	now := wfe.clk.Now()
	if now.After(authz.ExpiresDate) {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Authorization expired %s",
				authz.ExpiresDate.Format(time.RFC3339)))
	}

	// Every authz should have been created with an owning order.
	existingOrder := authz.Order
	if existingOrder == nil {
		return nil, acme.InternalErrorProblem("authz missing associated order")
	}

	return existingOrder, nil
}
// updateChallenge handles a POST to a challenge URL: after validating the
// account, challenge, authz, and order, it submits an asynchronous
// validation job to the VA and echoes the challenge back. In strict mode a
// legacy KeyAuthorization in the body is rejected.
func (wfe *WebFrontEndImpl) updateChallenge(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	body, _, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	existingAcct, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	var chalResp struct {
		KeyAuthorization *string
	}
	err := json.Unmarshal(body, &chalResp)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling body JSON"), response)
		return
	}

	// Historically challenges were updated by POSTing a KeyAuthorization. This is
	// unnecessary, the server can calculate this itself. We could ignore this if
	// sent (and that's what Boulder will do) but for Pebble we'd like to offer
	// a way to be more aggressive about pushing clients implementations in the
	// right direction, so we treat this as a malformed request when running in
	// strict mode.
	if wfe.strict && chalResp.KeyAuthorization != nil {
		wfe.sendError(
			acme.MalformedProblem(
				"Challenge response body contained legacy KeyAuthorzation field, "+
					"POST body should be `{}`"), response)
		return
	}

	chalID := strings.TrimPrefix(request.URL.Path, challengePath)
	existingChal := wfe.db.GetChallengeByID(chalID)
	if existingChal == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}

	authz, prob := wfe.validateChallengeUpdate(existingChal, existingAcct)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	if authz == nil {
		wfe.sendError(
			acme.InternalErrorProblem("challenge missing associated authz"), response)
		return
	}

	existingOrder, prob := wfe.validateAuthzForChallenge(authz)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// Read the order's expiry under the read lock, then release the lock
	// before checking it. The original code returned on the expired path
	// while still holding the read lock, leaking it and wedging any later
	// writer.
	existingOrder.RLock()
	orderExpires := existingOrder.ExpiresDate
	existingOrder.RUnlock()
	if wfe.clk.Now().After(orderExpires) {
		wfe.sendError(
			acme.MalformedProblem(fmt.Sprintf("order expired %s",
				orderExpires.Format(time.RFC3339))), response)
		return
	}

	// Lock the authorization to get the identifier value
	authz.RLock()
	ident := authz.Identifier.Value
	authz.RUnlock()

	// If the identifier value is for a wildcard domain then strip the wildcard
	// prefix before dispatching the validation to ensure the base domain is
	// validated.
	if strings.HasPrefix(ident, "*.") {
		ident = strings.TrimPrefix(ident, "*.")
	}

	// Submit a validation job to the VA, this will be processed asynchronously
	wfe.va.ValidateChallenge(ident, existingChal, existingAcct)

	// Lock the challenge for reading in order to write the response
	existingChal.RLock()
	defer existingChal.RUnlock()
	response.Header().Add("Link", link(existingChal.Authz.URL, "up"))
	err = wfe.writeJsonResponse(response, http.StatusOK, existingChal.Challenge)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling challenge"), response)
		return
	}
}
// Certificate serves the PEM certificate chain for the serial in the
// request path, or 404 when the serial is unknown.
func (wfe *WebFrontEndImpl) Certificate(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	serial := strings.TrimPrefix(request.URL.Path, certPath)
	if cert := wfe.db.GetCertificateByID(serial); cert != nil {
		response.Header().Set("Content-Type", "application/pem-certificate-chain; charset=utf-8")
		response.WriteHeader(http.StatusOK)
		// Write errors are deliberately ignored - nothing useful can be
		// done once the status has been sent.
		_, _ = response.Write(cert.Chain())
		return
	}
	response.WriteHeader(http.StatusNotFound)
}
// writeJsonResponse marshals v as indented JSON and writes it with the
// given status code. A marshalling error is returned before any headers
// are written so the caller can send a problem document instead.
func (wfe *WebFrontEndImpl) writeJsonResponse(response http.ResponseWriter, status int, v interface{}) error {
	payload, err := marshalIndent(v)
	if err != nil {
		return err // All callers are responsible for handling this error
	}

	headers := response.Header()
	headers.Set("Content-Type", "application/json; charset=utf-8")
	response.WriteHeader(status)

	// Don't worry about returning an error from Write() because the caller will
	// never handle it.
	_, _ = response.Write(payload)
	return nil
}
// addNoCacheHeader marks the response as not cacheable.
func addNoCacheHeader(response http.ResponseWriter) {
	headers := response.Header()
	headers.Add("Cache-Control", "public, max-age=0, no-cache")
}
// marshalIndent renders v as JSON indented with three spaces per level.
func marshalIndent(v interface{}) ([]byte, error) {
	const prefix, indent = "", "   "
	return json.MarshalIndent(v, prefix, indent)
}
// link formats a URL and relation into an HTTP Link header value,
// e.g. <https://example.com>;rel="up".
func link(url, relation string) string {
	return "<" + url + ">;rel=\"" + relation + "\""
}
// uniqueLowerNames returns the set of all unique names in the input after all
// of them are lowercased. The returned names will be in their lowercased form
// and sorted alphabetically. See Boulder `core/util.go UniqueLowerNames`.
func uniqueLowerNames(names []string) []string {
	// Deduplicate via a set keyed on the lowercased name.
	seen := make(map[string]struct{}, len(names))
	for _, name := range names {
		seen[strings.ToLower(name)] = struct{}{}
	}
	result := make([]string, 0, len(seen))
	for lowered := range seen {
		result = append(result, lowered)
	}
	sort.Strings(result)
	return result
}
// RevokeCert revokes an ACME certificate.
// It currently only implements one method of ACME revocation:
// Signing the revocation request by signing it with the certificate
// to be revoked's private key and embedding the certificate
// to be revoked's public key as a JWK in the JWS.
//
// Pebble's idea of certificate revocation is to forget the certificate exists.
// This method does not percolate to a CRL or an OCSP response.
func (wfe *WebFrontEndImpl) RevokeCert(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// The ACME specification handles the verification of revocation requests
	// differently from other endpoints that always use one JWS authentication
	// method. For this endpoint we need to accept a JWS with an embedded JWK, or
	// a JWS with an embedded key ID, handling each case differently in terms of
	// which certificates are authorized to be revoked by the requester

	// Read and parse the JWS manually rather than via verifyPOST, since the
	// authentication method is not yet known.
	bodyBytes, err := ioutil.ReadAll(request.Body)
	if err != nil {
		wfe.sendError(
			acme.InternalErrorProblem("unable to read request body"), response)
		return
	}
	body := string(bodyBytes)

	parsedJWS, err := wfe.parseJWS(body)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem(err.Error()), response)
		return
	}

	// Check the request's method/content-type validity.
	if prob := wfe.validPOST(request); prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// Determine the authentication type for this request
	authType, prob := checkJWSAuthType(parsedJWS)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// Handle the revocation request according to how it is authenticated, or if
	// the authentication type is unknown, error immediately
	if authType == embeddedKeyID {
		prob = wfe.revokeCertByKeyID(ctx, logEvent, parsedJWS, request)
	} else if authType == embeddedJWK {
		prob = wfe.revokeCertByJWK(ctx, logEvent, parsedJWS, request)
	} else {
		prob = acme.MalformedProblem("Malformed JWS, no KeyID or embedded JWK")
	}
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	response.WriteHeader(http.StatusOK)
}
// revokeCertByKeyID handles a revocation request authenticated by a JWS
// with a key ID ("kid"): the JWS is verified against the referenced
// account's key and only certificates belonging to that account may be
// revoked. Returns nil on successful revocation.
func (wfe *WebFrontEndImpl) revokeCertByKeyID(
	ctx context.Context,
	logEvent *requestEvent,
	jws *jose.JSONWebSignature,
	request *http.Request) *acme.ProblemDetails {
	// Resolve the "kid" header to the account's registered public key.
	pubKey, prob := wfe.lookupJWK(request, jws)
	if prob != nil {
		return prob
	}
	body, _, key, prob := wfe.verifyJWS(pubKey, jws, request)
	if prob != nil {
		return prob
	}
	existingAcct, err := wfe.db.GetAccountByKey(key)
	if err != nil {
		return acme.MalformedProblem(fmt.Sprintf("Cannot obtain key ID from public key (%s)", err.Error()))
	}
	if existingAcct == nil {
		return acme.UnauthorizedProblem("No account found corresponding to public key authenticating this request")
	}
	// An account is only authorized to revoke its own certificates presently.
	// TODO(@cpu): Allow an account to revoke another account's certificate if
	// the revoker account has valid authorizations for all of the names in the
	// to-be-revoked certificate.
	authorizedToRevoke := func(cert *core.Certificate) *acme.ProblemDetails {
		if cert.AccountID == existingAcct.ID {
			return nil
		}
		return acme.UnauthorizedProblem(
			fmt.Sprintf(
				"The certificate being revoked is not associated with account %q",
				existingAcct.ID))
	}
	return wfe.processRevocation(ctx, body, authorizedToRevoke, request, logEvent)
}
// revokeCertByJWK handles a revocation request authenticated by a JWS with
// an embedded JWK: the JWS is verified with the JWK it carries, and
// revocation is authorized only when that key matches the public key of
// the certificate being revoked. Returns nil on successful revocation.
//
// The original declared a `requestKey` variable only to assign `key` to it
// later; the verified key is now used directly in the closure.
func (wfe *WebFrontEndImpl) revokeCertByJWK(
	ctx context.Context,
	logEvent *requestEvent,
	jws *jose.JSONWebSignature,
	request *http.Request) *acme.ProblemDetails {
	pubKey, prob := wfe.extractJWK(request, jws)
	if prob != nil {
		return prob
	}
	body, _, key, prob := wfe.verifyJWS(pubKey, jws, request)
	if prob != nil {
		return prob
	}
	// For embedded JWK revocations we decide if a requester is able to revoke
	// a specific certificate by checking that the to-be-revoked certificate
	// has the same public key as the JWK that was used to authenticate the
	// request.
	authorizedToRevoke := func(cert *core.Certificate) *acme.ProblemDetails {
		if keyDigestEquals(key, cert.Cert.PublicKey) {
			return nil
		}
		return acme.UnauthorizedProblem(
			"JWK embedded in revocation request must be the same public key as the cert to be revoked")
	}
	return wfe.processRevocation(ctx, body, authorizedToRevoke, request, logEvent)
}
// authorizedToRevokeCert is a callback function that can be used to validate if
// a given requester is authorized to revoke the certificate parsed out of the
// revocation request. If the requester is not authorized to revoke the
// certificate a problem is returned. It is expected to be a closure containing
// additional state (an account ID or key) that will be used to make the
// decision. See revokeCertByKeyID and revokeCertByJWK for the two
// implementations passed to processRevocation.
type authorizedToRevokeCert func(*core.Certificate) *acme.ProblemDetails
// processRevocation parses the revocation request JSON in jwsBody,
// validates the optional reason code, looks up the referenced certificate
// by its DER bytes, consults the authorizedToRevoke callback, and finally
// revokes the certificate in the store. Returns a problem describing any
// failure, or nil on success.
func (wfe *WebFrontEndImpl) processRevocation(
	ctx context.Context,
	jwsBody []byte,
	authorizedToRevoke authorizedToRevokeCert,
	request *http.Request,
	logEvent *requestEvent) *acme.ProblemDetails {
	// revokeCertReq is the ACME certificate information submitted by the client
	var revokeCertReq struct {
		Certificate string `json:"certificate"`
		Reason      *uint  `json:"reason,omitempty"`
	}
	err := json.Unmarshal(jwsBody, &revokeCertReq)
	if err != nil {
		return acme.MalformedProblem("Error unmarshaling certificate revocation JSON body")
	}
	// Reject reason codes X.509 treats as unused or out of range
	// (see the unusedRevocationReason/aACompromiseRevocationReason consts).
	if revokeCertReq.Reason != nil {
		r := *revokeCertReq.Reason
		if r == unusedRevocationReason || r > aACompromiseRevocationReason {
			return acme.BadRevocationReasonProblem(fmt.Sprintf("Invalid revocation reason: %d", r))
		}
	}
	// The certificate to revoke is identified by its base64url-encoded DER.
	derBytes, err := base64.RawURLEncoding.DecodeString(revokeCertReq.Certificate)
	if err != nil {
		return acme.MalformedProblem("Error decoding Base64url-encoded DER: " + err.Error())
	}
	cert := wfe.db.GetCertificateByDER(derBytes)
	if cert == nil {
		return acme.MalformedProblem(
			"Unable to find specified certificate. It may already be revoked")
	}
	// Let the caller-supplied policy decide whether this requester may
	// revoke this certificate.
	if prob := authorizedToRevoke(cert); prob != nil {
		return prob
	}
	wfe.db.RevokeCertificate(cert)
	return nil
}
Fix typo. (#163)
package wfe
import (
"context"
"crypto"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net"
"net/http"
"net/mail"
"net/url"
"os"
"path"
"sort"
"strconv"
"strings"
"time"
"unicode"
"gopkg.in/square/go-jose.v2"
"github.com/jmhodges/clock"
"github.com/letsencrypt/pebble/acme"
"github.com/letsencrypt/pebble/ca"
"github.com/letsencrypt/pebble/core"
"github.com/letsencrypt/pebble/db"
"github.com/letsencrypt/pebble/va"
)
const (
	// Note: We deliberately pick endpoint paths that differ from Boulder to
	// exercise clients processing of the /directory response
	directoryPath     = "/dir"
	noncePath         = "/nonce-plz"
	newAccountPath    = "/sign-me-up"
	acctPath          = "/my-account/"
	newOrderPath      = "/order-plz"
	orderPath         = "/my-order/"
	orderFinalizePath = "/finalize-order/"
	authzPath         = "/authZ/"
	challengePath     = "/chalZ/"
	certPath          = "/certZ/"
	revokeCertPath    = "/revoke-cert"
	rootCertPath      = "/root"
	keyRolloverPath   = "/rollover-account-key"

	// How long do pending authorizations last before expiring?
	pendingAuthzExpire = time.Hour

	// How many contacts is an account allowed to have?
	maxContactsPerAcct = 2

	// badNonceEnvVar defines the environment variable name used to provide
	// a percentage value for how often good nonces should be rejected as if they
	// were bad. This can be used to exercise client nonce handling/retries.
	// To have the WFE not reject any good nonces, run Pebble like:
	//   PEBBLE_WFE_NONCEREJECT=0 pebble
	// To have the WFE reject 15% of good nonces, run Pebble like:
	//   PEBBLE_WFE_NONCEREJECT=15 pebble
	badNonceEnvVar = "PEBBLE_WFE_NONCEREJECT"

	// By default when no PEBBLE_WFE_NONCEREJECT is set, what percentage of good
	// nonces are rejected?
	defaultNonceReject = 15

	// POST requests with a JWS body must have the following Content-Type header
	expectedJWSContentType = "application/jose+json"

	// RFC 1034 says DNS labels have a max of 63 octets, and names have a max of 255
	// octets: https://tools.ietf.org/html/rfc1035#page-10. Since two of those octets
	// are taken up by the leading length byte and the trailing root period the actual
	// max length becomes 253.
	maxDNSIdentifierLength = 253

	// Invalid revocation reason codes.
	// The full list of codes can be found in Section 8.5.3.1 of ITU-T X.509
	// http://www.itu.int/rec/T-REC-X.509-201210-I/en
	unusedRevocationReason       = 7
	aACompromiseRevocationReason = 10
)
// requestEvent captures per-request metadata used for logging. ClientAddr
// comes from the request's RemoteAddr and Endpoint is filled in by the
// HandleFunc wrapper with the registered pattern plus any sub-path.
type requestEvent struct {
	ClientAddr string `json:",omitempty"`
	Endpoint   string `json:",omitempty"`
	Method     string `json:",omitempty"`
	UserAgent  string `json:",omitempty"`
}
// wfeHandlerFunc adapts a plain function to the wfeHandler interface,
// mirroring http.HandlerFunc.
type wfeHandlerFunc func(context.Context, *requestEvent, http.ResponseWriter, *http.Request)

// ServeHTTP invokes f with a fresh TODO context.
func (f wfeHandlerFunc) ServeHTTP(e *requestEvent, w http.ResponseWriter, r *http.Request) {
	ctx := context.TODO()
	f(ctx, e, w, r)
}

// wfeHandler is an http.Handler-like interface that additionally threads
// the per-request log event through to the handler.
type wfeHandler interface {
	ServeHTTP(e *requestEvent, w http.ResponseWriter, r *http.Request)
}
// topHandler wraps a wfeHandler so it can be registered with net/http,
// constructing the requestEvent for each incoming request.
type topHandler struct {
	wfe wfeHandler
}

// ServeHTTP builds a requestEvent from the request metadata and delegates
// to the wrapped wfeHandler.
func (th *topHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// TODO(@cpu): consider restoring X-Forwarded-For handling for ClientAddr
	rEvent := &requestEvent{
		ClientAddr: r.RemoteAddr,
		Method:     r.Method,
		UserAgent:  r.Header.Get("User-Agent"),
	}
	th.wfe.ServeHTTP(rEvent, w, r)
}
// WebFrontEndImpl is Pebble's ACME web front end: it holds the shared
// store, nonce state, clock, and the VA/CA implementations the HTTP
// handlers delegate to.
type WebFrontEndImpl struct {
	log   *log.Logger
	db    *db.MemoryStore
	nonce *nonceMap
	// nonceErrPercent is the percentage of good nonces deliberately
	// rejected to exercise client retry logic (see badNonceEnvVar).
	nonceErrPercent int
	clk             clock.Clock
	va              *va.VAImpl
	ca              *ca.CAImpl
	// strict enables additional request validation (e.g. the JWS
	// Content-Type check in validPOST).
	strict bool
}
// ToSURL is the terms-of-service URL advertised in the directory "meta"
// object and in the Link header of ToS-agreement errors.
const ToSURL = "data:text/plain,Do%20what%20thou%20wilt"
// New constructs a WebFrontEndImpl wired to the given logger, clock,
// store, VA and CA. The percentage of good nonces to reject is read from
// the PEBBLE_WFE_NONCEREJECT environment variable, falling back to
// defaultNonceReject, and clamped to a sane range.
func New(
	log *log.Logger,
	clk clock.Clock,
	db *db.MemoryStore,
	va *va.VAImpl,
	ca *ca.CAImpl,
	strict bool) WebFrontEndImpl {
	// Read the % of good nonces that should be rejected as bad nonces from
	// the environment, keeping the default when the value doesn't parse as
	// a base-10 integer.
	rejectPercent := defaultNonceReject
	if parsed, err := strconv.ParseInt(os.Getenv(badNonceEnvVar), 10, 0); err == nil {
		rejectPercent = int(parsed)
	}
	// If the value is out of range just clip it sensibly.
	switch {
	case rejectPercent < 0:
		rejectPercent = 0
	case rejectPercent > 100:
		rejectPercent = 99
	}
	log.Printf("Configured to reject %d%% of good nonces", rejectPercent)
	return WebFrontEndImpl{
		log:             log,
		db:              db,
		nonce:           newNonceMap(),
		nonceErrPercent: rejectPercent,
		clk:             clk,
		va:              va,
		ca:              ca,
		strict:          strict,
	}
}
// HandleFunc registers handler on mux for pattern, restricted to the given
// HTTP methods. Each request is wrapped so that before the handler runs it
// receives a fresh Replay-Nonce header, no-cache headers, a populated
// logEvent.Endpoint, a method check, and a one-minute timeout context.
func (wfe *WebFrontEndImpl) HandleFunc(
	mux *http.ServeMux,
	pattern string,
	handler wfeHandlerFunc,
	methods ...string) {
	methodsMap := make(map[string]bool)
	for _, m := range methods {
		methodsMap[m] = true
	}
	if methodsMap["GET"] && !methodsMap["HEAD"] {
		// Allow HEAD for any resource that allows GET
		methods = append(methods, "HEAD")
		methodsMap["HEAD"] = true
	}
	methodsStr := strings.Join(methods, ", ")
	defaultHandler := http.StripPrefix(pattern,
		&topHandler{
			wfe: wfeHandlerFunc(func(ctx context.Context, logEvent *requestEvent, response http.ResponseWriter, request *http.Request) {
				// Every response carries a fresh nonce so clients can chain
				// POSTs without a round trip to the nonce endpoint.
				response.Header().Set("Replay-Nonce", wfe.nonce.createNonce())
				logEvent.Endpoint = pattern
				if request.URL != nil {
					logEvent.Endpoint = path.Join(logEvent.Endpoint, request.URL.Path)
				}
				addNoCacheHeader(response)
				// Reject methods that weren't registered for this pattern.
				if !methodsMap[request.Method] {
					response.Header().Set("Allow", methodsStr)
					wfe.sendError(acme.MethodNotAllowed(), response)
					return
				}
				wfe.log.Printf("%s %s -> calling handler()\n", request.Method, logEvent.Endpoint)
				// TODO(@cpu): Configurable request timeout
				timeout := 1 * time.Minute
				ctx, cancel := context.WithTimeout(ctx, timeout)
				handler(ctx, logEvent, response, request)
				cancel()
			},
			)})
	mux.Handle(pattern, defaultHandler)
}
// sendError writes prob to the response as an application/problem+json
// document using the problem's HTTP status code. If the problem itself
// cannot be marshalled, a canned JSON error document is sent instead.
func (wfe *WebFrontEndImpl) sendError(prob *acme.ProblemDetails, response http.ResponseWriter) {
	problemDoc, err := marshalIndent(prob)
	if err != nil {
		problemDoc = []byte("{\"detail\": \"Problem marshalling error message.\"}")
	}
	response.Header().Set("Content-Type", "application/problem+json; charset=utf-8")
	response.WriteHeader(prob.HTTPStatus)
	// Explicitly discard the Write error, matching the convention used by
	// the other handlers in this file - there is no useful recovery.
	_, _ = response.Write(problemDoc)
}
// RootCert serves the CA's root certificate in PEM form, or responds 503
// when the CA has no root certificate available.
func (wfe *WebFrontEndImpl) RootCert(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {

	rootCert := wfe.ca.GetRootCert()
	if rootCert == nil {
		response.WriteHeader(http.StatusServiceUnavailable)
		return
	}

	response.Header().Set("Content-Type", "application/pem-certificate-chain; charset=utf-8")
	response.WriteHeader(http.StatusOK)
	_, _ = response.Write(rootCert.PEM())
}
// Handler constructs the http.Handler for the WFE, registering every ACME
// endpoint (with its allowed methods) on a fresh ServeMux.
func (wfe *WebFrontEndImpl) Handler() http.Handler {
	m := http.NewServeMux()
	wfe.HandleFunc(m, directoryPath, wfe.Directory, "GET")
	// Note for noncePath: "GET" also implies "HEAD"
	wfe.HandleFunc(m, noncePath, wfe.Nonce, "GET")
	wfe.HandleFunc(m, newAccountPath, wfe.NewAccount, "POST")
	wfe.HandleFunc(m, newOrderPath, wfe.NewOrder, "POST")
	wfe.HandleFunc(m, orderPath, wfe.Order, "GET")
	wfe.HandleFunc(m, orderFinalizePath, wfe.FinalizeOrder, "POST")
	wfe.HandleFunc(m, authzPath, wfe.Authz, "GET", "POST")
	wfe.HandleFunc(m, challengePath, wfe.Challenge, "GET", "POST")
	wfe.HandleFunc(m, certPath, wfe.Certificate, "GET")
	wfe.HandleFunc(m, acctPath, wfe.UpdateAccount, "POST")
	wfe.HandleFunc(m, keyRolloverPath, wfe.KeyRollover, "POST")
	wfe.HandleFunc(m, revokeCertPath, wfe.RevokeCert, "POST")
	wfe.HandleFunc(m, rootCertPath, wfe.RootCert, "GET")
	return m
}
// Directory returns the ACME directory object: a JSON map from operation
// names to the absolute endpoint URLs that serve them.
func (wfe *WebFrontEndImpl) Directory(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {

	endpoints := map[string]string{
		"newNonce":   noncePath,
		"newAccount": newAccountPath,
		"newOrder":   newOrderPath,
		"revokeCert": revokeCertPath,
		"keyChange":  keyRolloverPath,
	}

	response.Header().Set("Content-Type", "application/json; charset=utf-8")
	relDir, err := wfe.relativeDirectory(request, endpoints)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("unable to create directory"), response)
		return
	}
	response.Write(relDir)
}
// relativeDirectory converts the endpoint paths in directory into absolute
// URLs based on the incoming request, attaches the "meta" object carrying
// the terms-of-service URL, and returns the JSON encoding of the result.
func (wfe *WebFrontEndImpl) relativeDirectory(request *http.Request, directory map[string]string) ([]byte, error) {
	// Build a same-sized map holding the absolute-URL form of each entry.
	relativeDir := make(map[string]interface{}, len(directory))
	for name, endpoint := range directory {
		relativeDir[name] = wfe.relativeEndpoint(request, endpoint)
	}
	relativeDir["meta"] = map[string]string{
		"termsOfService": ToSURL,
	}
	// Marshalling known strings should never fail, but the error is
	// surfaced to the caller regardless.
	return marshalIndent(relativeDir)
}
// relativeEndpoint builds an absolute URL for endpoint using the incoming
// request's host and protocol.
func (wfe *WebFrontEndImpl) relativeEndpoint(request *http.Request, endpoint string) string {
	// If the request was received via TLS, use `https://` for the protocol.
	scheme := "http"
	if request.TLS != nil {
		scheme = "https"
	}
	// Allow upstream proxies to specify the forwarded protocol, overriding
	// our own guess.
	if forwarded := request.Header.Get("X-Forwarded-Proto"); forwarded != "" {
		scheme = forwarded
	}
	// Default to "localhost" when no request.Host is provided. Otherwise
	// requests with an empty `Host` produce results like
	// `http:///acme/new-authz`.
	host := request.Host
	if host == "" {
		host = "localhost"
	}
	result := url.URL{Scheme: scheme, Host: host, Path: endpoint}
	return result.String()
}
// Nonce issues a fresh nonce. The Replay-Nonce header itself is added by
// the wrapper installed in HandleFunc for every endpoint, so this handler
// only needs to reply 204 No Content.
func (wfe *WebFrontEndImpl) Nonce(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	response.WriteHeader(http.StatusNoContent)
}
// parseJWS parses body as a JWS, rejecting forms that ACME v2 forbids: an
// unprotected "header" field, a "signatures" array, multiple signatures,
// or no signature at all.
func (wfe *WebFrontEndImpl) parseJWS(body string) (*jose.JSONWebSignature, error) {
	// Parse the raw JWS JSON to check that:
	// * the unprotected Header field is not being used.
	// * the "signatures" member isn't present, just "signature".
	//
	// This must be done prior to `jose.parseSigned` since it will strip away
	// these headers.
	var unprotected struct {
		Header     map[string]string
		Signatures []interface{}
	}
	if err := json.Unmarshal([]byte(body), &unprotected); err != nil {
		return nil, errors.New("Parse error reading JWS")
	}
	// ACME v2 never uses values from the unprotected JWS header. Reject JWS that
	// include unprotected headers.
	if unprotected.Header != nil {
		return nil, errors.New(
			"JWS \"header\" field not allowed. All headers must be in \"protected\" field")
	}
	// ACME v2 never uses the "signatures" array of JSON serialized JWS, just the
	// mandatory "signature" field. Reject JWS that include the "signatures" array.
	if len(unprotected.Signatures) > 0 {
		return nil, errors.New(
			"JWS \"signatures\" field not allowed. Only the \"signature\" field should contain a signature")
	}
	parsedJWS, err := jose.ParseSigned(body)
	if err != nil {
		return nil, errors.New("Parse error reading JWS")
	}
	// Exactly one signature must be present; later code indexes
	// Signatures[0] relying on this invariant.
	if len(parsedJWS.Signatures) > 1 {
		return nil, errors.New("Too many signatures in POST body")
	}
	if len(parsedJWS.Signatures) == 0 {
		return nil, errors.New("POST JWS not signed")
	}
	return parsedJWS, nil
}
// jwsAuthType represents whether a given POST request is authenticated using
// a JWS with an embedded JWK (new-account, possibly revoke-cert) or an
// embedded Key ID or an unsupported/unknown auth type.
type jwsAuthType int

const (
	embeddedJWK jwsAuthType = iota
	embeddedKeyID
	invalidAuthType
)
// checkJWSAuthType examines a JWS' protected headers to determine if
// the request being authenticated by the JWS is identified using an embedded
// JWK or an embedded key ID. If mutually exclusive authentication types are
// specified at the same time, a problem is returned.
func checkJWSAuthType(jws *jose.JSONWebSignature) (jwsAuthType, *acme.ProblemDetails) {
	// checkJWSAuthType is called after parseJWS() which defends against the
	// incorrect number of signatures.
	header := jws.Signatures[0].Header
	hasKeyID := header.KeyID != ""
	hasJWK := header.JSONWebKey != nil
	switch {
	case hasKeyID && hasJWK:
		// There must not be a Key ID *and* an embedded JWK.
		return invalidAuthType, acme.MalformedProblem("jwk and kid header fields are mutually exclusive")
	case hasKeyID:
		return embeddedKeyID, nil
	case hasJWK:
		return embeddedJWK, nil
	default:
		return invalidAuthType, nil
	}
}
// extractJWK returns the JSONWebKey embedded in a JWS header, rejecting
// missing or invalid keys and the forbidden jwk+kid combination.
func (wfe *WebFrontEndImpl) extractJWK(_ *http.Request, jws *jose.JSONWebSignature) (*jose.JSONWebKey, *acme.ProblemDetails) {
	header := jws.Signatures[0].Header
	jwk := header.JSONWebKey
	switch {
	case jwk == nil:
		return nil, acme.MalformedProblem("No JWK in JWS header")
	case !jwk.Valid():
		return nil, acme.MalformedProblem("Invalid JWK in JWS header")
	case header.KeyID != "":
		return nil, acme.MalformedProblem("jwk and kid header fields are mutually exclusive.")
	}
	return jwk, nil
}
// lookupJWK returns a JSONWebKey referenced by the "kid" (key id) field in a
// JWS header. The kid must be a full account URL under acctPath for an
// account that exists in the store, and the JWS must not also embed a JWK.
func (wfe *WebFrontEndImpl) lookupJWK(request *http.Request, jws *jose.JSONWebSignature) (*jose.JSONWebKey, *acme.ProblemDetails) {
	header := jws.Signatures[0].Header
	accountURL := header.KeyID
	prefix := wfe.relativeEndpoint(request, acctPath)
	if !strings.HasPrefix(accountURL, prefix) {
		return nil, acme.MalformedProblem("Key ID (kid) in JWS header missing expected URL prefix")
	}
	accountID := strings.TrimPrefix(accountURL, prefix)
	if accountID == "" {
		return nil, acme.MalformedProblem("No key ID (kid) in JWS header")
	}
	account := wfe.db.GetAccountByID(accountID)
	if account == nil {
		return nil, acme.AccountDoesNotExistProblem(fmt.Sprintf(
			"Account %s not found.", accountURL))
	}
	// kid and jwk are mutually exclusive, mirroring extractJWK's check.
	if header.JSONWebKey != nil {
		return nil, acme.MalformedProblem("jwk and kid header fields are mutually exclusive.")
	}
	return account.Key, nil
}
// validPOST enforces the HTTP-level requirements on a JWS POST request:
// the JWS Content-Type (strict mode only), a Content-Length header, and
// the absence of a Replay-Nonce HTTP header.
func (wfe *WebFrontEndImpl) validPOST(request *http.Request) *acme.ProblemDetails {
	if wfe.strict {
		// Section 6.2 says to reject JWS requests without the expected
		// Content-Type using a status code of http.UnsupportedMediaType.
		_, haveContentType := request.Header["Content-Type"]
		if !haveContentType {
			return acme.UnsupportedMediaTypeProblem(
				`missing Content-Type header on POST. ` +
					`Content-Type must be "application/jose+json"`)
		}
		if got := request.Header.Get("Content-Type"); got != expectedJWSContentType {
			return acme.UnsupportedMediaTypeProblem(
				`Invalid Content-Type header on POST. ` +
					`Content-Type must be "application/jose+json"`)
		}
	}
	if _, haveContentLength := request.Header["Content-Length"]; !haveContentLength {
		return acme.MalformedProblem("missing Content-Length header on POST")
	}
	// Per 6.4.1 "Replay-Nonce" clients should not send a Replay-Nonce header
	// in the HTTP request; it needs to be part of the signed JWS request body.
	if _, haveNonceHeader := request.Header["Replay-Nonce"]; haveNonceHeader {
		return acme.MalformedProblem("HTTP requests should NOT contain Replay-Nonce header. Use JWS nonce field")
	}
	return nil
}
// keyExtractor is a function that returns a JSONWebKey based on input from a
// user-provided JSONWebSignature, for instance by extracting it from the
// input (extractJWK) or by looking it up in a database based on the input
// (lookupJWK).
type keyExtractor func(*http.Request, *jose.JSONWebSignature) (*jose.JSONWebKey, *acme.ProblemDetails)
// NOTE: Unlike `verifyPOST` from the Boulder WFE this version does not
// presently handle the `regCheck` parameter or do any lookups for existing
// accounts.
//
// verifyPOST validates the HTTP shape of a JWS POST request, parses the
// JWS from the request body, obtains the signing key via kx, and verifies
// the signature. On success it returns the JWS payload, the JWS "url"
// header, and the signing key.
func (wfe *WebFrontEndImpl) verifyPOST(
	ctx context.Context,
	logEvent *requestEvent,
	request *http.Request,
	kx keyExtractor) ([]byte, string, *jose.JSONWebKey, *acme.ProblemDetails) {
	if prob := wfe.validPOST(request); prob != nil {
		return nil, "", nil, prob
	}
	if request.Body == nil {
		return nil, "", nil, acme.MalformedProblem("no body on POST")
	}
	bodyBytes, err := ioutil.ReadAll(request.Body)
	if err != nil {
		return nil, "", nil, acme.InternalErrorProblem("unable to read request body")
	}
	body := string(bodyBytes)
	parsedJWS, err := wfe.parseJWS(body)
	if err != nil {
		return nil, "", nil, acme.MalformedProblem(err.Error())
	}
	// kx either extracts the embedded JWK or resolves the kid to a stored
	// account key, depending on the endpoint.
	pubKey, prob := kx(request, parsedJWS)
	if prob != nil {
		return nil, "", nil, prob
	}
	return wfe.verifyJWS(pubKey, parsedJWS, request)
}
// verifyJWSSignatureAndAlgorithm checks that the parsed JWS verifies under
// the given public key and that the algorithm used is acceptable (the
// latter is still to be implemented). Returns the verified payload.
func (wfe *WebFrontEndImpl) verifyJWSSignatureAndAlgorithm(
	pubKey *jose.JSONWebKey,
	parsedJWS *jose.JSONWebSignature) ([]byte, error) {
	// TODO(@cpu): `checkAlgorithm()`
	payload, err := parsedJWS.Verify(pubKey)
	if err != nil {
		return nil, err
	}
	return []byte(payload), nil
}
// extractJWSURL pulls the "url" protected header out of a parsed JWS. The
// second return value reports whether a non-empty string value was found.
func (wfe *WebFrontEndImpl) extractJWSURL(
	parsedJWS *jose.JSONWebSignature) (string, bool) {
	extra := parsedJWS.Signatures[0].Header.ExtraHeaders
	headerURL, ok := extra[jose.HeaderKey("url")].(string)
	if !ok || headerURL == "" {
		return "", false
	}
	return headerURL, true
}
// verifyJWS checks the signature on parsedJWS with pubKey and then enforces
// the ACME anti-replay and request-binding rules: a valid known nonce
// (deliberately rejected nonceErrPercent% of the time to exercise client
// retries) and a "url" protected header that exactly matches the HTTPS URL
// the request was received on. Returns the payload, the header URL, and the
// verifying key on success.
func (wfe *WebFrontEndImpl) verifyJWS(
	pubKey *jose.JSONWebKey,
	parsedJWS *jose.JSONWebSignature,
	request *http.Request) ([]byte, string, *jose.JSONWebKey, *acme.ProblemDetails) {
	payload, err := wfe.verifyJWSSignatureAndAlgorithm(pubKey, parsedJWS)
	if err != nil {
		return nil, "", nil, acme.MalformedProblem("JWS verification error")
	}
	headerURL, ok := wfe.extractJWSURL(parsedJWS)
	if !ok {
		return nil, "", nil, acme.MalformedProblem("JWS header parameter 'url' required.")
	}
	nonce := parsedJWS.Signatures[0].Header.Nonce
	if len(nonce) == 0 {
		return nil, "", nil, acme.BadNonceProblem("JWS has no anti-replay nonce")
	}
	// Roll a random number between 0 and 100.
	nonceRoll := rand.Intn(100)
	// If the nonce is not valid OR if the nonceRoll was less than the
	// nonceErrPercent, fail with an error
	if !wfe.nonce.validNonce(nonce) || nonceRoll < wfe.nonceErrPercent {
		return nil, "", nil, acme.BadNonceProblem(fmt.Sprintf(
			"JWS has an invalid anti-replay nonce: %s", nonce))
	}
	expectedURL := url.URL{
		// NOTE(@cpu): ACME **REQUIRES** HTTPS and Pebble is hardcoded to offer the
		// API over HTTPS.
		Scheme: "https",
		Host:   request.Host,
		Path:   request.RequestURI,
	}
	if expectedURL.String() != headerURL {
		return nil, "", nil, acme.MalformedProblem(fmt.Sprintf(
			"JWS header parameter 'url' incorrect. Expected %q, got %q",
			expectedURL.String(), headerURL))
	}
	return payload, headerURL, pubKey, nil
}
// isASCII reports whether every rune in str is within the ASCII range.
func isASCII(str string) bool {
	nonASCII := func(r rune) bool { return r > unicode.MaxASCII }
	return strings.IndexFunc(str, nonASCII) == -1
}
// verifyContacts validates the contact URLs on an account: at most
// maxContactsPerAcct entries, each a "mailto:" URL carrying a non-empty,
// ASCII, parseable email address. Providing no contacts at all is
// acceptable; a nil return means the contacts passed validation.
func (wfe *WebFrontEndImpl) verifyContacts(acct acme.Account) *acme.ProblemDetails {
	contacts := acct.Contact
	// Providing no Contacts is perfectly acceptable. (len of a nil slice is
	// 0, so the previous explicit nil check was redundant.)
	if len(contacts) == 0 {
		return nil
	}
	if len(contacts) > maxContactsPerAcct {
		return acme.MalformedProblem(fmt.Sprintf(
			"too many contacts provided: %d > %d", len(contacts), maxContactsPerAcct))
	}
	for _, c := range contacts {
		parsed, err := url.Parse(c)
		if err != nil {
			return acme.InvalidContactProblem(fmt.Sprintf("contact %q is invalid", c))
		}
		if parsed.Scheme != "mailto" {
			return acme.UnsupportedContactProblem(fmt.Sprintf(
				"contact method %q is not supported", parsed.Scheme))
		}
		// For mailto: URLs the address is the opaque part.
		email := parsed.Opaque
		// An empty or omitted Contact array should be used instead of an empty contact
		if email == "" {
			return acme.InvalidContactProblem("empty contact email")
		}
		if !isASCII(email) {
			return acme.InvalidContactProblem(fmt.Sprintf(
				"contact email %q contains non-ASCII characters", email))
		}
		// NOTE(@cpu): ParseAddress may allow invalid emails since it supports RFC 5322
		// display names. This is sufficient for Pebble because we don't intend to
		// use the emails for anything and check this as a best effort for client
		// developers to test invalid contact problems.
		_, err = mail.ParseAddress(email)
		if err != nil {
			return acme.InvalidContactProblem(fmt.Sprintf(
				"contact email %q is invalid", email))
		}
	}
	return nil
}
// UpdateAccount handles POSTs to an account URL. The JWS is authenticated
// against the account key referenced by its kid, and the request body may
// update the account's contacts and/or set its status to "deactivated".
// A body with neither change simply echoes the existing account.
func (wfe *WebFrontEndImpl) UpdateAccount(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	body, _, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// updateAcctReq is the ACME account information submitted by the client
	var updateAcctReq struct {
		Contact []string `json:"contact"`
		Status  string   `json:"status,omitempty"`
	}
	err := json.Unmarshal(body, &updateAcctReq)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling account update JSON body"), response)
		return
	}
	existingAcct, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// if this update contains no contacts or deactivated status,
	// simply return the existing account and return early.
	if updateAcctReq.Contact == nil && updateAcctReq.Status != acme.StatusDeactivated {
		err = wfe.writeJsonResponse(response, http.StatusOK, existingAcct)
		if err != nil {
			wfe.sendError(acme.InternalErrorProblem("Error marshalling account"), response)
			return
		}
		return
	}
	// Create a new account object with the existing data
	newAcct := &core.Account{
		Account: acme.Account{
			Contact: existingAcct.Contact,
			Status:  existingAcct.Status,
			Orders:  existingAcct.Orders,
		},
		Key: existingAcct.Key,
		ID:  existingAcct.ID,
	}
	switch {
	case updateAcctReq.Status == acme.StatusDeactivated:
		// Deactivation takes precedence over any contact update (this case
		// is evaluated first).
		newAcct.Status = updateAcctReq.Status
	case updateAcctReq.Status != "" && updateAcctReq.Status != newAcct.Status:
		// Any other status change is rejected.
		wfe.sendError(
			acme.MalformedProblem(fmt.Sprintf(
				"Invalid account status: %q", updateAcctReq.Status)), response)
		return
	case updateAcctReq.Contact != nil:
		newAcct.Contact = updateAcctReq.Contact
		// Verify that the contact information provided is supported & valid
		prob = wfe.verifyContacts(newAcct.Account)
		if prob != nil {
			wfe.sendError(prob, response)
			return
		}
	}
	err = wfe.db.UpdateAccountByID(existingAcct.ID, newAcct)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error storing updated account"), response)
		return
	}
	err = wfe.writeJsonResponse(response, http.StatusOK, newAcct)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling account"), response)
		return
	}
}
// verifyKeyRollover validates the inner JWS payload of a key roll-over
// request: the "account" URL must reference existingAcct, the "oldKey"
// must match the account's current key, and the proposed new key must
// actually differ from the old one.
func (wfe *WebFrontEndImpl) verifyKeyRollover(
	innerPayload []byte,
	existingAcct *core.Account,
	newKey *jose.JSONWebKey,
	request *http.Request) *acme.ProblemDetails {
	var innerContent struct {
		Account string
		OldKey  jose.JSONWebKey
	}
	err := json.Unmarshal(innerPayload, &innerContent)
	if err != nil {
		return acme.MalformedProblem("Error unmarshaling key roll-over inner JWS body")
	}
	// Check account ID
	prefix := wfe.relativeEndpoint(request, acctPath)
	if !strings.HasPrefix(innerContent.Account, prefix) {
		return acme.MalformedProblem(fmt.Sprintf("Key ID (account) in inner JWS body missing expected URL prefix (provided account value: %q)", innerContent.Account))
	}
	accountID := strings.TrimPrefix(innerContent.Account, prefix)
	if accountID == "" {
		return acme.MalformedProblem(fmt.Sprintf("No key ID (account) in inner JWS body (provided account value: %q)", innerContent.Account))
	}
	if accountID != existingAcct.ID {
		return acme.MalformedProblem(fmt.Sprintf("Key roll-over inner JWS body contains wrong account ID (provided account value: %q)", innerContent.Account))
	}
	// Verify inner key
	if !keyDigestEquals(innerContent.OldKey, *existingAcct.Key) {
		return acme.MalformedProblem("Key roll-over inner JWS body JSON contains wrong old key")
	}
	// Check for same key
	if keyDigestEquals(innerContent.OldKey, newKey) {
		return acme.MalformedProblem("New and old key are identical")
	}
	return nil
}
// KeyRollover implements the ACME key-change endpoint: an outer JWS signed
// by the account's current key whose payload is an inner JWS signed by the
// proposed new key. After both layers verify and the inner payload is
// validated, the account's key is swapped in the store.
func (wfe *WebFrontEndImpl) KeyRollover(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// Extract and parse outer JWS, and retrieve account
	body, outerHeaderURL, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	existingAcct, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Extract inner JWS
	parsedInnerJWS, err := wfe.parseJWS(string(body))
	if err != nil {
		wfe.sendError(acme.MalformedProblem(err.Error()), response)
		return
	}
	// The inner JWS must embed the proposed new key as a JWK...
	newPubKey, prob := wfe.extractJWK(request, parsedInnerJWS)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// ...and must verify under that new key.
	innerPayload, err := wfe.verifyJWSSignatureAndAlgorithm(newPubKey, parsedInnerJWS)
	if err != nil {
		wfe.sendError(acme.MalformedProblem("Inner JWS verification error"), response)
		return
	}
	innerHeaderURL, ok := wfe.extractJWSURL(parsedInnerJWS)
	if !ok {
		wfe.sendError(acme.MalformedProblem("Inner JWS header parameter 'url' required."), response)
		return
	}
	// Inner and outer JWS must be bound to the same request URL.
	if innerHeaderURL != outerHeaderURL {
		wfe.sendError(acme.MalformedProblem("JWS header parameter 'url' differs for inner and outer JWS."), response)
		return
	}
	prob = wfe.verifyKeyRollover(innerPayload, existingAcct, newPubKey, request)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Ok, now change account key
	err = wfe.db.ChangeAccountKey(existingAcct, newPubKey)
	if err != nil {
		// A db.ExistingAccountError gets a 409 Conflict with a Location
		// header pointing at the conflicting account.
		if existingAccountError, ok := err.(*db.ExistingAccountError); ok {
			acctURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", acctPath, existingAccountError.MatchingAccount.ID))
			response.Header().Set("Location", acctURL)
			response.WriteHeader(http.StatusConflict)
		} else {
			wfe.sendError(acme.InternalErrorProblem(fmt.Sprintf("Error rolling over account key (%s)", err.Error())), response)
		}
		return
	}
	response.WriteHeader(http.StatusOK)
}
// NewAccount handles new-account requests. The JWS must carry an embedded
// JWK since the account does not exist yet and there is no key ID to
// reference. An existing key short-circuits to a 200 with a Location
// header; otherwise a new valid account is created after the ToS agreement
// and contact checks pass. (The only behavioral-neutral change from the
// original is replacing the non-idiomatic `== false` comparison with `!`.)
func (wfe *WebFrontEndImpl) NewAccount(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// We use extractJWK rather than lookupJWK here because the account is not
	// yet created, so the user provides the full key in a JWS header rather
	// than referring to an existing key.
	body, _, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.extractJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// newAcctReq is the ACME account information submitted by the client
	var newAcctReq struct {
		Contact            []string `json:"contact"`
		ToSAgreed          bool     `json:"termsOfServiceAgreed"`
		OnlyReturnExisting bool     `json:"onlyReturnExisting"`
	}
	err := json.Unmarshal(body, &newAcctReq)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling body JSON"), response)
		return
	}
	// Lookup existing account to exit early if it exists
	existingAcct, _ := wfe.db.GetAccountByKey(key)
	if existingAcct != nil {
		// If there is an existing account then return a Location header pointing to
		// the account and a 200 OK response
		acctURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", acctPath, existingAcct.ID))
		response.Header().Set("Location", acctURL)
		_ = wfe.writeJsonResponse(response, http.StatusOK, existingAcct)
		return
	} else if existingAcct == nil && newAcctReq.OnlyReturnExisting {
		// If there *isn't* an existing account and the created account request
		// contained OnlyReturnExisting then this is an error - return now before
		// creating a new account with the key
		wfe.sendError(acme.AccountDoesNotExistProblem(
			"unable to find existing account for only-return-existing request"), response)
		return
	}
	if !newAcctReq.ToSAgreed {
		response.Header().Add("Link", link(ToSURL, "terms-of-service"))
		wfe.sendError(
			acme.AgreementRequiredProblem(
				"Provided account did not agree to the terms of service"),
			response)
		return
	}
	// Create a new account object with the provided contact
	newAcct := core.Account{
		Account: acme.Account{
			Contact: newAcctReq.Contact,
			// New accounts are valid to start.
			Status: acme.StatusValid,
		},
		Key: key,
	}
	// Verify that the contact information provided is supported & valid
	prob = wfe.verifyContacts(newAcct.Account)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	count, err := wfe.db.AddAccount(&newAcct)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error saving account"), response)
		return
	}
	wfe.log.Printf("There are now %d accounts in memory\n", count)
	acctURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", acctPath, newAcct.ID))
	response.Header().Add("Location", acctURL)
	err = wfe.writeJsonResponse(response, http.StatusCreated, newAcct)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling account"), response)
		return
	}
}
// isDNSCharacter is ported from Boulder's `policy/pa.go` implementation.
// It reports whether ch is permitted in a DNS identifier: ASCII letters,
// digits, '.', '-', or the wildcard character '*'.
func isDNSCharacter(ch byte) bool {
	switch {
	case ch >= 'a' && ch <= 'z':
		return true
	case ch >= 'A' && ch <= 'Z':
		return true
	case ch >= '0' && ch <= '9':
		return true
	case ch == '.', ch == '-', ch == '*':
		return true
	default:
		return false
	}
}
/* TODO(@cpu): Pebble's validation of domain names is still pretty weak
* compared to Boulder. We should consider adding:
* 1) Checks for the # of labels, and the size of each label
* 2) Checks against the Public Suffix List
* 3) Checks against a configured domain blocklist
* 4) Checks for malformed IDN, RLDH, etc
*/
// verifyOrder checks that a new order is considered well formed. Light
// validation is done on the order identifiers: all must be DNS-type and
// non-empty, contain only legal DNS characters, respect the maximum length,
// not be an IP address or end in a period, and contain at most a single
// leftmost wildcard label. Returns nil on success, otherwise a problem
// describing the first failure found.
func (wfe *WebFrontEndImpl) verifyOrder(order *core.Order) *acme.ProblemDetails {
	// Defensive nil check. This must run *before* taking the lock - the
	// previous version called order.RLock() first, which would panic on a nil
	// order before the check could ever fire.
	if order == nil {
		return acme.InternalErrorProblem("Order is nil")
	}
	// Lock the order for reading
	order.RLock()
	defer order.RUnlock()

	idents := order.Identifiers
	if len(idents) == 0 {
		return acme.MalformedProblem("Order did not specify any identifiers")
	}

	// Check that all of the identifiers in the new-order are DNS type
	for _, ident := range idents {
		if ident.Type != acme.IdentifierDNS {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included non-DNS type identifier: type %q, value %q",
				ident.Type, ident.Value))
		}

		rawDomain := ident.Value
		if rawDomain == "" {
			return acme.MalformedProblem(
				"Order included DNS identifier with empty value")
		}

		for _, ch := range []byte(rawDomain) {
			if !isDNSCharacter(ch) {
				return acme.MalformedProblem(fmt.Sprintf(
					"Order included DNS identifier with a value containing an illegal character: %q",
					ch))
			}
		}

		if len(rawDomain) > maxDNSIdentifierLength {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included DNS identifier that was longer than %d characters",
				maxDNSIdentifierLength))
		}

		if ip := net.ParseIP(rawDomain); ip != nil {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included a DNS identifier with an IP address value: %q\n",
				rawDomain))
		}

		if strings.HasSuffix(rawDomain, ".") {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included a DNS identifier with a value ending in a period: %q\n",
				rawDomain))
		}

		// If there is a wildcard character in the ident value there should be
		// only *one* instance, and it must be the leftmost "*." label.
		// Count once instead of twice as the original did.
		wildcards := strings.Count(rawDomain, "*")
		if wildcards > 1 {
			return acme.MalformedProblem(fmt.Sprintf(
				"Order included DNS type identifier with illegal wildcard value: "+
					"too many wildcards %q",
				rawDomain))
		} else if wildcards == 1 {
			// If there is one wildcard character it should be the only character in
			// the leftmost label.
			if !strings.HasPrefix(rawDomain, "*.") {
				return acme.MalformedProblem(fmt.Sprintf(
					"Order included DNS type identifier with illegal wildcard value: "+
						"wildcard isn't leftmost prefix %q",
					rawDomain))
			}
		}
	}
	return nil
}
// makeAuthorizations populates an order with new authz's, one per name on
// the order. The request parameter is required to make the authz URLs
// absolute based on the request host. On error the order is left unmodified.
func (wfe *WebFrontEndImpl) makeAuthorizations(order *core.Order, request *http.Request) error {
	var auths []string
	var authObs []*core.Authorization

	// Snapshot the order's names under a read lock and release it immediately.
	// The previous version held the RLock across the whole loop and returned
	// early on error *without* unlocking, leaking the read lock.
	order.RLock()
	names := order.Names
	order.RUnlock()

	// Create one authz for each name in the order
	for _, name := range names {
		now := wfe.clk.Now().UTC()
		expires := now.Add(pendingAuthzExpire)
		ident := acme.Identifier{
			Type:  acme.IdentifierDNS,
			Value: name,
		}
		authz := &core.Authorization{
			ID:          newToken(),
			ExpiresDate: expires,
			Order:       order,
			Authorization: acme.Authorization{
				Status:     acme.StatusPending,
				Identifier: ident,
				Expires:    expires.UTC().Format(time.RFC3339),
			},
		}
		authz.URL = wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", authzPath, authz.ID))
		// Create the challenges for this authz
		if err := wfe.makeChallenges(authz, request); err != nil {
			return err
		}
		// Save the authorization in memory
		count, err := wfe.db.AddAuthorization(authz)
		if err != nil {
			return err
		}
		wfe.log.Printf("There are now %d authorizations in the db\n", count)
		// authz.URL is exactly the relative endpoint computed above; no need
		// to recompute it as the original did.
		auths = append(auths, authz.URL)
		authObs = append(authObs, authz)
	}

	// Lock the order for writing & update the order's authorizations
	order.Lock()
	order.Authorizations = auths
	order.AuthorizationObjects = authObs
	order.Unlock()
	return nil
}
// makeChallenge builds a single pending challenge of chalType for the given
// authz, stores it in the in-memory database, and returns it. The request is
// needed to construct an absolute challenge URL.
func (wfe *WebFrontEndImpl) makeChallenge(
	chalType string,
	authz *core.Authorization,
	request *http.Request) (*core.Challenge, error) {
	// Mint a fresh ID (and a separate token) for the new challenge
	chalID := newToken()
	challenge := &core.Challenge{
		ID: chalID,
		Challenge: acme.Challenge{
			Type:   chalType,
			Token:  newToken(),
			URL:    wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", challengePath, chalID)),
			Status: acme.StatusPending,
		},
		Authz: authz,
	}
	// Register it with the in-memory database; the returned count is unused.
	if _, err := wfe.db.AddChallenge(challenge); err != nil {
		return nil, err
	}
	return challenge, nil
}
// makeChallenges populates an authz with new challenges. The request parameter
// is required to make the challenge URL's absolute based on the request host.
func (wfe *WebFrontEndImpl) makeChallenges(authz *core.Authorization, request *http.Request) error {
	// Decide which challenge types apply: wildcard identifiers only receive
	// a DNS-01 challenge to match Boulder/Let's Encrypt wildcard issuance
	// policy, everything else gets every enabled type.
	var chalTypes []string
	if strings.HasPrefix(authz.Identifier.Value, "*.") {
		chalTypes = []string{acme.ChallengeDNS01}
	} else {
		chalTypes = []string{acme.ChallengeHTTP01, acme.ChallengeTLSALPN01, acme.ChallengeDNS01}
	}

	created := make([]*acme.Challenge, 0, len(chalTypes))
	for _, chalType := range chalTypes {
		chal, err := wfe.makeChallenge(chalType, authz, request)
		if err != nil {
			return err
		}
		created = append(created, &chal.Challenge)
	}

	// Lock the authorization for writing and swap in the new challenge set
	authz.Lock()
	authz.Challenges = created
	authz.Unlock()
	return nil
}
// NewOrder creates a new Order request and populates its authorizations.
// The request must be authenticated by an existing account key; the order's
// identifiers are validated before any authorizations are created.
func (wfe *WebFrontEndImpl) NewOrder(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	body, _, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	existingReg, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// Unpack the order request body
	var newOrder acme.Order
	err := json.Unmarshal(body, &newOrder)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling body JSON: "+err.Error()), response)
		return
	}

	// Orders expire one day after creation. Use the injected clock rather
	// than time.Now() - every other expiry calculation in this file goes
	// through wfe.clk, and using the wall clock here broke tests that
	// manipulate the fake clock.
	expires := wfe.clk.Now().AddDate(0, 0, 1)
	order := &core.Order{
		ID:        newToken(),
		AccountID: existingReg.ID,
		Order: acme.Order{
			Status:  acme.StatusPending,
			Expires: expires.UTC().Format(time.RFC3339),
			// Only the Identifiers, NotBefore and NotAfter from the submitted order
			// are carried forward
			Identifiers: newOrder.Identifiers,
			NotBefore:   newOrder.NotBefore,
			NotAfter:    newOrder.NotAfter,
		},
		ExpiresDate: expires,
	}

	// Verify the details of the order before creating authorizations
	if prob := wfe.verifyOrder(order); prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// Collect all of the DNS identifier values up into a []string
	var orderNames []string
	for _, ident := range order.Identifiers {
		orderNames = append(orderNames, ident.Value)
	}
	// Store the unique lower version of the names on the order object
	order.Names = uniqueLowerNames(orderNames)

	// Create the authorizations for the order
	err = wfe.makeAuthorizations(order, request)
	if err != nil {
		wfe.sendError(
			acme.InternalErrorProblem("Error creating authorizations for order"), response)
		return
	}

	// Add the order to the in-memory DB
	count, err := wfe.db.AddOrder(order)
	if err != nil {
		wfe.sendError(
			acme.InternalErrorProblem("Error saving order"), response)
		return
	}
	wfe.log.Printf("Added order %q to the db\n", order.ID)
	wfe.log.Printf("There are now %d orders in the db\n", count)

	// Get the stored order back from the DB. The memorystore will set the order's
	// status for us.
	storedOrder := wfe.db.GetOrderByID(order.ID)

	orderURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", orderPath, storedOrder.ID))
	response.Header().Add("Location", orderURL)

	orderResp := wfe.orderForDisplay(storedOrder, request)
	err = wfe.writeJsonResponse(response, http.StatusCreated, orderResp)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling order"), response)
		return
	}
}
// orderForDisplay preps a *core.Order for display by populating some fields
// based on the http.Request provided and returning an acme.Order ready to be
// rendered to JSON for display to an API client.
func (wfe *WebFrontEndImpl) orderForDisplay(
	order *core.Order,
	request *http.Request) acme.Order {
	// Hold a read lock while copying out of the internal order object
	order.RLock()
	defer order.RUnlock()

	// Work on a copy of the embedded acme.Order
	result := order.Order

	// Randomize the order of the order authorization URLs as well as the
	// order's identifiers. ACME draft Section 7.4 "Applying for Certificate
	// Issuance" says:
	//   Clients SHOULD NOT make any assumptions about the sort order of
	//   "identifiers" or "authorizations" elements in the returned order
	//   object.
	swapAuthz := func(i, j int) {
		result.Authorizations[i], result.Authorizations[j] = result.Authorizations[j], result.Authorizations[i]
	}
	swapIdent := func(i, j int) {
		result.Identifiers[i], result.Identifiers[j] = result.Identifiers[j], result.Identifiers[i]
	}
	rand.Shuffle(len(result.Authorizations), swapAuthz)
	rand.Shuffle(len(result.Identifiers), swapIdent)

	// Point the finalization URL at this order
	result.Finalize = wfe.relativeEndpoint(request,
		fmt.Sprintf("%s%s", orderFinalizePath, order.ID))

	// When a certificate has been issued for this order, expose its URL by
	// constructing a relative path based on the HTTP request & the cert ID
	if order.CertificateObject != nil {
		result.Certificate = wfe.relativeEndpoint(
			request,
			certPath+order.CertificateObject.ID)
	}
	return result
}
// Order retrieves the details of an existing order and renders it as JSON.
func (wfe *WebFrontEndImpl) Order(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// The order ID is the path suffix after the order path prefix
	orderID := strings.TrimPrefix(request.URL.Path, orderPath)
	order := wfe.db.GetOrderByID(orderID)
	// Unknown orders get a plain 404
	if order == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}
	// Prepare the order for display and write it out
	displayOrder := wfe.orderForDisplay(order, request)
	if err := wfe.writeJsonResponse(response, http.StatusOK, displayOrder); err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling order"), response)
		return
	}
}
// FinalizeOrder accepts a CSR for a ready order, validates that the CSR's
// names exactly match the order's names, marks the order processing, and
// hands it to the CA asynchronously for issuance.
func (wfe *WebFrontEndImpl) FinalizeOrder(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// Verify the POST request
	body, _, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Find the account corresponding to the key that authenticated the POST request
	existingAcct, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Find the order specified by the order ID
	orderID := strings.TrimPrefix(request.URL.Path, orderFinalizePath)
	existingOrder := wfe.db.GetOrderByID(orderID)
	if existingOrder == nil {
		response.WriteHeader(http.StatusNotFound)
		wfe.sendError(acme.NotFoundProblem(fmt.Sprintf(
			"No order %q found for account ID %q", orderID, existingAcct.ID)), response)
		return
	}
	// Lock the order for reading the properties we need to check, then
	// immediately release - orderForDisplay below also takes a read lock.
	existingOrder.RLock()
	orderAccountID := existingOrder.AccountID
	orderStatus := existingOrder.Status
	orderExpires := existingOrder.ExpiresDate
	orderNames := existingOrder.Names
	existingOrder.RUnlock()
	// If the order doesn't belong to the account that authenticated the POST
	// request then pretend it doesn't exist.
	if orderAccountID != existingAcct.ID {
		response.WriteHeader(http.StatusNotFound)
		wfe.sendError(acme.NotFoundProblem(fmt.Sprintf(
			"No order %q found for account ID %q", orderID, existingAcct.ID)), response)
		return
	}
	// The existing order must be in a ready status to finalize it
	if orderStatus != acme.StatusReady {
		wfe.sendError(acme.MalformedProblem(fmt.Sprintf(
			"Order's status (%q) was not %s", orderStatus, acme.StatusReady)), response)
		return
	}
	// The existing order must not be expired
	if orderExpires.Before(wfe.clk.Now()) {
		wfe.sendError(acme.NotFoundProblem(fmt.Sprintf(
			"Order %q expired %s", orderID, orderExpires)), response)
		return
	}
	// The finalize POST body is expected to be the bytes from a base64 raw url
	// encoded CSR
	var finalizeMessage struct {
		CSR string
	}
	err := json.Unmarshal(body, &finalizeMessage)
	if err != nil {
		wfe.sendError(acme.MalformedProblem(fmt.Sprintf(
			"Error unmarshaling finalize order request body: %s", err.Error())), response)
		return
	}
	csrBytes, err := base64.RawURLEncoding.DecodeString(finalizeMessage.CSR)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error decoding Base64url-encoded CSR: "+err.Error()), response)
		return
	}
	parsedCSR, err := x509.ParseCertificateRequest(csrBytes)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error parsing Base64url-encoded CSR: "+err.Error()), response)
		return
	}
	// Check that the CSR has the same number of names as the initial order contained
	csrNames := uniqueLowerNames(parsedCSR.DNSNames)
	if len(csrNames) != len(orderNames) {
		wfe.sendError(acme.UnauthorizedProblem(
			"Order includes different number of names than CSR specifies"), response)
		return
	}
	// Check that the CSR's names match the order names exactly (both slices
	// are uniqueLowerNames output, so they're sorted and comparable by index)
	for i, name := range orderNames {
		if name != csrNames[i] {
			wfe.sendError(acme.UnauthorizedProblem(
				fmt.Sprintf("CSR is missing Order domain %q", name)), response)
			return
		}
	}
	// Lock and update the order with the parsed CSR and the processing state.
	// The status must be set *inside* this critical section, before the CA
	// goroutine starts - the previous version wrote Status without the lock
	// after launching CompleteOrder, racing with the CA's use of the order.
	existingOrder.Lock()
	existingOrder.ParsedCSR = parsedCSR
	existingOrder.BeganProcessing = true
	existingOrder.Status = acme.StatusProcessing
	existingOrder.Unlock()
	// Ask the CA to complete the order in a separate goroutine.
	wfe.log.Printf("Order %s is fully authorized. Processing finalization", orderID)
	go wfe.ca.CompleteOrder(existingOrder)
	// Prepare the order for display as JSON
	orderReq := wfe.orderForDisplay(existingOrder, request)
	orderURL := wfe.relativeEndpoint(request, fmt.Sprintf("%s%s", orderPath, existingOrder.ID))
	response.Header().Add("Location", orderURL)
	err = wfe.writeJsonResponse(response, http.StatusOK, orderReq)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling order"), response)
		return
	}
}
// prepAuthorizationForDisplay prepares the provided acme.Authorization for
// display to an ACME client: wildcard identifiers are normalized (prefix
// stripped, Wildcard flag set), non-pending authorizations only show the
// challenges that decided them, and the challenge order is randomized.
func prepAuthorizationForDisplay(authz acme.Authorization) acme.Authorization {
	// Copy the authz to mutate and return
	result := authz

	// If the authorization identifier has a wildcard in the value, remove it and
	// set the Wildcard field to true
	if identVal := result.Identifier.Value; strings.HasPrefix(identVal, "*.") {
		result.Identifier.Value = strings.TrimPrefix(identVal, "*.")
		result.Wildcard = true
	}

	var chals []*acme.Challenge
	if result.Status != acme.StatusPending {
		// If the authz isn't pending, filter the challenges displayed to only
		// those with an associated error or that are status valid - i.e. the
		// ones that made the authz valid or invalid.
		for _, c := range result.Challenges {
			if c.Error != nil || c.Status == acme.StatusValid {
				chals = append(chals, c)
			}
		}
	} else {
		// Copy the slice before shuffling below. The struct copy above shares
		// its Challenges backing array with the caller's authz, so the
		// previous in-place shuffle mutated the canonical challenge list of
		// the stored authorization.
		chals = make([]*acme.Challenge, len(result.Challenges))
		copy(chals, result.Challenges)
	}
	result.Challenges = chals

	// Randomize the order of the challenges in the returned authorization.
	// Clients should not make any assumptions about the sort order.
	rand.Shuffle(len(result.Challenges), func(i, j int) {
		result.Challenges[i], result.Challenges[j] = result.Challenges[j], result.Challenges[i]
	})
	return result
}
// Authz serves an authorization. GETs render its current state; POSTs must
// be authenticated by the owning account and may only deactivate it.
func (wfe *WebFrontEndImpl) Authz(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	authzID := strings.TrimPrefix(request.URL.Path, authzPath)
	authz := wfe.db.GetAuthorizationByID(authzID)
	if authz == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}

	if request.Method == "POST" {
		body, _, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
		if prob != nil {
			wfe.sendError(prob, response)
			return
		}
		existingAcct, prob := wfe.getAcctByKey(key)
		if prob != nil {
			wfe.sendError(prob, response)
			return
		}
		// Only the account that owns the authorization may update it
		if authz.Order.AccountID != existingAcct.ID {
			wfe.sendError(acme.UnauthorizedProblem(
				"Account does not own authorization"), response)
			return
		}
		var deactivateRequest struct {
			Status string
		}
		err := json.Unmarshal(body, &deactivateRequest)
		if err != nil {
			wfe.sendError(acme.MalformedProblem(
				fmt.Sprintf("Malformed authorization update: %s",
					err.Error())), response)
			return
		}
		// "deactivated" is the only permitted status transition
		if deactivateRequest.Status != "deactivated" {
			wfe.sendError(acme.MalformedProblem(
				fmt.Sprintf("Malformed authorization update, status must be \"deactivated\" not %q",
					deactivateRequest.Status)), response)
			return
		}
		// Take the write lock while mutating the status. The previous version
		// wrote authz.Status without any lock, racing with concurrent readers
		// (e.g. validation and display paths that RLock the authz).
		authz.Lock()
		authz.Status = acme.StatusDeactivated
		authz.Unlock()
	}

	err := wfe.writeJsonResponse(
		response,
		http.StatusOK,
		prepAuthorizationForDisplay(authz.Authorization))
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling authz"), response)
		return
	}
}
// Challenge dispatches a challenge request: POSTs initiate/update the
// challenge, anything else fetches its current state.
func (wfe *WebFrontEndImpl) Challenge(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	switch request.Method {
	case "POST":
		wfe.updateChallenge(ctx, logEvent, response, request)
	default:
		wfe.getChallenge(ctx, logEvent, response, request)
	}
}
// getChallenge renders an existing challenge as JSON.
func (wfe *WebFrontEndImpl) getChallenge(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// The challenge ID is the path suffix after the challenge path prefix
	chalID := strings.TrimPrefix(request.URL.Path, challengePath)
	chal := wfe.db.GetChallengeByID(chalID)
	if chal == nil {
		// No such challenge
		response.WriteHeader(http.StatusNotFound)
		return
	}
	// Read-lock the challenge while serializing it into the response
	chal.RLock()
	defer chal.RUnlock()
	if err := wfe.writeJsonResponse(response, http.StatusOK, chal.Challenge); err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling challenge"), response)
		return
	}
}
// getAcctByKey finds an account by key or returns a problem pointer if an
// existing account can't be found or the key is invalid. Deactivated
// accounts are rejected.
func (wfe *WebFrontEndImpl) getAcctByKey(key crypto.PublicKey) (*core.Account, *acme.ProblemDetails) {
	acct, err := wfe.db.GetAccountByKey(key)
	switch {
	case err != nil:
		// The key couldn't be turned into a key ID for lookup
		return nil, acme.AccountDoesNotExistProblem("Error while retrieving key ID from public key")
	case acct == nil:
		// The key is usable but matches no known account
		return nil, acme.AccountDoesNotExistProblem(
			"URL in JWS 'kid' field does not correspond to an account")
	case acct.Status == acme.StatusDeactivated:
		// Deactivated accounts may not authenticate requests
		return nil, acme.UnauthorizedProblem("Account has been deactivated")
	default:
		return acct, nil
	}
}
// validateChallengeUpdate checks that a challenge may be updated - it must
// still be pending - and returns its parent authorization on success.
func (wfe *WebFrontEndImpl) validateChallengeUpdate(
	chal *core.Challenge,
	acct *core.Account) (*core.Authorization, *acme.ProblemDetails) {
	// Read-lock the challenge while inspecting its status
	chal.RLock()
	defer chal.RUnlock()
	// Only pending challenges can be updated
	if status := chal.Status; status != acme.StatusPending {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Cannot update challenge with status %s, only status %s",
				status, acme.StatusPending))
	}
	return chal.Authz, nil
}
// validateAuthzForChallenge checks an authz is:
// 1) for a supported identifier type
// 2) not expired
// 3) associated to an order
// The associated order is returned when no problems are found to avoid needing
// another RLock() for the caller to get the order pointer later.
func (wfe *WebFrontEndImpl) validateAuthzForChallenge(authz *core.Authorization) (*core.Order, *acme.ProblemDetails) {
	// Read-lock the authz while validating it
	authz.RLock()
	defer authz.RUnlock()

	// Only DNS identifiers are supported
	if identType := authz.Identifier.Type; identType != acme.IdentifierDNS {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Authorization identifier was type %s, only %s is supported",
				identType, acme.IdentifierDNS))
	}
	// Expired authorizations cannot be used
	if wfe.clk.Now().After(authz.ExpiresDate) {
		return nil, acme.MalformedProblem(
			fmt.Sprintf("Authorization expired %s",
				authz.ExpiresDate.Format(time.RFC3339)))
	}
	// Every authz should be linked to an order; anything else is an internal
	// inconsistency
	order := authz.Order
	if order == nil {
		return nil, acme.InternalErrorProblem("authz missing associated order")
	}
	return order, nil
}
// updateChallenge handles a POST to a challenge URL: it authenticates the
// request, validates the challenge, its authz and parent order, then submits
// an asynchronous validation job to the VA and echoes the challenge back.
func (wfe *WebFrontEndImpl) updateChallenge(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	body, _, key, prob := wfe.verifyPOST(ctx, logEvent, request, wfe.lookupJWK)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	existingAcct, prob := wfe.getAcctByKey(key)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	var chalResp struct {
		KeyAuthorization *string
	}
	err := json.Unmarshal(body, &chalResp)
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem("Error unmarshaling body JSON"), response)
		return
	}

	// Historically challenges were updated by POSTing a KeyAuthorization. This is
	// unnecessary, the server can calculate this itself. We could ignore this if
	// sent (and that's what Boulder will do) but for Pebble we'd like to offer
	// a way to be more aggressive about pushing clients implementations in the
	// right direction, so we treat this as a malformed request when running in
	// strict mode.
	if wfe.strict && chalResp.KeyAuthorization != nil {
		wfe.sendError(
			acme.MalformedProblem(
				"Challenge response body contained legacy KeyAuthorization field, "+
					"POST body should be `{}`"), response)
		return
	}

	chalID := strings.TrimPrefix(request.URL.Path, challengePath)
	existingChal := wfe.db.GetChallengeByID(chalID)
	if existingChal == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}

	authz, prob := wfe.validateChallengeUpdate(existingChal, existingAcct)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	if authz == nil {
		wfe.sendError(
			acme.InternalErrorProblem("challenge missing associated authz"), response)
		return
	}

	existingOrder, prob := wfe.validateAuthzForChallenge(authz)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}

	// Copy the order's expiry under a read lock and release it before any
	// early return. The previous version returned from the expired branch
	// while still holding the RLock, leaking the read lock forever.
	existingOrder.RLock()
	orderExpiresDate := existingOrder.ExpiresDate
	existingOrder.RUnlock()
	if wfe.clk.Now().After(orderExpiresDate) {
		wfe.sendError(
			acme.MalformedProblem(fmt.Sprintf("order expired %s",
				orderExpiresDate.Format(time.RFC3339))), response)
		return
	}

	// Lock the authorization to get the identifier value
	authz.RLock()
	ident := authz.Identifier.Value
	authz.RUnlock()

	// If the identifier value is for a wildcard domain then strip the wildcard
	// prefix before dispatching the validation to ensure the base domain is
	// validated.
	ident = strings.TrimPrefix(ident, "*.")

	// Submit a validation job to the VA, this will be processed asynchronously
	wfe.va.ValidateChallenge(ident, existingChal, existingAcct)

	// Lock the challenge for reading in order to write the response
	existingChal.RLock()
	defer existingChal.RUnlock()
	response.Header().Add("Link", link(existingChal.Authz.URL, "up"))
	err = wfe.writeJsonResponse(response, http.StatusOK, existingChal.Challenge)
	if err != nil {
		wfe.sendError(acme.InternalErrorProblem("Error marshalling challenge"), response)
		return
	}
}
// Certificate serves an issued certificate chain in PEM form, looked up by
// its serial from the request path.
func (wfe *WebFrontEndImpl) Certificate(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// The serial is the path suffix after the certificate path prefix
	serial := strings.TrimPrefix(request.URL.Path, certPath)
	cert := wfe.db.GetCertificateByID(serial)
	if cert == nil {
		response.WriteHeader(http.StatusNotFound)
		return
	}
	// Emit the PEM chain; a Write error here is not actionable
	response.Header().Set("Content-Type", "application/pem-certificate-chain; charset=utf-8")
	response.WriteHeader(http.StatusOK)
	_, _ = response.Write(cert.Chain())
}
// writeJsonResponse marshals v with the server's standard indentation and
// writes it with the given status code and a JSON content type. The marshal
// error, if any, is returned for the caller to handle.
func (wfe *WebFrontEndImpl) writeJsonResponse(response http.ResponseWriter, status int, v interface{}) error {
	payload, err := marshalIndent(v)
	if err != nil {
		// All callers are responsible for handling this error
		return err
	}
	response.Header().Set("Content-Type", "application/json; charset=utf-8")
	response.WriteHeader(status)
	// Don't worry about returning an error from Write() because the caller
	// will never handle it.
	_, _ = response.Write(payload)
	return nil
}
func addNoCacheHeader(response http.ResponseWriter) {
response.Header().Add("Cache-Control", "public, max-age=0, no-cache")
}
// marshalIndent renders v as human-readable JSON using the server's
// standard three-space indentation.
func marshalIndent(v interface{}) ([]byte, error) {
	const prefix, indent = "", "   "
	return json.MarshalIndent(v, prefix, indent)
}
// link formats a URL and relation as an HTTP Link header value,
// e.g. `<https://example.com>;rel="up"`.
func link(url, relation string) string {
	return "<" + url + ">;rel=" + strconv.Quote(relation)
}
// uniqueLowerNames returns the set of all unique names in the input after all
// of them are lowercased. The returned names will be in their lowercased form
// and sorted alphabetically. See Boulder `core/util.go UniqueLowerNames`.
func uniqueLowerNames(names []string) []string {
nameMap := make(map[string]int, len(names))
for _, name := range names {
nameMap[strings.ToLower(name)] = 1
}
unique := make([]string, 0, len(nameMap))
for name := range nameMap {
unique = append(unique, name)
}
sort.Strings(unique)
return unique
}
// RevokeCert revokes an ACME certificate.
// It currently only implements one method of ACME revocation:
// Signing the revocation request by signing it with the certificate
// to be revoked's private key and embedding the certificate
// to be revoked's public key as a JWK in the JWS.
//
// Pebble's idea of certificate revocation is to forget the certificate exists.
// This method does not percolate to a CRL or an OCSP response.
func (wfe *WebFrontEndImpl) RevokeCert(
	ctx context.Context,
	logEvent *requestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// The ACME specification handles the verification of revocation requests
	// differently from other endpoints that always use one JWS authentication
	// method. For this endpoint we need to accept a JWS with an embedded JWK,
	// or a JWS with an embedded key ID, handling each case differently in
	// terms of which certificates are authorized to be revoked by the
	// requester.

	// Slurp the raw request body so it can be parsed as a JWS
	rawBody, err := ioutil.ReadAll(request.Body)
	if err != nil {
		wfe.sendError(
			acme.InternalErrorProblem("unable to read request body"), response)
		return
	}
	parsedJWS, err := wfe.parseJWS(string(rawBody))
	if err != nil {
		wfe.sendError(
			acme.MalformedProblem(err.Error()), response)
		return
	}
	// Reject anything that isn't a valid POST
	if prob := wfe.validPOST(request); prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Work out how the JWS authenticates itself
	authType, prob := checkJWSAuthType(parsedJWS)
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	// Dispatch on the authentication type; anything unknown is malformed
	switch authType {
	case embeddedKeyID:
		prob = wfe.revokeCertByKeyID(ctx, logEvent, parsedJWS, request)
	case embeddedJWK:
		prob = wfe.revokeCertByJWK(ctx, logEvent, parsedJWS, request)
	default:
		prob = acme.MalformedProblem("Malformed JWS, no KeyID or embedded JWK")
	}
	if prob != nil {
		wfe.sendError(prob, response)
		return
	}
	response.WriteHeader(http.StatusOK)
}
// revokeCertByKeyID handles a revocation request authenticated by an account
// key ID. Only the account that ordered a certificate may revoke it via
// this path.
func (wfe *WebFrontEndImpl) revokeCertByKeyID(
	ctx context.Context,
	logEvent *requestEvent,
	jws *jose.JSONWebSignature,
	request *http.Request) *acme.ProblemDetails {
	// Resolve the JWS "kid" to a public key
	pubKey, prob := wfe.lookupJWK(request, jws)
	if prob != nil {
		return prob
	}
	// Verify the JWS signature against that key
	body, _, key, prob := wfe.verifyJWS(pubKey, jws, request)
	if prob != nil {
		return prob
	}
	// Find the account owning the key
	acct, err := wfe.db.GetAccountByKey(key)
	if err != nil {
		return acme.MalformedProblem(fmt.Sprintf("Cannot obtain key ID from public key (%s)", err.Error()))
	}
	if acct == nil {
		return acme.UnauthorizedProblem("No account found corresponding to public key authenticating this request")
	}
	// An account is only authorized to revoke its own certificates presently.
	// TODO(@cpu): Allow an account to revoke another account's certificate if
	// the revoker account has valid authorizations for all of the names in the
	// to-be-revoked certificate.
	authorizedToRevoke := func(cert *core.Certificate) *acme.ProblemDetails {
		if cert.AccountID != acct.ID {
			return acme.UnauthorizedProblem(
				fmt.Sprintf(
					"The certificate being revoked is not associated with account %q",
					acct.ID))
		}
		return nil
	}
	return wfe.processRevocation(ctx, body, authorizedToRevoke, request, logEvent)
}
// revokeCertByJWK handles a revocation request authenticated by an embedded
// JWK. The requester is authorized only when the JWK matches the public key
// of the certificate being revoked.
func (wfe *WebFrontEndImpl) revokeCertByJWK(
	ctx context.Context,
	logEvent *requestEvent,
	jws *jose.JSONWebSignature,
	request *http.Request) *acme.ProblemDetails {
	// Extract the embedded JWK from the JWS protected header
	pubKey, prob := wfe.extractJWK(request, jws)
	if prob != nil {
		return prob
	}
	// Verify the JWS signature with the embedded key. Capture the returned key
	// directly instead of predeclaring a separate requestKey variable and
	// assigning it afterwards, as the previous version needlessly did.
	body, _, requestKey, prob := wfe.verifyJWS(pubKey, jws, request)
	if prob != nil {
		return prob
	}
	// For embedded JWK revocations we decide if a requester is able to revoke a specific
	// certificate by checking that to-be-revoked certificate has the same public
	// key as the JWK that was used to authenticate the request
	authorizedToRevoke := func(cert *core.Certificate) *acme.ProblemDetails {
		if keyDigestEquals(requestKey, cert.Cert.PublicKey) {
			return nil
		}
		return acme.UnauthorizedProblem(
			"JWK embedded in revocation request must be the same public key as the cert to be revoked")
	}
	return wfe.processRevocation(ctx, body, authorizedToRevoke, request, logEvent)
}
// authorizedToRevokeCert is a callback function that can be used to validate if
// a given requester is authorized to revoke the certificate parsed out of the
// revocation request. If the requester is not authorized to revoke the
// certificate a problem is returned; a nil return means revocation may
// proceed. It is expected to be a closure containing additional state (an
// account ID or key) that will be used to make the decision.
type authorizedToRevokeCert func(*core.Certificate) *acme.ProblemDetails
// processRevocation unpacks a revocation request body, validates the optional
// reason code, locates the named certificate, consults the provided
// authorization callback, and finally revokes the certificate in the DB.
func (wfe *WebFrontEndImpl) processRevocation(
	ctx context.Context,
	jwsBody []byte,
	authorizedToRevoke authorizedToRevokeCert,
	request *http.Request,
	logEvent *requestEvent) *acme.ProblemDetails {
	// revokeCertReq is the ACME certificate information submitted by the client
	var revokeCertReq struct {
		Certificate string `json:"certificate"`
		Reason      *uint  `json:"reason,omitempty"`
	}
	if err := json.Unmarshal(jwsBody, &revokeCertReq); err != nil {
		return acme.MalformedProblem("Error unmarshaling certificate revocation JSON body")
	}
	// When a reason code was supplied it must fall inside the allowed range
	if reason := revokeCertReq.Reason; reason != nil {
		if *reason == unusedRevocationReason || *reason > aACompromiseRevocationReason {
			return acme.BadRevocationReasonProblem(fmt.Sprintf("Invalid revocation reason: %d", *reason))
		}
	}
	// Decode the DER certificate bytes out of the request
	derBytes, err := base64.RawURLEncoding.DecodeString(revokeCertReq.Certificate)
	if err != nil {
		return acme.MalformedProblem("Error decoding Base64url-encoded DER: " + err.Error())
	}
	// Look up the certificate by its DER encoding
	cert := wfe.db.GetCertificateByDER(derBytes)
	if cert == nil {
		return acme.MalformedProblem(
			"Unable to find specified certificate. It may already be revoked")
	}
	// Ensure the requester is allowed to revoke this certificate
	if prob := authorizedToRevoke(cert); prob != nil {
		return prob
	}
	wfe.db.RevokeCertificate(cert)
	return nil
}
|
package auth
import (
"context"
"fmt"
"net/http"
"strings"
"time"
"github.com/gempir/gempbot/pkg/api"
"github.com/gempir/gempbot/pkg/config"
"github.com/gempir/gempbot/pkg/helix"
"github.com/gempir/gempbot/pkg/log"
"github.com/gempir/gempbot/pkg/store"
"github.com/golang-jwt/jwt"
nickHelix "github.com/nicklaw5/helix/v2"
)
// CreateApiToken creates an HS256-signed JWT for the given user ID that
// expires one year from now, signed with the given secret.
func CreateApiToken(secret, userID string) string {
	expirationTime := time.Now().Add(365 * 24 * time.Hour)
	claims := &TokenClaims{
		UserID: userID,
		StandardClaims: jwt.StandardClaims{
			// In JWT, the expiry time is expressed as unix seconds
			// (time.Time.Unix returns seconds, not milliseconds).
			ExpiresAt: expirationTime.Unix(),
		},
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	// NOTE(review): the signing error is silently dropped; on failure this
	// returns "". Consider surfacing the error to callers.
	tokenString, _ := token.SignedString([]byte(secret))
	return tokenString
}
// TokenClaims are the JWT claims gempbot issues: the Twitch user ID plus the
// registered standard claims (expiry, set by CreateApiToken).
type TokenClaims struct {
	UserID         string
	StandardClaims jwt.StandardClaims
}

// Valid implements jwt.Claims and is invoked by jwt.ParseWithClaims. It
// delegates to the embedded standard claims so expired tokens are rejected,
// which is what the ParseWithClaims call sites in this package expect.
// (The previous implementation unconditionally returned nil, so expired
// tokens still validated.)
func (t *TokenClaims) Valid() error {
	return t.StandardClaims.Valid()
}
// Auth authenticates incoming API requests using a signed JWT cookie, the
// stored Twitch user access tokens, and the Helix API.
type Auth struct {
	helixClient *helix.Client
	db          *store.Database
	cfg         *config.Config
}

// NewAuth wires up an Auth with its configuration, database, and Helix client.
func NewAuth(cfg *config.Config, db *store.Database, helixClient *helix.Client) *Auth {
	a := &Auth{}
	a.cfg = cfg
	a.db = db
	a.helixClient = helixClient
	return a
}
// AttemptAuth authenticates the request; on failure it writes a
// cookie-clearing error response to w and returns the error.
func (a *Auth) AttemptAuth(r *http.Request, w http.ResponseWriter) (nickHelix.ValidateTokenResponse, store.UserAccessToken, api.Error) {
	resp, token, apiErr := a.Authenticate(r)
	if apiErr == nil {
		return resp, token, nil
	}
	a.WriteDeleteCookieResponse(w, apiErr)
	return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, apiErr
}
// Authenticate validates the request's scToken cookie: it parses the JWT,
// loads the stored Twitch access token for that user, and validates it
// against the Helix API, refreshing it once if Twitch reports it as
// unauthorized. Returns the Helix validation response and the (possibly
// refreshed) stored token, or an Unauthorized api.Error.
func (a *Auth) Authenticate(r *http.Request) (nickHelix.ValidateTokenResponse, store.UserAccessToken, api.Error) {
	scToken := ""
	for _, cookie := range r.Cookies() {
		if cookie.Name == "scToken" {
			scToken = cookie.Value
		}
	}
	if scToken == "" {
		return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("no scToken cookie set"))
	}

	// Parse the JWT and verify its signature with the configured secret.
	// ParseWithClaims returns an error if the token is invalid or the
	// signature does not match.
	claims := &TokenClaims{}
	tkn, err := jwt.ParseWithClaims(scToken, claims, func(token *jwt.Token) (interface{}, error) {
		return []byte(a.cfg.Secret), nil
	})
	if err != nil || !tkn.Valid {
		log.Errorf("failed to validate jwt: %s", err)
		return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("bad authentication"))
	}

	token, err := a.db.GetUserAccessToken(claims.UserID)
	if err != nil {
		log.Errorf("Failed to get userAccessTokenData: %s", err.Error())
		return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("Failed to get userAccessTokenData: %s", err.Error()))
	}

	success, resp, err := a.helixClient.Client.ValidateToken(token.AccessToken)
	if !success || err != nil {
		if err != nil {
			log.Errorf("token did not validate: %s", err)
		}
		// resp may be nil when the Helix call itself failed; guard before
		// dereferencing (previously this could panic on network errors).
		if resp != nil && resp.Error == "Unauthorized" {
			// Token might be expired, let's try refreshing
			if err := a.refreshToken(r.Context(), token); err != nil {
				return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("failed to refresh token"))
			}
			refreshedToken, err := a.db.GetUserAccessToken(claims.UserID)
			if err != nil {
				log.Errorf("Failed to get userAccessTokenData: %s", err.Error())
				return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("Failed to get userAccessTokenData: %s", err.Error()))
			}
			success, resp, err = a.helixClient.Client.ValidateToken(refreshedToken.AccessToken)
			if !success || err != nil {
				if err != nil {
					log.Errorf("refreshed Token did not validate: %s", err)
				}
				return nickHelix.ValidateTokenResponse{}, refreshedToken, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("refreshed token did not validate"))
			}
			return *resp, refreshedToken, nil
		}
		errorMessage := ""
		if resp != nil {
			errorMessage = resp.ErrorMessage
		}
		return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("token not valid: %s", errorMessage))
	}
	return *resp, token, nil
}
// refreshToken exchanges the stored refresh token for a fresh Twitch access
// token and persists the new token pair and scopes for the owning user.
func (a *Auth) refreshToken(ctx context.Context, token store.UserAccessToken) error {
	resp, err := a.helixClient.Client.RefreshUserAccessToken(token.RefreshToken)
	if err != nil {
		return err
	}
	scopes := strings.Join(resp.Data.Scopes, " ")
	return a.db.SaveUserAccessToken(ctx, token.OwnerTwitchID, resp.Data.AccessToken, resp.Data.RefreshToken, scopes)
}
// WriteDeleteCookieResponse expires the scToken cookie (MaxAge < 0 deletes
// it) and writes the error message and status to the response.
func (a *Auth) WriteDeleteCookieResponse(w http.ResponseWriter, err api.Error) {
	http.SetCookie(w, &http.Cookie{
		Name:     "scToken",
		Value:    "",
		Path:     "/",
		MaxAge:   -1,
		HttpOnly: true,
	})
	http.Error(w, err.Error(), err.Status())
}
// func (a *Auth) getUserConfig(userID string) UserConfig {
// uCfg := createDefaultUserConfig()
// botConfig, err := s.db.GetBotConfig(userID)
// if err != nil {
// uCfg.BotJoin = false
// } else {
// uCfg.BotJoin = botConfig.JoinBot
// }
// uCfg.Protected.CurrentUserID = userID
// perms := s.db.GetChannelPermissions(userID)
// for _, perm := range perms {
// uCfg.Permissions[perm.TwitchID] = Permission{perm.Editor, perm.Prediction}
// }
// for _, perm := range s.db.GetUserPermissions(userID) {
// uCfg.Protected.EditorFor = append(uCfg.Protected.EditorFor, perm.ChannelTwitchId)
// }
// return uCfg
// }
Also allow authentication via the Authorization header, in addition to the scToken cookie.
package auth
import (
"context"
"fmt"
"net/http"
"strings"
"time"
"github.com/gempir/gempbot/pkg/api"
"github.com/gempir/gempbot/pkg/config"
"github.com/gempir/gempbot/pkg/helix"
"github.com/gempir/gempbot/pkg/log"
"github.com/gempir/gempbot/pkg/store"
"github.com/golang-jwt/jwt"
nickHelix "github.com/nicklaw5/helix/v2"
)
// CreateApiToken creates an HS256-signed JWT for the given user ID that
// expires one year from now, signed with the given secret.
func CreateApiToken(secret, userID string) string {
	expirationTime := time.Now().Add(365 * 24 * time.Hour)
	claims := &TokenClaims{
		UserID: userID,
		StandardClaims: jwt.StandardClaims{
			// In JWT, the expiry time is expressed as unix seconds
			// (time.Time.Unix returns seconds, not milliseconds).
			ExpiresAt: expirationTime.Unix(),
		},
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	// NOTE(review): the signing error is silently dropped; on failure this
	// returns "". Consider surfacing the error to callers.
	tokenString, _ := token.SignedString([]byte(secret))
	return tokenString
}
// TokenClaims are the JWT claims gempbot issues: the Twitch user ID plus the
// registered standard claims (expiry, set by CreateApiToken).
type TokenClaims struct {
	UserID         string
	StandardClaims jwt.StandardClaims
}

// Valid implements jwt.Claims for jwt.ParseWithClaims.
// NOTE(review): this always returns nil, so an expired token still parses as
// valid even though ExpiresAt is set — confirm this is intentional.
func (t *TokenClaims) Valid() error {
	return nil
}
// NewAuth wires up an Auth with its configuration, database, and Helix client.
func NewAuth(cfg *config.Config, db *store.Database, helixClient *helix.Client) *Auth {
	return &Auth{
		cfg:         cfg,
		db:          db,
		helixClient: helixClient,
	}
}

// Auth authenticates incoming API requests using a signed JWT (from the
// scToken cookie or the Authorization header), the stored Twitch user access
// tokens, and the Helix API.
type Auth struct {
	helixClient *helix.Client
	db          *store.Database
	cfg         *config.Config
}
// AttemptAuth authenticates the request; on failure it writes a
// cookie-clearing error response to w and returns the error.
func (a *Auth) AttemptAuth(r *http.Request, w http.ResponseWriter) (nickHelix.ValidateTokenResponse, store.UserAccessToken, api.Error) {
	resp, token, err := a.Authenticate(r)
	if err != nil {
		a.WriteDeleteCookieResponse(w, err)
		return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, err
	}
	return resp, token, nil
}
// Authenticate validates the request's JWT — taken from the Authorization
// header ("Bearer ..."), overridden by the scToken cookie when present — then
// loads the stored Twitch access token for that user and validates it against
// the Helix API, refreshing it once if Twitch reports it as unauthorized.
func (a *Auth) Authenticate(r *http.Request) (nickHelix.ValidateTokenResponse, store.UserAccessToken, api.Error) {
	// Header first; a scToken cookie, if set, takes precedence.
	scToken := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ")
	for _, cookie := range r.Cookies() {
		if cookie.Name == "scToken" {
			scToken = cookie.Value
		}
	}
	if scToken == "" {
		// Message updated to reflect that header auth is also accepted.
		return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("no scToken cookie or Authorization header set"))
	}

	// Parse the JWT and verify its signature with the configured secret.
	// ParseWithClaims returns an error if the token is invalid or the
	// signature does not match.
	claims := &TokenClaims{}
	tkn, err := jwt.ParseWithClaims(scToken, claims, func(token *jwt.Token) (interface{}, error) {
		return []byte(a.cfg.Secret), nil
	})
	if err != nil || !tkn.Valid {
		log.Errorf("failed to validate jwt: %s", err)
		return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("bad authentication"))
	}

	token, err := a.db.GetUserAccessToken(claims.UserID)
	if err != nil {
		log.Errorf("Failed to get userAccessTokenData: %s", err.Error())
		return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("Failed to get userAccessTokenData: %s", err.Error()))
	}

	success, resp, err := a.helixClient.Client.ValidateToken(token.AccessToken)
	if !success || err != nil {
		if err != nil {
			log.Errorf("token did not validate: %s", err)
		}
		// resp may be nil when the Helix call itself failed; guard before
		// dereferencing (previously this could panic on network errors).
		if resp != nil && resp.Error == "Unauthorized" {
			// Token might be expired, let's try refreshing
			if err := a.refreshToken(r.Context(), token); err != nil {
				return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("failed to refresh token"))
			}
			refreshedToken, err := a.db.GetUserAccessToken(claims.UserID)
			if err != nil {
				log.Errorf("Failed to get userAccessTokenData: %s", err.Error())
				return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("Failed to get userAccessTokenData: %s", err.Error()))
			}
			success, resp, err = a.helixClient.Client.ValidateToken(refreshedToken.AccessToken)
			if !success || err != nil {
				if err != nil {
					log.Errorf("refreshed Token did not validate: %s", err)
				}
				return nickHelix.ValidateTokenResponse{}, refreshedToken, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("refreshed token did not validate"))
			}
			return *resp, refreshedToken, nil
		}
		errorMessage := ""
		if resp != nil {
			errorMessage = resp.ErrorMessage
		}
		return nickHelix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("token not valid: %s", errorMessage))
	}
	return *resp, token, nil
}
// refreshToken exchanges the stored refresh token for a fresh Twitch access
// token and persists the new token pair and scopes for the owning user.
func (a *Auth) refreshToken(ctx context.Context, token store.UserAccessToken) error {
	resp, err := a.helixClient.Client.RefreshUserAccessToken(token.RefreshToken)
	if err != nil {
		return err
	}
	scopes := strings.Join(resp.Data.Scopes, " ")
	return a.db.SaveUserAccessToken(ctx, token.OwnerTwitchID, resp.Data.AccessToken, resp.Data.RefreshToken, scopes)
}
// WriteDeleteCookieResponse expires the scToken cookie (MaxAge < 0 deletes
// it) and writes the error message and status to the response.
func (a *Auth) WriteDeleteCookieResponse(w http.ResponseWriter, err api.Error) {
	cookie := &http.Cookie{
		Name:     "scToken",
		Value:    "",
		Path:     "/",
		MaxAge:   -1,
		HttpOnly: true,
	}
	http.SetCookie(w, cookie)
	http.Error(w, err.Error(), err.Status())
}
// func (a *Auth) getUserConfig(userID string) UserConfig {
// uCfg := createDefaultUserConfig()
// botConfig, err := s.db.GetBotConfig(userID)
// if err != nil {
// uCfg.BotJoin = false
// } else {
// uCfg.BotJoin = botConfig.JoinBot
// }
// uCfg.Protected.CurrentUserID = userID
// perms := s.db.GetChannelPermissions(userID)
// for _, perm := range perms {
// uCfg.Permissions[perm.TwitchID] = Permission{perm.Editor, perm.Prediction}
// }
// for _, perm := range s.db.GetUserPermissions(userID) {
// uCfg.Protected.EditorFor = append(uCfg.Protected.EditorFor, perm.ChannelTwitchId)
// }
// return uCfg
// }
|
package zk
import (
"strconv"
"github.com/funkygao/dbus/pkg/cluster"
log "github.com/funkygao/log4go"
"github.com/funkygao/zkclient"
)
// leader holds the state used only while this participant is the controller
// leader: the cached leader epoch (plus its znode version, used for CAS
// updates), the last published assignment decision, and the child listeners
// the leader keeps on the participants and resources paths.
type leader struct {
	ctx *controller
	// lastDecision is the most recently published assignment; used to skip
	// rebalances that would not change anything.
	lastDecision cluster.Decision
	// epoch / epochZkVersion mirror the leader-epoch znode contents and
	// version as last read or written.
	epoch          int
	epochZkVersion int32
	pcl            zkclient.ZkChildListener // leader watches live participants
	rcl            zkclient.ZkChildListener // leader watches resources
}
// newLeader builds the leader-role helper for the given controller, wiring
// up the child listeners for participants and resources.
func newLeader(ctx *controller) *leader {
	l := &leader{ctx: ctx}
	l.pcl = newParticipantChangeListener(ctx)
	l.rcl = newResourceChangeListener(ctx)
	return l
}
// fetchEpoch reads the current leader epoch and its znode version from
// zookeeper into the cached fields. A missing node is expected before the
// first election and leaves the cached values untouched; other errors are
// logged and otherwise ignored.
func (l *leader) fetchEpoch() {
	data, stat, err := l.ctx.zc.GetWithStat(l.ctx.kb.leaderEpoch())
	if err != nil {
		if !zkclient.IsErrNoNode(err) {
			log.Error("%v", err)
		}
		return
	}
	// The Atoi error is ignored; malformed node contents leave epoch at 0.
	l.epoch, _ = strconv.Atoi(string(data))
	l.epochZkVersion = stat.Version
}
// incrementEpoch bumps the leader epoch in zookeeper with a CAS write
// (SetWithVersion against the cached znode version). It returns true on
// success; false means another participant won the election and the caller
// must abort the rebalance.
func (l *leader) incrementEpoch() (ok bool) {
	newEpoch := l.epoch + 1
	data := []byte(strconv.Itoa(newEpoch))
	// CAS
	newStat, err := l.ctx.zc.SetWithVersion(l.ctx.kb.leaderEpoch(), data, l.epochZkVersion)
	if err != nil {
		switch {
		case zkclient.IsErrNoNode(err):
			// if path doesn't exist, this is the first controller whose epoch should be 1
			// the following call can still fail if another controller gets elected between checking if the path exists and
			// trying to create the controller epoch path
			if err := l.ctx.zc.CreatePersistent(l.ctx.kb.leaderEpoch(), data); err != nil {
				if zkclient.IsErrNodeExists(err) {
					log.Warn("leader moved to another participant! abort rebalance")
					return
				}
				// unexpected zk err
				log.Error("Error while incrementing controller epoch: %v", err)
				return
			}
			// The node was just created: a fresh znode has version 0.
			// newStat must NOT be used on this path — it comes from the
			// failed SetWithVersion call and dereferencing it here caused a
			// nil-pointer panic on first election in the previous version.
			l.epoch = newEpoch
			l.epochZkVersion = 0
			return true
		case zkclient.IsErrVersionConflict(err):
			log.Warn("leader moved to another participant! abort rebalance")
			return
		default:
			// unexpected zk err
			log.Error("Error while incrementing controller epoch: %v", err)
			return
		}
	}
	ok = true
	l.epoch = newEpoch
	l.epochZkVersion = newStat.Version
	return
}
// onResigningAsLeader tears down leader-only state: it removes the
// participant/resource watches and resets the cached decision, leader id,
// and epoch bookkeeping.
func (l *leader) onResigningAsLeader() {
	kb, zc := l.ctx.kb, l.ctx.zc
	zc.UnsubscribeChildChanges(kb.participants(), l.pcl)
	zc.UnsubscribeChildChanges(kb.resources(), l.rcl)

	l.lastDecision = nil
	l.ctx.elector.leaderID = ""
	l.epoch, l.epochZkVersion = 0, 0

	log.Trace("[%s] resigned as leader", l.ctx.participant)
}
// onBecomingLeader installs the leader-only watches, bumps the leader epoch
// via CAS, and — if this participant is still the leader — kicks off an
// initial rebalance.
func (l *leader) onBecomingLeader() {
	kb, zc := l.ctx.kb, l.ctx.zc
	zc.SubscribeChildChanges(kb.participants(), l.pcl)
	zc.SubscribeChildChanges(kb.resources(), l.rcl)

	l.fetchEpoch()
	if !l.incrementEpoch() {
		// Epoch CAS lost: another participant took over leadership.
		return
	}

	log.Trace("[%s] become controller leader and trigger rebalance!", l.ctx.participant)
	l.doRebalance()
}
// doRebalance recomputes the resource-to-participant assignment. It runs on
// the controller leader when:
//  1. participants change
//  2. resources change
//  3. this participant becomes leader
func (l *leader) doRebalance() {
	participants, err := l.ctx.LiveParticipants()
	if err != nil {
		// TODO
		log.Critical("[%s] %s", l.ctx.participant, err)
		return
	}
	if len(participants) == 0 {
		log.Critical("[%s] no alive participants found", l.ctx.participant)
		return
	}
	resources, err := l.ctx.RegisteredResources()
	if err != nil {
		// TODO
		log.Critical("[%s] %s", l.ctx.participant, err)
		return
	}
	// Ask the pluggable strategy for a new assignment; skip the rebalance
	// entirely when the decision is unchanged.
	newDecision := l.ctx.strategyFunc(participants, resources)
	if !newDecision.Equals(l.lastDecision) {
		l.lastDecision = newDecision
		// WAL: persist each resource's new owner and leader epoch to
		// zookeeper before notifying participants.
		walFailure := false
		for participant, resources := range newDecision {
			for _, resource := range resources {
				rs := cluster.NewResourceState()
				rs.LeaderEpoch = l.epoch
				rs.Owner = participant.Endpoint
				// TODO add random sleep here to test race condition
				if err := l.ctx.zc.Set(l.ctx.kb.resourceState(resource.Name), rs.Marshal()); err != nil {
					// zk conn lost? timeout?
					// TODO
					log.Critical("[%s] %s %v", l.ctx.participant, resource.Name, err)
					walFailure = true
					break
				}
			}
			if walFailure {
				break
			}
		}
		// NOTE(review): onRebalance is invoked even when a WAL write failed
		// above — confirm whether publishing a partially persisted decision
		// is intended.
		l.ctx.onRebalance(l.epoch, newDecision)
	} else {
		log.Trace("[%s] decision stay unchanged, quit rebalance", l.ctx.participant)
	}
}
A small refactor: rename the `participants` variable to `liveParticipants` for clarity.
package zk
import (
"strconv"
"github.com/funkygao/dbus/pkg/cluster"
log "github.com/funkygao/log4go"
"github.com/funkygao/zkclient"
)
// leader holds the state used only while this participant is the controller
// leader: the cached leader epoch (plus its znode version, used for CAS
// updates), the last published assignment decision, and the child listeners
// the leader keeps on the participants and resources paths.
type leader struct {
	ctx *controller
	// lastDecision is the most recently published assignment; used to skip
	// rebalances that would not change anything.
	lastDecision cluster.Decision
	// epoch / epochZkVersion mirror the leader-epoch znode contents and
	// version as last read or written.
	epoch          int
	epochZkVersion int32
	pcl            zkclient.ZkChildListener // leader watches live participants
	rcl            zkclient.ZkChildListener // leader watches resources
}
// newLeader builds the leader-role helper for the given controller, wiring
// up the child listeners for participants and resources.
func newLeader(ctx *controller) *leader {
	return &leader{
		ctx: ctx,
		pcl: newParticipantChangeListener(ctx),
		rcl: newResourceChangeListener(ctx),
	}
}
// fetchEpoch reads the current leader epoch and its znode version from
// zookeeper into the cached fields. A missing node is expected before the
// first election and leaves the cached values untouched; other errors are
// logged and otherwise ignored.
func (l *leader) fetchEpoch() {
	data, stat, err := l.ctx.zc.GetWithStat(l.ctx.kb.leaderEpoch())
	if err != nil {
		if !zkclient.IsErrNoNode(err) {
			log.Error("%v", err)
		}
		return
	}
	// The Atoi error is ignored; malformed node contents leave epoch at 0.
	l.epoch, _ = strconv.Atoi(string(data))
	l.epochZkVersion = stat.Version
}
// incrementEpoch bumps the leader epoch in zookeeper with a CAS write
// (SetWithVersion against the cached znode version). It returns true on
// success; false means another participant won the election and the caller
// must abort the rebalance.
func (l *leader) incrementEpoch() (ok bool) {
	newEpoch := l.epoch + 1
	data := []byte(strconv.Itoa(newEpoch))
	// CAS
	newStat, err := l.ctx.zc.SetWithVersion(l.ctx.kb.leaderEpoch(), data, l.epochZkVersion)
	if err != nil {
		switch {
		case zkclient.IsErrNoNode(err):
			// if path doesn't exist, this is the first controller whose epoch should be 1
			// the following call can still fail if another controller gets elected between checking if the path exists and
			// trying to create the controller epoch path
			if err := l.ctx.zc.CreatePersistent(l.ctx.kb.leaderEpoch(), data); err != nil {
				if zkclient.IsErrNodeExists(err) {
					log.Warn("leader moved to another participant! abort rebalance")
					return
				}
				// unexpected zk err
				log.Error("Error while incrementing controller epoch: %v", err)
				return
			}
			// The node was just created: a fresh znode has version 0.
			// newStat must NOT be used on this path — it comes from the
			// failed SetWithVersion call and dereferencing it here caused a
			// nil-pointer panic on first election in the previous version.
			l.epoch = newEpoch
			l.epochZkVersion = 0
			return true
		case zkclient.IsErrVersionConflict(err):
			log.Warn("leader moved to another participant! abort rebalance")
			return
		default:
			// unexpected zk err
			log.Error("Error while incrementing controller epoch: %v", err)
			return
		}
	}
	ok = true
	l.epoch = newEpoch
	l.epochZkVersion = newStat.Version
	return
}
// onResigningAsLeader tears down leader-only state: it removes the
// participant/resource watches and resets the cached decision, leader id,
// and epoch bookkeeping.
func (l *leader) onResigningAsLeader() {
	l.ctx.zc.UnsubscribeChildChanges(l.ctx.kb.participants(), l.pcl)
	l.ctx.zc.UnsubscribeChildChanges(l.ctx.kb.resources(), l.rcl)
	l.lastDecision = nil
	l.ctx.elector.leaderID = ""
	l.epoch = 0
	l.epochZkVersion = 0
	log.Trace("[%s] resigned as leader", l.ctx.participant)
}
// onBecomingLeader installs the leader-only watches, bumps the leader epoch
// via CAS, and — if this participant is still the leader — kicks off an
// initial rebalance.
func (l *leader) onBecomingLeader() {
	l.ctx.zc.SubscribeChildChanges(l.ctx.kb.participants(), l.pcl)
	l.ctx.zc.SubscribeChildChanges(l.ctx.kb.resources(), l.rcl)
	l.fetchEpoch()
	if !l.incrementEpoch() {
		// Epoch CAS lost: another participant took over leadership.
		return
	}
	log.Trace("[%s] become controller leader and trigger rebalance!", l.ctx.participant)
	l.doRebalance()
}
// doRebalance recomputes the resource-to-participant assignment. It runs on
// the controller leader when:
//  1. participants change
//  2. resources change
//  3. this participant becomes leader
func (l *leader) doRebalance() {
	liveParticipants, err := l.ctx.LiveParticipants()
	if err != nil {
		// TODO
		log.Critical("[%s] %s", l.ctx.participant, err)
		return
	}
	if len(liveParticipants) == 0 {
		log.Critical("[%s] no live participants found", l.ctx.participant)
		return
	}
	resources, err := l.ctx.RegisteredResources()
	if err != nil {
		// TODO
		log.Critical("[%s] %s", l.ctx.participant, err)
		return
	}
	// Ask the pluggable strategy for a new assignment; skip the rebalance
	// entirely when the decision is unchanged.
	newDecision := l.ctx.strategyFunc(liveParticipants, resources)
	if !newDecision.Equals(l.lastDecision) {
		l.lastDecision = newDecision
		// WAL: persist each resource's new owner and leader epoch to
		// zookeeper before notifying participants.
		walFailure := false
		for participant, resources := range newDecision {
			for _, resource := range resources {
				rs := cluster.NewResourceState()
				rs.LeaderEpoch = l.epoch
				rs.Owner = participant.Endpoint
				// TODO add random sleep here to test race condition
				if err := l.ctx.zc.Set(l.ctx.kb.resourceState(resource.Name), rs.Marshal()); err != nil {
					// zk conn lost? timeout?
					// TODO
					log.Critical("[%s] %s %v", l.ctx.participant, resource.Name, err)
					walFailure = true
					break
				}
			}
			if walFailure {
				break
			}
		}
		// NOTE(review): onRebalance is invoked even when a WAL write failed
		// above — confirm whether publishing a partially persisted decision
		// is intended.
		l.ctx.onRebalance(l.epoch, newDecision)
	} else {
		log.Trace("[%s] decision stay unchanged, quit rebalance", l.ctx.participant)
	}
}
|
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package conversion
import (
"fmt"
"reflect"
)
// Scheme defines an entire encoding and decoding scheme.
type Scheme struct {
	// versionMap allows one to figure out the go type of an object with
	// the given version and name.
	versionMap map[string]map[string]reflect.Type
	// typeToVersion allows one to figure out the version for a given go object.
	// The reflect.Type we index by should *not* be a pointer. If the same type
	// is registered for multiple versions, the last one wins.
	typeToVersion map[reflect.Type]string
	// typeToKind allows one to figure out the desired "kind" field for a given
	// go object. Requirements and caveats are the same as typeToVersion.
	typeToKind map[reflect.Type][]string
	// converter stores all registered conversion functions. It also has
	// default converting behavior.
	converter *Converter
	// Indent will cause the JSON output from Encode to be indented, iff it is true.
	Indent bool
	// InternalVersion is the default internal version. It is recommended that
	// you use "" for the internal version.
	InternalVersion string
	// MetaFactory is used to create an object to store and retrieve
	// the version and kind information for all objects. The default uses the
	// keys "apiVersion" and "kind" respectively.
	MetaFactory MetaFactory
}
// NewScheme manufactures a new scheme with empty type registries, a default
// converter wired to the scheme's naming function, and the default
// MetaFactory.
func NewScheme() *Scheme {
	scheme := &Scheme{
		versionMap:      map[string]map[string]reflect.Type{},
		typeToVersion:   map[reflect.Type]string{},
		typeToKind:      map[reflect.Type][]string{},
		converter:       NewConverter(),
		InternalVersion: "",
		MetaFactory:     DefaultMetaFactory,
	}
	scheme.converter.NameFunc = scheme.nameFunc
	return scheme
}
// Log sets a debug logger on the scheme's converter. For test purposes only.
func (s *Scheme) Log(l DebugLogger) {
	s.converter.Debug = l
}
// nameFunc returns the name of the type that we wish to use for encoding:
// the first registered kind for the type, or the go type name when the type
// is not registered.
func (s *Scheme) nameFunc(t reflect.Type) string {
	kinds, ok := s.typeToKind[t]
	if !ok {
		return t.Name()
	}
	return kinds[0]
}
// AddKnownTypes registers all types passed in 'types' as being members of
// version 'version'. Encode() will refuse objects unless their type has been
// registered with AddKnownTypes. All objects passed to types should be
// pointers to structs; anything else panics. The name that go reports for
// the struct becomes the "kind" field when encoding.
func (s *Scheme) AddKnownTypes(version string, types ...interface{}) {
	knownTypes := s.versionMap[version]
	if knownTypes == nil {
		knownTypes = map[string]reflect.Type{}
		s.versionMap[version] = knownTypes
	}
	for _, obj := range types {
		t := reflect.TypeOf(obj)
		if t.Kind() != reflect.Ptr {
			panic("All types must be pointers to structs.")
		}
		t = t.Elem()
		if t.Kind() != reflect.Struct {
			panic("All types must be pointers to structs.")
		}
		name := t.Name()
		knownTypes[name] = t
		s.typeToVersion[t] = version
		s.typeToKind[t] = append(s.typeToKind[t], name)
	}
}
// AddKnownTypeWithName is like AddKnownTypes, but it lets you specify what
// this type should be encoded as (the 'kind'). Useful for testing when you
// don't want to make multiple packages to define your structs. Panics unless
// obj is a pointer to a struct.
func (s *Scheme) AddKnownTypeWithName(version, kind string, obj interface{}) {
	knownTypes := s.versionMap[version]
	if knownTypes == nil {
		knownTypes = map[string]reflect.Type{}
		s.versionMap[version] = knownTypes
	}
	t := reflect.TypeOf(obj)
	if t.Kind() != reflect.Ptr {
		panic("All types must be pointers to structs.")
	}
	t = t.Elem()
	if t.Kind() != reflect.Struct {
		panic("All types must be pointers to structs.")
	}
	knownTypes[kind] = t
	s.typeToVersion[t] = version
	s.typeToKind[t] = append(s.typeToKind[t], kind)
}
// KnownTypes returns a copy of the kind-to-type map for a particular
// version; the map is empty when the version is unknown.
func (s *Scheme) KnownTypes(version string) map[string]reflect.Type {
	types := map[string]reflect.Type{}
	for kind, t := range s.versionMap[version] {
		types[kind] = t
	}
	return types
}
// NewObject returns a pointer to a new object of the given version and name,
// or an error if it hasn't been registered.
func (s *Scheme) NewObject(versionName, typeName string) (interface{}, error) {
	types, ok := s.versionMap[versionName]
	if !ok {
		return nil, fmt.Errorf("No version '%v'", versionName)
	}
	t, ok := types[typeName]
	if !ok {
		return nil, fmt.Errorf("No type '%v' for version '%v'", typeName, versionName)
	}
	return reflect.New(t).Interface(), nil
}
// AddConversionFuncs adds functions to the list of conversion functions. The given
// functions should know how to convert between two of your API objects, or their
// sub-objects. We deduce how to call these functions from the types of their two
// parameters; see the comment for Converter.Register.
//
// Note that, if you need to copy sub-objects that didn't change, you can use the
// conversion.Scope object that will be passed to your conversion function.
// Additionally, all conversions started by Scheme will set the SrcVersion and
// DestVersion fields on the Meta object. Example:
//
// s.AddConversionFuncs(
//	func(in *InternalObject, out *ExternalObject, scope conversion.Scope) error {
//		// You can depend on Meta() being non-nil, and this being set to
//		// the source version, e.g., ""
//		s.Meta().SrcVersion
//		// You can depend on this being set to the destination version,
//		// e.g., "v1beta1".
//		s.Meta().DestVersion
//		// Call scope.Convert to copy sub-fields.
//		s.Convert(&in.SubFieldThatMoved, &out.NewLocation.NewName, 0)
//		return nil
//	},
// )
//
// (For more detail about conversion functions, see Converter.Register's comment.)
//
// Also note that the default behavior, if you don't add a conversion function, is to
// sanely copy fields that have the same names and same type names. It's OK if the
// destination type has extra fields, but it must not remove any. So you only need to
// add conversion functions for things with changed/removed fields.
// Registration stops at the first function that fails to register.
func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error {
	for _, f := range conversionFuncs {
		if err := s.converter.Register(f); err != nil {
			return err
		}
	}
	return nil
}
// Convert will attempt to convert in into out. Both must be pointers. For easy
// testing of conversion functions. Returns an error if the conversion isn't
// possible. You can call this with types that haven't been registered (for example,
// a to test conversion of types that are nested within registered types), but in
// that case, the conversion.Scope object passed to your conversion functions won't
// have SrcVersion or DestVersion fields set correctly in Meta().
func (s *Scheme) Convert(in, out interface{}) error {
	// Unregistered objects convert with version "unknown".
	versionOf := func(obj interface{}) string {
		if v, _, err := s.ObjectVersionAndKind(obj); err == nil {
			return v
		}
		return "unknown"
	}
	meta := s.generateConvertMeta(versionOf(in), versionOf(out))
	return s.converter.Convert(in, out, 0, meta)
}
// ConvertToVersion attempts to convert an input object to its matching Kind in another
// version within this scheme. Will return an error if the provided version does not
// contain the inKind (or a mapping by name defined with AddKnownTypeWithName).
func (s *Scheme) ConvertToVersion(in interface{}, outVersion string) (interface{}, error) {
	t := reflect.TypeOf(in)
	if t.Kind() != reflect.Ptr {
		return nil, fmt.Errorf("only pointer types may be converted: %v", t)
	}
	t = t.Elem()
	if t.Kind() != reflect.Struct {
		return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t)
	}

	// The first registered kind for the input type names the output object.
	kinds, ok := s.typeToKind[t]
	if !ok {
		return nil, fmt.Errorf("%v cannot be converted into version %q", t, outVersion)
	}
	outKind := kinds[0]

	inVersion, _, err := s.ObjectVersionAndKind(in)
	if err != nil {
		return nil, err
	}

	out, err := s.NewObject(outVersion, outKind)
	if err != nil {
		return nil, err
	}

	meta := s.generateConvertMeta(inVersion, outVersion)
	if err := s.converter.Convert(in, out, 0, meta); err != nil {
		return nil, err
	}
	// Stamp the converted object with its new version and kind.
	if err := s.SetVersionAndKind(outVersion, outKind, out); err != nil {
		return nil, err
	}
	return out, nil
}
// generateConvertMeta constructs the meta value we pass to Convert, carrying
// the source and destination versions for use by conversion functions.
func (s *Scheme) generateConvertMeta(srcVersion, destVersion string) *Meta {
	return &Meta{
		SrcVersion:  srcVersion,
		DestVersion: destVersion,
	}
}
// DataVersionAndKind will return the APIVersion and Kind of the given wire-format
// encoding of an API Object, or an error. It delegates to the configured
// MetaFactory's Interpret.
func (s *Scheme) DataVersionAndKind(data []byte) (version, kind string, err error) {
	return s.MetaFactory.Interpret(data)
}
// ObjectVersionAndKind returns the API version and kind of the go object,
// or an error if it's not a pointer or is unregistered.
func (s *Scheme) ObjectVersionAndKind(obj interface{}) (apiVersion, kind string, err error) {
	v, err := EnforcePtr(obj)
	if err != nil {
		return "", "", err
	}
	t := v.Type()

	version, vOK := s.typeToVersion[t]
	kinds, kOK := s.typeToKind[t]
	if !vOK || !kOK {
		return "", "", fmt.Errorf("Unregistered type: %v", t)
	}
	return version, kinds[0], nil
}
// SetVersionAndKind sets the version and kind fields (with help from
// the configured MetaFactory's Update). Returns an error if this isn't
// possible. obj must be a pointer.
func (s *Scheme) SetVersionAndKind(version, kind string, obj interface{}) error {
	return s.MetaFactory.Update(version, kind, obj)
}
// maybeCopy returns obj unchanged when it is already a pointer; otherwise it
// copies obj into a freshly allocated value and returns a pointer to that
// copy. Guaranteed to return a pointer (settable/addressable).
func maybeCopy(obj interface{}) interface{} {
	v := reflect.ValueOf(obj)
	if v.Kind() == reflect.Ptr {
		return obj
	}
	copied := reflect.New(v.Type())
	copied.Elem().Set(v)
	return copied.Interface()
}
When explicitly converting between two objects, allow their type names to differ.
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package conversion
import (
"fmt"
"reflect"
)
// Scheme defines an entire encoding and decoding scheme.
type Scheme struct {
// versionMap allows one to figure out the go type of an object with
// the given version and name.
versionMap map[string]map[string]reflect.Type
// typeToVersion allows one to figure out the version for a given go object.
// The reflect.Type we index by should *not* be a pointer. If the same type
// is registered for multiple versions, the last one wins.
typeToVersion map[reflect.Type]string
// typeToKind allows one to figure out the desired "kind" field for a given
// go object. Requirements and caveats are the same as typeToVersion.
typeToKind map[reflect.Type][]string
// converter stores all registered conversion functions. It also has
// default coverting behavior.
converter *Converter
// Indent will cause the JSON output from Encode to be indented, iff it is true.
Indent bool
// InternalVersion is the default internal version. It is recommended that
// you use "" for the internal version.
InternalVersion string
// MetaInsertionFactory is used to create an object to store and retrieve
// the version and kind information for all objects. The default uses the
// keys "apiVersion" and "kind" respectively.
MetaFactory MetaFactory
}
// NewScheme manufactures a new scheme with empty type registries, a default
// converter wired to the scheme's naming function, and the default
// MetaFactory.
func NewScheme() *Scheme {
	s := &Scheme{
		versionMap:      map[string]map[string]reflect.Type{},
		typeToVersion:   map[reflect.Type]string{},
		typeToKind:      map[reflect.Type][]string{},
		converter:       NewConverter(),
		InternalVersion: "",
		MetaFactory:     DefaultMetaFactory,
	}
	s.converter.NameFunc = s.nameFunc
	return s
}
// Log sets a debug logger on the scheme's converter. For test purposes only.
func (s *Scheme) Log(l DebugLogger) {
	s.converter.Debug = l
}
// nameFunc returns the name of the type that we wish to use for encoding:
// the first registered kind for the type. Defaults to the go name of the
// type if the type is not registered.
func (s *Scheme) nameFunc(t reflect.Type) string {
	if kind, ok := s.typeToKind[t]; ok {
		return kind[0]
	}
	return t.Name()
}
// AddKnownTypes registers all types passed in 'types' as being members of
// version 'version'. Encode() will refuse objects unless their type has been
// registered with AddKnownTypes. All objects passed to types should be
// pointers to structs. The name that go reports for the struct becomes the
// "kind" field when encoding. Panics on non-pointer-to-struct input.
func (s *Scheme) AddKnownTypes(version string, types ...interface{}) {
	knownTypes := s.versionMap[version]
	if knownTypes == nil {
		knownTypes = map[string]reflect.Type{}
		s.versionMap[version] = knownTypes
	}
	for _, obj := range types {
		t := reflect.TypeOf(obj)
		// Reject anything that is not a pointer to a struct.
		if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
			panic("All types must be pointers to structs.")
		}
		t = t.Elem()
		name := t.Name()
		knownTypes[name] = t
		s.typeToVersion[t] = version
		s.typeToKind[t] = append(s.typeToKind[t], name)
	}
}
// AddKnownTypeWithName is like AddKnownTypes, but it lets you specify what this
// type should be encoded as. Useful for testing when you don't want to make
// multiple packages to define your structs. Panics unless obj is a pointer to
// a struct.
func (s *Scheme) AddKnownTypeWithName(version, kind string, obj interface{}) {
	knownTypes := s.versionMap[version]
	if knownTypes == nil {
		knownTypes = map[string]reflect.Type{}
		s.versionMap[version] = knownTypes
	}
	t := reflect.TypeOf(obj)
	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
		panic("All types must be pointers to structs.")
	}
	t = t.Elem()
	knownTypes[kind] = t
	s.typeToVersion[t] = version
	s.typeToKind[t] = append(s.typeToKind[t], kind)
}
// KnownTypes returns a copy of the kind-to-type map registered for the
// given version; callers may mutate the result freely. An unknown version
// yields an empty (non-nil) map.
func (s *Scheme) KnownTypes(version string) map[string]reflect.Type {
	copied := map[string]reflect.Type{}
	for name, t := range s.versionMap[version] {
		copied[name] = t
	}
	return copied
}
// NewObject returns a pointer to a freshly allocated instance of the type
// registered under the given version and type name, or an error if no such
// registration exists.
func (s *Scheme) NewObject(versionName, typeName string) (interface{}, error) {
	types, ok := s.versionMap[versionName]
	if !ok {
		// Idiom fix: Go error strings are lowercase (staticcheck ST1005);
		// was "No version ...".
		return nil, fmt.Errorf("no version '%v'", versionName)
	}
	t, ok := types[typeName]
	if !ok {
		return nil, fmt.Errorf("no type '%v' for version '%v'", typeName, versionName)
	}
	return reflect.New(t).Interface(), nil
}
// AddConversionFuncs registers conversion functions with the underlying
// Converter. Each function must convert between two of your API objects or
// their sub-objects; its two pointer parameter types determine what it
// converts — see Converter.Register for details.
//
// Inside a conversion function, the conversion.Scope argument exposes
// Meta().SrcVersion and Meta().DestVersion (always set by conversions
// started by Scheme, e.g. "" and "v1beta1") plus Convert for copying
// sub-fields that didn't change:
//
//	s.AddConversionFuncs(
//		func(in *InternalObject, out *ExternalObject, scope conversion.Scope) error {
//			scope.Convert(&in.SubFieldThatMoved, &out.NewLocation.NewName, 0)
//			return nil
//		},
//	)
//
// The default behavior, absent a registered function, is to sanely copy
// fields with the same names and same type names; the destination may add
// fields but must not remove any. So conversion functions are only needed
// for fields that changed or moved. Registration stops at the first error.
func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error {
	for _, fn := range conversionFuncs {
		if err := s.converter.Register(fn); err != nil {
			return err
		}
	}
	return nil
}
// Convert will attempt to convert in into out. Both must be pointers; it
// exists for easy testing of conversion functions, and returns an error if
// the conversion isn't possible. Unregistered types are allowed (e.g. to
// test nested sub-objects), but then the conversion.Scope passed to your
// conversion functions won't have accurate SrcVersion/DestVersion in Meta().
func (s *Scheme) Convert(in, out interface{}) error {
	const fallback = "unknown"
	inVersion, outVersion := fallback, fallback
	if version, _, err := s.ObjectVersionAndKind(in); err == nil {
		inVersion = version
	}
	if version, _, err := s.ObjectVersionAndKind(out); err == nil {
		outVersion = version
	}
	meta := s.generateConvertMeta(inVersion, outVersion)
	return s.converter.Convert(in, out, AllowDifferentFieldTypeNames, meta)
}
// ConvertToVersion attempts to convert an input object to its matching Kind
// in another version within this scheme. Will return an error if the provided
// version does not contain the inKind (or a mapping by name defined with
// AddKnownTypeWithName).
func (s *Scheme) ConvertToVersion(in interface{}, outVersion string) (interface{}, error) {
	inType := reflect.TypeOf(in)
	if inType.Kind() != reflect.Ptr {
		return nil, fmt.Errorf("only pointer types may be converted: %v", inType)
	}
	inType = inType.Elem()
	if inType.Kind() != reflect.Struct {
		return nil, fmt.Errorf("only pointers to struct types may be converted: %v", inType)
	}
	kinds, registered := s.typeToKind[inType]
	if !registered {
		return nil, fmt.Errorf("%v cannot be converted into version %q", inType, outVersion)
	}
	// The first registered kind names the target object in outVersion.
	outKind := kinds[0]
	inVersion, _, err := s.ObjectVersionAndKind(in)
	if err != nil {
		return nil, err
	}
	out, err := s.NewObject(outVersion, outKind)
	if err != nil {
		return nil, err
	}
	meta := s.generateConvertMeta(inVersion, outVersion)
	if err := s.converter.Convert(in, out, 0, meta); err != nil {
		return nil, err
	}
	// Stamp the result so a subsequent Encode carries the right metadata.
	if err := s.SetVersionAndKind(outVersion, outKind, out); err != nil {
		return nil, err
	}
	return out, nil
}
// generateConvertMeta constructs the meta value we pass to Convert,
// recording the source and destination versions for conversion functions.
func (s *Scheme) generateConvertMeta(srcVersion, destVersion string) *Meta {
	m := new(Meta)
	m.SrcVersion = srcVersion
	m.DestVersion = destVersion
	return m
}
// DataVersionAndKind will return the APIVersion and Kind of the given wire-format
// encoding of an API Object, or an error.
func (s *Scheme) DataVersionAndKind(data []byte) (version, kind string, err error) {
	// Delegates to the configured MetaFactory (by default the JSON
	// "apiVersion" and "kind" keys).
	return s.MetaFactory.Interpret(data)
}
// ObjectVersionAndKind returns the API version and kind of the go object,
// or an error if it's not a pointer or is unregistered.
func (s *Scheme) ObjectVersionAndKind(obj interface{}) (apiVersion, kind string, err error) {
	v, err := EnforcePtr(obj)
	if err != nil {
		return "", "", err
	}
	t := v.Type()
	version, vOK := s.typeToVersion[t]
	kinds, kOK := s.typeToKind[t]
	if !vOK || !kOK {
		// Idiom fix: lowercase error string (staticcheck ST1005);
		// was "Unregistered type: %v".
		return "", "", fmt.Errorf("unregistered type: %v", t)
	}
	// Explicit return instead of assigning named results and a naked return.
	return version, kinds[0], nil
}
// SetVersionAndKind sets the version and kind fields (with help from
// the configured MetaFactory). Returns an error if this isn't possible. obj
// must be a pointer.
func (s *Scheme) SetVersionAndKind(version, kind string, obj interface{}) error {
	return s.MetaFactory.Update(version, kind, obj)
}
// maybeCopy returns obj unchanged when it is already a pointer; otherwise
// it copies the value into newly allocated storage and returns a pointer to
// that copy, guaranteeing a settable/addressable result.
func maybeCopy(obj interface{}) interface{} {
	v := reflect.ValueOf(obj)
	if v.Kind() == reflect.Ptr {
		return obj
	}
	ptr := reflect.New(v.Type())
	ptr.Elem().Set(v)
	return ptr.Interface()
}
|
package sinmetalcraft
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"google.golang.org/appengine"
"google.golang.org/appengine/datastore"
"google.golang.org/appengine/log"
"google.golang.org/appengine/urlfetch"
"google.golang.org/appengine/user"
"google.golang.org/api/compute/v1"
"golang.org/x/net/context"
)
// Package-wide Compute Engine identifiers.
const (
	// PROJECT_NAME is the GCP project owning all instances, disks and entities.
	PROJECT_NAME = "sinmetalcraft"
	// INSTANCE_NAME prefixes every Minecraft GCE instance and world-disk name.
	INSTANCE_NAME = "minecraft"
)
// init wires the HTTP routes: the Cloud Pub/Sub push endpoint for server
// logs and the JSON management API.
func init() {
	api := MinecraftApi{}
	http.HandleFunc("/minecraft", handlerMinecraftLog)
	http.HandleFunc("/api/1/minecraft", api.Handler)
}
// Minecraft is the Datastore entity (kind "Minecraft", keyed by world name)
// tracking one Minecraft world and its GCE server instance.
type Minecraft struct {
	Key    *datastore.Key `json:"-" datastore:"-"` // set after load; never stored or serialized
	KeyStr string         `json:"key" datastore:"-"` // URL-safe encoded Key for API clients
	World  string         `json:"world"`
	ResourceID int64 `json:"resourceID"`
	Zone   string `json:"zone" datastore:",unindexed"`
	IPAddr string `json:"ipAddr" datastore:",unindexed"`
	Status string `json:"status" datastore:",unindexed"`
	OperationType string `json:"operationType" datastore:",unindexed"`
	// NOTE(review): "operationstatus" and "latestSnpshot" below look like
	// typos, but they are the wire format existing clients consume —
	// confirm before changing.
	OperationStatus string `json:"operationstatus" datastore:",unindexed"`
	LatestSnapshot  string `json:"latestSnpshot" datastore:",unindexed"`
	JarVersion      string `json:"jarVersion" datastore:",unindexed"`
	OverviewerSnapshot string `json:"overViewerSnapshot" datastore:",unindexed"` // name of the snapshot for which a Minecraft Overviewer map has already been generated
	CreatedAt time.Time `json:"createdAt"`
	UpdatedAt time.Time `json:"updatedAt"`
}
// MinecraftApiListResponse is a paged API listing: instance summaries plus
// a cursor for fetching the next page.
type MinecraftApiListResponse struct {
	Items []MinecraftApiResponse `json:"items"`
	// BUG FIX: the tag was `json:cursor` (missing quotes). encoding/json
	// ignores malformed tags entirely, so the field marshaled as "Cursor";
	// go vet flags this.
	Cursor string `json:"cursor"`
}
// MinecraftApiResponse is one GCE instance summary returned by the API.
type MinecraftApiResponse struct {
	InstanceName      string `json:"instanceName"`
	Zone              string `json:"zone"`
	IPAddr            string `json:"iPAddr"`
	Status            string `json:"status"`
	CreationTimestamp string `json:"creationTimestamp"`
}
// Metadata is the envelope metadata of a Cloud Logging entry delivered
// through Pub/Sub (owning project, service, zone, labels, timestamp).
type Metadata struct {
	ProjectID   string            `json:"projectId"`
	ServiceName string            `json:"serviceName"`
	Zone        string            `json:"zone"`
	Labels      map[string]string `json:"labels"`
	Timestamp   string            `json:"timestamp"`
}
// StructPayload carries the structured payload of a log entry; only the
// raw log line is consumed here.
type StructPayload struct {
	Log string `json:"log"`
}
// PubSubData is the JSON document found (base64-encoded) inside a Pub/Sub
// push message: a single Cloud Logging entry.
type PubSubData struct {
	Metadata      Metadata      `json:"metadata"`
	InsertID      string        `json:"insertId"`
	Log           string        `json:"log"`
	StructPayload StructPayload `json:"structPayload"`
}
// Message is the Pub/Sub message envelope; Data holds the base64-encoded
// PubSubData document.
type Message struct {
	Data       string            `json:"data"`
	Attributes map[string]string `json:"attributes"`
	MessageID  string            `json:"message_id"`
}
// PubSubBody is the top-level JSON body of a Pub/Sub push request.
type PubSubBody struct {
	Message      Message `json:"message"`
	Subscription string  `json:"subscription"`
}
// MinecraftApi groups the stateless /api/1/minecraft HTTP handlers.
type MinecraftApi struct{}
// Handler dispatches /api/1/minecraft requests by HTTP verb; unsupported
// verbs receive 405 Method Not Allowed.
func (a *MinecraftApi) Handler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "POST":
		a.Post(w, r)
	case "PUT":
		a.Put(w, r)
	case "GET":
		a.List(w, r)
	case "DELETE":
		a.Delete(w, r)
	default:
		w.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// Post creates a new Minecraft world entity keyed by its world name.
// Responses: 401 (+login URL JSON) when not signed in, 403 for non-admins,
// 400 on an undecodable body, 500 on datastore failure, 201 with the stored
// entity (encoded key included) on success.
func (a *MinecraftApi) Post(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u := user.Current(ctx)
	if u == nil {
		// BUG FIX: resolve the login URL before committing a status code;
		// previously 401 was written first, making the error-path 500 a
		// superfluous second WriteHeader call.
		loginURL, err := user.LoginURL(ctx, "")
		if err != nil {
			log.Errorf(ctx, "get user login URL error, %s", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusUnauthorized)
		w.Write([]byte(fmt.Sprintf(`{"loginURL":"%s"}`, loginURL)))
		return
	}
	if !user.IsAdmin(ctx) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusForbidden)
		return
	}
	var minecraft Minecraft
	err := json.NewDecoder(r.Body).Decode(&minecraft)
	if err != nil {
		log.Infof(ctx, "request body, %v", r.Body) // typo fix: was "rquest"
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid request."}`))
		return
	}
	defer r.Body.Close()
	key := datastore.NewKey(ctx, "Minecraft", minecraft.World, 0, nil)
	err = datastore.RunInTransaction(ctx, func(c context.Context) error {
		var entity Minecraft
		// BUG FIX: use the transaction context c (not the outer ctx) so the
		// Get/Put below actually execute inside the transaction.
		err := datastore.Get(c, key, &entity)
		if err != datastore.ErrNoSuchEntity && err != nil {
			return err
		}
		minecraft.Status = "not_exists"
		now := time.Now()
		minecraft.CreatedAt = now
		minecraft.UpdatedAt = now
		if _, err := datastore.Put(c, key, &minecraft); err != nil {
			return err
		}
		return nil
	}, nil)
	if err != nil {
		log.Errorf(ctx, "Minecraft Put Error. error = %s", err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	minecraft.KeyStr = key.Encode()
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusCreated)
	json.NewEncoder(w).Encode(minecraft)
}
// Put updates the mutable fields (IPAddr, Zone, JarVersion) of an existing
// Minecraft entity identified by its encoded key. Responses mirror Post,
// with 200 and the request payload echoed back on success.
func (a *MinecraftApi) Put(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u := user.Current(ctx)
	if u == nil {
		// BUG FIX: resolve the login URL before committing a status code;
		// previously a 401 was written and then a useless 500 on error.
		loginURL, err := user.LoginURL(ctx, "")
		if err != nil {
			log.Errorf(ctx, "get user login URL error, %s", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusUnauthorized)
		w.Write([]byte(fmt.Sprintf(`{"loginURL":"%s"}`, loginURL)))
		return
	}
	if !user.IsAdmin(ctx) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusForbidden)
		return
	}
	var minecraft Minecraft
	err := json.NewDecoder(r.Body).Decode(&minecraft)
	if err != nil {
		log.Infof(ctx, "request body, %v", r.Body) // typo fix: was "rquest"
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid request."}`))
		return
	}
	defer r.Body.Close()
	key, err := datastore.DecodeKey(minecraft.KeyStr)
	if err != nil {
		log.Infof(ctx, "invalid key, %v", r.Body)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid key."}`))
		return
	}
	minecraft.Key = key
	err = datastore.RunInTransaction(ctx, func(c context.Context) error {
		var entity Minecraft
		// BUG FIX: transactional Get/Put must use the transaction context c,
		// not the outer ctx, or they run outside the transaction.
		err := datastore.Get(c, key, &entity)
		if err != datastore.ErrNoSuchEntity && err != nil {
			return err
		}
		entity.IPAddr = minecraft.IPAddr
		entity.Zone = minecraft.Zone
		entity.JarVersion = minecraft.JarVersion
		entity.UpdatedAt = time.Now()
		if _, err := datastore.Put(c, key, &entity); err != nil {
			return err
		}
		return nil
	}, nil)
	if err != nil {
		log.Errorf(ctx, "Minecraft Put Error. error = %s", err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(minecraft)
}
// Delete removes the Minecraft entity identified by the "key" form value.
// Responses: 401/403 as in Post, 400 for an undecodable key, 500 on
// datastore failure, 200 with an empty JSON object on success.
func (a *MinecraftApi) Delete(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u := user.Current(ctx)
	if u == nil {
		// BUG FIX: build the login URL before writing the 401 header so the
		// error path no longer issues a second, superfluous WriteHeader.
		loginURL, err := user.LoginURL(ctx, "")
		if err != nil {
			log.Errorf(ctx, "get user login URL error, %s", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusUnauthorized)
		w.Write([]byte(fmt.Sprintf(`{"loginURL":"%s"}`, loginURL)))
		return
	}
	if !user.IsAdmin(ctx) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusForbidden)
		return
	}
	keyStr := r.FormValue("key")
	key, err := datastore.DecodeKey(keyStr)
	if err != nil {
		log.Infof(ctx, "invalid key, %v", r.Body)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid key."}`))
		return
	}
	err = datastore.RunInTransaction(ctx, func(c context.Context) error {
		// BUG FIX: delete with the transaction context c, not the outer ctx.
		return datastore.Delete(c, key)
	}, nil)
	if err != nil {
		log.Errorf(ctx, "Minecraft Delete Error. error = %s", err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	// BUG FIX: json.NewEncoder(w).Encode(`{}`) serialized the *string* "{}"
	// (clients received "\"{}\""); emit a literal empty JSON object instead.
	w.Write([]byte("{}"))
}
// List writes every Minecraft entity, most recently updated first, as a
// JSON array. A query failure yields 500.
func (a *MinecraftApi) List(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	query := datastore.NewQuery("Minecraft").Order("-UpdatedAt")
	worlds := make([]*Minecraft, 0)
	iter := query.Run(ctx)
	for {
		var entity Minecraft
		key, err := iter.Next(&entity)
		if err == datastore.Done {
			break
		}
		if err != nil {
			log.Errorf(ctx, "Minecraft Query Error. error = %s", err.Error())
			w.Header().Set("Content-Type", "application/json; charset=utf-8")
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		// Expose the key to API clients in both raw and encoded form.
		entity.Key = key
		entity.KeyStr = key.Encode()
		worlds = append(worlds, &entity)
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(worlds)
}
// handlerMinecraftLog receives a Cloud Pub/Sub push message carrying a
// Minecraft server log line and forwards it to Slack as an attachment.
// Empty-but-valid payloads are acknowledged with 200 so Pub/Sub does not
// redeliver; malformed bodies get 400, downstream failures 500.
func handlerMinecraftLog(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	for k, v := range r.Header {
		log.Infof(ctx, "%s:%s", k, v)
	}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		log.Errorf(ctx, "ERROR request body read: %s", err)
		// Consistency fix: named constant instead of bare 500.
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	log.Infof(ctx, "request body = %s", string(body))
	var psb PubSubBody
	err = psb.Decode(body)
	if err != nil {
		log.Errorf(ctx, "ERROR request body Pub Sub Body decode: %v", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	log.Infof(ctx, "request Pub Sub Body = %v", psb)
	var psd PubSubData
	err = psd.Decode(psb.Message.Data)
	if err != nil {
		log.Errorf(ctx, "ERROR request body Pub Sub Data decode: %v", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	// Best-effort debug dump of the decoded payload.
	b, err := json.Marshal(psd)
	if err != nil {
		log.Errorf(ctx, "PubSubData json marshal error %v", err)
	} else {
		log.Infof(ctx, "request Pub Sub Data = %s", b)
	}
	if len(psd.StructPayload.Log) < 1 {
		// BUG FIX: log message typo, was "eompy".
		log.Infof(ctx, "StructPayload.Log is empty.")
		w.WriteHeader(http.StatusOK)
		return
	}
	var sm SlackMessage
	fields := make([]SlackField, 0)
	sa := SlackAttachment{
		Color:      "#36a64f",
		AuthorName: "sinmetalcraft",
		AuthorIcon: "https://storage.googleapis.com/sinmetalcraft-image/minecraft.jpeg",
		Title:      psd.StructPayload.Log,
		Fields:     fields,
	}
	sm.UserName = "sinmetalcraft"
	sm.IconUrl = "https://storage.googleapis.com/sinmetalcraft-image/minecraft.jpeg"
	sm.Text = ""
	sm.Attachments = []SlackAttachment{sa}
	acs := AppConfigService{}
	config, err := acs.Get(ctx)
	if err != nil {
		log.Errorf(ctx, "ERROR App Config Get: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	resp, err := PostToSlack(ctx, config.SlackPostUrl, sm)
	if err != nil {
		log.Errorf(ctx, "ERROR Post Slack: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	// BUG FIX: the Slack response was discarded without closing its body,
	// leaking the connection; close it so the transport can reuse it.
	defer resp.Body.Close()
	w.WriteHeader(http.StatusOK)
}
// listInstance fetches one page of GCE instances in the given zone and
// returns the items plus the next-page token. The ctx parameter is
// currently unused by this function.
func listInstance(ctx context.Context, is *compute.InstancesService, zone string) ([]*compute.Instance, string, error) {
	page, err := is.List(PROJECT_NAME, zone).Do()
	if err != nil {
		return nil, "", err
	}
	return page.Items, page.NextPageToken, nil
}
// createDiskFromSnapshot provisions a 100GB pd-ssd world-data disk in the
// world's zone, restored from the world's latest snapshot, and returns the
// pending Compute operation so callers can poll it.
func createDiskFromSnapshot(ctx context.Context, ds *compute.DisksService, minecraft Minecraft) (*compute.Operation, error) {
	diskName := fmt.Sprintf("%s-world-%s", INSTANCE_NAME, minecraft.World)
	disk := &compute.Disk{
		Name:           diskName,
		SizeGb:         100,
		SourceSnapshot: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/global/snapshots/" + minecraft.LatestSnapshot,
		Type:           "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/diskTypes/pd-ssd",
	}
	ope, err := ds.Insert(PROJECT_NAME, minecraft.Zone, disk).Do()
	if err != nil {
		log.Errorf(ctx, "ERROR insert disk: %s", err)
		return nil, err
	}
	// NOTE(review): "INSTNCE" looks like a typo, but every operation log key
	// in this file spells it this way; kept for log-query compatibility.
	WriteLog(ctx, "INSTNCE_DISK_OPE", ope)
	return ope, err
}
// createInstance boots a new GCE server for the given world: an n1-highmem-2
// preemptible instance in the world's zone, with a boot disk built from the
// "minecraft" image family and the pre-created world-data disk attached. It
// enqueues a task-queue poller for the insert operation and returns the
// instance name.
func createInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
	name := INSTANCE_NAME + "-" + minecraft.World
	worldDiskName := fmt.Sprintf("%s-world-%s", INSTANCE_NAME, minecraft.World)
	log.Infof(ctx, "create instance name = %s", name)
	// Startup/shutdown scripts are fetched from GCS by the instance agent.
	startupScriptURL := "gs://sinmetalcraft-minecraft-shell/minecraftserver-startup-script.sh"
	shutdownScriptURL := "gs://sinmetalcraft-minecraft-shell/minecraftserver-shutdown-script.sh"
	stateValue := "new"
	newIns := &compute.Instance{
		Name:        name,
		Zone:        "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone,
		MachineType: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/machineTypes/n1-highmem-2",
		Disks: []*compute.AttachedDisk{
			// Boot disk, created fresh from the minecraft image family.
			&compute.AttachedDisk{
				AutoDelete: true,
				Boot:       true,
				DeviceName: name,
				Mode:       "READ_WRITE",
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/global/images/family/minecraft",
					DiskType:    "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/diskTypes/pd-ssd",
					DiskSizeGb:  100,
				},
			},
			// Existing world-data disk (see createDiskFromSnapshot).
			&compute.AttachedDisk{
				AutoDelete: true,
				Boot:       false,
				DeviceName: worldDiskName,
				Mode:       "READ_WRITE",
				Source:     "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/disks/" + worldDiskName,
			},
		},
		CanIpForward: false,
		NetworkInterfaces: []*compute.NetworkInterface{
			&compute.NetworkInterface{
				Network: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/global/networks/default",
				AccessConfigs: []*compute.AccessConfig{
					// Static external IP recorded on the entity.
					&compute.AccessConfig{
						Name:  "External NAT",
						Type:  "ONE_TO_ONE_NAT",
						NatIP: minecraft.IPAddr,
					},
				},
			},
		},
		// Firewall rules for the server match on this tag.
		Tags: &compute.Tags{
			Items: []string{
				"minecraft-server",
			},
		},
		// Metadata consumed by the startup/shutdown scripts on the instance.
		Metadata: &compute.Metadata{
			Items: []*compute.MetadataItems{
				&compute.MetadataItems{
					Key:   "startup-script-url",
					Value: &startupScriptURL,
				},
				&compute.MetadataItems{
					Key:   "shutdown-script-url",
					Value: &shutdownScriptURL,
				},
				&compute.MetadataItems{
					Key:   "world",
					Value: &minecraft.World,
				},
				&compute.MetadataItems{
					Key:   "state",
					Value: &stateValue,
				},
				&compute.MetadataItems{
					Key:   "minecraft-version",
					Value: &minecraft.JarVersion,
				},
			},
		},
		ServiceAccounts: []*compute.ServiceAccount{
			&compute.ServiceAccount{
				Email: "default",
				Scopes: []string{
					compute.DevstorageReadWriteScope,
					compute.ComputeScope,
					"https://www.googleapis.com/auth/logging.write",
				},
			},
		},
		// Preemptible to keep costs down; the shutdown script handles
		// preemption, and TERMINATE is required for preemptible VMs.
		Scheduling: &compute.Scheduling{
			AutomaticRestart:  false,
			OnHostMaintenance: "TERMINATE",
			Preemptible:       true,
		},
	}
	ope, err := is.Insert(PROJECT_NAME, minecraft.Zone, newIns).Do()
	if err != nil {
		log.Errorf(ctx, "ERROR insert instance: %s", err)
		return "", err
	}
	WriteLog(ctx, "INSTNCE_CREATE_OPE", ope)
	// Poll the pending operation from a task queue.
	_, err = CallMinecraftTQ(ctx, minecraft.Key, ope.Name)
	if err != nil {
		return name, err
	}
	return name, nil
}
// startInstance starts the (stopped) GCE instance for the given world and
// enqueues a task-queue poller for the resulting operation. Returns the
// instance name even when enqueueing fails.
func startInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
	name := INSTANCE_NAME + "-" + minecraft.World
	log.Infof(ctx, "start instance name = %s", name)
	ope, err := is.Start(PROJECT_NAME, minecraft.Zone, name).Do()
	if err != nil {
		// BUG FIX: message previously said "reset instance" — a copy-paste
		// from resetInstance — making failures misleading in the logs.
		log.Errorf(ctx, "ERROR start instance: %s", err)
		return "", err
	}
	WriteLog(ctx, "INSTNCE_START_OPE", ope)
	_, err = CallMinecraftTQ(ctx, minecraft.Key, ope.Name)
	if err != nil {
		return name, err
	}
	return name, nil
}
// resetInstance hard-resets the world's GCE instance and enqueues a
// task-queue poller for the operation. Returns the instance name.
func resetInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
	name := INSTANCE_NAME + "-" + minecraft.World
	log.Infof(ctx, "reset instance name = %s", name)
	ope, err := is.Reset(PROJECT_NAME, minecraft.Zone, name).Do()
	if err != nil {
		log.Errorf(ctx, "ERROR reset instance: %s", err)
		return "", err
	}
	WriteLog(ctx, "INSTNCE_RESET_OPE", ope)
	if _, err := CallMinecraftTQ(ctx, minecraft.Key, ope.Name); err != nil {
		return name, err
	}
	return name, nil
}
// deleteInstance tears down the world's GCE instance and enqueues a
// task-queue poller for the operation. Returns the instance name.
func deleteInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
	name := INSTANCE_NAME + "-" + minecraft.World
	log.Infof(ctx, "delete instance name = %s", name)
	ope, err := is.Delete(PROJECT_NAME, minecraft.Zone, name).Do()
	if err != nil {
		log.Errorf(ctx, "ERROR delete instance: %s", err)
		return "", err
	}
	WriteLog(ctx, "INSTNCE_DELETE_OPE", ope)
	if _, err := CallMinecraftTQ(ctx, minecraft.Key, ope.Name); err != nil {
		return name, err
	}
	return name, nil
}
// Decode unmarshals a raw Pub/Sub push-request body into psb.
func (psb *PubSubBody) Decode(body []byte) error {
	return json.Unmarshal(body, psb)
}
// Decode base64-decodes the Pub/Sub message payload and unmarshals the
// JSON document inside it into psd.
func (psd *PubSubData) Decode(body string) error {
	decoded := base64.NewDecoder(base64.StdEncoding, strings.NewReader(body))
	return json.NewDecoder(decoded).Decode(psd)
}
// SlackMessage is the payload posted to a Slack incoming webhook.
type SlackMessage struct {
	UserName    string            `json:"username"`
	IconUrl     string            `json:"icon_url"`
	Text        string            `json:"text"`
	Attachments []SlackAttachment `json:"attachments"`
}
// SlackAttachment is one rich attachment within a SlackMessage.
type SlackAttachment struct {
	Color      string       `json:"color"`
	AuthorName string       `json:"author_name"`
	AuthorLink string       `json:"author_link"`
	AuthorIcon string       `json:"author_icon"`
	Title      string       `json:"title"`
	TitleLink  string       `json:"title_link"`
	Fields     []SlackField `json:"fields"`
}
// SlackField is a titled field inside a SlackAttachment.
type SlackField struct {
	Title string `json:"title"`
}
// PostToSlack JSON-encodes message and POSTs it to the given Slack webhook
// URL using the App Engine urlfetch client. The caller owns the returned
// response, including closing its body.
func PostToSlack(ctx context.Context, url string, message SlackMessage) (resp *http.Response, err error) {
	client := urlfetch.Client(ctx)
	body, err := json.Marshal(message)
	if err != nil {
		return nil, err
	}
	// BUG FIX: was fmt.Println, whose output is not captured in App Engine
	// logs; route the debug payload through the platform logger instead.
	log.Infof(ctx, "post to slack: %s", body)
	return client.Post(
		url,
		"application/json",
		bytes.NewReader(body))
}
// WriteLog emits v as a one-line JSON object {"key": <v>} to the info log.
// Marshal failures are reported to the error log and nothing else is written.
func WriteLog(ctx context.Context, key string, v interface{}) {
	body, err := json.Marshal(v)
	if err != nil {
		log.Errorf(ctx, "WriteLog Error %s %v", err.Error(), v)
		// BUG FIX: previously fell through and logged malformed JSON
		// ({"key":}) with a nil body; bail out instead.
		return
	}
	log.Infof(ctx, `{"%s":%s}`, key, body)
}
Strengthened the service-account scope to allow Cloud DNS editing. refs #42
package sinmetalcraft
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"google.golang.org/appengine"
"google.golang.org/appengine/datastore"
"google.golang.org/appengine/log"
"google.golang.org/appengine/urlfetch"
"google.golang.org/appengine/user"
"google.golang.org/api/compute/v1"
"golang.org/x/net/context"
)
// Package-wide Compute Engine identifiers.
const (
	// PROJECT_NAME is the GCP project owning all instances, disks and entities.
	PROJECT_NAME = "sinmetalcraft"
	// INSTANCE_NAME prefixes every Minecraft GCE instance and world-disk name.
	INSTANCE_NAME = "minecraft"
)
// init wires the HTTP routes: the Cloud Pub/Sub push endpoint for server
// logs and the JSON management API.
func init() {
	api := MinecraftApi{}
	http.HandleFunc("/minecraft", handlerMinecraftLog)
	http.HandleFunc("/api/1/minecraft", api.Handler)
}
// Minecraft is the Datastore entity (kind "Minecraft", keyed by world name)
// tracking one Minecraft world and its GCE server instance.
type Minecraft struct {
	Key    *datastore.Key `json:"-" datastore:"-"` // set after load; never stored or serialized
	KeyStr string         `json:"key" datastore:"-"` // URL-safe encoded Key for API clients
	World  string         `json:"world"`
	ResourceID int64 `json:"resourceID"`
	Zone   string `json:"zone" datastore:",unindexed"`
	IPAddr string `json:"ipAddr" datastore:",unindexed"`
	Status string `json:"status" datastore:",unindexed"`
	OperationType string `json:"operationType" datastore:",unindexed"`
	// NOTE(review): "operationstatus" and "latestSnpshot" below look like
	// typos, but they are the wire format existing clients consume —
	// confirm before changing.
	OperationStatus string `json:"operationstatus" datastore:",unindexed"`
	LatestSnapshot  string `json:"latestSnpshot" datastore:",unindexed"`
	JarVersion      string `json:"jarVersion" datastore:",unindexed"`
	OverviewerSnapshot string `json:"overViewerSnapshot" datastore:",unindexed"` // name of the snapshot for which a Minecraft Overviewer map has already been generated
	CreatedAt time.Time `json:"createdAt"`
	UpdatedAt time.Time `json:"updatedAt"`
}
// MinecraftApiListResponse is a paged API listing: instance summaries plus
// a cursor for fetching the next page.
type MinecraftApiListResponse struct {
	Items []MinecraftApiResponse `json:"items"`
	// BUG FIX: the tag was `json:cursor` (missing quotes). encoding/json
	// ignores malformed tags entirely, so the field marshaled as "Cursor";
	// go vet flags this.
	Cursor string `json:"cursor"`
}
// MinecraftApiResponse is one GCE instance summary returned by the API.
type MinecraftApiResponse struct {
	InstanceName      string `json:"instanceName"`
	Zone              string `json:"zone"`
	IPAddr            string `json:"iPAddr"`
	Status            string `json:"status"`
	CreationTimestamp string `json:"creationTimestamp"`
}
// Metadata is the envelope metadata of a Cloud Logging entry delivered
// through Pub/Sub (owning project, service, zone, labels, timestamp).
type Metadata struct {
	ProjectID   string            `json:"projectId"`
	ServiceName string            `json:"serviceName"`
	Zone        string            `json:"zone"`
	Labels      map[string]string `json:"labels"`
	Timestamp   string            `json:"timestamp"`
}
// StructPayload carries the structured payload of a log entry; only the
// raw log line is consumed here.
type StructPayload struct {
	Log string `json:"log"`
}
// PubSubData is the JSON document found (base64-encoded) inside a Pub/Sub
// push message: a single Cloud Logging entry.
type PubSubData struct {
	Metadata      Metadata      `json:"metadata"`
	InsertID      string        `json:"insertId"`
	Log           string        `json:"log"`
	StructPayload StructPayload `json:"structPayload"`
}
// Message is the Pub/Sub message envelope; Data holds the base64-encoded
// PubSubData document.
type Message struct {
	Data       string            `json:"data"`
	Attributes map[string]string `json:"attributes"`
	MessageID  string            `json:"message_id"`
}
// PubSubBody is the top-level JSON body of a Pub/Sub push request.
type PubSubBody struct {
	Message      Message `json:"message"`
	Subscription string  `json:"subscription"`
}
// MinecraftApi groups the stateless /api/1/minecraft HTTP handlers.
type MinecraftApi struct{}
// Handler dispatches /api/1/minecraft requests by HTTP verb; unsupported
// verbs receive 405 Method Not Allowed.
func (a *MinecraftApi) Handler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "POST":
		a.Post(w, r)
	case "PUT":
		a.Put(w, r)
	case "GET":
		a.List(w, r)
	case "DELETE":
		a.Delete(w, r)
	default:
		w.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// Post creates a new Minecraft world entity keyed by its world name.
// Responses: 401 (+login URL JSON) when not signed in, 403 for non-admins,
// 400 on an undecodable body, 500 on datastore failure, 201 with the stored
// entity (encoded key included) on success.
func (a *MinecraftApi) Post(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u := user.Current(ctx)
	if u == nil {
		// BUG FIX: resolve the login URL before committing a status code;
		// previously 401 was written first, making the error-path 500 a
		// superfluous second WriteHeader call.
		loginURL, err := user.LoginURL(ctx, "")
		if err != nil {
			log.Errorf(ctx, "get user login URL error, %s", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusUnauthorized)
		w.Write([]byte(fmt.Sprintf(`{"loginURL":"%s"}`, loginURL)))
		return
	}
	if !user.IsAdmin(ctx) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusForbidden)
		return
	}
	var minecraft Minecraft
	err := json.NewDecoder(r.Body).Decode(&minecraft)
	if err != nil {
		log.Infof(ctx, "request body, %v", r.Body) // typo fix: was "rquest"
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid request."}`))
		return
	}
	defer r.Body.Close()
	key := datastore.NewKey(ctx, "Minecraft", minecraft.World, 0, nil)
	err = datastore.RunInTransaction(ctx, func(c context.Context) error {
		var entity Minecraft
		// BUG FIX: use the transaction context c (not the outer ctx) so the
		// Get/Put below actually execute inside the transaction.
		err := datastore.Get(c, key, &entity)
		if err != datastore.ErrNoSuchEntity && err != nil {
			return err
		}
		minecraft.Status = "not_exists"
		now := time.Now()
		minecraft.CreatedAt = now
		minecraft.UpdatedAt = now
		if _, err := datastore.Put(c, key, &minecraft); err != nil {
			return err
		}
		return nil
	}, nil)
	if err != nil {
		log.Errorf(ctx, "Minecraft Put Error. error = %s", err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	minecraft.KeyStr = key.Encode()
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusCreated)
	json.NewEncoder(w).Encode(minecraft)
}
// Put updates the mutable fields (IPAddr, Zone, JarVersion) of an existing
// Minecraft entity identified by its encoded key. Responses mirror Post,
// with 200 and the request payload echoed back on success.
func (a *MinecraftApi) Put(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u := user.Current(ctx)
	if u == nil {
		// BUG FIX: resolve the login URL before committing a status code;
		// previously a 401 was written and then a useless 500 on error.
		loginURL, err := user.LoginURL(ctx, "")
		if err != nil {
			log.Errorf(ctx, "get user login URL error, %s", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusUnauthorized)
		w.Write([]byte(fmt.Sprintf(`{"loginURL":"%s"}`, loginURL)))
		return
	}
	if !user.IsAdmin(ctx) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusForbidden)
		return
	}
	var minecraft Minecraft
	err := json.NewDecoder(r.Body).Decode(&minecraft)
	if err != nil {
		log.Infof(ctx, "request body, %v", r.Body) // typo fix: was "rquest"
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid request."}`))
		return
	}
	defer r.Body.Close()
	key, err := datastore.DecodeKey(minecraft.KeyStr)
	if err != nil {
		log.Infof(ctx, "invalid key, %v", r.Body)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid key."}`))
		return
	}
	minecraft.Key = key
	err = datastore.RunInTransaction(ctx, func(c context.Context) error {
		var entity Minecraft
		// BUG FIX: transactional Get/Put must use the transaction context c,
		// not the outer ctx, or they run outside the transaction.
		err := datastore.Get(c, key, &entity)
		if err != datastore.ErrNoSuchEntity && err != nil {
			return err
		}
		entity.IPAddr = minecraft.IPAddr
		entity.Zone = minecraft.Zone
		entity.JarVersion = minecraft.JarVersion
		entity.UpdatedAt = time.Now()
		if _, err := datastore.Put(c, key, &entity); err != nil {
			return err
		}
		return nil
	}, nil)
	if err != nil {
		log.Errorf(ctx, "Minecraft Put Error. error = %s", err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(minecraft)
}
// Delete removes the Minecraft entity identified by the "key" form value.
// Responses: 401/403 as in Post, 400 for an undecodable key, 500 on
// datastore failure, 200 with an empty JSON object on success.
func (a *MinecraftApi) Delete(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u := user.Current(ctx)
	if u == nil {
		// BUG FIX: build the login URL before writing the 401 header so the
		// error path no longer issues a second, superfluous WriteHeader.
		loginURL, err := user.LoginURL(ctx, "")
		if err != nil {
			log.Errorf(ctx, "get user login URL error, %s", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusUnauthorized)
		w.Write([]byte(fmt.Sprintf(`{"loginURL":"%s"}`, loginURL)))
		return
	}
	if !user.IsAdmin(ctx) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusForbidden)
		return
	}
	keyStr := r.FormValue("key")
	key, err := datastore.DecodeKey(keyStr)
	if err != nil {
		log.Infof(ctx, "invalid key, %v", r.Body)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid key."}`))
		return
	}
	err = datastore.RunInTransaction(ctx, func(c context.Context) error {
		// BUG FIX: delete with the transaction context c, not the outer ctx.
		return datastore.Delete(c, key)
	}, nil)
	if err != nil {
		log.Errorf(ctx, "Minecraft Delete Error. error = %s", err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	// BUG FIX: json.NewEncoder(w).Encode(`{}`) serialized the *string* "{}"
	// (clients received "\"{}\""); emit a literal empty JSON object instead.
	w.Write([]byte("{}"))
}
// List writes every Minecraft entity, most recently updated first, as a
// JSON array. A query failure yields 500.
func (a *MinecraftApi) List(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	query := datastore.NewQuery("Minecraft").Order("-UpdatedAt")
	worlds := make([]*Minecraft, 0)
	iter := query.Run(ctx)
	for {
		var entity Minecraft
		key, err := iter.Next(&entity)
		if err == datastore.Done {
			break
		}
		if err != nil {
			log.Errorf(ctx, "Minecraft Query Error. error = %s", err.Error())
			w.Header().Set("Content-Type", "application/json; charset=utf-8")
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		// Expose the key to API clients in both raw and encoded form.
		entity.Key = key
		entity.KeyStr = key.Encode()
		worlds = append(worlds, &entity)
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(worlds)
}
// handlerMinecraftLog receives a Cloud Pub/Sub push request carrying a
// Minecraft server log line and forwards it to Slack as an attachment.
// Decode failures answer 400; downstream failures answer 500.
func handlerMinecraftLog(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	for k, v := range r.Header {
		log.Infof(ctx, "%s:%s", k, v)
	}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		log.Errorf(ctx, "ERROR request body read: %s", err)
		// use the named constant for consistency with the other handlers
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	log.Infof(ctx, "request body = %s", string(body))
	var psb PubSubBody
	err = psb.Decode(body)
	if err != nil {
		log.Errorf(ctx, "ERROR request body Pub Sub Body decode: %v", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	log.Infof(ctx, "request Pub Sub Body = %v", psb)
	var psd PubSubData
	err = psd.Decode(psb.Message.Data)
	if err != nil {
		log.Errorf(ctx, "ERROR request body Pub Sub Data decode: %v", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	// best-effort re-marshal purely for logging; failure is not fatal
	b, err := json.Marshal(psd)
	if err != nil {
		log.Errorf(ctx, "PubSubData json marshal error %v", err)
	} else {
		log.Infof(ctx, "request Pub Sub Data = %s", b)
	}
	// nothing to post when the log payload is empty
	if len(psd.StructPayload.Log) < 1 {
		// typo fix: "eompy" -> "empty"
		log.Infof(ctx, "StructPayload.Log is empty.")
		w.WriteHeader(http.StatusOK)
		return
	}
	var sm SlackMessage
	fields := make([]SlackField, 0)
	sa := SlackAttachment{
		Color:      "#36a64f",
		AuthorName: "sinmetalcraft",
		AuthorIcon: "https://storage.googleapis.com/sinmetalcraft-image/minecraft.jpeg",
		Title:      psd.StructPayload.Log,
		Fields:     fields,
	}
	sm.UserName = "sinmetalcraft"
	sm.IconUrl = "https://storage.googleapis.com/sinmetalcraft-image/minecraft.jpeg"
	sm.Text = ""
	sm.Attachments = []SlackAttachment{sa}
	acs := AppConfigService{}
	config, err := acs.Get(ctx)
	if err != nil {
		log.Errorf(ctx, "ERROR App Config Get: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	_, err = PostToSlack(ctx, config.SlackPostUrl, sm)
	if err != nil {
		log.Errorf(ctx, "ERROR Post Slack: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
}
// listInstance fetches one page of GCE instances in the given zone and
// returns the items together with the next-page token.
// NOTE(review): ctx is currently unused; kept for signature consistency
// with the other instance helpers.
func listInstance(ctx context.Context, is *compute.InstancesService, zone string) ([]*compute.Instance, string, error) {
	res, err := is.List(PROJECT_NAME, zone).Do()
	if err != nil {
		return nil, "", err
	}
	return res.Items, res.NextPageToken, nil
}
// createDiskFromSnapshot creates a 100 GB pd-ssd world disk in the
// minecraft's zone, restored from its latest snapshot, and logs the
// resulting operation.
func createDiskFromSnapshot(ctx context.Context, ds *compute.DisksService, minecraft Minecraft) (*compute.Operation, error) {
	diskName := fmt.Sprintf("%s-world-%s", INSTANCE_NAME, minecraft.World)
	disk := &compute.Disk{
		Name:           diskName,
		SizeGb:         100,
		SourceSnapshot: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/global/snapshots/" + minecraft.LatestSnapshot,
		Type:           "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/diskTypes/pd-ssd",
	}
	ope, err := ds.Insert(PROJECT_NAME, minecraft.Zone, disk).Do()
	if err != nil {
		log.Errorf(ctx, "ERROR insert disk: %s", err)
		return nil, err
	}
	// log key spelling ("INSTNCE") kept as-is for consistency with the
	// other WriteLog call sites.
	WriteLog(ctx, "INSTNCE_DISK_OPE", ope)
	return ope, nil
}
// create gce instance
//
// createInstance builds a preemptible n1-highmem-2 GCE instance for the
// given world: a boot disk from the "minecraft" image family, the
// pre-created world data disk attached as a second disk, a static NAT IP
// from the entity, and startup/shutdown scripts plus world metadata.
// It then enqueues a task (CallMinecraftTQ) to poll the insert operation.
// Returns the instance name even alongside a non-nil error once the
// insert itself succeeded.
func createInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
	name := INSTANCE_NAME + "-" + minecraft.World
	// must match the disk name chosen by createDiskFromSnapshot
	worldDiskName := fmt.Sprintf("%s-world-%s", INSTANCE_NAME, minecraft.World)
	log.Infof(ctx, "create instance name = %s", name)
	startupScriptURL := "gs://sinmetalcraft-minecraft-shell/minecraftserver-startup-script.sh"
	shutdownScriptURL := "gs://sinmetalcraft-minecraft-shell/minecraftserver-shutdown-script.sh"
	stateValue := "new"
	newIns := &compute.Instance{
		Name:        name,
		Zone:        "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone,
		MachineType: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/machineTypes/n1-highmem-2",
		Disks: []*compute.AttachedDisk{
			// boot disk, created fresh from the minecraft image family
			&compute.AttachedDisk{
				AutoDelete: true,
				Boot:       true,
				DeviceName: name,
				Mode:       "READ_WRITE",
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/global/images/family/minecraft",
					DiskType:    "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/diskTypes/pd-ssd",
					DiskSizeGb:  100,
				},
			},
			// existing world-data disk (restored from snapshot earlier);
			// AutoDelete means it is destroyed with the instance
			&compute.AttachedDisk{
				AutoDelete: true,
				Boot:       false,
				DeviceName: worldDiskName,
				Mode:       "READ_WRITE",
				Source:     "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/disks/" + worldDiskName,
			},
		},
		CanIpForward: false,
		NetworkInterfaces: []*compute.NetworkInterface{
			&compute.NetworkInterface{
				Network: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/global/networks/default",
				AccessConfigs: []*compute.AccessConfig{
					// reuse the static external IP stored on the entity
					&compute.AccessConfig{
						Name:  "External NAT",
						Type:  "ONE_TO_ONE_NAT",
						NatIP: minecraft.IPAddr,
					},
				},
			},
		},
		Tags: &compute.Tags{
			Items: []string{
				"minecraft-server",
			},
		},
		// metadata consumed by the startup/shutdown scripts on the VM
		Metadata: &compute.Metadata{
			Items: []*compute.MetadataItems{
				&compute.MetadataItems{
					Key:   "startup-script-url",
					Value: &startupScriptURL,
				},
				&compute.MetadataItems{
					Key:   "shutdown-script-url",
					Value: &shutdownScriptURL,
				},
				&compute.MetadataItems{
					Key:   "world",
					Value: &minecraft.World,
				},
				&compute.MetadataItems{
					Key:   "state",
					Value: &stateValue,
				},
				&compute.MetadataItems{
					Key:   "minecraft-version",
					Value: &minecraft.JarVersion,
				},
			},
		},
		ServiceAccounts: []*compute.ServiceAccount{
			&compute.ServiceAccount{
				Email: "default",
				Scopes: []string{
					"https://www.googleapis.com/auth/cloud-platform",
				},
			},
		},
		// preemptible to save cost; preemption triggers the shutdown script
		Scheduling: &compute.Scheduling{
			AutomaticRestart:  false,
			OnHostMaintenance: "TERMINATE",
			Preemptible:       true,
		},
	}
	ope, err := is.Insert(PROJECT_NAME, minecraft.Zone, newIns).Do()
	if err != nil {
		log.Errorf(ctx, "ERROR insert instance: %s", err)
		return "", err
	}
	WriteLog(ctx, "INSTNCE_CREATE_OPE", ope)
	_, err = CallMinecraftTQ(ctx, minecraft.Key, ope.Name)
	if err != nil {
		return name, err
	}
	return name, nil
}
// startInstance starts the (stopped) GCE instance for the given world and
// enqueues a task to poll the resulting operation. Returns the instance
// name; the name is returned even when enqueueing the poll task fails.
func startInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
	name := INSTANCE_NAME + "-" + minecraft.World
	log.Infof(ctx, "start instance name = %s", name)
	ope, err := is.Start(PROJECT_NAME, minecraft.Zone, name).Do()
	if err != nil {
		// fixed copy-paste: this is the start path, not reset
		log.Errorf(ctx, "ERROR start instance: %s", err)
		return "", err
	}
	WriteLog(ctx, "INSTNCE_START_OPE", ope)
	_, err = CallMinecraftTQ(ctx, minecraft.Key, ope.Name)
	if err != nil {
		return name, err
	}
	return name, nil
}
// resetInstance hard-resets the GCE instance for the given world and
// enqueues a task to poll the resulting operation.
func resetInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
	instanceName := INSTANCE_NAME + "-" + minecraft.World
	log.Infof(ctx, "reset instance name = %s", instanceName)
	ope, err := is.Reset(PROJECT_NAME, minecraft.Zone, instanceName).Do()
	if err != nil {
		log.Errorf(ctx, "ERROR reset instance: %s", err)
		return "", err
	}
	WriteLog(ctx, "INSTNCE_RESET_OPE", ope)
	if _, err := CallMinecraftTQ(ctx, minecraft.Key, ope.Name); err != nil {
		return instanceName, err
	}
	return instanceName, nil
}
// deleteInstance deletes the GCE instance for the given world (its
// auto-delete disks go with it) and enqueues a task to poll the
// resulting operation.
func deleteInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
	instanceName := INSTANCE_NAME + "-" + minecraft.World
	log.Infof(ctx, "delete instance name = %s", instanceName)
	ope, err := is.Delete(PROJECT_NAME, minecraft.Zone, instanceName).Do()
	if err != nil {
		log.Errorf(ctx, "ERROR delete instance: %s", err)
		return "", err
	}
	WriteLog(ctx, "INSTNCE_DELETE_OPE", ope)
	if _, err := CallMinecraftTQ(ctx, minecraft.Key, ope.Name); err != nil {
		return instanceName, err
	}
	return instanceName, nil
}
// Decode unmarshals the raw Pub/Sub push-request JSON into psb.
func (psb *PubSubBody) Decode(body []byte) error {
	return json.Unmarshal(body, psb)
}
// Decode base64-decodes the Pub/Sub message payload and unmarshals the
// resulting JSON into psd, streaming through the decoders without an
// intermediate buffer.
func (psd *PubSubData) Decode(body string) error {
	dec := json.NewDecoder(base64.NewDecoder(base64.StdEncoding, strings.NewReader(body)))
	return dec.Decode(psd)
}
// SlackMessage is the top-level payload posted to a Slack incoming webhook.
type SlackMessage struct {
	UserName    string            `json:"username"` // display name of the posting bot
	IconUrl     string            `json:"icon_url"` // avatar shown next to the message
	Text        string            `json:"text"`     // main message text (may be empty when attachments carry the content)
	Attachments []SlackAttachment `json:"attachments"`
}

// SlackAttachment is one rich attachment inside a SlackMessage.
type SlackAttachment struct {
	Color      string       `json:"color"` // sidebar color, e.g. "#36a64f"
	AuthorName string       `json:"author_name"`
	AuthorLink string       `json:"author_link"`
	AuthorIcon string       `json:"author_icon"`
	Title      string       `json:"title"`
	TitleLink  string       `json:"title_link"`
	Fields     []SlackField `json:"fields"`
}

// SlackField is a single field entry inside an attachment.
// NOTE(review): only Title is modeled; Slack fields usually also carry a
// value/short flag — confirm whether more fields are needed by callers.
type SlackField struct {
	Title string `json:"title"`
}
// PostToSlack marshals message and POSTs it as JSON to the given Slack
// webhook URL using an App Engine urlfetch client. The caller is
// responsible for closing the returned response body.
func PostToSlack(ctx context.Context, url string, message SlackMessage) (resp *http.Response, err error) {
	body, err := json.Marshal(message)
	if err != nil {
		return nil, err
	}
	// removed leftover debug fmt.Println of the payload: it wrote to
	// stdout, which App Engine logging does not capture, and leaked the
	// full message on every call.
	client := urlfetch.Client(ctx)
	return client.Post(
		url,
		"application/json",
		bytes.NewReader(body))
}
// WriteLog logs v as a one-entry JSON object {"key": v} at info level.
// Marshal failures are logged and the entry is skipped.
func WriteLog(ctx context.Context, key string, v interface{}) {
	body, err := json.Marshal(v)
	if err != nil {
		log.Errorf(ctx, "WriteLog Error %s %v", err.Error(), v)
		// bail out: continuing would emit the malformed entry {"key":}
		// with an empty body
		return
	}
	log.Infof(ctx, `{"%s":%s}`, key, body)
}
|
/*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package acme
import (
"context"
"crypto/rsa"
"fmt"
"net/url"
"strings"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/jetstack/cert-manager/pkg/acme"
"github.com/jetstack/cert-manager/pkg/acme/client"
apiutil "github.com/jetstack/cert-manager/pkg/api/util"
"github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha1"
logf "github.com/jetstack/cert-manager/pkg/logs"
"github.com/jetstack/cert-manager/pkg/util/errors"
"github.com/jetstack/cert-manager/pkg/util/pki"
acmeapi "github.com/jetstack/cert-manager/third_party/crypto/acme"
)
// Reason and message constants used when recording events and setting
// status conditions during ACME account setup. The message*Failed
// prefixes are concatenated directly with err.Error().
const (
	errorAccountRegistrationFailed = "ErrRegisterACMEAccount"
	errorAccountVerificationFailed = "ErrVerifyACMEAccount"
	errorAccountUpdateFailed       = "ErrUpdateACMEAccount"

	successAccountRegistered = "ACMEAccountRegistered"
	successAccountVerified   = "ACMEAccountVerified"

	messageAccountRegistrationFailed = "Failed to register ACME account: "
	messageAccountVerificationFailed = "Failed to verify ACME account: "
	// trailing space added for consistency with the other prefixes so
	// the appended error text is not fused to the colon
	messageAccountUpdateFailed = "Failed to update ACME account: "

	messageAccountRegistered = "The ACME account was registered with the ACME server"
	messageAccountVerified   = "The ACME account was verified with the ACME server"
)
// Setup will verify an existing ACME registration, or create one if not
// already registered.
func (a *Acme) Setup(ctx context.Context) error {
	log := logf.FromContext(ctx)

	// check if user has specified a v1 account URL, and set a status condition if so.
	if newURL, ok := acmev1ToV2Mappings[a.issuer.GetSpec().ACME.Server]; ok {
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, "InvalidConfig",
			fmt.Sprintf("Your ACME server URL is set to a v1 endpoint (%s). "+
				"You should update the spec.acme.server field to %q", a.issuer.GetSpec().ACME.Server, newURL))
		// return nil so that Setup only gets called again after the spec is updated
		return nil
	}

	// if the namespace field is not set, we are working on a ClusterIssuer resource
	// therefore we should check for the ACME private key in the 'cluster resource namespace'.
	ns := a.issuer.GetObjectMeta().Namespace
	if ns == "" {
		ns = a.IssuerOptions.ClusterResourceNamespace
	}

	log = logf.WithRelatedResourceName(log, a.issuer.GetSpec().ACME.PrivateKey.Name, ns, "Secret")

	// attempt to obtain the existing private key from the apiserver.
	// if it does not exist then we generate one
	// if it contains invalid data, warn the user and return without error.
	// if any other error occurs, return it and retry.
	pk, err := a.helper.ReadPrivateKey(a.issuer.GetSpec().ACME.PrivateKey, ns)
	switch {
	case apierrors.IsNotFound(err):
		log.Info("generating acme account private key")
		pk, err = a.createAccountPrivateKey(a.issuer.GetSpec().ACME.PrivateKey, ns)
		if err != nil {
			s := messageAccountRegistrationFailed + err.Error()
			apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, errorAccountRegistrationFailed, s)
			// explicit format string so a '%' in the error text cannot
			// be misread as a formatting directive
			return fmt.Errorf("%s", s)
		}
		// We clear the ACME account URI as we have generated a new private key
		a.issuer.GetStatus().ACMEStatus().URI = ""
	case errors.IsInvalidData(err):
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, errorAccountVerificationFailed, fmt.Sprintf("Account private key is invalid: %v", err))
		return nil
	case err != nil:
		s := messageAccountVerificationFailed + err.Error()
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, errorAccountVerificationFailed, s)
		return fmt.Errorf("%s", s)
	}

	acme.ClearClientCache()
	cl, err := acme.ClientWithKey(a.issuer, pk)
	if err != nil {
		s := messageAccountVerificationFailed + err.Error()
		log.Error(err, "failed to verify acme account")
		a.Recorder.Event(a.issuer, v1.EventTypeWarning, errorAccountVerificationFailed, s)
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, errorAccountVerificationFailed, s)
		return err
	}

	// TODO: perform a complex check to determine whether we need to verify
	// the existing registration with the ACME server.
	// This should take into account the ACME server URL, as well as a checksum
	// of the private key's contents.
	// Alternatively, we could add 'observed generation' fields here, tracking
	// the most recent copy of the Issuer and Secret resource we have checked
	// already.

	rawServerURL := a.issuer.GetSpec().ACME.Server
	parsedServerURL, err := url.Parse(rawServerURL)
	if err != nil {
		r := "InvalidURL"
		s := fmt.Sprintf("Failed to parse existing ACME server URI %q: %v", rawServerURL, err)
		a.Recorder.Eventf(a.issuer, v1.EventTypeWarning, r, s)
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, r, s)
		// absorb errors as retrying will not help resolve this error
		return nil
	}

	rawAccountURL := a.issuer.GetStatus().ACMEStatus().URI
	parsedAccountURL, err := url.Parse(rawAccountURL)
	if err != nil {
		r := "InvalidURL"
		s := fmt.Sprintf("Failed to parse existing ACME account URI %q: %v", rawAccountURL, err)
		a.Recorder.Eventf(a.issuer, v1.EventTypeWarning, r, s)
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, r, s)
		// absorb errors as retrying will not help resolve this error
		return nil
	}

	hasReadyCondition := apiutil.IssuerHasCondition(a.issuer, v1alpha1.IssuerCondition{
		Type:   v1alpha1.IssuerConditionReady,
		Status: v1alpha1.ConditionTrue,
	})

	// If the Host components of the server URL and the account URL match,
	// and the cached email matches the registered email, then
	// we skip re-checking the account status to save excess calls to the
	// ACME api.
	if hasReadyCondition &&
		a.issuer.GetStatus().ACMEStatus().URI != "" &&
		parsedAccountURL.Host == parsedServerURL.Host &&
		a.issuer.GetStatus().ACMEStatus().LastRegisteredEmail == a.issuer.GetSpec().ACME.Email {
		log.Info("skipping re-verifying ACME account as cached registration " +
			"details look sufficient")
		return nil
	}

	if parsedAccountURL.Host != parsedServerURL.Host {
		log.Info("ACME server URL host and ACME private key registration " +
			"host differ. Re-checking ACME account registration")
		a.issuer.GetStatus().ACMEStatus().URI = ""
	}

	// registerAccount will also verify the account exists if it already
	// exists.
	account, err := a.registerAccount(ctx, cl)
	if err != nil {
		s := messageAccountVerificationFailed + err.Error()
		log.Error(err, "failed to verify ACME account")
		a.Recorder.Event(a.issuer, v1.EventTypeWarning, errorAccountVerificationFailed, s)
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, errorAccountRegistrationFailed, s)

		acmeErr, ok := err.(*acmeapi.Error)
		// If this is not an ACME error, we will simply return it and retry later
		if !ok {
			return err
		}

		// If the status code is 400 (BadRequest), we will *not* retry this registration
		// as it implies that something about the request (i.e. email address or private key)
		// is invalid.
		if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
			log.Error(acmeErr, "skipping retrying account registration as a "+
				"BadRequest response was returned from the ACME server")
			return nil
		}

		// Otherwise if we receive anything other than a 400, we will retry.
		return err
	}

	// if we got an account successfully, we must check if the registered
	// email is the same as in the issuer spec
	// if no email was specified, then registeredEmail will remain empty
	registeredEmail := ""
	if len(account.Contact) > 0 {
		registeredEmail = strings.Replace(account.Contact[0], "mailto:", "", 1)
	}

	// if they are different, we update the account
	specEmail := a.issuer.GetSpec().ACME.Email
	if registeredEmail != specEmail {
		// logr's Info takes a constant message plus key/value pairs, not
		// a printf-style format string.
		log.Info("updating ACME account email address", "email", specEmail)
		emailurl := []string(nil)
		if a.issuer.GetSpec().ACME.Email != "" {
			emailurl = []string{fmt.Sprintf("mailto:%s", strings.ToLower(specEmail))}
		}
		account.Contact = emailurl

		account, err = cl.UpdateAccount(ctx, account)
		if err != nil {
			s := messageAccountUpdateFailed + err.Error()
			log.Error(err, "failed to update ACME account")
			a.Recorder.Event(a.issuer, v1.EventTypeWarning, errorAccountUpdateFailed, s)
			apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, errorAccountUpdateFailed, s)

			acmeErr, ok := err.(*acmeapi.Error)
			// If this is not an ACME error, we will simply return it and retry later
			if !ok {
				return err
			}

			// If the status code is 400 (BadRequest), we will *not* retry this registration
			// as it implies that something about the request (i.e. email address or private key)
			// is invalid.
			if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
				log.Error(acmeErr, "skipping updating account email as a "+
					"BadRequest response was returned from the ACME server")
				return nil
			}

			// Otherwise if we receive anything other than a 400, we will retry.
			return err
		}

		// update registeredEmail so LastRegisteredEmail below records the
		// address we just registered; without this the stale value forced
		// a re-verification on every subsequent sync.
		registeredEmail = specEmail
	}

	log.Info("verified existing registration with ACME server")
	apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionTrue, successAccountRegistered, messageAccountRegistered)
	a.issuer.GetStatus().ACMEStatus().URI = account.URL
	a.issuer.GetStatus().ACMEStatus().LastRegisteredEmail = registeredEmail

	return nil
}
// registerAccount will register a new ACME account with the server. If an
// account with the clients private key already exists, it will attempt to look
// up and verify the corresponding account, and will return that. If this fails
// due to a not found error it will register a new account with the given key.
func (a *Acme) registerAccount(ctx context.Context, cl client.Interface) (*acmeapi.Account, error) {
	// fast path: the key is already registered with the server
	existing, err := cl.GetAccount(ctx)
	if err == nil {
		return existing, nil
	}

	// Propagate anything that is not a "not registered" signal; some
	// servers answer 400 and others 404 for unknown account keys.
	acmeErr, isACMEErr := err.(*acmeapi.Error)
	if !isACMEErr || (acmeErr.StatusCode != 400 && acmeErr.StatusCode != 404) {
		return nil, err
	}

	var contact []string
	if email := a.issuer.GetSpec().ACME.Email; email != "" {
		contact = []string{fmt.Sprintf("mailto:%s", strings.ToLower(email))}
	}

	created, err := cl.CreateAccount(ctx, &acmeapi.Account{
		Contact:     contact,
		TermsAgreed: true,
	})
	if err != nil {
		return nil, err
	}
	// TODO: re-enable this check once this field is set by Pebble
	// if acc.Status != acme.StatusValid {
	// 	return nil, fmt.Errorf("acme account is not valid")
	// }
	return created, nil
}
// createAccountPrivateKey will generate a new RSA private key, and create it
// as a secret resource in the apiserver.
func (a *Acme) createAccountPrivateKey(sel v1alpha1.SecretKeySelector, ns string) (*rsa.PrivateKey, error) {
	// apply defaulting to the key selector (e.g. default key name)
	sel = acme.PrivateKeySelector(sel)

	key, err := pki.GenerateRSAPrivateKey(pki.MinRSAKeySize)
	if err != nil {
		return nil, err
	}

	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      sel.Name,
			Namespace: ns,
		},
		Data: map[string][]byte{
			sel.Key: pki.EncodePKCS1PrivateKey(key),
		},
	}
	if _, err := a.Client.CoreV1().Secrets(ns).Create(secret); err != nil {
		return nil, err
	}

	return key, nil
}
// acmev1ToV2Mappings maps known Let's Encrypt ACME v1 endpoint URLs
// (with and without a trailing slash) to their v2 equivalents. Setup
// uses it to detect deprecated v1 server URLs and tell the user which
// v2 URL to switch to.
var acmev1ToV2Mappings = map[string]string{
	"https://acme-v01.api.letsencrypt.org/directory":      "https://acme-v02.api.letsencrypt.org/directory",
	"https://acme-staging.api.letsencrypt.org/directory":  "https://acme-staging-v02.api.letsencrypt.org/directory",
	"https://acme-v01.api.letsencrypt.org/directory/":     "https://acme-v02.api.letsencrypt.org/directory",
	"https://acme-staging.api.letsencrypt.org/directory/": "https://acme-staging-v02.api.letsencrypt.org/directory",
}
acme: fixup bugs with email updating
Signed-off-by: James Munnelly <474ba67bdb289c6263b36dfd8a7bed6c85b04943@munnelly.eu>
/*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package acme
import (
"context"
"crypto/rsa"
"fmt"
"net/url"
"strings"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/jetstack/cert-manager/pkg/acme"
"github.com/jetstack/cert-manager/pkg/acme/client"
apiutil "github.com/jetstack/cert-manager/pkg/api/util"
"github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha1"
logf "github.com/jetstack/cert-manager/pkg/logs"
"github.com/jetstack/cert-manager/pkg/util/errors"
"github.com/jetstack/cert-manager/pkg/util/pki"
acmeapi "github.com/jetstack/cert-manager/third_party/crypto/acme"
)
// Reason and message constants used when recording events and setting
// status conditions during ACME account setup. The message*Failed
// prefixes are concatenated directly with err.Error().
const (
	errorAccountRegistrationFailed = "ErrRegisterACMEAccount"
	errorAccountVerificationFailed = "ErrVerifyACMEAccount"
	errorAccountUpdateFailed       = "ErrUpdateACMEAccount"

	successAccountRegistered = "ACMEAccountRegistered"
	successAccountVerified   = "ACMEAccountVerified"

	messageAccountRegistrationFailed = "Failed to register ACME account: "
	messageAccountVerificationFailed = "Failed to verify ACME account: "
	// trailing space added for consistency with the other prefixes so
	// the appended error text is not fused to the colon
	messageAccountUpdateFailed = "Failed to update ACME account: "

	messageAccountRegistered = "The ACME account was registered with the ACME server"
	messageAccountVerified   = "The ACME account was verified with the ACME server"
)
// Setup will verify an existing ACME registration, or create one if not
// already registered.
//
// Flow: reject deprecated v1 server URLs; load (or generate) the account
// private key; build an ACME client; skip verification when cached
// registration details still match; otherwise register/verify the
// account and reconcile the contact email. Returning nil with a False
// Ready condition means "do not retry until the spec changes".
func (a *Acme) Setup(ctx context.Context) error {
	log := logf.FromContext(ctx)

	// check if user has specified a v1 account URL, and set a status condition if so.
	if newURL, ok := acmev1ToV2Mappings[a.issuer.GetSpec().ACME.Server]; ok {
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, "InvalidConfig",
			fmt.Sprintf("Your ACME server URL is set to a v1 endpoint (%s). "+
				"You should update the spec.acme.server field to %q", a.issuer.GetSpec().ACME.Server, newURL))
		// return nil so that Setup only gets called again after the spec is updated
		return nil
	}

	// if the namespace field is not set, we are working on a ClusterIssuer resource
	// therefore we should check for the ACME private key in the 'cluster resource namespace'.
	ns := a.issuer.GetObjectMeta().Namespace
	if ns == "" {
		ns = a.IssuerOptions.ClusterResourceNamespace
	}

	log = logf.WithRelatedResourceName(log, a.issuer.GetSpec().ACME.PrivateKey.Name, ns, "Secret")

	// attempt to obtain the existing private key from the apiserver.
	// if it does not exist then we generate one
	// if it contains invalid data, warn the user and return without error.
	// if any other error occurs, return it and retry.
	pk, err := a.helper.ReadPrivateKey(a.issuer.GetSpec().ACME.PrivateKey, ns)
	switch {
	case apierrors.IsNotFound(err):
		log.Info("generating acme account private key")
		pk, err = a.createAccountPrivateKey(a.issuer.GetSpec().ACME.PrivateKey, ns)
		if err != nil {
			s := messageAccountRegistrationFailed + err.Error()
			apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, errorAccountRegistrationFailed, s)
			// NOTE(review): fmt.Errorf with a non-constant format string
			// would garble a '%' in the error text — confirm and switch
			// to fmt.Errorf("%s", s).
			return fmt.Errorf(s)
		}
		// We clear the ACME account URI as we have generated a new private key
		a.issuer.GetStatus().ACMEStatus().URI = ""
	case errors.IsInvalidData(err):
		// invalid key data is a user problem; do not retry until fixed
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, errorAccountVerificationFailed, fmt.Sprintf("Account private key is invalid: %v", err))
		return nil
	case err != nil:
		s := messageAccountVerificationFailed + err.Error()
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, errorAccountVerificationFailed, s)
		return fmt.Errorf(s)
	}

	// drop any cached client so it is rebuilt with the (possibly new) key
	acme.ClearClientCache()
	cl, err := acme.ClientWithKey(a.issuer, pk)
	if err != nil {
		s := messageAccountVerificationFailed + err.Error()
		log.Error(err, "failed to verify acme account")
		a.Recorder.Event(a.issuer, v1.EventTypeWarning, errorAccountVerificationFailed, s)
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, errorAccountVerificationFailed, s)
		return err
	}

	// TODO: perform a complex check to determine whether we need to verify
	// the existing registration with the ACME server.
	// This should take into account the ACME server URL, as well as a checksum
	// of the private key's contents.
	// Alternatively, we could add 'observed generation' fields here, tracking
	// the most recent copy of the Issuer and Secret resource we have checked
	// already.

	rawServerURL := a.issuer.GetSpec().ACME.Server
	parsedServerURL, err := url.Parse(rawServerURL)
	if err != nil {
		r := "InvalidURL"
		s := fmt.Sprintf("Failed to parse existing ACME server URI %q: %v", rawServerURL, err)
		a.Recorder.Eventf(a.issuer, v1.EventTypeWarning, r, s)
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, r, s)
		// absorb errors as retrying will not help resolve this error
		return nil
	}

	rawAccountURL := a.issuer.GetStatus().ACMEStatus().URI
	parsedAccountURL, err := url.Parse(rawAccountURL)
	if err != nil {
		r := "InvalidURL"
		s := fmt.Sprintf("Failed to parse existing ACME account URI %q: %v", rawAccountURL, err)
		a.Recorder.Eventf(a.issuer, v1.EventTypeWarning, r, s)
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, r, s)
		// absorb errors as retrying will not help resolve this error
		return nil
	}

	hasReadyCondition := apiutil.IssuerHasCondition(a.issuer, v1alpha1.IssuerCondition{
		Type:   v1alpha1.IssuerConditionReady,
		Status: v1alpha1.ConditionTrue,
	})

	// If the Host components of the server URL and the account URL match,
	// and the cached email matches the registered email, then
	// we skip re-checking the account status to save excess calls to the
	// ACME api.
	if hasReadyCondition &&
		a.issuer.GetStatus().ACMEStatus().URI != "" &&
		parsedAccountURL.Host == parsedServerURL.Host &&
		a.issuer.GetStatus().ACMEStatus().LastRegisteredEmail == a.issuer.GetSpec().ACME.Email {
		log.Info("skipping re-verifying ACME account as cached registration " +
			"details look sufficient")
		return nil
	}

	if parsedAccountURL.Host != parsedServerURL.Host {
		log.Info("ACME server URL host and ACME private key registration " +
			"host differ. Re-checking ACME account registration")
		a.issuer.GetStatus().ACMEStatus().URI = ""
	}

	// registerAccount will also verify the account exists if it already
	// exists.
	account, err := a.registerAccount(ctx, cl)
	if err != nil {
		s := messageAccountVerificationFailed + err.Error()
		log.Error(err, "failed to verify ACME account")
		a.Recorder.Event(a.issuer, v1.EventTypeWarning, errorAccountVerificationFailed, s)
		apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, errorAccountRegistrationFailed, s)

		acmeErr, ok := err.(*acmeapi.Error)
		// If this is not an ACME error, we will simply return it and retry later
		if !ok {
			return err
		}

		// If the status code is 400 (BadRequest), we will *not* retry this registration
		// as it implies that something about the request (i.e. email address or private key)
		// is invalid.
		if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
			log.Error(acmeErr, "skipping retrying account registration as a "+
				"BadRequest response was returned from the ACME server")
			return nil
		}

		// Otherwise if we receive anything other than a 400, we will retry.
		return err
	}

	// if we got an account successfully, we must check if the registered
	// email is the same as in the issuer spec
	// if no email was specified, then registeredEmail will remain empty
	registeredEmail := ""
	if len(account.Contact) > 0 {
		registeredEmail = strings.Replace(account.Contact[0], "mailto:", "", 1)
	}

	// if they are different, we update the account
	specEmail := a.issuer.GetSpec().ACME.Email
	if registeredEmail != specEmail {
		log.Info("updating ACME account email address", "email", specEmail)
		emailurl := []string(nil)
		if a.issuer.GetSpec().ACME.Email != "" {
			emailurl = []string{fmt.Sprintf("mailto:%s", strings.ToLower(specEmail))}
		}
		account.Contact = emailurl

		account, err = cl.UpdateAccount(ctx, account)
		if err != nil {
			s := messageAccountUpdateFailed + err.Error()
			log.Error(err, "failed to update ACME account")
			a.Recorder.Event(a.issuer, v1.EventTypeWarning, errorAccountUpdateFailed, s)
			apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionFalse, errorAccountUpdateFailed, s)

			acmeErr, ok := err.(*acmeapi.Error)
			// If this is not an ACME error, we will simply return it and retry later
			if !ok {
				return err
			}

			// If the status code is 400 (BadRequest), we will *not* retry this registration
			// as it implies that something about the request (i.e. email address or private key)
			// is invalid.
			if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
				log.Error(acmeErr, "skipping updating account email as a "+
					"BadRequest response was returned from the ACME server")
				return nil
			}

			// Otherwise if we receive anything other than a 400, we will retry.
			return err
		}

		// update the registeredEmail var so it is updated properly in the status below
		registeredEmail = specEmail
	}

	log.Info("verified existing registration with ACME server")
	apiutil.SetIssuerCondition(a.issuer, v1alpha1.IssuerConditionReady, v1alpha1.ConditionTrue, successAccountRegistered, messageAccountRegistered)
	a.issuer.GetStatus().ACMEStatus().URI = account.URL
	a.issuer.GetStatus().ACMEStatus().LastRegisteredEmail = registeredEmail

	return nil
}
// registerAccount will register a new ACME account with the server. If an
// account with the clients private key already exists, it will attempt to look
// up and verify the corresponding account, and will return that. If this fails
// due to a not found error it will register a new account with the given key.
func (a *Acme) registerAccount(ctx context.Context, cl client.Interface) (*acmeapi.Account, error) {
	// check if the account already exists
	acc, err := cl.GetAccount(ctx)
	if err == nil {
		return acc, nil
	}
	// return all errors except for 404 errors (which indicate the account
	// is not yet registered)
	// NOTE(review): 400 is also treated as "not registered" here —
	// presumably because some servers answer 400 for unknown keys; confirm.
	acmeErr, ok := err.(*acmeapi.Error)
	if !ok || (acmeErr.StatusCode != 400 && acmeErr.StatusCode != 404) {
		return nil, err
	}

	// build the mailto: contact list from the (lowercased) spec email, if set
	emailurl := []string(nil)
	if a.issuer.GetSpec().ACME.Email != "" {
		emailurl = []string{fmt.Sprintf("mailto:%s", strings.ToLower(a.issuer.GetSpec().ACME.Email))}
	}

	acc = &acmeapi.Account{
		Contact:     emailurl,
		TermsAgreed: true,
	}
	acc, err = cl.CreateAccount(ctx, acc)
	if err != nil {
		return nil, err
	}

	// TODO: re-enable this check once this field is set by Pebble
	// if acc.Status != acme.StatusValid {
	// 	return nil, fmt.Errorf("acme account is not valid")
	// }

	return acc, nil
}
// createAccountPrivateKey will generate a new RSA private key, and create it
// as a secret resource in the apiserver under the given selector's name/key
// in namespace ns. It returns the generated key.
func (a *Acme) createAccountPrivateKey(sel v1alpha1.SecretKeySelector, ns string) (*rsa.PrivateKey, error) {
	// Apply defaulting to the selector (e.g. fall back to the default key name).
	sel = acme.PrivateKeySelector(sel)
	accountPrivKey, err := pki.GenerateRSAPrivateKey(pki.MinRSAKeySize)
	if err != nil {
		return nil, err
	}
	// Persist the PKCS#1-encoded key in a Secret under the selected key name.
	_, err = a.Client.CoreV1().Secrets(ns).Create(&v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      sel.Name,
			Namespace: ns,
		},
		Data: map[string][]byte{
			sel.Key: pki.EncodePKCS1PrivateKey(accountPrivKey),
		},
	})
	if err != nil {
		return nil, err
	}
	// err is known to be nil here; return an explicit nil rather than the
	// stale err variable for clarity.
	return accountPrivKey, nil
}
// acmev1ToV2Mappings maps legacy Let's Encrypt ACME v1 directory URLs (with
// and without a trailing slash) to their ACME v2 equivalents.
var acmev1ToV2Mappings = map[string]string{
	"https://acme-v01.api.letsencrypt.org/directory":      "https://acme-v02.api.letsencrypt.org/directory",
	"https://acme-staging.api.letsencrypt.org/directory":  "https://acme-staging-v02.api.letsencrypt.org/directory",
	"https://acme-v01.api.letsencrypt.org/directory/":     "https://acme-v02.api.letsencrypt.org/directory",
	"https://acme-staging.api.letsencrypt.org/directory/": "https://acme-staging-v02.api.letsencrypt.org/directory",
}
|
package v3
import (
"encoding/json"
"fmt"
"github.com/cloudfoundry/cf-acceptance-tests/Godeps/_workspace/src/github.com/cloudfoundry-incubator/cf-test-helpers/cf"
"github.com/cloudfoundry/cf-acceptance-tests/Godeps/_workspace/src/github.com/cloudfoundry-incubator/cf-test-helpers/generator"
"github.com/cloudfoundry/cf-acceptance-tests/Godeps/_workspace/src/github.com/cloudfoundry-incubator/cf-test-helpers/helpers"
"github.com/cloudfoundry/cf-acceptance-tests/helpers/assets"
. "github.com/cloudfoundry/cf-acceptance-tests/helpers/v3_helpers"
. "github.com/cloudfoundry/cf-acceptance-tests/Godeps/_workspace/src/github.com/onsi/ginkgo"
. "github.com/cloudfoundry/cf-acceptance-tests/Godeps/_workspace/src/github.com/onsi/gomega"
. "github.com/cloudfoundry/cf-acceptance-tests/Godeps/_workspace/src/github.com/onsi/gomega/gexec"
)
// Acceptance tests for the CC v3 tasks API: creating a task, watching it run
// to completion, the TASK_STARTED/TASK_STOPPED usage events, and cancellation.
var _ = Describe("v3 tasks", func() {
	// Result mirrors the "result" object of a v3 task resource.
	type Result struct {
		FailureReason string `json:"failure_reason"`
	}
	// Task mirrors the subset of v3 task fields asserted on below.
	type Task struct {
		Guid    string `json:"guid"`
		Command string `json:"command"`
		Name    string `json:"name"`
		State   string `json:"state"`
		Result  Result `json:"result"`
	}
	var (
		appName                         string
		appGuid                         string
		packageGuid                     string
		spaceGuid                       string
		appCreationEnvironmentVariables string
	)
	// Create an app, upload and stage its package, and assign the resulting
	// droplet so tasks can be run against the app.
	BeforeEach(func() {
		appName = generator.PrefixedRandomName("CATS-APP-")
		spaceGuid = GetSpaceGuidFromName(context.RegularUserContext().Space)
		appCreationEnvironmentVariables = `"foo"=>"bar"`
		appGuid = CreateApp(appName, spaceGuid, `{"foo":"bar"}`)
		packageGuid = CreatePackage(appGuid)
		token := GetAuthToken()
		uploadUrl := fmt.Sprintf("%s/v3/packages/%s/upload", config.ApiEndpoint, packageGuid)
		UploadPackage(uploadUrl, assets.NewAssets().DoraZip, token)
		WaitForPackageToBeReady(packageGuid)
		dropletGuid := StagePackage(packageGuid, "{}")
		WaitForDropletToStage(dropletGuid)
		AssignDropletToApp(appGuid, dropletGuid)
	})
	AfterEach(func() {
		DeleteApp(appGuid)
	})
	// Tasks are opt-in; the whole section below is compiled out of the suite
	// when the deployment does not include them.
	// NOTE(review): this local config shadows the identifier used by the
	// closures above (they were defined before this declaration point) —
	// confirm both resolve to the intended configuration value.
	config := helpers.LoadConfig()
	if config.IncludeTasks {
		Context("tasks lifecycle", func() {
			It("can successfully create and run a task", func() {
				By("creating the task")
				var createOutput Task
				postBody := `{"command": "echo 0", "name": "mreow"}`
				createCommand := cf.Cf("curl", fmt.Sprintf("/v3/apps/%s/tasks", appGuid), "-X", "POST", "-d", postBody).Wait(DEFAULT_TIMEOUT)
				Expect(createCommand).To(Exit(0))
				err := json.Unmarshal(createCommand.Out.Contents(), &createOutput)
				Expect(err).NotTo(HaveOccurred())
				Expect(createOutput.Command).To(Equal("echo 0"))
				Expect(createOutput.Name).To(Equal("mreow"))
				Expect(createOutput.State).To(Equal("RUNNING"))
				By("TASK_STARTED AppUsageEvent")
				usageEvents := LastPageUsageEvents(context)
				start_event := AppUsageEvent{Entity{State: "TASK_STARTED", ParentAppGuid: appGuid, ParentAppName: appName, TaskGuid: createOutput.Guid}}
				Expect(UsageEventsInclude(usageEvents, start_event)).To(BeTrue())
				By("successfully running")
				var readOutput Task
				// poll the task until it leaves RUNNING
				Eventually(func() string {
					readCommand := cf.Cf("curl", fmt.Sprintf("/v3/apps/%s/tasks/%s", appGuid, createOutput.Guid), "-X", "GET").Wait(DEFAULT_TIMEOUT)
					Expect(readCommand).To(Exit(0))
					err := json.Unmarshal(readCommand.Out.Contents(), &readOutput)
					Expect(err).NotTo(HaveOccurred())
					return readOutput.State
				}, DEFAULT_TIMEOUT).Should(Equal("SUCCEEDED"))
				By("TASK_STOPPED AppUsageEvent")
				usageEvents = LastPageUsageEvents(context)
				stop_event := AppUsageEvent{Entity{State: "TASK_STOPPED", ParentAppGuid: appGuid, ParentAppName: appName, TaskGuid: createOutput.Guid}}
				Expect(UsageEventsInclude(usageEvents, stop_event)).To(BeTrue())
			})
		})
		Context("When canceling a task", func() {
			var taskGuid string
			// start a long-running task so there is something to cancel
			BeforeEach(func() {
				postBody := `{"command": "sleep 100;", "name": "mreow"}`
				createCommand := cf.Cf("curl", fmt.Sprintf("/v3/apps/%s/tasks", appGuid), "-X", "POST", "-d", postBody).Wait(DEFAULT_TIMEOUT)
				Expect(createCommand).To(Exit(0))
				var createOutput Task
				err := json.Unmarshal(createCommand.Out.Contents(), &createOutput)
				Expect(err).NotTo(HaveOccurred())
				Expect(createOutput.Guid).NotTo(Equal(""))
				taskGuid = createOutput.Guid
			})
			It("should show task is in FAILED state", func() {
				var failureReason string
				cancelCommand := cf.Cf("curl", fmt.Sprintf("/v3/apps/%s/tasks/%s/cancel", appGuid, taskGuid), "-X", "PUT").Wait(DEFAULT_TIMEOUT)
				Expect(cancelCommand).To(Exit(0))
				Eventually(func() string {
					readCommand := cf.Cf("curl", fmt.Sprintf("/v3/apps/%s/tasks/%s", appGuid, taskGuid), "-X", "GET").Wait(DEFAULT_TIMEOUT)
					Expect(readCommand).To(Exit(0))
					var readOutput Task
					err := json.Unmarshal(readCommand.Out.Contents(), &readOutput)
					Expect(err).NotTo(HaveOccurred())
					failureReason = readOutput.Result.FailureReason
					return readOutput.State
				}, DEFAULT_TIMEOUT).Should(Equal("FAILED"))
				Expect(failureReason).To(Equal("task was cancelled"))
			})
		})
	}
})
Temporarily pend task canceling tests
package v3
import (
"encoding/json"
"fmt"
"github.com/cloudfoundry/cf-acceptance-tests/Godeps/_workspace/src/github.com/cloudfoundry-incubator/cf-test-helpers/cf"
"github.com/cloudfoundry/cf-acceptance-tests/Godeps/_workspace/src/github.com/cloudfoundry-incubator/cf-test-helpers/generator"
"github.com/cloudfoundry/cf-acceptance-tests/Godeps/_workspace/src/github.com/cloudfoundry-incubator/cf-test-helpers/helpers"
"github.com/cloudfoundry/cf-acceptance-tests/helpers/assets"
. "github.com/cloudfoundry/cf-acceptance-tests/helpers/v3_helpers"
. "github.com/cloudfoundry/cf-acceptance-tests/Godeps/_workspace/src/github.com/onsi/ginkgo"
. "github.com/cloudfoundry/cf-acceptance-tests/Godeps/_workspace/src/github.com/onsi/gomega"
. "github.com/cloudfoundry/cf-acceptance-tests/Godeps/_workspace/src/github.com/onsi/gomega/gexec"
)
// Acceptance tests for the CC v3 tasks API: creating a task, watching it run
// to completion, and the TASK_STARTED/TASK_STOPPED usage events. The
// cancellation context is pended (XContext) in this revision.
var _ = Describe("v3 tasks", func() {
	// Result mirrors the "result" object of a v3 task resource.
	type Result struct {
		FailureReason string `json:"failure_reason"`
	}
	// Task mirrors the subset of v3 task fields asserted on below.
	type Task struct {
		Guid    string `json:"guid"`
		Command string `json:"command"`
		Name    string `json:"name"`
		State   string `json:"state"`
		Result  Result `json:"result"`
	}
	var (
		appName                         string
		appGuid                         string
		packageGuid                     string
		spaceGuid                       string
		appCreationEnvironmentVariables string
	)
	// Create an app, upload and stage its package, and assign the resulting
	// droplet so tasks can be run against the app.
	BeforeEach(func() {
		appName = generator.PrefixedRandomName("CATS-APP-")
		spaceGuid = GetSpaceGuidFromName(context.RegularUserContext().Space)
		appCreationEnvironmentVariables = `"foo"=>"bar"`
		appGuid = CreateApp(appName, spaceGuid, `{"foo":"bar"}`)
		packageGuid = CreatePackage(appGuid)
		token := GetAuthToken()
		uploadUrl := fmt.Sprintf("%s/v3/packages/%s/upload", config.ApiEndpoint, packageGuid)
		UploadPackage(uploadUrl, assets.NewAssets().DoraZip, token)
		WaitForPackageToBeReady(packageGuid)
		dropletGuid := StagePackage(packageGuid, "{}")
		WaitForDropletToStage(dropletGuid)
		AssignDropletToApp(appGuid, dropletGuid)
	})
	AfterEach(func() {
		DeleteApp(appGuid)
	})
	// Tasks are opt-in; the whole section below is compiled out of the suite
	// when the deployment does not include them.
	config := helpers.LoadConfig()
	if config.IncludeTasks {
		Context("tasks lifecycle", func() {
			It("can successfully create and run a task", func() {
				By("creating the task")
				var createOutput Task
				postBody := `{"command": "echo 0", "name": "mreow"}`
				createCommand := cf.Cf("curl", fmt.Sprintf("/v3/apps/%s/tasks", appGuid), "-X", "POST", "-d", postBody).Wait(DEFAULT_TIMEOUT)
				Expect(createCommand).To(Exit(0))
				err := json.Unmarshal(createCommand.Out.Contents(), &createOutput)
				Expect(err).NotTo(HaveOccurred())
				Expect(createOutput.Command).To(Equal("echo 0"))
				Expect(createOutput.Name).To(Equal("mreow"))
				Expect(createOutput.State).To(Equal("RUNNING"))
				By("TASK_STARTED AppUsageEvent")
				usageEvents := LastPageUsageEvents(context)
				start_event := AppUsageEvent{Entity{State: "TASK_STARTED", ParentAppGuid: appGuid, ParentAppName: appName, TaskGuid: createOutput.Guid}}
				Expect(UsageEventsInclude(usageEvents, start_event)).To(BeTrue())
				By("successfully running")
				var readOutput Task
				// poll the task until it leaves RUNNING
				Eventually(func() string {
					readCommand := cf.Cf("curl", fmt.Sprintf("/v3/apps/%s/tasks/%s", appGuid, createOutput.Guid), "-X", "GET").Wait(DEFAULT_TIMEOUT)
					Expect(readCommand).To(Exit(0))
					err := json.Unmarshal(readCommand.Out.Contents(), &readOutput)
					Expect(err).NotTo(HaveOccurred())
					return readOutput.State
				}, DEFAULT_TIMEOUT).Should(Equal("SUCCEEDED"))
				By("TASK_STOPPED AppUsageEvent")
				usageEvents = LastPageUsageEvents(context)
				stop_event := AppUsageEvent{Entity{State: "TASK_STOPPED", ParentAppGuid: appGuid, ParentAppName: appName, TaskGuid: createOutput.Guid}}
				Expect(UsageEventsInclude(usageEvents, stop_event)).To(BeTrue())
			})
		})
		// XContext: task-canceling tests are temporarily pended.
		XContext("When canceling a task", func() {
			var taskGuid string
			// start a long-running task so there is something to cancel
			BeforeEach(func() {
				postBody := `{"command": "sleep 100;", "name": "mreow"}`
				createCommand := cf.Cf("curl", fmt.Sprintf("/v3/apps/%s/tasks", appGuid), "-X", "POST", "-d", postBody).Wait(DEFAULT_TIMEOUT)
				Expect(createCommand).To(Exit(0))
				var createOutput Task
				err := json.Unmarshal(createCommand.Out.Contents(), &createOutput)
				Expect(err).NotTo(HaveOccurred())
				Expect(createOutput.Guid).NotTo(Equal(""))
				taskGuid = createOutput.Guid
			})
			It("should show task is in FAILED state", func() {
				var failureReason string
				cancelCommand := cf.Cf("curl", fmt.Sprintf("/v3/apps/%s/tasks/%s/cancel", appGuid, taskGuid), "-X", "PUT").Wait(DEFAULT_TIMEOUT)
				Expect(cancelCommand).To(Exit(0))
				Eventually(func() string {
					readCommand := cf.Cf("curl", fmt.Sprintf("/v3/apps/%s/tasks/%s", appGuid, taskGuid), "-X", "GET").Wait(DEFAULT_TIMEOUT)
					Expect(readCommand).To(Exit(0))
					var readOutput Task
					err := json.Unmarshal(readCommand.Out.Contents(), &readOutput)
					Expect(err).NotTo(HaveOccurred())
					failureReason = readOutput.Result.FailureReason
					return readOutput.State
				}, DEFAULT_TIMEOUT).Should(Equal("FAILED"))
				Expect(failureReason).To(Equal("task was cancelled"))
			})
		})
	}
})
|
//Copyright 2015 TF2Stadium. All rights reserved.
//Use of this source code is governed by the MIT license
//that can be found in the LICENSE file.
//Package wsevent implements thread-safe event-driven communication similar to socket.IO,
//on the top of Gorilla's WebSocket implementation.
package wsevent
import (
"crypto/rand"
"encoding/base64"
"encoding/json"
"log"
"net/http"
"sync"
ws "github.com/gorilla/websocket"
)
//Client represents a single websocket connection and its session.
type Client struct {
	//Session ID
	id string

	conn     *ws.Conn
	connLock *sync.RWMutex //guards all writes to / closes of conn
	request  *http.Request //the HTTP request that established the connection
}

//Server routes incoming events to handlers and manages room membership.
type Server struct {
	//maps room string to a list of clients in it
	rooms     map[string]([]*Client)
	roomsLock *sync.RWMutex

	//maps client IDs to the list of rooms the corresponding client has joined
	joinedRooms     map[string][]string
	joinedRoomsLock *sync.RWMutex

	//The extractor function reads the byte array and the message type
	//and returns the event represented by the message.
	Extractor func(string) string

	//Called when the websocket connection closes. The disconnected client's
	//session ID is sent as an argument
	OnDisconnect func(string)

	//Called when no event handler for a specific event exists
	DefaultHandler func(*Server, *Client, []byte) []byte

	handlers     map[string]func(*Server, *Client, []byte) []byte
	handlersLock *sync.RWMutex

	newClient chan *Client //incoming connections, consumed by Server.listener
}
//genID generates a random 32-byte session ID, base64url-encoded.
//NOTE(review): the error from rand.Read is ignored — confirm that is acceptable.
func genID() string {
	bytes := make([]byte, 32)
	rand.Read(bytes)
	return base64.URLEncoding.EncodeToString(bytes)
}

//Returns the client's unique session ID
func (c *Client) Id() string {
	return c.id
}

//Request returns the HTTP request that established the connection.
func (c *Client) Request() *http.Request {
	return c.request
}
//NewClientWithID upgrades the HTTP connection to a websocket and registers a
//new client with the given session ID on the server.
func (s *Server) NewClientWithID(upgrader ws.Upgrader, w http.ResponseWriter, r *http.Request, id string) (*Client, error) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return nil, err
	}
	client := &Client{
		id:       id,
		conn:     conn,
		connLock: new(sync.RWMutex),
		request:  r,
	}
	//hand the client to the server's accept loop, which starts its read loop
	s.newClient <- client
	return client, nil
}

//NewClient is NewClientWithID with a freshly generated random session ID.
func (s *Server) NewClient(upgrader ws.Upgrader, w http.ResponseWriter, r *http.Request) (*Client, error) {
	return s.NewClientWithID(upgrader, w, r, genID())
}

//Close closes the underlying websocket connection (thread-safe).
func (c *Client) Close() error {
	c.connLock.Lock()
	defer c.connLock.Unlock()
	return c.conn.Close()
}

//A thread-safe variant of WriteMessage
func (c *Client) Emit(data string) error {
	c.connLock.Lock()
	defer c.connLock.Unlock()
	return c.conn.WriteMessage(ws.TextMessage, []byte(data))
}
//EmitJSON is a thread-safe JSON send: v is wrapped in the standard message
//envelope and written to the connection while holding the write lock.
func (c *Client) EmitJSON(v interface{}) error {
	//-1 marks a server-initiated message rather than a reply to a request
	envelope := struct {
		Id   int         `json:"id"`
		Data interface{} `json:"data"`
	}{Id: -1, Data: v}

	c.connLock.Lock()
	defer c.connLock.Unlock()
	return c.conn.WriteJSON(envelope)
}
//Return a new server object
//The returned server's accept loop is already running in its own goroutine.
func NewServer() *Server {
	s := &Server{
		rooms:     make(map[string]([]*Client)),
		roomsLock: new(sync.RWMutex),

		//Maps socket ID -> list of rooms the client is in
		joinedRooms:     make(map[string][]string),
		joinedRoomsLock: new(sync.RWMutex),

		handlers:     make(map[string](func(*Server, *Client, []byte) []byte)),
		handlersLock: new(sync.RWMutex),

		newClient: make(chan *Client),
	}
	go s.listener()
	return s
}
//Add a client c to room r
//AddClient is a no-op if the client has already joined the room.
func (s *Server) AddClient(c *Client, r string) {
	//Hold the write lock across both the membership check and the insert:
	//the original released a read lock before re-acquiring for the write,
	//so two concurrent AddClient calls could add the same client twice.
	s.joinedRoomsLock.Lock()
	defer s.joinedRoomsLock.Unlock()
	for _, room := range s.joinedRooms[c.id] {
		if r == room {
			//log.Printf("%s already in room %s", c.id, r)
			return
		}
	}

	//Lock order (joinedRoomsLock before roomsLock) matches cleanup, which
	//acquires roomsLock while holding joinedRoomsLock — the original's
	//opposite order was a deadlock hazard.
	s.roomsLock.Lock()
	s.rooms[r] = append(s.rooms[r], c)
	s.roomsLock.Unlock()

	s.joinedRooms[c.id] = append(s.joinedRooms[c.id], r)
	//log.Printf("Added %s to room %s", c.id, r)
}
//Remove client c from room r
//If the client is not in the room, this is a no-op.
func (s *Server) RemoveClient(id, r string) {
	index := -1
	//find and remove the client from the room's membership list
	s.roomsLock.Lock()
	for i, client := range s.rooms[r] {
		if id == client.id {
			index = i
			break
		}
	}
	if index == -1 {
		//log.Printf("Client %s not found in room %s", id, r)
		s.roomsLock.Unlock()
		return
	}
	s.rooms[r] = append(s.rooms[r][:index], s.rooms[r][index+1:]...)
	s.roomsLock.Unlock()

	//then drop the room from the client's joined-rooms list
	index = -1
	s.joinedRoomsLock.Lock()
	defer s.joinedRoomsLock.Unlock()
	for i, room := range s.joinedRooms[id] {
		if room == r {
			index = i
		}
	}
	if index == -1 {
		return
	}
	s.joinedRooms[id] = append(s.joinedRooms[id][:index], s.joinedRooms[id][index+1:]...)
}
//Broadcast sends data to every client currently in the room.
func (s *Server) Broadcast(room string, data string) {
	s.roomsLock.RLock()
	defer s.roomsLock.RUnlock()
	for _, member := range s.rooms[room] {
		//each send runs in its own goroutine so one slow client
		//does not delay the others
		go func(c *Client) {
			c.Emit(data)
		}(member)
	}
}

//BroadcastJSON sends v, JSON-encoded, to every client currently in the room.
func (s *Server) BroadcastJSON(room string, v interface{}) {
	s.roomsLock.RLock()
	defer s.roomsLock.RUnlock()
	for _, member := range s.rooms[room] {
		go func(c *Client) {
			c.EmitJSON(v)
		}(member)
	}
}
//cleanup closes the client's connection, removes it from every room it had
//joined, drops its joined-rooms entry, and fires the OnDisconnect callback.
func (c *Client) cleanup(s *Server) {
	c.conn.Close()

	s.joinedRoomsLock.RLock()
	for _, room := range s.joinedRooms[c.id] {
		//log.Println(room)
		index := -1
		s.roomsLock.Lock()
		for i, client := range s.rooms[room] {
			if client.id == c.id {
				index = i
				break
			}
		}
		//Guard against the client being absent from the room list: the
		//original unconditionally sliced with index, which panics with
		//index == -1 (e.g. if RemoveClient raced with this cleanup).
		if index != -1 {
			s.rooms[room] = append(s.rooms[room][:index], s.rooms[room][index+1:]...)
		}
		s.roomsLock.Unlock()
	}
	s.joinedRoomsLock.RUnlock()

	s.joinedRoomsLock.Lock()
	delete(s.joinedRooms, c.id)
	s.joinedRoomsLock.Unlock()

	if s.OnDisconnect != nil {
		s.OnDisconnect(c.id)
	}
}
//Returns an array of rooms the client c has been added to
//The result is a copy; mutating it does not affect server state.
func (s *Server) RoomsJoined(id string) []string {
	//Take the lock before touching the map at all — the original evaluated
	//len(s.joinedRooms[id]) before acquiring the lock, which is a data race
	//with concurrent AddClient/RemoveClient/cleanup.
	s.joinedRoomsLock.RLock()
	defer s.joinedRoomsLock.RUnlock()

	rooms := make([]string, len(s.joinedRooms[id]))
	copy(rooms, s.joinedRooms[id])
	return rooms
}
//request is the wire format of an incoming client message.
type request struct {
	Id   string
	Data json.RawMessage //raw event payload, decoded further by handlers
}

//reply is the wire format of a handler's response.
type reply struct {
	Id   string `json:"id"`
	Data string `json:"data,string"`
}

//Pools recycling request/reply values across messages.
//NOTE(review): the pools store values, not pointers, so every Get/Put
//re-boxes the value — confirm this actually saves allocations.
var (
	reqPool   = &sync.Pool{New: func() interface{} { return request{} }}
	replyPool = &sync.Pool{New: func() interface{} { return reply{} }}
)
//listener is the per-connection read loop: it decodes each incoming message,
//resolves the handler for its event name, and runs the handler in a new
//goroutine, emitting the handler's return value back as the reply.
func (c *Client) listener(s *Server) {
	for {
		mtype, data, err := c.conn.ReadMessage()
		if err != nil {
			//a read failure means the connection is gone; tear down state
			c.cleanup(s)
			return
		}
		js := reqPool.Get().(request)
		err = json.Unmarshal(data, &js)
		//only well-formed JSON text messages are processed
		if err != nil || mtype != ws.TextMessage {
			log.Println(err)
			continue
		}
		//the Extractor callback derives the event name from the payload
		callName := s.Extractor(string(js.Data))

		s.handlersLock.RLock()
		f, ok := s.handlers[callName]
		s.handlersLock.RUnlock()

		if !ok {
			//fall back to DefaultHandler when no handler is registered
			if s.DefaultHandler != nil {
				f = s.DefaultHandler
				goto call
			}
			continue
		}
	call:
		//run the handler without blocking the read loop; the request and
		//reply values are returned to their pools when the reply is sent
		go func() {
			rtrn := f(s, c, js.Data)
			replyJs := replyPool.Get().(reply)
			replyJs.Id = js.Id
			replyJs.Data = string(rtrn)
			bytes, _ := json.Marshal(replyJs)
			c.Emit(string(bytes))
			reqPool.Put(js)
			replyPool.Put(replyJs)
		}()
	}
}

//listener accepts newly registered clients and starts a read loop for each.
func (s *Server) listener() {
	for {
		c := <-s.newClient
		go c.listener(s)
	}
}
//On registers f as the handler for the given event string, replacing any
//previously registered handler for that event.
func (s *Server) On(event string, f func(*Server, *Client, []byte) []byte) {
	s.handlersLock.Lock()
	defer s.handlersLock.Unlock()
	s.handlers[event] = f
}
Use correct locks, sync.Pool
//Copyright 2015 TF2Stadium. All rights reserved.
//Use of this source code is governed by the MIT license
//that can be found in the LICENSE file.
//Package wsevent implements thread-safe event-driven communication similar to socket.IO,
//on the top of Gorilla's WebSocket implementation.
package wsevent
import (
"crypto/rand"
"encoding/base64"
"encoding/json"
"log"
"net/http"
"sync"
ws "github.com/gorilla/websocket"
)
//Client represents a single websocket connection and its session.
type Client struct {
	//Session ID
	id string

	conn     *ws.Conn
	connLock *sync.RWMutex //guards all writes to / closes of conn
	request  *http.Request //the HTTP request that established the connection
}

//Server routes incoming events to handlers and manages room membership.
type Server struct {
	//maps room string to a list of clients in it
	rooms     map[string]([]*Client)
	roomsLock *sync.RWMutex

	//maps client IDs to the list of rooms the corresponding client has joined
	joinedRooms     map[string][]string
	joinedRoomsLock *sync.RWMutex

	//The extractor function reads the byte array and the message type
	//and returns the event represented by the message.
	Extractor func(string) string

	//Called when the websocket connection closes. The disconnected client's
	//session ID is sent as an argument
	OnDisconnect func(string)

	//Called when no event handler for a specific event exists
	DefaultHandler func(*Server, *Client, []byte) []byte

	handlers     map[string]func(*Server, *Client, []byte) []byte
	handlersLock *sync.RWMutex

	newClient chan *Client //incoming connections, consumed by Server.listener
}
//genID generates a random 32-byte session ID, base64url-encoded.
//NOTE(review): the error from rand.Read is ignored — confirm that is acceptable.
func genID() string {
	bytes := make([]byte, 32)
	rand.Read(bytes)
	return base64.URLEncoding.EncodeToString(bytes)
}

//Returns the client's unique session ID
func (c *Client) Id() string {
	return c.id
}

//Request returns the HTTP request that established the connection.
func (c *Client) Request() *http.Request {
	return c.request
}
//NewClientWithID upgrades the HTTP connection to a websocket and registers a
//new client with the given session ID on the server.
func (s *Server) NewClientWithID(upgrader ws.Upgrader, w http.ResponseWriter, r *http.Request, id string) (*Client, error) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return nil, err
	}
	client := &Client{
		id:       id,
		conn:     conn,
		connLock: new(sync.RWMutex),
		request:  r,
	}
	//hand the client to the server's accept loop, which starts its read loop
	s.newClient <- client
	return client, nil
}

//NewClient is NewClientWithID with a freshly generated random session ID.
func (s *Server) NewClient(upgrader ws.Upgrader, w http.ResponseWriter, r *http.Request) (*Client, error) {
	return s.NewClientWithID(upgrader, w, r, genID())
}

//Close closes the underlying websocket connection (thread-safe).
func (c *Client) Close() error {
	c.connLock.Lock()
	defer c.connLock.Unlock()
	return c.conn.Close()
}

//A thread-safe variant of WriteMessage
func (c *Client) Emit(data string) error {
	c.connLock.Lock()
	defer c.connLock.Unlock()
	return c.conn.WriteMessage(ws.TextMessage, []byte(data))
}
//emitJS is the envelope for server-initiated EmitJSON messages.
type emitJS struct {
	Id   int         `json:"id"`
	Data interface{} `json:"data"`
}

//emitPool recycles envelope values across EmitJSON calls.
var emitPool = &sync.Pool{New: func() interface{} { return emitJS{} }}
//A thread-safe variant of EmitJSON: wraps v in the standard message envelope
//(Id -1 marks a server-initiated message) and writes it to the connection.
func (c *Client) EmitJSON(v interface{}) error {
	c.connLock.Lock()
	defer c.connLock.Unlock()

	js := emitPool.Get().(emitJS)
	//Populate the envelope before sending: the original wrote the pooled
	//value without ever assigning v, so the payload was dropped (and a
	//reused pooled value could leak a previous call's stale data).
	js.Id = -1
	js.Data = v
	defer emitPool.Put(js)

	return c.conn.WriteJSON(js)
}
//Return a new server object
//The returned server's accept loop is already running in its own goroutine.
func NewServer() *Server {
	s := &Server{
		rooms:     make(map[string]([]*Client)),
		roomsLock: new(sync.RWMutex),

		//Maps socket ID -> list of rooms the client is in
		joinedRooms:     make(map[string][]string),
		joinedRoomsLock: new(sync.RWMutex),

		handlers:     make(map[string](func(*Server, *Client, []byte) []byte)),
		handlersLock: new(sync.RWMutex),

		newClient: make(chan *Client),
	}
	go s.listener()
	return s
}
//Add a client c to room r
//AddClient is a no-op if the client has already joined the room.
func (s *Server) AddClient(c *Client, r string) {
	//Hold the write lock across both the membership check and the insert:
	//the original released a read lock before re-acquiring for the write,
	//so two concurrent AddClient calls could add the same client twice.
	s.joinedRoomsLock.Lock()
	defer s.joinedRoomsLock.Unlock()
	for _, room := range s.joinedRooms[c.id] {
		if r == room {
			//log.Printf("%s already in room %s", c.id, r)
			return
		}
	}

	//Lock order (joinedRoomsLock before roomsLock) matches cleanup, which
	//acquires roomsLock while holding joinedRoomsLock — the original's
	//opposite order was a deadlock hazard.
	s.roomsLock.Lock()
	s.rooms[r] = append(s.rooms[r], c)
	s.roomsLock.Unlock()

	s.joinedRooms[c.id] = append(s.joinedRooms[c.id], r)
	//log.Printf("Added %s to room %s", c.id, r)
}
//Remove client c from room r
//If the client is not in the room, this is a no-op.
func (s *Server) RemoveClient(id, r string) {
	//Find and delete under a single write lock: the original located the
	//index under an RLock, released it, and deleted under a later Lock —
	//a concurrent mutation in the gap could make the index stale, deleting
	//the wrong client or panicking if the slice had shrunk.
	s.roomsLock.Lock()
	index := -1
	for i, client := range s.rooms[r] {
		if id == client.id {
			index = i
			break
		}
	}
	if index == -1 {
		//log.Printf("Client %s not found in room %s", id, r)
		s.roomsLock.Unlock()
		return
	}
	s.rooms[r] = append(s.rooms[r][:index], s.rooms[r][index+1:]...)
	s.roomsLock.Unlock()

	//Same single-lock treatment for the client's joined-rooms list.
	s.joinedRoomsLock.Lock()
	defer s.joinedRoomsLock.Unlock()
	index = -1
	for i, room := range s.joinedRooms[id] {
		if room == r {
			index = i
		}
	}
	if index == -1 {
		return
	}
	s.joinedRooms[id] = append(s.joinedRooms[id][:index], s.joinedRooms[id][index+1:]...)
}
//Broadcast sends data to every client currently in the room.
func (s *Server) Broadcast(room string, data string) {
	s.roomsLock.RLock()
	defer s.roomsLock.RUnlock()
	for _, member := range s.rooms[room] {
		//each send runs in its own goroutine so one slow client
		//does not delay the others
		go func(c *Client) {
			c.Emit(data)
		}(member)
	}
}

//BroadcastJSON sends v, JSON-encoded, to every client currently in the room.
func (s *Server) BroadcastJSON(room string, v interface{}) {
	s.roomsLock.RLock()
	defer s.roomsLock.RUnlock()
	for _, member := range s.rooms[room] {
		go func(c *Client) {
			c.EmitJSON(v)
		}(member)
	}
}
//cleanup closes the client's connection, removes it from every room it had
//joined, drops its joined-rooms entry, and fires the OnDisconnect callback.
func (c *Client) cleanup(s *Server) {
	c.conn.Close()

	s.joinedRoomsLock.RLock()
	for _, room := range s.joinedRooms[c.id] {
		//log.Println(room)
		index := -1
		s.roomsLock.Lock()
		for i, client := range s.rooms[room] {
			if client.id == c.id {
				index = i
				break
			}
		}
		//Guard against the client being absent from the room list: the
		//original unconditionally sliced with index, which panics with
		//index == -1 (e.g. if RemoveClient raced with this cleanup).
		if index != -1 {
			s.rooms[room] = append(s.rooms[room][:index], s.rooms[room][index+1:]...)
		}
		s.roomsLock.Unlock()
	}
	s.joinedRoomsLock.RUnlock()

	s.joinedRoomsLock.Lock()
	delete(s.joinedRooms, c.id)
	s.joinedRoomsLock.Unlock()

	if s.OnDisconnect != nil {
		s.OnDisconnect(c.id)
	}
}
//Returns an array of rooms the client c has been added to
//The result is a copy; mutating it does not affect server state.
func (s *Server) RoomsJoined(id string) []string {
	//Take the lock before touching the map at all — the original evaluated
	//len(s.joinedRooms[id]) before acquiring the lock, which is a data race
	//with concurrent AddClient/RemoveClient/cleanup.
	s.joinedRoomsLock.RLock()
	defer s.joinedRoomsLock.RUnlock()

	rooms := make([]string, len(s.joinedRooms[id]))
	copy(rooms, s.joinedRooms[id])
	return rooms
}
//request is the wire format of an incoming client message.
type request struct {
	Id   string
	Data json.RawMessage //raw event payload, decoded further by handlers
}

//reply is the wire format of a handler's response.
type reply struct {
	Id   string `json:"id"`
	Data string `json:"data,string"`
}

//Pools recycling request/reply values across messages.
//NOTE(review): the pools store values, not pointers, so every Get/Put
//re-boxes the value — confirm this actually saves allocations.
var (
	reqPool   = &sync.Pool{New: func() interface{} { return request{} }}
	replyPool = &sync.Pool{New: func() interface{} { return reply{} }}
)
//listener is the per-connection read loop: it decodes each incoming message,
//resolves the handler for its event name, and runs the handler in a new
//goroutine, emitting the handler's return value back as the reply.
func (c *Client) listener(s *Server) {
	for {
		mtype, data, err := c.conn.ReadMessage()
		if err != nil {
			//a read failure means the connection is gone; tear down state
			c.cleanup(s)
			return
		}
		js := reqPool.Get().(request)
		err = json.Unmarshal(data, &js)
		//only well-formed JSON text messages are processed
		if err != nil || mtype != ws.TextMessage {
			log.Println(err)
			continue
		}
		//the Extractor callback derives the event name from the payload
		callName := s.Extractor(string(js.Data))

		s.handlersLock.RLock()
		f, ok := s.handlers[callName]
		s.handlersLock.RUnlock()

		if !ok {
			//fall back to DefaultHandler when no handler is registered
			if s.DefaultHandler != nil {
				f = s.DefaultHandler
				goto call
			}
			continue
		}
	call:
		//run the handler without blocking the read loop; the request and
		//reply values are returned to their pools when the reply is sent
		go func() {
			rtrn := f(s, c, js.Data)
			replyJs := replyPool.Get().(reply)
			replyJs.Id = js.Id
			replyJs.Data = string(rtrn)
			bytes, _ := json.Marshal(replyJs)
			c.Emit(string(bytes))
			reqPool.Put(js)
			replyPool.Put(replyJs)
		}()
	}
}

//listener accepts newly registered clients and starts a read loop for each.
func (s *Server) listener() {
	for {
		c := <-s.newClient
		go c.listener(s)
	}
}
//On registers f as the handler for the given event string, replacing any
//previously registered handler for that event.
func (s *Server) On(event string, f func(*Server, *Client, []byte) []byte) {
	s.handlersLock.Lock()
	defer s.handlersLock.Unlock()
	s.handlers[event] = f
}
|
package html
import (
"bytes"
"encoding/xml"
"fmt"
"io"
)
//Encoder streams xml.Tokens out as HTML, buffering output internally.
type Encoder struct {
	//RewriteID, when non-empty, is written in place of the name of every
	//"id" attribute.
	RewriteID string

	buf    bytes.Buffer //pending output, flushed to w in chunks
	w      io.Writer
	stack  []string //open tags, innermost last
	invoid bool     //true while the current element is a void element
}

//NewEncoder returns an Encoder that writes HTML to out.
func NewEncoder(out io.Writer) *Encoder {
	return &Encoder{
		buf:   bytes.Buffer{},
		w:     out,
		stack: []string{},
	}
}

//Depth returns the number of currently open tags.
func (enc *Encoder) Depth() int { return len(enc.stack) }

//Stack returns the open-tag stack (innermost last).
//NOTE(review): this returns the internal slice without copying, so callers
//can mutate encoder state through it — confirm that is intended.
func (enc *Encoder) Stack() []string { return enc.stack }

//WriteXMLStart writes token as an HTML start tag.
func (enc *Encoder) WriteXMLStart(token *xml.StartElement) error {
	return enc.WriteStart(token.Name.Local, token.Attr...)
}
//WriteStart writes an HTML start tag with the given attributes and pushes it
//onto the open-tag stack. Attribute values are escaped.
func (enc *Encoder) WriteStart(tag string, attrs ...xml.Attr) error {
	enc.stack = append(enc.stack, tag)
	//track whether we are now inside a void element (content forbidden)
	enc.invoid = voidElements[tag]

	enc.buf.WriteByte('<')
	enc.buf.WriteString(tag)
	for _, attr := range attrs {
		//skip attributes with no name
		if attr.Name.Local == "" {
			continue
		}
		enc.buf.WriteByte(' ')
		if attr.Name.Local == "id" && enc.RewriteID != "" {
			//NOTE(review): this substitutes RewriteID for the attribute
			//*name*, emitting `RewriteID="<value>"` — confirm intent.
			enc.buf.WriteString(enc.RewriteID)
		} else {
			if attr.Name.Space != "" {
				enc.buf.WriteString(attr.Name.Space + ":" + attr.Name.Local)
			} else {
				enc.buf.WriteString(attr.Name.Local)
			}
		}
		enc.buf.WriteString(`="`)
		enc.buf.WriteString(EscapeAttribute(attr.Value))
		enc.buf.WriteByte('"')
	}
	enc.buf.WriteByte('>')
	return enc.flush()
}
//WriteXMLEnd writes token as an HTML end tag.
func (enc *Encoder) WriteXMLEnd(token *xml.EndElement) error {
	return enc.WriteEnd(token.Name.Local)
}

//WriteEnd closes the most recently opened tag, which must equal tag.
//Void elements produce no closing markup.
func (enc *Encoder) WriteEnd(tag string) error {
	if len(enc.stack) == 0 {
		return fmt.Errorf("no unclosed tags")
	}
	//pop the innermost open tag and check it matches
	top := len(enc.stack) - 1
	current := enc.stack[top]
	enc.stack = enc.stack[:top]
	if current != tag {
		return fmt.Errorf("writing end tag %v expected %v", tag, current)
	}
	//recompute whether the now-current enclosing element is void
	enc.invoid = (len(enc.stack) > 0) && voidElements[enc.stack[len(enc.stack)-1]]
	// void elements have only a single tag
	if voidElements[tag] {
		return nil
	}
	enc.buf.WriteString("</")
	enc.buf.WriteString(tag)
	enc.buf.WriteByte('>')
	return enc.flush()
}
//WriteRaw appends data to the output buffer without any escaping.
//Unlike the other writers it does not trigger a flush.
func (enc *Encoder) WriteRaw(data string) error {
	_, err := enc.buf.WriteString(data)
	return err
}

//voiderror reports an attempt to write content inside a void element.
func (enc *Encoder) voiderror() error {
	return fmt.Errorf("content not allowed inside void tag %s", enc.stack[len(enc.stack)-1])
}
//Encode writes a single xml.Token as HTML. Character data and comments are
//escaped; processing instructions and directives are silently dropped.
//Any content inside a void element is an error.
func (enc *Encoder) Encode(token xml.Token) error {
	switch token := token.(type) {
	case xml.StartElement:
		return enc.WriteXMLStart(&token)
	case xml.EndElement:
		return enc.WriteXMLEnd(&token)
	case xml.CharData:
		if enc.invoid {
			return enc.voiderror()
		}
		enc.buf.Write([]byte(EscapeCharData(string(token))))
		return enc.flush()
	case xml.Comment:
		if enc.invoid {
			return enc.voiderror()
		}
		enc.buf.WriteString("<!--")
		enc.buf.Write([]byte(EscapeCharData(string(token))))
		enc.buf.WriteString("-->")
		return enc.flush()
	case xml.ProcInst:
		if enc.invoid {
			return enc.voiderror()
		}
		// skip processing instructions
		return nil
	case xml.Directive:
		if enc.invoid {
			return enc.voiderror()
		}
		// skip directives
		return nil
	default:
		panic("invalid token")
	}
}
//flush writes buffered output to the underlying writer once the buffer
//exceeds 256 bytes; smaller amounts stay buffered until Flush.
func (enc *Encoder) flush() error {
	if enc.buf.Len() > 1<<8 {
		return enc.Flush()
	}
	return nil
}

//Flush unconditionally writes all buffered output to the underlying writer.
func (enc *Encoder) Flush() error {
	_, err := enc.buf.WriteTo(enc.w)
	enc.buf.Reset()
	return err
}
// Section 12.1.2, "Elements", gives this list of void elements. Void elements
// are those that can't have any contents and are written as a single tag.
var voidElements = map[string]bool{
	"area":    true,
	"base":    true,
	"br":      true,
	"col":     true,
	"command": true,
	"embed":   true,
	"hr":      true,
	"img":     true,
	"input":   true,
	"keygen":  true,
	"link":    true,
	"meta":    true,
	"param":   true,
	"source":  true,
	"track":   true,
	"wbr":     true,
}
Fold outputclass into html class
package html
import (
"bytes"
"encoding/xml"
"fmt"
"io"
)
//Encoder streams xml.Tokens out as HTML, buffering output internally.
type Encoder struct {
	//RewriteID, when non-empty, is written in place of the name of every
	//"id" attribute.
	RewriteID string

	buf    bytes.Buffer //pending output, flushed to w in chunks
	w      io.Writer
	stack  []string //open tags, innermost last
	invoid bool     //true while the current element is a void element
}

//NewEncoder returns an Encoder that writes HTML to out.
func NewEncoder(out io.Writer) *Encoder {
	return &Encoder{
		buf:   bytes.Buffer{},
		w:     out,
		stack: []string{},
	}
}

//Depth returns the number of currently open tags.
func (enc *Encoder) Depth() int { return len(enc.stack) }

//Stack returns the open-tag stack (innermost last).
//NOTE(review): this returns the internal slice without copying, so callers
//can mutate encoder state through it — confirm that is intended.
func (enc *Encoder) Stack() []string { return enc.stack }

//WriteXMLStart writes token as an HTML start tag.
func (enc *Encoder) WriteXMLStart(token *xml.StartElement) error {
	return enc.WriteStart(token.Name.Local, token.Attr...)
}
//WriteStart writes an HTML start tag, folding any "outputclass" attribute
//values into the "class" attribute, and pushes the tag onto the open-tag
//stack. Attribute values are escaped.
func (enc *Encoder) WriteStart(tag string, attrs ...xml.Attr) error {
	enc.stack = append(enc.stack, tag)
	//track whether we are now inside a void element (content forbidden)
	enc.invoid = voidElements[tag]

	enc.buf.WriteByte('<')
	enc.buf.WriteString(tag)

	//collect outputclass values and locate an existing class attribute
	outputclass := ""
	var class *xml.Attr
	for i, attr := range attrs {
		switch attr.Name.Local {
		case "outputclass":
			outputclass += " " + attr.Value
		case "class":
			class = &attrs[i]
		}
	}
	if outputclass != "" {
		if class != nil {
			//append to the existing class value (outputclass carries a
			//leading space from the accumulation above)
			class.Value += outputclass
		} else {
			attrs = append(attrs, xml.Attr{
				Name:  xml.Name{Local: "class"},
				Value: outputclass[1:],
			})
		}
	}
	//NOTE(review): the loop below still emits the outputclass attribute
	//itself alongside the folded class value — confirm it should not be
	//skipped here.
	for _, attr := range attrs {
		//skip attributes with no name
		if attr.Name.Local == "" {
			continue
		}
		enc.buf.WriteByte(' ')
		if attr.Name.Local == "id" && enc.RewriteID != "" {
			//NOTE(review): this substitutes RewriteID for the attribute
			//*name*, emitting `RewriteID="<value>"` — confirm intent.
			enc.buf.WriteString(enc.RewriteID)
		} else {
			if attr.Name.Space != "" {
				enc.buf.WriteString(attr.Name.Space + ":" + attr.Name.Local)
			} else {
				enc.buf.WriteString(attr.Name.Local)
			}
		}
		enc.buf.WriteString(`="`)
		enc.buf.WriteString(EscapeAttribute(attr.Value))
		enc.buf.WriteByte('"')
	}
	enc.buf.WriteByte('>')
	return enc.flush()
}
//WriteXMLEnd writes token as an HTML end tag.
func (enc *Encoder) WriteXMLEnd(token *xml.EndElement) error {
	return enc.WriteEnd(token.Name.Local)
}

//WriteEnd closes the most recently opened tag, which must equal tag.
//Void elements produce no closing markup.
func (enc *Encoder) WriteEnd(tag string) error {
	if len(enc.stack) == 0 {
		return fmt.Errorf("no unclosed tags")
	}
	//pop the innermost open tag and check it matches
	top := len(enc.stack) - 1
	current := enc.stack[top]
	enc.stack = enc.stack[:top]
	if current != tag {
		return fmt.Errorf("writing end tag %v expected %v", tag, current)
	}
	//recompute whether the now-current enclosing element is void
	enc.invoid = (len(enc.stack) > 0) && voidElements[enc.stack[len(enc.stack)-1]]
	// void elements have only a single tag
	if voidElements[tag] {
		return nil
	}
	enc.buf.WriteString("</")
	enc.buf.WriteString(tag)
	enc.buf.WriteByte('>')
	return enc.flush()
}
//WriteRaw appends data to the output buffer without any escaping.
//Unlike the other writers it does not trigger a flush.
func (enc *Encoder) WriteRaw(data string) error {
	_, err := enc.buf.WriteString(data)
	return err
}

//voiderror reports an attempt to write content inside a void element.
func (enc *Encoder) voiderror() error {
	return fmt.Errorf("content not allowed inside void tag %s", enc.stack[len(enc.stack)-1])
}
//Encode writes a single xml.Token as HTML. Character data and comments are
//escaped; processing instructions and directives are silently dropped.
//Any content inside a void element is an error.
func (enc *Encoder) Encode(token xml.Token) error {
	switch token := token.(type) {
	case xml.StartElement:
		return enc.WriteXMLStart(&token)
	case xml.EndElement:
		return enc.WriteXMLEnd(&token)
	case xml.CharData:
		if enc.invoid {
			return enc.voiderror()
		}
		enc.buf.Write([]byte(EscapeCharData(string(token))))
		return enc.flush()
	case xml.Comment:
		if enc.invoid {
			return enc.voiderror()
		}
		enc.buf.WriteString("<!--")
		enc.buf.Write([]byte(EscapeCharData(string(token))))
		enc.buf.WriteString("-->")
		return enc.flush()
	case xml.ProcInst:
		if enc.invoid {
			return enc.voiderror()
		}
		// skip processing instructions
		return nil
	case xml.Directive:
		if enc.invoid {
			return enc.voiderror()
		}
		// skip directives
		return nil
	default:
		panic("invalid token")
	}
}
//flush writes buffered output to the underlying writer once the buffer
//exceeds 256 bytes; smaller amounts stay buffered until Flush.
func (enc *Encoder) flush() error {
	if enc.buf.Len() > 1<<8 {
		return enc.Flush()
	}
	return nil
}

//Flush unconditionally writes all buffered output to the underlying writer.
func (enc *Encoder) Flush() error {
	_, err := enc.buf.WriteTo(enc.w)
	enc.buf.Reset()
	return err
}
// Section 12.1.2, "Elements", gives this list of void elements. Void elements
// are those that can't have any contents and are written as a single tag.
var voidElements = map[string]bool{
	"area":    true,
	"base":    true,
	"br":      true,
	"col":     true,
	"command": true,
	"embed":   true,
	"hr":      true,
	"img":     true,
	"input":   true,
	"keygen":  true,
	"link":    true,
	"meta":    true,
	"param":   true,
	"source":  true,
	"track":   true,
	"wbr":     true,
}
|
package main
import (
"fmt"
"github.com/Netflix-Skunkworks/go-jira/jira/cli"
"github.com/docopt/docopt-go"
"github.com/op/go-logging"
"gopkg.in/yaml.v2"
"io/ioutil"
"os"
"strings"
)
var log = logging.MustGetLogger("jira")
var format = "%{color}%{time:2006-01-02T15:04:05.000Z07:00} %{level:-5s} [%{shortfile}]%{color:reset} %{message}"
func main() {
user := os.Getenv("USER")
home := os.Getenv("HOME")
usage := fmt.Sprintf(`
Usage:
jira [-v ...] [-u USER] [-e URI] [-t FILE] (ls|list) ( [-q JQL] | [-p PROJECT] [-c COMPONENT] [-a ASSIGNEE] [-i ISSUETYPE] [-w WATCHER] [-r REPORTER]) [-f FIELDS]
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] view ISSUE
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] edit ISSUE [--noedit] [-m COMMENT] [-o KEY=VAL]...
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] create [--noedit] [-p PROJECT] [-i ISSUETYPE] [-o KEY=VAL]...
jira [-v ...] [-u USER] [-e URI] [-b] DUPLICATE dups ISSUE
jira [-v ...] [-u USER] [-e URI] [-b] BLOCKER blocks ISSUE
jira [-v ...] [-u USER] [-e URI] [-b] watch ISSUE [-w WATCHER]
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] (trans|transition) TRANSITION ISSUE [-m COMMENT] [-o KEY=VAL] [--noedit]
jira [-v ...] [-u USER] [-e URI] [-b] ack ISSUE [-m COMMENT] [-o KEY=VAL] [--edit]
jira [-v ...] [-u USER] [-e URI] [-b] close ISSUE [-m COMMENT] [-o KEY=VAL] [--edit]
jira [-v ...] [-u USER] [-e URI] [-b] resolve ISSUE [-m COMMENT] [-o KEY=VAL] [--edit]
jira [-v ...] [-u USER] [-e URI] [-b] reopen ISSUE [-m COMMENT] [-o KEY=VAL] [--edit]
jira [-v ...] [-u USER] [-e URI] [-b] start ISSUE [-m COMMENT] [-o KEY=VAL] [--edit]
jira [-v ...] [-u USER] [-e URI] [-b] stop ISSUE [-m COMMENT] [-o KEY=VAL] [--edit]
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] comment ISSUE [-m COMMENT]
jira [-v ...] [-u USER] [-e URI] [-b] take ISSUE
jira [-v ...] [-u USER] [-e URI] [-b] (assign|give) ISSUE ASSIGNEE
jira [-v ...] [-u USER] [-e URI] [-t FILE] fields
jira [-v ...] [-u USER] [-e URI] [-t FILE] issuelinktypes
jira [-v ...] [-u USER] [-e URI] [-b][-t FILE] transmeta ISSUE
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] editmeta ISSUE
jira [-v ...] [-u USER] [-e URI] [-t FILE] issuetypes [-p PROJECT]
jira [-v ...] [-u USER] [-e URI] [-t FILE] createmeta [-p PROJECT] [-i ISSUETYPE]
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] transitions ISSUE
jira [-v ...] export-templates [-d DIR] [-t template]
jira [-v ...] [-u USER] [-e URI] (b|browse) ISSUE
jira [-v ...] [-u USER] [-e URI] [-t FILE] login
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] ISSUE
General Options:
-e --endpoint=URI URI to use for jira
-h --help Show this usage
-t --template=FILE Template file to use for output/editing
-u --user=USER Username to use for authenticaion (default: %s)
-v --verbose Increase output logging
--version Show this version
Command Options:
-a --assignee=USER Username assigned the issue
-b --browse Open your browser to the Jira issue
-c --component=COMPONENT Component to Search for
-d --directory=DIR Directory to export templates to (default: %s)
-f --queryfields=FIELDS Fields that are used in "list" template: (default: summary,created,priority,status,reporter,assignee)
-i --issuetype=ISSUETYPE Jira Issue Type (default: Bug)
-m --comment=COMMENT Comment message for transition
-o --override=KEY:VAL Set custom key/value pairs
-p --project=PROJECT Project to Search for
-q --query=JQL Jira Query Language expression for the search
-r --reporter=USER Reporter to search for
-w --watcher=USER Watcher to add to issue (default: %s)
or Watcher to search for
`, user, fmt.Sprintf("%s/.jira.d/templates", home), user)
args, err := docopt.Parse(usage, nil, true, "0.0.4", false, false)
if err != nil {
log.Error("Failed to parse options: %s", err)
os.Exit(1)
}
logBackend := logging.NewLogBackend(os.Stderr, "", 0)
logging.SetBackend(
logging.NewBackendFormatter(
logBackend,
logging.MustStringFormatter(format),
),
)
logging.SetLevel(logging.NOTICE, "")
if verbose, ok := args["--verbose"]; ok {
if verbose.(int) > 1 {
logging.SetLevel(logging.DEBUG, "")
} else if verbose.(int) > 0 {
logging.SetLevel(logging.INFO, "")
}
}
log.Info("Args: %v", args)
opts := make(map[string]string)
loadConfigs(opts)
// strip the "--" off the command line options
// and populate the opts that we pass to the cli ctor
for key, val := range args {
if val != nil && strings.HasPrefix(key, "--") {
opt := key[2:]
if opt == "override" {
for _, v := range val.([]string) {
if strings.Contains(v, "=") {
kv := strings.SplitN(v, "=", 2)
opts[kv[0]] = kv[1]
} else {
log.Error("Malformed override, expected KEY=VALUE, got %s", v)
os.Exit(1)
}
}
} else {
switch v := val.(type) {
case string:
opts[opt] = v
case int:
opts[opt] = fmt.Sprintf("%d", v)
case bool:
opts[opt] = fmt.Sprintf("%t", v)
}
}
}
}
// cant use proper [default:x] syntax in docopt
// because only want to default if the option is not
// already specified in some .jira.d/config.yml file
if _, ok := opts["user"]; !ok {
opts["user"] = user
}
if _, ok := opts["queryfields"]; !ok {
opts["queryfields"] = "summary,created,priority,status,reporter,assignee"
}
if _, ok := opts["directory"]; !ok {
opts["directory"] = fmt.Sprintf("%s/.jira.d/templates", home)
}
if _, ok := opts["endpoint"]; !ok {
log.Error("endpoint option required. Either use --endpoint or set a enpoint option in your ~/.jira.d/config.yml file")
os.Exit(1)
}
c := cli.New(opts)
log.Debug("opts: %s", opts)
validCommand := func(cmd string) bool {
if val, ok := args[cmd]; ok && val.(bool) {
return true
}
return false
}
validOpt := func(opt string, dflt interface{}) interface{} {
if val, ok := opts[opt]; ok {
return val
}
if dflt == nil {
log.Error("Missing required option --%s or \"%s\" property override in the config file", opt, opt)
os.Exit(1)
}
return dflt
}
setEditing := func(dflt bool) {
if dflt {
if val, ok := opts["noedit"]; ok && val == "true" {
opts["edit"] = "false"
} else {
opts["edit"] = "true"
}
} else {
if val, ok := opts["edit"]; ok && val == "true" {
opts["edit"] = "true"
} else {
opts["edit"] = "false"
}
}
}
if validCommand("login") {
err = c.CmdLogin()
} else if validCommand("fields") {
err = c.CmdFields()
} else if validCommand("ls") || validCommand("list") {
err = c.CmdList()
} else if validCommand("edit") {
setEditing(true)
err = c.CmdEdit(args["ISSUE"].(string))
} else if validCommand("editmeta") {
err = c.CmdEditMeta(args["ISSUE"].(string))
} else if validCommand("transmeta") {
err = c.CmdTransitionMeta(args["ISSUE"].(string))
} else if validCommand("issuelinktypes") {
err = c.CmdIssueLinkTypes()
} else if validCommand("issuetypes") {
err = c.CmdIssueTypes(validOpt("project", nil).(string))
} else if validCommand("createmeta") {
err = c.CmdCreateMeta(
validOpt("project", nil).(string),
validOpt("issuetype", "Bug").(string),
)
} else if validCommand("create") {
setEditing(true)
err = c.CmdCreate(
validOpt("project", nil).(string),
validOpt("issuetype", "Bug").(string),
)
} else if validCommand("transitions") {
err = c.CmdTransitions(args["ISSUE"].(string))
} else if validCommand("blocks") {
err = c.CmdBlocks(
args["BLOCKER"].(string),
args["ISSUE"].(string),
)
} else if validCommand("dups") {
err = c.CmdDups(
args["DUPLICATE"].(string),
args["ISSUE"].(string),
)
} else if validCommand("watch") {
err = c.CmdWatch(
args["ISSUE"].(string),
validOpt("watcher", user).(string),
)
} else if validCommand("trans") || validCommand("transition") {
setEditing(true)
err = c.CmdTransition(
args["ISSUE"].(string),
args["TRANSITION"].(string),
)
} else if validCommand("close") {
setEditing(false)
err = c.CmdTransition(args["ISSUE"].(string), "close")
} else if validCommand("ack") {
setEditing(false)
err = c.CmdTransition(args["ISSUE"].(string), "acknowledge")
} else if validCommand("reopen") {
setEditing(false)
err = c.CmdTransition(args["ISSUE"].(string), "reopen")
} else if validCommand("resolve") {
setEditing(false)
err = c.CmdTransition(args["ISSUE"].(string), "resolve")
} else if validCommand("start") {
setEditing(false)
err = c.CmdTransition(args["ISSUE"].(string), "start")
} else if validCommand("stop") {
setEditing(false)
err = c.CmdTransition(args["ISSUE"].(string), "stop")
} else if validCommand("comment") {
setEditing(true)
err = c.CmdComment(args["ISSUE"].(string))
} else if validCommand("take") {
err = c.CmdAssign(args["ISSUE"].(string), user)
} else if validCommand("browse") || validCommand("b") {
opts["browse"] = "true"
err = c.Browse(args["ISSUE"].(string))
} else if validCommand("export-templates") {
err = c.CmdExportTemplates()
} else if validCommand("assign") || validCommand("give") {
err = c.CmdAssign(
args["ISSUE"].(string),
args["ASSIGNEE"].(string),
)
} else if val, ok := args["ISSUE"]; ok {
err = c.CmdView(val.(string))
}
if err != nil {
os.Exit(1)
}
os.Exit(0)
}
// parseYaml merges the YAML key/value pairs found in file into opts.
// Files that are missing or unreadable are silently skipped (configs are
// optional), but a file that exists and fails to parse is now logged
// instead of being silently ignored.
func parseYaml(file string, opts map[string]string) {
	if fh, err := ioutil.ReadFile(file); err == nil {
		log.Debug("Found Config file: %s", file)
		if err := yaml.Unmarshal(fh, &opts); err != nil {
			// Previously this error was discarded, hiding broken configs.
			log.Error("Failed to parse %s: %s", file, err)
		}
	}
}
// loadConfigs merges YAML configuration into opts from /etc/jira-cli.yml
// first, then from every .jira.d/config.yml found walking up from the
// current directory, so later (closer) files can override earlier ones.
func loadConfigs(opts map[string]string) {
	configs := []string{"/etc/jira-cli.yml"}
	configs = append(configs, cli.FindParentPaths(".jira.d/config.yml")...)
	for _, file := range configs {
		parseYaml(file, opts)
	}
}
If config files are executable, then run them and parse their output.
package main
import (
"bytes"
"fmt"
"github.com/Netflix-Skunkworks/go-jira/jira/cli"
"github.com/docopt/docopt-go"
"github.com/op/go-logging"
"gopkg.in/yaml.v2"
"io/ioutil"
"os"
"os/exec"
"strings"
)
var log = logging.MustGetLogger("jira")
var format = "%{color}%{time:2006-01-02T15:04:05.000Z07:00} %{level:-5s} [%{shortfile}]%{color:reset} %{message}"
func main() {
user := os.Getenv("USER")
home := os.Getenv("HOME")
usage := fmt.Sprintf(`
Usage:
jira [-v ...] [-u USER] [-e URI] [-t FILE] (ls|list) ( [-q JQL] | [-p PROJECT] [-c COMPONENT] [-a ASSIGNEE] [-i ISSUETYPE] [-w WATCHER] [-r REPORTER]) [-f FIELDS]
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] view ISSUE
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] edit ISSUE [--noedit] [-m COMMENT] [-o KEY=VAL]...
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] create [--noedit] [-p PROJECT] [-i ISSUETYPE] [-o KEY=VAL]...
jira [-v ...] [-u USER] [-e URI] [-b] DUPLICATE dups ISSUE
jira [-v ...] [-u USER] [-e URI] [-b] BLOCKER blocks ISSUE
jira [-v ...] [-u USER] [-e URI] [-b] watch ISSUE [-w WATCHER]
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] (trans|transition) TRANSITION ISSUE [-m COMMENT] [-o KEY=VAL] [--noedit]
jira [-v ...] [-u USER] [-e URI] [-b] ack ISSUE [-m COMMENT] [-o KEY=VAL] [--edit]
jira [-v ...] [-u USER] [-e URI] [-b] close ISSUE [-m COMMENT] [-o KEY=VAL] [--edit]
jira [-v ...] [-u USER] [-e URI] [-b] resolve ISSUE [-m COMMENT] [-o KEY=VAL] [--edit]
jira [-v ...] [-u USER] [-e URI] [-b] reopen ISSUE [-m COMMENT] [-o KEY=VAL] [--edit]
jira [-v ...] [-u USER] [-e URI] [-b] start ISSUE [-m COMMENT] [-o KEY=VAL] [--edit]
jira [-v ...] [-u USER] [-e URI] [-b] stop ISSUE [-m COMMENT] [-o KEY=VAL] [--edit]
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] comment ISSUE [-m COMMENT]
jira [-v ...] [-u USER] [-e URI] [-b] take ISSUE
jira [-v ...] [-u USER] [-e URI] [-b] (assign|give) ISSUE ASSIGNEE
jira [-v ...] [-u USER] [-e URI] [-t FILE] fields
jira [-v ...] [-u USER] [-e URI] [-t FILE] issuelinktypes
jira [-v ...] [-u USER] [-e URI] [-b][-t FILE] transmeta ISSUE
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] editmeta ISSUE
jira [-v ...] [-u USER] [-e URI] [-t FILE] issuetypes [-p PROJECT]
jira [-v ...] [-u USER] [-e URI] [-t FILE] createmeta [-p PROJECT] [-i ISSUETYPE]
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] transitions ISSUE
jira [-v ...] export-templates [-d DIR] [-t template]
jira [-v ...] [-u USER] [-e URI] (b|browse) ISSUE
jira [-v ...] [-u USER] [-e URI] [-t FILE] login
jira [-v ...] [-u USER] [-e URI] [-b] [-t FILE] ISSUE
General Options:
-e --endpoint=URI URI to use for jira
-h --help Show this usage
-t --template=FILE Template file to use for output/editing
-u --user=USER Username to use for authenticaion (default: %s)
-v --verbose Increase output logging
--version Show this version
Command Options:
-a --assignee=USER Username assigned the issue
-b --browse Open your browser to the Jira issue
-c --component=COMPONENT Component to Search for
-d --directory=DIR Directory to export templates to (default: %s)
-f --queryfields=FIELDS Fields that are used in "list" template: (default: summary,created,priority,status,reporter,assignee)
-i --issuetype=ISSUETYPE Jira Issue Type (default: Bug)
-m --comment=COMMENT Comment message for transition
-o --override=KEY:VAL Set custom key/value pairs
-p --project=PROJECT Project to Search for
-q --query=JQL Jira Query Language expression for the search
-r --reporter=USER Reporter to search for
-w --watcher=USER Watcher to add to issue (default: %s)
or Watcher to search for
`, user, fmt.Sprintf("%s/.jira.d/templates", home), user)
args, err := docopt.Parse(usage, nil, true, "0.0.4", false, false)
if err != nil {
log.Error("Failed to parse options: %s", err)
os.Exit(1)
}
logBackend := logging.NewLogBackend(os.Stderr, "", 0)
logging.SetBackend(
logging.NewBackendFormatter(
logBackend,
logging.MustStringFormatter(format),
),
)
logging.SetLevel(logging.NOTICE, "")
if verbose, ok := args["--verbose"]; ok {
if verbose.(int) > 1 {
logging.SetLevel(logging.DEBUG, "")
} else if verbose.(int) > 0 {
logging.SetLevel(logging.INFO, "")
}
}
log.Info("Args: %v", args)
populateEnv(args)
opts := make(map[string]string)
loadConfigs(opts)
// strip the "--" off the command line options
// and populate the opts that we pass to the cli ctor
for key, val := range args {
if val != nil && strings.HasPrefix(key, "--") {
opt := key[2:]
if opt == "override" {
for _, v := range val.([]string) {
if strings.Contains(v, "=") {
kv := strings.SplitN(v, "=", 2)
opts[kv[0]] = kv[1]
} else {
log.Error("Malformed override, expected KEY=VALUE, got %s", v)
os.Exit(1)
}
}
} else {
switch v := val.(type) {
case string:
opts[opt] = v
case int:
opts[opt] = fmt.Sprintf("%d", v)
case bool:
opts[opt] = fmt.Sprintf("%t", v)
}
}
}
}
// cant use proper [default:x] syntax in docopt
// because only want to default if the option is not
// already specified in some .jira.d/config.yml file
if _, ok := opts["user"]; !ok {
opts["user"] = user
}
if _, ok := opts["queryfields"]; !ok {
opts["queryfields"] = "summary,created,priority,status,reporter,assignee"
}
if _, ok := opts["directory"]; !ok {
opts["directory"] = fmt.Sprintf("%s/.jira.d/templates", home)
}
if _, ok := opts["endpoint"]; !ok {
log.Error("endpoint option required. Either use --endpoint or set a enpoint option in your ~/.jira.d/config.yml file")
os.Exit(1)
}
c := cli.New(opts)
log.Debug("opts: %s", opts)
validCommand := func(cmd string) bool {
if val, ok := args[cmd]; ok && val.(bool) {
return true
}
return false
}
validOpt := func(opt string, dflt interface{}) interface{} {
if val, ok := opts[opt]; ok {
return val
}
if dflt == nil {
log.Error("Missing required option --%s or \"%s\" property override in the config file", opt, opt)
os.Exit(1)
}
return dflt
}
setEditing := func(dflt bool) {
if dflt {
if val, ok := opts["noedit"]; ok && val == "true" {
opts["edit"] = "false"
} else {
opts["edit"] = "true"
}
} else {
if val, ok := opts["edit"]; ok && val == "true" {
opts["edit"] = "true"
} else {
opts["edit"] = "false"
}
}
}
if validCommand("login") {
err = c.CmdLogin()
} else if validCommand("fields") {
err = c.CmdFields()
} else if validCommand("ls") || validCommand("list") {
err = c.CmdList()
} else if validCommand("edit") {
setEditing(true)
err = c.CmdEdit(args["ISSUE"].(string))
} else if validCommand("editmeta") {
err = c.CmdEditMeta(args["ISSUE"].(string))
} else if validCommand("transmeta") {
err = c.CmdTransitionMeta(args["ISSUE"].(string))
} else if validCommand("issuelinktypes") {
err = c.CmdIssueLinkTypes()
} else if validCommand("issuetypes") {
err = c.CmdIssueTypes(validOpt("project", nil).(string))
} else if validCommand("createmeta") {
err = c.CmdCreateMeta(
validOpt("project", nil).(string),
validOpt("issuetype", "Bug").(string),
)
} else if validCommand("create") {
setEditing(true)
err = c.CmdCreate(
validOpt("project", nil).(string),
validOpt("issuetype", "Bug").(string),
)
} else if validCommand("transitions") {
err = c.CmdTransitions(args["ISSUE"].(string))
} else if validCommand("blocks") {
err = c.CmdBlocks(
args["BLOCKER"].(string),
args["ISSUE"].(string),
)
} else if validCommand("dups") {
err = c.CmdDups(
args["DUPLICATE"].(string),
args["ISSUE"].(string),
)
} else if validCommand("watch") {
err = c.CmdWatch(
args["ISSUE"].(string),
validOpt("watcher", user).(string),
)
} else if validCommand("trans") || validCommand("transition") {
setEditing(true)
err = c.CmdTransition(
args["ISSUE"].(string),
args["TRANSITION"].(string),
)
} else if validCommand("close") {
setEditing(false)
err = c.CmdTransition(args["ISSUE"].(string), "close")
} else if validCommand("ack") {
setEditing(false)
err = c.CmdTransition(args["ISSUE"].(string), "acknowledge")
} else if validCommand("reopen") {
setEditing(false)
err = c.CmdTransition(args["ISSUE"].(string), "reopen")
} else if validCommand("resolve") {
setEditing(false)
err = c.CmdTransition(args["ISSUE"].(string), "resolve")
} else if validCommand("start") {
setEditing(false)
err = c.CmdTransition(args["ISSUE"].(string), "start")
} else if validCommand("stop") {
setEditing(false)
err = c.CmdTransition(args["ISSUE"].(string), "stop")
} else if validCommand("comment") {
setEditing(true)
err = c.CmdComment(args["ISSUE"].(string))
} else if validCommand("take") {
err = c.CmdAssign(args["ISSUE"].(string), user)
} else if validCommand("browse") || validCommand("b") {
opts["browse"] = "true"
err = c.Browse(args["ISSUE"].(string))
} else if validCommand("export-templates") {
err = c.CmdExportTemplates()
} else if validCommand("assign") || validCommand("give") {
err = c.CmdAssign(
args["ISSUE"].(string),
args["ASSIGNEE"].(string),
)
} else if val, ok := args["ISSUE"]; ok {
err = c.CmdView(val.(string))
}
if err != nil {
os.Exit(1)
}
os.Exit(0)
}
// parseYaml merges the YAML key/value pairs found in file into opts.
// Files that are missing or unreadable are silently skipped (configs are
// optional), but a file that exists and fails to parse is now logged
// instead of being silently ignored.
func parseYaml(file string, opts map[string]string) {
	if fh, err := ioutil.ReadFile(file); err == nil {
		log.Debug("Found Config file: %s", file)
		if err := yaml.Unmarshal(fh, &opts); err != nil {
			// Previously this error was discarded, hiding broken configs.
			log.Error("Failed to parse %s: %s", file, err)
		}
	}
}
// populateEnv exports the parsed docopt arguments as JIRA_* environment
// variables so that executable config files (run by loadConfigs) can see
// how the command was invoked. "--flag" options become JIRA_FLAG, overrides
// become JIRA_<KEY>, lowercase commands set JIRA_OPERATION, and positional
// arguments become JIRA_<NAME>.
func populateEnv(args map[string]interface{}) {
	for key, val := range args {
		if val != nil && strings.HasPrefix(key, "--") {
			if key == "--override" {
				// Each override is KEY=VALUE; exported as JIRA_<KEY>=<VALUE>.
				for _, v := range val.([]string) {
					if strings.Contains(v, "=") {
						kv := strings.SplitN(v, "=", 2)
						envName := fmt.Sprintf("JIRA_%s", strings.ToUpper(kv[0]))
						os.Setenv(envName, kv[1])
					} else {
						log.Error("Malformed override, expected KEY=VALUE, got %s", v)
						os.Exit(1)
					}
				}
			} else {
				// Strip the "--" and upper-case the remainder for the env name.
				envName := fmt.Sprintf("JIRA_%s", strings.ToUpper(key[2:]))
				switch v := val.(type) {
				case []string:
					os.Setenv(envName, strings.Join(v, ","))
				case string:
					os.Setenv(envName, v)
				case bool:
					// Booleans are exported as "1"/"0".
					if v {
						os.Setenv(envName, "1")
					} else {
						os.Setenv(envName, "0")
					}
				}
			}
		} else if val != nil {
			// lower case strings are operations
			if strings.ToLower(key) == key {
				// Normalize command aliases to their canonical operation name.
				if key == "ls" && val.(bool) {
					os.Setenv("JIRA_OPERATION", "list")
				} else if key == "b" && val.(bool) {
					os.Setenv("JIRA_OPERATION", "browse")
				} else if key == "trans" && val.(bool) {
					os.Setenv("JIRA_OPERATION", "transition")
				} else if key == "give" && val.(bool) {
					os.Setenv("JIRA_OPERATION", "assign")
				} else if val.(bool) {
					os.Setenv("JIRA_OPERATION", key)
				}
			} else {
				// NOTE(review): assumes every non-flag, non-lowercase arg
				// (ISSUE, ASSIGNEE, ...) is a string; a non-string value
				// would panic here — confirm against the docopt grammar.
				os.Setenv(fmt.Sprintf("JIRA_%s", key), val.(string))
			}
		}
	}
}
// loadConfigs merges configuration into opts from /etc/jira-cli.yml first,
// then from every .jira.d/config.yml found walking up from the current
// directory. A config file with any execute bit set is run as a program and
// its stdout parsed as YAML; plain files are parsed directly.
func loadConfigs(opts map[string]string) {
	paths := cli.FindParentPaths(".jira.d/config.yml")
	// prepend the system-wide config so it has the lowest precedence
	paths = append([]string{"/etc/jira-cli.yml"}, paths...)
	for _, file := range paths {
		stat, err := os.Stat(file)
		if err != nil {
			// missing config files are fine — they are all optional
			continue
		}
		// check to see if config file is executable (any execute bit)
		if stat.Mode()&0111 == 0 {
			parseYaml(file, opts)
			continue
		}
		log.Debug("Found Executable Config file: %s", file)
		// it is executable, so run it and try to parse the output
		cmd := exec.Command(file)
		stdout := bytes.NewBufferString("")
		cmd.Stdout = stdout
		cmd.Stderr = bytes.NewBufferString("")
		if err := cmd.Run(); err != nil {
			// fixed "exectuable" typo in the user-facing message
			log.Error("%s is executable, but it failed to execute: %s\n%s", file, err, cmd.Stderr)
			os.Exit(1)
		}
		// Previously a parse failure here was silently ignored.
		if err := yaml.Unmarshal(stdout.Bytes(), &opts); err != nil {
			log.Error("Failed to parse output of %s: %s", file, err)
		}
	}
}
|
// Copyright 2018-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"encoding/json"
"fmt"
"net"
"strconv"
"strings"
"time"
"github.com/cilium/cilium/pkg/monitor/notifications"
)
// Must be synchronized with <bpf/lib/common.h>
const (
	// 0-128 are reserved for BPF datapath events
	// MessageTypeUnspec is the zero/unspecified message type.
	MessageTypeUnspec = iota
	// MessageTypeDrop is a BPF datapath notification carrying a DropNotify
	// which corresponds to drop_notify defined in bpf/lib/drop.h
	MessageTypeDrop
	// MessageTypeDebug is a BPF datapath notification carrying a DebugMsg
	// which corresponds to debug_msg defined in bpf/lib/dbg.h
	MessageTypeDebug
	// MessageTypeCapture is a BPF datapath notification carrying a DebugCapture
	// which corresponds to debug_capture_msg defined in bpf/lib/dbg.h
	MessageTypeCapture
	// MessageTypeTrace is a BPF datapath notification carrying a TraceNotify
	// which corresponds to trace_notify defined in bpf/lib/trace.h
	MessageTypeTrace
	// MessageTypePolicyVerdict is a BPF datapath notification carrying a PolicyVerdictNotify
	// which corresponds to policy_verdict_notify defined in bpf/lib/policy_log.h
	MessageTypePolicyVerdict
	// 129-255 are reserved for agent level events
	// MessageTypeAccessLog contains a pkg/proxy/accesslog.LogRecord
	MessageTypeAccessLog = 129
	// MessageTypeAgent is an agent notification carrying a AgentNotify
	MessageTypeAgent = 130
)

// Human-readable names for the message types above; used for CLI filtering.
const (
	// MessageTypeNameDrop is the name for MessageTypeDrop.
	MessageTypeNameDrop = "drop"
	// MessageTypeNameDebug is the name for MessageTypeDebug.
	MessageTypeNameDebug = "debug"
	// MessageTypeNameCapture is the name for MessageTypeCapture.
	MessageTypeNameCapture = "capture"
	// MessageTypeNameTrace is the name for MessageTypeTrace.
	MessageTypeNameTrace = "trace"
	// MessageTypeNameL7 is the name for MessageTypeAccessLog (L7 records).
	MessageTypeNameL7 = "l7"
	// MessageTypeNameAgent is the name for MessageTypeAgent.
	MessageTypeNameAgent = "agent"
	// MessageTypeNamePolicyVerdict is the name for MessageTypePolicyVerdict.
	MessageTypeNamePolicyVerdict = "policy-verdict"
)

// MessageTypeFilter is a set of numeric message types used to filter
// monitor events; it implements a flag.Value-style interface (String/Set/Type).
type MessageTypeFilter []int

var (
	// MessageTypeNames is a map of all type names
	MessageTypeNames = map[string]int{
		MessageTypeNameDrop: MessageTypeDrop,
		MessageTypeNameDebug: MessageTypeDebug,
		MessageTypeNameCapture: MessageTypeCapture,
		MessageTypeNameTrace: MessageTypeTrace,
		MessageTypeNameL7: MessageTypeAccessLog,
		MessageTypeNameAgent: MessageTypeAgent,
		MessageTypeNamePolicyVerdict: MessageTypePolicyVerdict,
	}
)
// MessageTypeName returns the name for a message type or the numeric value if
// the name can't be found.
func MessageTypeName(typ int) string {
	result := strconv.Itoa(typ)
	for candidate, value := range MessageTypeNames {
		if value == typ {
			result = candidate
			break
		}
	}
	return result
}
// String renders the filter as a comma-separated list of type names.
func (m *MessageTypeFilter) String() string {
	names := make([]string, 0, len(*m))
	for _, typ := range *m {
		names = append(names, MessageTypeName(typ))
	}
	return strings.Join(names, ",")
}
// Set appends the message type named by value to the filter. It returns an
// error if value is not one of the known message type names.
func (m *MessageTypeFilter) Set(value string) error {
	// The lookup's second result is a presence boolean; the original bound
	// it to a variable named "err", which misleadingly read as an error.
	i, ok := MessageTypeNames[value]
	if !ok {
		return fmt.Errorf("Unknown type (%s). Please use one of the following ones %v",
			value, MessageTypeNames)
	}
	*m = append(*m, i)
	return nil
}
// Type returns the flag type description used by pflag-style flag parsing.
func (m *MessageTypeFilter) Type() string {
	return "[]string"
}
// Contains reports whether typ is present in the filter.
func (m *MessageTypeFilter) Contains(typ int) bool {
	found := false
	for _, candidate := range *m {
		if candidate == typ {
			found = true
			break
		}
	}
	return found
}
// Must be synchronized with <bpf/lib/trace.h>
const (
	// Observation points for TraceNotify events; values are assigned by iota
	// and must match the datapath's definitions.
	TraceToLxc = iota
	TraceToProxy
	TraceToHost
	TraceToStack
	TraceToOverlay
	TraceFromLxc
	TraceFromProxy
	TraceFromHost
	TraceFromStack
	TraceFromOverlay
	TraceFromNetwork
	TraceToNetwork
)

// TraceObservationPoints is a map of all supported trace observation points
var TraceObservationPoints = map[uint8]string{
	TraceToLxc: "to-endpoint",
	TraceToProxy: "to-proxy",
	TraceToHost: "to-host",
	TraceToStack: "to-stack",
	TraceToOverlay: "to-overlay",
	TraceToNetwork: "to-network",
	TraceFromLxc: "from-endpoint",
	TraceFromProxy: "from-proxy",
	TraceFromHost: "from-host",
	TraceFromStack: "from-stack",
	TraceFromOverlay: "from-overlay",
	TraceFromNetwork: "from-network",
}
// TraceObservationPoint returns the name of a trace observation point,
// falling back to the numeric value for unknown points.
func TraceObservationPoint(obsPoint uint8) string {
	str, ok := TraceObservationPoints[obsPoint]
	if !ok {
		return fmt.Sprintf("%d", obsPoint)
	}
	return str
}
// TraceObservationPointHasConnState returns true if the observation point
// obsPoint populates the TraceNotify.Reason field with connection tracking
// information.
func TraceObservationPointHasConnState(obsPoint uint8) bool {
	switch obsPoint {
	case TraceToLxc, TraceToProxy, TraceToHost, TraceToStack, TraceToNetwork:
		return true
	}
	return false
}
// AgentNotify is a notification from the agent. The notification is stored
// in its JSON-encoded representation
type AgentNotify struct {
	Type AgentNotification
	Text string
}

// AgentNotifyMessage is a notification from the agent. It is similar to
// AgentNotify, but the notification is an unencoded struct. See the *Message
// constructors in this package for possible values.
// (The original comment mistakenly repeated the name "AgentNotify".)
type AgentNotifyMessage struct {
	Type AgentNotification
	Notification interface{}
}
// ToJSON encodes a AgentNotifyMessage to its JSON-based AgentNotify representation
func (m *AgentNotifyMessage) ToJSON() (AgentNotify, error) {
	repr, err := json.Marshal(m.Notification)
	if err != nil {
		return AgentNotify{}, err
	}
	notify := AgentNotify{
		Type: m.Type,
		Text: string(repr),
	}
	return notify, nil
}
// AgentNotification specifies the type of agent notification
type AgentNotification uint32

// Notification types emitted by the agent; values are assigned by iota and
// appear on the wire inside AgentNotify, so their order must not change.
const (
	AgentNotifyUnspec AgentNotification = iota
	AgentNotifyGeneric
	AgentNotifyStart
	AgentNotifyEndpointRegenerateSuccess
	AgentNotifyEndpointRegenerateFail
	AgentNotifyPolicyUpdated
	AgentNotifyPolicyDeleted
	AgentNotifyEndpointCreated
	AgentNotifyEndpointDeleted
	AgentNotifyIPCacheUpserted
	AgentNotifyIPCacheDeleted
	AgentNotifyServiceUpserted
	AgentNotifyServiceDeleted
)

// notifyTable maps each notification type to its human-readable label.
var notifyTable = map[AgentNotification]string{
	AgentNotifyUnspec: "unspecified",
	AgentNotifyGeneric: "Message",
	AgentNotifyStart: "Cilium agent started",
	AgentNotifyEndpointRegenerateSuccess: "Endpoint regenerated",
	AgentNotifyEndpointCreated: "Endpoint created",
	AgentNotifyEndpointDeleted: "Endpoint deleted",
	AgentNotifyEndpointRegenerateFail: "Failed endpoint regeneration",
	AgentNotifyIPCacheDeleted: "IPCache entry deleted",
	AgentNotifyIPCacheUpserted: "IPCache entry upserted",
	AgentNotifyPolicyUpdated: "Policy updated",
	AgentNotifyPolicyDeleted: "Policy deleted",
	AgentNotifyServiceDeleted: "Service deleted",
	AgentNotifyServiceUpserted: "Service upserted",
}
// resolveAgentType maps a notification type to its human-readable label,
// falling back to the numeric value for unknown types.
func resolveAgentType(t AgentNotification) string {
	label, known := notifyTable[t]
	if !known {
		return fmt.Sprintf("%d", t)
	}
	return label
}
// DumpInfo dumps an agent notification in human-readable form to stdout.
func (n *AgentNotify) DumpInfo() {
	fmt.Printf(">> %s: %s\n", resolveAgentType(n.Type), n.Text)
}

// getJSON renders the notification as a JSON object.
// NOTE(review): n.Text is spliced into the output verbatim — the result is
// only valid JSON if Text itself is valid JSON (as produced by ToJSON);
// confirm no caller stores free-form text here.
func (n *AgentNotify) getJSON() string {
	return fmt.Sprintf(`{"type":"agent","subtype":"%s","message":%s}`, resolveAgentType(n.Type), n.Text)
}

// DumpJSON prints notification in json format
func (n *AgentNotify) DumpJSON() {
	fmt.Println(n.getJSON())
}
// PolicyUpdateNotification structures update notification
type PolicyUpdateNotification struct {
	Labels []string `json:"labels,omitempty"`
	Revision uint64 `json:"revision,omitempty"`
	// RuleCount deliberately has no omitempty: a count of 0 is meaningful.
	RuleCount int `json:"rule_count"`
}
// PolicyUpdateMessage constructs an agent notification message for policy updates
func PolicyUpdateMessage(numRules int, labels []string, revision uint64) AgentNotifyMessage {
	return AgentNotifyMessage{
		Type: AgentNotifyPolicyUpdated,
		Notification: PolicyUpdateNotification{
			Labels:    labels,
			Revision:  revision,
			RuleCount: numRules,
		},
	}
}
// PolicyDeleteMessage constructs an agent notification message for policy deletion
func PolicyDeleteMessage(deleted int, labels []string, revision uint64) AgentNotifyMessage {
	return AgentNotifyMessage{
		Type: AgentNotifyPolicyDeleted,
		Notification: PolicyUpdateNotification{
			Labels:    labels,
			Revision:  revision,
			RuleCount: deleted,
		},
	}
}
// EndpointRegenNotification structures regeneration notification
type EndpointRegenNotification struct {
	ID uint64 `json:"id,omitempty"`
	Labels []string `json:"labels,omitempty"`
	// Error is only set when regeneration failed.
	Error string `json:"error,omitempty"`
}
// EndpointRegenMessage constructs an agent notification message for endpoint regeneration
func EndpointRegenMessage(e notifications.RegenNotificationInfo, err error) AgentNotifyMessage {
	msgType := AgentNotifyEndpointRegenerateSuccess
	payload := EndpointRegenNotification{
		ID:     e.GetID(),
		Labels: e.GetOpLabels(),
	}
	if err != nil {
		msgType = AgentNotifyEndpointRegenerateFail
		payload.Error = err.Error()
	}
	return AgentNotifyMessage{
		Type:         msgType,
		Notification: payload,
	}
}
// EndpointCreateNotification structures the endpoint create notification
type EndpointCreateNotification struct {
	EndpointRegenNotification
	PodName string `json:"pod-name,omitempty"`
	Namespace string `json:"namespace,omitempty"`
}
// EndpointCreateMessage constructs an agent notification message for endpoint creation
func EndpointCreateMessage(e notifications.RegenNotificationInfo) AgentNotifyMessage {
	payload := EndpointCreateNotification{
		EndpointRegenNotification: EndpointRegenNotification{
			ID:     e.GetID(),
			Labels: e.GetOpLabels(),
		},
		PodName:   e.GetK8sPodName(),
		Namespace: e.GetK8sNamespace(),
	}
	return AgentNotifyMessage{
		Type:         AgentNotifyEndpointCreated,
		Notification: payload,
	}
}
// EndpointDeleteNotification structures an endpoint delete notification
// (original comment read "the an endpoint").
type EndpointDeleteNotification struct {
	EndpointRegenNotification
	PodName string `json:"pod-name,omitempty"`
	Namespace string `json:"namespace,omitempty"`
}
// EndpointDeleteMessage constructs an agent notification message for endpoint deletion
func EndpointDeleteMessage(e notifications.RegenNotificationInfo) AgentNotifyMessage {
	payload := EndpointDeleteNotification{
		EndpointRegenNotification: EndpointRegenNotification{
			ID:     e.GetID(),
			Labels: e.GetOpLabels(),
		},
		PodName:   e.GetK8sPodName(),
		Namespace: e.GetK8sNamespace(),
	}
	return AgentNotifyMessage{
		Type:         AgentNotifyEndpointDeleted,
		Notification: payload,
	}
}
// IPCacheNotification structures ipcache change notifications
type IPCacheNotification struct {
	CIDR string `json:"cidr"`
	Identity uint32 `json:"id"`
	// OldIdentity is a pointer so "no previous identity" (nil) can be
	// distinguished from identity 0.
	OldIdentity *uint32 `json:"old-id,omitempty"`
	HostIP net.IP `json:"host-ip,omitempty"`
	OldHostIP net.IP `json:"old-host-ip,omitempty"`
	EncryptKey uint8 `json:"encrypt-key"`
	Namespace string `json:"namespace,omitempty"`
	PodName string `json:"pod-name,omitempty"`
}
// IPCacheUpsertedMessage constructs an agent notification message for ipcache upsertions
func IPCacheUpsertedMessage(cidr string, id uint32, oldID *uint32, hostIP net.IP, oldHostIP net.IP,
	encryptKey uint8, namespace, podName string) AgentNotifyMessage {
	return AgentNotifyMessage{
		Type: AgentNotifyIPCacheUpserted,
		Notification: IPCacheNotification{
			CIDR:        cidr,
			Identity:    id,
			OldIdentity: oldID,
			HostIP:      hostIP,
			OldHostIP:   oldHostIP,
			EncryptKey:  encryptKey,
			Namespace:   namespace,
			PodName:     podName,
		},
	}
}
// IPCacheDeletedMessage constructs an agent notification message for ipcache deletions
func IPCacheDeletedMessage(cidr string, id uint32, oldID *uint32, hostIP net.IP, oldHostIP net.IP,
	encryptKey uint8, namespace, podName string) AgentNotifyMessage {
	return AgentNotifyMessage{
		Type: AgentNotifyIPCacheDeleted,
		Notification: IPCacheNotification{
			CIDR:        cidr,
			Identity:    id,
			OldIdentity: oldID,
			HostIP:      hostIP,
			OldHostIP:   oldHostIP,
			EncryptKey:  encryptKey,
			Namespace:   namespace,
			PodName:     podName,
		},
	}
}
// TimeNotification structures agent start notification
type TimeNotification struct {
	// Time is the start timestamp rendered via time.Time.String().
	Time string `json:"time"`
}

// StartMessage constructs an agent notification message when the agent starts
// (godoc previously named this "AgentStartMessage"; corrected to match the
// function name).
func StartMessage(t time.Time) AgentNotifyMessage {
	notification := TimeNotification{
		Time: t.String(),
	}
	return AgentNotifyMessage{
		Type:         AgentNotifyStart,
		Notification: notification,
	}
}
// ServiceUpsertNotificationAddr is part of ServiceUpsertNotification
type ServiceUpsertNotificationAddr struct {
IP net.IP `json:"ip"`
Port uint16 `json:"port"`
}
// ServiceUpsertNotification structures service upsert notifications
type ServiceUpsertNotification struct {
ID uint32 `json:"id"`
Frontend ServiceUpsertNotificationAddr `json:"frontend-address"`
Backends []ServiceUpsertNotificationAddr `json:"backend-addresses"`
Type string `json:"type,omitempty"`
TrafficPolicy string `json:"traffic-policy,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,,omitempty"`
}
// ServiceUpsertMessage constructs an agent notification message for service upserts
func ServiceUpsertMessage(
	id uint32,
	frontend ServiceUpsertNotificationAddr,
	backends []ServiceUpsertNotificationAddr,
	svcType, svcTrafficPolicy, svcName, svcNamespace string,
) AgentNotifyMessage {
	notification := ServiceUpsertNotification{
		ID:            id,
		Frontend:      frontend,
		Backends:      backends,
		Type:          svcType,
		TrafficPolicy: svcTrafficPolicy,
		Name:          svcName,
		Namespace:     svcNamespace,
	}
	return AgentNotifyMessage{
		Type:         AgentNotifyServiceUpserted,
		Notification: notification,
	}
}

// ServiceDeleteNotification structures service delete notifications
type ServiceDeleteNotification struct {
	// ID is the identifier of the deleted service.
	ID uint32 `json:"id"`
}

// ServiceDeleteMessage constructs an agent notification message for service deletions
func ServiceDeleteMessage(id uint32) AgentNotifyMessage {
	notification := ServiceDeleteNotification{
		ID: id,
	}
	return AgentNotifyMessage{
		Type:         AgentNotifyServiceDeleted,
		Notification: notification,
	}
}
const (
	// PolicyIngress is the value of Flags&PolicyNotifyFlagDirection for ingress traffic
	PolicyIngress = 1
	// PolicyEgress is the value of Flags&PolicyNotifyFlagDirection for egress traffic
	PolicyEgress = 2

	// PolicyMatchNone is the value of MatchType indicating no policy match
	PolicyMatchNone = 0
	// PolicyMatchL3Only is the value of MatchType indicating a L3-only match
	PolicyMatchL3Only = 1
	// PolicyMatchL3L4 is the value of MatchType indicating a L3+L4 match
	PolicyMatchL3L4 = 2
	// PolicyMatchL4Only is the value of MatchType indicating a L4-only match
	PolicyMatchL4Only = 3
	// PolicyMatchAll is the value of MatchType indicating an allow-all match
	PolicyMatchAll = 4
)

// PolicyMatchType corresponds to the MatchType values above.
type PolicyMatchType int

// String returns a human-readable name for the match type; values outside
// the defined set render as "unknown".
func (m PolicyMatchType) String() string {
	switch m {
	case PolicyMatchL3Only:
		return "L3-Only"
	case PolicyMatchL3L4:
		return "L3-L4"
	case PolicyMatchL4Only:
		return "L4-Only"
	case PolicyMatchAll:
		return "all"
	case PolicyMatchNone:
		return "none"
	}
	return "unknown"
}
monitor/api: fix godoc comments
Correct godoc comments for type AgentNotifyMessage and func StartMessage
to state the proper name.
Signed-off-by: Tobias Klauser <9249c549618448e4c699e8032ba6c7f0d7fd4b7f@distanz.ch>
// Copyright 2018-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"encoding/json"
"fmt"
"net"
"strconv"
"strings"
"time"
"github.com/cilium/cilium/pkg/monitor/notifications"
)
// Must be synchronized with <bpf/lib/common.h>
const (
	// 0-128 are reserved for BPF datapath events
	MessageTypeUnspec = iota

	// MessageTypeDrop is a BPF datapath notification carrying a DropNotify
	// which corresponds to drop_notify defined in bpf/lib/drop.h
	MessageTypeDrop

	// MessageTypeDebug is a BPF datapath notification carrying a DebugMsg
	// which corresponds to debug_msg defined in bpf/lib/dbg.h
	MessageTypeDebug

	// MessageTypeCapture is a BPF datapath notification carrying a DebugCapture
	// which corresponds to debug_capture_msg defined in bpf/lib/dbg.h
	MessageTypeCapture

	// MessageTypeTrace is a BPF datapath notification carrying a TraceNotify
	// which corresponds to trace_notify defined in bpf/lib/trace.h
	MessageTypeTrace

	// MessageTypePolicyVerdict is a BPF datapath notification carrying a PolicyVerdictNotify
	// which corresponds to policy_verdict_notify defined in bpf/lib/policy_log.h
	MessageTypePolicyVerdict

	// 129-255 are reserved for agent level events

	// MessageTypeAccessLog contains a pkg/proxy/accesslog.LogRecord
	MessageTypeAccessLog = 129

	// MessageTypeAgent is an agent notification carrying a AgentNotify
	MessageTypeAgent = 130
)

// Human-readable names for the message types above, used for CLI filtering
// and display.
const (
	MessageTypeNameDrop          = "drop"
	MessageTypeNameDebug         = "debug"
	MessageTypeNameCapture       = "capture"
	MessageTypeNameTrace         = "trace"
	MessageTypeNameL7            = "l7"
	MessageTypeNameAgent         = "agent"
	MessageTypeNamePolicyVerdict = "policy-verdict"
)

// MessageTypeFilter is a list of numeric message types used to select
// which monitor events to process.
type MessageTypeFilter []int

var (
	// MessageTypeNames is a map of all type names
	MessageTypeNames = map[string]int{
		MessageTypeNameDrop:          MessageTypeDrop,
		MessageTypeNameDebug:         MessageTypeDebug,
		MessageTypeNameCapture:       MessageTypeCapture,
		MessageTypeNameTrace:         MessageTypeTrace,
		MessageTypeNameL7:            MessageTypeAccessLog,
		MessageTypeNameAgent:         MessageTypeAgent,
		MessageTypeNamePolicyVerdict: MessageTypePolicyVerdict,
	}
)
// MessageTypeName returns the name for a message type or the numeric value if
// the name can't be found
func MessageTypeName(typ int) string {
	for name, value := range MessageTypeNames {
		if value == typ {
			return name
		}
	}
	return strconv.Itoa(typ)
}

// String renders the filter as a comma-separated list of type names.
func (m *MessageTypeFilter) String() string {
	pieces := make([]string, 0, len(*m))
	for _, typ := range *m {
		pieces = append(pieces, MessageTypeName(typ))
	}
	return strings.Join(pieces, ",")
}
// Set parses value as a message type name and appends the corresponding
// numeric type to the filter; unknown names yield an error listing the
// valid types. (Renamed the comma-ok boolean from the misleading `err`
// to the idiomatic `ok` — it is not an error value.)
func (m *MessageTypeFilter) Set(value string) error {
	i, ok := MessageTypeNames[value]
	if !ok {
		return fmt.Errorf("Unknown type (%s). Please use one of the following ones %v",
			value, MessageTypeNames)
	}
	*m = append(*m, i)
	return nil
}
// Type describes the flag value type.
// NOTE(review): presumably part of a flag.Value-style interface consumed
// by the CLI flag package — confirm at call sites.
func (m *MessageTypeFilter) Type() string {
	return "[]string"
}

// Contains reports whether typ is present in the filter.
func (m *MessageTypeFilter) Contains(typ int) bool {
	for _, v := range *m {
		if v == typ {
			return true
		}
	}
	return false
}
// Must be synchronized with <bpf/lib/trace.h>
const (
	TraceToLxc = iota
	TraceToProxy
	TraceToHost
	TraceToStack
	TraceToOverlay
	TraceFromLxc
	TraceFromProxy
	TraceFromHost
	TraceFromStack
	TraceFromOverlay
	TraceFromNetwork
	TraceToNetwork
)

// TraceObservationPoints is a map of all supported trace observation points
var TraceObservationPoints = map[uint8]string{
	TraceToLxc:       "to-endpoint",
	TraceToProxy:     "to-proxy",
	TraceToHost:      "to-host",
	TraceToStack:     "to-stack",
	TraceToOverlay:   "to-overlay",
	TraceToNetwork:   "to-network",
	TraceFromLxc:     "from-endpoint",
	TraceFromProxy:   "from-proxy",
	TraceFromHost:    "from-host",
	TraceFromStack:   "from-stack",
	TraceFromOverlay: "from-overlay",
	TraceFromNetwork: "from-network",
}

// TraceObservationPoint returns the name of a trace observation point,
// or the numeric value as a string if the point is unknown.
func TraceObservationPoint(obsPoint uint8) string {
	if str, ok := TraceObservationPoints[obsPoint]; ok {
		return str
	}
	return fmt.Sprintf("%d", obsPoint)
}

// TraceObservationPointHasConnState returns true if the observation point
// obsPoint populates the TraceNotify.Reason field with connection tracking
// information.
func TraceObservationPointHasConnState(obsPoint uint8) bool {
	switch obsPoint {
	case TraceToLxc,
		TraceToProxy,
		TraceToHost,
		TraceToStack,
		TraceToNetwork:
		return true
	default:
		return false
	}
}
// AgentNotify is a notification from the agent. The notification payload
// is held in Text as its JSON-encoded representation.
type AgentNotify struct {
	Type AgentNotification
	Text string
}

// AgentNotifyMessage is a notification from the agent. It is similar to
// AgentNotify, but the notification is an unencoded struct. See the *Message
// constructors in this package for possible values.
type AgentNotifyMessage struct {
	Type         AgentNotification
	Notification interface{}
}

// ToJSON encodes an AgentNotifyMessage into its JSON-based AgentNotify form.
func (m *AgentNotifyMessage) ToJSON() (AgentNotify, error) {
	var encoded AgentNotify
	repr, err := json.Marshal(m.Notification)
	if err != nil {
		return encoded, err
	}
	encoded.Type = m.Type
	encoded.Text = string(repr)
	return encoded, nil
}

// AgentNotification specifies the type of agent notification
type AgentNotification uint32

const (
	AgentNotifyUnspec AgentNotification = iota
	AgentNotifyGeneric
	AgentNotifyStart
	AgentNotifyEndpointRegenerateSuccess
	AgentNotifyEndpointRegenerateFail
	AgentNotifyPolicyUpdated
	AgentNotifyPolicyDeleted
	AgentNotifyEndpointCreated
	AgentNotifyEndpointDeleted
	AgentNotifyIPCacheUpserted
	AgentNotifyIPCacheDeleted
	AgentNotifyServiceUpserted
	AgentNotifyServiceDeleted
)
// notifyTable maps each agent notification type to its display string.
var notifyTable = map[AgentNotification]string{
	AgentNotifyUnspec:                    "unspecified",
	AgentNotifyGeneric:                   "Message",
	AgentNotifyStart:                     "Cilium agent started",
	AgentNotifyEndpointRegenerateSuccess: "Endpoint regenerated",
	AgentNotifyEndpointCreated:           "Endpoint created",
	AgentNotifyEndpointDeleted:           "Endpoint deleted",
	AgentNotifyEndpointRegenerateFail:    "Failed endpoint regeneration",
	AgentNotifyIPCacheDeleted:            "IPCache entry deleted",
	AgentNotifyIPCacheUpserted:           "IPCache entry upserted",
	AgentNotifyPolicyUpdated:             "Policy updated",
	AgentNotifyPolicyDeleted:             "Policy deleted",
	AgentNotifyServiceDeleted:            "Service deleted",
	AgentNotifyServiceUpserted:           "Service upserted",
}

// resolveAgentType returns the display string for t, or its numeric value
// when the type is not in notifyTable.
func resolveAgentType(t AgentNotification) string {
	if n, ok := notifyTable[t]; ok {
		return n
	}
	return fmt.Sprintf("%d", t)
}

// DumpInfo dumps an agent notification
func (n *AgentNotify) DumpInfo() {
	fmt.Printf(">> %s: %s\n", resolveAgentType(n.Type), n.Text)
}

// getJSON renders the notification as a JSON object.
// NOTE(review): Text is spliced in verbatim, so it is assumed to already
// be valid JSON (as produced by ToJSON) — confirm for other producers.
func (n *AgentNotify) getJSON() string {
	return fmt.Sprintf(`{"type":"agent","subtype":"%s","message":%s}`, resolveAgentType(n.Type), n.Text)
}

// DumpJSON prints notification in json format
func (n *AgentNotify) DumpJSON() {
	fmt.Println(n.getJSON())
}
// PolicyUpdateNotification structures update notification
type PolicyUpdateNotification struct {
	Labels   []string `json:"labels,omitempty"`
	Revision uint64   `json:"revision,omitempty"`
	RuleCount int     `json:"rule_count"`
}

// PolicyUpdateMessage constructs an agent notification message for policy updates
func PolicyUpdateMessage(numRules int, labels []string, revision uint64) AgentNotifyMessage {
	notification := PolicyUpdateNotification{
		Labels:    labels,
		Revision:  revision,
		RuleCount: numRules,
	}
	return AgentNotifyMessage{
		Type:         AgentNotifyPolicyUpdated,
		Notification: notification,
	}
}

// PolicyDeleteMessage constructs an agent notification message for policy deletion.
// It reuses PolicyUpdateNotification, with RuleCount holding the number of
// deleted rules.
func PolicyDeleteMessage(deleted int, labels []string, revision uint64) AgentNotifyMessage {
	notification := PolicyUpdateNotification{
		Labels:    labels,
		Revision:  revision,
		RuleCount: deleted,
	}
	return AgentNotifyMessage{
		Type:         AgentNotifyPolicyDeleted,
		Notification: notification,
	}
}
// EndpointRegenNotification structures regeneration notification
type EndpointRegenNotification struct {
	ID     uint64   `json:"id,omitempty"`
	Labels []string `json:"labels,omitempty"`
	// Error is only set for failed regenerations.
	Error string `json:"error,omitempty"`
}

// EndpointRegenMessage constructs an agent notification message for endpoint regeneration.
// A nil err produces a success notification; otherwise the error text is
// attached and the failure type is used.
func EndpointRegenMessage(e notifications.RegenNotificationInfo, err error) AgentNotifyMessage {
	notification := EndpointRegenNotification{
		ID:     e.GetID(),
		Labels: e.GetOpLabels(),
	}
	typ := AgentNotifyEndpointRegenerateSuccess
	if err != nil {
		notification.Error = err.Error()
		typ = AgentNotifyEndpointRegenerateFail
	}
	return AgentNotifyMessage{
		Type:         typ,
		Notification: notification,
	}
}
// EndpointCreateNotification structures the endpoint create notification
type EndpointCreateNotification struct {
	EndpointRegenNotification
	PodName   string `json:"pod-name,omitempty"`
	Namespace string `json:"namespace,omitempty"`
}

// EndpointCreateMessage constructs an agent notification message for endpoint creation
func EndpointCreateMessage(e notifications.RegenNotificationInfo) AgentNotifyMessage {
	notification := EndpointCreateNotification{
		EndpointRegenNotification: EndpointRegenNotification{
			ID:     e.GetID(),
			Labels: e.GetOpLabels(),
		},
		PodName:   e.GetK8sPodName(),
		Namespace: e.GetK8sNamespace(),
	}
	return AgentNotifyMessage{
		Type:         AgentNotifyEndpointCreated,
		Notification: notification,
	}
}
// EndpointDeleteNotification structures an endpoint delete notification
type EndpointDeleteNotification struct {
	EndpointRegenNotification
	PodName   string `json:"pod-name,omitempty"`
	Namespace string `json:"namespace,omitempty"`
}

// EndpointDeleteMessage constructs an agent notification message for endpoint deletion
func EndpointDeleteMessage(e notifications.RegenNotificationInfo) AgentNotifyMessage {
	notification := EndpointDeleteNotification{
		EndpointRegenNotification: EndpointRegenNotification{
			ID:     e.GetID(),
			Labels: e.GetOpLabels(),
		},
		PodName:   e.GetK8sPodName(),
		Namespace: e.GetK8sNamespace(),
	}
	return AgentNotifyMessage{
		Type:         AgentNotifyEndpointDeleted,
		Notification: notification,
	}
}
// IPCacheNotification structures ipcache change notifications.
// It carries both new and previous values of a CIDR->identity mapping so
// consumers can correlate the change with prior state.
type IPCacheNotification struct {
	CIDR     string `json:"cidr"`
	Identity uint32 `json:"id"`
	// OldIdentity is nil when the CIDR had no previous identity.
	OldIdentity *uint32 `json:"old-id,omitempty"`
	HostIP      net.IP  `json:"host-ip,omitempty"`
	OldHostIP   net.IP  `json:"old-host-ip,omitempty"`
	EncryptKey  uint8   `json:"encrypt-key"`
	Namespace   string  `json:"namespace,omitempty"`
	PodName     string  `json:"pod-name,omitempty"`
}

// IPCacheUpsertedMessage constructs an agent notification message for ipcache upsertions
func IPCacheUpsertedMessage(cidr string, id uint32, oldID *uint32, hostIP net.IP, oldHostIP net.IP,
	encryptKey uint8, namespace, podName string) AgentNotifyMessage {
	notification := IPCacheNotification{
		CIDR:        cidr,
		Identity:    id,
		OldIdentity: oldID,
		HostIP:      hostIP,
		OldHostIP:   oldHostIP,
		EncryptKey:  encryptKey,
		Namespace:   namespace,
		PodName:     podName,
	}
	return AgentNotifyMessage{
		Type:         AgentNotifyIPCacheUpserted,
		Notification: notification,
	}
}

// IPCacheDeletedMessage constructs an agent notification message for ipcache deletions
func IPCacheDeletedMessage(cidr string, id uint32, oldID *uint32, hostIP net.IP, oldHostIP net.IP,
	encryptKey uint8, namespace, podName string) AgentNotifyMessage {
	notification := IPCacheNotification{
		CIDR:        cidr,
		Identity:    id,
		OldIdentity: oldID,
		HostIP:      hostIP,
		OldHostIP:   oldHostIP,
		EncryptKey:  encryptKey,
		Namespace:   namespace,
		PodName:     podName,
	}
	return AgentNotifyMessage{
		Type:         AgentNotifyIPCacheDeleted,
		Notification: notification,
	}
}
// TimeNotification structures agent start notification
type TimeNotification struct {
	// Time is the start timestamp rendered via time.Time.String().
	Time string `json:"time"`
}

// StartMessage constructs an agent notification message when the agent starts
func StartMessage(t time.Time) AgentNotifyMessage {
	notification := TimeNotification{
		Time: t.String(),
	}
	return AgentNotifyMessage{
		Type:         AgentNotifyStart,
		Notification: notification,
	}
}
// ServiceUpsertNotificationAddr is part of ServiceUpsertNotification
type ServiceUpsertNotificationAddr struct {
IP net.IP `json:"ip"`
Port uint16 `json:"port"`
}
// ServiceUpsertNotification structures service upsert notifications
type ServiceUpsertNotification struct {
ID uint32 `json:"id"`
Frontend ServiceUpsertNotificationAddr `json:"frontend-address"`
Backends []ServiceUpsertNotificationAddr `json:"backend-addresses"`
Type string `json:"type,omitempty"`
TrafficPolicy string `json:"traffic-policy,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,,omitempty"`
}
// ServiceUpsertMessage builds the agent notification announcing that a
// service was created or updated, bundling its frontend, backends and
// metadata into a ServiceUpsertNotification payload.
func ServiceUpsertMessage(
	id uint32,
	frontend ServiceUpsertNotificationAddr,
	backends []ServiceUpsertNotificationAddr,
	svcType, svcTrafficPolicy, svcName, svcNamespace string,
) AgentNotifyMessage {
	return AgentNotifyMessage{
		Type: AgentNotifyServiceUpserted,
		Notification: ServiceUpsertNotification{
			ID:            id,
			Frontend:      frontend,
			Backends:      backends,
			Type:          svcType,
			TrafficPolicy: svcTrafficPolicy,
			Name:          svcName,
			Namespace:     svcNamespace,
		},
	}
}
// ServiceDeleteNotification structures service delete notifications
type ServiceDeleteNotification struct {
	// ID is the identifier of the deleted service.
	ID uint32 `json:"id"`
}

// ServiceDeleteMessage constructs an agent notification message for service deletions
func ServiceDeleteMessage(id uint32) AgentNotifyMessage {
	notification := ServiceDeleteNotification{
		ID: id,
	}
	return AgentNotifyMessage{
		Type:         AgentNotifyServiceDeleted,
		Notification: notification,
	}
}
const (
	// PolicyIngress is the value of Flags&PolicyNotifyFlagDirection for ingress traffic
	PolicyIngress = 1
	// PolicyEgress is the value of Flags&PolicyNotifyFlagDirection for egress traffic
	PolicyEgress = 2

	// PolicyMatchNone is the value of MatchType indicating no policy match
	PolicyMatchNone = 0
	// PolicyMatchL3Only is the value of MatchType indicating a L3-only match
	PolicyMatchL3Only = 1
	// PolicyMatchL3L4 is the value of MatchType indicating a L3+L4 match
	PolicyMatchL3L4 = 2
	// PolicyMatchL4Only is the value of MatchType indicating a L4-only match
	PolicyMatchL4Only = 3
	// PolicyMatchAll is the value of MatchType indicating an allow-all match
	PolicyMatchAll = 4
)

// PolicyMatchType corresponds to the MatchType values above.
type PolicyMatchType int

// String returns a human-readable name for the match type; values outside
// the defined set render as "unknown".
func (m PolicyMatchType) String() string {
	switch m {
	case PolicyMatchL3Only:
		return "L3-Only"
	case PolicyMatchL3L4:
		return "L3-L4"
	case PolicyMatchL4Only:
		return "L4-Only"
	case PolicyMatchAll:
		return "all"
	case PolicyMatchNone:
		return "none"
	}
	return "unknown"
}
|
package paxos
import (
"fmt"
"strings"
"strconv"
"borg/assert"
"testing"
"container/vector"
)
// Field indices within a ":"-separated protocol message,
// e.g. "<from>:<to>:<cmd>:<rnd>".
const (
	iFrom = iota
	iTo
	iCmd
	iRnd
	iNumParts
)

// accept runs a Paxos acceptor over textual messages read from ins.
// For each INVITE whose round number exceeds the highest seen so far it
// replies on outs with "ACCEPT:<rnd>:<vrnd>:<vval>"; malformed messages
// and stale rounds are ignored. outs is closed only after ins is closed
// and every pending reply has been delivered.
// NOTE(review): written against a pre-1.0 Go release (three-argument
// strings.Split, strconv.Btoui64, container/vector) and will not build
// with a modern toolchain.
func accept(ins, outs chan string) {
	var rnd, vrnd uint64
	var vval string
	// ch counts completed asynchronous sends so that outs is closed only
	// once all `sent` replies have gone out.
	ch, sent := make(chan int), 0
	for in := range ins {
		parts := strings.Split(in, ":", iNumParts)
		if len(parts) != iNumParts {
			continue
		}
		switch parts[iCmd] {
		case "INVITE":
			i, _ := strconv.Btoui64(parts[iRnd], 10)
			// If parts[iRnd] is invalid, i is 0 and the message will be ignored
			switch {
			case i <= rnd:
				// stale or duplicate round: deliberately ignored
			case i > rnd:
				rnd = i
				sent++
				msg := fmt.Sprintf("ACCEPT:%d:%d:%s", i, vrnd, vval)
				go func(msg string) { outs <- msg ; ch <- 1 }(msg)
			}
		}
	}
	for x := 0; x < sent; x++ {
		<-ch
	}
	close(outs)
}
// TESTING

// slurp drains ch into a slice until the channel is closed.
func slurp(ch chan string) (got []string) {
	for x := range ch { (*vector.StringVector)(&got).Push(x) }
	return
}

// TestAcceptsInvite verifies that a fresh INVITE is answered with an
// ACCEPT carrying the invite's round number.
func TestAcceptsInvite(t *testing.T) {
	ins := make(chan string)
	outs := make(chan string)
	exp := []string{"ACCEPT:1:0:"}

	go accept(ins, outs)
	// Send a message with no senderId
	ins <- "1:*:INVITE:1"
	close(ins)

	// outs was closed; therefore all messages have been processed
	assert.Equal(t, exp, slurp(outs), "")
}

// TestIgnoresStaleInvites verifies that an INVITE with a round number not
// greater than the current one produces no reply.
func TestIgnoresStaleInvites(t *testing.T) {
	ins := make(chan string)
	outs := make(chan string)
	exp := []string{"ACCEPT:2:0:"}

	go accept(ins, outs)
	// Send a message with no senderId
	ins <- "1:*:INVITE:2"
	ins <- "1:*:INVITE:1"
	close(ins)

	// outs was closed; therefore all messages have been processed
	assert.Equal(t, exp, slurp(outs), "")
}

// TestIgnoresMalformedMessages verifies that structurally invalid
// messages are silently dropped.
func TestIgnoresMalformedMessages(t *testing.T) {
	totest := []string{
		"x",            // too few separators
		"x:x",          // too few separators
		"x:x:x",        // too few separators
		"x:x:x:x:x",    // too many separators
		"1:*:INVITE:x", // invalid round number
		"1:*:x:1",      // unknown command
	}
	for _, msg := range(totest) {
		ins := make(chan string)
		outs := make(chan string)
		exp := []string{}

		go accept(ins, outs)
		// Send a message with no senderId
		ins <- msg
		close(ins)

		// outs was closed; therefore all messages have been processed
		assert.Equal(t, exp, slurp(outs), "")
	}
}
test that replies go to the proper machine
package paxos
import (
"fmt"
"strings"
"strconv"
"borg/assert"
"testing"
"container/vector"
)
// Field indices within a ":"-separated protocol message,
// e.g. "<from>:<to>:<cmd>:<rnd>".
const (
	iFrom = iota
	iTo
	iCmd
	iRnd
	iNumParts
)

// accept runs a Paxos acceptor identified by me over textual messages
// read from ins. For each INVITE whose round exceeds the highest seen, it
// replies on outs with "<me>:<sender>:ACCEPT:<rnd>:<vrnd>:<vval>" so the
// reply is addressed back to the inviter. Malformed messages and stale
// rounds are ignored; outs is closed only after ins is closed and every
// pending reply has been delivered.
// NOTE(review): pre-1.0 Go dialect (three-argument strings.Split,
// strconv.Btoui64) — will not build with a modern toolchain.
func accept(me int, ins, outs chan string) {
	var rnd, vrnd uint64
	var vval string
	// ch counts completed asynchronous sends so that outs is closed only
	// once all `sent` replies have gone out.
	ch, sent := make(chan int), 0
	for in := range ins {
		parts := strings.Split(in, ":", iNumParts)
		if len(parts) != iNumParts {
			continue
		}
		switch parts[iCmd] {
		case "INVITE":
			i, _ := strconv.Btoui64(parts[iRnd], 10)
			// sender id, used as the reply destination below
			inFrom, _ := strconv.Btoui64(parts[iFrom], 10)
			// If parts[iRnd] is invalid, i is 0 and the message will be ignored
			switch {
			case i <= rnd:
				// stale or duplicate round: deliberately ignored
			case i > rnd:
				rnd = i
				sent++
				outTo := inFrom // reply to the sender
				msg := fmt.Sprintf(
					"%d:%d:ACCEPT:%d:%d:%s",
					me,
					outTo,
					i,
					vrnd,
					vval,
				)
				go func(msg string) { outs <- msg ; ch <- 1 }(msg)
			}
		}
	}
	for x := 0; x < sent; x++ {
		<-ch
	}
	close(outs)
}
// TESTING

// slurp drains ch into a slice until the channel is closed.
func slurp(ch chan string) (got []string) {
	for x := range ch { (*vector.StringVector)(&got).Push(x) }
	return
}

// TestAcceptsInvite verifies that a fresh INVITE is answered with an
// ACCEPT addressed from acceptor 2 back to sender 1.
func TestAcceptsInvite(t *testing.T) {
	ins := make(chan string)
	outs := make(chan string)
	exp := []string{"2:1:ACCEPT:1:0:"}

	go accept(2, ins, outs)
	// Send a message with no senderId
	ins <- "1:*:INVITE:1"
	close(ins)

	// outs was closed; therefore all messages have been processed
	assert.Equal(t, exp, slurp(outs), "")
}

// TestIgnoresStaleInvites verifies that an INVITE with a round number not
// greater than the current one produces no reply.
func TestIgnoresStaleInvites(t *testing.T) {
	ins := make(chan string)
	outs := make(chan string)
	exp := []string{"2:1:ACCEPT:2:0:"}

	go accept(2, ins, outs)
	// Send a message with no senderId
	ins <- "1:*:INVITE:2"
	ins <- "1:*:INVITE:1"
	close(ins)

	// outs was closed; therefore all messages have been processed
	assert.Equal(t, exp, slurp(outs), "")
}

// TestIgnoresMalformedMessages verifies that structurally invalid
// messages are silently dropped.
func TestIgnoresMalformedMessages(t *testing.T) {
	totest := []string{
		"x",            // too few separators
		"x:x",          // too few separators
		"x:x:x",        // too few separators
		"x:x:x:x:x",    // too many separators
		"1:*:INVITE:x", // invalid round number
		"1:*:x:1",      // unknown command
	}
	for _, msg := range(totest) {
		ins := make(chan string)
		outs := make(chan string)
		exp := []string{}

		go accept(2, ins, outs)
		// Send a message with no senderId
		ins <- msg
		close(ins)

		// outs was closed; therefore all messages have been processed
		assert.Equal(t, exp, slurp(outs), "")
	}
}
|
package registry
import (
"context"
"reflect"
"sort"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
)
// Descriptor describes a registered service: its name, instance, and the
// priority used to order initialization.
type Descriptor struct {
	Name         string
	Instance     Service
	InitPriority Priority
}

// services holds every registered service descriptor.
var services []*Descriptor

// RegisterService registers instance under its reflected type name with
// low init priority.
func RegisterService(instance Service) {
	services = append(services, &Descriptor{
		Name:         reflect.TypeOf(instance).Elem().Name(),
		Instance:     instance,
		InitPriority: Low,
	})
}

// Register adds a fully specified descriptor to the registry.
func Register(descriptor *Descriptor) {
	services = append(services, descriptor)
}

// GetServices returns the registered services sorted by descending init
// priority. Note: it sorts the package-level slice in place.
func GetServices() []*Descriptor {
	sort.Slice(services, func(i, j int) bool {
		return services[i].InitPriority > services[j].InitPriority
	})

	return services
}

// Service interface is the lowest common shape that services
// are expected to fulfill to be started within Grafana.
type Service interface {
	// Init is called by Grafana main process which gives the service
	// the possibility to do some initial work before it's started. Things
	// like adding routes, bus handlers should be done in the Init function
	Init() error
}

// CanBeDisabled allows the services to decide if it should
// be started or not by itself. This is useful for services
// that might not always be started, ex alerting.
// This will be called after `Init()`.
type CanBeDisabled interface {
	// IsDisabled should return a bool saying if it can be started or not.
	IsDisabled() bool
}

// BackgroundService should be implemented for services that have
// long running tasks in the background.
type BackgroundService interface {
	// Run starts the background process of the service after `Init` have been called
	// on all services. The `context.Context` passed into the function should be used
	// to subscribe to ctx.Done() so the service can be notified when Grafana shuts down.
	Run(ctx context.Context) error
}

// DatabaseMigrator allows the caller to add migrations to
// the migrator passed as argument
type DatabaseMigrator interface {
	// AddMigration allows the service to add migrations to
	// the database migrator.
	AddMigration(mg *migrator.Migrator)
}

// IsDisabled takes a service and returns true if it is disabled.
func IsDisabled(srv Service) bool {
	canBeDisabled, ok := srv.(CanBeDisabled)
	return ok && canBeDisabled.IsDisabled()
}

// Priority orders service initialization; higher runs first.
type Priority int

const (
	High Priority = 100
	Low  Priority = 0
)
add functionality to override service in registry
package registry
import (
"context"
"reflect"
"sort"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
)
// Descriptor describes a registered service: its name, instance, and the
// priority used to order initialization.
type Descriptor struct {
	Name         string
	Instance     Service
	InitPriority Priority
}

// services holds every registered service descriptor.
var services []*Descriptor

// RegisterService registers instance under its reflected type name with
// low init priority.
func RegisterService(instance Service) {
	services = append(services, &Descriptor{
		Name:         reflect.TypeOf(instance).Elem().Name(),
		Instance:     instance,
		InitPriority: Low,
	})
}

// Register adds a fully specified descriptor to the registry.
func Register(descriptor *Descriptor) {
	services = append(services, descriptor)
}
// GetServices returns the registered services, after applying any
// registered overrides, sorted by descending init priority.
func GetServices() []*Descriptor {
	slice := getServicesWithOverrides()
	sort.Slice(slice, func(i, j int) bool {
		return slice[i].InitPriority > slice[j].InitPriority
	})

	return slice
}

// OverrideServiceFunc inspects a registered descriptor and may return a
// replacement; the boolean reports whether the override applies.
type OverrideServiceFunc func(descriptor Descriptor) (*Descriptor, bool)

// overrides holds the override functions consulted by GetServices.
var overrides []OverrideServiceFunc

// RegisterOverride registers fn to be consulted when the service list is
// assembled by GetServices.
func RegisterOverride(fn OverrideServiceFunc) {
	overrides = append(overrides, fn)
}

// getServicesWithOverrides returns a copy of the service list in which
// each descriptor is replaced by the first matching override, if any.
// The result slice is pre-sized to avoid repeated growth.
func getServicesWithOverrides() []*Descriptor {
	slice := make([]*Descriptor, 0, len(services))
	for _, s := range services {
		var descriptor *Descriptor
		for _, fn := range overrides {
			if newDescriptor, override := fn(*s); override {
				descriptor = newDescriptor
				break
			}
		}

		if descriptor != nil {
			slice = append(slice, descriptor)
		} else {
			slice = append(slice, s)
		}
	}

	return slice
}
// Service interface is the lowest common shape that services
// are expected to fulfill to be started within Grafana.
type Service interface {
	// Init is called by Grafana main process which gives the service
	// the possibility to do some initial work before it's started. Things
	// like adding routes, bus handlers should be done in the Init function
	Init() error
}

// CanBeDisabled allows the services to decide if it should
// be started or not by itself. This is useful for services
// that might not always be started, ex alerting.
// This will be called after `Init()`.
type CanBeDisabled interface {
	// IsDisabled should return a bool saying if it can be started or not.
	IsDisabled() bool
}

// BackgroundService should be implemented for services that have
// long running tasks in the background.
type BackgroundService interface {
	// Run starts the background process of the service after `Init` have been called
	// on all services. The `context.Context` passed into the function should be used
	// to subscribe to ctx.Done() so the service can be notified when Grafana shuts down.
	Run(ctx context.Context) error
}

// DatabaseMigrator allows the caller to add migrations to
// the migrator passed as argument
type DatabaseMigrator interface {
	// AddMigration allows the service to add migrations to
	// the database migrator.
	AddMigration(mg *migrator.Migrator)
}

// IsDisabled takes a service and returns true if it is disabled.
func IsDisabled(srv Service) bool {
	canBeDisabled, ok := srv.(CanBeDisabled)
	return ok && canBeDisabled.IsDisabled()
}

// Priority orders service initialization; higher runs first.
type Priority int

const (
	High Priority = 100
	Low  Priority = 0
)
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce
import (
"context"
"fmt"
"strings"
compute "google.golang.org/api/compute/v1"
clouddns "google.golang.org/api/dns/v1"
"k8s.io/klog/v2"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/resources"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
)
// gceListFn lists one category of GCE resources belonging to the cluster.
type gceListFn func() ([]*resources.Resource, error)

// Resource type identifiers used as the key prefix in the resource map.
const (
	typeInstance             = "Instance"
	typeInstanceTemplate     = "InstanceTemplate"
	typeDisk                 = "Disk"
	typeInstanceGroupManager = "InstanceGroupManager"
	typeTargetPool           = "TargetPool"
	typeFirewallRule         = "FirewallRule"
	typeForwardingRule       = "ForwardingRule"
	typeHTTPHealthcheck      = "HTTP HealthCheck"
	typeAddress              = "Address"
	typeRoute                = "Route"
	typeNetwork              = "Network"
	typeSubnet               = "Subnet"
	typeRouter               = "Router"
	typeDNSRecord            = "DNSRecord"
)

// Maximum number of `-` separated tokens in a name
// Example: nodeport-external-to-node-ipv6
const maxPrefixTokens = 5

// Maximum length of a GCE route name
const maxGCERouteNameLength = 63

// ListResourcesGCE enumerates the GCE resources belonging to clusterName in
// the given region (defaulting to the cloud's region), keyed by "Type:ID".
// Resources already marked Done are filtered out of the result.
func ListResourcesGCE(gceCloud gce.GCECloud, clusterName string, region string) (map[string]*resources.Resource, error) {
	ctx := context.TODO()

	if region == "" {
		region = gceCloud.Region()
	}

	// NOTE: this local map shadows the imported `resources` package for the
	// remainder of the function.
	resources := make(map[string]*resources.Resource)

	d := &clusterDiscoveryGCE{
		cloud:       gceCloud,
		gceCloud:    gceCloud,
		clusterName: clusterName,
	}

	{
		// TODO: Only zones in api.Cluster object, if we have one?
		gceZones, err := d.gceCloud.Compute().Zones().List(ctx, d.gceCloud.Project())
		if err != nil {
			return nil, fmt.Errorf("error listing zones: %v", err)
		}
		for _, gceZone := range gceZones {
			// Keep only zones that belong to the requested region.
			u, err := gce.ParseGoogleCloudURL(gceZone.Region)
			if err != nil {
				return nil, err
			}
			if u.Name != region {
				continue
			}
			d.zones = append(d.zones, gceZone.Name)
		}
		if len(d.zones) == 0 {
			return nil, fmt.Errorf("unable to determine zones in region %q", region)
		}
		klog.Infof("Scanning zones: %v", d.zones)
	}

	listFunctions := []gceListFn{
		d.listGCEInstanceTemplates,
		d.listInstanceGroupManagersAndInstances,
		d.listTargetPools,
		d.listForwardingRules,
		d.listFirewallRules,
		d.listGCEDisks,
		d.listGCEDNSZone,
		// TODO: Find routes via instances (via instance groups)
		d.listAddresses,
		d.listSubnets,
		d.listRouters,
		d.listNetworks,
	}
	for _, fn := range listFunctions {
		resourceTrackers, err := fn()
		if err != nil {
			return nil, err
		}
		for _, t := range resourceTrackers {
			resources[t.Type+":"+t.ID] = t
		}
	}

	// We try to clean up orphaned routes.
	// Routes are discovered last so they can be matched against the
	// resources collected above.
	{
		resourceTrackers, err := d.listRoutes(ctx, resources)
		if err != nil {
			return nil, err
		}
		for _, t := range resourceTrackers {
			resources[t.Type+":"+t.ID] = t
		}
	}

	for k, t := range resources {
		if t.Done {
			delete(resources, k)
		}
	}
	return resources, nil
}
// clusterDiscoveryGCE carries the state shared by the per-category list
// functions: the cloud handles, the cluster name, and cached lookups.
type clusterDiscoveryGCE struct {
	cloud       fi.Cloud
	gceCloud    gce.GCECloud
	clusterName string

	// instanceTemplates caches findInstanceTemplates results.
	instanceTemplates []*compute.InstanceTemplate
	// zones are the zone names discovered for the target region.
	zones []string
}

// findInstanceTemplates returns the cluster's instance templates, caching
// the result on the first call.
func (d *clusterDiscoveryGCE) findInstanceTemplates() ([]*compute.InstanceTemplate, error) {
	if d.instanceTemplates != nil {
		return d.instanceTemplates, nil
	}

	instanceTemplates, err := gce.FindInstanceTemplates(d.gceCloud, d.clusterName)
	if err != nil {
		return nil, err
	}
	d.instanceTemplates = instanceTemplates

	return d.instanceTemplates, nil
}

// listGCEInstanceTemplates returns a resource tracker for every instance
// template owned by the cluster, recording the subnets each one blocks.
func (d *clusterDiscoveryGCE) listGCEInstanceTemplates() ([]*resources.Resource, error) {
	var resourceTrackers []*resources.Resource

	templates, err := d.findInstanceTemplates()
	if err != nil {
		return nil, err
	}
	for _, t := range templates {
		selfLink := t.SelfLink // avoid closure-in-loop go-tcha

		resourceTracker := &resources.Resource{
			Name: t.Name,
			ID:   t.Name,
			Type: typeInstanceTemplate,
			Deleter: func(cloud fi.Cloud, r *resources.Resource) error {
				return gce.DeleteInstanceTemplate(d.gceCloud, selfLink)
			},
			Obj: t,
		}

		// Deleting a subnet must wait for templates that reference it.
		for _, ni := range t.Properties.NetworkInterfaces {
			if ni.Subnetwork != "" {
				resourceTracker.Blocks = append(resourceTracker.Blocks, typeSubnet+":"+gce.LastComponent(ni.Subnetwork))
			}
		}

		klog.V(4).Infof("Found resource: %s", t.SelfLink)
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// listInstanceGroupManagersAndInstances returns resource trackers for every
// managed instance group (MIG) whose template belongs to the cluster, plus
// trackers for each MIG's instances. MIGs using templates not found by
// findInstanceTemplates are skipped as unmanaged.
func (d *clusterDiscoveryGCE) listInstanceGroupManagersAndInstances() ([]*resources.Resource, error) {
	c := d.gceCloud
	project := c.Project()

	var resourceTrackers []*resources.Resource

	// Index the cluster's templates by self-link for ownership checks below.
	instanceTemplates := make(map[string]*compute.InstanceTemplate)
	{
		templates, err := d.findInstanceTemplates()
		if err != nil {
			return nil, err
		}
		for _, t := range templates {
			instanceTemplates[t.SelfLink] = t
		}
	}
	ctx := context.Background()

	for _, zoneName := range d.zones {
		is, err := c.Compute().InstanceGroupManagers().List(ctx, project, zoneName)
		if err != nil {
			return nil, fmt.Errorf("error listing InstanceGroupManagers: %v", err)
		}
		for i := range is {
			mig := is[i] // avoid closure-in-loop go-tcha
			instanceTemplate := instanceTemplates[mig.InstanceTemplate]
			if instanceTemplate == nil {
				klog.V(2).Infof("Ignoring MIG with unmanaged InstanceTemplate: %s", mig.InstanceTemplate)
				continue
			}

			resourceTracker := &resources.Resource{
				Name:    mig.Name,
				ID:      zoneName + "/" + mig.Name,
				Type:    typeInstanceGroupManager,
				Deleter: func(cloud fi.Cloud, r *resources.Resource) error { return gce.DeleteInstanceGroupManager(c, mig) },
				Obj:     mig,
			}

			// Deleting the template must wait for this MIG.
			resourceTracker.Blocks = append(resourceTracker.Blocks, typeInstanceTemplate+":"+instanceTemplate.Name)

			klog.V(4).Infof("Found resource: %s", mig.SelfLink)
			resourceTrackers = append(resourceTrackers, resourceTracker)

			instanceTrackers, err := d.listManagedInstances(mig)
			if err != nil {
				return nil, fmt.Errorf("error listing instances in InstanceGroupManager: %v", err)
			}
			resourceTrackers = append(resourceTrackers, instanceTrackers...)
		}
	}

	return resourceTrackers, nil
}
// listManagedInstances builds Resource trackers (ID "<zone>/<name>") for the
// instances run by the given InstanceGroupManager.
func (d *clusterDiscoveryGCE) listManagedInstances(igm *compute.InstanceGroupManager) ([]*resources.Resource, error) {
	c := d.gceCloud

	var resourceTrackers []*resources.Resource

	zoneName := gce.LastComponent(igm.Zone)

	instances, err := gce.ListManagedInstances(c, igm)
	if err != nil {
		return nil, err
	}
	for _, i := range instances {
		url := i.Instance // avoid closure-in-loop go-tcha
		name := gce.LastComponent(url)

		resourceTracker := &resources.Resource{
			Name: name,
			ID:   zoneName + "/" + name,
			Type: typeInstance,
			Deleter: func(cloud fi.Cloud, tracker *resources.Resource) error {
				return gce.DeleteInstance(c, url)
			},
			Dumper: DumpManagedInstance,
			Obj:    i,
		}

		// We don't block deletion of the instance group manager
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// findGCEDisks finds all Disks that are associated with the current cluster.
// It matches them by looking for the cluster label
// (gce.GceLabelNameKubernetesCluster) with the safe cluster name as value.
func (d *clusterDiscoveryGCE) findGCEDisks() ([]*compute.Disk, error) {
	c := d.gceCloud
	clusterTag := gce.SafeClusterName(d.clusterName)

	var matches []*compute.Disk

	ctx := context.Background()

	// TODO: Push down tag filter?
	diskLists, err := c.Compute().Disks().AggregatedList(ctx, c.Project())
	if err != nil {
		return nil, fmt.Errorf("error listing disks: %v", err)
	}
	for _, list := range diskLists {
		// Note: the loop variable is named disk (not d) so it does not
		// shadow the method receiver, as the previous version did.
		for _, disk := range list.Disks {
			// A map can hold the label key at most once, so a single lookup
			// is equivalent to the previous scan over all labels.
			if v, ok := disk.Labels[gce.GceLabelNameKubernetesCluster]; !ok || v != clusterTag {
				continue
			}
			matches = append(matches, disk)
		}
	}

	return matches, nil
}
// listGCEDisks builds Resource trackers for the cluster's disks. A disk's
// deletion is blocked by the instances currently using it.
func (d *clusterDiscoveryGCE) listGCEDisks() ([]*resources.Resource, error) {
	var resourceTrackers []*resources.Resource

	disks, err := d.findGCEDisks()
	if err != nil {
		return nil, err
	}
	for _, t := range disks {
		resourceTracker := &resources.Resource{
			Name:    t.Name,
			ID:      t.Name,
			Type:    typeDisk,
			Deleter: deleteGCEDisk,
			Obj:     t,
		}

		// Wait for the attached instances ("<zone>/<name>") to go first.
		for _, u := range t.Users {
			resourceTracker.Blocked = append(resourceTracker.Blocked, typeInstance+":"+gce.LastComponent(t.Zone)+"/"+gce.LastComponent(u))
		}

		klog.V(4).Infof("Found resource: %s", t.SelfLink)
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// deleteGCEDisk deletes the GCE Disk held in r, treating "not found" as
// already deleted, and waits for the delete operation to complete.
func deleteGCEDisk(cloud fi.Cloud, r *resources.Resource) error {
	gceCloud := cloud.(gce.GCECloud)
	disk := r.Obj.(*compute.Disk)
	klog.V(2).Infof("Deleting GCE Disk %s", disk.SelfLink)

	parsed, err := gce.ParseGoogleCloudURL(disk.SelfLink)
	if err != nil {
		return err
	}

	op, err := gceCloud.Compute().Disks().Delete(parsed.Project, parsed.Zone, parsed.Name)
	if err == nil {
		return gceCloud.WaitForOp(op)
	}
	if gce.IsNotFound(err) {
		klog.Infof("disk not found, assuming deleted: %q", disk.SelfLink)
		return nil
	}
	return fmt.Errorf("error deleting disk %s: %v", disk.SelfLink, err)
}
// listTargetPools builds Resource trackers for the regional TargetPools
// whose names match the cluster naming convention.
func (d *clusterDiscoveryGCE) listTargetPools() ([]*resources.Resource, error) {
	c := d.gceCloud

	var resourceTrackers []*resources.Resource

	ctx := context.Background()

	tps, err := c.Compute().TargetPools().List(ctx, c.Project(), c.Region())
	if err != nil {
		return nil, fmt.Errorf("error listing TargetPools: %v", err)
	}
	for _, tp := range tps {
		if !d.matchesClusterName(tp.Name) {
			continue
		}
		resourceTracker := &resources.Resource{
			Name:    tp.Name,
			ID:      tp.Name,
			Type:    typeTargetPool,
			Deleter: deleteTargetPool,
			Obj:     tp,
		}
		klog.V(4).Infof("Found resource: %s", tp.SelfLink)
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// deleteTargetPool deletes the GCE TargetPool held in r, treating "not
// found" as already deleted, and waits for the operation to complete.
func deleteTargetPool(cloud fi.Cloud, r *resources.Resource) error {
	c := cloud.(gce.GCECloud)
	t := r.Obj.(*compute.TargetPool)

	klog.V(2).Infof("Deleting GCE TargetPool %s", t.SelfLink)
	u, err := gce.ParseGoogleCloudURL(t.SelfLink)
	if err != nil {
		return err
	}

	op, err := c.Compute().TargetPools().Delete(u.Project, u.Region, u.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("TargetPool not found, assuming deleted: %q", t.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting TargetPool %s: %v", t.SelfLink, err)
	}

	return c.WaitForOp(op)
}
// listForwardingRules builds Resource trackers for regional ForwardingRules
// matching the cluster name. Each rule blocks deletion of its target pool
// and of its IP address.
func (d *clusterDiscoveryGCE) listForwardingRules() ([]*resources.Resource, error) {
	c := d.gceCloud

	var resourceTrackers []*resources.Resource

	ctx := context.Background()

	frs, err := c.Compute().ForwardingRules().List(ctx, c.Project(), c.Region())
	if err != nil {
		return nil, fmt.Errorf("error listing ForwardingRules: %v", err)
	}
	for _, fr := range frs {
		if !d.matchesClusterName(fr.Name) {
			continue
		}

		resourceTracker := &resources.Resource{
			Name:    fr.Name,
			ID:      fr.Name,
			Type:    typeForwardingRule,
			Deleter: deleteForwardingRule,
			Obj:     fr,
		}

		if fr.Target != "" {
			resourceTracker.Blocks = append(resourceTracker.Blocks, typeTargetPool+":"+gce.LastComponent(fr.Target))
		}
		if fr.IPAddress != "" {
			resourceTracker.Blocks = append(resourceTracker.Blocks, typeAddress+":"+gce.LastComponent(fr.IPAddress))
		}

		klog.V(4).Infof("Found resource: %s", fr.SelfLink)
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// deleteForwardingRule deletes the GCE ForwardingRule held in r, treating
// "not found" as already deleted, and waits for the operation to complete.
func deleteForwardingRule(cloud fi.Cloud, r *resources.Resource) error {
	c := cloud.(gce.GCECloud)
	t := r.Obj.(*compute.ForwardingRule)

	klog.V(2).Infof("Deleting GCE ForwardingRule %s", t.SelfLink)
	u, err := gce.ParseGoogleCloudURL(t.SelfLink)
	if err != nil {
		return err
	}

	op, err := c.Compute().ForwardingRules().Delete(u.Project, u.Region, u.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("ForwardingRule not found, assuming deleted: %q", t.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting ForwardingRule %s: %v", t.SelfLink, err)
	}

	return c.WaitForOp(op)
}
// listFirewallRules discovers Firewall objects for the cluster.
// Besides rules named for the cluster, it also picks up rules created by the
// in-cluster cloud-provider ("k8s-..." names); for load-balancer rules it
// additionally discovers the associated ForwardingRule, TargetPool, and HTTP
// health check so they get cleaned up too.
func (d *clusterDiscoveryGCE) listFirewallRules() ([]*resources.Resource, error) {
	c := d.gceCloud

	var resourceTrackers []*resources.Resource

	ctx := context.Background()

	frs, err := c.Compute().Firewalls().List(ctx, c.Project())
	if err != nil {
		return nil, fmt.Errorf("error listing FirewallRules: %v", err)
	}
	for _, fr := range frs {
		if !d.matchesClusterNameMultipart(fr.Name, maxPrefixTokens) && !strings.HasPrefix(fr.Name, "k8s-") {
			continue
		}

		// Only consider rules that target instances tagged for this cluster.
		foundMatchingTarget := false
		tagPrefix := gce.SafeClusterName(d.clusterName) + "-"
		for _, target := range fr.TargetTags {
			if strings.HasPrefix(target, tagPrefix) {
				foundMatchingTarget = true
				break
			}
		}
		if !foundMatchingTarget {
			continue
		}

		// find the Kubernetes LoadBalancer
		if strings.HasPrefix(fr.Name, "k8s-fw-") {
			// TrimPrefix (rather than ReplaceAll) strips only the leading
			// marker, so a name containing "k8s-fw-" elsewhere is not mangled.
			name := strings.TrimPrefix(fr.Name, "k8s-fw-")

			// lbRule is deliberately not named fr, to avoid shadowing the
			// firewall rule from the outer loop.
			lbRule, err := c.Compute().ForwardingRules().Get(c.Project(), c.Region(), name)
			if err != nil {
				return nil, fmt.Errorf("error get ForwardingRule: %v", err)
			}
			frResourceTracker := &resources.Resource{
				Name:    lbRule.Name,
				ID:      lbRule.Name,
				Type:    typeForwardingRule,
				Deleter: deleteForwardingRule,
				Obj:     lbRule,
			}
			if lbRule.Target != "" {
				frResourceTracker.Blocks = append(frResourceTracker.Blocks, typeTargetPool+":"+gce.LastComponent(lbRule.Target))
			}
			resourceTrackers = append(resourceTrackers, frResourceTracker)

			tp, err := c.Compute().TargetPools().Get(c.Project(), c.Region(), name)
			if err != nil {
				return nil, fmt.Errorf("error get TargetPool: %v", err)
			}
			tpResourceTracker := &resources.Resource{
				Name:    tp.Name,
				ID:      tp.Name,
				Type:    typeTargetPool,
				Deleter: deleteTargetPool,
				Obj:     tp,
			}
			resourceTrackers = append(resourceTrackers, tpResourceTracker)
		}

		// l4 level healthchecks
		if strings.HasPrefix(fr.Name, "k8s-") && strings.HasSuffix(fr.Name, "-http-hc") {
			// Strip the guarded prefix and suffix only, not every occurrence.
			name := strings.TrimSuffix(strings.TrimPrefix(fr.Name, "k8s-"), "-http-hc")
			hc, err := c.Compute().HTTPHealthChecks().Get(c.Project(), name)
			if err != nil {
				return nil, fmt.Errorf("error get HTTPHealthCheck: %v", err)
			}
			hcResourceTracker := &resources.Resource{
				Name:    hc.Name,
				ID:      hc.Name,
				Type:    typeHTTPHealthcheck,
				Deleter: deleteHTTPHealthCheck,
				Obj:     hc,
			}
			resourceTrackers = append(resourceTrackers, hcResourceTracker)
		}

		resourceTracker := &resources.Resource{
			Name:    fr.Name,
			ID:      fr.Name,
			Type:    typeFirewallRule,
			Deleter: deleteFirewallRule,
			Obj:     fr,
		}
		klog.V(4).Infof("Found resource: %s", fr.SelfLink)
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// deleteHTTPHealthCheck is the helper function to delete a Resource for a HTTP health check object.
// It treats "not found" as already deleted and waits for the operation.
func deleteHTTPHealthCheck(cloud fi.Cloud, r *resources.Resource) error {
	c := cloud.(gce.GCECloud)
	t := r.Obj.(*compute.HttpHealthCheck)

	klog.V(2).Infof("Deleting GCE HTTP HealthCheck %s", t.SelfLink)
	u, err := gce.ParseGoogleCloudURL(t.SelfLink)
	if err != nil {
		return err
	}

	op, err := c.Compute().HTTPHealthChecks().Delete(u.Project, u.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("HTTP HealthCheck not found, assuming deleted: %q", t.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting HTTP HealthCheck %s: %v", t.SelfLink, err)
	}

	return c.WaitForOp(op)
}
// deleteFirewallRule is the helper function to delete a Resource for a Firewall object.
// It treats "not found" as already deleted and waits for the operation.
func deleteFirewallRule(cloud fi.Cloud, r *resources.Resource) error {
	c := cloud.(gce.GCECloud)
	t := r.Obj.(*compute.Firewall)

	klog.V(2).Infof("Deleting GCE FirewallRule %s", t.SelfLink)
	u, err := gce.ParseGoogleCloudURL(t.SelfLink)
	if err != nil {
		return err
	}

	op, err := c.Compute().Firewalls().Delete(u.Project, u.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("FirewallRule not found, assuming deleted: %q", t.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting FirewallRule %s: %v", t.SelfLink, err)
	}

	return c.WaitForOp(op)
}
// listRoutes finds cluster Routes that should be removed: either GCE warns
// that the next-hop instance is gone, or the next-hop instance is one we are
// about to delete. Route deletion is blocked on all instance deletions so
// the control plane cannot re-add them mid-teardown.
func (d *clusterDiscoveryGCE) listRoutes(ctx context.Context, resourceMap map[string]*resources.Resource) ([]*resources.Resource, error) {
	c := d.gceCloud

	var resourceTrackers []*resources.Resource

	// Index the instances scheduled for deletion by "<zone>/<name>".
	instancesToDelete := make(map[string]*resources.Resource)
	for _, resource := range resourceMap {
		if resource.Type == typeInstance {
			instancesToDelete[resource.ID] = resource
		}
	}

	// TODO: Push-down prefix?
	routes, err := c.Compute().Routes().List(ctx, c.Project())
	if err != nil {
		return nil, fmt.Errorf("error listing Routes: %w", err)
	}
	for _, r := range routes {
		if !d.matchesClusterNameWithUUID(r.Name, maxGCERouteNameLength) {
			continue
		}
		remove := false
		for _, w := range r.Warnings {
			switch w.Code {
			case "NEXT_HOP_INSTANCE_NOT_FOUND":
				remove = true
			default:
				klog.Infof("Unknown warning on route %q: %q", r.Name, w.Code)
			}
		}
		if r.NextHopInstance != "" {
			u, err := gce.ParseGoogleCloudURL(r.NextHopInstance)
			if err != nil {
				// Fix: the previous code fell through and dereferenced u,
				// which is not valid after a parse error; skip the lookup.
				klog.Warningf("error parsing URL for NextHopInstance=%q", r.NextHopInstance)
			} else if instancesToDelete[u.Zone+"/"+u.Name] != nil {
				remove = true
			}
		}
		if remove {
			resourceTracker := &resources.Resource{
				Name:    r.Name,
				ID:      r.Name,
				Type:    typeRoute,
				Deleter: deleteRoute,
				Obj:     r,
			}

			// To avoid race conditions where the control-plane re-adds the routes, we delete routes
			// only after we have deleted all the instances.
			for _, instance := range instancesToDelete {
				resourceTracker.Blocked = append(resourceTracker.Blocked, typeInstance+":"+instance.ID)
			}

			klog.V(4).Infof("Found resource: %s", r.SelfLink)
			resourceTrackers = append(resourceTrackers, resourceTracker)
		}
	}
	return resourceTrackers, nil
}
// deleteRoute deletes the GCE Route held in r, treating "not found" as
// already deleted, and waits for the operation to complete.
func deleteRoute(cloud fi.Cloud, r *resources.Resource) error {
	c := cloud.(gce.GCECloud)
	t := r.Obj.(*compute.Route)

	klog.V(2).Infof("Deleting GCE Route %s", t.SelfLink)
	u, err := gce.ParseGoogleCloudURL(t.SelfLink)
	if err != nil {
		return err
	}

	op, err := c.Compute().Routes().Delete(u.Project, u.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("Route not found, assuming deleted: %q", t.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting Route %s: %v", t.SelfLink, err)
	}

	return c.WaitForOp(op)
}
// listAddresses builds Resource trackers for regional Addresses whose names
// match the cluster naming convention.
func (d *clusterDiscoveryGCE) listAddresses() ([]*resources.Resource, error) {
	c := d.gceCloud

	var resourceTrackers []*resources.Resource

	ctx := context.Background()

	addrs, err := c.Compute().Addresses().List(ctx, c.Project(), c.Region())
	if err != nil {
		return nil, fmt.Errorf("error listing Addresses: %v", err)
	}
	for _, a := range addrs {
		if !d.matchesClusterName(a.Name) {
			klog.V(8).Infof("Skipping Address with name %q", a.Name)
			continue
		}

		resourceTracker := &resources.Resource{
			Name:    a.Name,
			ID:      a.Name,
			Type:    typeAddress,
			Deleter: deleteAddress,
			Obj:     a,
		}

		klog.V(4).Infof("Found resource: %s", a.SelfLink)
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// deleteAddress deletes the GCE Address held in r; a missing address is
// treated as already deleted. Blocks until the delete operation finishes.
func deleteAddress(cloud fi.Cloud, r *resources.Resource) error {
	gceCloud := cloud.(gce.GCECloud)
	address := r.Obj.(*compute.Address)
	klog.V(2).Infof("Deleting GCE Address %s", address.SelfLink)

	parsed, err := gce.ParseGoogleCloudURL(address.SelfLink)
	if err != nil {
		return err
	}

	op, err := gceCloud.Compute().Addresses().Delete(parsed.Project, parsed.Region, parsed.Name)
	if err == nil {
		return gceCloud.WaitForOp(op)
	}
	if gce.IsNotFound(err) {
		klog.Infof("Address not found, assuming deleted: %q", address.SelfLink)
		return nil
	}
	return fmt.Errorf("error deleting Address %s: %v", address.SelfLink, err)
}
// listSubnets builds Resource trackers for the cluster's subnetworks. A
// subnet is only included when it matches the cluster name AND is referenced
// by one of the cluster's instance templates.
func (d *clusterDiscoveryGCE) listSubnets() ([]*resources.Resource, error) {
	// Templates are very accurate because of the metadata, so use those as the sanity check
	templates, err := d.findInstanceTemplates()
	if err != nil {
		return nil, err
	}
	subnetworkUrls := make(map[string]bool)
	for _, t := range templates {
		for _, ni := range t.Properties.NetworkInterfaces {
			if ni.Subnetwork != "" {
				subnetworkUrls[ni.Subnetwork] = true
			}
		}
	}

	c := d.gceCloud
	var resourceTrackers []*resources.Resource

	ctx := context.Background()

	subnets, err := c.Compute().Subnetworks().List(ctx, c.Project(), c.Region())
	if err != nil {
		return nil, fmt.Errorf("error listing subnetworks: %v", err)
	}
	for _, o := range subnets {
		if !d.matchesClusterName(o.Name) {
			klog.V(8).Infof("skipping Subnet with name %q", o.Name)
			continue
		}
		if !subnetworkUrls[o.SelfLink] {
			klog.Warningf("skipping subnetwork %q because it didn't match any instance template", o.SelfLink)
			continue
		}
		resourceTracker := &resources.Resource{
			Name:    o.Name,
			ID:      o.Name,
			Type:    typeSubnet,
			Deleter: deleteSubnet,
			Obj:     o,
			Dumper:  DumpSubnetwork,
		}
		// The subnet blocks deletion of its parent network.
		resourceTracker.Blocks = append(resourceTracker.Blocks, typeNetwork+":"+gce.LastComponent(o.Network))
		klog.V(4).Infof("found resource: %s", o.SelfLink)
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// deleteSubnet deletes the GCE Subnetwork held in r, treating "not found"
// as already deleted, and waits for the operation to complete.
func deleteSubnet(cloud fi.Cloud, r *resources.Resource) error {
	c := cloud.(gce.GCECloud)
	o := r.Obj.(*compute.Subnetwork)

	klog.V(2).Infof("deleting GCE subnetwork %s", o.SelfLink)
	u, err := gce.ParseGoogleCloudURL(o.SelfLink)
	if err != nil {
		return err
	}

	op, err := c.Compute().Subnetworks().Delete(u.Project, u.Region, u.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("subnetwork not found, assuming deleted: %q", o.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting subnetwork %s: %v", o.SelfLink, err)
	}

	return c.WaitForOp(op)
}
// listRouters builds Resource trackers for regional Routers whose names
// match the cluster naming convention.
func (d *clusterDiscoveryGCE) listRouters() ([]*resources.Resource, error) {
	c := d.gceCloud

	var resourceTrackers []*resources.Resource

	ctx := context.Background()

	routers, err := c.Compute().Routers().List(ctx, c.Project(), c.Region())
	if err != nil {
		return nil, fmt.Errorf("error listing routers: %v", err)
	}
	for _, o := range routers {
		if !d.matchesClusterName(o.Name) {
			klog.V(8).Infof("skipping Router with name %q", o.Name)
			continue
		}

		resourceTracker := &resources.Resource{
			Name:    o.Name,
			ID:      o.Name,
			Type:    typeRouter,
			Deleter: deleteRouter,
			Obj:     o,
		}

		klog.V(4).Infof("found resource: %s", o.SelfLink)
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// deleteRouter deletes the GCE Router held in r, treating "not found" as
// already deleted, and waits for the operation to complete.
func deleteRouter(cloud fi.Cloud, r *resources.Resource) error {
	c := cloud.(gce.GCECloud)
	o := r.Obj.(*compute.Router)

	klog.V(2).Infof("deleting GCE router %s", o.SelfLink)
	u, err := gce.ParseGoogleCloudURL(o.SelfLink)
	if err != nil {
		return err
	}

	op, err := c.Compute().Routers().Delete(u.Project, u.Region, u.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("router not found, assuming deleted: %q", o.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting router %s: %v", o.SelfLink, err)
	}

	return c.WaitForOp(op)
}
// listNetworks builds a Resource tracker for the cluster's network. The
// network must be named exactly for the (safe) cluster name AND be
// referenced by one of the cluster's instance templates.
func (d *clusterDiscoveryGCE) listNetworks() ([]*resources.Resource, error) {
	// Templates are very accurate because of the metadata, so use those as the sanity check
	templates, err := d.findInstanceTemplates()
	if err != nil {
		return nil, err
	}
	networkUrls := make(map[string]bool)
	for _, t := range templates {
		for _, ni := range t.Properties.NetworkInterfaces {
			if ni.Network != "" {
				networkUrls[ni.Network] = true
			}
		}
	}

	c := d.gceCloud
	var resourceTrackers []*resources.Resource

	// Unlike the other list calls in this file, this one takes no ctx.
	networks, err := c.Compute().Networks().List(c.Project())
	if err != nil {
		return nil, fmt.Errorf("error listing networks: %v", err)
	}
	for _, o := range networks.Items {
		if o.Name != gce.SafeClusterName(d.clusterName) {
			klog.V(8).Infof("skipping network with name %q", o.Name)
			continue
		}
		if !networkUrls[o.SelfLink] {
			klog.Warningf("skipping network %q because it didn't match any instance template", o.SelfLink)
			continue
		}
		resourceTracker := &resources.Resource{
			Name:    o.Name,
			ID:      o.Name,
			Type:    typeNetwork,
			Deleter: deleteNetwork,
			Obj:     o,
			Dumper:  DumpNetwork,
		}
		klog.V(4).Infof("found resource: %s", o.SelfLink)
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// deleteNetwork deletes the GCE Network held in r, treating "not found" as
// already deleted, and waits for the operation to complete.
func deleteNetwork(cloud fi.Cloud, r *resources.Resource) error {
	c := cloud.(gce.GCECloud)
	o := r.Obj.(*compute.Network)

	klog.V(2).Infof("deleting GCE network %s", o.SelfLink)
	u, err := gce.ParseGoogleCloudURL(o.SelfLink)
	if err != nil {
		return err
	}

	op, err := c.Compute().Networks().Delete(u.Project, u.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("network not found, assuming deleted: %q", o.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting network %s: %v", o.SelfLink, err)
	}

	return c.WaitForOp(op)
}
// matchesClusterName reports whether name could have been generated by this
// cluster, considering up to 8 "-"-separated prefix parts.
func (d *clusterDiscoveryGCE) matchesClusterName(name string) bool {
	// Names could have hyphens in them, so really there is no limit;
	// 8 hyphens feels like enough for any "reasonable" name.
	const maxParts = 8
	return d.matchesClusterNameMultipart(name, maxParts)
}
// matchesClusterNameMultipart checks if the name could have been generated
// by our cluster, considering every "-"-joined prefix of up to maxParts
// tokens as a candidate object name.
func (d *clusterDiscoveryGCE) matchesClusterNameMultipart(name string, maxParts int) bool {
	tokens := strings.Split(name, "-")

	// Consider at most maxParts tokens, bounded by how many there are.
	limit := maxParts
	if len(tokens) < limit {
		limit = len(tokens)
	}

	for i := 1; i <= limit; i++ {
		prefix := strings.Join(tokens[:i], "-")
		if prefix == "" {
			continue
		}
		if name == gce.SafeObjectName(prefix, d.clusterName) {
			return true
		}
	}
	return false
}
// matchesClusterNameWithUUID checks if the name is the clusterName with a UUID on the end.
// This is used by GCE routes (in "classic" mode)
func (d *clusterDiscoveryGCE) matchesClusterNameWithUUID(name string, maxLength int) bool {
	const uuidLength = 36 // e.g. 51a343e2-c285-4e73-b933-18a6ea44c3e4
	// Format is <cluster-name>-<uuid>
	// <cluster-name> is truncated to ensure it fits into the GCE max length
	if len(name) < uuidLength {
		return false
	}
	// Strip the trailing UUID; what remains should be the (possibly
	// truncated) cluster prefix, including its trailing "-".
	withoutUUID := name[:len(name)-uuidLength]
	clusterPrefix := gce.SafeClusterName(d.clusterName) + "-"
	if len(clusterPrefix) > maxLength-uuidLength {
		// Re-truncate the same way the name generator would have: leave
		// room for the UUID plus the "-" separator.
		clusterPrefix = gce.SafeClusterName(d.clusterName)[:maxLength-uuidLength-1] + "-"
	}
	return clusterPrefix == withoutUUID
}
// clusterDNSName returns the cluster's DNS name as a fully-qualified
// (dot-terminated) domain name.
func (d *clusterDiscoveryGCE) clusterDNSName() string {
	return d.clusterName + "."
}
// isKopsManagedDNSName reports whether name is one of the DNS records kops
// manages for the cluster (api, api.internal, bastion).
func (d *clusterDiscoveryGCE) isKopsManagedDNSName(name string) bool {
	suffix := "." + d.clusterDNSName()
	for _, label := range []string{`api`, `api.internal`, `bastion`} {
		if name == label+suffix {
			return true
		}
	}
	return false
}
// listGCEDNSZone builds Resource trackers for kops-managed A records in
// CloudDNS zones whose DNS name is a suffix of the cluster's domain.
// Gossip clusters have no DNS to clean up, so nil is returned.
func (d *clusterDiscoveryGCE) listGCEDNSZone() ([]*resources.Resource, error) {
	if dns.IsGossipHostname(d.clusterName) {
		return nil, nil
	}
	var resourceTrackers []*resources.Resource
	managedZones, err := d.gceCloud.CloudDNS().ManagedZones().List(d.gceCloud.Project())
	if err != nil {
		return nil, fmt.Errorf("error getting GCE DNS zones %v", err)
	}
	for _, zone := range managedZones {
		// Only zones that could contain the cluster's records.
		if !strings.HasSuffix(d.clusterDNSName(), zone.DnsName) {
			continue
		}
		rrsets, err := d.gceCloud.CloudDNS().ResourceRecordSets().List(d.gceCloud.Project(), zone.Name)
		if err != nil {
			return nil, fmt.Errorf("error getting GCE DNS zone data %v", err)
		}
		for _, record := range rrsets {
			// adapted from AWS implementation
			if record.Type != "A" {
				continue
			}
			if d.isKopsManagedDNSName(record.Name) {
				// Records are deleted as a group, keyed by zone name.
				resource := resources.Resource{
					Name:         record.Name,
					ID:           record.Name,
					Type:         typeDNSRecord,
					GroupDeleter: deleteDNSRecords,
					GroupKey:     zone.Name,
					Obj:          record,
				}
				resourceTrackers = append(resourceTrackers, &resource)
			}
		}
	}
	return resourceTrackers, nil
}
// deleteDNSRecords deletes a group of DNS resource record sets — all sharing
// a GroupKey, which is the CloudDNS managed-zone name — in one change.
func deleteDNSRecords(cloud fi.Cloud, r []*resources.Resource) error {
	c := cloud.(gce.GCECloud)

	// Guard the empty group: the old code would otherwise submit a change
	// with no deletions and an empty zone name.
	if len(r) == 0 {
		return nil
	}

	var records []*clouddns.ResourceRecordSet
	var zoneName string
	for _, resource := range r {
		rrs := resource.Obj.(*clouddns.ResourceRecordSet)
		// Every member of the group carries the same GroupKey (zone name).
		zoneName = resource.GroupKey
		records = append(records, rrs)
	}

	change := clouddns.Change{Deletions: records, Kind: "dns#change", IsServing: true}
	_, err := c.CloudDNS().Changes().Create(c.Project(), zoneName, &change)
	if err != nil {
		return fmt.Errorf("error deleting GCE DNS resource record set %v", err)
	}
	return nil
}
gce: clean up networking objects by reference

We try to avoid cleaning up resources by name matching alone, and prefer
checking for references to (e.g. targeting of) a known resource, such as an
instance group.
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce
import (
"context"
"fmt"
"strings"
compute "google.golang.org/api/compute/v1"
clouddns "google.golang.org/api/dns/v1"
"k8s.io/klog/v2"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/resources"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
)
// gceListFn lists one category of GCE resources belonging to the cluster.
type gceListFn func() ([]*resources.Resource, error)

// Resource type names; also used as the "<type>" half of the "<type>:<id>"
// keys that link resources in the Blocks/Blocked dependency lists.
const (
	typeInstance             = "Instance"
	typeInstanceTemplate     = "InstanceTemplate"
	typeDisk                 = "Disk"
	typeInstanceGroupManager = "InstanceGroupManager"
	typeTargetPool           = "TargetPool"
	typeFirewallRule         = "FirewallRule"
	typeForwardingRule       = "ForwardingRule"
	typeHTTPHealthcheck      = "HTTP HealthCheck"
	typeAddress              = "Address"
	typeRoute                = "Route"
	typeNetwork              = "Network"
	typeSubnet               = "Subnet"
	typeRouter               = "Router"
	typeDNSRecord            = "DNSRecord"
)

// Maximum number of `-` separated tokens in a name
// Example: nodeport-external-to-node-ipv6
const maxPrefixTokens = 5

// Maximum length of a GCE route name
const maxGCERouteNameLength = 63
// ListResourcesGCE discovers all GCE resources belonging to clusterName in
// the given region (defaulting to the cloud's region). The result is keyed
// by "<type>:<id>"; resources already marked Done are filtered out.
func ListResourcesGCE(gceCloud gce.GCECloud, clusterName string, region string) (map[string]*resources.Resource, error) {
	ctx := context.TODO()

	if region == "" {
		region = gceCloud.Region()
	}

	resources := make(map[string]*resources.Resource)

	d := &clusterDiscoveryGCE{
		cloud:       gceCloud,
		gceCloud:    gceCloud,
		clusterName: clusterName,
	}

	{
		// Resolve the zone names belonging to the target region.
		// TODO: Only zones in api.Cluster object, if we have one?
		gceZones, err := d.gceCloud.Compute().Zones().List(ctx, d.gceCloud.Project())
		if err != nil {
			return nil, fmt.Errorf("error listing zones: %v", err)
		}
		for _, gceZone := range gceZones {
			u, err := gce.ParseGoogleCloudURL(gceZone.Region)
			if err != nil {
				return nil, err
			}
			if u.Name != region {
				continue
			}
			d.zones = append(d.zones, gceZone.Name)
		}
		if len(d.zones) == 0 {
			return nil, fmt.Errorf("unable to determine zones in region %q", region)
		}
		klog.Infof("Scanning zones: %v", d.zones)
	}

	listFunctions := []gceListFn{
		d.listGCEInstanceTemplates,
		d.listInstanceGroupManagersAndInstances,
		d.listTargetPools,
		d.listForwardingRules,
		d.listFirewallRules,
		d.listGCEDisks,
		d.listGCEDNSZone,
		// TODO: Find routes via instances (via instance groups)
		d.listAddresses,
		d.listSubnets,
		d.listRouters,
		d.listNetworks,
	}
	for _, fn := range listFunctions {
		resourceTrackers, err := fn()
		if err != nil {
			return nil, err
		}
		for _, t := range resourceTrackers {
			resources[t.Type+":"+t.ID] = t
		}
	}

	// We try to clean up orphaned routes.
	// This runs last because it inspects the instances discovered above.
	{
		resourceTrackers, err := d.listRoutes(ctx, resources)
		if err != nil {
			return nil, err
		}
		for _, t := range resourceTrackers {
			resources[t.Type+":"+t.ID] = t
		}
	}

	// Drop anything already processed.
	for k, t := range resources {
		if t.Done {
			delete(resources, k)
		}
	}
	return resources, nil
}
// clusterDiscoveryGCE carries the state used while discovering one
// cluster's GCE resources.
type clusterDiscoveryGCE struct {
	cloud       fi.Cloud
	gceCloud    gce.GCECloud
	clusterName string

	// instanceTemplates caches the result of findInstanceTemplates.
	instanceTemplates []*compute.InstanceTemplate

	// zones holds the names of the zones in the cluster's region.
	zones []string
}
// findInstanceTemplates returns the cluster's instance templates, hitting
// GCE only on the first call and the cache thereafter.
func (d *clusterDiscoveryGCE) findInstanceTemplates() ([]*compute.InstanceTemplate, error) {
	// Populate the cache lazily.
	if d.instanceTemplates == nil {
		found, err := gce.FindInstanceTemplates(d.gceCloud, d.clusterName)
		if err != nil {
			return nil, err
		}
		d.instanceTemplates = found
	}
	return d.instanceTemplates, nil
}
// listGCEInstanceTemplates builds Resource trackers for the cluster's
// instance templates; each blocks deletion of the subnets it references.
func (d *clusterDiscoveryGCE) listGCEInstanceTemplates() ([]*resources.Resource, error) {
	var resourceTrackers []*resources.Resource

	templates, err := d.findInstanceTemplates()
	if err != nil {
		return nil, err
	}
	for _, t := range templates {
		selfLink := t.SelfLink // avoid closure-in-loop go-tcha

		resourceTracker := &resources.Resource{
			Name: t.Name,
			ID:   t.Name,
			Type: typeInstanceTemplate,
			Deleter: func(cloud fi.Cloud, r *resources.Resource) error {
				return gce.DeleteInstanceTemplate(d.gceCloud, selfLink)
			},
			Obj: t,
		}

		// Referenced subnets must wait for this template to be deleted.
		for _, ni := range t.Properties.NetworkInterfaces {
			if ni.Subnetwork != "" {
				resourceTracker.Blocks = append(resourceTracker.Blocks, typeSubnet+":"+gce.LastComponent(ni.Subnetwork))
			}
		}

		klog.V(4).Infof("Found resource: %s", t.SelfLink)
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// listInstanceGroupManagersAndInstances builds trackers for the cluster's
// InstanceGroupManagers (matched by their use of a cluster-owned template)
// and for the instances they run.
func (d *clusterDiscoveryGCE) listInstanceGroupManagersAndInstances() ([]*resources.Resource, error) {
	c := d.gceCloud
	project := c.Project()

	var resourceTrackers []*resources.Resource

	// Index the cluster's templates by self-link for MIG matching.
	instanceTemplates := make(map[string]*compute.InstanceTemplate)
	{
		templates, err := d.findInstanceTemplates()
		if err != nil {
			return nil, err
		}
		for _, t := range templates {
			instanceTemplates[t.SelfLink] = t
		}
	}

	ctx := context.Background()

	for _, zoneName := range d.zones {
		is, err := c.Compute().InstanceGroupManagers().List(ctx, project, zoneName)
		if err != nil {
			return nil, fmt.Errorf("error listing InstanceGroupManagers: %v", err)
		}
		for i := range is {
			mig := is[i] // avoid closure-in-loop go-tcha

			// MIGs using templates we don't own aren't ours to delete.
			instanceTemplate := instanceTemplates[mig.InstanceTemplate]
			if instanceTemplate == nil {
				klog.V(2).Infof("Ignoring MIG with unmanaged InstanceTemplate: %s", mig.InstanceTemplate)
				continue
			}

			resourceTracker := &resources.Resource{
				Name:    mig.Name,
				ID:      zoneName + "/" + mig.Name,
				Type:    typeInstanceGroupManager,
				Deleter: func(cloud fi.Cloud, r *resources.Resource) error { return gce.DeleteInstanceGroupManager(c, mig) },
				Obj:     mig,
			}

			// The template must outlast its manager.
			resourceTracker.Blocks = append(resourceTracker.Blocks, typeInstanceTemplate+":"+instanceTemplate.Name)

			klog.V(4).Infof("Found resource: %s", mig.SelfLink)
			resourceTrackers = append(resourceTrackers, resourceTracker)

			instanceTrackers, err := d.listManagedInstances(mig)
			if err != nil {
				return nil, fmt.Errorf("error listing instances in InstanceGroupManager: %v", err)
			}
			resourceTrackers = append(resourceTrackers, instanceTrackers...)
		}
	}

	return resourceTrackers, nil
}
// listManagedInstances builds Resource trackers (ID "<zone>/<name>") for the
// instances managed by the given InstanceGroupManager.
func (d *clusterDiscoveryGCE) listManagedInstances(igm *compute.InstanceGroupManager) ([]*resources.Resource, error) {
	c := d.gceCloud

	var resourceTrackers []*resources.Resource

	zoneName := gce.LastComponent(igm.Zone)

	instances, err := gce.ListManagedInstances(c, igm)
	if err != nil {
		return nil, err
	}
	for _, i := range instances {
		url := i.Instance // avoid closure-in-loop go-tcha
		name := gce.LastComponent(url)

		resourceTracker := &resources.Resource{
			Name: name,
			ID:   zoneName + "/" + name,
			Type: typeInstance,
			Deleter: func(cloud fi.Cloud, tracker *resources.Resource) error {
				return gce.DeleteInstance(c, url)
			},
			Dumper: DumpManagedInstance,
			Obj:    i,
		}

		// We don't block deletion of the instance group manager
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// findGCEDisks finds all Disks that are associated with the current cluster.
// It matches them by looking for the cluster label
// (gce.GceLabelNameKubernetesCluster) with the safe cluster name as value.
func (d *clusterDiscoveryGCE) findGCEDisks() ([]*compute.Disk, error) {
	c := d.gceCloud
	clusterTag := gce.SafeClusterName(d.clusterName)

	var matches []*compute.Disk

	ctx := context.Background()

	// TODO: Push down tag filter?
	diskLists, err := c.Compute().Disks().AggregatedList(ctx, c.Project())
	if err != nil {
		return nil, fmt.Errorf("error listing disks: %v", err)
	}
	for _, list := range diskLists {
		// Loop variable renamed from d, which shadowed the method receiver.
		for _, disk := range list.Disks {
			// A map holds the label key at most once, so one lookup is
			// equivalent to the previous iteration over all labels.
			if v, ok := disk.Labels[gce.GceLabelNameKubernetesCluster]; !ok || v != clusterTag {
				continue
			}
			matches = append(matches, disk)
		}
	}

	return matches, nil
}
// listGCEDisks builds Resource trackers for the cluster's disks; each disk
// waits for the instances using it to be deleted first.
func (d *clusterDiscoveryGCE) listGCEDisks() ([]*resources.Resource, error) {
	var resourceTrackers []*resources.Resource

	disks, err := d.findGCEDisks()
	if err != nil {
		return nil, err
	}
	for _, t := range disks {
		resourceTracker := &resources.Resource{
			Name:    t.Name,
			ID:      t.Name,
			Type:    typeDisk,
			Deleter: deleteGCEDisk,
			Obj:     t,
		}

		// Block on the attaching instances ("<zone>/<name>").
		for _, u := range t.Users {
			resourceTracker.Blocked = append(resourceTracker.Blocked, typeInstance+":"+gce.LastComponent(t.Zone)+"/"+gce.LastComponent(u))
		}

		klog.V(4).Infof("Found resource: %s", t.SelfLink)
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// deleteGCEDisk deletes the GCE Disk held in r, treating "not found" as
// already deleted, and waits for the operation to complete.
func deleteGCEDisk(cloud fi.Cloud, r *resources.Resource) error {
	c := cloud.(gce.GCECloud)
	t := r.Obj.(*compute.Disk)

	klog.V(2).Infof("Deleting GCE Disk %s", t.SelfLink)
	u, err := gce.ParseGoogleCloudURL(t.SelfLink)
	if err != nil {
		return err
	}

	op, err := c.Compute().Disks().Delete(u.Project, u.Zone, u.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("disk not found, assuming deleted: %q", t.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting disk %s: %v", t.SelfLink, err)
	}

	return c.WaitForOp(op)
}
// listTargetPools builds Resource trackers for regional TargetPools whose
// names match the cluster naming convention.
func (d *clusterDiscoveryGCE) listTargetPools() ([]*resources.Resource, error) {
	c := d.gceCloud

	var resourceTrackers []*resources.Resource

	ctx := context.Background()

	tps, err := c.Compute().TargetPools().List(ctx, c.Project(), c.Region())
	if err != nil {
		return nil, fmt.Errorf("error listing TargetPools: %v", err)
	}
	for _, tp := range tps {
		if !d.matchesClusterName(tp.Name) {
			continue
		}
		resourceTracker := &resources.Resource{
			Name:    tp.Name,
			ID:      tp.Name,
			Type:    typeTargetPool,
			Deleter: deleteTargetPool,
			Obj:     tp,
		}
		klog.V(4).Infof("Found resource: %s", tp.SelfLink)
		resourceTrackers = append(resourceTrackers, resourceTracker)
	}

	return resourceTrackers, nil
}
// deleteTargetPool deletes the GCE TargetPool held by the resource tracker.
// A not-found response is treated as success.
func deleteTargetPool(cloud fi.Cloud, r *resources.Resource) error {
	gceCloud := cloud.(gce.GCECloud)
	pool := r.Obj.(*compute.TargetPool)
	klog.V(2).Infof("Deleting GCE TargetPool %s", pool.SelfLink)
	parsed, err := gce.ParseGoogleCloudURL(pool.SelfLink)
	if err != nil {
		return err
	}
	op, err := gceCloud.Compute().TargetPools().Delete(parsed.Project, parsed.Region, parsed.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("TargetPool not found, assuming deleted: %q", pool.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting TargetPool %s: %v", pool.SelfLink, err)
	}
	return gceCloud.WaitForOp(op)
}
// listForwardingRules returns trackers for ForwardingRules in the cluster's
// region whose names match the cluster naming scheme. A rule blocks the
// deletion of its target pool and its IP address.
func (d *clusterDiscoveryGCE) listForwardingRules() ([]*resources.Resource, error) {
	c := d.gceCloud
	ctx := context.Background()
	frs, err := c.Compute().ForwardingRules().List(ctx, c.Project(), c.Region())
	if err != nil {
		return nil, fmt.Errorf("error listing ForwardingRules: %v", err)
	}
	var trackers []*resources.Resource
	for _, rule := range frs {
		if !d.matchesClusterName(rule.Name) {
			continue
		}
		tracker := &resources.Resource{
			Name:    rule.Name,
			ID:      rule.Name,
			Type:    typeForwardingRule,
			Deleter: deleteForwardingRule,
			Obj:     rule,
		}
		if rule.Target != "" {
			tracker.Blocks = append(tracker.Blocks, typeTargetPool+":"+gce.LastComponent(rule.Target))
		}
		if rule.IPAddress != "" {
			tracker.Blocks = append(tracker.Blocks, typeAddress+":"+gce.LastComponent(rule.IPAddress))
		}
		klog.V(4).Infof("Found resource: %s", rule.SelfLink)
		trackers = append(trackers, tracker)
	}
	return trackers, nil
}
// deleteForwardingRule deletes the GCE ForwardingRule held by the resource tracker.
// A not-found response is treated as success.
func deleteForwardingRule(cloud fi.Cloud, r *resources.Resource) error {
	gceCloud := cloud.(gce.GCECloud)
	rule := r.Obj.(*compute.ForwardingRule)
	klog.V(2).Infof("Deleting GCE ForwardingRule %s", rule.SelfLink)
	parsed, err := gce.ParseGoogleCloudURL(rule.SelfLink)
	if err != nil {
		return err
	}
	op, err := gceCloud.Compute().ForwardingRules().Delete(parsed.Project, parsed.Region, parsed.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("ForwardingRule not found, assuming deleted: %q", rule.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting ForwardingRule %s: %v", rule.SelfLink, err)
	}
	return gceCloud.WaitForOp(op)
}
// listFirewallRules discovers Firewall objects for the cluster.
//
// Two families of rules are returned:
//   - rules named by kops itself (matched via matchesClusterNameMultipart), and
//   - rules created by Kubernetes ("k8s-fw-*" LoadBalancer rules and
//     "k8s-*-node-http-hc" health-check rules), which are only accepted after
//     their related ForwardingRule/TargetPool/HealthCheck objects are validated
//     as pointing back at this cluster.
func (d *clusterDiscoveryGCE) listFirewallRules() ([]*resources.Resource, error) {
c := d.gceCloud
var resourceTrackers []*resources.Resource
ctx := context.Background()
firewallRules, err := c.Compute().Firewalls().List(ctx, c.Project())
if err != nil {
return nil, fmt.Errorf("error listing FirewallRules: %v", err)
}
// Labeled loop: validation failures deep inside the body skip to the next rule.
nextFirewallRule:
for _, firewallRule := range firewallRules {
// Candidate rules are either kops-named or Kubernetes-created ("k8s-" prefix).
if !d.matchesClusterNameMultipart(firewallRule.Name, maxPrefixTokens) && !strings.HasPrefix(firewallRule.Name, "k8s-") {
continue
}
// TODO: Check network? (or other fields?) No label support currently.
// We consider only firewall rules that target our cluster tags, which include the cluster name
tagPrefix := gce.SafeClusterName(d.clusterName) + "-"
if len(firewallRule.TargetTags) != 0 {
tagMatchCount := 0
for _, target := range firewallRule.TargetTags {
if strings.HasPrefix(target, tagPrefix) {
tagMatchCount++
}
}
// Every target tag must belong to this cluster, otherwise skip the rule.
if len(firewallRule.TargetTags) != tagMatchCount {
continue nextFirewallRule
}
}
// We don't have any rules that match only on source tags, but if we did we could check them here
if len(firewallRule.TargetTags) == 0 {
continue nextFirewallRule
}
firewallRuleResource := &resources.Resource{
Name: firewallRule.Name,
ID: firewallRule.Name,
Type: typeFirewallRule,
Deleter: deleteFirewallRule,
Obj: firewallRule,
}
// The firewall rule must be removed before its network can be deleted.
firewallRuleResource.Blocks = append(firewallRuleResource.Blocks, typeNetwork+":"+gce.LastComponent(firewallRule.Network))
// kops-named rules are always returned; k8s-created rules are only returned
// after the validation below succeeds.
if d.matchesClusterNameMultipart(firewallRule.Name, maxPrefixTokens) {
klog.V(4).Infof("Found resource: %s", firewallRule.SelfLink)
resourceTrackers = append(resourceTrackers, firewallRuleResource)
}
// find the objects if this is a Kubernetes LoadBalancer
if strings.HasPrefix(firewallRule.Name, "k8s-fw-") {
// We build a list of resources if this is a k8s firewall rule,
// but we only add them once all the checks are complete
var k8sResources []*resources.Resource
k8sResources = append(k8sResources, firewallRuleResource)
// We lookup the forwarding rule by name, but we then validate that it points to one of our resources
forwardingRuleName := strings.TrimPrefix(firewallRule.Name, "k8s-fw-")
forwardingRule, err := c.Compute().ForwardingRules().Get(c.Project(), c.Region(), forwardingRuleName)
if err != nil {
if gce.IsNotFound(err) {
// We looked it up by name, so an error isn't unlikely
klog.Warningf("could not find forwarding rule %q, assuming firewallRule %q is not a k8s rule", forwardingRuleName, firewallRule.Name)
continue nextFirewallRule
}
return nil, fmt.Errorf("error getting ForwardingRule %q: %w", forwardingRuleName, err)
}
forwardingRuleResource := &resources.Resource{
Name: forwardingRule.Name,
ID: forwardingRule.Name,
Type: typeForwardingRule,
Deleter: deleteForwardingRule,
Obj: forwardingRule,
}
if forwardingRule.Target != "" {
forwardingRuleResource.Blocks = append(forwardingRuleResource.Blocks, typeTargetPool+":"+gce.LastComponent(forwardingRule.Target))
}
k8sResources = append(k8sResources, forwardingRuleResource)
// TODO: Can we get k8s to set labels on the ForwardingRule?
// TODO: Check description? It looks like e.g. description: '{"kubernetes.io/service-name":"kube-system/guestbook"}'
if forwardingRule.Target == "" {
klog.Warningf("forwarding rule %q did not have target, assuming firewallRule %q is not a k8s rule", forwardingRuleName, firewallRule.Name)
continue nextFirewallRule
}
// Follow the forwarding rule to its target pool.
targetPoolName := gce.LastComponent(forwardingRule.Target)
targetPool, err := c.Compute().TargetPools().Get(c.Project(), c.Region(), targetPoolName)
if err != nil {
return nil, fmt.Errorf("error getting TargetPool %q: %w", targetPoolName, err)
}
targetPoolResource := &resources.Resource{
Name: targetPool.Name,
ID: targetPool.Name,
Type: typeTargetPool,
Deleter: deleteTargetPool,
Obj: targetPool,
}
k8sResources = append(k8sResources, targetPoolResource)
// TODO: Check description? (looks like description: '{"kubernetes.io/service-name":"k8s-dbb09d49d9780e7e-node"}' )
// TODO: Check instances?
for _, healthCheckLink := range targetPool.HealthChecks {
// l4 level healthchecks
healthCheckName := gce.LastComponent(healthCheckLink)
// Any non-k8s or non-HTTP health check disqualifies the whole rule.
if !strings.HasPrefix(healthCheckName, "k8s-") || !strings.Contains(healthCheckLink, "/httpHealthChecks/") {
klog.Warningf("found non-k8s healthcheck %q in targetPool %q, assuming firewallRule %q is not a k8s rule", healthCheckLink, targetPoolName, firewallRule.Name)
continue nextFirewallRule
}
hc, err := c.Compute().HTTPHealthChecks().Get(c.Project(), healthCheckName)
if err != nil {
return nil, fmt.Errorf("error getting HTTPHealthCheck %q: %w", healthCheckName, err)
}
// TODO: Check description? (looks like description: '{"kubernetes.io/service-name":"k8s-dbb09d49d9780e7e-node"}' )
healthCheckResource := &resources.Resource{
Name: hc.Name,
ID: hc.Name,
Type: typeHTTPHealthcheck,
Deleter: deleteHTTPHealthCheck,
Obj: hc,
}
// The target pool must be deleted before its health check can be removed.
healthCheckResource.Blocked = append(healthCheckResource.Blocked, targetPoolResource.Type+":"+targetPoolResource.ID)
k8sResources = append(k8sResources, healthCheckResource)
}
// We now have confidence that this is a k8s LoadBalancer; add the resources
resourceTrackers = append(resourceTrackers, k8sResources...)
}
// find the objects if this is a Kubernetes node health check
if strings.HasPrefix(firewallRule.Name, "k8s-") && strings.HasSuffix(firewallRule.Name, "-node-http-hc") {
// TODO: Check port matches http health check (always 10256?)
// TODO: Check description - looks like '{"kubernetes.io/cluster-id":"cb2e931dec561053"}'
// We already know the target tags match
resourceTrackers = append(resourceTrackers, firewallRuleResource)
}
}
return resourceTrackers, nil
}
// deleteHTTPHealthCheck deletes the GCE HTTP health check held by the resource
// tracker. A not-found response is treated as success.
func deleteHTTPHealthCheck(cloud fi.Cloud, r *resources.Resource) error {
	gceCloud := cloud.(gce.GCECloud)
	hc := r.Obj.(*compute.HttpHealthCheck)
	klog.V(2).Infof("Deleting GCE HTTP HealthCheck %s", hc.SelfLink)
	parsed, err := gce.ParseGoogleCloudURL(hc.SelfLink)
	if err != nil {
		return err
	}
	op, err := gceCloud.Compute().HTTPHealthChecks().Delete(parsed.Project, parsed.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("HTTP HealthCheck not found, assuming deleted: %q", hc.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting HTTP HealthCheck %s: %v", hc.SelfLink, err)
	}
	return gceCloud.WaitForOp(op)
}
// deleteFirewallRule deletes the GCE Firewall held by the resource tracker.
// A not-found response is treated as success.
func deleteFirewallRule(cloud fi.Cloud, r *resources.Resource) error {
	gceCloud := cloud.(gce.GCECloud)
	fw := r.Obj.(*compute.Firewall)
	klog.V(2).Infof("Deleting GCE FirewallRule %s", fw.SelfLink)
	parsed, err := gce.ParseGoogleCloudURL(fw.SelfLink)
	if err != nil {
		return err
	}
	op, err := gceCloud.Compute().Firewalls().Delete(parsed.Project, parsed.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("FirewallRule not found, assuming deleted: %q", fw.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting FirewallRule %s: %v", fw.SelfLink, err)
	}
	return gceCloud.WaitForOp(op)
}
// listRoutes returns trackers for cluster routes that should be deleted:
// routes carrying a NEXT_HOP_INSTANCE_NOT_FOUND warning, or routes whose
// next-hop instance is itself scheduled for deletion in resourceMap.
// Each returned route is Blocked on every instance deletion, so routes are
// removed only after all instances are gone.
func (d *clusterDiscoveryGCE) listRoutes(ctx context.Context, resourceMap map[string]*resources.Resource) ([]*resources.Resource, error) {
	c := d.gceCloud
	var resourceTrackers []*resources.Resource
	// Index pending instance deletions by "<zone>/<name>" (the Resource ID).
	instancesToDelete := make(map[string]*resources.Resource)
	for _, resource := range resourceMap {
		if resource.Type == typeInstance {
			instancesToDelete[resource.ID] = resource
		}
	}
	// TODO: Push-down prefix?
	routes, err := c.Compute().Routes().List(ctx, c.Project())
	if err != nil {
		return nil, fmt.Errorf("error listing Routes: %w", err)
	}
	for _, r := range routes {
		if !d.matchesClusterNameWithUUID(r.Name, maxGCERouteNameLength) {
			continue
		}
		remove := false
		for _, w := range r.Warnings {
			switch w.Code {
			case "NEXT_HOP_INSTANCE_NOT_FOUND":
				remove = true
			default:
				klog.Infof("Unknown warning on route %q: %q", r.Name, w.Code)
			}
		}
		if r.NextHopInstance != "" {
			u, err := gce.ParseGoogleCloudURL(r.NextHopInstance)
			if err != nil {
				// Bug fix: previously u was dereferenced even when parsing
				// failed, which panics on a malformed NextHopInstance URL.
				// Now a malformed URL is logged and the lookup is skipped.
				klog.Warningf("error parsing URL for NextHopInstance=%q", r.NextHopInstance)
			} else if instancesToDelete[u.Zone+"/"+u.Name] != nil {
				remove = true
			}
		}
		if remove {
			resourceTracker := &resources.Resource{
				Name:    r.Name,
				ID:      r.Name,
				Type:    typeRoute,
				Deleter: deleteRoute,
				Obj:     r,
			}
			// To avoid race conditions where the control-plane re-adds the routes, we delete routes
			// only after we have deleted all the instances.
			for _, instance := range instancesToDelete {
				resourceTracker.Blocked = append(resourceTracker.Blocked, typeInstance+":"+instance.ID)
			}
			klog.V(4).Infof("Found resource: %s", r.SelfLink)
			resourceTrackers = append(resourceTrackers, resourceTracker)
		}
	}
	return resourceTrackers, nil
}
// deleteRoute deletes the GCE Route held by the resource tracker.
// A not-found response is treated as success.
func deleteRoute(cloud fi.Cloud, r *resources.Resource) error {
	gceCloud := cloud.(gce.GCECloud)
	route := r.Obj.(*compute.Route)
	klog.V(2).Infof("Deleting GCE Route %s", route.SelfLink)
	parsed, err := gce.ParseGoogleCloudURL(route.SelfLink)
	if err != nil {
		return err
	}
	op, err := gceCloud.Compute().Routes().Delete(parsed.Project, parsed.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("Route not found, assuming deleted: %q", route.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting Route %s: %v", route.SelfLink, err)
	}
	return gceCloud.WaitForOp(op)
}
// listAddresses returns trackers for reserved IP addresses in the cluster's
// region whose names match the cluster naming scheme.
func (d *clusterDiscoveryGCE) listAddresses() ([]*resources.Resource, error) {
	c := d.gceCloud
	ctx := context.Background()
	addrs, err := c.Compute().Addresses().List(ctx, c.Project(), c.Region())
	if err != nil {
		return nil, fmt.Errorf("error listing Addresses: %v", err)
	}
	var trackers []*resources.Resource
	for _, addr := range addrs {
		if !d.matchesClusterName(addr.Name) {
			klog.V(8).Infof("Skipping Address with name %q", addr.Name)
			continue
		}
		klog.V(4).Infof("Found resource: %s", addr.SelfLink)
		trackers = append(trackers, &resources.Resource{
			Name:    addr.Name,
			ID:      addr.Name,
			Type:    typeAddress,
			Deleter: deleteAddress,
			Obj:     addr,
		})
	}
	return trackers, nil
}
// deleteAddress deletes the GCE Address held by the resource tracker.
// A not-found response is treated as success.
func deleteAddress(cloud fi.Cloud, r *resources.Resource) error {
	gceCloud := cloud.(gce.GCECloud)
	addr := r.Obj.(*compute.Address)
	klog.V(2).Infof("Deleting GCE Address %s", addr.SelfLink)
	parsed, err := gce.ParseGoogleCloudURL(addr.SelfLink)
	if err != nil {
		return err
	}
	op, err := gceCloud.Compute().Addresses().Delete(parsed.Project, parsed.Region, parsed.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("Address not found, assuming deleted: %q", addr.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting Address %s: %v", addr.SelfLink, err)
	}
	return gceCloud.WaitForOp(op)
}
// listSubnets returns trackers for subnetworks that both match the cluster
// naming scheme and are referenced by a cluster instance template. The
// template cross-check guards against deleting an unrelated subnetwork that
// merely shares the name prefix.
func (d *clusterDiscoveryGCE) listSubnets() ([]*resources.Resource, error) {
	// Templates are very accurate because of the metadata, so use those as the sanity check
	templates, err := d.findInstanceTemplates()
	if err != nil {
		return nil, err
	}
	// Collect every subnetwork URL referenced by the cluster's templates.
	subnetworkUrls := make(map[string]bool)
	for _, template := range templates {
		for _, iface := range template.Properties.NetworkInterfaces {
			if iface.Subnetwork != "" {
				subnetworkUrls[iface.Subnetwork] = true
			}
		}
	}
	c := d.gceCloud
	ctx := context.Background()
	subnets, err := c.Compute().Subnetworks().List(ctx, c.Project(), c.Region())
	if err != nil {
		return nil, fmt.Errorf("error listing subnetworks: %v", err)
	}
	var trackers []*resources.Resource
	for _, subnet := range subnets {
		if !d.matchesClusterName(subnet.Name) {
			klog.V(8).Infof("skipping Subnet with name %q", subnet.Name)
			continue
		}
		if !subnetworkUrls[subnet.SelfLink] {
			klog.Warningf("skipping subnetwork %q because it didn't match any instance template", subnet.SelfLink)
			continue
		}
		tracker := &resources.Resource{
			Name:    subnet.Name,
			ID:      subnet.Name,
			Type:    typeSubnet,
			Deleter: deleteSubnet,
			Obj:     subnet,
			Dumper:  DumpSubnetwork,
		}
		// The subnetwork must be removed before its parent network.
		tracker.Blocks = append(tracker.Blocks, typeNetwork+":"+gce.LastComponent(subnet.Network))
		klog.V(4).Infof("found resource: %s", subnet.SelfLink)
		trackers = append(trackers, tracker)
	}
	return trackers, nil
}
// deleteSubnet deletes the GCE subnetwork held by the resource tracker.
// A not-found response is treated as success.
func deleteSubnet(cloud fi.Cloud, r *resources.Resource) error {
	gceCloud := cloud.(gce.GCECloud)
	subnet := r.Obj.(*compute.Subnetwork)
	klog.V(2).Infof("deleting GCE subnetwork %s", subnet.SelfLink)
	parsed, err := gce.ParseGoogleCloudURL(subnet.SelfLink)
	if err != nil {
		return err
	}
	op, err := gceCloud.Compute().Subnetworks().Delete(parsed.Project, parsed.Region, parsed.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("subnetwork not found, assuming deleted: %q", subnet.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting subnetwork %s: %v", subnet.SelfLink, err)
	}
	return gceCloud.WaitForOp(op)
}
// listRouters returns trackers for Cloud Routers in the cluster's region
// whose names match the cluster naming scheme.
func (d *clusterDiscoveryGCE) listRouters() ([]*resources.Resource, error) {
	c := d.gceCloud
	ctx := context.Background()
	routers, err := c.Compute().Routers().List(ctx, c.Project(), c.Region())
	if err != nil {
		return nil, fmt.Errorf("error listing routers: %v", err)
	}
	var trackers []*resources.Resource
	for _, router := range routers {
		if !d.matchesClusterName(router.Name) {
			klog.V(8).Infof("skipping Router with name %q", router.Name)
			continue
		}
		klog.V(4).Infof("found resource: %s", router.SelfLink)
		trackers = append(trackers, &resources.Resource{
			Name:    router.Name,
			ID:      router.Name,
			Type:    typeRouter,
			Deleter: deleteRouter,
			Obj:     router,
		})
	}
	return trackers, nil
}
// deleteRouter deletes the GCE router held by the resource tracker.
// A not-found response is treated as success.
func deleteRouter(cloud fi.Cloud, r *resources.Resource) error {
	gceCloud := cloud.(gce.GCECloud)
	router := r.Obj.(*compute.Router)
	klog.V(2).Infof("deleting GCE router %s", router.SelfLink)
	parsed, err := gce.ParseGoogleCloudURL(router.SelfLink)
	if err != nil {
		return err
	}
	op, err := gceCloud.Compute().Routers().Delete(parsed.Project, parsed.Region, parsed.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("router not found, assuming deleted: %q", router.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting router %s: %v", router.SelfLink, err)
	}
	return gceCloud.WaitForOp(op)
}
// listNetworks returns a tracker for the cluster's network, if it exists.
// Only a network that is named exactly after the cluster AND is referenced by
// a cluster instance template is returned, to avoid deleting a shared network.
func (d *clusterDiscoveryGCE) listNetworks() ([]*resources.Resource, error) {
	// Templates are very accurate because of the metadata, so use those as the sanity check
	templates, err := d.findInstanceTemplates()
	if err != nil {
		return nil, err
	}
	// Collect every network URL referenced by the cluster's templates.
	networkUrls := make(map[string]bool)
	for _, template := range templates {
		for _, iface := range template.Properties.NetworkInterfaces {
			if iface.Network != "" {
				networkUrls[iface.Network] = true
			}
		}
	}
	c := d.gceCloud
	networks, err := c.Compute().Networks().List(c.Project())
	if err != nil {
		return nil, fmt.Errorf("error listing networks: %v", err)
	}
	var trackers []*resources.Resource
	for _, network := range networks.Items {
		if network.Name != gce.SafeClusterName(d.clusterName) {
			klog.V(8).Infof("skipping network with name %q", network.Name)
			continue
		}
		if !networkUrls[network.SelfLink] {
			klog.Warningf("skipping network %q because it didn't match any instance template", network.SelfLink)
			continue
		}
		klog.V(4).Infof("found resource: %s", network.SelfLink)
		trackers = append(trackers, &resources.Resource{
			Name:    network.Name,
			ID:      network.Name,
			Type:    typeNetwork,
			Deleter: deleteNetwork,
			Obj:     network,
			Dumper:  DumpNetwork,
		})
	}
	return trackers, nil
}
// deleteNetwork deletes the GCE network held by the resource tracker.
// A not-found response is treated as success.
func deleteNetwork(cloud fi.Cloud, r *resources.Resource) error {
	gceCloud := cloud.(gce.GCECloud)
	network := r.Obj.(*compute.Network)
	klog.V(2).Infof("deleting GCE network %s", network.SelfLink)
	parsed, err := gce.ParseGoogleCloudURL(network.SelfLink)
	if err != nil {
		return err
	}
	op, err := gceCloud.Compute().Networks().Delete(parsed.Project, parsed.Name)
	if err != nil {
		if gce.IsNotFound(err) {
			klog.Infof("network not found, assuming deleted: %q", network.SelfLink)
			return nil
		}
		return fmt.Errorf("error deleting network %s: %v", network.SelfLink, err)
	}
	return gceCloud.WaitForOp(op)
}
// matchesClusterName reports whether the name could have been generated for
// this cluster, considering up to 8 hyphen-separated prefix parts.
func (d *clusterDiscoveryGCE) matchesClusterName(name string) bool {
	// Names can contain hyphens themselves, so there is no hard limit;
	// 8 parts feels like enough for any "reasonable" name.
	const maxParts = 8
	return d.matchesClusterNameMultipart(name, maxParts)
}
// matchesClusterNameMultipart checks if the name could have been generated by
// our cluster, trying every hyphen-separated prefix of the name as the object
// name. maxParts limits the number of prefix parts considered.
func (d *clusterDiscoveryGCE) matchesClusterNameMultipart(name string, maxParts int) bool {
	parts := strings.Split(name, "-")
	if maxParts > len(parts) {
		maxParts = len(parts)
	}
	for n := 1; n <= maxParts; n++ {
		candidate := strings.Join(parts[:n], "-")
		if candidate == "" {
			continue
		}
		// The name matches if it is exactly what kops would have generated
		// for this candidate object name in this cluster.
		if gce.SafeObjectName(candidate, d.clusterName) == name {
			return true
		}
	}
	return false
}
// matchesClusterNameWithUUID checks if the name is the clusterName with a UUID on the end.
// This is used by GCE routes (in "classic" mode)
func (d *clusterDiscoveryGCE) matchesClusterNameWithUUID(name string, maxLength int) bool {
const uuidLength = 36 // e.g. 51a343e2-c285-4e73-b933-18a6ea44c3e4
// Format is <cluster-name>-<uuid>
// <cluster-name> is truncated to ensure it fits into the GCE max length
if len(name) < uuidLength {
return false
}
// Strip the trailing UUID; for a kops route the remainder is "<cluster-name>-".
withoutUUID := name[:len(name)-uuidLength]
clusterPrefix := gce.SafeClusterName(d.clusterName) + "-"
if len(clusterPrefix) > maxLength-uuidLength {
// Mirror the truncation applied when the route name was generated:
// keep room for the UUID plus the "-" separator.
// NOTE(review): assumes maxLength > uuidLength; a smaller maxLength would
// make the slice bound negative and panic — confirm against callers.
clusterPrefix = gce.SafeClusterName(d.clusterName)[:maxLength-uuidLength-1] + "-"
}
return clusterPrefix == withoutUUID
}
// clusterDNSName returns the cluster name as a fully-qualified DNS name
// (with a trailing dot).
func (d *clusterDiscoveryGCE) clusterDNSName() string {
	return fmt.Sprintf("%s.", d.clusterName)
}
// isKopsManagedDNSName reports whether name is one of the DNS records kops
// manages for this cluster (api, api.internal or bastion).
func (d *clusterDiscoveryGCE) isKopsManagedDNSName(name string) bool {
	suffix := "." + d.clusterDNSName()
	for _, prefix := range []string{"api", "api.internal", "bastion"} {
		if name == prefix+suffix {
			return true
		}
	}
	return false
}
// listGCEDNSZone returns trackers for the kops-managed A records in every
// CloudDNS zone that contains the cluster's DNS name. Gossip-based clusters
// have no DNS records, so nil is returned for them.
func (d *clusterDiscoveryGCE) listGCEDNSZone() ([]*resources.Resource, error) {
	if dns.IsGossipHostname(d.clusterName) {
		return nil, nil
	}
	managedZones, err := d.gceCloud.CloudDNS().ManagedZones().List(d.gceCloud.Project())
	if err != nil {
		return nil, fmt.Errorf("error getting GCE DNS zones %v", err)
	}
	var trackers []*resources.Resource
	for _, zone := range managedZones {
		// Only zones that are a suffix of the cluster's DNS name can hold its records.
		if !strings.HasSuffix(d.clusterDNSName(), zone.DnsName) {
			continue
		}
		rrsets, err := d.gceCloud.CloudDNS().ResourceRecordSets().List(d.gceCloud.Project(), zone.Name)
		if err != nil {
			return nil, fmt.Errorf("error getting GCE DNS zone data %v", err)
		}
		for _, record := range rrsets {
			// Only kops-managed A records are collected (adapted from the AWS implementation).
			if record.Type != "A" {
				continue
			}
			if !d.isKopsManagedDNSName(record.Name) {
				continue
			}
			trackers = append(trackers, &resources.Resource{
				Name:         record.Name,
				ID:           record.Name,
				Type:         typeDNSRecord,
				GroupDeleter: deleteDNSRecords,
				GroupKey:     zone.Name,
				Obj:          record,
			})
		}
	}
	return trackers, nil
}
// deleteDNSRecords deletes a group of DNS resource record sets in a single
// CloudDNS change. All resources in r are expected to share the same GroupKey
// (the managed zone name) — set by listGCEDNSZone.
func deleteDNSRecords(cloud fi.Cloud, r []*resources.Resource) error {
	// Robustness fix: with no records there is nothing to do; previously an
	// empty Change (with an empty zone name) would have been sent to the API.
	if len(r) == 0 {
		return nil
	}
	c := cloud.(gce.GCECloud)
	var records []*clouddns.ResourceRecordSet
	var zoneName string
	for _, record := range r {
		rrs := record.Obj.(*clouddns.ResourceRecordSet)
		zoneName = record.GroupKey
		records = append(records, rrs)
	}
	change := clouddns.Change{Deletions: records, Kind: "dns#change", IsServing: true}
	if _, err := c.CloudDNS().Changes().Create(c.Project(), zoneName, &change); err != nil {
		return fmt.Errorf("error deleting GCE DNS resource record set %v", err)
	}
	return nil
}
|
package main
import (
"crypto/sha256"
"fmt"
"io"
"net/http"
"strings"
"github.com/PuerkitoBio/goquery"
"github.com/gorilla/feeds"
"golang.org/x/text/encoding/japanese"
"golang.org/x/text/transform"
)
const (
// MemoirsOfShibasakiSakiUrl is the base URL of the scraped site; it is also
// used as the feed's link.
MemoirsOfShibasakiSakiUrl = "http://shibasakisaki.web.fc2.com/"
)
// GetMemoirsOfShibasakiSaki fetches the site over HTTP and builds a feed
// from its contents.
func GetMemoirsOfShibasakiSaki() (*feeds.Feed, error) {
	resp, err := http.Get(MemoirsOfShibasakiSakiUrl)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return GetMemoirsOfShibasakiSakiFromReader(resp.Body)
}
// GetMemoirsOfShibasakiSakiFromReader decodes a Shift_JIS encoded HTML stream
// and builds a feed from the parsed document.
func GetMemoirsOfShibasakiSakiFromReader(reader io.Reader) (*feeds.Feed, error) {
	utf8Reader := transform.NewReader(reader, japanese.ShiftJIS.NewDecoder())
	doc, err := goquery.NewDocumentFromReader(utf8Reader)
	if err != nil {
		return nil, err
	}
	return GetMemoirsOfShibasakiSakiFromDocument(doc)
}
// GetMemoirsOfShibasakiSakiFromDocument extracts feed items from the parsed page.
// Entries that are not anchors now fall back to the site URL, so every item has
// a non-empty Link (previously such items had an empty Href).
func GetMemoirsOfShibasakiSakiFromDocument(doc *goquery.Document) (*feeds.Feed, error) {
	var items []*feeds.Item
	doc.Find(`td[bgcolor="#330066"] font[size="+1"] > *`).Each(func(_ int, s *goquery.Selection) {
		if s.Is("br") {
			return
		}
		var href string
		if s.Is("a") {
			href = s.AttrOr("href", "")
		} else {
			// Non-anchor entries have no link of their own; default to the site URL.
			href = MemoirsOfShibasakiSakiUrl
		}
		text := strings.TrimSpace(s.Text())
		if text == "" {
			return
		}
		// Stable item ID derived from the content, since the site provides none.
		hash := fmt.Sprintf("%x", sha256.Sum256([]byte(text+href)))
		items = append(items, &feeds.Item{
			Title: text,
			Link:  &feeds.Link{Href: href},
			Id:    hash,
		})
	})
	feed := &feeds.Feed{
		Title: "柴崎さきの見聞録",
		Link:  &feeds.Link{Href: MemoirsOfShibasakiSakiUrl},
		Items: items,
	}
	return feed, nil
}
Set default URL
package main
import (
"crypto/sha256"
"fmt"
"io"
"net/http"
"strings"
"github.com/PuerkitoBio/goquery"
"github.com/gorilla/feeds"
"golang.org/x/text/encoding/japanese"
"golang.org/x/text/transform"
)
const (
// MemoirsOfShibasakiSakiUrl is the base URL of the scraped site; it is also
// used as the feed's link and as the default item link.
MemoirsOfShibasakiSakiUrl = "http://shibasakisaki.web.fc2.com/"
)
// GetMemoirsOfShibasakiSaki downloads the site and converts it into a feed.
func GetMemoirsOfShibasakiSaki() (*feeds.Feed, error) {
	response, err := http.Get(MemoirsOfShibasakiSakiUrl)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()
	return GetMemoirsOfShibasakiSakiFromReader(response.Body)
}
// GetMemoirsOfShibasakiSakiFromReader converts a Shift_JIS encoded HTML stream
// into a feed by parsing it into a document first.
func GetMemoirsOfShibasakiSakiFromReader(reader io.Reader) (*feeds.Feed, error) {
	decoder := japanese.ShiftJIS.NewDecoder()
	doc, err := goquery.NewDocumentFromReader(transform.NewReader(reader, decoder))
	if err != nil {
		return nil, err
	}
	return GetMemoirsOfShibasakiSakiFromDocument(doc)
}
// GetMemoirsOfShibasakiSakiFromDocument builds the feed from the parsed page.
// Non-anchor entries use the site URL as their link; item IDs are content hashes.
func GetMemoirsOfShibasakiSakiFromDocument(doc *goquery.Document) (*feeds.Feed, error) {
	var items []*feeds.Item
	doc.Find(`td[bgcolor="#330066"] font[size="+1"] > *`).Each(func(_ int, sel *goquery.Selection) {
		if sel.Is("br") {
			return
		}
		// Default to the site URL; anchors carry their own href.
		href := MemoirsOfShibasakiSakiUrl
		if sel.Is("a") {
			href = sel.AttrOr("href", "")
		}
		title := strings.TrimSpace(sel.Text())
		if title == "" {
			return
		}
		sum := sha256.Sum256([]byte(title + href))
		items = append(items, &feeds.Item{
			Title: title,
			Link:  &feeds.Link{Href: href},
			Id:    fmt.Sprintf("%x", sum),
		})
	})
	return &feeds.Feed{
		Title: "柴崎さきの見聞録",
		Link:  &feeds.Link{Href: MemoirsOfShibasakiSakiUrl},
		Items: items,
	}, nil
}
|
// Copyright 2018 The Jadep Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pkgloading defines the package loading interface and implements functionality on top of it.
package pkgloading
import (
"fmt"
"os"
"path/filepath"
"sync"
"context"
"github.com/bazelbuild/tools_jvm_autodeps/bazel"
"github.com/bazelbuild/tools_jvm_autodeps/compat"
)
// Loader loads BUILD files.
type Loader interface {
// Load loads the named packages and returns a mapping from names to loaded packages, or an error if any item failed.
// CachingLoader requires Load to be safe for concurrent use (see its doc comment).
Load(ctx context.Context, packages []string) (map[string]*bazel.Package, error)
}
// CachingLoader is a concurrent duplicate-suppressing cache for results from a loader.
// It wraps another loader L, and guarantees each requested package is loaded exactly once.
//
// For example, Load(a, b) and then Load(b, c) will result in the following calls to the underlying loader:
// L.Load(a, b)
// L.Load(c)
// Notice that 'b' is only requested once.
// As a corollary, Load(P) will not call the underlying L.Load() at all if all of the packages in P have been previously loaded.
//
// Note that if an error occurred when loading a set of packages, the failure will be cached and no loading will be re-attempted.
// In particular, it's possible to poison the cache for P by loading [P, BadPkg] first.
//
// CachingLoader is concurrency-safe as long as the underlying loader's Load function is concurrency-safe.
type CachingLoader struct {
loader Loader
mu sync.Mutex // guards cache
cache map[string]*entry // package name -> (possibly in-flight) load result
}
// NewCachingLoader returns a new CachingLoader wrapped around a loader.
func NewCachingLoader(loader Loader) *CachingLoader {
	cl := &CachingLoader{
		loader: loader,
		cache:  make(map[string]*entry),
	}
	return cl
}
// entry is a cache slot for a single package's load result.
type entry struct {
pkgName string
res result // valid only after 'ready' is closed
ready chan struct{} // closed when res is ready
}
// result is the outcome of loading one package: the package, or an error.
type result struct {
value *bazel.Package
err error
}
// Load loads packages using an underlying loader.
// It will load each package at most once, and is safe to call concurrently.
// The returned error is a concatenation of all errors from calls to the underlying loader that occurred in order to load 'packages'.
func (l *CachingLoader) Load(ctx context.Context, packages []string) (map[string]*bazel.Package, error) {
// Partition the request: 'work' holds entries this call is responsible for
// loading; 'all' holds every requested entry, cached or not.
var work, all []*entry
l.mu.Lock()
for _, p := range packages {
e, ok := l.cache[p]
if !ok {
// First request for this package: publish the entry under the lock so
// concurrent callers wait on 'ready' instead of loading it again.
e = &entry{pkgName: p, ready: make(chan struct{})}
l.cache[p] = e
work = append(work, e)
}
all = append(all, e)
}
l.mu.Unlock()
if len(work) > 0 {
var pkgsToLoad []string
for _, e := range work {
pkgsToLoad = append(pkgsToLoad, e.pkgName)
}
result, err := l.loader.Load(ctx, pkgsToLoad)
for _, e := range work {
e.res.value = result[e.pkgName]
e.res.err = err
// Closing 'ready' publishes res to every goroutine waiting on this entry.
close(e.ready)
}
}
result := make(map[string]*bazel.Package)
var errors []interface{}
for _, e := range all {
// Block until whichever call owns this entry has finished loading it.
<-e.ready
if e.res.value != nil {
result[e.pkgName] = e.res.value
}
if e.res.err != nil {
errors = append(errors, e.res.err)
}
}
if len(errors) != 0 {
return nil, fmt.Errorf("Errors when loading packages: %v", errors)
}
return result, nil
}
// LoadRules loads the packages containing labels and returns the bazel.Rules
// represented by them, together with the loaded packages. Labels whose package
// or rule cannot be found are silently omitted from the result.
func LoadRules(ctx context.Context, loader Loader, labels []bazel.Label) (map[bazel.Label]*bazel.Rule, map[string]*bazel.Package, error) {
	result := make(map[bazel.Label]*bazel.Rule)
	if len(labels) == 0 {
		return result, nil, nil
	}
	pkgs, err := loader.Load(ctx, distinctPkgs(labels))
	if err != nil {
		return nil, nil, err
	}
	for _, label := range labels {
		pkgName, ruleName := label.Split()
		pkg, ok := pkgs[pkgName]
		if !ok {
			continue
		}
		if rule, ok := pkg.Rules[ruleName]; ok {
			result[label] = rule
		}
	}
	return result, pkgs, nil
}
// LoadPackageGroups loads the packages containing labels and returns the
// bazel.PackageGroups represented by them. Labels whose package or group
// cannot be found are silently omitted from the result.
func LoadPackageGroups(ctx context.Context, loader Loader, labels []bazel.Label) (map[bazel.Label]*bazel.PackageGroup, error) {
	result := make(map[bazel.Label]*bazel.PackageGroup)
	if len(labels) == 0 {
		return result, nil
	}
	pkgs, err := loader.Load(ctx, distinctPkgs(labels))
	if err != nil {
		return nil, err
	}
	for _, label := range labels {
		pkgName, groupName := label.Split()
		pkg, ok := pkgs[pkgName]
		if !ok {
			continue
		}
		if g, ok := pkg.PackageGroups[groupName]; ok {
			result[label] = g
		}
	}
	return result, nil
}
// distinctPkgs returns the set of unique packages mentioned in a set of
// labels, preserving first-seen order.
func distinctPkgs(labels []bazel.Label) []string {
	seen := make(map[string]bool)
	var result []string
	for _, label := range labels {
		pkg, _ := label.Split()
		if seen[pkg] {
			continue
		}
		seen[pkg] = true
		result = append(result, pkg)
	}
	return result
}
// Siblings returns all the targets in all the packages that define the files in 'fileNames'.
// For example, if fileNames = {'foo/bar/Bar.java'}, and there's a BUILD file in foo/bar/, we return all the targets in the package defined by that BUILD file.
func Siblings(ctx context.Context, loader Loader, workspaceDir string, fileNames []string) (packages map[string]*bazel.Package, fileToPkgName map[string]string, err error) {
tctx, endSpan := compat.NewLocalSpan(ctx, "Jade: Find BUILD packages of files")
var wg sync.WaitGroup
var mu sync.Mutex // guards pkgs, pkgsSet and fileToPkgName below
var pkgs []string
pkgsSet := make(map[string]bool)
fileToPkgName = make(map[string]string)
// Look up each file's package concurrently; findPackageName stats the
// filesystem, so the lookups dominate and parallelize well.
for _, f := range fileNames {
f := f // capture a per-iteration copy for the goroutine (pre-Go 1.22 loop semantics)
wg.Add(1)
go func() {
defer wg.Done()
if p := findPackageName(tctx, workspaceDir, f); p != "" {
mu.Lock()
fileToPkgName[f] = p
// Deduplicate package names while preserving discovery order.
if !pkgsSet[p] {
pkgs = append(pkgs, p)
pkgsSet[p] = true
}
mu.Unlock()
}
}()
}
wg.Wait()
endSpan()
// Load the discovered packages with the caller's original context.
packages, err = loader.Load(ctx, pkgs)
return packages, fileToPkgName, err
}
// findPackageName finds the name of the package that the file is in: it walks
// up from the file's directory until it finds a directory containing a BUILD
// file, and returns that workspace-relative directory, or "" if none exists.
func findPackageName(ctx context.Context, workspaceDir string, filename string) string {
	dir := filepath.Dir(filename)
	for dir != "." {
		// Any stat outcome other than "does not exist" counts as a BUILD file.
		if _, err := compat.FileStat(ctx, filepath.Join(workspaceDir, dir, "BUILD")); !os.IsNotExist(err) {
			return dir
		}
		dir = filepath.Dir(dir)
	}
	return ""
}
// FilteringLoader is a Loader that loads using another Loader, after filtering the list of requested packages.
type FilteringLoader struct {
// Loader is the underlying Loader which we delegate Load() calls to.
Loader Loader
// BlacklistedPackages is a set of packages we will not load.
// The underlying Loader will not be asked to load packages in this set.
BlacklistedPackages map[string]bool
}
// Load removes any blacklisted packages from 'packages' and delegates the
// remainder to the underlying Loader.
// (The previous comment described an RPC to a PkgLoader service; this method
// itself only filters — whether an RPC happens depends on the wrapped Loader.)
func (l *FilteringLoader) Load(ctx context.Context, packages []string) (map[string]*bazel.Package, error) {
var filtered []string
for _, p := range packages {
if !l.BlacklistedPackages[p] {
filtered = append(filtered, p)
}
}
return l.Loader.Load(ctx, filtered)
}
Fix minor typo.
PiperOrigin-RevId: 194000569
// Copyright 2018 The Jadep Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pkgloading defines the package loading interface and implements functionality on top of it.
package pkgloading
import (
"fmt"
"os"
"path/filepath"
"sync"
"context"
"github.com/bazelbuild/tools_jvm_autodeps/bazel"
"github.com/bazelbuild/tools_jvm_autodeps/compat"
)
// Loader loads BUILD files.
type Loader interface {
// Load loads the named packages and returns a mapping from names to loaded packages, or an error if any item failed.
// CachingLoader requires Load to be safe for concurrent use (see its doc comment).
Load(ctx context.Context, packages []string) (map[string]*bazel.Package, error)
}
// CachingLoader is a concurrent duplicate-supressing cache for results from a loader.
// It wraps another loader L, and guarantees each requested package is loaded exactly once.
//
// For example, Load(a, b) and then Load(b, c) will result in the following calls to the underlying loader:
// L.Load(a, b)
// L.Load(c)
// Notice that 'b' is only requested once.
// As a corollary, Load(P) will not call the underlying L.Load() at all if all of the packages in P have been previously loaded.
//
// Note that if an error occurred when loading a set of packages, the failure will be cached and no loading will be re-attempted.
// In particular, it's possible to poison the cache for P by loading [P, BadPkg] first.
//
// CachingLoader is concurrency-safe as long as the underlying loader's Load function is concurrency-safe.
type CachingLoader struct {
loader Loader
mu sync.Mutex // guards cache
cache map[string]*entry
}
// NewCachingLoader returns a new CachingLoader wrapped around a loader.
func NewCachingLoader(loader Loader) *CachingLoader {
	c := &CachingLoader{
		loader: loader,
		cache:  make(map[string]*entry),
	}
	return c
}
// entry is a single cache slot for one package. Waiters block on 'ready'
// until the goroutine that created the entry fills in 'res'.
type entry struct {
	pkgName string
	res     result
	ready   chan struct{} // closed when res is ready
}

// result pairs a loaded package with the error (if any) from the Load call
// that produced it.
type result struct {
	value *bazel.Package
	err   error
}
// Load loads packages using an underlying loader.
// It will load each package at most once, and is safe to call concurrently.
// The returned error is a concatenation of all errors from calls to the underlying loader that occurred in order to load 'packages'.
func (l *CachingLoader) Load(ctx context.Context, packages []string) (map[string]*bazel.Package, error) {
	// 'work' collects entries this call is responsible for loading;
	// 'all' collects every requested entry, owned-by-us or not.
	var work, all []*entry
	l.mu.Lock()
	for _, p := range packages {
		e, ok := l.cache[p]
		if !ok {
			// First request for this package: install a placeholder entry so
			// concurrent callers wait on it instead of loading it again.
			e = &entry{pkgName: p, ready: make(chan struct{})}
			l.cache[p] = e
			work = append(work, e)
		}
		all = append(all, e)
	}
	l.mu.Unlock()
	if len(work) > 0 {
		var pkgsToLoad []string
		for _, e := range work {
			pkgsToLoad = append(pkgsToLoad, e.pkgName)
		}
		result, err := l.loader.Load(ctx, pkgsToLoad)
		// A single Load error is recorded on every entry in this batch,
		// which is what makes cache "poisoning" (see type comment) possible.
		for _, e := range work {
			e.res.value = result[e.pkgName]
			e.res.err = err
			close(e.ready)
		}
	}
	result := make(map[string]*bazel.Package)
	var errors []interface{}
	for _, e := range all {
		// Block until whichever goroutine owns this entry has filled it in.
		<-e.ready
		if e.res.value != nil {
			result[e.pkgName] = e.res.value
		}
		if e.res.err != nil {
			errors = append(errors, e.res.err)
		}
	}
	if len(errors) != 0 {
		return nil, fmt.Errorf("Errors when loading packages: %v", errors)
	}
	return result, nil
}
// LoadRules loads the packages containing 'labels' and returns the bazel.Rules represented by them.
// The second return value is the map of loaded packages keyed by package name
// (nil when 'labels' is empty). Labels whose package or rule cannot be found
// are silently omitted from the result.
func LoadRules(ctx context.Context, loader Loader, labels []bazel.Label) (map[bazel.Label]*bazel.Rule, map[string]*bazel.Package, error) {
	if len(labels) == 0 {
		return map[bazel.Label]*bazel.Rule{}, nil, nil
	}
	pkgs, err := loader.Load(ctx, distinctPkgs(labels))
	if err != nil {
		return nil, nil, err
	}
	result := make(map[bazel.Label]*bazel.Rule)
	for _, label := range labels {
		pkgName, ruleName := label.Split()
		if pkg, ok := pkgs[pkgName]; ok {
			if rule, ok := pkg.Rules[ruleName]; ok {
				// 'label' is already a bazel.Label; the original's
				// bazel.Label(label) conversion was redundant.
				result[label] = rule
			}
		}
	}
	return result, pkgs, nil
}
// LoadPackageGroups loads the packages containing 'labels' and returns the
// bazel.PackageGroups represented by them. (The original comment said
// "bazel.Rules", copied from LoadRules.) Labels whose package or group cannot
// be found are silently omitted from the result.
func LoadPackageGroups(ctx context.Context, loader Loader, labels []bazel.Label) (map[bazel.Label]*bazel.PackageGroup, error) {
	if len(labels) == 0 {
		return map[bazel.Label]*bazel.PackageGroup{}, nil
	}
	pkgs, err := loader.Load(ctx, distinctPkgs(labels))
	if err != nil {
		return nil, err
	}
	result := make(map[bazel.Label]*bazel.PackageGroup)
	for _, label := range labels {
		pkgName, groupName := label.Split()
		if pkg, ok := pkgs[pkgName]; ok {
			if g, ok := pkg.PackageGroups[groupName]; ok {
				// 'label' is already a bazel.Label; no conversion needed.
				result[label] = g
			}
		}
	}
	return result, nil
}
// distinctPkgs returns the set of unique packages mentioned in a set of labels,
// in first-seen order.
func distinctPkgs(labels []bazel.Label) []string {
	seen := make(map[string]bool)
	var pkgs []string
	for _, label := range labels {
		name, _ := label.Split()
		if seen[name] {
			continue
		}
		seen[name] = true
		pkgs = append(pkgs, name)
	}
	return pkgs
}
// Siblings returns all the targets in all the packages that define the files in 'fileNames'.
// For example, if fileNames = {'foo/bar/Bar.java'}, and there's a BUILD file in foo/bar/, we return all the targets in the package defined by that BUILD file.
func Siblings(ctx context.Context, loader Loader, workspaceDir string, fileNames []string) (packages map[string]*bazel.Package, fileToPkgName map[string]string, err error) {
	tctx, endSpan := compat.NewLocalSpan(ctx, "Jade: Find BUILD packages of files")
	var wg sync.WaitGroup
	// mu guards pkgs, pkgsSet and fileToPkgName, which are written from the
	// per-file goroutines below.
	var mu sync.Mutex
	var pkgs []string
	pkgsSet := make(map[string]bool)
	fileToPkgName = make(map[string]string)
	for _, f := range fileNames {
		// Shadow the loop variable so each goroutine captures its own copy.
		f := f
		wg.Add(1)
		go func() {
			defer wg.Done()
			if p := findPackageName(tctx, workspaceDir, f); p != "" {
				mu.Lock()
				fileToPkgName[f] = p
				// Deduplicate package names while preserving discovery order.
				if !pkgsSet[p] {
					pkgs = append(pkgs, p)
					pkgsSet[p] = true
				}
				mu.Unlock()
			}
		}()
	}
	wg.Wait()
	endSpan()
	// The Load call deliberately uses the outer ctx: the tracing span above
	// only covers the package-name discovery phase.
	packages, err = loader.Load(ctx, pkgs)
	return packages, fileToPkgName, err
}
// findPackageName finds the name of the package that the file is in.
// It walks upward from the file's directory toward the workspace root and
// returns the first directory (relative to workspaceDir) containing a BUILD
// file, or "" if none is found.
func findPackageName(ctx context.Context, workspaceDir string, filename string) string {
	for dir := filepath.Dir(filename); dir != "."; dir = filepath.Dir(dir) {
		// Any stat outcome other than "does not exist" (including other
		// errors) is treated as the BUILD file being present.
		if _, err := compat.FileStat(ctx, filepath.Join(workspaceDir, dir, "BUILD")); !os.IsNotExist(err) {
			return dir
		}
	}
	return ""
}
// FilteringLoader is a Loader that loads using another Loader, after filtering the list of requested packages.
type FilteringLoader struct {
	// Loader is the underlying Loader which we delegate Load() calls to.
	Loader Loader
	// BlacklistedPackages is a set of packages we will not load.
	// The underlying Loader will not be asked to load packages in this set.
	BlacklistedPackages map[string]bool
}
// Load delegates to the underlying Loader after removing any package listed
// in BlacklistedPackages from 'packages'. Filtered-out packages simply do not
// appear in the returned map.
func (l *FilteringLoader) Load(ctx context.Context, packages []string) (map[string]*bazel.Package, error) {
	var filtered []string
	for _, p := range packages {
		if !l.BlacklistedPackages[p] {
			filtered = append(filtered, p)
		}
	}
	return l.Loader.Load(ctx, filtered)
}
|
package jirardeau
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"net/url"
"strings"
"github.com/pkg/errors"
)
// Jira holds connection settings for a JIRA instance; Url is the REST API
// base like https://jira.tld.
type Jira struct {
	Log      *log.Logger
	Login    string
	Password string
	Project  string
	ProjectID string
	Url      string
}

// FixVersion holds a JIRA Version as returned by the versions endpoint.
type FixVersion struct {
	Archived        bool   `json:"archived"`
	Id              string `json:"id"`
	Name            string `json:"name"`
	Overdue         bool   `json:"overdue"`
	ProjectID       int    `json:"projectId"`
	ReleaseDate     string `json:"releaseDate"`
	Released        bool   `json:"released"`
	Self            string `json:"self"`
	StartDate       string `json:"startDate"`
	UserReleaseDate string `json:"userReleaseDate"`
	UserStartDate   string `json:"userStartDate"`
}

// Issue holds issue data.
type Issue struct {
	Id     string            `json:"id"`
	Self   string            `json:"self"`
	Key    string            `json:"key"`
	Fields IssueFields       `json:"fields"`
	Expand string            `json:"expand"`
	Names  map[string]string `json:"names"`
}

// IssueFields holds the "fields" object of an Issue.
type IssueFields struct {
	Summary     string       `json:"summary"`
	IssueType   IssueType    `json:"issuetype"`
	FixVersions []FixVersion `json:"fixVersions"`
	Status      Status       `json:"status"`
	Created     string       `json:"created"`
	Description string       `json:"description"`
	Comment     CommentField `json:"comment"`
}

// IssueType describes the type of an Issue (bug, task, sub-task, ...).
type IssueType struct {
	Id          string `json:"id"`
	Self        string `json:"self"`
	Name        string `json:"name"`
	SubTask     bool   `json:"subtask"`
	Description string `json:"description"`
}

// CommentField holds the paginated list of an Issue's comments.
type CommentField struct {
	StartAt    int       `json:"startAt"`
	MaxResults int       `json:"maxResults"`
	Total      int       `json:"total"`
	Comments   []Comment `json:"comments"`
}

// Comment is a single comment of an Issue.
type Comment struct {
	Id           string `json:"id"`
	Self         string `json:"self"`
	Author       Author `json:"author"`
	UpdateAuthor Author `json:"updateAuthor"`
	Body         string `json:"body"`
	Created      string `json:"created"`
	Updated      string `json:"updated"`
}

// Author of an Issue or Comment.
type Author struct {
	Self         string `json:"self"`
	Active       bool   `json:"active"`
	Name         string `json:"name"`
	DisplayName  string `json:"displayName"`
	EmailAddress string `json:"emailAddress"`
}

// Status of an Issue (e.g. open, in progress, done).
type Status struct {
	Id          string `json:"id"`
	Self        string `json:"self"`
	Name        string `json:"name"`
	Description string `json:"description"`
}
// request performs an HTTP call against the JIRA REST API.
// relUrl is appended to jira.Url; reqBody (may be nil) is sent with
// content-type application/json and basic-auth credentials from the receiver.
// On success respBody holds the complete response body; an HTTP status >= 400
// is converted into an error that includes the body text.
func (jira *Jira) request(method, relUrl string, reqBody io.Reader) (respBody io.Reader, err error) {
	absUrl, err := url.Parse(jira.Url + relUrl)
	if err != nil {
		err = fmt.Errorf("Failed to parse %s and %s to URL: %s", jira.Url, relUrl, err)
		jira.Log.Println(err)
		return
	}
	jira.Log.Println("STRT", method, absUrl.String())
	req, err := http.NewRequest(method, absUrl.String(), reqBody)
	if err != nil {
		err = fmt.Errorf("Failed to build HTTP request %s %s: %s", method, absUrl.String(), err)
		jira.Log.Println(err)
		return
	}
	req.Header.Set("content-type", "application/json")
	req.SetBasicAuth(jira.Login, jira.Password)
	var buf bytes.Buffer
	resp, err := http.DefaultClient.Do(req)
	// resp can be non-nil even when Do returns an error (e.g. some redirect
	// failures), so the body is drained whenever a response exists.
	if resp != nil {
		defer resp.Body.Close()
		// NOTE(review): this assignment overwrites any error returned by Do
		// above — presumably intentional, but worth confirming.
		_, err = buf.ReadFrom(resp.Body)
		if err != nil {
			err = fmt.Errorf("Failed to read response from JIRA request %s %s: %s", method, absUrl.String(), err)
			jira.Log.Println(err)
			return
		}
		respBody = &buf
		if resp.StatusCode >= 400 {
			err = fmt.Errorf("Failed to JIRA request %s %s with HTTP code %d: %s", method, absUrl.String(), resp.StatusCode, buf.String())
			jira.Log.Println(err)
			return
		}
	}
	if err != nil {
		err = fmt.Errorf("Failed to JIRA request %s %s: %s", method, absUrl.String(), err)
		jira.Log.Println(err)
		return
	}
	// Reaching here implies Do succeeded, so resp is non-nil.
	jira.Log.Println("StatusCode:", resp.StatusCode)
	jira.Log.Println("Headers:", resp.Header)
	jira.Log.Println("DONE", method, absUrl.String())
	return
}
// GetFixVersions returns the versions of Jira.Project, fetched from the
// project's /versions endpoint and decoded from JSON.
func (jira *Jira) GetFixVersions() (releases []FixVersion, err error) {
	relUrl := fmt.Sprintf("/project/%s/versions", jira.Project)
	resp, err := jira.request("GET", relUrl, nil)
	if err != nil {
		return
	}
	err = json.NewDecoder(resp).Decode(&releases)
	if err != nil {
		return
	}
	return
}
// GetIssues returns issues of the fixVersion specified by FixVersion,
// queried via JQL and keyed by issue key (e.g. "PROJ-123").
func (jira *Jira) GetIssues(fixVersion FixVersion) (issues map[string]Issue, err error) {
	// Anonymous wrapper matching the JIRA /search response envelope.
	var result struct {
		Issues []Issue `json:"issues"`
	}
	parameters := url.Values{}
	parameters.Add("jql", fmt.Sprintf(`project = %s AND fixVersion = "%s"`, jira.Project, fixVersion.Name))
	parameters.Add("fields", "id,key,self,summary,issuetype,status,description,created")
	relUrl := fmt.Sprintf("/search?%s", parameters.Encode())
	resp, err := jira.request("GET", relUrl, nil)
	if err != nil {
		return
	}
	err = json.NewDecoder(resp).Decode(&result)
	if err != nil {
		err = errors.Wrap(err, "decode failed")
		return
	}
	// Re-key the flat list by issue key for convenient lookup.
	issues = make(map[string]Issue)
	for _, issue := range result.Issues {
		issues[issue.Key] = issue
	}
	return
}
// GetIssue fetches a single issue by id or key.
// expand lists JIRA entities to expand in the response (joined with commas);
// an empty or nil slice adds no expand parameter.
func (jira *Jira) GetIssue(id string, expand []string) (issue Issue, err error) {
	parameters := url.Values{}
	// Check emptiness rather than nil-ness: the original's `expand != nil`
	// produced a spurious "expand=" parameter for a non-nil empty slice.
	if len(expand) > 0 {
		parameters.Add("expand", strings.Join(expand, ","))
	}
	relUrl := fmt.Sprintf("/issue/%s?%s", id, parameters.Encode())
	resp, err := jira.request("GET", relUrl, nil)
	if err != nil {
		return
	}
	err = json.NewDecoder(resp).Decode(&issue)
	if err != nil {
		err = errors.Wrap(err, "decode failed")
		return
	}
	return
}
Draft create issue
package jirardeau
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"net/url"
"strings"
"github.com/pkg/errors"
)
// Jira holds connection settings for a JIRA instance; Url is the REST API
// base like https://jira.tld.
type Jira struct {
	Log      *log.Logger
	Login    string
	Password string
	Project  string
	ProjectID string
	Url      string
}

// Project holds a JIRA Project reference.
type Project struct {
	Id   string `json:"id"`
	Self string `json:"self"`
	Key  string `json:"key"`
	Name string `json:"name"`
}

// FixVersion holds a JIRA Version as returned by the versions endpoint.
type FixVersion struct {
	Archived        bool   `json:"archived"`
	Id              string `json:"id"`
	Name            string `json:"name"`
	Overdue         bool   `json:"overdue"`
	ProjectID       int    `json:"projectId"`
	ReleaseDate     string `json:"releaseDate"`
	Released        bool   `json:"released"`
	Self            string `json:"self"`
	StartDate       string `json:"startDate"`
	UserReleaseDate string `json:"userReleaseDate"`
	UserStartDate   string `json:"userStartDate"`
}

// Issue holds issue data.
type Issue struct {
	Id     string            `json:"id"`
	Self   string            `json:"self"`
	Key    string            `json:"key"`
	Fields IssueFields       `json:"fields"`
	Expand string            `json:"expand"`
	Names  map[string]string `json:"names"`
}

// IssueFields holds the "fields" object of an Issue.
type IssueFields struct {
	Project     Project      `json:"project"`
	Summary     string       `json:"summary"`
	IssueType   IssueType    `json:"issuetype"`
	FixVersions []FixVersion `json:"fixVersions"`
	Status      Status       `json:"status"`
	Created     string       `json:"created"`
	Description string       `json:"description"`
	Comment     CommentField `json:"comment"`
}

// CustomField maps a custom-field name to its value.
type CustomField map[string]string

// IssueType describes the type of an Issue (bug, task, sub-task, ...).
type IssueType struct {
	Id          string `json:"id"`
	Self        string `json:"self"`
	Name        string `json:"name"`
	SubTask     bool   `json:"subtask"`
	Description string `json:"description"`
}

// CommentField holds the paginated list of an Issue's comments.
type CommentField struct {
	StartAt    int       `json:"startAt"`
	MaxResults int       `json:"maxResults"`
	Total      int       `json:"total"`
	Comments   []Comment `json:"comments"`
}

// Comment is a single comment of an Issue.
type Comment struct {
	Id           string `json:"id"`
	Self         string `json:"self"`
	Author       Author `json:"author"`
	UpdateAuthor Author `json:"updateAuthor"`
	Body         string `json:"body"`
	Created      string `json:"created"`
	Updated      string `json:"updated"`
}

// Author of an Issue or Comment.
type Author struct {
	Self         string `json:"self"`
	Active       bool   `json:"active"`
	Name         string `json:"name"`
	DisplayName  string `json:"displayName"`
	EmailAddress string `json:"emailAddress"`
}

// Status of an Issue (e.g. open, in progress, done).
type Status struct {
	Id          string `json:"id"`
	Self        string `json:"self"`
	Name        string `json:"name"`
	Description string `json:"description"`
}
// request performs an HTTP call against the JIRA REST API.
// relUrl is appended to jira.Url; reqBody (may be nil) is sent with
// content-type application/json and basic-auth credentials from the receiver.
// On success respBody holds the complete response body; an HTTP status >= 400
// is converted into an error that includes the body text.
func (jira *Jira) request(method, relUrl string, reqBody io.Reader) (respBody io.Reader, err error) {
	absUrl, err := url.Parse(jira.Url + relUrl)
	if err != nil {
		err = fmt.Errorf("Failed to parse %s and %s to URL: %s", jira.Url, relUrl, err)
		jira.Log.Println(err)
		return
	}
	jira.Log.Println("STRT", method, absUrl.String())
	req, err := http.NewRequest(method, absUrl.String(), reqBody)
	if err != nil {
		err = fmt.Errorf("Failed to build HTTP request %s %s: %s", method, absUrl.String(), err)
		jira.Log.Println(err)
		return
	}
	req.Header.Set("content-type", "application/json")
	req.SetBasicAuth(jira.Login, jira.Password)
	var buf bytes.Buffer
	resp, err := http.DefaultClient.Do(req)
	// resp can be non-nil even when Do returns an error (e.g. some redirect
	// failures), so the body is drained whenever a response exists.
	if resp != nil {
		defer resp.Body.Close()
		// NOTE(review): this assignment overwrites any error returned by Do
		// above — presumably intentional, but worth confirming.
		_, err = buf.ReadFrom(resp.Body)
		if err != nil {
			err = fmt.Errorf("Failed to read response from JIRA request %s %s: %s", method, absUrl.String(), err)
			jira.Log.Println(err)
			return
		}
		respBody = &buf
		if resp.StatusCode >= 400 {
			err = fmt.Errorf("Failed to JIRA request %s %s with HTTP code %d: %s", method, absUrl.String(), resp.StatusCode, buf.String())
			jira.Log.Println(err)
			return
		}
	}
	if err != nil {
		err = fmt.Errorf("Failed to JIRA request %s %s: %s", method, absUrl.String(), err)
		jira.Log.Println(err)
		return
	}
	// Reaching here implies Do succeeded, so resp is non-nil.
	jira.Log.Println("StatusCode:", resp.StatusCode)
	jira.Log.Println("Headers:", resp.Header)
	jira.Log.Println("DONE", method, absUrl.String())
	return
}
// GetFixVersions returns the versions of Jira.Project, fetched from the
// project's /versions endpoint and decoded from JSON.
func (jira *Jira) GetFixVersions() (releases []FixVersion, err error) {
	resp, err := jira.request("GET", fmt.Sprintf("/project/%s/versions", jira.Project), nil)
	if err != nil {
		return
	}
	err = json.NewDecoder(resp).Decode(&releases)
	return
}
// GetIssues returns issues of the fixVersion specified by FixVersion,
// queried via JQL and keyed by issue key (e.g. "PROJ-123").
func (jira *Jira) GetIssues(fixVersion FixVersion) (issues map[string]Issue, err error) {
	// Anonymous wrapper matching the JIRA /search response envelope.
	var searchResult struct {
		Issues []Issue `json:"issues"`
	}
	query := url.Values{}
	query.Set("jql", fmt.Sprintf(`project = %s AND fixVersion = "%s"`, jira.Project, fixVersion.Name))
	query.Set("fields", "id,key,self,summary,issuetype,status,description,created")
	resp, err := jira.request("GET", "/search?"+query.Encode(), nil)
	if err != nil {
		return
	}
	if err = json.NewDecoder(resp).Decode(&searchResult); err != nil {
		err = errors.Wrap(err, "decode failed")
		return
	}
	// Re-key the flat list by issue key for convenient lookup.
	issues = make(map[string]Issue, len(searchResult.Issues))
	for _, issue := range searchResult.Issues {
		issues[issue.Key] = issue
	}
	return
}
// GetIssue fetches a single issue by id or key.
// expand lists JIRA entities to expand in the response (joined with commas);
// an empty or nil slice adds no expand parameter.
func (jira *Jira) GetIssue(id string, expand []string) (issue Issue, err error) {
	parameters := url.Values{}
	// Check emptiness rather than nil-ness: the original's `expand != nil`
	// produced a spurious "expand=" parameter for a non-nil empty slice.
	if len(expand) > 0 {
		parameters.Add("expand", strings.Join(expand, ","))
	}
	relUrl := fmt.Sprintf("/issue/%s?%s", id, parameters.Encode())
	resp, err := jira.request("GET", relUrl, nil)
	if err != nil {
		return
	}
	err = json.NewDecoder(resp).Decode(&issue)
	if err != nil {
		err = errors.Wrap(err, "decode failed")
		return
	}
	return
}
// CreateIssue
func (jira *Jira) createIssue(issue Issue) (err error) {
_, err = jira.request("POST", "/issue", nil)
if err != nil {
return
}
return
} |
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
goRuntime "runtime"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"text/tabwriter"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_4"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/discovery"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/runtime"
sshutil "k8s.io/kubernetes/pkg/ssh"
"k8s.io/kubernetes/pkg/types"
uexec "k8s.io/kubernetes/pkg/util/exec"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/system"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait"
utilyaml "k8s.io/kubernetes/pkg/util/yaml"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/watch"
"github.com/blang/semver"
"golang.org/x/crypto/ssh"
"golang.org/x/net/websocket"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
)
const (
	// How long to wait for the pod to be listable
	PodListTimeout = time.Minute
	// Initial pod start can be delayed O(minutes) by slow docker pulls
	// TODO: Make this 30 seconds once #4566 is resolved.
	PodStartTimeout = 5 * time.Minute
	// How long to wait for the pod to no longer be running
	podNoLongerRunningTimeout = 30 * time.Second
	// If there are any orphaned namespaces to clean up, this test is running
	// on a long lived cluster. A long wait here is preferable to spurious test
	// failures caused by leaked resources from a previous test run.
	NamespaceCleanupTimeout = 15 * time.Minute
	// Some pods can take much longer to get ready due to volume attach/detach latency.
	slowPodStartTimeout = 15 * time.Minute
	// How long to wait for a service endpoint to be resolvable.
	ServiceStartTimeout = 1 * time.Minute
	// String used to mark pod deletion
	nonExist = "NonExist"
	// How often to Poll pods, nodes and claims.
	Poll = 2 * time.Second
	// service accounts are provisioned after namespace creation
	// a service account is required to support pod creation in a namespace as part of admission control
	ServiceAccountProvisionTimeout = 2 * time.Minute
	// How long to try single API calls (like 'get' or 'list'). Used to prevent
	// transient failures from failing tests.
	// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
	SingleCallTimeout = 5 * time.Minute
	// How long nodes have to be "ready" when a test begins. They should already
	// be "ready" before the test starts, so this is small.
	NodeReadyInitialTimeout = 20 * time.Second
	// How long pods have to be "ready" when a test begins.
	PodReadyBeforeTimeout = 5 * time.Minute
	// How long pods have to become scheduled onto nodes
	podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second)
	// How long to wait for pods/services/endpoints to respond to requests.
	podRespondingTimeout     = 2 * time.Minute
	ServiceRespondingTimeout = 2 * time.Minute
	EndpointRegisterTimeout  = time.Minute
	// How long claims have to become dynamically provisioned
	ClaimProvisionTimeout = 5 * time.Minute
	// When these values are updated, also update cmd/kubelet/app/options/options.go
	currentPodInfraContainerImageName    = "gcr.io/google_containers/pause"
	currentPodInfraContainerImageVersion = "3.0"
	// How long each node is given during a process that restarts all nodes
	// before the test is considered failed. (Note that the total time to
	// restart all nodes will be this number times the number of nodes.)
	RestartPerNodeTimeout = 5 * time.Minute
	// How often to Poll the status of a restart.
	RestartPoll = 20 * time.Second
	// How long a node is allowed to become "Ready" after it is restarted before
	// the test is considered failed.
	RestartNodeReadyAgainTimeout = 5 * time.Minute
	// How long a pod is allowed to become "running" and "ready" after a node
	// restart before test is considered failed.
	RestartPodReadyAgainTimeout = 5 * time.Minute
	// Number of times we want to retry Updates in case of conflict
	UpdateRetries = 5
	// Number of objects that gc can delete in a second.
	// GC issues 2 requests for single delete.
	gcThroughput = 10
)

var (
	// Label allocated to the image puller static pod that runs on each node
	// before e2es.
	ImagePullerLabels = map[string]string{"name": "e2e-image-puller"}
	// For parsing Kubectl version for version-skewed testing.
	gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
	// Slice of regexps for names of pods that have to be running to consider a Node "healthy"
	requiredPerNodePods = []*regexp.Regexp{
		regexp.MustCompile(".*kube-proxy.*"),
		regexp.MustCompile(".*fluentd-elasticsearch.*"),
		regexp.MustCompile(".*node-problem-detector.*"),
	}
)
// GetServerArchitecture fetches the architecture of the cluster's apiserver,
// defaulting to "amd64" when the version query fails, the platform string is
// empty, or the platform string is malformed. (The original indexed
// Split(...)[1] unconditionally, which panics on a platform string that
// contains no "/".)
func GetServerArchitecture(c *client.Client) string {
	sVer, err := c.Discovery().ServerVersion()
	if err != nil || sVer.Platform == "" {
		// If we failed to get the server version for some reason, default to amd64.
		return "amd64"
	}
	// Split the platform string into OS and Arch separately.
	// The platform string may for example be "linux/amd64", "linux/arm" or "windows/amd64".
	osArchArray := strings.Split(sVer.Platform, "/")
	if len(osArchArray) < 2 {
		return "amd64"
	}
	return osArchArray[1]
}
// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c *client.Client) string {
	arch := GetServerArchitecture(c)
	return currentPodInfraContainerImageName + "-" + arch + ":" + currentPodInfraContainerImageVersion
}

// GetPauseImageNameForHostArch fetches the pause image name for the same architecture the test is running on.
func GetPauseImageNameForHostArch() string {
	arch := goRuntime.GOARCH
	return currentPodInfraContainerImageName + "-" + arch + ":" + currentPodInfraContainerImageVersion
}
// SubResource proxy should have been functional in v1.0.0, but SubResource
// proxy via tunneling is known to be broken in v1.0. See
// https://github.com/kubernetes/kubernetes/pull/15224#issuecomment-146769463
//
// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively
// in v1.3).
var SubResourcePodProxyVersion = version.MustParse("v1.1.0")

// Minimum server version at which service and node sub-resource proxying is
// available (see GetServicesProxyRequest).
var subResourceServiceAndNodeProxyVersion = version.MustParse("v1.2.0")
// GetServicesProxyRequest shapes 'request' into a services-proxy request,
// choosing the sub-resource form when the server supports it and the legacy
// "proxy" prefix form otherwise.
func GetServicesProxyRequest(c *client.Client, request *restclient.Request) (*restclient.Request, error) {
	available, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c)
	if err != nil {
		return nil, err
	}
	if !available {
		return request.Prefix("proxy").Resource("services"), nil
	}
	return request.Resource("services").SubResource("proxy"), nil
}
// RunId is the unique identifier of the e2e run.
var RunId = uuid.NewUUID()

// CreateTestingNSFn creates a namespace for a test; pluggable so suites can
// customize namespace creation.
type CreateTestingNSFn func(baseName string, c *client.Client, labels map[string]string) (*api.Namespace, error)

// ContainerFailures records the last termination state and restart count of a
// container, used when summarizing failed pods.
type ContainerFailures struct {
	status   *api.ContainerStateTerminated
	Restarts int
}

// GetMasterHost returns the host portion of the configured master URL;
// fails the test if the URL does not parse.
func GetMasterHost() string {
	masterUrl, err := url.Parse(TestContext.Host)
	ExpectNoError(err)
	return masterUrl.Host
}
// Convenient wrapper around cache.Store that returns list of api.Pod instead of interface{}.
type PodStore struct {
	cache.Store
	stopCh    chan struct{}    // closed by Stop() to halt the reflector
	reflector *cache.Reflector // keeps the store in sync with the apiserver
}

// NewPodStore starts a reflector that watches pods in 'namespace' matching
// the given label and field selectors, and returns a PodStore backed by it.
// Call Stop() on the result to shut the reflector down.
func NewPodStore(c *client.Client, namespace string, label labels.Selector, field fields.Selector) *PodStore {
	lw := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			options.LabelSelector = label
			options.FieldSelector = field
			return c.Pods(namespace).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			options.LabelSelector = label
			options.FieldSelector = field
			return c.Pods(namespace).Watch(options)
		},
	}
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	stopCh := make(chan struct{})
	// Resync period 0: rely purely on watch events after the initial list.
	reflector := cache.NewReflector(lw, &api.Pod{}, store, 0)
	reflector.RunUntil(stopCh)
	return &PodStore{store, stopCh, reflector}
}
// List returns every pod currently in the underlying cache as a typed
// *api.Pod slice.
func (s *PodStore) List() []*api.Pod {
	objects := s.Store.List()
	// Pre-size the result; the original appended into a zero-capacity slice,
	// reallocating as it grew.
	pods := make([]*api.Pod, 0, len(objects))
	for _, o := range objects {
		pods = append(pods, o.(*api.Pod))
	}
	return pods
}
// Stop shuts down the reflector feeding this store by closing its stop
// channel. Must be called at most once.
func (s *PodStore) Stop() {
	close(s.stopCh)
}
// RCConfig describes a replication controller to create for a test, plus
// knobs controlling how its startup is polled and reported.
type RCConfig struct {
	Client        *client.Client
	Image         string
	Command       []string
	Name          string
	Namespace     string
	PollInterval  time.Duration
	Timeout       time.Duration
	PodStatusFile *os.File
	Replicas      int
	CpuRequest    int64 // millicores
	CpuLimit      int64 // millicores
	MemRequest    int64 // bytes
	MemLimit      int64 // bytes
	ReadinessProbe *api.Probe
	DNSPolicy      *api.DNSPolicy
	// Env vars, set the same for every pod.
	Env map[string]string
	// Extra labels added to every pod.
	Labels map[string]string
	// Node selector for pods in the RC.
	NodeSelector map[string]string
	// Ports to declare in the container (map of name to containerPort).
	Ports map[string]int
	// Ports to declare in the container as host and container ports.
	HostPorts map[string]int
	Volumes      []api.Volume
	VolumeMounts []api.VolumeMount
	// Pointer to a list of pods; if non-nil, will be set to a list of pods
	// created by this RC by RunRC.
	CreatedPods *[]*api.Pod
	// Maximum allowable container failures. If exceeded, RunRC returns an error.
	// Defaults to replicas*0.1 if unspecified.
	MaxContainerFailures *int
	// If set to false starting RC will print progress, otherwise only errors will be printed.
	Silent bool
}

// DeploymentConfig reuses RCConfig to describe a Deployment under test.
type DeploymentConfig struct {
	RCConfig
}

// ReplicaSetConfig reuses RCConfig to describe a ReplicaSet under test.
type ReplicaSetConfig struct {
	RCConfig
}
// nowStamp returns the current time formatted with millisecond precision,
// used to prefix every log line.
func nowStamp() string {
	return time.Now().Format(time.StampMilli)
}

// log writes a timestamped, level-tagged, printf-formatted line to the
// ginkgo writer.
func log(level string, format string, args ...interface{}) {
	fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}

// Logf logs an informational message to the ginkgo writer.
func Logf(format string, args ...interface{}) {
	log("INFO", format, args...)
}
// Failf logs the formatted message and aborts the current ginkgo test.
// Fixes two defects in the original: the failure was logged at "INFO" level,
// making failures hard to spot, and the pre-formatted message was passed to
// log() as a format string, so any '%' in it was re-interpreted.
func Failf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	log("FAIL", "%s", msg)
	Fail(nowStamp()+": "+msg, 1)
}
// Skipf logs the formatted message and skips the current ginkgo test.
// NOTE(review): msg is passed to log() as a format string; a '%' in the
// formatted message would be re-interpreted — confirm whether that matters.
func Skipf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	log("INFO", msg)
	Skip(nowStamp() + ": " + msg)
}

// SkipUnlessNodeCountIsAtLeast skips the test when the cluster has fewer
// than minNodeCount nodes.
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
	if TestContext.CloudConfig.NumNodes < minNodeCount {
		Skipf("Requires at least %d nodes (not %d)", minNodeCount, TestContext.CloudConfig.NumNodes)
	}
}
// SkipUnlessAtLeast skips the test with 'message' when value < minValue.
// The message is emitted verbatim via "%s"; the original passed it directly
// as a format string, so a caller message containing '%' was garbled.
func SkipUnlessAtLeast(value int, minValue int, message string) {
	if value < minValue {
		Skipf("%s", message)
	}
}
// SkipIfProviderIs skips the test when the configured provider is one of
// the given unsupported providers.
func SkipIfProviderIs(unsupportedProviders ...string) {
	if ProviderIs(unsupportedProviders...) {
		Skipf("Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider)
	}
}

// SkipUnlessProviderIs skips the test unless the configured provider is one
// of the given supported providers.
func SkipUnlessProviderIs(supportedProviders ...string) {
	if !ProviderIs(supportedProviders...) {
		Skipf("Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider)
	}
}
// SkipIfContainerRuntimeIs skips the test when the configured container
// runtime matches any of the given runtime names.
func SkipIfContainerRuntimeIs(runtimes ...string) {
	for _, rt := range runtimes {
		if rt == TestContext.ContainerRuntime {
			Skipf("Not supported under container runtime %s", rt)
		}
	}
}
// ProviderIs reports whether TestContext.Provider case-insensitively matches
// any of the given provider names.
func ProviderIs(providers ...string) bool {
	for _, provider := range providers {
		// EqualFold compares without allocating two lowered copies per
		// iteration, unlike the original ToLower(a) == ToLower(b).
		if strings.EqualFold(provider, TestContext.Provider) {
			return true
		}
	}
	return false
}
// SkipUnlessServerVersionGTE skips the test when the server version is older
// than v; fails the test if the version cannot be determined.
func SkipUnlessServerVersionGTE(v semver.Version, c discovery.ServerVersionInterface) {
	gte, err := ServerVersionGTE(v, c)
	if err != nil {
		Failf("Failed to get server version: %v", err)
	}
	if !gte {
		Skipf("Not supported for server versions before %q", v)
	}
}

// Detects whether the federation namespace exists in the underlying cluster
func SkipUnlessFederated(c *client.Client) {
	federationNS := os.Getenv("FEDERATION_NAMESPACE")
	if federationNS == "" {
		federationNS = "federation"
	}
	_, err := c.Namespaces().Get(federationNS)
	if err != nil {
		if apierrs.IsNotFound(err) {
			Skipf("Could not find federation namespace %s: skipping federated test", federationNS)
		} else {
			Failf("Unexpected error getting namespace: %v", err)
		}
	}
}

// SkipIfMissingResource skips the test when the given group/version/resource
// is not served by the cluster; fails on any other error.
func SkipIfMissingResource(clientPool dynamic.ClientPool, gvr unversioned.GroupVersionResource, namespace string) {
	dynamicClient, err := clientPool.ClientForGroupVersion(gvr.GroupVersion())
	if err != nil {
		Failf("Unexpected error getting dynamic client for %v: %v", gvr.GroupVersion(), err)
	}
	apiResource := unversioned.APIResource{Name: gvr.Resource, Namespaced: true}
	_, err = dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{})
	if err != nil {
		// not all resources support list, so we ignore those
		if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
			Skipf("Could not find %s resource, skipping test: %#v", gvr, err)
		}
		Failf("Unexpected error getting %v: %v", gvr, err)
	}
}
// ProvidersWithSSH are those providers where each node is accessible with SSH
var ProvidersWithSSH = []string{"gce", "gke", "aws"}

// providersWithMasterSSH are those providers where master node is accessible with SSH
var providersWithMasterSSH = []string{"gce", "gke", "kubemark", "aws"}

// podCondition is a predicate over a pod, used by wait helpers; returning an
// error aborts the wait.
type podCondition func(pod *api.Pod) (bool, error)
// podReady returns whether pod has a condition of Ready with a status of true.
// TODO: should be replaced with api.IsPodReady
func podReady(pod *api.Pod) bool {
	conditions := pod.Status.Conditions
	for i := range conditions {
		if conditions[i].Type == api.PodReady && conditions[i].Status == api.ConditionTrue {
			return true
		}
	}
	return false
}
// logPodStates logs basic info of provided pods for debugging.
func logPodStates(pods []api.Pod) {
	// Find maximum widths for pod, node, and phase strings for column printing.
	// NOTE(review): maxGraceW is never widened in the loop below, so grace
	// strings longer than len("GRACE") break column alignment — confirm
	// whether that is acceptable.
	maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE")
	for i := range pods {
		pod := &pods[i]
		if len(pod.ObjectMeta.Name) > maxPodW {
			maxPodW = len(pod.ObjectMeta.Name)
		}
		if len(pod.Spec.NodeName) > maxNodeW {
			maxNodeW = len(pod.Spec.NodeName)
		}
		if len(pod.Status.Phase) > maxPhaseW {
			maxPhaseW = len(pod.Status.Phase)
		}
	}
	// Increase widths by one to separate by a single space.
	maxPodW++
	maxNodeW++
	maxPhaseW++
	maxGraceW++
	// Log pod info. * does space padding, - makes them left-aligned.
	Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
		maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
	for _, pod := range pods {
		grace := ""
		if pod.DeletionGracePeriodSeconds != nil {
			grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
		}
		Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
			maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
	}
	Logf("") // Final empty line helps for readability.
}
// errorBadPodsStates create error message of basic info of bad pods for debugging.
func errorBadPodsStates(badPods []api.Pod, desiredPods int, ns string, timeout time.Duration) string {
	errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in the desired state in %v\n", len(badPods), desiredPods, ns, timeout)
	// Print bad pods info only when there are at most 10 of them; otherwise
	// the message would be unwieldy and the details are already in the log.
	if len(badPods) > 10 {
		return errStr + "There are too many bad pods. Please check log for details."
	}
	// Render a tab-aligned table (POD/NODE/PHASE/GRACE/CONDITIONS) into a buffer.
	buf := bytes.NewBuffer(nil)
	w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
	fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
	for _, badPod := range badPods {
		grace := ""
		if badPod.DeletionGracePeriodSeconds != nil {
			grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
		}
		podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%s",
			badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
		fmt.Fprintln(w, podInfo)
	}
	w.Flush()
	return errStr + buf.String()
}
// PodRunningReady checks whether pod p's phase is running and it has a ready
// condition of status true.
func PodRunningReady(p *api.Pod) (bool, error) {
	// Reject anything that is not in the Running phase.
	if phase := p.Status.Phase; phase != api.PodRunning {
		return false, fmt.Errorf("want pod '%s' on '%s' to be '%v' but was '%v'",
			p.ObjectMeta.Name, p.Spec.NodeName, api.PodRunning, phase)
	}
	// Running and Ready: success.
	if podReady(p) {
		return true, nil
	}
	return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v",
		p.ObjectMeta.Name, p.Spec.NodeName, api.PodReady, api.ConditionTrue, p.Status.Conditions)
}
// PodRunningReadyOrSucceeded accepts a pod that has either already Succeeded
// or is currently Running and Ready (per PodRunningReady).
func PodRunningReadyOrSucceeded(p *api.Pod) (bool, error) {
	if p.Status.Phase != api.PodSucceeded {
		return PodRunningReady(p)
	}
	// Succeeded pods count as done regardless of readiness.
	return true, nil
}
// PodNotReady checks whether pod p's has a ready condition of status false.
func PodNotReady(p *api.Pod) (bool, error) {
	// The pod must NOT be Ready; a Ready pod is an error here.
	if !podReady(p) {
		return true, nil
	}
	return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v",
		p.ObjectMeta.Name, p.Spec.NodeName, api.PodReady, api.ConditionFalse, p.Status.Conditions)
}
// hasReplicationControllersForPod reports whether any Replication Controller
// in the given list selects the pod by labels.
func hasReplicationControllersForPod(rcs *api.ReplicationControllerList, pod api.Pod) bool {
	podLabels := labels.Set(pod.ObjectMeta.Labels)
	for i := range rcs.Items {
		if labels.SelectorFromSet(rcs.Items[i].Spec.Selector).Matches(podLabels) {
			return true
		}
	}
	return false
}
// WaitForPodsSuccess waits till all labels matching the given selector enter
// the Success state. The caller is expected to only invoke this method once the
// pods have been created.
// On timeout the bad pods and all label-matching pods are logged before an
// error is returned.
func WaitForPodsSuccess(c *client.Client, ns string, successPodLabels map[string]string, timeout time.Duration) error {
	successPodSelector := labels.SelectorFromSet(successPodLabels)
	// badPods is captured by the poll closure below so the final value is
	// available for logging after the poll gives up.
	start, badPods := time.Now(), []api.Pod{}
	if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
		podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: successPodSelector})
		if err != nil {
			// List errors are transient; keep polling.
			Logf("Error getting pods in namespace %q: %v", ns, err)
			return false, nil
		}
		// No matching pods at all is treated as success (nothing to wait for).
		if len(podList.Items) == 0 {
			Logf("Waiting for pods to enter Success, but no pods in %q match label %v", ns, successPodLabels)
			return true, nil
		}
		badPods = []api.Pod{}
		for _, pod := range podList.Items {
			if pod.Status.Phase != api.PodSucceeded {
				badPods = append(badPods, pod)
			}
		}
		successPods := len(podList.Items) - len(badPods)
		Logf("%d / %d pods in namespace %q are in Success state (%d seconds elapsed)",
			successPods, len(podList.Items), ns, int(time.Since(start).Seconds()))
		if len(badPods) == 0 {
			return true, nil
		}
		return false, nil
	}) != nil {
		logPodStates(badPods)
		LogPodsWithLabels(c, ns, successPodLabels)
		return fmt.Errorf("Not all pods in namespace %q are successful within %v", ns, timeout)
	}
	return nil
}
// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
// namespace ns are either running and ready, or failed but controlled by a
// controller. Also, it ensures that at least minPods are running and
// ready. It has separate behavior from other 'wait for' pods functions in
// that it requests the list of pods on every iteration. This is useful, for
// example, in cluster startup, because the number of pods increases while
// waiting.
// If ignoreLabels is not empty, pods matching this selector are ignored and
// this function waits for minPods to enter Running/Ready and for all pods
// matching ignoreLabels to enter Success phase. Otherwise an error is returned
// even if there are minPods pods, some of which are in Running/Ready
// and some in Success. This is to allow the client to decide if "Success"
// means "Ready" or not.
func WaitForPodsRunningReady(c *client.Client, ns string, minPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
	ignoreSelector := labels.SelectorFromSet(ignoreLabels)
	start := time.Now()
	Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
		timeout, minPods, ns)
	wg := sync.WaitGroup{}
	wg.Add(1)
	// waitForSuccessError is written only by the goroutine below and read
	// only after wg.Wait(), so no extra synchronization is needed.
	var waitForSuccessError error
	// badPods/desiredPods are updated inside the poll closure and read after
	// the poll returns (same goroutine) for the error report.
	badPods := []api.Pod{}
	desiredPods := 0
	go func() {
		// Concurrently wait for all ignored (e.g. cleanup) pods to succeed.
		waitForSuccessError = WaitForPodsSuccess(c, ns, ignoreLabels, timeout)
		wg.Done()
	}()
	if wait.PollImmediate(Poll, timeout, func() (bool, error) {
		// We get the new list of pods, replication controllers, and
		// replica sets in every iteration because more pods come
		// online during startup and we want to ensure they are also
		// checked.
		replicas, replicaOk := int32(0), int32(0)
		rcList, err := c.ReplicationControllers(ns).List(api.ListOptions{})
		if err != nil {
			Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
			return false, nil
		}
		for _, rc := range rcList.Items {
			replicas += rc.Spec.Replicas
			replicaOk += rc.Status.ReadyReplicas
		}
		rsList, err := c.Extensions().ReplicaSets(ns).List(api.ListOptions{})
		if err != nil {
			Logf("Error getting replication sets in namespace %q: %v", ns, err)
			return false, nil
		}
		for _, rs := range rsList.Items {
			replicas += rs.Spec.Replicas
			replicaOk += rs.Status.ReadyReplicas
		}
		podList, err := c.Pods(ns).List(api.ListOptions{})
		if err != nil {
			Logf("Error getting pods in namespace '%s': %v", ns, err)
			return false, nil
		}
		nOk := int32(0)
		badPods = []api.Pod{}
		desiredPods = len(podList.Items)
		for _, pod := range podList.Items {
			// Pods matching ignoreLabels are handled by the goroutine above.
			if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
				Logf("%v in state %v, ignoring", pod.Name, pod.Status.Phase)
				continue
			}
			if res, err := PodRunningReady(&pod); res && err == nil {
				nOk++
			} else {
				if pod.Status.Phase != api.PodFailed {
					Logf("The status of Pod %s is %s, waiting for it to be either Running or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
					badPods = append(badPods, pod)
				} else if _, ok := pod.Annotations[api.CreatedByAnnotation]; !ok {
					// Failed AND uncontrolled: nothing will restart it, count it bad.
					Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
					badPods = append(badPods, pod)
				}
				//ignore failed pods that are controlled by some controller
			}
		}
		Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
			nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
		Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
		// Done only when every controller's replicas are ready, the minimum
		// pod count is met, and no bad pods remain.
		if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
			return true, nil
		}
		logPodStates(badPods)
		return false, nil
	}) != nil {
		return errors.New(errorBadPodsStates(badPods, desiredPods, ns, timeout))
	}
	wg.Wait()
	if waitForSuccessError != nil {
		return waitForSuccessError
	}
	return nil
}
// podFromManifest reads the YAML manifest at filename and decodes it into a Pod.
func podFromManifest(filename string) (*api.Pod, error) {
	Logf("Parsing pod from %v", filename)
	// Convert the YAML manifest to JSON before decoding.
	jsonData, err := utilyaml.ToJSON(ReadOrDie(filename))
	if err != nil {
		return nil, err
	}
	pod := &api.Pod{}
	if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), jsonData, pod); err != nil {
		return nil, err
	}
	return pod, nil
}
// RunKubernetesServiceTestContainer runs a test container to try and contact
// the Kubernetes api-server from a pod, waits for it to flip to Ready, logs
// its output and deletes it. All failures are logged, never fatal, since this
// is a diagnostic helper.
func RunKubernetesServiceTestContainer(c *client.Client, ns string) {
	path := "test/images/clusterapi-tester/pod.yaml"
	p, err := podFromManifest(path)
	if err != nil {
		Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
		return
	}
	p.Namespace = ns
	if _, err := c.Pods(ns).Create(p); err != nil {
		Logf("Failed to create %v: %v", p.Name, err)
		return
	}
	// Best-effort cleanup of the test pod on every exit path below.
	defer func() {
		if err := c.Pods(ns).Delete(p.Name, nil); err != nil {
			Logf("Failed to delete pod %v: %v", p.Name, err)
		}
	}()
	timeout := 5 * time.Minute
	if err := waitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, PodRunningReady); err != nil {
		Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
		return
	}
	// Grab the output of the first (only) container for the log.
	logs, err := GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
	if err != nil {
		Logf("Failed to retrieve logs from %v: %v", p.Name, err)
	} else {
		Logf("Output of clusterapi-tester:\n%v", logs)
	}
}
// kubectlLogPod logs the output of every container in pod whose name contains
// containerNameSubstr, falling back to the previous instance's logs when the
// current ones are unavailable.
func kubectlLogPod(c *client.Client, pod api.Pod, containerNameSubstr string) {
	for _, ctr := range pod.Spec.Containers {
		// Contains() matches all strings if substr is empty
		if !strings.Contains(ctr.Name, containerNameSubstr) {
			continue
		}
		logs, err := GetPodLogs(c, pod.Namespace, pod.Name, ctr.Name)
		if err != nil {
			if logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, ctr.Name); err != nil {
				Logf("Failed to get logs of pod %v, container %v, err: %v", pod.Name, ctr.Name, err)
			}
		}
		By(fmt.Sprintf("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, ctr.Name, pod.Spec.NodeName))
		Logf("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, ctr.Name)
	}
}
// LogFailedContainers dumps the logs of every container belonging to a pod in
// ns that is not currently Running and Ready.
func LogFailedContainers(c *client.Client, ns string) {
	podList, err := c.Pods(ns).List(api.ListOptions{})
	if err != nil {
		Logf("Error getting pods in namespace '%s': %v", ns, err)
		return
	}
	Logf("Running kubectl logs on non-ready containers in %v", ns)
	for i := range podList.Items {
		pod := podList.Items[i]
		if ok, err := PodRunningReady(&pod); err != nil || !ok {
			kubectlLogPod(c, pod, "")
		}
	}
}
// LogPodsWithLabels dumps the logs of all containers of every pod in ns whose
// labels match the given set.
func LogPodsWithLabels(c *client.Client, ns string, match map[string]string) {
	selector := labels.SelectorFromSet(match)
	podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: selector})
	if err != nil {
		Logf("Error getting pods in namespace %q: %v", ns, err)
		return
	}
	Logf("Running kubectl logs on pods with labels %v in %v", match, ns)
	for i := range podList.Items {
		kubectlLogPod(c, podList.Items[i], "")
	}
}
// LogContainersInPodsWithLabels dumps the logs of the containers whose name
// contains containerSubstr, for every pod in ns matching the given labels.
func LogContainersInPodsWithLabels(c *client.Client, ns string, match map[string]string, containerSubstr string) {
	selector := labels.SelectorFromSet(match)
	podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: selector})
	if err != nil {
		Logf("Error getting pods in namespace %q: %v", ns, err)
		return
	}
	for i := range podList.Items {
		kubectlLogPod(c, podList.Items[i], containerSubstr)
	}
}
// DeleteNamespaces deletes all namespaces that match the given delete and skip filters.
// Filter is by simple strings.Contains; first skip filter, then delete filter.
// Returns the list of deleted namespaces or an error.
// Deletion calls are issued concurrently, one goroutine per namespace, and
// this function blocks until all of them have been submitted.
func DeleteNamespaces(c *client.Client, deleteFilter, skipFilter []string) ([]string, error) {
	By("Deleting namespaces")
	nsList, err := c.Namespaces().List(api.ListOptions{})
	Expect(err).NotTo(HaveOccurred())
	var deleted []string
	var wg sync.WaitGroup
OUTER:
	for _, item := range nsList.Items {
		// Skip filter wins over delete filter.
		if skipFilter != nil {
			for _, pattern := range skipFilter {
				if strings.Contains(item.Name, pattern) {
					continue OUTER
				}
			}
		}
		if deleteFilter != nil {
			var shouldDelete bool
			for _, pattern := range deleteFilter {
				if strings.Contains(item.Name, pattern) {
					shouldDelete = true
					break
				}
			}
			if !shouldDelete {
				continue OUTER
			}
		}
		wg.Add(1)
		deleted = append(deleted, item.Name)
		// The name is passed as an argument to avoid capturing the loop
		// variable in the goroutine.
		go func(nsName string) {
			defer wg.Done()
			defer GinkgoRecover()
			Expect(c.Namespaces().Delete(nsName)).To(Succeed())
			Logf("namespace : %v api call to delete is complete ", nsName)
		}(item.Name)
	}
	wg.Wait()
	return deleted, nil
}
// WaitForNamespacesDeleted polls until none of the given namespaces exist any
// more, or until the timeout elapses.
func WaitForNamespacesDeleted(c *client.Client, namespaces []string, timeout time.Duration) error {
	By("Waiting for namespaces to vanish")
	pending := make(map[string]bool, len(namespaces))
	for _, ns := range namespaces {
		pending[ns] = true
	}
	//Now POLL until all namespaces have been eradicated.
	return wait.Poll(2*time.Second, timeout, func() (bool, error) {
		nsList, err := c.Namespaces().List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		for i := range nsList.Items {
			// Any namespace still present means we keep waiting.
			if pending[nsList.Items[i].Name] {
				return false, nil
			}
		}
		return true, nil
	})
}
// waitForServiceAccountInNamespace watches the named service account in ns
// until it has secrets provisioned, or until timeout elapses.
func waitForServiceAccountInNamespace(c *client.Client, ns, serviceAccountName string, timeout time.Duration) error {
	watcher, err := c.ServiceAccounts(ns).Watch(api.SingleObject(api.ObjectMeta{Name: serviceAccountName}))
	if err != nil {
		return err
	}
	_, err = watch.Until(timeout, watcher, client.ServiceAccountHasSecrets)
	return err
}
// waitForPodCondition polls the named pod until the supplied condition
// reports done, or until timeout elapses. desc is a human-readable label for
// the condition, used only in log messages. A NotFound error from Get is
// terminal (the pod disappeared); other Get errors are retried.
func waitForPodCondition(c *client.Client, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
	Logf("Waiting up to %[1]v for pod %[2]s status to be %[3]s", timeout, podName, desc)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pod, err := c.Pods(ns).Get(podName)
		if err != nil {
			if apierrs.IsNotFound(err) {
				Logf("Pod %q in namespace %q disappeared. Error: %v", podName, ns, err)
				return err
			}
			// Aligning this text makes it much more readable
			Logf("Get pod %[1]s in namespace '%[2]s' failed, ignoring for %[3]v. Error: %[4]v",
				podName, ns, Poll, err)
			continue
		}
		// The condition's error (possibly nil) is returned as soon as it
		// reports done.
		done, err := condition(pod)
		if done {
			return err
		}
		Logf("Waiting for pod %[1]s in namespace '%[2]s' status to be '%[3]s'"+
			"(found phase: %[4]q, readiness: %[5]t) (%[6]v elapsed)",
			podName, ns, desc, pod.Status.Phase, podReady(pod), time.Since(start))
	}
	return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout)
}
// WaitForMatchPodsCondition finds match pods based on the input ListOptions.
// waits and checks if all match pods are in the given podCondition.
// It returns nil as soon as every matching pod satisfies the condition, an
// error if listing fails or a condition reports an error while done, and a
// timeout error otherwise.
func WaitForMatchPodsCondition(c *client.Client, opts api.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
	Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pods, err := c.Pods(api.NamespaceAll).List(opts)
		if err != nil {
			return err
		}
		conditionNotMatch := []string{}
		for i := range pods.Items {
			// Index into the slice instead of taking the address of the
			// range variable, which is reused across iterations.
			pod := &pods.Items[i]
			done, err := condition(pod)
			if done && err != nil {
				return fmt.Errorf("Unexpected error: %v", err)
			}
			if !done {
				conditionNotMatch = append(conditionNotMatch, format.Pod(pod))
			}
		}
		// All matching pods satisfy the condition.
		// (Was `return err` on `<= 0`, which always returned nil anyway but
		// obscured the success path.)
		if len(conditionNotMatch) == 0 {
			return nil
		}
		Logf("%d pods are not %s", len(conditionNotMatch), desc)
	}
	return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout)
}
// WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned
// the default service account is what is associated with pods when they do not specify a service account
// as a result, pods are not able to be provisioned in a namespace until the service account is provisioned.
// The wait is bounded by ServiceAccountProvisionTimeout.
func WaitForDefaultServiceAccountInNamespace(c *client.Client, namespace string) error {
	return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout)
}
// WaitForFederationApiserverReady waits for the federation apiserver to be ready.
// It tests the readiness by sending a GET request and expecting a non error response.
func WaitForFederationApiserverReady(c *federation_release_1_4.Clientset) error {
	return wait.PollImmediate(time.Second, 1*time.Minute, func() (bool, error) {
		// Any successful list means the apiserver is up; errors just mean
		// "keep polling", never abort the wait.
		_, err := c.Federation().Clusters().List(api.ListOptions{})
		return err == nil, nil
	})
}
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase api.PersistentVolumePhase, c *client.Client, pvName string, Poll, timeout time.Duration) error {
	Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pv, err := c.PersistentVolumes().Get(pvName)
		if err != nil {
			// Transient API errors are tolerated; retry on the next poll.
			// (Message fixed: was the garbled "Get persistent volume %s in failed".)
			Logf("Get persistent volume %s failed, ignoring for %v: %v", pvName, Poll, err)
			continue
		}
		if pv.Status.Phase == phase {
			Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
			return nil
		}
		Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
	}
	return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}
// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeDeleted(c *client.Client, pvName string, Poll, timeout time.Duration) error {
	Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pv, err := c.PersistentVolumes().Get(pvName)
		if err == nil {
			// The volume still exists; keep waiting.
			Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
			continue
		}
		// NotFound is the success signal: the volume is gone.
		if apierrs.IsNotFound(err) {
			Logf("PersistentVolume %s was removed", pvName)
			return nil
		}
		// Transient API errors are tolerated; retry on the next poll.
		// (Message fixed: was the garbled "Get persistent volume %s in failed".)
		Logf("Get persistent volume %s failed, ignoring for %v: %v", pvName, Poll, err)
	}
	return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout)
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimPhase(phase api.PersistentVolumeClaimPhase, c *client.Client, ns string, pvcName string, Poll, timeout time.Duration) error {
	Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pvc, err := c.PersistentVolumeClaims(ns).Get(pvcName)
		if err != nil {
			// Transient API errors are tolerated; retry on the next poll.
			// (Message fixed: was the garbled "Get persistent volume claim %s in failed".)
			Logf("Get persistent volume claim %s failed, ignoring for %v: %v", pvcName, Poll, err)
			continue
		}
		if pvc.Status.Phase == phase {
			Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
			return nil
		}
		Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
	}
	return fmt.Errorf("PersistentVolumeClaim %s not in phase %s within %v", pvcName, phase, timeout)
}
// CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name.
// Please see NewFramework instead of using this directly.
// The namespace is labeled with the run ID (and any caller-supplied labels),
// created with retries, and - when TestContext.VerifyServiceAccount is set -
// returned only after its default service account is provisioned.
func CreateTestingNS(baseName string, c *client.Client, labels map[string]string) (*api.Namespace, error) {
	if labels == nil {
		labels = map[string]string{}
	}
	// Tag the namespace with this run's ID so stale namespaces can be found.
	labels["e2e-run"] = string(RunId)
	namespaceObj := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			// GenerateName lets the apiserver pick a unique suffix.
			GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName),
			Namespace:    "",
			Labels:       labels,
		},
		Status: api.NamespaceStatus{},
	}
	// Be robust about making the namespace creation call.
	var got *api.Namespace
	if err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
		var err error
		got, err = c.Namespaces().Create(namespaceObj)
		if err != nil {
			// Creation errors are retried until SingleCallTimeout.
			Logf("Unexpected error while creating namespace: %v", err)
			return false, nil
		}
		return true, nil
	}); err != nil {
		return nil, err
	}
	if TestContext.VerifyServiceAccount {
		if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil {
			return nil, err
		}
	}
	return got, nil
}
// CheckTestingNSDeletedExcept checks whether all e2e based existing namespaces are in the Terminating state
// and waits until they are finally deleted. It ignores namespace skip.
// It errors out immediately if any e2e namespace (other than skip) is still
// Active, and errors after the timeout if terminating namespaces remain.
func CheckTestingNSDeletedExcept(c *client.Client, skip string) error {
	// TODO: Since we don't have support for bulk resource deletion in the API,
	// while deleting a namespace we are deleting all objects from that namespace
	// one by one (one deletion == one API call). This basically exposes us to
	// throttling - currently controller-manager has a limit of max 20 QPS.
	// Once #10217 is implemented and used in namespace-controller, deleting all
	// object from a given namespace should be much faster and we will be able
	// to lower this timeout.
	// However, now Density test is producing ~26000 events and Load capacity test
	// is producing ~35000 events, thus assuming there are no other requests it will
	// take ~30 minutes to fully delete the namespace. Thus I'm setting it to 60
	// minutes to avoid any timeouts here.
	timeout := 60 * time.Minute
	Logf("Waiting for terminating namespaces to be deleted...")
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) {
		namespaces, err := c.Namespaces().List(api.ListOptions{})
		if err != nil {
			// List errors are transient; try again on the next iteration.
			Logf("Listing namespaces failed: %v", err)
			continue
		}
		terminating := 0
		for _, ns := range namespaces.Items {
			// Only e2e-created namespaces (prefix "e2e-tests-") are considered,
			// excluding the one named by skip.
			if strings.HasPrefix(ns.ObjectMeta.Name, "e2e-tests-") && ns.ObjectMeta.Name != skip {
				if ns.Status.Phase == api.NamespaceActive {
					return fmt.Errorf("Namespace %s is active", ns.ObjectMeta.Name)
				}
				terminating++
			}
		}
		if terminating == 0 {
			return nil
		}
	}
	return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out")
}
// deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks
// whether there are any pods remaining in a non-terminating state.
// On timeout the error message distinguishes which component is the likely
// culprit (kubelet vs. namespace controller) based on what content remains.
func deleteNS(c *client.Client, clientPool dynamic.ClientPool, namespace string, timeout time.Duration) error {
	if err := c.Namespaces().Delete(namespace); err != nil {
		return err
	}
	// wait for namespace to delete or timeout.
	err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		if _, err := c.Namespaces().Get(namespace); err != nil {
			// NotFound means deletion finished; other errors are retried.
			if apierrs.IsNotFound(err) {
				return true, nil
			}
			Logf("Error while waiting for namespace to be terminated: %v", err)
			return false, nil
		}
		return false, nil
	})
	// verify there is no more remaining content in the namespace
	remainingContent, cerr := hasRemainingContent(c, clientPool, namespace)
	if cerr != nil {
		return cerr
	}
	// if content remains, let's dump information about the namespace, and system for flake debugging.
	remainingPods := 0
	missingTimestamp := 0
	if remainingContent {
		// log information about namespace, and set of namespaces in api server to help flake detection
		logNamespace(c, namespace)
		logNamespaces(c, namespace)
		// if we can, check if there were pods remaining with no timestamp.
		remainingPods, missingTimestamp, _ = countRemainingPods(c, namespace)
	}
	// a timeout waiting for namespace deletion happened!
	if err != nil {
		// some content remains in the namespace
		if remainingContent {
			// pods remain
			if remainingPods > 0 {
				// but they were all undergoing deletion (kubelet is probably culprit)
				if missingTimestamp == 0 {
					return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v, pods missing deletion timestamp: %v", namespace, err, remainingPods, missingTimestamp)
				}
				// pods remained, but were not undergoing deletion (namespace controller is probably culprit)
				return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v", namespace, err, remainingPods)
			}
			// other content remains (namespace controller is probably screwed up)
			return fmt.Errorf("namespace %v was not deleted with limit: %v, namespaced content other than pods remain", namespace, err)
		}
		// no remaining content, but namespace was not deleted (namespace controller is probably wedged)
		return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err)
	}
	return nil
}
// logNamespaces logs the number of namespaces by phase
// namespace is the namespace the test was operating against that failed to delete so it can be grepped in logs
func logNamespaces(c *client.Client, namespace string) {
	namespaceList, err := c.Namespaces().List(api.ListOptions{})
	if err != nil {
		Logf("namespace: %v, unable to list namespaces: %v", namespace, err)
		return
	}
	// Tally namespaces by phase; anything not Active counts as terminating.
	active, terminating := 0, 0
	for i := range namespaceList.Items {
		if namespaceList.Items[i].Status.Phase == api.NamespaceActive {
			active++
		} else {
			terminating++
		}
	}
	Logf("namespace: %v, total namespaces: %v, active: %v, terminating: %v", namespace, len(namespaceList.Items), active, terminating)
}
// logNamespace logs detail about a namespace: its deletion timestamp,
// finalizers, and phase.
func logNamespace(c *client.Client, namespace string) {
	ns, err := c.Namespaces().Get(namespace)
	if err != nil {
		if apierrs.IsNotFound(err) {
			Logf("namespace: %v no longer exists", namespace)
			return
		}
		Logf("namespace: %v, unable to get namespace due to error: %v", namespace, err)
		return
	}
	// Log message label fixed: was misspelled "DeletionTimetamp".
	Logf("namespace: %v, DeletionTimestamp: %v, Finalizers: %v, Phase: %v", ns.Name, ns.DeletionTimestamp, ns.Spec.Finalizers, ns.Status.Phase)
}
// countRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp.
func countRemainingPods(c *client.Client, namespace string) (int, int, error) {
	pods, err := c.Pods(namespace).List(api.ListOptions{})
	if err != nil {
		return 0, 0, err
	}
	// nothing remains!
	if len(pods.Items) == 0 {
		return 0, 0, nil
	}
	// stuff remains, log about it
	logPodStates(pods.Items)
	// check if there were any pods with missing deletion timestamp
	missingTimestamp := 0
	for i := range pods.Items {
		if pods.Items[i].DeletionTimestamp == nil {
			missingTimestamp++
		}
	}
	return len(pods.Items), missingTimestamp, nil
}
// hasRemainingContent checks if there is remaining content in the namespace via API discovery.
// It lists every namespaced resource type the server advertises and returns
// true if any of them still has items. Resource types that cannot be listed
// are skipped.
func hasRemainingContent(c *client.Client, clientPool dynamic.ClientPool, namespace string) (bool, error) {
	// some tests generate their own framework.Client rather than the default
	// TODO: ensure every test call has a configured clientPool
	if clientPool == nil {
		return false, nil
	}
	// find out what content is supported on the server
	groupVersionResources, err := c.Discovery().ServerPreferredNamespacedResources()
	if err != nil {
		return false, err
	}
	// TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798
	ignoredResources := sets.NewString("bindings")
	contentRemaining := false
	// dump how many of resource type is on the server in a log.
	for _, gvr := range groupVersionResources {
		// get a client for this group version...
		dynamicClient, err := clientPool.ClientForGroupVersion(gvr.GroupVersion())
		if err != nil {
			// not all resource types support list, so some errors here are normal depending on the resource type.
			Logf("namespace: %s, unable to get client - gvr: %v, error: %v", namespace, gvr, err)
			continue
		}
		// get the api resource
		apiResource := unversioned.APIResource{Name: gvr.Resource, Namespaced: true}
		// TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798
		if ignoredResources.Has(apiResource.Name) {
			Logf("namespace: %s, resource: %s, ignored listing per whitelist", namespace, apiResource.Name)
			continue
		}
		obj, err := dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{})
		if err != nil {
			// not all resources support list, so we ignore those
			if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
				continue
			}
			return false, err
		}
		// The dynamic client returns an UnstructuredList; anything else is a bug.
		unstructuredList, ok := obj.(*runtime.UnstructuredList)
		if !ok {
			return false, fmt.Errorf("namespace: %s, resource: %s, expected *runtime.UnstructuredList, got %#v", namespace, apiResource.Name, obj)
		}
		if len(unstructuredList.Items) > 0 {
			Logf("namespace: %s, resource: %s, items remaining: %v", namespace, apiResource.Name, len(unstructuredList.Items))
			contentRemaining = true
		}
	}
	return contentRemaining, nil
}
// ContainerInitInvariant is an InvariantFunc over two consecutive observations
// of the same pod: the init container list must not change, the pod identity
// (UID) must be stable, both snapshots must have internally consistent init
// container statuses, and initialization must never regress from true to false.
func ContainerInitInvariant(older, newer runtime.Object) error {
	oldPod := older.(*api.Pod)
	newPod := newer.(*api.Pod)
	// Pods without init containers have nothing to check.
	if len(oldPod.Spec.InitContainers) == 0 {
		return nil
	}
	if len(oldPod.Spec.InitContainers) != len(newPod.Spec.InitContainers) {
		return fmt.Errorf("init container list changed")
	}
	if oldPod.UID != newPod.UID {
		return fmt.Errorf("two different pods exist in the condition: %s vs %s", oldPod.UID, newPod.UID)
	}
	if err := initContainersInvariants(oldPod); err != nil {
		return err
	}
	if err := initContainersInvariants(newPod); err != nil {
		return err
	}
	oldInit, _, _ := podInitialized(oldPod)
	newInit, _, _ := podInitialized(newPod)
	if oldInit && !newInit {
		// TODO: we may in the future enable resetting PodInitialized = false if the kubelet needs to restart it
		// from scratch
		return fmt.Errorf("pod cannot be initialized and then regress to not being initialized")
	}
	return nil
}
// podInitialized inspects pod's init container statuses in order and reports
// whether initialization completed (ok) and whether any init container failed
// (failed). An error is returned when the statuses are internally
// inconsistent, e.g. a container positioned after an unfinished or failed one
// is not in the Waiting state.
func podInitialized(pod *api.Pod) (ok bool, failed bool, err error) {
	allInit := true
	initFailed := false
	for _, s := range pod.Status.InitContainerStatuses {
		switch {
		case initFailed && s.State.Waiting == nil:
			return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name)
		case !allInit && s.State.Waiting == nil:
			// BUG FIX: this case previously tested `allInit` instead of
			// `!allInit`, which made the very first non-waiting container an
			// error and prevented the function from ever reporting a
			// successfully initialized pod.
			return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name)
		case s.State.Terminated == nil:
			// Still running (or waiting): initialization not complete yet.
			allInit = false
		case s.State.Terminated.ExitCode != 0:
			// Terminated with a non-zero exit code: this init container failed.
			allInit = false
			initFailed = true
		case !s.Ready:
			return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name)
		}
	}
	return allInit, initFailed, nil
}
// initContainersInvariants validates a single pod snapshot: while init
// containers have not all completed (or one failed), app containers must be
// waiting with reason PodInitializing; and the PodInitialized condition must
// exist, have a transition time, and agree with the init container statuses.
func initContainersInvariants(pod *api.Pod) error {
	allInit, initFailed, err := podInitialized(pod)
	if err != nil {
		return err
	}
	if !allInit || initFailed {
		// App containers must not have started while init is incomplete.
		for _, s := range pod.Status.ContainerStatuses {
			if s.State.Waiting == nil || s.RestartCount != 0 {
				return fmt.Errorf("container %s is not waiting but initialization not complete", s.Name)
			}
			if s.State.Waiting.Reason != "PodInitializing" {
				return fmt.Errorf("container %s should have reason PodInitializing: %s", s.Name, s.State.Waiting.Reason)
			}
		}
	}
	_, c := api.GetPodCondition(&pod.Status, api.PodInitialized)
	if c == nil {
		return fmt.Errorf("pod does not have initialized condition")
	}
	if c.LastTransitionTime.IsZero() {
		return fmt.Errorf("PodInitialized condition should always have a transition time")
	}
	// The condition's status must be consistent with the computed init state.
	switch {
	case c.Status == api.ConditionUnknown:
		return fmt.Errorf("PodInitialized condition should never be Unknown")
	case c.Status == api.ConditionTrue && (initFailed || !allInit):
		return fmt.Errorf("PodInitialized condition was True but all not all containers initialized")
	case c.Status == api.ConditionFalse && (!initFailed && allInit):
		return fmt.Errorf("PodInitialized condition was False but all containers initialized")
	}
	return nil
}
// InvariantFunc checks an invariant that must hold between two consecutive
// observations (older, newer) of the same watched object.
type InvariantFunc func(older, newer runtime.Object) error
// CheckInvariants applies every invariant function to each consecutive pair
// of watch events and returns a single aggregated error listing all distinct
// violations, or nil if none were found.
func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error {
	errs := sets.NewString()
	for i := 0; i+1 < len(events); i++ {
		older, newer := events[i].Object, events[i+1].Object
		for _, fn := range fns {
			if err := fn(older, newer); err != nil {
				// Insert into a string set to deduplicate repeated violations.
				errs.Insert(err.Error())
			}
		}
	}
	if errs.Len() == 0 {
		return nil
	}
	return fmt.Errorf("invariants violated:\n* %s", strings.Join(errs.List(), "\n* "))
}
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
func WaitForPodRunningInNamespace(c *client.Client, pod *api.Pod) error {
	// Short-circuit for pods that are already running (e.g. when an existing
	// pod list is passed in, as VerifyPods does): watching such a pod for a
	// Running transition does not make sense and would always fail.
	if pod.Status.Phase != api.PodRunning {
		return waitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, pod.ResourceVersion, PodStartTimeout)
	}
	return nil
}
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
// The pod is looked up by name; the watch starts from the current resource
// version (empty resourceVersion).
func WaitForPodNameRunningInNamespace(c *client.Client, podName, namespace string) error {
	return waitTimeoutForPodRunningInNamespace(c, podName, namespace, "", PodStartTimeout)
}
// Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running.
// The resourceVersion is used when Watching object changes, it tells since when we care
// about changes to the pod. Returns an error if timeout occurs first, or pod goes in to failed state.
// This is the slow-timeout variant of waitTimeoutForPodRunningInNamespace.
func waitForPodRunningInNamespaceSlow(c *client.Client, podName, namespace, resourceVersion string) error {
	return waitTimeoutForPodRunningInNamespace(c, podName, namespace, resourceVersion, slowPodStartTimeout)
}
// waitTimeoutForPodRunningInNamespace watches the single named pod (starting
// at resourceVersion) until client.PodRunning reports done, or timeout elapses.
func waitTimeoutForPodRunningInNamespace(c *client.Client, podName, namespace, resourceVersion string, timeout time.Duration) error {
	target := api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})
	w, watchErr := c.Pods(namespace).Watch(target)
	if watchErr != nil {
		return watchErr
	}
	if _, err := watch.Until(timeout, w, client.PodRunning); err != nil {
		return err
	}
	return nil
}
// Waits default amount of time (podNoLongerRunningTimeout) for the specified pod to stop running.
// Returns an error if timeout occurs first.
// Thin wrapper around WaitTimeoutForPodNoLongerRunningInNamespace with the
// default timeout.
func WaitForPodNoLongerRunningInNamespace(c *client.Client, podName, namespace, resourceVersion string) error {
	return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, resourceVersion, podNoLongerRunningTimeout)
}
// WaitTimeoutForPodNoLongerRunningInNamespace watches the single named pod
// until client.PodCompleted reports done, or timeout elapses.
func WaitTimeoutForPodNoLongerRunningInNamespace(c *client.Client, podName, namespace, resourceVersion string, timeout time.Duration) error {
	target := api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})
	w, watchErr := c.Pods(namespace).Watch(target)
	if watchErr != nil {
		return watchErr
	}
	if _, err := watch.Until(timeout, w, client.PodCompleted); err != nil {
		return err
	}
	return nil
}
// waitTimeoutForPodReadyInNamespace watches the single named pod until
// client.PodRunningAndReady reports done, or timeout elapses.
func waitTimeoutForPodReadyInNamespace(c *client.Client, podName, namespace, resourceVersion string, timeout time.Duration) error {
	target := api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})
	w, watchErr := c.Pods(namespace).Watch(target)
	if watchErr != nil {
		return watchErr
	}
	if _, err := watch.Until(timeout, w, client.PodRunningAndReady); err != nil {
		return err
	}
	return nil
}
// WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state.
// The resourceVersion is used when Watching object changes, it tells since when we care
// about changes to the pod.
func WaitForPodNotPending(c *client.Client, ns, podName, resourceVersion string) error {
	target := api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})
	w, watchErr := c.Pods(ns).Watch(target)
	if watchErr != nil {
		return watchErr
	}
	if _, err := watch.Until(PodStartTimeout, w, client.PodNotPending); err != nil {
		return err
	}
	return nil
}
// waitForPodTerminatedInNamespace returns an error if it took too long for the pod
// to terminate or if the pod terminated with an unexpected reason.
func waitForPodTerminatedInNamespace(c *client.Client, podName, reason, namespace string) error {
	return waitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *api.Pod) (bool, error) {
		if pod.Status.Phase != api.PodFailed {
			// Not terminated yet; keep polling.
			return false, nil
		}
		if pod.Status.Reason == reason {
			return true, nil
		}
		// Terminated, but for the wrong reason: done, with an error.
		return true, fmt.Errorf("Expected pod %v in namespace %v to be terminated with reason %v, got reason: %v", podName, namespace, reason, pod.Status.Reason)
	})
}
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func waitForPodSuccessInNamespaceTimeout(c *client.Client, podName string, contName string, namespace string, timeout time.Duration) error {
	return waitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *api.Pod) (bool, error) {
		// Cannot use pod.Status.Phase == api.PodSucceeded/api.PodFailed due to #2632
		ci, ok := api.GetContainerStatus(pod.Status.ContainerStatuses, contName)
		if !ok {
			Logf("No Status.Info for container '%s' in pod '%s' yet", contName, podName)
			return false, nil
		}
		terminated := ci.State.Terminated
		if terminated == nil {
			Logf("Nil State.Terminated for container '%s' in pod '%s' in namespace '%s' so far", contName, podName, namespace)
			return false, nil
		}
		if terminated.ExitCode == 0 {
			By("Saw pod success")
			return true, nil
		}
		return true, fmt.Errorf("pod '%s' terminated with failure: %+v", podName, terminated)
	})
}
// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout.
// Uses the default PodStartTimeout.
func WaitForPodSuccessInNamespace(c *client.Client, podName string, contName string, namespace string) error {
	return waitForPodSuccessInNamespaceTimeout(c, podName, contName, namespace, PodStartTimeout)
}
// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout.
// Uses the extended slowPodStartTimeout.
func WaitForPodSuccessInNamespaceSlow(c *client.Client, podName string, contName string, namespace string) error {
	return waitForPodSuccessInNamespaceTimeout(c, podName, contName, namespace, slowPodStartTimeout)
}
// waitForRCPodOnNode returns the pod from the given replication controller (described by rcName) which is scheduled on the given node.
// In case of failure or too long waiting time, an error is returned.
func waitForRCPodOnNode(c *client.Client, ns, rcName, node string) (*api.Pod, error) {
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
	var found *api.Pod
	err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
		Logf("Waiting for pod %s to appear on node %s", rcName, node)
		pods, err := c.Pods(ns).List(api.ListOptions{LabelSelector: selector})
		if err != nil {
			return false, err
		}
		// Index-based iteration so the returned pointer refers to the list
		// element rather than a loop-variable copy.
		for i := range pods.Items {
			if pods.Items[i].Spec.NodeName == node {
				Logf("Pod %s found on node %s", pods.Items[i].Name, node)
				found = &pods.Items[i]
				return true, nil
			}
		}
		return false, nil
	})
	return found, err
}
// WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status.
// It watches the single RC identified by ns/name and returns nil once the
// observed generation has caught up with spec and status.Replicas equals
// spec.Replicas; it errors if the RC is deleted or the timeout expires.
func WaitForRCToStabilize(c *client.Client, ns, name string, timeout time.Duration) error {
	options := api.ListOptions{FieldSelector: fields.Set{
		"metadata.name": name,
		"metadata.namespace": ns,
	}.AsSelector()}
	w, err := c.ReplicationControllers(ns).Watch(options)
	if err != nil {
		return err
	}
	_, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Deleted:
			// Deletion while waiting is terminal: surface it as NotFound.
			return false, apierrs.NewNotFound(unversioned.GroupResource{Resource: "replicationcontrollers"}, "")
		}
		switch rc := event.Object.(type) {
		case *api.ReplicationController:
			if rc.Name == name && rc.Namespace == ns &&
				rc.Generation <= rc.Status.ObservedGeneration &&
				rc.Spec.Replicas == rc.Status.Replicas {
				return true, nil
			}
			Logf("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
				name, rc.Generation, rc.Status.ObservedGeneration, rc.Spec.Replicas, rc.Status.Replicas)
		}
		return false, nil
	})
	return err
}
// WaitForPodToDisappear polls (at the given interval, up to timeout) until no
// pod with the given name remains among the pods matching label in ns.
func WaitForPodToDisappear(c *client.Client, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		Logf("Waiting for pod %s to disappear", podName)
		pods, err := c.Pods(ns).List(api.ListOptions{LabelSelector: label})
		if err != nil {
			return false, err
		}
		for _, pod := range pods.Items {
			if pod.Name == podName {
				Logf("Pod %s still exists", podName)
				return false, nil
			}
		}
		Logf("Pod %s no longer exists", podName)
		return true, nil
	})
}
// WaitForRCPodToDisappear returns nil if the pod from the given replication controller (described by rcName) no longer exists.
// In case of failure or too long waiting time, an error is returned.
func WaitForRCPodToDisappear(c *client.Client, ns, rcName, podName string) error {
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
	// NodeController evicts pod after 5 minutes, so we need timeout greater than that.
	// Additionally, there can be non-zero grace period, so we are setting 10 minutes
	// to be on the safe side.
	return WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute)
}
// WaitForService waits until the service appears (exist == true), or disappears (exist == false)
func WaitForService(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		_, err := c.Services(namespace).Get(name)
		if err == nil {
			// Service exists; we are done only if we wanted it to exist.
			if !exist {
				return false, nil
			}
			Logf("Service %s in namespace %s found.", name, namespace)
			return true, nil
		}
		if apierrs.IsNotFound(err) {
			// Service is gone; we are done only if we wanted it gone.
			if exist {
				return false, nil
			}
			Logf("Service %s in namespace %s disappeared.", name, namespace)
			return true, nil
		}
		// Transient lookup failure: log and keep polling.
		Logf("Get service %s in namespace %s failed: %v", name, namespace, err)
		return false, nil
	})
	if err == nil {
		return nil
	}
	stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
	return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err)
}
//WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum.
func WaitForServiceEndpointsNum(c *client.Client, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
	return wait.Poll(interval, timeout, func() (bool, error) {
		Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
		list, err := c.Endpoints(namespace).List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		for i := range list.Items {
			ep := &list.Items[i]
			if ep.Name == serviceName && countEndpointsNum(ep) == expectNum {
				return true, nil
			}
		}
		return false, nil
	})
}
// countEndpointsNum returns the total number of addresses across all subsets
// of the given Endpoints object.
func countEndpointsNum(e *api.Endpoints) int {
	total := 0
	for i := range e.Subsets {
		total += len(e.Subsets[i].Addresses)
	}
	return total
}
// WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
func WaitForReplicationController(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		if _, err := c.ReplicationControllers(namespace).Get(name); err != nil {
			Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
			return !exist, nil
		}
		Logf("ReplicationController %s in namespace %s found.", name, namespace)
		return exist, nil
	})
	if err == nil {
		return nil
	}
	stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
	return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %v", namespace, name, stateMsg[exist], err)
}
// WaitForEndpoint polls until the named endpoints object has at least one
// address in its first subset. Lookup errors fail the test immediately;
// running out of EndpointRegisterTimeout returns an error.
func WaitForEndpoint(c *client.Client, ns, name string) error {
	for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) {
		endpoint, err := c.Endpoints(ns).Get(name)
		Expect(err).NotTo(HaveOccurred())
		if len(endpoint.Subsets) > 0 && len(endpoint.Subsets[0].Addresses) > 0 {
			return nil
		}
		Logf("Endpoint %s/%s is not ready yet", ns, name)
	}
	return fmt.Errorf("Failed to get endpoints for %s/%s", ns, name)
}
// Context for checking pods responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type podProxyResponseChecker struct {
	c              *client.Client
	ns             string
	label          labels.Selector // selects the pods belonging to the controller
	controllerName string
	respondName    bool // Whether the pod should respond with its own name.
	pods           *api.PodList // the expected replica set at construction time
}
// PodProxyResponseChecker constructs a podProxyResponseChecker for the given
// client, namespace, label selector, controller name and expected pod list.
func PodProxyResponseChecker(c *client.Client, ns string, label labels.Selector, controllerName string, respondName bool, pods *api.PodList) podProxyResponseChecker {
	return podProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}
// CheckAllResponses issues GETs to all pods in the context and verify they
// reply with their own pod name.
// It returns (true, nil) only when every pod in r.pods responded as expected;
// (false, nil) to signal the caller to poll again; and a non-nil error when
// the replica set itself changed or a version lookup failed.
func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
	successes := 0
	options := api.ListOptions{LabelSelector: r.label}
	currentPods, err := r.c.Pods(r.ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	for i, pod := range r.pods.Items {
		// Check that the replica list remains unchanged, otherwise we have problems.
		if !isElementOf(pod.UID, currentPods) {
			return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
		}
		subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, r.c)
		if err != nil {
			return false, err
		}
		var body []byte
		// Newer servers expose the pod proxy as a subresource; older ones use
		// the "proxy" path prefix.
		if subResourceProxyAvailable {
			body, err = r.c.Get().
				Namespace(r.ns).
				Resource("pods").
				SubResource("proxy").
				Name(string(pod.Name)).
				Do().
				Raw()
		} else {
			body, err = r.c.Get().
				Prefix("proxy").
				Namespace(r.ns).
				Resource("pods").
				Name(string(pod.Name)).
				Do().
				Raw()
		}
		if err != nil {
			// A failed GET is not fatal; the pod may simply not be ready yet.
			Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
			continue
		}
		// The response checker expects the pod's name unless !respondName, in
		// which case it just checks for a non-empty response.
		got := string(body)
		what := ""
		if r.respondName {
			what = "expected"
			want := pod.Name
			if got != want {
				Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
					r.controllerName, i+1, pod.Name, want, got)
				continue
			}
		} else {
			what = "non-empty"
			if len(got) == 0 {
				Logf("Controller %s: Replica %d [%s] expected non-empty response",
					r.controllerName, i+1, pod.Name)
				continue
			}
		}
		successes++
		Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
			r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
	}
	if successes < len(r.pods.Items) {
		return false, nil
	}
	return true, nil
}
// ServerVersionGTE returns true if v is greater than or equal to the server
// version.
//
// TODO(18726): This should be incorporated into client.VersionInterface.
func ServerVersionGTE(v semver.Version, c discovery.ServerVersionInterface) (bool, error) {
	info, err := c.ServerVersion()
	if err != nil {
		return false, fmt.Errorf("Unable to get server version: %v", err)
	}
	parsed, err := version.Parse(info.GitVersion)
	if err != nil {
		return false, fmt.Errorf("Unable to parse server version %q: %v", info.GitVersion, err)
	}
	return parsed.GTE(v), nil
}
// SkipUnlessKubectlVersionGTE skips the current test when the local kubectl
// is older than v, and fails it when the version cannot be determined.
func SkipUnlessKubectlVersionGTE(v semver.Version) {
	supported, err := KubectlVersionGTE(v)
	switch {
	case err != nil:
		Failf("Failed to get kubectl version: %v", err)
	case !supported:
		Skipf("Not supported for kubectl versions before %q", v)
	}
}
// KubectlVersionGTE returns true if the kubectl version is greater than or
// equal to v.
func KubectlVersionGTE(v semver.Version) (bool, error) {
	current, err := KubectlVersion()
	if err != nil {
		return false, err
	}
	return current.GTE(v), nil
}
// KubectlVersion gets the version of kubectl that's currently being used (see
// --kubectl-path in e2e.go to use an alternate kubectl).
func KubectlVersion() (semver.Version, error) {
	out := RunKubectlOrDie("version", "--client")
	m := gitVersionRegexp.FindStringSubmatch(out)
	if len(m) != 2 {
		return semver.Version{}, fmt.Errorf("Could not find kubectl version in output %v", out)
	}
	// Use only the capture group: the full match still carries the
	// `GitVersion:"` prefix and the trailing quote.
	return version.Parse(m[1])
}
// PodsResponding polls (up to podRespondingTimeout) until every pod in pods,
// selected by the "name" label, answers a proxied GET — with its own name
// when wantName is true, or any non-empty body otherwise.
func PodsResponding(c *client.Client, ns, name string, wantName bool, pods *api.PodList) error {
	By("trying to dial each unique pod")
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
}
// PodsCreated polls for up to two minutes until exactly `replicas` pods
// matching the "name" label exist (ignoring pods already being deleted),
// returning the filtered pod list or an error on timeout.
func PodsCreated(c *client.Client, ns, name string, replicas int32) (*api.PodList, error) {
	timeout := 2 * time.Minute
	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		pods, err := c.Pods(ns).List(api.ListOptions{LabelSelector: label})
		if err != nil {
			return nil, err
		}
		// Ignore pods that are already being torn down.
		live := []api.Pod{}
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp == nil {
				live = append(live, pod)
			}
		}
		Logf("Pod name %s: Found %d pods out of %d", name, len(live), replicas)
		if int32(len(live)) == replicas {
			pods.Items = live
			return pods, nil
		}
	}
	return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
func podsRunning(c *client.Client, pods *api.PodList) []error {
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	By("ensuring each pod is running")
	errs := []error{}
	ch := make(chan error)
	// One goroutine per pod; each reports its wait result on ch. The pod is
	// passed by value to avoid sharing the loop variable.
	for _, pod := range pods.Items {
		go func(p api.Pod) {
			ch <- WaitForPodRunningInNamespace(c, &p)
		}(pod)
	}
	// Collect exactly one result per pod.
	for i := 0; i < len(pods.Items); i++ {
		if err := <-ch; err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}
// VerifyPods waits for `replicas` pods matching name to be created, running,
// and responding via the API proxy, returning the first failure encountered.
func VerifyPods(c *client.Client, ns, name string, wantName bool, replicas int32) error {
	pods, err := PodsCreated(c, ns, name, replicas)
	if err != nil {
		return err
	}
	if e := podsRunning(c, pods); len(e) > 0 {
		return fmt.Errorf("failed to wait for pods running: %v", e)
	}
	if err := PodsResponding(c, ns, name, wantName, pods); err != nil {
		return fmt.Errorf("failed to wait for pods responding: %v", err)
	}
	return nil
}
// ServiceResponding polls (up to ServiceRespondingTimeout) until a proxied
// GET to the named service returns a non-empty body.
func ServiceResponding(c *client.Client, ns, name string) error {
	By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
	return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) {
		proxyRequest, errProxy := GetServicesProxyRequest(c, c.Get())
		if errProxy != nil {
			Logf("Failed to get services proxy request: %v:", errProxy)
			return false, nil
		}
		body, err := proxyRequest.Namespace(ns).
			Name(name).
			Do().
			Raw()
		if err != nil {
			Logf("Failed to GET from service %s: %v:", name, err)
			return false, nil
		}
		got := string(body)
		if len(got) == 0 {
			Logf("Service %s: expected non-empty response", name)
			return false, err // err is nil here, so polling continues until non-empty or timeout
		}
		Logf("Service %s: found nonempty answer: %s", name, got)
		return true, nil
	})
}
// restclientConfig loads the kubeconfig file named by TestContext.KubeConfig
// and, when kubeContext is non-empty, overrides its current context.
func restclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
	Logf(">>> kubeConfig: %s\n", TestContext.KubeConfig)
	if TestContext.KubeConfig == "" {
		return nil, fmt.Errorf("KubeConfig must be specified to load client config")
	}
	cfg, err := clientcmd.LoadFromFile(TestContext.KubeConfig)
	if err != nil {
		return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error())
	}
	if kubeContext != "" {
		Logf(">>> kubeContext: %s\n", kubeContext)
		cfg.CurrentContext = kubeContext
	}
	return cfg, nil
}
type ClientConfigGetter func() (*restclient.Config, error)
// LoadConfig builds a REST client config from the test context: a bare
// host-only config for node e2e runs, otherwise the kubeconfig file with the
// test host applied as a cluster override.
func LoadConfig() (*restclient.Config, error) {
	if TestContext.NodeName != "" {
		// This is a node e2e test, apply the node e2e configuration
		return &restclient.Config{Host: TestContext.Host}, nil
	}
	cfg, err := restclientConfig(TestContext.KubeContext)
	if err != nil {
		return nil, err
	}
	overrides := &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}
	return clientcmd.NewDefaultClientConfig(*cfg, overrides).ClientConfig()
}
// LoadFederatedConfig builds a REST client config for the federation control
// plane from the federated kube context, applying the given overrides.
// Note: the (possibly non-nil) cfg is returned even alongside an error, and
// its serializer is patched before the error check — see the TODO below.
func LoadFederatedConfig(overrides *clientcmd.ConfigOverrides) (*restclient.Config, error) {
	c, err := restclientConfig(federatedKubeContext)
	if err != nil {
		return nil, fmt.Errorf("error creating federation client config: %v", err.Error())
	}
	cfg, err := clientcmd.NewDefaultClientConfig(*c, overrides).ClientConfig()
	if cfg != nil {
		//TODO(colhom): this is only here because https://github.com/kubernetes/kubernetes/issues/25422
		cfg.NegotiatedSerializer = api.Codecs
	}
	if err != nil {
		return cfg, fmt.Errorf("error creating federation client config: %v", err.Error())
	}
	return cfg, nil
}
// loadClientFromConfig creates a client from the given config and ensures it
// has a non-zero per-call timeout (defaulting to SingleCallTimeout).
func loadClientFromConfig(config *restclient.Config) (*client.Client, error) {
	cl, err := client.New(config)
	if err != nil {
		return nil, fmt.Errorf("error creating client: %v", err.Error())
	}
	if cl.Client.Timeout == 0 {
		cl.Client.Timeout = SingleCallTimeout
	}
	return cl, nil
}
// setTimeouts gives every supplied HTTP client a non-zero timeout, defaulting
// to SingleCallTimeout; clients that already have one are left untouched.
func setTimeouts(cs ...*http.Client) {
	for _, hc := range cs {
		if hc.Timeout != 0 {
			continue
		}
		hc.Timeout = SingleCallTimeout
	}
}
// LoadFederationClientset_1_4 builds a 1.4 federation clientset from the
// federated config and applies the default per-call timeout to each of its
// underlying HTTP clients.
func LoadFederationClientset_1_4() (*federation_release_1_4.Clientset, error) {
	config, err := LoadFederatedConfig(&clientcmd.ConfigOverrides{})
	if err != nil {
		return nil, err
	}
	clientset, err := federation_release_1_4.NewForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("error creating federation clientset: %v", err.Error())
	}
	// Set timeout for each client in the set.
	setTimeouts(clientset.DiscoveryClient.Client, clientset.FederationClient.Client, clientset.CoreClient.Client, clientset.ExtensionsClient.Client)
	return clientset, nil
}
func LoadClient() (*client.Client, error) {
config, err := LoadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
return loadClientFromConfig(config)
}
// randomSuffix provides a random string to append to pods,services,rcs.
// TODO: Allow service names to have the same form as names
// for pods and replication controllers so we don't
// need to use such a function and can instead
// use the UUID utility function.
func randomSuffix() string {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
return strconv.Itoa(r.Int() % 10000)
}
// ExpectNoError logs err (if non-nil) and then fails the test via gomega,
// reporting the failure at the caller's line (offset 1).
func ExpectNoError(err error, explain ...interface{}) {
	if err != nil {
		Logf("Unexpected error occurred: %v", err)
	}
	ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
// ExpectNoErrorWithRetries invokes fn up to maxRetries times, returning as
// soon as it succeeds; if every attempt fails, the last error fails the test
// at the caller's line.
func ExpectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
	var err error
	for attempt := 1; attempt <= maxRetries; attempt++ {
		if err = fn(); err == nil {
			return
		}
		Logf("(Attempt %d of %d) Unexpected error occurred: %v", attempt, maxRetries, err)
	}
	ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
func Cleanup(filePath, ns string, selectors ...string) {
	By("using delete to clean up resources")
	nsArg := ""
	if ns != "" {
		nsArg = fmt.Sprintf("--namespace=%s", ns)
	}
	RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg)
	AssertCleanup(ns, selectors...)
}
// Asserts that cleanup of a namespace wrt selectors occurred.
func AssertCleanup(ns string, selectors ...string) {
	nsArg := ""
	if ns != "" {
		nsArg = fmt.Sprintf("--namespace=%s", ns)
	}
	for _, selector := range selectors {
		// Any surviving rc/svc means cleanup failed.
		if resources := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg); resources != "" {
			Failf("Resources left running after stop:\n%s", resources)
		}
		// Pods already marked for deletion are excluded by the template.
		if pods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}"); pods != "" {
			Failf("Pods left unterminated after stop:\n%s", pods)
		}
	}
}
// validatorFn is the function which individual tests will implement.
// It validates a single pod (looked up by podID) using the given client.
// we may want it to return more than just an error, at some point.
type validatorFn func(c *client.Client, podID string) error
// ValidateController is a generic mechanism for testing RC's that are running.
// It takes a container name, a test name, and a validator function which is plugged in by a specific test.
// "containername": this is grepped for.
// "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated.
// "testname": which gets bubbled up to the logging/failure messages if errors happen.
// "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
// The whole check is retried every 5s until PodStartTimeout; any single
// failing pod restarts the outer wait loop from scratch.
func ValidateController(c *client.Client, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
	getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
	// NB: kubectl adds the "exists" function to the standard template functions.
	// This lets us check to see if the "running" entry exists for each of the containers
	// we care about. Exists will never return an error and it's safe to check a chain of
	// things, any one of which may not exist. In the below template, all of info,
	// containername, and running might be nil, so the normal index function isn't very
	// helpful.
	// This template is unit-tested in kubectl, so if you change it, update the unit test.
	// You can read about the syntax here: http://golang.org/pkg/text/template/.
	getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)
	getImageTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)
	By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
waitLoop:
	for start := time.Now(); time.Since(start) < PodStartTimeout; time.Sleep(5 * time.Second) {
		getPodsOutput := RunKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "-l", testname, fmt.Sprintf("--namespace=%v", ns))
		pods := strings.Fields(getPodsOutput)
		if numPods := len(pods); numPods != replicas {
			By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
			continue
		}
		var runningPods []string
		for _, podID := range pods {
			// Verify the container reports a "running" state.
			running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns))
			if running != "true" {
				Logf("%s is created but not running", podID)
				continue waitLoop
			}
			// Verify the pod is running the expected image.
			currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
			if currentImage != containerImage {
				Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
				continue waitLoop
			}
			// Call the generic validator function here.
			// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
			if err := validator(c, podID); err != nil {
				Logf("%s is running right image but validator function failed: %v", podID, err)
				continue waitLoop
			}
			Logf("%s is verified up and running", podID)
			runningPods = append(runningPods, podID)
		}
		// If we reach here, then all our checks passed.
		if len(runningPods) == replicas {
			return
		}
	}
	// Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken.
	Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
}
// KubectlCmd runs the kubectl executable through the wrapper script.
// It assembles the server/kubeconfig/context flags from TestContext before
// appending the caller's args; with no kubeconfig it falls back to cert-dir
// based authentication flags.
func KubectlCmd(args ...string) *exec.Cmd {
	defaultArgs := []string{}
	// Reference a --server option so tests can run anywhere.
	if TestContext.Host != "" {
		defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+TestContext.Host)
	}
	if TestContext.KubeConfig != "" {
		defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
		// Reference the KubeContext
		if TestContext.KubeContext != "" {
			defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+TestContext.KubeContext)
		}
	} else {
		if TestContext.CertDir != "" {
			defaultArgs = append(defaultArgs,
				fmt.Sprintf("--certificate-authority=%s", filepath.Join(TestContext.CertDir, "ca.crt")),
				fmt.Sprintf("--client-certificate=%s", filepath.Join(TestContext.CertDir, "kubecfg.crt")),
				fmt.Sprintf("--client-key=%s", filepath.Join(TestContext.CertDir, "kubecfg.key")))
		}
	}
	kubectlArgs := append(defaultArgs, args...)
	//We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh"
	//and so on.
	cmd := exec.Command(TestContext.KubectlPath, kubectlArgs...)
	//caller will invoke this and wait on it.
	return cmd
}
// kubectlBuilder is used to build, customize and execute a kubectl Command.
// Add more functions to customize the builder as needed.
type kubectlBuilder struct {
	cmd     *exec.Cmd
	timeout <-chan time.Time // if non-nil, Exec kills the process when this channel fires
}
// NewKubectlCommand returns a builder wrapping a kubectl command prepared
// with the given arguments.
func NewKubectlCommand(args ...string) *kubectlBuilder {
	return &kubectlBuilder{cmd: KubectlCmd(args...)}
}
// WithEnv sets the environment for the underlying kubectl command.
func (b *kubectlBuilder) WithEnv(env []string) *kubectlBuilder {
	b.cmd.Env = env
	return b
}

// WithTimeout arranges for Exec to kill the command when t fires.
func (b *kubectlBuilder) WithTimeout(t <-chan time.Time) *kubectlBuilder {
	b.timeout = t
	return b
}
// WithStdinData feeds the given string to the command's stdin.
// Note the value receiver: the builder is copied and a pointer to the copy
// is returned.
func (b kubectlBuilder) WithStdinData(data string) *kubectlBuilder {
	b.cmd.Stdin = strings.NewReader(data)
	return &b
}

// WithStdinReader attaches the given reader as the command's stdin.
// Uses the same copy-and-return-pointer pattern as WithStdinData.
func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder {
	b.cmd.Stdin = reader
	return &b
}
// ExecOrDie runs the kubectl command and returns its stdout, failing the
// test on error. On an i/o timeout it probes the apiserver once more (for
// triage visibility) before dying.
func (b kubectlBuilder) ExecOrDie() string {
	str, err := b.Exec()
	Logf("stdout: %q", str)
	// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
	// Note that we're still dying after retrying so that we can get visibility to triage it further.
	if isTimeout(err) {
		Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
		time.Sleep(2 * time.Second)
		retryStr, retryErr := RunKubectl("version")
		Logf("stdout: %q", retryStr)
		Logf("err: %v", retryErr)
	}
	Expect(err).NotTo(HaveOccurred())
	return str
}
func isTimeout(err error) bool {
switch err := err.(type) {
case net.Error:
if err.Timeout() {
return true
}
case *url.Error:
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
return true
}
}
return false
}
// Exec runs the kubectl command, optionally subject to b.timeout, and returns
// its stdout. On process failure it returns a uexec.CodeExitError carrying the
// real exit code (127 when the code cannot be determined).
func (b kubectlBuilder) Exec() (string, error) {
	var stdout, stderr bytes.Buffer
	cmd := b.cmd
	cmd.Stdout, cmd.Stderr = &stdout, &stderr
	Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
	if err := cmd.Start(); err != nil {
		return "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err)
	}
	// Wait in a goroutine so we can race completion against the timeout.
	errCh := make(chan error, 1)
	go func() {
		errCh <- cmd.Wait()
	}()
	select {
	case err := <-errCh:
		if err != nil {
			var rc int = 127
			if ee, ok := err.(*exec.ExitError); ok {
				// BUGFIX: compute the exit code before logging it; previously
				// the placeholder 127 was logged instead of the real code.
				rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
				Logf("rc: %d", rc)
			}
			return "", uexec.CodeExitError{
				Err:  fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err),
				Code: rc,
			}
		}
	case <-b.timeout:
		b.cmd.Process.Kill()
		return "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr)
	}
	Logf("stderr: %q", stderr.String())
	return stdout.String(), nil
}
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
// that fails the test on any kubectl error.
func RunKubectlOrDie(args ...string) string {
	return NewKubectlCommand(args...).ExecOrDie()
}
// RunKubectl is a convenience wrapper over kubectlBuilder
// returning stdout and any execution error to the caller.
func RunKubectl(args ...string) (string, error) {
	return NewKubectlCommand(args...).Exec()
}
// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
// and fails the test on any kubectl error.
func RunKubectlOrDieInput(data string, args ...string) string {
	return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie()
}
// StartCmdAndStreamOutput starts cmd asynchronously and hands back pipes for
// its stdout and stderr; the caller owns the process and the pipes.
func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
	if stdout, err = cmd.StdoutPipe(); err != nil {
		return
	}
	if stderr, err = cmd.StderrPipe(); err != nil {
		return
	}
	Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))
	err = cmd.Start()
	return
}
// Rough equivalent of ctrl+c for cleaning up processes. Intended to be run in defer.
// A failed kill is logged but not fatal, since the process may already have exited.
func TryKill(cmd *exec.Cmd) {
	if err := cmd.Process.Kill(); err != nil {
		Logf("ERROR failed to kill command %v! The process may leak", cmd)
	}
}
// testContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func (f *Framework) testContainerOutputMatcher(scenarioName string,
	pod *api.Pod,
	containerIndex int,
	expectedOutput []string,
	matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
	By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
	// Guard against callers passing an index outside the pod's container list.
	indexValid := containerIndex >= 0 && containerIndex < len(pod.Spec.Containers)
	if !indexValid {
		Failf("Invalid container index: %d", containerIndex)
	}
	containerName := pod.Spec.Containers[containerIndex].Name
	ExpectNoError(f.MatchContainerOutput(pod, containerName, expectedOutput, matcher))
}
// MatchContainerOutput creates a pod and waits for all it's containers to exit with success.
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
func (f *Framework) MatchContainerOutput(
	pod *api.Pod,
	containerName string,
	expectedOutput []string,
	matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
	podClient := f.PodClient()
	ns := f.Namespace.Name
	// Ensure the pod is cleaned up regardless of how the checks below go.
	defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	// NOTE(review): the Create result is ignored; a failed create surfaces
	// later as a wait-for-success error instead of an immediate failure.
	podClient.Create(pod)
	// Wait for client pod to complete. All containers should succeed.
	for _, container := range pod.Spec.Containers {
		if err := WaitForPodSuccessInNamespace(f.Client, pod.Name, container.Name, ns); err != nil {
			return fmt.Errorf("expected container %s success: %v", container.Name, err)
		}
	}
	// Grab its logs. Get host first.
	podStatus, err := podClient.Get(pod.Name)
	if err != nil {
		return fmt.Errorf("failed to get pod status: %v", err)
	}
	// NOTE(review): err is always nil at this point, so the trailing %v in
	// this log line always prints "<nil>".
	Logf("Trying to get logs from node %s pod %s container %s: %v",
		podStatus.Spec.NodeName, podStatus.Name, containerName, err)
	// Sometimes the actual containers take a second to get started, try to get logs for 60s
	logs, err := GetPodLogs(f.Client, ns, pod.Name, containerName)
	if err != nil {
		Logf("Failed to get logs from node %q pod %q container %q. %v",
			podStatus.Spec.NodeName, podStatus.Name, containerName, err)
		return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
	}
	// Apply the matcher to the logs once per expected output fragment; the
	// first mismatch or matcher error aborts with a descriptive error.
	for _, expected := range expectedOutput {
		m := matcher(expected)
		matches, err := m.Match(logs)
		if err != nil {
			return fmt.Errorf("expected %q in container output: %v", expected, err)
		} else if !matches {
			return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
		}
	}
	return nil
}
// podInfo contains pod information useful for debugging e2e tests.
type podInfo struct {
	oldHostname string // node the pod ran on in the old snapshot (nonExist if newly created)
	oldPhase    string // phase in the old snapshot (nonExist if newly created)
	hostname    string // node the pod runs on now (nonExist if deleted)
	phase       string // phase now (nonExist if deleted)
}

// PodDiff is a map of pod name to podInfos
type PodDiff map[string]*podInfo
// Print formats and prints the give PodDiff. Pods whose current phase is in
// ignorePhases are skipped; deleted pods get a one-line notice; for the rest
// a single line is logged describing phase and/or host transitions.
func (p PodDiff) Print(ignorePhases sets.String) {
	for name, info := range p {
		if ignorePhases.Has(info.phase) {
			continue
		}
		if info.phase == nonExist {
			Logf("Pod %v was deleted, had phase %v and host %v", name, info.oldPhase, info.oldHostname)
			continue
		}
		phaseChange, hostChange := false, false
		msg := fmt.Sprintf("Pod %v ", name)
		if info.oldPhase != info.phase {
			phaseChange = true
			if info.oldPhase == nonExist {
				msg += fmt.Sprintf("in phase %v ", info.phase)
			} else {
				msg += fmt.Sprintf("went from phase: %v -> %v ", info.oldPhase, info.phase)
			}
		}
		if info.oldHostname != info.hostname {
			hostChange = true
			if info.oldHostname == nonExist || info.oldHostname == "" {
				msg += fmt.Sprintf("assigned host %v ", info.hostname)
			} else {
				msg += fmt.Sprintf("went from host: %v -> %v ", info.oldHostname, info.hostname)
			}
		}
		if phaseChange || hostChange {
			// Use an explicit verb: msg may contain '%' characters from pod
			// names and must not be interpreted as a format string.
			Logf("%s", msg)
		}
	}
}
// Diff computes a PodDiff given 2 lists of pods.
func Diff(oldPods []*api.Pod, curPods []*api.Pod) PodDiff {
	result := PodDiff{}
	// Every current pod gets an entry; pods absent from oldPods keep the
	// nonExist placeholder in their "old" fields.
	for _, p := range curPods {
		result[p.Name] = &podInfo{
			hostname:    p.Spec.NodeName,
			phase:       string(p.Status.Phase),
			oldHostname: nonExist,
			oldPhase:    nonExist,
		}
	}
	// Fill in the "old" side; pods that have since disappeared get the
	// nonExist placeholder in their current fields.
	for _, p := range oldPods {
		if info, seen := result[p.Name]; seen {
			info.oldHostname = p.Spec.NodeName
			info.oldPhase = string(p.Status.Phase)
			continue
		}
		result[p.Name] = &podInfo{
			hostname:    nonExist,
			phase:       nonExist,
			oldHostname: p.Spec.NodeName,
			oldPhase:    string(p.Status.Phase),
		}
	}
	return result
}
// RunDeployment Launches (and verifies correctness) of a Deployment
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunDeployment(config DeploymentConfig) error {
	if err := config.create(); err != nil {
		return err
	}
	return config.start()
}
// create submits a Deployment built from config to the API server:
// config.Replicas replicas of a single-container pod template (port 80),
// with selector and labels keyed on config.Name.
func (config *DeploymentConfig) create() error {
	By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
	deployment := &extensions.Deployment{
		ObjectMeta: api.ObjectMeta{
			Name: config.Name,
		},
		Spec: extensions.DeploymentSpec{
			Replicas: int32(config.Replicas),
			Selector: &unversioned.LabelSelector{
				MatchLabels: map[string]string{
					"name": config.Name,
				},
			},
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": config.Name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:    config.Name,
							Image:   config.Image,
							Command: config.Command,
							Ports:   []api.ContainerPort{{ContainerPort: 80}},
						},
					},
				},
			},
		},
	}
	// Layer optional env/labels/ports/resources/volumes from config onto the template.
	config.applyTo(&deployment.Spec.Template)
	_, err := config.Client.Deployments(config.Namespace).Create(deployment)
	if err != nil {
		return fmt.Errorf("Error creating deployment: %v", err)
	}
	Logf("Created deployment with name: %v, namespace: %v, replica count: %v", deployment.Name, config.Namespace, deployment.Spec.Replicas)
	return nil
}
// RunReplicaSet launches (and verifies correctness) of a ReplicaSet
// and waits until all the pods it launches to reach the "Running" state.
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunReplicaSet(config ReplicaSetConfig) error {
	if err := config.create(); err != nil {
		return err
	}
	return config.start()
}
// create submits a ReplicaSet built from config to the API server:
// config.Replicas replicas of a single-container pod template (port 80),
// with selector and labels keyed on config.Name.
func (config *ReplicaSetConfig) create() error {
	By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
	rs := &extensions.ReplicaSet{
		ObjectMeta: api.ObjectMeta{
			Name: config.Name,
		},
		Spec: extensions.ReplicaSetSpec{
			Replicas: int32(config.Replicas),
			Selector: &unversioned.LabelSelector{
				MatchLabels: map[string]string{
					"name": config.Name,
				},
			},
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": config.Name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:    config.Name,
							Image:   config.Image,
							Command: config.Command,
							Ports:   []api.ContainerPort{{ContainerPort: 80}},
						},
					},
				},
			},
		},
	}
	// Layer optional env/labels/ports/resources/volumes from config onto the template.
	config.applyTo(&rs.Spec.Template)
	_, err := config.Client.ReplicaSets(config.Namespace).Create(rs)
	if err != nil {
		return fmt.Errorf("Error creating replica set: %v", err)
	}
	Logf("Created replica set with name: %v, namespace: %v, replica count: %v", rs.Name, config.Namespace, rs.Spec.Replicas)
	return nil
}
// RunRC Launches (and verifies correctness) of a Replication Controller
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunRC(config RCConfig) error {
	if err := config.create(); err != nil {
		return err
	}
	return config.start()
}
// create submits a ReplicationController built from config to the API server:
// config.Replicas replicas of a single-container pod template (port 80),
// selected by name=config.Name. An unset DNSPolicy defaults to DNSDefault.
func (config *RCConfig) create() error {
	By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
	// Default the DNS policy so the template below always has a concrete value.
	dnsDefault := api.DNSDefault
	if config.DNSPolicy == nil {
		config.DNSPolicy = &dnsDefault
	}
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: config.Name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: int32(config.Replicas),
			Selector: map[string]string{
				"name": config.Name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": config.Name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:           config.Name,
							Image:          config.Image,
							Command:        config.Command,
							Ports:          []api.ContainerPort{{ContainerPort: 80}},
							ReadinessProbe: config.ReadinessProbe,
						},
					},
					DNSPolicy:    *config.DNSPolicy,
					NodeSelector: config.NodeSelector,
				},
			},
		},
	}
	// Layer optional env/labels/ports/resources/volumes from config onto the template.
	config.applyTo(rc.Spec.Template)
	_, err := config.Client.ReplicationControllers(config.Namespace).Create(rc)
	if err != nil {
		return fmt.Errorf("Error creating replication controller: %v", err)
	}
	Logf("Created replication controller with name: %v, namespace: %v, replica count: %v", rc.Name, config.Namespace, rc.Spec.Replicas)
	return nil
}
// applyTo copies the optional settings carried by config onto the generated
// pod template: env vars, labels, node selector, (host) ports, resource
// limits/requests, and volumes. Container-level settings always target the
// template's first container.
func (config *RCConfig) applyTo(template *api.PodTemplateSpec) {
	if config.Env != nil {
		// Hoist the container pointer out of the loop; it is loop-invariant.
		c := &template.Spec.Containers[0]
		for k, v := range config.Env {
			c.Env = append(c.Env, api.EnvVar{Name: k, Value: v})
		}
	}
	if config.Labels != nil {
		for k, v := range config.Labels {
			template.ObjectMeta.Labels[k] = v
		}
	}
	if config.NodeSelector != nil {
		// Copy rather than alias the caller's map.
		template.Spec.NodeSelector = make(map[string]string)
		for k, v := range config.NodeSelector {
			template.Spec.NodeSelector[k] = v
		}
	}
	if config.Ports != nil {
		c := &template.Spec.Containers[0]
		for k, v := range config.Ports {
			c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v)})
		}
	}
	if config.HostPorts != nil {
		c := &template.Spec.Containers[0]
		for k, v := range config.HostPorts {
			// Host ports map 1:1 to the container port.
			c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)})
		}
	}
	if config.CpuLimit > 0 || config.MemLimit > 0 {
		template.Spec.Containers[0].Resources.Limits = api.ResourceList{}
	}
	if config.CpuLimit > 0 {
		template.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI)
	}
	if config.MemLimit > 0 {
		template.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
	}
	if config.CpuRequest > 0 || config.MemRequest > 0 {
		template.Spec.Containers[0].Resources.Requests = api.ResourceList{}
	}
	if config.CpuRequest > 0 {
		template.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
	}
	if config.MemRequest > 0 {
		template.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
	}
	if len(config.Volumes) > 0 {
		template.Spec.Volumes = config.Volumes
	}
	if len(config.VolumeMounts) > 0 {
		template.Spec.Containers[0].VolumeMounts = config.VolumeMounts
	}
}
// RCStartupStatus buckets the pods of a replication controller by their
// observed startup state (see ComputeRCStartupStatus for the bucketing rules).
type RCStartupStatus struct {
	Expected              int         // replica count the RC is expected to reach
	Terminating           int         // pods with a deletion timestamp set
	Running               int         // pods that are running AND ready
	RunningButNotReady    int         // running pods whose Ready condition is not true
	Waiting               int         // pending pods not yet assigned to a node
	Pending               int         // pending pods already assigned to a node
	Unknown               int         // pods in the PodUnknown phase
	Inactive              int         // pods that succeeded or failed
	FailedContainers      int         // total container restarts across running pods
	Created               []*api.Pod  // all non-terminating pods observed
	ContainerRestartNodes sets.String // nodes hosting pods with restarted containers
}
// Print logs a one-line summary of the startup status, prefixed with name.
func (s *RCStartupStatus) Print(name string) {
	created := len(s.Created)
	Logf("%v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
		name, created, s.Expected, s.Running, s.Pending, s.Waiting, s.Inactive, s.Terminating, s.Unknown, s.RunningButNotReady)
}
// ComputeRCStartupStatus buckets the given pods by their startup state,
// relative to the expected replica count.
func ComputeRCStartupStatus(pods []*api.Pod, expected int) RCStartupStatus {
	status := RCStartupStatus{
		Expected:              expected,
		Created:               make([]*api.Pod, 0, expected),
		ContainerRestartNodes: sets.NewString(),
	}
	for _, p := range pods {
		// Pods being torn down are counted separately and not inspected further.
		if p.DeletionTimestamp != nil {
			status.Terminating++
			continue
		}
		status.Created = append(status.Created, p)
		switch p.Status.Phase {
		case api.PodRunning:
			ready := false
			for _, c := range p.Status.Conditions {
				if c.Type == api.PodReady && c.Status == api.ConditionTrue {
					ready = true
					break
				}
			}
			// Only count a pod is running when it is also ready.
			if ready {
				status.Running++
			} else {
				status.RunningButNotReady++
			}
			// Track container restarts so callers can enforce a failure budget.
			for _, v := range FailedContainers(p) {
				status.FailedContainers += v.Restarts
				status.ContainerRestartNodes.Insert(p.Spec.NodeName)
			}
		case api.PodPending:
			// Unscheduled pods are "waiting"; scheduled-but-not-running are "pending".
			if p.Spec.NodeName == "" {
				status.Waiting++
			} else {
				status.Pending++
			}
		case api.PodSucceeded, api.PodFailed:
			status.Inactive++
		case api.PodUnknown:
			status.Unknown++
		}
	}
	return status
}
// start polls the pods spawned by this RC until config.Replicas of them are
// running and ready, or until no progress has been made for the (possibly
// defaulted) timeout. It enforces a budget on container restarts and fails
// fast if the observed pod count shrinks or overshoots the replica count.
func (config *RCConfig) start() error {
	// Don't force tests to fail if they don't care about containers restarting.
	var maxContainerFailures int
	if config.MaxContainerFailures == nil {
		maxContainerFailures = int(math.Max(1.0, float64(config.Replicas)*.01))
	} else {
		maxContainerFailures = *config.MaxContainerFailures
	}
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
	podStore := NewPodStore(config.Client, config.Namespace, label, fields.Everything())
	defer podStore.Stop()
	interval := config.PollInterval
	if interval <= 0 {
		interval = 10 * time.Second
	}
	timeout := config.Timeout
	if timeout <= 0 {
		timeout = 5 * time.Minute
	}
	oldPods := make([]*api.Pod, 0)
	oldRunning := 0
	lastChange := time.Now()
	for oldRunning != config.Replicas {
		time.Sleep(interval)
		pods := podStore.List()
		startupStatus := ComputeRCStartupStatus(pods, config.Replicas)
		pods = startupStatus.Created
		if config.CreatedPods != nil {
			*config.CreatedPods = pods
		}
		if !config.Silent {
			startupStatus.Print(config.Name)
		}
		promPushRunningPending(startupStatus.Running, startupStatus.Pending)
		if config.PodStatusFile != nil {
			fmt.Fprintf(config.PodStatusFile, "%d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown, %d, runningButNotReady\n", startupStatus.Running, startupStatus.Pending, startupStatus.Waiting, startupStatus.Inactive, startupStatus.Unknown, startupStatus.RunningButNotReady)
		}
		if startupStatus.FailedContainers > maxContainerFailures {
			DumpNodeDebugInfo(config.Client, startupStatus.ContainerRestartNodes.List())
			// Get the logs from the failed containers to help diagnose what caused them to fail
			LogFailedContainers(config.Client, config.Namespace)
			return fmt.Errorf("%d containers failed which is more than allowed %d", startupStatus.FailedContainers, maxContainerFailures)
		}
		if len(pods) < len(oldPods) || len(pods) > config.Replicas {
			// This failure mode includes:
			// kubelet is dead, so node controller deleted pods and rc creates more
			//	- diagnose by noting the pod diff below.
			// pod is unhealthy, so replication controller creates another to take its place
			//	- diagnose by comparing the previous "2 Pod states" lines for inactive pods
			errorStr := fmt.Sprintf("Number of reported pods for %s changed: %d vs %d", config.Name, len(pods), len(oldPods))
			Logf("%v, pods that changed since the last iteration:", errorStr)
			Diff(oldPods, pods).Print(sets.NewString())
			// Use "%s" so any '%' in the message (e.g. from pod names) is not
			// misinterpreted as a format verb.
			return fmt.Errorf("%s", errorStr)
		}
		if len(pods) > len(oldPods) || startupStatus.Running > oldRunning {
			lastChange = time.Now()
		}
		oldPods = pods
		oldRunning = startupStatus.Running
		// Give up once no progress has been observed for the whole timeout.
		if time.Since(lastChange) > timeout {
			dumpPodDebugInfo(config.Client, pods)
			break
		}
	}
	if oldRunning != config.Replicas {
		// List only pods from a given replication controller.
		options := api.ListOptions{LabelSelector: label}
		if pods, err := config.Client.Pods(api.NamespaceAll).List(options); err == nil {
			for _, pod := range pods.Items {
				Logf("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp)
			}
		} else {
			Logf("Can't list pod debug info: %v", err)
		}
		return fmt.Errorf("Only %d pods started out of %d", oldRunning, config.Replicas)
	}
	return nil
}
// StartPods is a simplified version of RunRC that creates plain Pods (no RC):
// replicas copies of pod named with podNamePrefix, all labeled with a fresh
// startPodsID so they can be found again. Optionally waits for the pods to
// start running (if waitForRunning == true).
// The number of replicas must be non-zero.
func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string, pod api.Pod, waitForRunning bool) {
	// no pod to start
	if replicas < 1 {
		panic("StartPods: number of replicas must be non-zero")
	}
	startPodsID := string(uuid.NewUUID()) // So that we can label and find them
	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.ObjectMeta.Labels["startPodsID"] = startPodsID
		pod.Spec.Containers[0].Name = podName
		_, err := c.Pods(namespace).Create(&pod)
		ExpectNoError(err)
	}
	if waitForRunning {
		// Only announce the wait when we actually wait (this was previously
		// logged unconditionally, which was misleading).
		Logf("Waiting for running...")
		label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
		err := WaitForPodsWithLabelRunning(c, namespace, label)
		ExpectNoError(err, "Error waiting for %d pods to be running - probably a timeout", replicas)
	}
}
// dumpPodDebugInfo logs every non-running pod in the list, then dumps debug
// info for the nodes those pods were assigned to.
func dumpPodDebugInfo(c *client.Client, pods []*api.Pod) {
	badNodes := sets.NewString()
	for _, p := range pods {
		if p.Status.Phase == api.PodRunning {
			continue
		}
		if p.Spec.NodeName == "" {
			Logf("Pod %v still unassigned", p.Name)
			continue
		}
		Logf("Pod %v assigned to host %v (IP: %v) in %v", p.Name, p.Spec.NodeName, p.Status.HostIP, p.Status.Phase)
		badNodes.Insert(p.Spec.NodeName)
	}
	DumpNodeDebugInfo(c, badNodes.List())
}
// DumpAllNamespaceInfo logs all events in the namespace (sorted by first
// timestamp) and, when the cluster is small enough, dumps pod and node debug
// info for the whole cluster as well.
func DumpAllNamespaceInfo(c *client.Client, namespace string) {
	By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
	events, err := c.Events(namespace).List(api.ListOptions{})
	Expect(err).NotTo(HaveOccurred())
	// Sort events by their first timestamp
	sortedEvents := events.Items
	if len(sortedEvents) > 1 {
		sort.Sort(byFirstTimestamp(sortedEvents))
	}
	for _, e := range sortedEvents {
		Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
	}
	// Note that we don't wait for any Cleanup to propagate, which means
	// that if you delete a bunch of pods right before ending your test,
	// you may or may not see the killing/deletion/Cleanup events.

	// If cluster is large, then the following logs are basically useless, because:
	// 1. it takes tens of minutes or hours to grab all of them
	// 2. there are so many of them that working with them are mostly impossible
	// So we dump them only if the cluster is relatively small.
	maxNodesForDump := 20
	if nodes, err := c.Nodes().List(api.ListOptions{}); err == nil {
		if len(nodes.Items) <= maxNodesForDump {
			dumpAllPodInfo(c)
			dumpAllNodeInfo(c)
		} else {
			Logf("skipping dumping cluster info - cluster too large")
		}
	} else {
		Logf("unable to fetch node list: %v", err)
	}
}
// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
type byFirstTimestamp []api.Event

// Len implements sort.Interface.
func (o byFirstTimestamp) Len() int { return len(o) }

// Swap implements sort.Interface.
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

// Less orders events by FirstTimestamp, breaking ties on the involved
// object's name so the sort is deterministic.
func (o byFirstTimestamp) Less(i, j int) bool {
	if o[i].FirstTimestamp.Equal(o[j].FirstTimestamp) {
		return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
	}
	return o[i].FirstTimestamp.Before(o[j].FirstTimestamp)
}
// dumpAllPodInfo logs the state of every pod in the cluster.
func dumpAllPodInfo(c *client.Client) {
	pods, err := c.Pods("").List(api.ListOptions{})
	if err != nil {
		Logf("unable to fetch pod debug info: %v", err)
		// Without a pod list there is nothing to log; previously the code
		// fell through and dereferenced the (likely nil) list.
		return
	}
	logPodStates(pods.Items)
}
// dumpAllNodeInfo dumps debug info for every node in the cluster.
func dumpAllNodeInfo(c *client.Client) {
	// It should be OK to list unschedulable Nodes here.
	nodes, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		Logf("unable to fetch node list: %v", err)
		return
	}
	names := make([]string, 0, len(nodes.Items))
	for ix := range nodes.Items {
		names = append(names, nodes.Items[ix].Name)
	}
	DumpNodeDebugInfo(c, names)
}
// DumpNodeDebugInfo logs, for each named node: the node object itself, its
// kubelet events, the pods the kubelet reports as running there (with
// container statuses), and high-latency kubelet operations.
func DumpNodeDebugInfo(c *client.Client, nodeNames []string) {
	for _, n := range nodeNames {
		Logf("\nLogging node info for node %v", n)
		node, err := c.Nodes().Get(n)
		if err != nil {
			// NOTE(review): `node` is still logged below after a failed Get;
			// it will be the nil/zero value — tolerated since this is
			// best-effort debug output.
			Logf("Error getting node info %v", err)
		}
		Logf("Node Info: %v", node)
		Logf("\nLogging kubelet events for node %v", n)
		for _, e := range getNodeEvents(c, n) {
			Logf("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
				e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
		}
		Logf("\nLogging pods the kubelet thinks is on node %v", n)
		podList, err := GetKubeletPods(c, n)
		if err != nil {
			Logf("Unable to retrieve kubelet pods for node %v", n)
			continue
		}
		for _, p := range podList.Items {
			Logf("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
			for _, c := range p.Status.InitContainerStatuses {
				Logf("\tInit container %v ready: %v, restart count %v",
					c.Name, c.Ready, c.RestartCount)
			}
			for _, c := range p.Status.ContainerStatuses {
				Logf("\tContainer %v ready: %v, restart count %v",
					c.Name, c.Ready, c.RestartCount)
			}
		}
		HighLatencyKubeletOperations(c, 10*time.Second, n)
		// TODO: Log node resource info
	}
}
// getNodeEvents returns kubelet events for the given node (kubelet restarts,
// node unhealthy, ...). Note that listing events like this will mess with
// latency metrics, beware of calling it during a test.
// An empty slice is returned when the list call fails.
func getNodeEvents(c *client.Client, nodeName string) []api.Event {
	selector := fields.Set{
		"involvedObject.kind":      "Node",
		"involvedObject.name":      nodeName,
		"involvedObject.namespace": api.NamespaceAll,
		"source":                   "kubelet",
	}.AsSelector()
	events, err := c.Events(api.NamespaceSystem).List(api.ListOptions{FieldSelector: selector})
	if err != nil {
		Logf("Unexpected error retrieving node events %v", err)
		return []api.Event{}
	}
	return events.Items
}
// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
// It polls until a List of schedulable (spec.unschedulable=false) nodes
// succeeds, and fails the test if nothing succeeds within SingleCallTimeout.
func waitListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
	var nodes *api.NodeList
	var err error
	if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
		nodes, err = c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector()})
		// Return nil as the error so transient list failures are retried;
		// the last err is captured for the failure message below.
		return err == nil, nil
	}) != nil {
		ExpectNoError(err, "Timed out while listing nodes for e2e cluster.")
	}
	return nodes
}
// isNodeSchedulable reports whether the node can accept new pods:
// 1) doesn't have "unschedulable" field set
// 2) it's Ready condition is set to true
// 3) doesn't have NetworkUnavailable condition set to true
func isNodeSchedulable(node *api.Node) bool {
	ready := IsNodeConditionSetAsExpected(node, api.NodeReady, true)
	networkOK := IsNodeConditionUnset(node, api.NodeNetworkUnavailable) ||
		IsNodeConditionSetAsExpectedSilent(node, api.NodeNetworkUnavailable, false)
	schedulable := !node.Spec.Unschedulable
	return schedulable && ready && networkOK
}
// GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
func GetReadySchedulableNodesOrDie(c *client.Client) (nodes *api.NodeList) {
	nodes = waitListSchedulableNodesOrDie(c)
	// Previous tests may have left some nodes unhealthy; filter those out
	// rather than failing the current test because of them.
	keep := func(node api.Node) bool {
		return isNodeSchedulable(&node)
	}
	FilterNodes(nodes, keep)
	return nodes
}
// WaitForAllNodesSchedulable polls (every 30s, for up to 4h) until every
// node that is not marked unschedulable also passes the isNodeSchedulable
// health checks.
func WaitForAllNodesSchedulable(c *client.Client) error {
	return wait.PollImmediate(30*time.Second, 4*time.Hour, func() (bool, error) {
		opts := api.ListOptions{
			ResourceVersion: "0",
			FieldSelector:   fields.Set{"spec.unschedulable": "false"}.AsSelector(),
		}
		nodes, err := c.Nodes().List(opts)
		if err != nil {
			Logf("Unexpected error listing nodes: %v", err)
			// Ignore the error here - it will be retried.
			return false, nil
		}
		schedulable := 0
		for i := range nodes.Items {
			if isNodeSchedulable(&nodes.Items[i]) {
				schedulable++
			}
		}
		if schedulable != len(nodes.Items) {
			Logf("%d/%d nodes schedulable (polling after 30s)", schedulable, len(nodes.Items))
			return false, nil
		}
		return true, nil
	})
}
// AddOrUpdateLabelOnNode merge-patches labelKey=labelValue onto the named
// node, retrying up to UpdateRetries times on update conflicts. Any
// non-conflict error (or exhausting all retries) fails the test.
func AddOrUpdateLabelOnNode(c *client.Client, nodeName string, labelKey string, labelValue string) {
	patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, labelKey, labelValue)
	var err error
	for attempt := 0; attempt < UpdateRetries; attempt++ {
		err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
		if err != nil {
			if !apierrs.IsConflict(err) {
				// Non-conflict errors are fatal immediately.
				ExpectNoError(err)
			} else {
				Logf("Conflict when trying to add a label %v:%v to %v", labelKey, labelValue, nodeName)
			}
		} else {
			break
		}
		// Brief backoff before retrying a conflicted patch.
		time.Sleep(100 * time.Millisecond)
	}
	// Fails the test if the final attempt still ended in error.
	ExpectNoError(err)
}
// ExpectNodeHasLabel fails the test unless the node carries labelKey=labelValue.
func ExpectNodeHasLabel(c *client.Client, nodeName string, labelKey string, labelValue string) {
	By("verifying the node has the label " + labelKey + " " + labelValue)
	node, err := c.Nodes().Get(nodeName)
	ExpectNoError(err)
	actual := node.Labels[labelKey]
	Expect(actual).To(Equal(labelValue))
}
// RemoveLabelOffNode is for cleaning up labels temporarily added to node,
// won't fail if target label doesn't exist or has been removed.
// Conflicting updates are retried up to UpdateRetries times; afterwards the
// updated node is checked to confirm the label is really gone.
func RemoveLabelOffNode(c *client.Client, nodeName string, labelKey string) {
	By("removing the label " + labelKey + " off the node " + nodeName)
	var nodeUpdated *api.Node
	var node *api.Node
	var err error
	for attempt := 0; attempt < UpdateRetries; attempt++ {
		node, err = c.Nodes().Get(nodeName)
		ExpectNoError(err)
		// Label already absent: nothing to clean up.
		if node.Labels == nil || len(node.Labels[labelKey]) == 0 {
			return
		}
		delete(node.Labels, labelKey)
		nodeUpdated, err = c.Nodes().Update(node)
		if err != nil {
			if !apierrs.IsConflict(err) {
				// Non-conflict errors are fatal immediately.
				ExpectNoError(err)
			} else {
				Logf("Conflict when trying to remove a label %v from %v", labelKey, nodeName)
			}
		} else {
			break
		}
		// Brief backoff before re-reading the node and retrying.
		time.Sleep(100 * time.Millisecond)
	}
	// Fails the test if the final attempt still ended in error; this also
	// guarantees nodeUpdated below was set by a successful Update.
	ExpectNoError(err)
	By("verifying the node doesn't have the label " + labelKey)
	if nodeUpdated.Labels != nil && len(nodeUpdated.Labels[labelKey]) != 0 {
		Failf("Failed removing label " + labelKey + " of the node " + nodeName)
	}
}
// AddOrUpdateTaintOnNode adds the taint to the named node's taints
// annotation, replacing any existing taint that matches it. Conflicting
// updates are retried up to UpdateRetries times with a short sleep between
// attempts; non-conflict errors fail the test.
func AddOrUpdateTaintOnNode(c *client.Client, nodeName string, taint api.Taint) {
	for attempt := 0; attempt < UpdateRetries; attempt++ {
		node, err := c.Nodes().Get(nodeName)
		ExpectNoError(err)
		nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
		ExpectNoError(err)
		var newTaints []api.Taint
		updated := false
		// Replace a matching taint in place; keep all others.
		for _, existingTaint := range nodeTaints {
			if taint.MatchTaint(existingTaint) {
				newTaints = append(newTaints, taint)
				updated = true
				continue
			}
			newTaints = append(newTaints, existingTaint)
		}
		// No match found: append as a new taint.
		if !updated {
			newTaints = append(newTaints, taint)
		}
		taintsData, err := json.Marshal(newTaints)
		ExpectNoError(err)
		if node.Annotations == nil {
			node.Annotations = make(map[string]string)
		}
		node.Annotations[api.TaintsAnnotationKey] = string(taintsData)
		_, err = c.Nodes().Update(node)
		if err != nil {
			if !apierrs.IsConflict(err) {
				// Non-conflict errors are fatal immediately.
				ExpectNoError(err)
			} else {
				Logf("Conflict when trying to add/update taint %v to %v", taint, nodeName)
			}
		} else {
			break
		}
		// Brief backoff before re-reading the node and retrying.
		time.Sleep(100 * time.Millisecond)
	}
}
// taintExists reports whether any taint in the list matches taintToFind.
func taintExists(taints []api.Taint, taintToFind api.Taint) bool {
	found := false
	for _, t := range taints {
		if t.MatchTaint(taintToFind) {
			found = true
			break
		}
	}
	return found
}
// ExpectNodeHasTaint fails the test unless the named node carries the taint.
func ExpectNodeHasTaint(c *client.Client, nodeName string, taint api.Taint) {
	By("verifying the node has the taint " + taint.ToString())
	node, err := c.Nodes().Get(nodeName)
	ExpectNoError(err)
	nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
	ExpectNoError(err)
	missing := len(nodeTaints) == 0 || !taintExists(nodeTaints, taint)
	if missing {
		Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
	}
}
// deleteTaint returns a copy of oldTaints with every taint matching
// taintToDelete removed. It returns an error when no taint matched, so
// callers can distinguish "nothing to remove" from a successful removal.
func deleteTaint(oldTaints []api.Taint, taintToDelete api.Taint) ([]api.Taint, error) {
	newTaints := []api.Taint{}
	found := false
	for _, oldTaint := range oldTaints {
		if oldTaint.MatchTaint(taintToDelete) {
			found = true
			continue
		}
		// Keep the existing taint. (Bug fix: this previously appended
		// taintToDelete for every non-matching entry, corrupting the result.)
		newTaints = append(newTaints, oldTaint)
	}
	if !found {
		return nil, fmt.Errorf("taint %s not found.", taintToDelete.ToString())
	}
	return newTaints, nil
}
// RemoveTaintOffNode is for cleaning up taints temporarily added to node,
// won't fail if target taint doesn't exist or has been removed.
// Conflicting updates are retried up to UpdateRetries times; afterwards the
// node is re-read to confirm the taint is really gone.
func RemoveTaintOffNode(c *client.Client, nodeName string, taint api.Taint) {
	By("removing the taint " + taint.ToString() + " off the node " + nodeName)
	for attempt := 0; attempt < UpdateRetries; attempt++ {
		node, err := c.Nodes().Get(nodeName)
		ExpectNoError(err)
		nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
		ExpectNoError(err)
		// Nothing to remove: treat as success.
		if len(nodeTaints) == 0 {
			return
		}
		if !taintExists(nodeTaints, taint) {
			return
		}
		newTaints, err := deleteTaint(nodeTaints, taint)
		ExpectNoError(err)
		taintsData, err := json.Marshal(newTaints)
		ExpectNoError(err)
		node.Annotations[api.TaintsAnnotationKey] = string(taintsData)
		_, err = c.Nodes().Update(node)
		if err != nil {
			if !apierrs.IsConflict(err) {
				// Non-conflict errors are fatal immediately.
				ExpectNoError(err)
			} else {
				Logf("Conflict when trying to add/update taint %s to node %v", taint.ToString(), nodeName)
			}
		} else {
			break
		}
		// Brief backoff before re-reading the node and retrying.
		time.Sleep(100 * time.Millisecond)
	}
	// Re-read the node and verify the taint is really gone.
	nodeUpdated, err := c.Nodes().Get(nodeName)
	ExpectNoError(err)
	By("verifying the node doesn't have the taint " + taint.ToString())
	taintsGot, err := api.GetTaintsFromNodeAnnotations(nodeUpdated.Annotations)
	ExpectNoError(err)
	if taintExists(taintsGot, taint) {
		Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
	}
}
// ScaleRC scales the named replication controller to size replicas and,
// when wait is true, blocks until all of its pods are running.
func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
	By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
	if err != nil {
		return err
	}
	retryScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
	retryReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
	if err = scaler.Scale(ns, name, size, nil, retryScale, retryReplicas); err != nil {
		return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
	}
	if !wait {
		return nil
	}
	return WaitForRCPodsRunning(c, ns, name)
}
// WaitForRCPodsRunning waits up to 10 minutes for the pods selected by the
// named replication controller to become Running.
func WaitForRCPodsRunning(c *client.Client, ns, rcName string) error {
	rc, err := c.ReplicationControllers(ns).Get(rcName)
	if err != nil {
		return err
	}
	selector := labels.SelectorFromSet(labels.Set(rc.Spec.Selector))
	if err := WaitForPodsWithLabelRunning(c, ns, selector); err != nil {
		return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", rcName, err)
	}
	return nil
}
// WaitForPodsWithLabelRunning waits up to 10 minutes for all matching pods
// to become Running and for at least one matching pod to exist.
func WaitForPodsWithLabelRunning(c *client.Client, ns string, label labels.Selector) error {
	running := false
	PodStore := NewPodStore(c, ns, label, fields.Everything())
	defer PodStore.Stop()
waitLoop:
	for start := time.Now(); time.Since(start) < 10*time.Minute; time.Sleep(5 * time.Second) {
		pods := PodStore.List()
		// Keep polling until at least one matching pod shows up.
		if len(pods) == 0 {
			continue waitLoop
		}
		// Any pod not yet Running sends us back to polling.
		for _, p := range pods {
			if p.Status.Phase != api.PodRunning {
				continue waitLoop
			}
		}
		running = true
		break
	}
	if !running {
		return fmt.Errorf("Timeout while waiting for pods with labels %q to be running", label.String())
	}
	return nil
}
// podsWithLabelScheduled returns true only when at least one pod matches the
// label and every matching pod has been assigned to a node.
func podsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (bool, error) {
	store := NewPodStore(c, ns, label, fields.Everything())
	defer store.Stop()
	pods := store.List()
	if len(pods) == 0 {
		return false, nil
	}
	allScheduled := true
	for _, pod := range pods {
		if pod.Spec.NodeName == "" {
			allScheduled = false
			break
		}
	}
	return allScheduled, nil
}
// WaitForPodsWithLabelScheduled waits for all matching pods to become
// scheduled (and for at least one matching pod to exist), returning the
// list of matching pods.
func WaitForPodsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) {
	check := func() (bool, error) {
		pods, err = WaitForPodsWithLabel(c, ns, label)
		if err != nil {
			return false, err
		}
		// Every matching pod must have a node assignment.
		for _, pod := range pods.Items {
			if pod.Spec.NodeName == "" {
				return false, nil
			}
		}
		return true, nil
	}
	err = wait.PollImmediate(Poll, podScheduledBeforeTimeout, check)
	return pods, err
}
// WaitForPodsWithLabel waits up to PodListTimeout for at least one pod with
// the given label to exist, returning the listed pods. A timeout with no
// matching pods is reported via err.
func WaitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) {
	for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) {
		options := api.ListOptions{LabelSelector: label}
		pods, err = c.Pods(ns).List(options)
		// A failed List aborts the test immediately.
		Expect(err).NotTo(HaveOccurred())
		if len(pods.Items) > 0 {
			break
		}
	}
	if pods == nil || len(pods.Items) == 0 {
		err = fmt.Errorf("Timeout while waiting for pods with label %v", label)
	}
	return
}
// DeleteRCAndPods deletes a Replication Controller and all pods it spawned,
// timing both the RC deletion and the pod termination.
func DeleteRCAndPods(c *client.Client, ns, name string) error {
	By(fmt.Sprintf("deleting replication controller %s in namespace %s", name, ns))
	rc, err := c.ReplicationControllers(ns).Get(name)
	if err != nil {
		if apierrs.IsNotFound(err) {
			// An already-deleted RC counts as success.
			Logf("RC %s was already deleted: %v", name, err)
			return nil
		}
		return err
	}
	// The reaper deletes the RC and reaps its pods, with a 10-minute budget.
	reaper, err := kubectl.ReaperForReplicationController(c, 10*time.Minute)
	if err != nil {
		if apierrs.IsNotFound(err) {
			Logf("RC %s was already deleted: %v", name, err)
			return nil
		}
		return err
	}
	// Watch the RC's pods so termination can be timed below; must be set up
	// before the reaper starts deleting.
	ps, err := podStoreForRC(c, rc)
	if err != nil {
		return err
	}
	defer ps.Stop()
	startTime := time.Now()
	err = reaper.Stop(ns, name, 0, nil)
	if apierrs.IsNotFound(err) {
		Logf("RC %s was already deleted: %v", name, err)
		return nil
	}
	if err != nil {
		return fmt.Errorf("error while stopping RC: %s: %v", name, err)
	}
	deleteRCTime := time.Now().Sub(startTime)
	Logf("Deleting RC %s took: %v", name, deleteRCTime)
	// First wait for the pods to stop being active (tight 10ms poll for
	// accurate timing), then for them to disappear entirely.
	err = waitForPodsInactive(ps, 10*time.Millisecond, 10*time.Minute)
	if err != nil {
		return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
	}
	terminatePodTime := time.Now().Sub(startTime) - deleteRCTime
	Logf("Terminating RC %s pods took: %v", name, terminatePodTime)
	// this is to relieve namespace controller's pressure when deleting the
	// namespace after a test.
	err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute)
	if err != nil {
		return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
	}
	return nil
}
// DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods.
func DeleteRCAndWaitForGC(c *client.Client, ns, name string) error {
	By(fmt.Sprintf("deleting replication controller %s in namespace %s, will wait for the garbage collector to delete the pods", name, ns))
	rc, err := c.ReplicationControllers(ns).Get(name)
	if err != nil {
		if apierrs.IsNotFound(err) {
			// An already-deleted RC counts as success.
			Logf("RC %s was already deleted: %v", name, err)
			return nil
		}
		return err
	}
	// Watch the RC's pods before deleting so their disappearance can be timed.
	ps, err := podStoreForRC(c, rc)
	if err != nil {
		return err
	}
	defer ps.Stop()
	startTime := time.Now()
	falseVar := false
	// OrphanDependents=false makes the garbage collector delete the pods.
	deleteOption := &api.DeleteOptions{OrphanDependents: &falseVar}
	err = c.ReplicationControllers(ns).Delete(name, deleteOption)
	if err != nil && apierrs.IsNotFound(err) {
		Logf("RC %s was already deleted: %v", name, err)
		return nil
	}
	if err != nil {
		return err
	}
	deleteRCTime := time.Now().Sub(startTime)
	Logf("Deleting RC %s took: %v", name, deleteRCTime)
	// Scale the poll interval and timeout with the RC's size so huge RCs don't
	// hammer the store and small ones fail fast.
	var interval, timeout time.Duration
	switch {
	case rc.Spec.Replicas < 100:
		interval = 100 * time.Millisecond
	case rc.Spec.Replicas < 1000:
		interval = 1 * time.Second
	default:
		interval = 10 * time.Second
	}
	if rc.Spec.Replicas < 5000 {
		timeout = 10 * time.Minute
	} else {
		// Derive the timeout from the expected GC deletion throughput.
		timeout = time.Duration(rc.Spec.Replicas/gcThroughput) * time.Second
		// gcThroughput is pretty strict now, add a bit more to it
		timeout = timeout + 3*time.Minute
	}
	err = waitForPodsInactive(ps, interval, timeout)
	if err != nil {
		return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
	}
	terminatePodTime := time.Now().Sub(startTime) - deleteRCTime
	Logf("Terminating RC %s pods took: %v", name, terminatePodTime)
	// Wait for the pods to be fully gone to relieve the namespace controller
	// when the test namespace is deleted afterwards.
	err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute)
	if err != nil {
		return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
	}
	return nil
}
// podStoreForRC creates a PodStore that monitors pods belong to the rc. It
// waits until the reflector does a List() before returning.
func podStoreForRC(c *client.Client, rc *api.ReplicationController) (*PodStore, error) {
	// Note: named "selector" (not "labels") to avoid shadowing the labels package.
	selector := labels.SelectorFromSet(rc.Spec.Selector)
	store := NewPodStore(c, rc.Namespace, selector, fields.Everything())
	// A non-empty LastSyncResourceVersion means the reflector has listed once.
	err := wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
		return len(store.reflector.LastSyncResourceVersion()) != 0, nil
	})
	return store, err
}
// waitForPodsInactive waits until there are no active pods left in the PodStore.
// This is to make a fair comparison of deletion time between DeleteRCAndPods
// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
// when the pod is inactive.
func waitForPodsInactive(ps *PodStore, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		// Done once no pod in the store is still considered active.
		for _, pod := range ps.List() {
			if controller.IsPodActive(pod) {
				return false, nil
			}
		}
		return true, nil
	})
}
// waitForPodsGone waits until there are no pods left in the PodStore.
func waitForPodsGone(ps *PodStore, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		return len(ps.List()) == 0, nil
	})
}
// Delete a ReplicaSet and all pods it spawned
func DeleteReplicaSet(c *client.Client, ns, name string) error {
	By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
	// Local renamed rs (was misleadingly called rc).
	rs, err := c.Extensions().ReplicaSets(ns).Get(name)
	if err != nil {
		if apierrs.IsNotFound(err) {
			// An already-deleted ReplicaSet counts as success.
			Logf("ReplicaSet %s was already deleted: %v", name, err)
			return nil
		}
		return err
	}
	reaper, err := kubectl.ReaperFor(extensions.Kind("ReplicaSet"), c)
	if err != nil {
		if apierrs.IsNotFound(err) {
			Logf("ReplicaSet %s was already deleted: %v", name, err)
			return nil
		}
		return err
	}
	startTime := time.Now()
	err = reaper.Stop(ns, name, 0, nil)
	if apierrs.IsNotFound(err) {
		Logf("ReplicaSet %s was already deleted: %v", name, err)
		return nil
	}
	deleteRSTime := time.Since(startTime)
	Logf("Deleting RS %s took: %v", name, deleteRSTime)
	// Only wait for the pods if the reaper succeeded; otherwise return its error.
	if err == nil {
		err = waitForReplicaSetPodsGone(c, rs)
	}
	terminatePodTime := time.Since(startTime) - deleteRSTime
	Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime)
	return err
}
// waitForReplicaSetPodsGone waits until there are no pods reported under a
// ReplicaSet selector (because the pods have completed termination).
func waitForReplicaSetPodsGone(c *client.Client, rs *extensions.ReplicaSet) error {
	return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
		selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
		ExpectNoError(err)
		options := api.ListOptions{LabelSelector: selector}
		pods, err := c.Pods(rs.Namespace).List(options)
		// Transient list errors are retried on the next poll (never fatal here).
		return err == nil && len(pods.Items) == 0, nil
	})
}
// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore).
// Note that the status should stay valid at all times unless shortly after a scaling event or the deployment is just created.
// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead.
func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment) error {
	// These outer variables are written by the poll closure so the failure
	// path below can log the last observed state and reason.
	var (
		oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
		newRS                     *extensions.ReplicaSet
		deployment                *extensions.Deployment
		reason                    string
	)
	err := wait.Poll(Poll, 2*time.Minute, func() (bool, error) {
		var err error
		deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name)
		if err != nil {
			return false, err
		}
		oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c)
		if err != nil {
			return false, err
		}
		if newRS == nil {
			// New RC hasn't been created yet.
			reason = "new replica set hasn't been created yet"
			Logf(reason)
			return false, nil
		}
		allRSs = append(oldRSs, newRS)
		// The old/new ReplicaSets need to contain the pod-template-hash label
		for i := range allRSs {
			if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
				reason = "all replica sets need to contain the pod-template-hash label"
				Logf(reason)
				return false, nil
			}
		}
		totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
		totalAvailable, err := deploymentutil.GetAvailablePodsForDeployment(c, deployment)
		if err != nil {
			return false, err
		}
		// Surge invariant: created pods must not exceed replicas + maxSurge.
		maxCreated := deployment.Spec.Replicas + deploymentutil.MaxSurge(*deployment)
		if totalCreated > maxCreated {
			reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
			Logf(reason)
			return false, nil
		}
		// Availability invariant: available pods must not drop below the minimum.
		minAvailable := deploymentutil.MinAvailable(deployment)
		if totalAvailable < minAvailable {
			reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", totalAvailable, minAvailable)
			Logf(reason)
			return false, nil
		}
		return true, nil
	})
	// On timeout, dump diagnostics and surface the last recorded reason.
	if err == wait.ErrWaitTimeout {
		logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
		logPodsOfDeployment(c, deployment)
		err = fmt.Errorf("%s", reason)
	}
	if err != nil {
		return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
	}
	return nil
}
// Waits for the deployment to reach desired state.
// Returns an error if the deployment's rolling update strategy (max unavailable or max surge) is broken at any times.
func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) error {
	// Written by the poll closure so the timeout path can log the last state.
	var (
		oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
		newRS                     *extensions.ReplicaSet
		deployment                *extensions.Deployment
	)
	err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		var err error
		deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name)
		if err != nil {
			return false, err
		}
		oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c)
		if err != nil {
			return false, err
		}
		if newRS == nil {
			// New RS hasn't been created yet.
			return false, nil
		}
		allRSs = append(oldRSs, newRS)
		// The old/new ReplicaSets need to contain the pod-template-hash label
		for i := range allRSs {
			if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
				return false, nil
			}
		}
		totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
		totalAvailable, err := deploymentutil.GetAvailablePodsForDeployment(c, deployment)
		if err != nil {
			return false, err
		}
		// Violating max surge is a hard failure, not a retry.
		maxCreated := deployment.Spec.Replicas + deploymentutil.MaxSurge(*deployment)
		if totalCreated > maxCreated {
			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
			logPodsOfDeployment(c, deployment)
			return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
		}
		// Violating max unavailable is likewise fatal.
		minAvailable := deploymentutil.MinAvailable(deployment)
		if totalAvailable < minAvailable {
			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
			logPodsOfDeployment(c, deployment)
			return false, fmt.Errorf("total pods available: %d, less than the min required: %d", totalAvailable, minAvailable)
		}
		// When the deployment status and its underlying resources reach the desired state, we're done
		if deployment.Status.Replicas == deployment.Spec.Replicas &&
			deployment.Status.UpdatedReplicas == deployment.Spec.Replicas &&
			deploymentutil.GetReplicaCountForReplicaSets(oldRSs) == 0 &&
			deploymentutil.GetReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}) == deployment.Spec.Replicas {
			return true, nil
		}
		return false, nil
	})
	// On timeout, dump the ReplicaSets and pods for debugging.
	if err == wait.ErrWaitTimeout {
		logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
		logPodsOfDeployment(c, deployment)
	}
	if err != nil {
		return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
	}
	return nil
}
// WaitForDeploymentUpdatedReplicasLTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
// NOTE(review): despite the "LTE" name, the check is updatedReplicas >= minUpdatedReplicas;
// the name is kept unchanged for backward compatibility with existing callers.
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int, desiredGeneration int64) error {
	err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		if err != nil {
			return false, err
		}
		// The controller must have observed at least desiredGeneration and
		// rolled out at least minUpdatedReplicas updated replicas.
		if deployment.Status.ObservedGeneration >= desiredGeneration && deployment.Status.UpdatedReplicas >= int32(minUpdatedReplicas) {
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		// Fixed "updpatedReplicas" typo in the error message.
		return fmt.Errorf("error waiting for deployment %s to have at least %d updatedReplicas: %v", deploymentName, minUpdatedReplicas, err)
	}
	return nil
}
// WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback.
// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
	pollErr := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
		d, err := c.Extensions().Deployments(ns).Get(deploymentName)
		if err != nil {
			return false, err
		}
		// Rollback not set or is kicked off
		return d.Spec.RollbackTo == nil, nil
	})
	if pollErr != nil {
		return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, pollErr)
	}
	return nil
}
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
	// Shared with the closure so the error paths below can report what was seen.
	var deployment *extensions.Deployment
	var newRS *extensions.ReplicaSet
	err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
		var err error
		deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
		if err != nil {
			return false, err
		}
		// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
		newRS, err = deploymentutil.GetNewReplicaSet(deployment, c)
		if err != nil || newRS == nil || !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
			// err may be nil here, in which case we simply poll again.
			return false, err
		}
		// Check revision of this deployment, and of the new replica set of this deployment
		if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision ||
			newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision ||
			deployment.Spec.Template.Spec.Containers[0].Image != image || newRS.Spec.Template.Spec.Containers[0].Image != image {
			return false, nil
		}
		return true, nil
	})
	// On timeout, dump the deployment and its new RS for debugging.
	if err == wait.ErrWaitTimeout {
		logReplicaSetsOfDeployment(deployment, nil, newRS)
	}
	// Checked before the generic error so "no new RS" gets a specific message.
	if newRS == nil {
		return fmt.Errorf("deployment %s failed to create new RS: %v", deploymentName, err)
	}
	if err != nil {
		return fmt.Errorf("error waiting for deployment %s (got %s / %s) and new RS %s (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err)
	}
	return nil
}
// WaitForOverlappingAnnotationMatch waits until the deployment's overlap
// annotation equals the expected value.
func WaitForOverlappingAnnotationMatch(c clientset.Interface, ns, deploymentName, expected string) error {
	return wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
		d, err := c.Extensions().Deployments(ns).Get(deploymentName)
		if err != nil {
			return false, err
		}
		return d.Annotations[deploymentutil.OverlapAnnotation] == expected, nil
	})
}
// CheckNewRSAnnotations check if the new RS's annotation is as expected
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	if err != nil {
		return err
	}
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
	if err != nil {
		return err
	}
	for key, want := range expectedAnnotations {
		// Skip checking revision annotations
		if key == deploymentutil.RevisionAnnotation {
			continue
		}
		if got := newRS.Annotations[key]; got != want {
			return fmt.Errorf("Expected new RS annotations = %+v, got %+v", expectedAnnotations, newRS.Annotations)
		}
	}
	return nil
}
// WaitForPodsReady waits until every pod labeled name=<name> in ns has been
// available for at least minReadySeconds.
func WaitForPodsReady(c *clientset.Clientset, ns, name string, minReadySeconds int) error {
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	listOpts := api.ListOptions{LabelSelector: selector}
	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		pods, err := c.Pods(ns).List(listOpts)
		if err != nil {
			// Treat list errors as "not ready yet" and keep polling.
			return false, nil
		}
		for i := range pods.Items {
			if !deploymentutil.IsPodAvailable(&pods.Items[i], int32(minReadySeconds), time.Now()) {
				return false, nil
			}
		}
		return true, nil
	})
}
// Waits for the deployment to clean up old rcs.
func WaitForDeploymentOldRSsNum(c *clientset.Clientset, ns, deploymentName string, desiredRSNum int) error {
	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		d, err := c.Extensions().Deployments(ns).Get(deploymentName)
		if err != nil {
			return false, err
		}
		_, oldRSs, err := deploymentutil.GetOldReplicaSets(d, c)
		if err != nil {
			return false, err
		}
		// Done once exactly desiredRSNum old ReplicaSets remain.
		return len(oldRSs) == desiredRSNum, nil
	})
}
// logReplicaSetsOfDeployment dumps a deployment together with its old and new
// ReplicaSets for debugging.
func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
	Logf("Deployment: %+v. Selector = %+v", *deployment, deployment.Spec.Selector)
	total := len(allOldRSs)
	for i, rs := range allOldRSs {
		Logf("All old ReplicaSets (%d/%d) of deployment %s: %+v. Selector = %+v", i+1, total, deployment.Name, *rs, rs.Spec.Selector)
	}
	if newRS == nil {
		Logf("New ReplicaSet of deployment %s is nil.", deployment.Name)
		return
	}
	Logf("New ReplicaSet of deployment %s: %+v. Selector = %+v", deployment.Name, *newRS, newRS.Spec.Selector)
}
// WaitForObservedDeployment waits up to 1 minute for the deployment controller
// to observe at least desiredGeneration of the named deployment.
func WaitForObservedDeployment(c *clientset.Clientset, ns, deploymentName string, desiredGeneration int64) error {
	getDeployment := func() (*extensions.Deployment, error) {
		return c.Extensions().Deployments(ns).Get(deploymentName)
	}
	return deploymentutil.WaitForObservedDeployment(getDeployment, desiredGeneration, Poll, 1*time.Minute)
}
// logPodsOfDeployment lists every pod belonging to the deployment and logs
// whether each is "available" per the deployment's minReadySeconds.
func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment) {
	minReadySeconds := deployment.Spec.MinReadySeconds
	podList, err := deploymentutil.ListPods(deployment,
		func(namespace string, options api.ListOptions) (*api.PodList, error) {
			return c.Core().Pods(namespace).List(options)
		})
	if err != nil {
		Logf("Failed to list pods of deployment %s: %v", deployment.Name, err)
		return
	}
	// err is necessarily nil here, so the old redundant "if err == nil"
	// wrapper around this loop was removed.
	for _, pod := range podList.Items {
		availability := "not available"
		if deploymentutil.IsPodAvailable(&pod, minReadySeconds, time.Now()) {
			availability = "available"
		}
		Logf("Pod %s is %s: %+v", pod.Name, availability, pod)
	}
}
// Waits for the number of events on the given object to reach a desired count.
func WaitForEvents(c *client.Client, ns string, objOrRef runtime.Object, desiredEventsCount int) error {
	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		events, err := c.Events(ns).Search(objOrRef)
		if err != nil {
			return false, fmt.Errorf("error in listing events: %s", err)
		}
		count := len(events.Items)
		switch {
		case count == desiredEventsCount:
			return true, nil
		case count < desiredEventsCount:
			// Not enough yet; keep polling.
			return false, nil
		default:
			// Number of events has exceeded the desired count.
			return false, fmt.Errorf("number of events has exceeded the desired count, eventsCount: %d, desiredCount: %d", count, desiredEventsCount)
		}
	})
}
// Waits for the number of events on the given object to be at least a desired count.
func WaitForPartialEvents(c *client.Client, ns string, objOrRef runtime.Object, atLeastEventsCount int) error {
	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		events, err := c.Events(ns).Search(objOrRef)
		if err != nil {
			return false, fmt.Errorf("error in listing events: %s", err)
		}
		return len(events.Items) >= atLeastEventsCount, nil
	})
}
// updateDeploymentFunc mutates a deployment in place before an update attempt.
type updateDeploymentFunc func(d *extensions.Deployment)

// UpdateDeploymentWithRetries repeatedly gets the named deployment, applies
// applyUpdate, and pushes the result until the update succeeds or the one
// minute budget runs out (e.g. on repeated write conflicts).
func UpdateDeploymentWithRetries(c *clientset.Clientset, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) {
	deployments := c.Extensions().Deployments(namespace)
	err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
		deployment, err = deployments.Get(name)
		if err != nil {
			return false, err
		}
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(deployment)
		deployment, err = deployments.Update(deployment)
		if err != nil {
			// Retry with a fresh copy (conflicts and transient errors).
			return false, nil
		}
		Logf("Updating deployment %s", name)
		return true, nil
	})
	return deployment, err
}
// FailedContainers inspects all containers in a pod and returns failure
// information for containers that have failed or been restarted.
// A map is returned where the key is the containerID and the value is a
// struct containing the restart and failure information
func FailedContainers(pod *api.Pod) map[string]ContainerFailures {
	statuses := pod.Status.ContainerStatuses
	// Preserve the original contract: nil (not an empty map) when there are
	// no container statuses at all.
	if len(statuses) == 0 {
		return nil
	}
	states := make(map[string]ContainerFailures)
	for _, status := range statuses {
		// Record the termination state, preferring the current one.
		if status.State.Terminated != nil {
			states[status.ContainerID] = ContainerFailures{status: status.State.Terminated}
		} else if status.LastTerminationState.Terminated != nil {
			states[status.ContainerID] = ContainerFailures{status: status.LastTerminationState.Terminated}
		}
		// Fold the restart count into the (possibly fresh) entry.
		if status.RestartCount > 0 {
			state, ok := states[status.ContainerID]
			if !ok {
				state = ContainerFailures{}
			}
			state.Restarts = int(status.RestartCount)
			states[status.ContainerID] = state
		}
	}
	return states
}
// Prints the histogram of the events and returns the number of bad events.
func BadEvents(events []*api.Event) int {
	type histogramKey struct {
		reason string
		source string
	}
	// Bucket events by (reason, source component).
	histogram := map[histogramKey]int{}
	for _, event := range events {
		histogram[histogramKey{reason: event.Reason, source: event.Source.Component}]++
	}
	for key, count := range histogram {
		Logf("- reason: %s, source: %s -> %d", key.reason, key.source, count)
	}
	// Any reason containing one of these substrings counts as bad.
	badPatterns := []string{"kill", "fail"}
	total := 0
	for key, count := range histogram {
		for _, pattern := range badPatterns {
			if strings.Contains(key.reason, pattern) {
				Logf("WARNING %d events from %s with reason: %s", count, key.source, key.reason)
				total += count
				break
			}
		}
	}
	return total
}
// NodeAddresses returns the first address of the given type of each node.
func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []string {
	hosts := []string{}
	for _, node := range nodelist.Items {
		// Use the first external IP address we find on the node, and
		// use at most one per node.
		// TODO(roberthbailey): Use the "preferred" address for the node, once
		// such a thing is defined (#2462).
		for _, address := range node.Status.Addresses {
			if address.Type != addrType {
				continue
			}
			hosts = append(hosts, address.Address)
			break
		}
	}
	return hosts
}
// NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes master node.
// It returns an error if it can't find an external IP for every node, though it still returns all
// hosts that it found in that case.
func NodeSSHHosts(c *client.Client) ([]string, error) {
	nodelist := waitListSchedulableNodesOrDie(c)
	// TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
	hosts := NodeAddresses(nodelist, api.NodeExternalIP)
	// Error if any node didn't have an external IP. Note: in this case the
	// raw (port-less) addresses are returned, as in the original.
	if len(hosts) != len(nodelist.Items) {
		return hosts, fmt.Errorf(
			"only found %d external IPs on nodes, but found %d nodes. Nodelist: %v",
			len(hosts), len(nodelist.Items), nodelist)
	}
	sshHosts := make([]string, 0, len(hosts))
	for _, host := range hosts {
		sshHosts = append(sshHosts, net.JoinHostPort(host, "22"))
	}
	return sshHosts, nil
}
// SSHResult holds the outcome of a single SSH command invocation.
type SSHResult struct {
	User   string // user the command ran as
	Host   string // target host (with port)
	Cmd    string // the command line that was run
	Stdout string // captured standard output
	Stderr string // captured standard error
	Code   int    // remote process exit code
}
// SSH synchronously SSHs to a node running on provider and runs cmd. If there
// is no error performing the SSH, the stdout, stderr, and exit code are
// returned.
func SSH(cmd, host, provider string) (SSHResult, error) {
	result := SSHResult{Host: host, Cmd: cmd}
	// Get a signer for the provider.
	signer, err := GetSigner(provider)
	if err != nil {
		return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err)
	}
	// RunSSHCommand will default to Getenv("USER") if user == "", but we're
	// defaulting here as well for logging clarity.
	if result.User = os.Getenv("KUBE_SSH_USER"); result.User == "" {
		result.User = os.Getenv("USER")
	}
	result.Stdout, result.Stderr, result.Code, err = sshutil.RunSSHCommand(cmd, result.User, host, signer)
	return result, err
}
// LogSSHResult logs the command, stdout, stderr, and exit code of an SSH run.
func LogSSHResult(result SSHResult) {
	remote := fmt.Sprintf("%s@%s", result.User, result.Host)
	Logf("ssh %s: command: %s", remote, result.Cmd)
	Logf("ssh %s: stdout: %q", remote, result.Stdout)
	Logf("ssh %s: stderr: %q", remote, result.Stderr)
	Logf("ssh %s: exit code: %d", remote, result.Code)
}
// IssueSSHCommandWithResult runs cmd on the node's external IP and returns the
// SSH result; it returns a nil result plus an error when the SSH fails or the
// command exits non-zero.
func IssueSSHCommandWithResult(cmd, provider string, node *api.Node) (*SSHResult, error) {
	Logf("Getting external IP address for %s", node.Name)
	host := ""
	for _, addr := range node.Status.Addresses {
		if addr.Type == api.NodeExternalIP {
			host = addr.Address + ":22"
			break
		}
	}
	if host == "" {
		return nil, fmt.Errorf("couldn't find external IP address for node %s", node.Name)
	}
	Logf("SSH %q on %s(%s)", cmd, node.Name, host)
	result, err := SSH(cmd, host, provider)
	LogSSHResult(result)
	if err != nil || result.Code != 0 {
		return nil, fmt.Errorf("failed running %q: %v (exit code %d)",
			cmd, err, result.Code)
	}
	return &result, nil
}
// IssueSSHCommand runs cmd on the given node via SSH and returns an error if
// the SSH invocation fails or the command exits non-zero.
func IssueSSHCommand(cmd, provider string, node *api.Node) error {
	result, err := IssueSSHCommandWithResult(cmd, provider, node)
	if result != nil {
		LogSSHResult(*result)
	}
	// BUG FIX: IssueSSHCommandWithResult returns a nil result on every failure,
	// so the old unconditional result.Code dereference panicked on each error
	// path. Check err (already wrapped with the command context) first.
	if err != nil {
		return err
	}
	if result != nil && result.Code != 0 {
		return fmt.Errorf("failed running %q: %v (exit code %d)",
			cmd, err, result.Code)
	}
	return nil
}
// NewHostExecPodSpec returns the pod spec of hostexec pod
func NewHostExecPodSpec(ns, name string) *api.Pod {
	// Single hostexec container on the host network; the image is pulled only
	// when not already present.
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:            "hostexec",
					Image:           "gcr.io/google_containers/hostexec:1.2",
					ImagePullPolicy: api.PullIfNotPresent,
				},
			},
			SecurityContext: &api.PodSecurityContext{
				HostNetwork: true,
			},
		},
	}
}
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
	nsArg := fmt.Sprintf("--namespace=%v", ns)
	return RunKubectl("exec", nsArg, name, "--", "/bin/sh", "-c", cmd)
}
// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
	out, err := RunHostCmd(ns, name, cmd)
	Logf("stdout: %v", out)
	ExpectNoError(err)
	return out
}
// LaunchHostExecPod launches a hostexec pod in the given namespace and waits
// until it's Running
func LaunchHostExecPod(client *client.Client, ns, name string) *api.Pod {
	spec := NewHostExecPodSpec(ns, name)
	pod, err := client.Pods(ns).Create(spec)
	ExpectNoError(err)
	ExpectNoError(WaitForPodRunningInNamespace(client, pod))
	return pod
}
// GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be
// used to SSH to their nodes.
func GetSigner(provider string) (ssh.Signer, error) {
	// Select the key itself to use. When implementing more providers here,
	// please also add them to any SSH tests that are disabled because of signer
	// support.
	var keyfile string
	switch provider {
	case "gce", "gke", "kubemark":
		keyfile = "google_compute_engine"
	case "aws":
		// If there is an env. variable override, use that.
		if aws_keyfile := os.Getenv("AWS_SSH_KEY"); len(aws_keyfile) != 0 {
			return sshutil.MakePrivateKeySignerFromFile(aws_keyfile)
		}
		// Otherwise revert to home dir
		keyfile = "kube_aws_rsa"
	default:
		return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider)
	}
	// Get the directory in which SSH keys are located.
	keydir := filepath.Join(os.Getenv("HOME"), ".ssh")
	return sshutil.MakePrivateKeySignerFromFile(filepath.Join(keydir, keyfile))
}
// CheckPodsRunningReady returns whether all pods whose names are listed in
// podNames in namespace ns are running and ready, using c and waiting at most
// timeout.
func CheckPodsRunningReady(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
	const desc = "running and ready"
	return CheckPodsCondition(c, ns, podNames, timeout, PodRunningReady, desc)
}
// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
// listed in podNames in namespace ns are running and ready, or succeeded; use
// c and waiting at most timeout.
func CheckPodsRunningReadyOrSucceeded(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
	const desc = "running and ready, or succeeded"
	return CheckPodsCondition(c, ns, podNames, timeout, PodRunningReadyOrSucceeded, desc)
}
// CheckPodsCondition returns whether all pods whose names are listed in podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
func CheckPodsCondition(c *client.Client, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
	np := len(podNames)
	Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
	result := make(chan bool, np)
	// Launch off pod readiness checkers.
	for _, podName := range podNames {
		// podName is passed as an argument so each goroutine gets its own copy.
		go func(name string) {
			result <- waitForPodCondition(c, ns, name, desc, timeout, condition) == nil
		}(podName)
	}
	// Wait for them all to finish.
	success := true
	for _, podName := range podNames {
		if !<-result {
			Logf("Pod %[1]s failed to be %[2]s.", podName, desc)
			success = false
		}
	}
	Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
	return success
}
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c *client.Client, name string, timeout time.Duration) bool {
	const wantReady = true
	return WaitForNodeToBe(c, name, api.NodeReady, wantReady, timeout)
}
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c *client.Client, name string, timeout time.Duration) bool {
	const wantReady = false
	return WaitForNodeToBe(c, name, api.NodeReady, wantReady, timeout)
}
// isNodeConditionSetAsExpected reports whether the node's condition of the
// given type matches wantTrue; unless silent, mismatches and missing
// conditions are logged.
func isNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditionType, wantTrue, silent bool) bool {
	// Check the node readiness condition (logging all).
	for _, cond := range node.Status.Conditions {
		if cond.Type != conditionType {
			continue
		}
		// Ensure that the condition type and the status matches as desired.
		matches := (cond.Status == api.ConditionTrue) == wantTrue
		if !matches && !silent {
			Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
				conditionType, node.Name, cond.Status == api.ConditionTrue, wantTrue, cond.Reason, cond.Message)
		}
		return matches
	}
	if !silent {
		Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
	}
	return false
}
// IsNodeConditionSetAsExpected is the logging variant of the condition check.
func IsNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditionType, wantTrue bool) bool {
	const silent = false
	return isNodeConditionSetAsExpected(node, conditionType, wantTrue, silent)
}
// IsNodeConditionSetAsExpectedSilent is the non-logging variant of the condition check.
func IsNodeConditionSetAsExpectedSilent(node *api.Node, conditionType api.NodeConditionType, wantTrue bool) bool {
	const silent = true
	return isNodeConditionSetAsExpected(node, conditionType, wantTrue, silent)
}
// IsNodeConditionUnset reports whether the node has no condition of the given type.
func IsNodeConditionUnset(node *api.Node, conditionType api.NodeConditionType) bool {
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == conditionType {
			return false
		}
	}
	return true
}
// WaitForNodeToBe returns whether node "name's" condition state matches wantTrue
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitForNodeToBe(c *client.Client, name string, conditionType api.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
	Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		node, err := c.Nodes().Get(name)
		if err != nil {
			// Fetch failures are retried on the next poll.
			Logf("Couldn't get node %s", name)
		} else if IsNodeConditionSetAsExpected(node, conditionType, wantTrue) {
			return true
		}
		time.Sleep(Poll)
	}
	Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
	return false
}
// Checks whether not-ready nodes can be ignored while checking if all nodes are
// ready (we allow e.g. for incorrect provisioning of some small percentage of nodes
// while validating cluster, and those nodes may never become healthy).
// Currently we allow only for:
// - not present CNI plugins on node
// TODO: we should extend it for other reasons.
func allowedNotReadyReasons(nodes []*api.Node) bool {
	for _, node := range nodes {
		index, condition := api.GetNodeCondition(&node.Status, api.NodeReady)
		// A node without a Ready condition at all cannot be excused.
		if index == -1 {
			return false
		}
		// Only the missing-CNI-plugins reason is currently tolerated.
		if !strings.Contains(condition.Reason, "could not locate kubenet required CNI plugins") {
			return false
		}
	}
	return true
}
// Checks whether all registered nodes are ready.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c *client.Client, timeout time.Duration) error {
	Logf("Waiting up to %v for all nodes to be ready", timeout)
	// Captured by the closure so the final error can report which nodes were
	// still unhealthy when polling stopped.
	var notReady []*api.Node
	err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
		notReady = nil
		// It should be OK to list unschedulable Nodes here.
		nodes, err := c.Nodes().List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		for i := range nodes.Items {
			// Index into the slice rather than ranging by value so each
			// appended pointer refers to a distinct element.
			node := &nodes.Items[i]
			if !IsNodeConditionSetAsExpected(node, api.NodeReady, true) {
				notReady = append(notReady, node)
			}
		}
		// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
		// to make it possible e.g. for incorrect deployment of some small percentage
		// of nodes (which we allow in cluster validation). Some nodes that are not
		// provisioned correctly at startup will never become ready (e.g. when something
		// won't install correctly), so we can't expect them to be ready at any point.
		//
		// However, we only allow non-ready nodes with some specific reasons.
		if len(notReady) > TestContext.AllowedNotReadyNodes {
			return false, nil
		}
		return allowedNotReadyReasons(notReady), nil
	})
	// A timeout is reported via the notReady list below, not as a raw wait error.
	if err != nil && err != wait.ErrWaitTimeout {
		return err
	}
	if len(notReady) > 0 {
		return fmt.Errorf("Not ready nodes: %v", notReady)
	}
	return nil
}
// checks whether all registered nodes are ready and all required Pods are running on them.
func WaitForAllNodesHealthy(c *client.Client, timeout time.Duration) error {
	Logf("Waiting up to %v for all nodes to be ready", timeout)
	// Captured by the closure so the final error messages can report what was
	// still unhealthy when polling stopped.
	var notReady []api.Node
	var missingPodsPerNode map[string][]string
	err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
		notReady = nil
		// It should be OK to list unschedulable Nodes here.
		// ResourceVersion "0" permits a (possibly slightly stale) cached read.
		nodes, err := c.Nodes().List(api.ListOptions{ResourceVersion: "0"})
		if err != nil {
			return false, err
		}
		for _, node := range nodes.Items {
			if !IsNodeConditionSetAsExpected(&node, api.NodeReady, true) {
				notReady = append(notReady, node)
			}
		}
		pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{ResourceVersion: "0"})
		if err != nil {
			return false, err
		}
		// Map each node name to the names of running kube-system pods on it.
		systemPodsPerNode := make(map[string][]string)
		for _, pod := range pods.Items {
			if pod.Namespace == api.NamespaceSystem && pod.Status.Phase == api.PodRunning {
				if pod.Spec.NodeName != "" {
					systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
				}
			}
		}
		// Every non-master node must be running each pod in requiredPerNodePods
		// (a regexp is matched against the pod names collected above).
		missingPodsPerNode = make(map[string][]string)
		for _, node := range nodes.Items {
			if !system.IsMasterNode(&node) {
				for _, requiredPod := range requiredPerNodePods {
					foundRequired := false
					for _, presentPod := range systemPodsPerNode[node.Name] {
						if requiredPod.MatchString(presentPod) {
							foundRequired = true
							break
						}
					}
					if !foundRequired {
						missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String())
					}
				}
			}
		}
		return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
	})
	// A timeout is reported through the collected state below rather than as a
	// raw wait error.
	if err != nil && err != wait.ErrWaitTimeout {
		return err
	}
	if len(notReady) > 0 {
		return fmt.Errorf("Not ready nodes: %v", notReady)
	}
	if len(missingPodsPerNode) > 0 {
		return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode)
	}
	return nil
}
// Filters nodes in NodeList in place, removing nodes that do not
// satisfy the given condition
// TODO: consider merging with pkg/client/cache.NodeLister
func FilterNodes(nodeList *api.NodeList, fn func(node api.Node) bool) {
	var kept []api.Node
	for i := range nodeList.Items {
		if fn(nodeList.Items[i]) {
			kept = append(kept, nodeList.Items[i])
		}
	}
	nodeList.Items = kept
}
// ParseKVLines parses output that looks like lines containing "<key>: <val>"
// and returns <val> if <key> is found. Otherwise, it returns the empty string.
func ParseKVLines(output, key string) string {
	const delim = ":"
	want := key + delim
	for _, line := range strings.Split(output, "\n") {
		pieces := strings.SplitAfterN(line, delim, 2)
		if len(pieces) != 2 {
			continue
		}
		// SplitAfterN keeps the delimiter on the first piece, so compare
		// against key+":" directly.
		if pieces[0] == want {
			return strings.TrimSpace(pieces[1])
		}
	}
	return ""
}
// RestartKubeProxy kills kube-proxy on the given host over SSH and waits for
// the kubelet (which runs it as a static pod) to bring it back up.
func RestartKubeProxy(host string) error {
	// TODO: Make it work for all providers.
	if !ProviderIs("gce", "gke", "aws") {
		return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
	}
	// kubelet will restart the kube-proxy since it's running in a static pod
	Logf("Killing kube-proxy on node %v", host)
	if result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider); err != nil || result.Code != 0 {
		LogSSHResult(result)
		return fmt.Errorf("couldn't restart kube-proxy: %v", err)
	}
	// wait for kube-proxy to come back up
	countCmd := "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'"
	err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		Logf("Waiting for kubeproxy to come back up with %v on %v", countCmd, host)
		res, sshErr := SSH(countCmd, host, TestContext.Provider)
		if sshErr != nil {
			return false, sshErr
		}
		if res.Code != 0 {
			LogSSHResult(res)
			return false, fmt.Errorf("failed to run command, exited %d", res.Code)
		}
		if res.Stdout == "0\n" {
			return false, nil
		}
		Logf("kube-proxy is back up.")
		return true, nil
	})
	if err != nil {
		return fmt.Errorf("kube-proxy didn't recover: %v", err)
	}
	return nil
}
// RestartApiserver restarts the cluster apiserver: over SSH on GCE/AWS, or via
// a same-version master upgrade on GKE (which does not allow SSH access).
func RestartApiserver(c *client.Client) error {
	// TODO: Make it work for all providers.
	if !ProviderIs("gce", "gke", "aws") {
		return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
	}
	if ProviderIs("gce", "aws") {
		return sshRestartMaster()
	}
	// GKE doesn't allow ssh access, so use a same-version master
	// upgrade to teardown/recreate master.
	v, err := c.ServerVersion()
	if err != nil {
		return err
	}
	version := v.GitVersion[1:] // strip leading 'v'
	return masterUpgradeGKE(version)
}
// sshRestartMaster restarts the apiserver on the master host over SSH.
// On GCE the apiserver runs in a docker container; on AWS it is an init
// service.
func sshRestartMaster() error {
	if !ProviderIs("gce", "aws") {
		return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
	}
	command := "sudo /etc/init.d/kube-apiserver restart"
	if ProviderIs("gce") {
		command = "sudo docker ps | grep /kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill"
	}
	Logf("Restarting master via ssh, running: %v", command)
	result, err := SSH(command, GetMasterHost()+":22", TestContext.Provider)
	if err != nil || result.Code != 0 {
		LogSSHResult(result)
		return fmt.Errorf("couldn't restart apiserver: %v", err)
	}
	return nil
}
// WaitForApiserverUp polls /healthz every 5s for up to a minute until the
// apiserver answers "ok".
func WaitForApiserverUp(c *client.Client) error {
	deadline := time.Now().Add(time.Minute)
	for time.Now().Before(deadline) {
		body, err := c.Get().AbsPath("/healthz").Do().Raw()
		if err == nil && string(body) == "ok" {
			return nil
		}
		time.Sleep(5 * time.Second)
	}
	return fmt.Errorf("waiting for apiserver timed out")
}
// WaitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it.
// By cluster size we mean number of Nodes excluding Master Node.
func WaitForClusterSize(c *client.Client, size int, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector()})
		if err != nil {
			Logf("Failed to list nodes: %v", err)
			continue
		}
		total := len(nodes.Items)
		// Filter out not-ready nodes.
		FilterNodes(nodes, func(node api.Node) bool {
			return IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
		})
		ready := len(nodes.Items)
		if total == size && ready == size {
			Logf("Cluster has reached the desired size %d", size)
			return nil
		}
		Logf("Waiting for cluster size %d, current size %d, not ready nodes %d", size, total, total-ready)
	}
	return fmt.Errorf("timeout waiting %v for cluster size to be %d", timeout, size)
}
// GetHostExternalAddress gets the node for a pod and returns the first External
// address. Returns an error if the node the pod is on doesn't have an External
// address.
func GetHostExternalAddress(client *client.Client, p *api.Pod) (externalAddress string, err error) {
	node, err := client.Nodes().Get(p.Spec.NodeName)
	if err != nil {
		return "", err
	}
	for _, addr := range node.Status.Addresses {
		if addr.Type != api.NodeExternalIP || addr.Address == "" {
			continue
		}
		externalAddress = addr.Address
		break
	}
	if externalAddress == "" {
		err = fmt.Errorf("No external address for pod %v on node %v",
			p.Name, p.Spec.NodeName)
	}
	return externalAddress, err
}
type extractRT struct {
http.Header
}
func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
rt.Header = req.Header
return &http.Response{}, nil
}
// headersForConfig extracts any http client logic necessary for the provided
// config.
func headersForConfig(c *restclient.Config) (http.Header, error) {
	extract := &extractRT{}
	rt, err := restclient.HTTPWrappersForConfig(c, extract)
	if err != nil {
		return nil, err
	}
	// Issue a dummy request through the wrapped round-tripper so that it
	// stamps its headers onto extract.
	if _, err = rt.RoundTrip(&http.Request{}); err != nil {
		return nil, err
	}
	return extract.Header, nil
}
// OpenWebSocketForURL constructs a websocket connection to the provided URL, using the client
// config, with the specified protocols.
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) {
	tlsConfig, err := restclient.TLSConfigFor(config)
	if err != nil {
		return nil, fmt.Errorf("failed to create tls config: %v", err)
	}
	// Pick ws/wss and the matching default port based on whether TLS is in use.
	scheme, defaultPort := "ws", ":80"
	if tlsConfig != nil {
		scheme, defaultPort = "wss", ":443"
	}
	url.Scheme = scheme
	if !strings.Contains(url.Host, ":") {
		url.Host += defaultPort
	}
	headers, err := headersForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("failed to load http headers: %v", err)
	}
	cfg, err := websocket.NewConfig(url.String(), "http://localhost")
	if err != nil {
		return nil, fmt.Errorf("failed to create websocket config: %v", err)
	}
	cfg.Header = headers
	cfg.TlsConfig = tlsConfig
	cfg.Protocol = protocols
	return websocket.DialConfig(cfg)
}
// getIngressAddress returns the ips/hostnames associated with the Ingress.
func getIngressAddress(client *client.Client, ns, name string) ([]string, error) {
	ing, err := client.Extensions().Ingress(ns).Get(name)
	if err != nil {
		return nil, err
	}
	addresses := []string{}
	for i := range ing.Status.LoadBalancer.Ingress {
		entry := &ing.Status.LoadBalancer.Ingress[i]
		if entry.IP != "" {
			addresses = append(addresses, entry.IP)
		}
		if entry.Hostname != "" {
			addresses = append(addresses, entry.Hostname)
		}
	}
	return addresses, nil
}
// WaitForIngressAddress waits for the Ingress to acquire an address.
func WaitForIngressAddress(c *client.Client, ns, ingName string, timeout time.Duration) (string, error) {
	var address string
	err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
		addrs, lerr := getIngressAddress(c, ns, ingName)
		if lerr != nil || len(addrs) == 0 {
			Logf("Waiting for Ingress %v to acquire IP, error %v", ingName, lerr)
			return false, nil
		}
		address = addrs[0]
		return true, nil
	})
	return address, err
}
// Looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("logs", podName, container, fmt.Sprintf("--namespace=%v", ns))
})
}
// Looks for the given string in a file in a specific pod container
func LookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file)
})
}
// Looks for the given string in the output of a command executed in a specific pod container
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
// use the first container
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"}
args = append(args, command...)
return RunKubectlOrDie(args...)
})
}
// Looks for the given string in the output of fn, repeatedly calling fn until
// the timeout is reached or the string is found. Returns last log and possibly
// error if the string was not found.
func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		if result = fn(); strings.Contains(result, expectedString) {
			return result, nil
		}
	}
	err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
	return result, err
}
// getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client *client.Client, ns, name string, svcPort int) (int, error) {
	svc, err := client.Services(ns).Get(name)
	if err != nil {
		return 0, err
	}
	for i := range svc.Spec.Ports {
		p := &svc.Spec.Ports[i]
		if p.Port == int32(svcPort) && p.NodePort != 0 {
			return int(p.NodePort), nil
		}
	}
	return 0, fmt.Errorf(
		"No node port found for service %v, port %v", name, svcPort)
}
// GetNodePortURL returns the url to a nodeport Service.
func GetNodePortURL(client *client.Client, ns, name string, svcPort int) (string, error) {
	nodePort, err := getSvcNodePort(client, ns, name, svcPort)
	if err != nil {
		return "", err
	}
	// This list of nodes must not include the master, which is marked
	// unschedulable, since the master doesn't run kube-proxy. Without
	// kube-proxy NodePorts won't work.
	var nodes *api.NodeList
	pollErr := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
		nodes, err = client.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector()})
		return err == nil, nil
	})
	if pollErr != nil {
		// Prefer the last List error; fall back to the poll error so the
		// caller can never receive ("", nil) on a failed poll.
		if err != nil {
			return "", err
		}
		return "", pollErr
	}
	if len(nodes.Items) == 0 {
		return "", fmt.Errorf("Unable to list nodes in cluster.")
	}
	for i := range nodes.Items {
		for _, address := range nodes.Items[i].Status.Addresses {
			if address.Type == api.NodeExternalIP && address.Address != "" {
				return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
			}
		}
	}
	return "", fmt.Errorf("Failed to find external address for service %v", name)
}
// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
// none are running, otherwise it does what a synchronous scale operation would do.
func ScaleRCByLabels(client *client.Client, ns string, l map[string]string, replicas uint) error {
	listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l))}
	rcs, err := client.ReplicationControllers(ns).List(listOpts)
	if err != nil {
		return err
	}
	if len(rcs.Items) == 0 {
		return fmt.Errorf("RC with labels %v not found in ns %v", l, ns)
	}
	Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
	for _, labelRC := range rcs.Items {
		name := labelRC.Name
		// Scale without waiting (wait=false); the waiting is done below using
		// pod-level checks instead.
		if err := ScaleRC(client, ns, name, replicas, false); err != nil {
			return err
		}
		// Re-fetch the RC so the pod checks below use its current selector.
		rc, err := client.ReplicationControllers(ns).Get(name)
		if err != nil {
			return err
		}
		if replicas == 0 {
			ps, err := podStoreForRC(client, rc)
			if err != nil {
				return err
			}
			// NOTE(review): defer inside a loop — every pod store stays open
			// until the function returns, not until the iteration ends.
			defer ps.Stop()
			if err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute); err != nil {
				return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
			}
		} else {
			if err := WaitForPodsWithLabelRunning(
				client, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil {
				return err
			}
		}
	}
	return nil
}
// TODO(random-liu): Change this to be a member function of the framework.
// GetPodLogs fetches the current logs of the named container.
func GetPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
	const previous = false
	return getPodLogsInternal(c, namespace, podName, containerName, previous)
}
// getPreviousPodLogs fetches the logs of the named container's previous
// (terminated) instance.
func getPreviousPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
	const previous = true
	return getPodLogsInternal(c, namespace, podName, containerName, previous)
}
// utility function for gomega Eventually
// getPodLogsInternal fetches the pod's container logs via the "log"
// subresource, optionally from the previous container instance.
func getPodLogsInternal(c *client.Client, namespace, podName, containerName string, previous bool) (string, error) {
	logs, err := c.Get().
		Resource("pods").
		Namespace(namespace).
		Name(podName).SubResource("log").
		Param("container", containerName).
		Param("previous", strconv.FormatBool(previous)).
		Do().
		Raw()
	if err != nil {
		return "", err
	}
	// err is nil from here on (the previous "err == nil &&" guard was dead
	// code). An apiserver error page is treated as a failure.
	if strings.Contains(string(logs), "Internal Error") {
		return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
	}
	return string(logs), nil
}
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
	switch TestContext.Provider {
	case "gce", "gke":
		return ensureGCELoadBalancerResourcesDeleted(ip, portRange)
	default:
		return nil
	}
}
// ensureGCELoadBalancerResourcesDeleted polls the GCE forwarding-rule list
// until no rule matching the given ip/portRange remains.
func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
	gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
	if !ok {
		return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
	}
	project := TestContext.CloudConfig.ProjectID
	region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone)
	if err != nil {
		return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err)
	}
	return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
		service := gceCloud.GetComputeService()
		list, err := service.ForwardingRules.List(project, region).Do()
		if err != nil {
			return false, err
		}
		for _, item := range list.Items {
			if item.PortRange == portRange && item.IPAddress == ip {
				Logf("found a load balancer: %v", item)
				return false, nil
			}
		}
		return true, nil
	})
}
// The following helper functions can block/unblock network from source
// host to destination host by manipulating iptable rules.
// This function assumes it can ssh to the source host.
//
// Caution:
// Recommend to input IP instead of hostnames. Using hostnames will cause iptables to
// do a DNS lookup to resolve the name to an IP address, which will
// slow down the test and cause it to fail if DNS is absent or broken.
//
// Suggested usage pattern:
// func foo() {
//	...
//	defer UnblockNetwork(from, to)
//	BlockNetwork(from, to)
//	...
// }
//
func BlockNetwork(from string, to string) {
	Logf("block network traffic from %s to %s", from, to)
	iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
	dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule)
	result, err := SSH(dropCmd, from, TestContext.Provider)
	if result.Code != 0 || err != nil {
		LogSSHResult(result)
		Failf("Unexpected error: %v", err)
	}
}
// UnblockNetwork removes the REJECT rule installed by BlockNetwork, retrying
// for up to 30 seconds.
func UnblockNetwork(from string, to string) {
	Logf("Unblock network traffic from %s to %s", from, to)
	iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
	undropCmd := fmt.Sprintf("sudo iptables --delete %s", iptablesRule)
	// Undrop command may fail if the rule has never been created.
	// In such case we just lose 30 seconds, but the cluster is healthy.
	// But if the rule had been created and removing it failed, the node is broken and
	// not coming back. Subsequent tests will run or fewer nodes (some of the tests
	// may fail). Manual intervention is required in such case (recreating the
	// cluster solves the problem too).
	err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) {
		result, sshErr := SSH(undropCmd, from, TestContext.Provider)
		if sshErr == nil && result.Code == 0 {
			return true, nil
		}
		LogSSHResult(result)
		if sshErr != nil {
			Logf("Unexpected error: %v", sshErr)
		}
		return false, nil
	})
	if err != nil {
		Failf("Failed to remove the iptable REJECT rule. Manual intervention is "+
			"required on host %s: remove rule %s, if exists", from, iptablesRule)
	}
}
// isElementOf reports whether any pod in the list carries the given UID.
func isElementOf(podUID types.UID, pods *api.PodList) bool {
	for i := range pods.Items {
		if pods.Items[i].UID == podUID {
			return true
		}
	}
	return false
}
// CheckRSHashLabel verifies the ReplicaSet carries the deployment pod-hash
// label on its own labels, its selector, and its pod template.
func CheckRSHashLabel(rs *extensions.ReplicaSet) error {
	key := extensions.DefaultDeploymentUniqueLabelKey
	if len(rs.Labels[key]) == 0 ||
		len(rs.Spec.Selector.MatchLabels[key]) == 0 ||
		len(rs.Spec.Template.Labels[key]) == 0 {
		return fmt.Errorf("unexpected RS missing required pod-hash-template: %+v, selector = %+v, template = %+v", rs, rs.Spec.Selector, rs.Spec.Template)
	}
	return nil
}
// CheckPodHashLabel verifies every pod in the list carries the deployment
// pod-hash label; the error lists every offending pod.
func CheckPodHashLabel(pods *api.PodList) error {
	invalid := ""
	for _, pod := range pods.Items {
		if len(pod.Labels[extensions.DefaultDeploymentUniqueLabelKey]) != 0 {
			continue
		}
		if invalid == "" {
			invalid = "unexpected pods missing required pod-hash-template:"
		}
		invalid = fmt.Sprintf("%s %+v;", invalid, pod)
	}
	if invalid != "" {
		return fmt.Errorf("%s", invalid)
	}
	return nil
}
// proxyTimeout bounds how long NodeProxyRequest waits for a node proxy call
// before giving up (the proxy can hang when a node is not ready).
const proxyTimeout = 2 * time.Minute
// NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
func NodeProxyRequest(c *client.Client, node, endpoint string) (restclient.Result, error) {
	// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
	// This will leak a goroutine if proxy hangs. #22165
	subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c)
	if err != nil {
		return restclient.Result{}, err
	}
	var result restclient.Result
	// Buffered so the worker goroutine can always complete its send and exit,
	// even if we stopped waiting after proxyTimeout; an unbuffered channel
	// would block that send forever and leak the goroutine permanently.
	finished := make(chan struct{}, 1)
	go func() {
		if subResourceProxyAvailable {
			result = c.Get().
				Resource("nodes").
				SubResource("proxy").
				Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
				Suffix(endpoint).
				Do()
		} else {
			result = c.Get().
				Prefix("proxy").
				Resource("nodes").
				Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
				Suffix(endpoint).
				Do()
		}
		finished <- struct{}{}
	}()
	select {
	case <-finished:
		return result, nil
	case <-time.After(proxyTimeout):
		// Timed out: return an empty result; the worker goroutine will still
		// drain into the buffered channel and terminate.
		return restclient.Result{}, nil
	}
}
// GetKubeletPods retrieves the list of pods on the kubelet
func GetKubeletPods(c *client.Client, node string) (*api.PodList, error) {
	const resource = "pods"
	return getKubeletPods(c, node, resource)
}
// GetKubeletRunningPods retrieves the list of running pods on the kubelet. The pods
// includes necessary information (e.g., UID, name, namespace for
// pods/containers), but do not contain the full spec.
func GetKubeletRunningPods(c *client.Client, node string) (*api.PodList, error) {
	const resource = "runningpods"
	return getKubeletPods(c, node, resource)
}
// getKubeletPods queries the named kubelet resource ("pods" or "runningpods")
// through the node proxy and decodes the response into a PodList.
func getKubeletPods(c *client.Client, node, resource string) (*api.PodList, error) {
	result := &api.PodList{}
	// Local renamed from "client" to avoid shadowing the client package.
	req, err := NodeProxyRequest(c, node, resource)
	if err != nil {
		return &api.PodList{}, err
	}
	if err = req.Into(result); err != nil {
		return &api.PodList{}, err
	}
	return result, nil
}
// LaunchWebserverPod launches a pod serving http on port 8080 to act
// as the target for networking connectivity checks. The ip address
// of the created pod will be returned if the pod is launched
// successfully.
func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) {
	const port = 8080
	containerName := fmt.Sprintf("%s-container", podName)
	webserverPod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  containerName,
					Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
					Env:   []api.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
					Ports: []api.ContainerPort{{ContainerPort: int32(port)}},
				},
			},
			NodeName:      nodeName,
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	podClient := f.Client.Pods(f.Namespace.Name)
	_, err := podClient.Create(webserverPod)
	ExpectNoError(err)
	ExpectNoError(f.WaitForPodRunning(podName))
	// Re-fetch the pod to learn the IP assigned to it.
	createdPod, err := podClient.Get(podName)
	ExpectNoError(err)
	ip = fmt.Sprintf("%s:%d", createdPod.Status.PodIP, port)
	Logf("Target pod IP:port is %s", ip)
	return ip
}
// CheckConnectivityToHost launches a pod running wget on the
// specified node to test connectivity to the specified host. An
// error will be returned if the host is not reachable from the pod.
func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, timeout int) error {
	contName := fmt.Sprintf("%s-container", podName)
	wgetCommand := []string{"wget", fmt.Sprintf("--timeout=%d", timeout), "-s", host}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    contName,
					Image:   "gcr.io/google_containers/busybox:1.24",
					Command: wgetCommand,
				},
			},
			NodeName:      nodeName,
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	podClient := f.Client.Pods(f.Namespace.Name)
	if _, err := podClient.Create(pod); err != nil {
		return err
	}
	defer podClient.Delete(podName, nil)
	err := WaitForPodSuccessInNamespace(f.Client, podName, contName, f.Namespace.Name)
	if err != nil {
		// Surface the container logs to ease debugging of the failed check.
		logs, logErr := GetPodLogs(f.Client, f.Namespace.Name, pod.Name, contName)
		if logErr != nil {
			Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr)
		} else {
			Logf("pod %s/%s \"wget\" logs:\n%s", f.Namespace.Name, pod.Name, logs)
		}
	}
	return err
}
// CoreDump SSHs to the master and all nodes and dumps their logs into dir.
// It shells out to cluster/log-dump.sh to accomplish this.
func CoreDump(dir string) {
	script := path.Join(TestContext.RepoRoot, "cluster", "log-dump.sh")
	cmd := exec.Command(script, dir)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		Logf("Error running cluster/log-dump.sh: %v", err)
	}
}
// UpdatePodWithRetries applies update to the named pod and submits it, retrying
// up to three times on conflict or server timeout.
func UpdatePodWithRetries(client *client.Client, ns, name string, update func(*api.Pod)) (*api.Pod, error) {
	const attempts = 3
	for attempt := 0; attempt < attempts; attempt++ {
		pod, err := client.Pods(ns).Get(name)
		if err != nil {
			return nil, fmt.Errorf("Failed to get pod %q: %v", name, err)
		}
		update(pod)
		if pod, err = client.Pods(ns).Update(pod); err == nil {
			return pod, nil
		}
		// Only conflicts and server timeouts are worth retrying.
		if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
			return nil, fmt.Errorf("Failed to update pod %q: %v", name, err)
		}
	}
	return nil, fmt.Errorf("Too many retries updating Pod %q", name)
}
// GetPodsInNamespace lists the pods in ns, drops any pod whose labels match
// ignoreLabels, and returns pointers to the remaining pods.
func GetPodsInNamespace(c *client.Client, ns string, ignoreLabels map[string]string) ([]*api.Pod, error) {
	pods, err := c.Pods(ns).List(api.ListOptions{})
	if err != nil {
		return []*api.Pod{}, err
	}
	ignoreSelector := labels.SelectorFromSet(ignoreLabels)
	filtered := []*api.Pod{}
	for i := range pods.Items {
		// Take the address of the slice element, not of the range variable:
		// appending &p of the loop variable would make every entry alias the
		// same (last) pod.
		p := &pods.Items[i]
		if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) {
			continue
		}
		filtered = append(filtered, p)
	}
	return filtered, nil
}
// RunCmd runs cmd using args and returns its stdout and stderr. It also outputs
// cmd's stdout and stderr to their respective OS streams.
func RunCmd(command string, args ...string) (string, string, error) {
	Logf("Running %s %v", command, args)
	var bout, berr bytes.Buffer
	cmd := exec.Command(command, args...)
	// We also output to the OS stdout/stderr to aid in debugging in case cmd
	// hangs and never returns before the test gets killed.
	//
	// This creates some ugly output because gcloud doesn't always provide
	// newlines.
	cmd.Stdout = io.MultiWriter(os.Stdout, &bout)
	cmd.Stderr = io.MultiWriter(os.Stderr, &berr)
	runErr := cmd.Run()
	stdout := bout.String()
	stderr := berr.String()
	if runErr != nil {
		return "", "", fmt.Errorf("error running %s %v; got error %v, stdout %q, stderr %q",
			command, args, runErr, stdout, stderr)
	}
	return stdout, stderr, nil
}
// retryCmd runs cmd using args and retries it for up to SingleCallTimeout if
// it returns an error. It returns stdout and stderr.
func retryCmd(command string, args ...string) (string, string, error) {
	var (
		err            error
		stdout, stderr string
	)
	wait.Poll(Poll, SingleCallTimeout, func() (bool, error) {
		stdout, stderr, err = RunCmd(command, args...)
		if err == nil {
			return true, nil
		}
		Logf("Got %v", err)
		return false, nil
	})
	return stdout, stderr, err
}
// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods.
// Pods on master nodes are skipped entirely. A pod counts as scheduled when it
// has a NodeName and a PodScheduled=True condition; it counts as not scheduled
// when it has no NodeName, the condition is False, and the reason is
// "Unschedulable". Gomega assertions fail the test on inconsistent conditions.
func GetPodsScheduled(masterNodes sets.String, pods *api.PodList) (scheduledPods, notScheduledPods []api.Pod) {
	for _, pod := range pods.Items {
		if !masterNodes.Has(pod.Spec.NodeName) {
			if pod.Spec.NodeName != "" {
				// Assigned to a node: the scheduler must have recorded
				// PodScheduled=True.
				_, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled)
				Expect(scheduledCondition != nil).To(Equal(true))
				Expect(scheduledCondition.Status).To(Equal(api.ConditionTrue))
				scheduledPods = append(scheduledPods, pod)
			} else {
				// Unassigned: expect an explicit PodScheduled=False condition.
				_, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled)
				Expect(scheduledCondition != nil).To(Equal(true))
				Expect(scheduledCondition.Status).To(Equal(api.ConditionFalse))
				if scheduledCondition.Reason == "Unschedulable" {
					notScheduledPods = append(notScheduledPods, pod)
				}
			}
		}
	}
	return
}
// WaitForStableCluster waits until all existing pods are scheduled and returns their amount.
func WaitForStableCluster(c *client.Client, masterNodes sets.String) int {
	timeout := 10 * time.Minute
	startTime := time.Now()
	allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
	ExpectNoError(err)
	// API server returns also Pods that succeeded. We need to filter them out.
	currentPods := make([]api.Pod, 0, len(allPods.Items))
	for _, pod := range allPods.Items {
		if pod.Status.Phase != api.PodSucceeded && pod.Status.Phase != api.PodFailed {
			currentPods = append(currentPods, pod)
		}
	}
	allPods.Items = currentPods
	scheduledPods, currentlyNotScheduledPods := GetPodsScheduled(masterNodes, allPods)
	// Poll every 2s until no pod remains pending scheduling or the timeout hits.
	for len(currentlyNotScheduledPods) != 0 {
		time.Sleep(2 * time.Second)
		// NOTE(review): this re-list (deliberately shadowed allPods/err) is NOT
		// filtered for succeeded/failed pods like the initial list above —
		// presumably acceptable for the scheduling check; verify whether
		// terminal pods can linger unscheduled.
		allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
		ExpectNoError(err)
		scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods)
		if startTime.Add(timeout).Before(time.Now()) {
			Failf("Timed out after %v waiting for stable cluster.", timeout)
			break
		}
	}
	return len(scheduledPods)
}
// GetMasterAndWorkerNodesOrDie will return a list masters and schedulable worker nodes
func GetMasterAndWorkerNodesOrDie(c *client.Client) (sets.String, *api.NodeList) {
	nodes := &api.NodeList{}
	masters := sets.NewString()
	// Previously the List error was silently discarded, producing a confusing
	// nil dereference on failure; fail loudly instead (this is an OrDie helper).
	all, err := c.Nodes().List(api.ListOptions{})
	ExpectNoError(err)
	for i := range all.Items {
		n := &all.Items[i]
		if system.IsMasterNode(n) {
			masters.Insert(n.Name)
		} else if isNodeSchedulable(n) {
			nodes.Items = append(nodes.Items, *n)
		}
	}
	return masters, nodes
}
// CreateFileForGoBinData writes the embedded gobindata asset at gobindataPath
// to outputFilename under TestContext.OutputDir, creating directories as needed.
func CreateFileForGoBinData(gobindataPath, outputFilename string) error {
	data := ReadOrDie(gobindataPath)
	if len(data) == 0 {
		return fmt.Errorf("Failed to read gobindata from %v", gobindataPath)
	}
	fullPath := filepath.Join(TestContext.OutputDir, outputFilename)
	if err := os.MkdirAll(filepath.Dir(fullPath), 0777); err != nil {
		return fmt.Errorf("Error while creating directory %v: %v", filepath.Dir(fullPath), err)
	}
	if err := ioutil.WriteFile(fullPath, data, 0644); err != nil {
		return fmt.Errorf("Error while trying to write to file %v: %v", fullPath, err)
	}
	return nil
}
// ListNamespaceEvents dumps every event in the namespace to the glog output,
// one line per event. Returns the listing error, if any.
func ListNamespaceEvents(c *client.Client, ns string) error {
	events, err := c.Events(ns).List(api.ListOptions{})
	if err != nil {
		return err
	}
	for i := range events.Items {
		e := &events.Items[i]
		glog.Infof("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message)
	}
	return nil
}
Guard the ready replica checking by server version
This disables ready replica checking for 1.3 masters, but only from 1.4
or 1.5 clients. The old logic was broken anyway due to overlapping
labels with replica sets.
Signed-off-by: Jess Frazelle <e0d1a862d8f31af605ecef8c92857b8938ba622e@google.com>
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
goRuntime "runtime"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"text/tabwriter"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_4"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/discovery"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/runtime"
sshutil "k8s.io/kubernetes/pkg/ssh"
"k8s.io/kubernetes/pkg/types"
uexec "k8s.io/kubernetes/pkg/util/exec"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/system"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait"
utilyaml "k8s.io/kubernetes/pkg/util/yaml"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/watch"
"github.com/blang/semver"
"golang.org/x/crypto/ssh"
"golang.org/x/net/websocket"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
)
// Timeouts, poll intervals and tuning knobs shared by the e2e framework.
const (
	// How long to wait for the pod to be listable
	PodListTimeout = time.Minute
	// Initial pod start can be delayed O(minutes) by slow docker pulls
	// TODO: Make this 30 seconds once #4566 is resolved.
	PodStartTimeout = 5 * time.Minute

	// How long to wait for the pod to no longer be running
	podNoLongerRunningTimeout = 30 * time.Second

	// If there are any orphaned namespaces to clean up, this test is running
	// on a long lived cluster. A long wait here is preferable to spurious test
	// failures caused by leaked resources from a previous test run.
	NamespaceCleanupTimeout = 15 * time.Minute

	// Some pods can take much longer to get ready due to volume attach/detach latency.
	slowPodStartTimeout = 15 * time.Minute

	// How long to wait for a service endpoint to be resolvable.
	ServiceStartTimeout = 1 * time.Minute

	// String used to mark pod deletion
	nonExist = "NonExist"

	// How often to Poll pods, nodes and claims.
	Poll = 2 * time.Second

	// service accounts are provisioned after namespace creation
	// a service account is required to support pod creation in a namespace as part of admission control
	ServiceAccountProvisionTimeout = 2 * time.Minute

	// How long to try single API calls (like 'get' or 'list'). Used to prevent
	// transient failures from failing tests.
	// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
	SingleCallTimeout = 5 * time.Minute

	// How long nodes have to be "ready" when a test begins. They should already
	// be "ready" before the test starts, so this is small.
	NodeReadyInitialTimeout = 20 * time.Second

	// How long pods have to be "ready" when a test begins.
	PodReadyBeforeTimeout = 5 * time.Minute

	// How long pods have to become scheduled onto nodes
	podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second)

	podRespondingTimeout     = 2 * time.Minute
	ServiceRespondingTimeout = 2 * time.Minute
	EndpointRegisterTimeout  = time.Minute

	// How long claims have to become dynamically provisioned
	ClaimProvisionTimeout = 5 * time.Minute

	// When these values are updated, also update cmd/kubelet/app/options/options.go
	currentPodInfraContainerImageName    = "gcr.io/google_containers/pause"
	currentPodInfraContainerImageVersion = "3.0"

	// How long each node is given during a process that restarts all nodes
	// before the test is considered failed. (Note that the total time to
	// restart all nodes will be this number times the number of nodes.)
	RestartPerNodeTimeout = 5 * time.Minute

	// How often to Poll the status of a restart.
	RestartPoll = 20 * time.Second

	// How long a node is allowed to become "Ready" after it is restarted before
	// the test is considered failed.
	RestartNodeReadyAgainTimeout = 5 * time.Minute

	// How long a pod is allowed to become "running" and "ready" after a node
	// restart before test is considered failed.
	RestartPodReadyAgainTimeout = 5 * time.Minute

	// Number of times we want to retry Updates in case of conflict
	UpdateRetries = 5

	// Number of objects that gc can delete in a second.
	// GC issues 2 requests for single delete.
	gcThroughput = 10
)
var (
	// Label allocated to the image puller static pod that runs on each node
	// before e2es.
	ImagePullerLabels = map[string]string{"name": "e2e-image-puller"}

	// For parsing Kubectl version for version-skewed testing.
	gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")

	// Slice of regexps for names of pods that have to be running to consider
	// a Node "healthy"; each node's pod list is matched against all of them.
	requiredPerNodePods = []*regexp.Regexp{
		regexp.MustCompile(".*kube-proxy.*"),
		regexp.MustCompile(".*fluentd-elasticsearch.*"),
		regexp.MustCompile(".*node-problem-detector.*"),
	}
)
// GetServerArchitecture fetches the architecture of the cluster's apiserver.
// It defaults to "amd64" whenever the server version cannot be fetched or the
// reported platform string is missing or malformed.
func GetServerArchitecture(c *client.Client) string {
	// If we fail to determine the real architecture, default to amd64.
	arch := "amd64"
	sVer, err := c.Discovery().ServerVersion()
	if err == nil && sVer.Platform != "" {
		// The platform string may for example be "linux/amd64", "linux/arm"
		// or "windows/amd64". Guard the split so an unexpected value (no "/")
		// falls back to the default instead of panicking on index 1.
		if osArchArray := strings.Split(sVer.Platform, "/"); len(osArchArray) == 2 {
			arch = osArchArray[1]
		}
	}
	return arch
}
// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c *client.Client) string {
	arch := GetServerArchitecture(c)
	return fmt.Sprintf("%s-%s:%s", currentPodInfraContainerImageName, arch, currentPodInfraContainerImageVersion)
}
// GetPauseImageNameForHostArch fetches the pause image name for the same architecture the test is running on.
func GetPauseImageNameForHostArch() string {
	return fmt.Sprintf("%s-%s:%s", currentPodInfraContainerImageName, goRuntime.GOARCH, currentPodInfraContainerImageVersion)
}
// SubResource proxy should have been functional in v1.0.0, but SubResource
// proxy via tunneling is known to be broken in v1.0. See
// https://github.com/kubernetes/kubernetes/pull/15224#issuecomment-146769463
//
// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively
// in v1.3).
var SubResourcePodProxyVersion = version.MustParse("v1.1.0")

// Minimum server version that exposes the services/nodes "proxy" subresource.
var subResourceServiceAndNodeProxyVersion = version.MustParse("v1.2.0")
// GetServicesProxyRequest rewrites the request to reach a service via the
// apiserver proxy, using the subresource form on servers new enough to
// support it and the legacy "proxy" prefix otherwise.
func GetServicesProxyRequest(c *client.Client, request *restclient.Request) (*restclient.Request, error) {
	subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c)
	if err != nil {
		return nil, err
	}
	if !subResourceProxyAvailable {
		// Older servers only understand the prefix-style proxy endpoint.
		return request.Prefix("proxy").Resource("services"), nil
	}
	return request.Resource("services").SubResource("proxy"), nil
}
// unique identifier of the e2e run
var RunId = uuid.NewUUID()

// CreateTestingNSFn creates (or returns) a namespace for a test, tagged with
// the given labels; pluggable so suites can customize namespace creation.
type CreateTestingNSFn func(baseName string, c *client.Client, labels map[string]string) (*api.Namespace, error)

// ContainerFailures records the last termination state and the restart count
// observed for a container, keyed elsewhere by container identity.
type ContainerFailures struct {
	status   *api.ContainerStateTerminated
	Restarts int
}
// GetMasterHost returns the host (host[:port]) portion of the configured
// apiserver URL, failing the test if the URL does not parse.
func GetMasterHost() string {
	u, err := url.Parse(TestContext.Host)
	ExpectNoError(err)
	return u.Host
}
// Convenient wrapper around cache.Store that returns list of api.Pod instead of interface{}.
type PodStore struct {
	cache.Store
	// stopCh terminates the backing reflector when closed (see Stop).
	stopCh chan struct{}
	// reflector keeps the embedded Store in sync with the apiserver.
	reflector *cache.Reflector
}
// NewPodStore starts a reflector that continuously mirrors the pods matching
// the given namespace, label selector and field selector into a local cache.
// Callers must invoke Stop when done to avoid leaking the reflector goroutine.
func NewPodStore(c *client.Client, namespace string, label labels.Selector, field fields.Selector) *PodStore {
	lw := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			options.LabelSelector = label
			options.FieldSelector = field
			return c.Pods(namespace).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			options.LabelSelector = label
			options.FieldSelector = field
			return c.Pods(namespace).Watch(options)
		},
	}
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	stopCh := make(chan struct{})
	// Resync period 0: rely purely on the watch; runs until stopCh is closed.
	reflector := cache.NewReflector(lw, &api.Pod{}, store, 0)
	reflector.RunUntil(stopCh)
	return &PodStore{store, stopCh, reflector}
}
// List returns the cached pods as a typed slice instead of []interface{}.
func (s *PodStore) List() []*api.Pod {
	objects := s.Store.List()
	// Pre-size: the result has exactly one entry per cached object, so avoid
	// repeated append growth.
	pods := make([]*api.Pod, 0, len(objects))
	for _, o := range objects {
		pods = append(pods, o.(*api.Pod))
	}
	return pods
}
// Stop shuts down the backing reflector; the store must not be used after.
func (s *PodStore) Stop() {
	close(s.stopCh)
}
// RCConfig describes a replication controller (and the pods it manages) to be
// created by RunRC and friends; it doubles as the base config for Deployments
// and ReplicaSets via embedding.
type RCConfig struct {
	Client        *client.Client
	Image         string   // container image to run
	Command       []string // container command override, if any
	Name          string
	Namespace     string
	PollInterval  time.Duration
	Timeout       time.Duration
	PodStatusFile *os.File
	Replicas      int
	CpuRequest    int64 // millicores
	CpuLimit      int64 // millicores
	MemRequest    int64 // bytes
	MemLimit      int64 // bytes
	ReadinessProbe *api.Probe
	DNSPolicy      *api.DNSPolicy

	// Env vars, set the same for every pod.
	Env map[string]string

	// Extra labels added to every pod.
	Labels map[string]string

	// Node selector for pods in the RC.
	NodeSelector map[string]string

	// Ports to declare in the container (map of name to containerPort).
	Ports map[string]int
	// Ports to declare in the container as host and container ports.
	HostPorts map[string]int

	Volumes      []api.Volume
	VolumeMounts []api.VolumeMount

	// Pointer to a list of pods; if non-nil, will be set to a list of pods
	// created by this RC by RunRC.
	CreatedPods *[]*api.Pod

	// Maximum allowable container failures. If exceeded, RunRC returns an error.
	// Defaults to replicas*0.1 if unspecified.
	MaxContainerFailures *int

	// If set to false starting RC will print progress, otherwise only errors will be printed.
	Silent bool
}
// DeploymentConfig reuses RCConfig to describe a Deployment to create.
type DeploymentConfig struct {
	RCConfig
}

// ReplicaSetConfig reuses RCConfig to describe a ReplicaSet to create.
type ReplicaSetConfig struct {
	RCConfig
}
// nowStamp returns the current time rendered with millisecond precision,
// used to prefix every framework log line.
func nowStamp() string {
	now := time.Now()
	return now.Format(time.StampMilli)
}
func log(level string, format string, args ...interface{}) {
fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
// Logf logs an INFO-level message to the test output.
func Logf(format string, args ...interface{}) {
	log("INFO", format, args...)
}
// Failf logs the message and aborts the current test via ginkgo.Fail.
func Failf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	// Pass the pre-formatted message as an argument, not as the format
	// string: if msg itself contains '%' verbs (e.g. from quoted user data),
	// log would otherwise re-interpret them and emit "%!v(MISSING)" noise.
	log("INFO", "%s", msg)
	Fail(nowStamp()+": "+msg, 1)
}
// Skipf logs the message and skips the current test via ginkgo.Skip.
func Skipf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	// As in Failf: the message is already formatted, so hand it to log as a
	// value rather than as a format string to keep '%' characters intact.
	log("INFO", "%s", msg)
	Skip(nowStamp() + ": " + msg)
}
// SkipUnlessNodeCountIsAtLeast skips the test when the cluster has fewer
// than minNodeCount nodes.
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
	numNodes := TestContext.CloudConfig.NumNodes
	if numNodes < minNodeCount {
		Skipf("Requires at least %d nodes (not %d)", minNodeCount, numNodes)
	}
}
// SkipUnlessAtLeast skips the test with the given message when value is
// below minValue.
func SkipUnlessAtLeast(value int, minValue int, message string) {
	if value < minValue {
		// The caller's message is arbitrary text, not a printf format; pass
		// it as a value so any '%' characters survive unmangled.
		Skipf("%s", message)
	}
}
// SkipIfProviderIs skips the test when the configured cloud provider is one
// of the listed unsupported providers.
func SkipIfProviderIs(unsupportedProviders ...string) {
	if !ProviderIs(unsupportedProviders...) {
		return
	}
	Skipf("Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider)
}
// SkipUnlessProviderIs skips the test unless the configured cloud provider
// is one of the listed supported providers.
func SkipUnlessProviderIs(supportedProviders ...string) {
	if ProviderIs(supportedProviders...) {
		return
	}
	Skipf("Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider)
}
// SkipIfContainerRuntimeIs skips the test when the configured container
// runtime matches any of the given names.
func SkipIfContainerRuntimeIs(runtimes ...string) {
	// Note: the loop variable is deliberately not named "runtime", which
	// would shadow the imported k8s runtime package.
	for _, rt := range runtimes {
		if rt == TestContext.ContainerRuntime {
			Skipf("Not supported under container runtime %s", rt)
		}
	}
}
// ProviderIs reports whether the configured cloud provider matches any of
// the given names, case-insensitively.
func ProviderIs(providers ...string) bool {
	for _, provider := range providers {
		// EqualFold compares case-insensitively without allocating two
		// lowercased copies per comparison.
		if strings.EqualFold(provider, TestContext.Provider) {
			return true
		}
	}
	return false
}
// SkipUnlessServerVersionGTE skips the test when the server version is older
// than v; a failure to fetch the version fails the test outright.
func SkipUnlessServerVersionGTE(v semver.Version, c discovery.ServerVersionInterface) {
	gte, err := ServerVersionGTE(v, c)
	switch {
	case err != nil:
		Failf("Failed to get server version: %v", err)
	case !gte:
		Skipf("Not supported for server versions before %q", v)
	}
}
// SkipUnlessFederated detects whether the federation namespace exists in the
// underlying cluster and skips the test when it does not; any other lookup
// error fails the test.
func SkipUnlessFederated(c *client.Client) {
	federationNS := os.Getenv("FEDERATION_NAMESPACE")
	if len(federationNS) == 0 {
		federationNS = "federation"
	}
	if _, err := c.Namespaces().Get(federationNS); err != nil {
		if !apierrs.IsNotFound(err) {
			Failf("Unexpected error getting namespace: %v", err)
			return
		}
		Skipf("Could not find federation namespace %s: skipping federated test", federationNS)
	}
}
// SkipIfMissingResource probes for the given group/version/resource by
// issuing a List in the namespace, skipping the test when the resource is
// not served and failing on any other error.
func SkipIfMissingResource(clientPool dynamic.ClientPool, gvr unversioned.GroupVersionResource, namespace string) {
	dynamicClient, err := clientPool.ClientForGroupVersion(gvr.GroupVersion())
	if err != nil {
		Failf("Unexpected error getting dynamic client for %v: %v", gvr.GroupVersion(), err)
	}
	apiResource := unversioned.APIResource{Name: gvr.Resource, Namespaced: true}
	_, err = dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{})
	if err != nil {
		// not all resources support list, so we ignore those
		if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
			Skipf("Could not find %s resource, skipping test: %#v", gvr, err)
		}
		Failf("Unexpected error getting %v: %v", gvr, err)
	}
}
// ProvidersWithSSH are those providers where each node is accessible with SSH
var ProvidersWithSSH = []string{"gce", "gke", "aws"}

// providersWithMasterSSH are those providers where master node is accessible with SSH
var providersWithMasterSSH = []string{"gce", "gke", "kubemark", "aws"}

// podCondition is a predicate over a pod; returning (true, err) stops the
// wait loop, with err reported to the caller.
type podCondition func(pod *api.Pod) (bool, error)
// podReady returns whether pod has a condition of Ready with a status of true.
// TODO: should be replaced with api.IsPodReady
func podReady(pod *api.Pod) bool {
	for _, cond := range pod.Status.Conditions {
		if cond.Type != api.PodReady {
			continue
		}
		if cond.Status == api.ConditionTrue {
			return true
		}
	}
	return false
}
// logPodStates logs basic info of provided pods for debugging.
func logPodStates(pods []api.Pod) {
	// Find maximum widths for pod, node, and phase strings for column printing.
	maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE")
	for i := range pods {
		pod := &pods[i]
		if len(pod.ObjectMeta.Name) > maxPodW {
			maxPodW = len(pod.ObjectMeta.Name)
		}
		if len(pod.Spec.NodeName) > maxNodeW {
			maxNodeW = len(pod.Spec.NodeName)
		}
		if len(pod.Status.Phase) > maxPhaseW {
			maxPhaseW = len(pod.Status.Phase)
		}
	}
	// Increase widths by one to separate by a single space.
	maxPodW++
	maxNodeW++
	maxPhaseW++
	maxGraceW++
	// Log pod info. * does space padding, - makes them left-aligned.
	// ("%-[1]*[2]s" takes the width from arg 1 and the value from arg 2.)
	Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
		maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
	for _, pod := range pods {
		grace := ""
		if pod.DeletionGracePeriodSeconds != nil {
			grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
		}
		Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
			maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
	}
	Logf("") // Final empty line helps for readability.
}
// errorBadPodsStates create error message of basic info of bad pods for debugging.
func errorBadPodsStates(badPods []api.Pod, desiredPods int, ns string, timeout time.Duration) string {
	errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in the desired state in %v\n", len(badPods), desiredPods, ns, timeout)
	// Print bad pods info only if there are fewer than 10 bad pods
	if len(badPods) > 10 {
		return errStr + "There are too many bad pods. Please check log for details."
	}
	// Render a fixed-width table of the bad pods via tabwriter.
	buf := bytes.NewBuffer(nil)
	w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
	fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
	for _, badPod := range badPods {
		grace := ""
		if badPod.DeletionGracePeriodSeconds != nil {
			grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
		}
		podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%s",
			badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
		fmt.Fprintln(w, podInfo)
	}
	w.Flush()
	return errStr + buf.String()
}
// PodRunningReady checks whether pod p's phase is running and it has a ready
// condition of status true.
func PodRunningReady(p *api.Pod) (bool, error) {
	// The pod must have reached the Running phase...
	if phase := p.Status.Phase; phase != api.PodRunning {
		return false, fmt.Errorf("want pod '%s' on '%s' to be '%v' but was '%v'",
			p.ObjectMeta.Name, p.Spec.NodeName, api.PodRunning, phase)
	}
	// ...and additionally report a Ready condition of True.
	if podReady(p) {
		return true, nil
	}
	return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v",
		p.ObjectMeta.Name, p.Spec.NodeName, api.PodReady, api.ConditionTrue, p.Status.Conditions)
}
// PodRunningReadyOrSucceeded accepts pods that have already run to
// completion (Succeeded) in addition to those that are Running and Ready.
func PodRunningReadyOrSucceeded(p *api.Pod) (bool, error) {
	if p.Status.Phase != api.PodSucceeded {
		return PodRunningReady(p)
	}
	return true, nil
}
// PodNotReady checks whether pod p's has a ready condition of status false.
func PodNotReady(p *api.Pod) (bool, error) {
	// Success means the pod does NOT report Ready=True.
	if !podReady(p) {
		return true, nil
	}
	return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v",
		p.ObjectMeta.Name, p.Spec.NodeName, api.PodReady, api.ConditionFalse, p.Status.Conditions)
}
// hasReplicationControllersForPod checks if a Pod is controlled by any
// Replication Controller in the given list (by label-selector match).
func hasReplicationControllersForPod(rcs *api.ReplicationControllerList, pod api.Pod) bool {
	podLabels := labels.Set(pod.ObjectMeta.Labels)
	for i := range rcs.Items {
		selector := labels.SelectorFromSet(rcs.Items[i].Spec.Selector)
		if selector.Matches(podLabels) {
			return true
		}
	}
	return false
}
// WaitForPodsSuccess waits till all labels matching the given selector enter
// the Success state. The caller is expected to only invoke this method once the
// pods have been created.
func WaitForPodsSuccess(c *client.Client, ns string, successPodLabels map[string]string, timeout time.Duration) error {
	successPodSelector := labels.SelectorFromSet(successPodLabels)
	// badPods is captured by the closure so the failure path below can log
	// the last observed set of not-yet-succeeded pods.
	start, badPods := time.Now(), []api.Pod{}

	if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
		podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: successPodSelector})
		if err != nil {
			// Transient list errors are tolerated; keep polling.
			Logf("Error getting pods in namespace %q: %v", ns, err)
			return false, nil
		}
		if len(podList.Items) == 0 {
			// Nothing matches the selector: vacuously done.
			Logf("Waiting for pods to enter Success, but no pods in %q match label %v", ns, successPodLabels)
			return true, nil
		}
		badPods = []api.Pod{}
		for _, pod := range podList.Items {
			if pod.Status.Phase != api.PodSucceeded {
				badPods = append(badPods, pod)
			}
		}
		successPods := len(podList.Items) - len(badPods)
		Logf("%d / %d pods in namespace %q are in Success state (%d seconds elapsed)",
			successPods, len(podList.Items), ns, int(time.Since(start).Seconds()))
		if len(badPods) == 0 {
			return true, nil
		}
		return false, nil
	}) != nil {
		logPodStates(badPods)
		LogPodsWithLabels(c, ns, successPodLabels)
		return fmt.Errorf("Not all pods in namespace %q are successful within %v", ns, timeout)
	}
	return nil
}
// ReadyReplicaVersion is the first server version whose RC/RS status reports
// ReadyReplicas; older servers skip the ready-replica check below.
var ReadyReplicaVersion = version.MustParse("v1.4.0")
// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
// namespace ns are either running and ready, or failed but controlled by a
// controller. Also, it ensures that at least minPods are running and
// ready. It has separate behavior from other 'wait for' pods functions in
// that it requests the list of pods on every iteration. This is useful, for
// example, in cluster startup, because the number of pods increases while
// waiting.
// If ignoreLabels is not empty, pods matching this selector are ignored and
// this function waits for minPods to enter Running/Ready and for all pods
// matching ignoreLabels to enter Success phase. Otherwise an error is returned
// even if there are minPods pods, some of which are in Running/Ready
// and some in Success. This is to allow the client to decide if "Success"
// means "Ready" or not.
func WaitForPodsRunningReady(c *client.Client, ns string, minPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
	// This can be removed when we no longer have 1.3 servers running with upgrade tests.
	hasReadyReplicas, err := ServerVersionGTE(ReadyReplicaVersion, c.Discovery())
	if err != nil {
		Logf("Error getting the server version: %v", err)
		return err
	}

	ignoreSelector := labels.SelectorFromSet(ignoreLabels)
	start := time.Now()
	Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
		timeout, minPods, ns)
	wg := sync.WaitGroup{}
	wg.Add(1)
	var waitForSuccessError error
	// badPods/desiredPods are written by the poll closure and read after the
	// poll returns, to build the failure message.
	badPods := []api.Pod{}
	desiredPods := 0
	// In parallel, wait for the ignored pods (e.g. image pullers) to succeed.
	go func() {
		waitForSuccessError = WaitForPodsSuccess(c, ns, ignoreLabels, timeout)
		wg.Done()
	}()

	if wait.PollImmediate(Poll, timeout, func() (bool, error) {
		// We get the new list of pods, replication controllers, and
		// replica sets in every iteration because more pods come
		// online during startup and we want to ensure they are also
		// checked.
		replicas, replicaOk := int32(0), int32(0)
		rcList, err := c.ReplicationControllers(ns).List(api.ListOptions{})
		if err != nil {
			Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
			return false, nil
		}
		if hasReadyReplicas {
			// Sum desired vs ready replicas across all RCs and RSs; only
			// meaningful on servers that populate ReadyReplicas (>= 1.4).
			for _, rc := range rcList.Items {
				replicas += rc.Spec.Replicas
				replicaOk += rc.Status.ReadyReplicas
			}
			rsList, err := c.Extensions().ReplicaSets(ns).List(api.ListOptions{})
			if err != nil {
				Logf("Error getting replication sets in namespace %q: %v", ns, err)
				return false, nil
			}
			for _, rs := range rsList.Items {
				replicas += rs.Spec.Replicas
				replicaOk += rs.Status.ReadyReplicas
			}
		}

		podList, err := c.Pods(ns).List(api.ListOptions{})
		if err != nil {
			Logf("Error getting pods in namespace '%s': %v", ns, err)
			return false, nil
		}
		nOk := int32(0)
		badPods = []api.Pod{}
		desiredPods = len(podList.Items)
		for _, pod := range podList.Items {
			if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
				Logf("%v in state %v, ignoring", pod.Name, pod.Status.Phase)
				continue
			}
			if res, err := PodRunningReady(&pod); res && err == nil {
				nOk++
			} else {
				if pod.Status.Phase != api.PodFailed {
					Logf("The status of Pod %s is %s, waiting for it to be either Running or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
					badPods = append(badPods, pod)
				} else if _, ok := pod.Annotations[api.CreatedByAnnotation]; !ok {
					Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
					badPods = append(badPods, pod)
				}
				//ignore failed pods that are controlled by some controller
			}
		}
		Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
			nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
		if hasReadyReplicas {
			Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
		}

		// Note: replicas/replicaOk both stay 0 when !hasReadyReplicas, so
		// this comparison is vacuous on pre-1.4 servers.
		if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
			return true, nil
		}
		logPodStates(badPods)
		return false, nil
	}) != nil {
		return errors.New(errorBadPodsStates(badPods, desiredPods, ns, timeout))
	}
	// Wait for the concurrent WaitForPodsSuccess and surface its error, if any.
	wg.Wait()
	if waitForSuccessError != nil {
		return waitForSuccessError
	}

	return nil
}
// podFromManifest reads the manifest at filename (YAML or JSON) and decodes
// it into an api.Pod.
func podFromManifest(filename string) (*api.Pod, error) {
	var pod api.Pod
	Logf("Parsing pod from %v", filename)
	data := ReadOrDie(filename)
	jsonData, err := utilyaml.ToJSON(data)
	if err != nil {
		return nil, err
	}
	if err = runtime.DecodeInto(api.Codecs.UniversalDecoder(), jsonData, &pod); err != nil {
		return nil, err
	}
	return &pod, nil
}
// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
// to flip to Ready, log its output and delete it.
// All failures here are logged, not fatal: this is a diagnostic helper.
func RunKubernetesServiceTestContainer(c *client.Client, ns string) {
	path := "test/images/clusterapi-tester/pod.yaml"
	p, err := podFromManifest(path)
	if err != nil {
		Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
		return
	}
	p.Namespace = ns
	if _, err := c.Pods(ns).Create(p); err != nil {
		Logf("Failed to create %v: %v", p.Name, err)
		return
	}
	// Best-effort cleanup of the test pod, even when the wait below fails.
	defer func() {
		if err := c.Pods(ns).Delete(p.Name, nil); err != nil {
			Logf("Failed to delete pod %v: %v", p.Name, err)
		}
	}()
	timeout := 5 * time.Minute
	if err := waitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, PodRunningReady); err != nil {
		Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
		return
	}
	logs, err := GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
	if err != nil {
		Logf("Failed to retrieve logs from %v: %v", p.Name, err)
	} else {
		Logf("Output of clusterapi-tester:\n%v", logs)
	}
}
// kubectlLogPod logs the output of every container in pod whose name
// contains containerNameSubstr (empty substring matches all containers),
// falling back to the previous container instance's logs when the current
// ones are unavailable.
func kubectlLogPod(c *client.Client, pod api.Pod, containerNameSubstr string) {
	for _, container := range pod.Spec.Containers {
		if strings.Contains(container.Name, containerNameSubstr) {
			// Contains() matches all strings if substr is empty
			logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
			if err != nil {
				// Current logs unavailable (e.g. container restarted); try
				// the previous instance before giving up.
				logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
				if err != nil {
					Logf("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
				}
			}
			By(fmt.Sprintf("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName))
			Logf("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name)
		}
	}
}
// LogFailedContainers dumps the container logs of every pod in ns that is
// not Running and Ready.
func LogFailedContainers(c *client.Client, ns string) {
	podList, err := c.Pods(ns).List(api.ListOptions{})
	if err != nil {
		Logf("Error getting pods in namespace '%s': %v", ns, err)
		return
	}
	Logf("Running kubectl logs on non-ready containers in %v", ns)
	for i := range podList.Items {
		pod := podList.Items[i]
		if ok, err := PodRunningReady(&pod); err != nil || !ok {
			kubectlLogPod(c, pod, "")
		}
	}
}
// LogPodsWithLabels dumps the container logs of every pod in ns matching the
// given label set.
func LogPodsWithLabels(c *client.Client, ns string, match map[string]string) {
	selector := labels.SelectorFromSet(match)
	podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: selector})
	if err != nil {
		Logf("Error getting pods in namespace %q: %v", ns, err)
		return
	}
	Logf("Running kubectl logs on pods with labels %v in %v", match, ns)
	for i := range podList.Items {
		kubectlLogPod(c, podList.Items[i], "")
	}
}
// LogContainersInPodsWithLabels dumps the logs of containers whose name
// contains containerSubstr, for every pod in ns matching the label set.
func LogContainersInPodsWithLabels(c *client.Client, ns string, match map[string]string, containerSubstr string) {
	selector := labels.SelectorFromSet(match)
	podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: selector})
	if err != nil {
		Logf("Error getting pods in namespace %q: %v", ns, err)
		return
	}
	for i := range podList.Items {
		kubectlLogPod(c, podList.Items[i], containerSubstr)
	}
}
// DeleteNamespaces deletes all namespaces that match the given delete and skip filters.
// Filter is by simple strings.Contains; first skip filter, then delete filter.
// Returns the list of deleted namespaces or an error.
func DeleteNamespaces(c *client.Client, deleteFilter, skipFilter []string) ([]string, error) {
	By("Deleting namespaces")
	nsList, err := c.Namespaces().List(api.ListOptions{})
	Expect(err).NotTo(HaveOccurred())
	var deleted []string
	var wg sync.WaitGroup
OUTER:
	for _, item := range nsList.Items {
		// Skip filter wins over delete filter.
		if skipFilter != nil {
			for _, pattern := range skipFilter {
				if strings.Contains(item.Name, pattern) {
					continue OUTER
				}
			}
		}
		// A nil deleteFilter means "delete everything not skipped".
		if deleteFilter != nil {
			var shouldDelete bool
			for _, pattern := range deleteFilter {
				if strings.Contains(item.Name, pattern) {
					shouldDelete = true
					break
				}
			}
			if !shouldDelete {
				continue OUTER
			}
		}
		wg.Add(1)
		deleted = append(deleted, item.Name)
		// Delete namespaces concurrently; the name is passed as an argument
		// so the goroutine does not capture the loop variable.
		go func(nsName string) {
			defer wg.Done()
			defer GinkgoRecover()
			Expect(c.Namespaces().Delete(nsName)).To(Succeed())
			Logf("namespace : %v api call to delete is complete ", nsName)
		}(item.Name)
	}
	wg.Wait()
	return deleted, nil
}
// WaitForNamespacesDeleted polls until none of the given namespaces exist
// any longer, or the timeout elapses.
func WaitForNamespacesDeleted(c *client.Client, namespaces []string, timeout time.Duration) error {
	By("Waiting for namespaces to vanish")
	// Membership set of the namespaces that must disappear.
	pending := make(map[string]bool, len(namespaces))
	for _, ns := range namespaces {
		pending[ns] = true
	}
	//Now POLL until all namespaces have been eradicated.
	return wait.Poll(2*time.Second, timeout, func() (bool, error) {
		nsList, err := c.Namespaces().List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		for i := range nsList.Items {
			if pending[nsList.Items[i].Name] {
				return false, nil
			}
		}
		return true, nil
	})
}
// waitForServiceAccountInNamespace watches the named service account until
// it has API-token secrets attached, or the timeout elapses.
func waitForServiceAccountInNamespace(c *client.Client, ns, serviceAccountName string, timeout time.Duration) error {
	w, err := c.ServiceAccounts(ns).Watch(api.SingleObject(api.ObjectMeta{Name: serviceAccountName}))
	if err != nil {
		return err
	}
	_, err = watch.Until(timeout, w, client.ServiceAccountHasSecrets)
	return err
}
// waitForPodCondition polls the named pod until condition returns done=true
// (its error, possibly nil, is then returned), the pod disappears (NotFound
// is returned immediately), or the timeout elapses.
func waitForPodCondition(c *client.Client, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
	Logf("Waiting up to %[1]v for pod %[2]s status to be %[3]s", timeout, podName, desc)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pod, err := c.Pods(ns).Get(podName)
		if err != nil {
			if apierrs.IsNotFound(err) {
				// The pod is gone; no point in polling further.
				Logf("Pod %q in namespace %q disappeared. Error: %v", podName, ns, err)
				return err
			}
			// Aligning this text makes it much more readable
			Logf("Get pod %[1]s in namespace '%[2]s' failed, ignoring for %[3]v. Error: %[4]v",
				podName, ns, Poll, err)
			continue
		}
		// The condition decides both completion and (via err) success.
		done, err := condition(pod)
		if done {
			return err
		}
		Logf("Waiting for pod %[1]s in namespace '%[2]s' status to be '%[3]s'"+
			"(found phase: %[4]q, readiness: %[5]t) (%[6]v elapsed)",
			podName, ns, desc, pod.Status.Phase, podReady(pod), time.Since(start))
	}
	return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout)
}
// WaitForMatchPodsCondition finds match pods based on the input ListOptions.
// waits and checks if all match pods are in the given podCondition
func WaitForMatchPodsCondition(c *client.Client, opts api.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
	Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pods, err := c.Pods(api.NamespaceAll).List(opts)
		if err != nil {
			return err
		}
		conditionNotMatch := []string{}
		for _, pod := range pods.Items {
			// NOTE(review): a condition error is only surfaced when done is
			// also true; a (false, err) result is silently treated as
			// "not matched yet".
			done, err := condition(&pod)
			if done && err != nil {
				return fmt.Errorf("Unexpected error: %v", err)
			}
			if !done {
				conditionNotMatch = append(conditionNotMatch, format.Pod(&pod))
			}
		}
		if len(conditionNotMatch) <= 0 {
			// All pods matched; err is the (nil) List error at this point.
			return err
		}
		Logf("%d pods are not %s", len(conditionNotMatch), desc)
	}
	return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout)
}
// WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned
// the default service account is what is associated with pods when they do not specify a service account
// as a result, pods are not able to be provisioned in a namespace until the service account is provisioned
func WaitForDefaultServiceAccountInNamespace(c *client.Client, namespace string) error {
	return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout)
}
// WaitForFederationApiserverReady waits for the federation apiserver to be ready.
// It tests the readiness by sending a GET request and expecting a non error response.
func WaitForFederationApiserverReady(c *federation_release_1_4.Clientset) error {
	return wait.PollImmediate(time.Second, 1*time.Minute, func() (bool, error) {
		// Ready is defined as "listing clusters succeeds".
		if _, err := c.Federation().Clusters().List(api.ListOptions{}); err != nil {
			return false, nil
		}
		return true, nil
	})
}
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
// Transient Get failures are logged and retried at the given Poll interval.
func WaitForPersistentVolumePhase(phase api.PersistentVolumePhase, c *client.Client, pvName string, Poll, timeout time.Duration) error {
	Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pv, err := c.PersistentVolumes().Get(pvName)
		if err != nil {
			// Fixed garbled message ("Get persistent volume %s in failed").
			Logf("Get persistent volume %s failed, ignoring for %v: %v", pvName, Poll, err)
			continue
		}
		if pv.Status.Phase == phase {
			Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
			return nil
		}
		Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
	}
	return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}
// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
// "Deleted" means the Get returns a NotFound error; any other Get error is
// logged and retried at the given Poll interval.
func WaitForPersistentVolumeDeleted(c *client.Client, pvName string, Poll, timeout time.Duration) error {
	Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pv, err := c.PersistentVolumes().Get(pvName)
		if err == nil {
			// Still present; keep polling.
			Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
			continue
		}
		if apierrs.IsNotFound(err) {
			Logf("PersistentVolume %s was removed", pvName)
			return nil
		}
		// Fixed garbled message ("Get persistent volume %s in failed").
		Logf("Get persistent volume %s failed, ignoring for %v: %v", pvName, Poll, err)
	}
	return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout)
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
// Transient Get failures are logged and retried at the given Poll interval.
func WaitForPersistentVolumeClaimPhase(phase api.PersistentVolumeClaimPhase, c *client.Client, ns string, pvcName string, Poll, timeout time.Duration) error {
	Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pvc, err := c.PersistentVolumeClaims(ns).Get(pvcName)
		if err != nil {
			// Fixed garbled message ("Get persistent volume claim %s in failed").
			Logf("Get persistent volume claim %s failed, ignoring for %v: %v", pvcName, Poll, err)
			continue
		}
		if pvc.Status.Phase == phase {
			Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
			return nil
		}
		Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
	}
	return fmt.Errorf("PersistentVolumeClaim %s not in phase %s within %v", pvcName, phase, timeout)
}
// CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name.
// Please see NewFramework instead of using this directly.
//
// The namespace name is server-generated from the "e2e-tests-<baseName>-"
// prefix; all test namespaces are labeled with the run id so they can be
// found and cleaned up later. If TestContext.VerifyServiceAccount is set,
// the call also blocks until the "default" service account exists.
func CreateTestingNS(baseName string, c *client.Client, labels map[string]string) (*api.Namespace, error) {
	if labels == nil {
		labels = map[string]string{}
	}
	// Tag the namespace with this test run so cleanup can find it.
	labels["e2e-run"] = string(RunId)
	namespaceObj := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName),
			Namespace:    "",
			Labels:       labels,
		},
		Status: api.NamespaceStatus{},
	}
	// Be robust about making the namespace creation call.
	var got *api.Namespace
	if err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
		var err error
		// Assign to the outer `got` so the created namespace survives the
		// closure; creation errors are logged and retried, not fatal.
		got, err = c.Namespaces().Create(namespaceObj)
		if err != nil {
			Logf("Unexpected error while creating namespace: %v", err)
			return false, nil
		}
		return true, nil
	}); err != nil {
		return nil, err
	}
	if TestContext.VerifyServiceAccount {
		if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil {
			return nil, err
		}
	}
	return got, nil
}
// CheckTestingNSDeletedExcept checks whether all e2e based existing namespaces are in the Terminating state
// and waits until they are finally deleted. It ignores namespace skip.
func CheckTestingNSDeletedExcept(c *client.Client, skip string) error {
	// TODO: Since we don't have support for bulk resource deletion in the API,
	// while deleting a namespace we are deleting all objects from that namespace
	// one by one (one deletion == one API call). This basically exposes us to
	// throttling - currently controller-manager has a limit of max 20 QPS.
	// Once #10217 is implemented and used in namespace-controller, deleting all
	// object from a given namespace should be much faster and we will be able
	// to lower this timeout.
	// However, now Density test is producing ~26000 events and Load capacity test
	// is producing ~35000 events, thus assuming there are no other requests it will
	// take ~30 minutes to fully delete the namespace. Thus I'm setting it to 60
	// minutes to avoid any timeouts here.
	timeout := 60 * time.Minute
	Logf("Waiting for terminating namespaces to be deleted...")
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) {
		nsList, err := c.Namespaces().List(api.ListOptions{})
		if err != nil {
			Logf("Listing namespaces failed: %v", err)
			continue
		}
		remaining := 0
		for _, ns := range nsList.Items {
			name := ns.ObjectMeta.Name
			// Only e2e-owned namespaces (other than the skipped one) matter.
			if !strings.HasPrefix(name, "e2e-tests-") || name == skip {
				continue
			}
			// An e2e namespace still Active means deletion was never started.
			if ns.Status.Phase == api.NamespaceActive {
				return fmt.Errorf("Namespace %s is active", name)
			}
			remaining++
		}
		if remaining == 0 {
			return nil
		}
	}
	return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out")
}
// deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks
// whether there are any pods remaining in a non-terminating state.
//
// The error returned distinguishes (in the message) which component is the
// likely culprit: kubelet (pods remain but are all terminating), the
// namespace controller (pods or other content remain without deletion
// progress), or a wedged controller (namespace empty but not removed).
func deleteNS(c *client.Client, clientPool dynamic.ClientPool, namespace string, timeout time.Duration) error {
	if err := c.Namespaces().Delete(namespace); err != nil {
		return err
	}
	// wait for namespace to delete or timeout.
	// NOTE: the poll result is kept in `err` and only inspected after the
	// remaining-content check below, so the diagnostics can be folded into
	// the timeout error message.
	err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		if _, err := c.Namespaces().Get(namespace); err != nil {
			if apierrs.IsNotFound(err) {
				return true, nil
			}
			// Unexpected Get errors are logged and retried, not fatal.
			Logf("Error while waiting for namespace to be terminated: %v", err)
			return false, nil
		}
		return false, nil
	})
	// verify there is no more remaining content in the namespace
	remainingContent, cerr := hasRemainingContent(c, clientPool, namespace)
	if cerr != nil {
		return cerr
	}
	// if content remains, let's dump information about the namespace, and system for flake debugging.
	remainingPods := 0
	missingTimestamp := 0
	if remainingContent {
		// log information about namespace, and set of namespaces in api server to help flake detection
		logNamespace(c, namespace)
		logNamespaces(c, namespace)
		// if we can, check if there were pods remaining with no timestamp.
		remainingPods, missingTimestamp, _ = countRemainingPods(c, namespace)
	}
	// a timeout waiting for namespace deletion happened!
	if err != nil {
		// some content remains in the namespace
		if remainingContent {
			// pods remain
			if remainingPods > 0 {
				// but they were all undergoing deletion (kubelet is probably culprit)
				if missingTimestamp == 0 {
					return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v, pods missing deletion timestamp: %v", namespace, err, remainingPods, missingTimestamp)
				}
				// pods remained, but were not undergoing deletion (namespace controller is probably culprit)
				return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v", namespace, err, remainingPods)
			}
			// other content remains (namespace controller is probably screwed up)
			return fmt.Errorf("namespace %v was not deleted with limit: %v, namespaced content other than pods remain", namespace, err)
		}
		// no remaining content, but namespace was not deleted (namespace controller is probably wedged)
		return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err)
	}
	return nil
}
// logNamespaces logs the number of namespaces by phase
// namespace is the namespace the test was operating against that failed to delete so it can be grepped in logs
func logNamespaces(c *client.Client, namespace string) {
	namespaceList, err := c.Namespaces().List(api.ListOptions{})
	if err != nil {
		Logf("namespace: %v, unable to list namespaces: %v", namespace, err)
		return
	}
	numActive := 0
	numTerminating := 0
	// The loop variable previously shadowed the `namespace` parameter; it is
	// renamed to `item` so the parameter stays unambiguous in this scope.
	for _, item := range namespaceList.Items {
		if item.Status.Phase == api.NamespaceActive {
			numActive++
		} else {
			numTerminating++
		}
	}
	Logf("namespace: %v, total namespaces: %v, active: %v, terminating: %v", namespace, len(namespaceList.Items), numActive, numTerminating)
}
// logNamespace logs detail about a namespace
// (deletion timestamp, finalizers and phase) for flake debugging.
func logNamespace(c *client.Client, namespace string) {
	ns, err := c.Namespaces().Get(namespace)
	if err != nil {
		if apierrs.IsNotFound(err) {
			Logf("namespace: %v no longer exists", namespace)
			return
		}
		Logf("namespace: %v, unable to get namespace due to error: %v", namespace, err)
		return
	}
	// Fixed typo in the emitted log key: "DeletionTimetamp" -> "DeletionTimestamp".
	Logf("namespace: %v, DeletionTimestamp: %v, Finalizers: %v, Phase: %v", ns.Name, ns.DeletionTimestamp, ns.Spec.Finalizers, ns.Status.Phase)
}
// countRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp.
func countRemainingPods(c *client.Client, namespace string) (int, int, error) {
	pods, err := c.Pods(namespace).List(api.ListOptions{})
	if err != nil {
		return 0, 0, err
	}
	// Nothing left in the namespace.
	if len(pods.Items) == 0 {
		return 0, 0, nil
	}
	// Something remains; dump per-pod state for flake debugging.
	logPodStates(pods.Items)
	// Count pods that were never marked for deletion (no DeletionTimestamp).
	missingTimestamp := 0
	for i := range pods.Items {
		if pods.Items[i].DeletionTimestamp == nil {
			missingTimestamp++
		}
	}
	return len(pods.Items), missingTimestamp, nil
}
// hasRemainingContent checks if there is remaining content in the namespace via API discovery
//
// It walks every namespaced resource type the server advertises, lists each
// one with a dynamic client, and reports true if any list is non-empty.
// A nil clientPool disables the check entirely (returns false, nil).
func hasRemainingContent(c *client.Client, clientPool dynamic.ClientPool, namespace string) (bool, error) {
	// some tests generate their own framework.Client rather than the default
	// TODO: ensure every test call has a configured clientPool
	if clientPool == nil {
		return false, nil
	}
	// find out what content is supported on the server
	groupVersionResources, err := c.Discovery().ServerPreferredNamespacedResources()
	if err != nil {
		return false, err
	}
	// TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798
	ignoredResources := sets.NewString("bindings")
	contentRemaining := false
	// dump how many of resource type is on the server in a log.
	for _, gvr := range groupVersionResources {
		// get a client for this group version...
		dynamicClient, err := clientPool.ClientForGroupVersion(gvr.GroupVersion())
		if err != nil {
			// not all resource types support list, so some errors here are normal depending on the resource type.
			Logf("namespace: %s, unable to get client - gvr: %v, error: %v", namespace, gvr, err)
			continue
		}
		// get the api resource
		apiResource := unversioned.APIResource{Name: gvr.Resource, Namespaced: true}
		// TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798
		if ignoredResources.Has(apiResource.Name) {
			Logf("namespace: %s, resource: %s, ignored listing per whitelist", namespace, apiResource.Name)
			continue
		}
		obj, err := dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{})
		if err != nil {
			// not all resources support list, so we ignore those
			if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
				continue
			}
			return false, err
		}
		// The dynamic client must hand back an UnstructuredList; anything
		// else indicates a programming error, not remaining content.
		unstructuredList, ok := obj.(*runtime.UnstructuredList)
		if !ok {
			return false, fmt.Errorf("namespace: %s, resource: %s, expected *runtime.UnstructuredList, got %#v", namespace, apiResource.Name, obj)
		}
		if len(unstructuredList.Items) > 0 {
			Logf("namespace: %s, resource: %s, items remaining: %v", namespace, apiResource.Name, len(unstructuredList.Items))
			contentRemaining = true
		}
	}
	return contentRemaining, nil
}
// ContainerInitInvariant is an InvariantFunc that checks a pair of
// consecutive watch snapshots of the same pod for init-container
// consistency: the init container list must not change, each snapshot must
// be internally consistent, and a pod may never regress from initialized
// back to uninitialized.
func ContainerInitInvariant(older, newer runtime.Object) error {
	oldPod := older.(*api.Pod)
	newPod := newer.(*api.Pod)
	// Pods without init containers are trivially consistent.
	if len(oldPod.Spec.InitContainers) == 0 {
		return nil
	}
	if len(oldPod.Spec.InitContainers) != len(newPod.Spec.InitContainers) {
		return fmt.Errorf("init container list changed")
	}
	// Both events must refer to the same pod instance, not a recreation.
	if oldPod.UID != newPod.UID {
		return fmt.Errorf("two different pods exist in the condition: %s vs %s", oldPod.UID, newPod.UID)
	}
	if err := initContainersInvariants(oldPod); err != nil {
		return err
	}
	if err := initContainersInvariants(newPod); err != nil {
		return err
	}
	oldInit, _, _ := podInitialized(oldPod)
	newInit, _, _ := podInitialized(newPod)
	if oldInit && !newInit {
		// TODO: we may in the future enable resetting PodInitialized = false if the kubelet needs to restart it
		// from scratch
		return fmt.Errorf("pod cannot be initialized and then regress to not being initialized")
	}
	return nil
}
// podInitialized inspects the pod's init container statuses and reports
// whether all init containers have terminated successfully (ok) and whether
// any of them failed (failed). An error is returned when the statuses are
// internally inconsistent (e.g. a container running after a failed one).
//
// NOTE: the switch case order is load-bearing — allInit/initFailed describe
// the containers *before* the current one, so the "isn't waiting" checks
// must precede the Terminated checks that update those flags.
func podInitialized(pod *api.Pod) (ok bool, failed bool, err error) {
	allInit := true
	initFailed := false
	for _, s := range pod.Status.InitContainerStatuses {
		switch {
		case initFailed && s.State.Waiting == nil:
			return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name)
		case allInit && s.State.Waiting == nil:
			return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name)
		case s.State.Terminated == nil:
			// Still running or waiting: initialization not complete yet.
			allInit = false
		case s.State.Terminated.ExitCode != 0:
			// Terminated unsuccessfully: initialization failed.
			allInit = false
			initFailed = true
		case !s.Ready:
			return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name)
		}
	}
	return allInit, initFailed, nil
}
// initContainersInvariants verifies a single pod snapshot is internally
// consistent with respect to init containers: until initialization succeeds,
// regular containers must be waiting with reason PodInitializing, and the
// PodInitialized condition must agree with the observed init statuses.
func initContainersInvariants(pod *api.Pod) error {
	allInit, initFailed, err := podInitialized(pod)
	if err != nil {
		return err
	}
	if !allInit || initFailed {
		// Initialization is not (successfully) complete, so no regular
		// container may have started or restarted yet.
		for _, s := range pod.Status.ContainerStatuses {
			if s.State.Waiting == nil || s.RestartCount != 0 {
				return fmt.Errorf("container %s is not waiting but initialization not complete", s.Name)
			}
			if s.State.Waiting.Reason != "PodInitializing" {
				return fmt.Errorf("container %s should have reason PodInitializing: %s", s.Name, s.State.Waiting.Reason)
			}
		}
	}
	_, c := api.GetPodCondition(&pod.Status, api.PodInitialized)
	if c == nil {
		return fmt.Errorf("pod does not have initialized condition")
	}
	if c.LastTransitionTime.IsZero() {
		return fmt.Errorf("PodInitialized condition should always have a transition time")
	}
	switch {
	case c.Status == api.ConditionUnknown:
		return fmt.Errorf("PodInitialized condition should never be Unknown")
	case c.Status == api.ConditionTrue && (initFailed || !allInit):
		// Fixed garbled message (was "but all not all containers initialized").
		return fmt.Errorf("PodInitialized condition was True but not all containers initialized")
	case c.Status == api.ConditionFalse && (!initFailed && allInit):
		return fmt.Errorf("PodInitialized condition was False but all containers initialized")
	}
	return nil
}
// InvariantFunc checks a property that must hold between two consecutive
// watch snapshots of the same object.
type InvariantFunc func(older, newer runtime.Object) error

// CheckInvariants applies every invariant function to each consecutive pair
// of events and aggregates the unique violation messages into one error.
func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error {
	errs := sets.NewString()
	// Walk adjacent pairs (events[i], events[i+1]).
	for i := 0; i+1 < len(events); i++ {
		for _, fn := range fns {
			if err := fn(events[i].Object, events[i+1].Object); err != nil {
				errs.Insert(err.Error())
			}
		}
	}
	if errs.Len() > 0 {
		return fmt.Errorf("invariants violated:\n* %s", strings.Join(errs.List(), "\n* "))
	}
	return nil
}
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
func WaitForPodRunningInNamespace(c *client.Client, pod *api.Pod) error {
	// this short-circuit is needed for cases when we pass a list of pods instead
	// of newly created pod (eg. VerifyPods) which means we are getting already
	// running pod for which waiting does not make sense and will always fail
	if pod.Status.Phase == api.PodRunning {
		return nil
	}
	// Watch from the pod's observed ResourceVersion so no transitions are missed.
	return waitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, pod.ResourceVersion, PodStartTimeout)
}
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
// Unlike WaitForPodRunningInNamespace it takes only the pod name, watching
// from an empty resourceVersion (i.e. the current state).
func WaitForPodNameRunningInNamespace(c *client.Client, podName, namespace string) error {
	return waitTimeoutForPodRunningInNamespace(c, podName, namespace, "", PodStartTimeout)
}
// Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running.
// The resourceVersion is used when Watching object changes, it tells since when we care
// about changes to the pod. Returns an error if timeout occurs first, or pod goes in to failed state.
func waitForPodRunningInNamespaceSlow(c *client.Client, podName, namespace, resourceVersion string) error {
	return waitTimeoutForPodRunningInNamespace(c, podName, namespace, resourceVersion, slowPodStartTimeout)
}
// waitTimeoutForPodRunningInNamespace watches the single named pod (from
// resourceVersion onward) until client.PodRunning reports it is running,
// failed, or the timeout expires.
func waitTimeoutForPodRunningInNamespace(c *client.Client, podName, namespace, resourceVersion string, timeout time.Duration) error {
	watcher, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
	if err != nil {
		return err
	}
	_, err = watch.Until(timeout, watcher, client.PodRunning)
	return err
}
// Waits default amount of time (podNoLongerRunningTimeout) for the specified pod to stop running.
// Returns an error if timeout occurs first.
func WaitForPodNoLongerRunningInNamespace(c *client.Client, podName, namespace, resourceVersion string) error {
	return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, resourceVersion, podNoLongerRunningTimeout)
}
// WaitTimeoutForPodNoLongerRunningInNamespace watches the single named pod
// (from resourceVersion onward) until client.PodCompleted reports it has
// stopped running or the timeout expires.
func WaitTimeoutForPodNoLongerRunningInNamespace(c *client.Client, podName, namespace, resourceVersion string, timeout time.Duration) error {
	watcher, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
	if err != nil {
		return err
	}
	_, err = watch.Until(timeout, watcher, client.PodCompleted)
	return err
}
// waitTimeoutForPodReadyInNamespace watches the single named pod (from
// resourceVersion onward) until client.PodRunningAndReady reports it is
// running and ready, or the timeout expires.
func waitTimeoutForPodReadyInNamespace(c *client.Client, podName, namespace, resourceVersion string, timeout time.Duration) error {
	watcher, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
	if err != nil {
		return err
	}
	_, err = watch.Until(timeout, watcher, client.PodRunningAndReady)
	return err
}
// WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state.
// The resourceVersion is used when Watching object changes, it tells since when we care
// about changes to the pod.
func WaitForPodNotPending(c *client.Client, ns, podName, resourceVersion string) error {
	watcher, err := c.Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
	if err != nil {
		return err
	}
	_, err = watch.Until(PodStartTimeout, watcher, client.PodNotPending)
	return err
}
// waitForPodTerminatedInNamespace returns an error if it took too long for the pod
// to terminate or if the pod terminated with an unexpected reason.
func waitForPodTerminatedInNamespace(c *client.Client, podName, reason, namespace string) error {
	return waitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *api.Pod) (bool, error) {
		// Keep waiting until the pod actually fails.
		if pod.Status.Phase != api.PodFailed {
			return false, nil
		}
		if pod.Status.Reason == reason {
			return true, nil
		}
		// Terminated, but for the wrong reason: done with an error.
		return true, fmt.Errorf("Expected pod %v in namespace %v to be terminated with reason %v, got reason: %v", podName, namespace, reason, pod.Status.Reason)
	})
}
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
// Success/failure is judged from the named container's terminated state, not
// from the pod phase (see the linked issue).
func waitForPodSuccessInNamespaceTimeout(c *client.Client, podName string, contName string, namespace string, timeout time.Duration) error {
	return waitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *api.Pod) (bool, error) {
		// Cannot use pod.Status.Phase == api.PodSucceeded/api.PodFailed due to #2632
		ci, ok := api.GetContainerStatus(pod.Status.ContainerStatuses, contName)
		if !ok {
			Logf("No Status.Info for container '%s' in pod '%s' yet", contName, podName)
		} else {
			if ci.State.Terminated != nil {
				// Exit code 0 means success; any other code ends the wait with an error.
				if ci.State.Terminated.ExitCode == 0 {
					By("Saw pod success")
					return true, nil
				}
				return true, fmt.Errorf("pod '%s' terminated with failure: %+v", podName, ci.State.Terminated)
			}
			Logf("Nil State.Terminated for container '%s' in pod '%s' in namespace '%s' so far", contName, podName, namespace)
		}
		// Container not terminated yet: keep polling.
		return false, nil
	})
}
// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout.
func WaitForPodSuccessInNamespace(c *client.Client, podName string, contName string, namespace string) error {
	return waitForPodSuccessInNamespaceTimeout(c, podName, contName, namespace, PodStartTimeout)
}
// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout.
func WaitForPodSuccessInNamespaceSlow(c *client.Client, podName string, contName string, namespace string) error {
	return waitForPodSuccessInNamespaceTimeout(c, podName, contName, namespace, slowPodStartTimeout)
}
// waitForRCPodOnNode returns the pod from the given replication controller (described by rcName) which is scheduled on the given node.
// In case of failure or too long waiting time, an error is returned.
func waitForRCPodOnNode(c *client.Client, ns, rcName, node string) (*api.Pod, error) {
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
	var p *api.Pod
	err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
		Logf("Waiting for pod %s to appear on node %s", rcName, node)
		options := api.ListOptions{LabelSelector: label}
		pods, err := c.Pods(ns).List(options)
		if err != nil {
			return false, err
		}
		for _, pod := range pods.Items {
			if pod.Spec.NodeName == node {
				Logf("Pod %s found on node %s", pod.Name, node)
				// Copy before taking the address: `pod` is the range loop
				// variable, and returning &pod would alias storage that is
				// reused on every iteration.
				match := pod
				p = &match
				return true, nil
			}
		}
		return false, nil
	})
	return p, err
}
// WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status.
//
// The watch is scoped to the single RC by name+namespace via a field
// selector; a Deleted event aborts the wait with a NotFound error.
func WaitForRCToStabilize(c *client.Client, ns, name string, timeout time.Duration) error {
	options := api.ListOptions{FieldSelector: fields.Set{
		"metadata.name":      name,
		"metadata.namespace": ns,
	}.AsSelector()}
	w, err := c.ReplicationControllers(ns).Watch(options)
	if err != nil {
		return err
	}
	_, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Deleted:
			// RC disappeared while we were waiting for it to stabilize.
			return false, apierrs.NewNotFound(unversioned.GroupResource{Resource: "replicationcontrollers"}, "")
		}
		switch rc := event.Object.(type) {
		case *api.ReplicationController:
			// Stable means: status caught up to the spec generation AND the
			// observed replica count matches the desired one.
			if rc.Name == name && rc.Namespace == ns &&
				rc.Generation <= rc.Status.ObservedGeneration &&
				rc.Spec.Replicas == rc.Status.Replicas {
				return true, nil
			}
			Logf("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
				name, rc.Generation, rc.Status.ObservedGeneration, rc.Spec.Replicas, rc.Status.Replicas)
		}
		return false, nil
	})
	return err
}
// WaitForPodToDisappear polls (at the given interval, up to timeout) the pods
// matching label in namespace ns until no pod named podName remains.
func WaitForPodToDisappear(c *client.Client, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		Logf("Waiting for pod %s to disappear", podName)
		pods, err := c.Pods(ns).List(api.ListOptions{LabelSelector: label})
		if err != nil {
			return false, err
		}
		stillExists := false
		for _, pod := range pods.Items {
			if pod.Name == podName {
				Logf("Pod %s still exists", podName)
				stillExists = true
			}
		}
		if stillExists {
			return false, nil
		}
		Logf("Pod %s no longer exists", podName)
		return true, nil
	})
}
// WaitForRCPodToDisappear returns nil if the pod from the given replication controller (described by rcName) no longer exists.
// In case of failure or too long waiting time, an error is returned.
func WaitForRCPodToDisappear(c *client.Client, ns, rcName, podName string) error {
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
	// NodeController evicts pod after 5 minutes, so we need timeout greater than that.
	// Additionally, there can be non-zero grace period, so we are setting 10 minutes
	// to be on the safe side.
	return WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute)
}
// WaitForService waits until the service appears (exist == true), or disappears (exist == false)
func WaitForService(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		_, err := c.Services(namespace).Get(name)
		// Service present.
		if err == nil {
			if !exist {
				return false, nil
			}
			Logf("Service %s in namespace %s found.", name, namespace)
			return true, nil
		}
		// Service absent.
		if apierrs.IsNotFound(err) {
			if exist {
				return false, nil
			}
			Logf("Service %s in namespace %s disappeared.", name, namespace)
			return true, nil
		}
		// Any other error is logged and retried.
		Logf("Get service %s in namespace %s failed: %v", name, namespace, err)
		return false, nil
	})
	if err != nil {
		stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
		return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err)
	}
	return nil
}
// WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum.
func WaitForServiceEndpointsNum(c *client.Client, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
	return wait.Poll(interval, timeout, func() (bool, error) {
		Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
		list, err := c.Endpoints(namespace).List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		// Find the endpoints object for this service and compare its
		// total ready-address count with the expectation.
		for i := range list.Items {
			ep := &list.Items[i]
			if ep.Name == serviceName && countEndpointsNum(ep) == expectNum {
				return true, nil
			}
		}
		return false, nil
	})
}
// countEndpointsNum returns the total number of addresses across all
// subsets of the given Endpoints object.
func countEndpointsNum(e *api.Endpoints) int {
	total := 0
	for i := range e.Subsets {
		total += len(e.Subsets[i].Addresses)
	}
	return total
}
// WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
func WaitForReplicationController(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		_, err := c.ReplicationControllers(namespace).Get(name)
		if err != nil {
			// Not found (or any Get error): done if we were waiting for it
			// to disappear, keep polling otherwise.
			Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
			return !exist, nil
		}
		// Found: done if we were waiting for it to appear.
		Logf("ReplicationController %s in namespace %s found.", name, namespace)
		return exist, nil
	})
	if err != nil {
		stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
		return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %v", namespace, name, stateMsg[exist], err)
	}
	return nil
}
// WaitForEndpoint polls (at interval Poll, up to EndpointRegisterTimeout)
// until the named endpoints object has at least one address in its first
// subset. A Get error fails the test immediately via gomega's Expect.
func WaitForEndpoint(c *client.Client, ns, name string) error {
	for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) {
		endpoint, err := c.Endpoints(ns).Get(name)
		Expect(err).NotTo(HaveOccurred())
		if len(endpoint.Subsets) > 0 && len(endpoint.Subsets[0].Addresses) > 0 {
			return nil
		}
		Logf("Endpoint %s/%s is not ready yet", ns, name)
	}
	return fmt.Errorf("Failed to get endpoints for %s/%s", ns, name)
}
// Context for checking pods responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type podProxyResponseChecker struct {
	c              *client.Client
	ns             string
	label          labels.Selector
	controllerName string
	respondName    bool // Whether the pod should respond with its own name.
	pods           *api.PodList
}

// PodProxyResponseChecker builds a podProxyResponseChecker for the given
// pod list; use its CheckAllResponses method as a poll condition.
func PodProxyResponseChecker(c *client.Client, ns string, label labels.Selector, controllerName string, respondName bool, pods *api.PodList) podProxyResponseChecker {
	return podProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}
// CheckAllResponses issues GETs to all pods in the context and verify they
// reply with their own pod name.
//
// Returns (true, nil) only when every pod in r.pods responded as expected
// in this pass; (false, nil) means "retry"; a non-nil error aborts the poll
// (replica set changed, or the server version query failed).
func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
	successes := 0
	options := api.ListOptions{LabelSelector: r.label}
	currentPods, err := r.c.Pods(r.ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	for i, pod := range r.pods.Items {
		// Check that the replica list remains unchanged, otherwise we have problems.
		if !isElementOf(pod.UID, currentPods) {
			return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
		}
		// Newer servers expose the proxy as a pod subresource; older ones
		// use the legacy "proxy" prefix. Pick the right URL shape.
		subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, r.c)
		if err != nil {
			return false, err
		}
		var body []byte
		if subResourceProxyAvailable {
			body, err = r.c.Get().
				Namespace(r.ns).
				Resource("pods").
				SubResource("proxy").
				Name(string(pod.Name)).
				Do().
				Raw()
		} else {
			body, err = r.c.Get().
				Prefix("proxy").
				Namespace(r.ns).
				Resource("pods").
				Name(string(pod.Name)).
				Do().
				Raw()
		}
		if err != nil {
			// A failed GET is not fatal for the poll; this pod just does not
			// count toward successes in this pass.
			Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
			continue
		}
		// The response checker expects the pod's name unless !respondName, in
		// which case it just checks for a non-empty response.
		got := string(body)
		what := ""
		if r.respondName {
			what = "expected"
			want := pod.Name
			if got != want {
				Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
					r.controllerName, i+1, pod.Name, want, got)
				continue
			}
		} else {
			what = "non-empty"
			if len(got) == 0 {
				Logf("Controller %s: Replica %d [%s] expected non-empty response",
					r.controllerName, i+1, pod.Name)
				continue
			}
		}
		successes++
		Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
			r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
	}
	if successes < len(r.pods.Items) {
		return false, nil
	}
	return true, nil
}
// ServerVersionGTE returns true if v is greater than or equal to the server
// version.
//
// TODO(18726): This should be incorporated into client.VersionInterface.
func ServerVersionGTE(v semver.Version, c discovery.ServerVersionInterface) (bool, error) {
	serverVersion, err := c.ServerVersion()
	if err != nil {
		return false, fmt.Errorf("Unable to get server version: %v", err)
	}
	parsed, err := version.Parse(serverVersion.GitVersion)
	if err != nil {
		return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err)
	}
	return parsed.GTE(v), nil
}
// SkipUnlessKubectlVersionGTE skips the current test when the local kubectl
// binary is older than v; a failure to determine the version fails the test.
func SkipUnlessKubectlVersionGTE(v semver.Version) {
	ok, err := KubectlVersionGTE(v)
	if err != nil {
		Failf("Failed to get kubectl version: %v", err)
	}
	if !ok {
		Skipf("Not supported for kubectl versions before %q", v)
	}
}
// KubectlVersionGTE returns true if the kubectl version is greater than or
// equal to v.
func KubectlVersionGTE(v semver.Version) (bool, error) {
	current, err := KubectlVersion()
	if err != nil {
		return false, err
	}
	return current.GTE(v), nil
}
// KubectlVersion gets the version of kubectl that's currently being used (see
// --kubectl-path in e2e.go to use an alternate kubectl).
func KubectlVersion() (semver.Version, error) {
	out := RunKubectlOrDie("version", "--client")
	m := gitVersionRegexp.FindStringSubmatch(out)
	if len(m) != 2 {
		return semver.Version{}, fmt.Errorf("Could not find kubectl version in output %v", out)
	}
	// Don't use the full match, as it contains "GitVersion:\"" and a
	// trailing "\"". Just use the submatch.
	return version.Parse(m[1])
}
// PodsResponding polls (at interval Poll, up to podRespondingTimeout) until
// every pod in pods answers a proxied GET as checked by
// podProxyResponseChecker.CheckAllResponses. wantName requires each pod to
// respond with its own name.
func PodsResponding(c *client.Client, ns, name string, wantName bool, pods *api.PodList) error {
	By("trying to dial each unique pod")
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
}
// PodsCreated waits up to two minutes until exactly `replicas` pods labeled
// name=<name> and not marked for deletion exist in ns, and returns that list.
func PodsCreated(c *client.Client, ns, name string, replicas int32) (*api.PodList, error) {
	timeout := 2 * time.Minute
	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		pods, err := c.Pods(ns).List(api.ListOptions{LabelSelector: label})
		if err != nil {
			return nil, err
		}
		// Ignore pods that are already being torn down.
		var alive []api.Pod
		for _, p := range pods.Items {
			if p.DeletionTimestamp == nil {
				alive = append(alive, p)
			}
		}
		Logf("Pod name %s: Found %d pods out of %d", name, len(alive), replicas)
		if int32(len(alive)) == replicas {
			pods.Items = alive
			return pods, nil
		}
	}
	return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
// podsRunning waits in parallel for every pod in pods to reach the running
// state and returns one error per pod that failed to get there (empty slice
// when all succeeded).
func podsRunning(c *client.Client, pods *api.PodList) []error {
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	By("ensuring each pod is running")
	e := []error{}
	// errCh (was error_chan: Go identifiers use MixedCaps, not underscores).
	// Buffered so no sender goroutine can ever block.
	errCh := make(chan error, len(pods.Items))
	for _, pod := range pods.Items {
		// Pass the pod by value: the range variable is reused per iteration.
		go func(p api.Pod) {
			errCh <- WaitForPodRunningInNamespace(c, &p)
		}(pod)
	}
	// Collect exactly one result per pod.
	for range pods.Items {
		if err := <-errCh; err != nil {
			e = append(e, err)
		}
	}
	return e
}
// VerifyPods checks that `replicas` pods named <name> in namespace ns are
// created, running, and responding via the proxy.
func VerifyPods(c *client.Client, ns, name string, wantName bool, replicas int32) error {
	pods, err := PodsCreated(c, ns, name, replicas)
	if err != nil {
		return err
	}
	if errs := podsRunning(c, pods); len(errs) > 0 {
		return fmt.Errorf("failed to wait for pods running: %v", errs)
	}
	if err := PodsResponding(c, ns, name, wantName, pods); err != nil {
		return fmt.Errorf("failed to wait for pods responding: %v", err)
	}
	return nil
}
// ServiceResponding polls until a GET for service ns/name through the
// services proxy returns a non-empty body.
func ServiceResponding(c *client.Client, ns, name string) error {
	By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
	return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) {
		proxyRequest, errProxy := GetServicesProxyRequest(c, c.Get())
		if errProxy != nil {
			Logf("Failed to get services proxy request: %v:", errProxy)
			return false, nil
		}
		body, err := proxyRequest.Namespace(ns).
			Name(name).
			Do().
			Raw()
		if err != nil {
			Logf("Failed to GET from service %s: %v:", name, err)
			return false, nil
		}
		got := string(body)
		if len(got) == 0 {
			Logf("Service %s: expected non-empty response", name)
			// Keep polling. The original said "stop polling" and returned
			// `false, err`, but err is provably nil at this point, so the
			// poll in fact continued; make that intent explicit.
			return false, nil
		}
		Logf("Service %s: found nonempty answer: %s", name, got)
		return true, nil
	})
}
// restclientConfig loads the kubeconfig file named by TestContext.KubeConfig
// and, when kubeContext is non-empty, switches the current context to it.
func restclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
	Logf(">>> kubeConfig: %s\n", TestContext.KubeConfig)
	if TestContext.KubeConfig == "" {
		return nil, fmt.Errorf("KubeConfig must be specified to load client config")
	}
	cfg, err := clientcmd.LoadFromFile(TestContext.KubeConfig)
	if err != nil {
		return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error())
	}
	if kubeContext != "" {
		Logf(">>> kubeContext: %s\n", kubeContext)
		cfg.CurrentContext = kubeContext
	}
	return cfg, nil
}
// ClientConfigGetter is a func that produces a rest client config
// (e.g. LoadConfig).
type ClientConfigGetter func() (*restclient.Config, error)
// LoadConfig builds a rest client config from the test context. When NodeName
// is set (node e2e), it talks straight to the host; otherwise it derives the
// config from the kubeconfig file with the context's server override applied.
func LoadConfig() (*restclient.Config, error) {
	if TestContext.NodeName != "" {
		// This is a node e2e test, apply the node e2e configuration
		return &restclient.Config{Host: TestContext.Host}, nil
	}
	cfg, err := restclientConfig(TestContext.KubeContext)
	if err != nil {
		return nil, err
	}
	overrides := &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}
	return clientcmd.NewDefaultClientConfig(*cfg, overrides).ClientConfig()
}
// LoadFederatedConfig builds a rest client config for the federation
// apiserver from the federation kube context, applying the given overrides.
// NOTE: cfg is patched (NegotiatedSerializer) even when ClientConfig
// returned an error, and is returned alongside that error — callers may
// rely on the partially-built config, so the ordering here is deliberate.
func LoadFederatedConfig(overrides *clientcmd.ConfigOverrides) (*restclient.Config, error) {
	c, err := restclientConfig(federatedKubeContext)
	if err != nil {
		return nil, fmt.Errorf("error creating federation client config: %v", err.Error())
	}
	cfg, err := clientcmd.NewDefaultClientConfig(*c, overrides).ClientConfig()
	if cfg != nil {
		//TODO(colhom): this is only here because https://github.com/kubernetes/kubernetes/issues/25422
		cfg.NegotiatedSerializer = api.Codecs
	}
	if err != nil {
		return cfg, fmt.Errorf("error creating federation client config: %v", err.Error())
	}
	return cfg, nil
}
// loadClientFromConfig builds a client from config, defaulting its HTTP
// timeout to SingleCallTimeout when none is set.
func loadClientFromConfig(config *restclient.Config) (*client.Client, error) {
	cl, err := client.New(config)
	if err != nil {
		return nil, fmt.Errorf("error creating client: %v", err.Error())
	}
	if cl.Client.Timeout == 0 {
		cl.Client.Timeout = SingleCallTimeout
	}
	return cl, nil
}
// setTimeouts applies SingleCallTimeout to every given client that does not
// already have a timeout configured.
func setTimeouts(cs ...*http.Client) {
	for _, c := range cs {
		if c.Timeout == 0 {
			c.Timeout = SingleCallTimeout
		}
	}
}
// LoadFederationClientset_1_4 builds a 1.4 federation clientset from the
// federated config, with SingleCallTimeout applied to every sub-client.
func LoadFederationClientset_1_4() (*federation_release_1_4.Clientset, error) {
	config, err := LoadFederatedConfig(&clientcmd.ConfigOverrides{})
	if err != nil {
		return nil, err
	}
	clientset, err := federation_release_1_4.NewForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("error creating federation clientset: %v", err.Error())
	}
	// Set timeout for each client in the set.
	setTimeouts(clientset.DiscoveryClient.Client, clientset.FederationClient.Client, clientset.CoreClient.Client, clientset.ExtensionsClient.Client)
	return clientset, nil
}
// LoadClient builds a client from the test-context configuration.
func LoadClient() (*client.Client, error) {
	config, err := LoadConfig()
	if err != nil {
		return nil, fmt.Errorf("error creating client: %v", err.Error())
	}
	return loadClientFromConfig(config)
}
// randomSuffix provides a random string to append to pods,services,rcs.
// TODO: Allow service names to have the same form as names
//       for pods and replication controllers so we don't
//       need to use such a function and can instead
//       use the UUID utility function.
func randomSuffix() string {
	// Seed a private source from the clock so concurrent tests don't share
	// RNG state; the suffix is a decimal string in [0, 10000).
	src := rand.NewSource(time.Now().UnixNano())
	return strconv.Itoa(rand.New(src).Int() % 10000)
}
// ExpectNoError logs and then fails the current test (via gomega, with the
// optional explanation) when err is non-nil. The offset of 1 attributes the
// failure to the caller rather than to this helper.
func ExpectNoError(err error, explain ...interface{}) {
	if err != nil {
		Logf("Unexpected error occurred: %v", err)
	}
	ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
// ExpectNoErrorWithRetries calls fn up to maxRetries times and returns as
// soon as one attempt succeeds; if every attempt errors, the last error
// fails the current test.
func ExpectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
	var err error
	for attempt := 1; attempt <= maxRetries; attempt++ {
		if err = fn(); err == nil {
			return
		}
		Logf("(Attempt %d of %d) Unexpected error occurred: %v", attempt, maxRetries, err)
	}
	ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
func Cleanup(filePath, ns string, selectors ...string) {
	By("using delete to clean up resources")
	nsArg := ""
	if ns != "" {
		nsArg = fmt.Sprintf("--namespace=%s", ns)
	}
	RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg)
	AssertCleanup(ns, selectors...)
}
// Asserts that cleanup of a namespace wrt selectors occurred: no rc/svc and
// no non-terminating pods may remain for any selector.
func AssertCleanup(ns string, selectors ...string) {
	nsArg := ""
	if ns != "" {
		nsArg = fmt.Sprintf("--namespace=%s", ns)
	}
	for _, selector := range selectors {
		if resources := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg); resources != "" {
			Failf("Resources left running after stop:\n%s", resources)
		}
		// List only pods that are not already being deleted.
		pods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
		if pods != "" {
			Failf("Pods left unterminated after stop:\n%s", pods)
		}
	}
}
// validatorFn is the function which individual tests will implement.
// It is given a client and the ID of a pod to validate.
// we may want it to return more than just an error, at some point.
type validatorFn func(c *client.Client, podID string) error
// ValidateController is a generic mechanism for testing RC's that are running.
// It takes a container name, a test name, and a validator function which is plugged in by a specific test.
// "containername": this is grepped for.
// "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated.
// "testname": which gets bubbled up to the logging/failure messages if errors happen.
// "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
func ValidateController(c *client.Client, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
	// Template that prints only pod names, space-separated.
	getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
	// NB: kubectl adds the "exists" function to the standard template functions.
	// This lets us check to see if the "running" entry exists for each of the containers
	// we care about. Exists will never return an error and it's safe to check a chain of
	// things, any one of which may not exist. In the below template, all of info,
	// containername, and running might be nil, so the normal index function isn't very
	// helpful.
	// This template is unit-tested in kubectl, so if you change it, update the unit test.
	// You can read about the syntax here: http://golang.org/pkg/text/template/.
	getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)
	getImageTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)
	By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
waitLoop:
	// Poll every 5s until PodStartTimeout; any pod failing a check restarts
	// the whole pass via `continue waitLoop`.
	for start := time.Now(); time.Since(start) < PodStartTimeout; time.Sleep(5 * time.Second) {
		getPodsOutput := RunKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "-l", testname, fmt.Sprintf("--namespace=%v", ns))
		pods := strings.Fields(getPodsOutput)
		if numPods := len(pods); numPods != replicas {
			By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
			continue
		}
		var runningPods []string
		for _, podID := range pods {
			// The named container must report a running state...
			running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns))
			if running != "true" {
				Logf("%s is created but not running", podID)
				continue waitLoop
			}
			// ...and be running the expected image...
			currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
			if currentImage != containerImage {
				Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
				continue waitLoop
			}
			// Call the generic validator function here.
			// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
			if err := validator(c, podID); err != nil {
				Logf("%s is running right image but validator function failed: %v", podID, err)
				continue waitLoop
			}
			Logf("%s is verified up and running", podID)
			runningPods = append(runningPods, podID)
		}
		// If we reach here, then all our checks passed.
		if len(runningPods) == replicas {
			return
		}
	}
	// Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken.
	Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
}
// KubectlCmd runs the kubectl executable through the wrapper script.
func KubectlCmd(args ...string) *exec.Cmd {
	defaultArgs := []string{}
	// Reference a --server option so tests can run anywhere.
	if TestContext.Host != "" {
		defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+TestContext.Host)
	}
	switch {
	case TestContext.KubeConfig != "":
		defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
		// Reference the KubeContext
		if TestContext.KubeContext != "" {
			defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+TestContext.KubeContext)
		}
	case TestContext.CertDir != "":
		defaultArgs = append(defaultArgs,
			fmt.Sprintf("--certificate-authority=%s", filepath.Join(TestContext.CertDir, "ca.crt")),
			fmt.Sprintf("--client-certificate=%s", filepath.Join(TestContext.CertDir, "kubecfg.crt")),
			fmt.Sprintf("--client-key=%s", filepath.Join(TestContext.CertDir, "kubecfg.key")))
	}
	kubectlArgs := append(defaultArgs, args...)
	//We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh"
	//and so on.
	//caller will invoke this and wait on it.
	return exec.Command(TestContext.KubectlPath, kubectlArgs...)
}
// kubectlBuilder is used to build, customize and execute a kubectl Command.
// Add more functions to customize the builder as needed.
type kubectlBuilder struct {
	cmd     *exec.Cmd        // the kubectl invocation to run
	timeout <-chan time.Time // optional; when it fires, Exec kills the process
}
// NewKubectlCommand returns a builder wrapping a kubectl invocation with the
// given arguments.
func NewKubectlCommand(args ...string) *kubectlBuilder {
	return &kubectlBuilder{cmd: KubectlCmd(args...)}
}
// WithEnv sets the environment of the kubectl process and returns the builder
// for chaining.
func (b *kubectlBuilder) WithEnv(env []string) *kubectlBuilder {
	b.cmd.Env = env
	return b
}
// WithTimeout arms the builder with a timeout channel; if it fires before the
// command exits, Exec kills the process. Returns the builder for chaining.
func (b *kubectlBuilder) WithTimeout(t <-chan time.Time) *kubectlBuilder {
	b.timeout = t
	return b
}
// WithStdinData feeds data to the command's stdin. Note the value receiver:
// the builder is copied, so the returned pointer refers to a fresh copy and
// the original builder is left untouched.
func (b kubectlBuilder) WithStdinData(data string) *kubectlBuilder {
	b.cmd.Stdin = strings.NewReader(data)
	return &b
}
// WithStdinReader wires reader to the command's stdin. As with WithStdinData,
// the value receiver means the returned pointer refers to a copy.
func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder {
	b.cmd.Stdin = reader
	return &b
}
// ExecOrDie runs the command and returns its stdout, failing the current test
// on any error. An i/o timeout triggers a single diagnostic "kubectl version"
// probe before the test dies.
func (b kubectlBuilder) ExecOrDie() string {
	out, err := b.Exec()
	Logf("stdout: %q", out)
	// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
	// Note that we're still dying after retrying so that we can get visibility to triage it further.
	if isTimeout(err) {
		Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
		time.Sleep(2 * time.Second)
		retryStr, retryErr := RunKubectl("version")
		Logf("stdout: %q", retryStr)
		Logf("err: %v", retryErr)
	}
	Expect(err).NotTo(HaveOccurred())
	return out
}
func isTimeout(err error) bool {
switch err := err.(type) {
case net.Error:
if err.Timeout() {
return true
}
case *url.Error:
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
return true
}
}
return false
}
// Exec starts the command, waits for it (subject to the optional timeout
// channel), and returns its stdout. On a non-zero exit it returns a
// uexec.CodeExitError carrying the exit code, defaulting to 127 when the
// code cannot be recovered from the error.
func (b kubectlBuilder) Exec() (string, error) {
	var stdout, stderr bytes.Buffer
	cmd := b.cmd
	cmd.Stdout, cmd.Stderr = &stdout, &stderr
	Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
	if err := cmd.Start(); err != nil {
		return "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err)
	}
	errCh := make(chan error, 1)
	go func() {
		errCh <- cmd.Wait()
	}()
	select {
	case err := <-errCh:
		if err != nil {
			// 127 ("command not found") is the fallback exit code.
			rc := 127
			if ee, ok := err.(*exec.ExitError); ok {
				rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
				// Log the real exit code. (The original logged rc *before*
				// assigning it, so it always printed 127.)
				Logf("rc: %d", rc)
			}
			return "", uexec.CodeExitError{
				Err:  fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err),
				Code: rc,
			}
		}
	case <-b.timeout:
		b.cmd.Process.Kill()
		return "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr)
	}
	Logf("stderr: %q", stderr.String())
	return stdout.String(), nil
}
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
func RunKubectlOrDie(args ...string) string {
	b := NewKubectlCommand(args...)
	return b.ExecOrDie()
}
// RunKubectl is a convenience wrapper over kubectlBuilder
func RunKubectl(args ...string) (string, error) {
	b := NewKubectlCommand(args...)
	return b.Exec()
}
// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func RunKubectlOrDieInput(data string, args ...string) string {
	b := NewKubectlCommand(args...).WithStdinData(data)
	return b.ExecOrDie()
}
// StartCmdAndStreamOutput starts cmd and hands back read pipes for its stdout
// and stderr. The caller is responsible for waiting on / killing the process
// and for closing the pipes.
func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
	if stdout, err = cmd.StdoutPipe(); err != nil {
		return
	}
	if stderr, err = cmd.StderrPipe(); err != nil {
		return
	}
	Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))
	err = cmd.Start()
	return
}
// Rough equivalent of ctrl+c for cleaning up processes. Intended to be run in defer.
// A failed kill is only logged (the process may leak), never fatal, so this
// is safe to call during teardown.
func TryKill(cmd *exec.Cmd) {
	if err := cmd.Process.Kill(); err != nil {
		Logf("ERROR failed to kill command %v! The process may leak", cmd)
	}
}
// testContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func (f *Framework) testContainerOutputMatcher(scenarioName string,
	pod *api.Pod,
	containerIndex int,
	expectedOutput []string,
	matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
	By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
	if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
		Failf("Invalid container index: %d", containerIndex)
	}
	containerName := pod.Spec.Containers[containerIndex].Name
	ExpectNoError(f.MatchContainerOutput(pod, containerName, expectedOutput, matcher))
}
// MatchContainerOutput creates a pod and waits for all it's containers to exit with success.
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
// The pod is deleted (grace period 0) on the way out regardless of outcome.
func (f *Framework) MatchContainerOutput(
	pod *api.Pod,
	containerName string,
	expectedOutput []string,
	matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
	podClient := f.PodClient()
	ns := f.Namespace.Name
	defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	podClient.Create(pod)
	// Wait for client pod to complete. All containers should succeed.
	for _, container := range pod.Spec.Containers {
		if err := WaitForPodSuccessInNamespace(f.Client, pod.Name, container.Name, ns); err != nil {
			return fmt.Errorf("expected container %s success: %v", container.Name, err)
		}
	}
	// Grab its logs. Get host first.
	podStatus, err := podClient.Get(pod.Name)
	if err != nil {
		return fmt.Errorf("failed to get pod status: %v", err)
	}
	// (The original appended ": %v" with err here, but err is always nil at
	// this point, so it only ever printed "<nil>".)
	Logf("Trying to get logs from node %s pod %s container %s",
		podStatus.Spec.NodeName, podStatus.Name, containerName)
	// Sometimes the actual containers take a second to get started, try to get logs for 60s
	logs, err := GetPodLogs(f.Client, ns, pod.Name, containerName)
	if err != nil {
		Logf("Failed to get logs from node %q pod %q container %q. %v",
			podStatus.Spec.NodeName, podStatus.Name, containerName, err)
		return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
	}
	// Every expected entry must be matched against the container log.
	for _, expected := range expectedOutput {
		m := matcher(expected)
		matches, err := m.Match(logs)
		if err != nil {
			return fmt.Errorf("expected %q in container output: %v", expected, err)
		} else if !matches {
			return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
		}
	}
	return nil
}
// podInfo contains pod information useful for debugging e2e tests.
type podInfo struct {
	oldHostname string // node in the previous snapshot (nonExist if the pod was absent)
	oldPhase    string // phase in the previous snapshot (nonExist if the pod was absent)
	hostname    string // node in the current snapshot (nonExist if deleted)
	phase       string // phase in the current snapshot (nonExist if deleted)
}
// PodDiff is a map of pod name to podInfos; build it with Diff and render it
// with Print.
type PodDiff map[string]*podInfo
// Print formats and prints the give PodDiff.
func (p PodDiff) Print(ignorePhases sets.String) {
	for name, info := range p {
		if ignorePhases.Has(info.phase) {
			continue
		}
		// A current phase of nonExist means the pod disappeared entirely.
		if info.phase == nonExist {
			Logf("Pod %v was deleted, had phase %v and host %v", name, info.oldPhase, info.oldHostname)
			continue
		}
		msg := fmt.Sprintf("Pod %v ", name)
		changed := false
		if info.oldPhase != info.phase {
			changed = true
			if info.oldPhase == nonExist {
				msg += fmt.Sprintf("in phase %v ", info.phase)
			} else {
				msg += fmt.Sprintf("went from phase: %v -> %v ", info.oldPhase, info.phase)
			}
		}
		if info.oldHostname != info.hostname {
			changed = true
			if info.oldHostname == nonExist || info.oldHostname == "" {
				msg += fmt.Sprintf("assigned host %v ", info.hostname)
			} else {
				msg += fmt.Sprintf("went from host: %v -> %v ", info.oldHostname, info.hostname)
			}
		}
		// Only emit a line when something actually changed.
		if changed {
			Logf(msg)
		}
	}
}
// Diff computes a PodDiff given 2 lists of pods.
func Diff(oldPods []*api.Pod, curPods []*api.Pod) PodDiff {
	diff := PodDiff{}
	// New pods will show up in the curPods list but not in oldPods. They have oldhostname/phase == nonexist.
	for _, pod := range curPods {
		diff[pod.Name] = &podInfo{
			hostname:    pod.Spec.NodeName,
			phase:       string(pod.Status.Phase),
			oldHostname: nonExist,
			oldPhase:    nonExist,
		}
	}
	// Deleted pods will show up in the oldPods list but not in curPods. They have a hostname/phase == nonexist.
	for _, pod := range oldPods {
		info, ok := diff[pod.Name]
		if !ok {
			info = &podInfo{hostname: nonExist, phase: nonExist}
			diff[pod.Name] = info
		}
		info.oldHostname = pod.Spec.NodeName
		info.oldPhase = string(pod.Status.Phase)
	}
	return diff
}
// RunDeployment Launches (and verifies correctness) of a Deployment
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunDeployment(config DeploymentConfig) error {
	if err := config.create(); err != nil {
		return err
	}
	return config.start()
}
// create builds the Deployment described by config — name, replica count,
// image, command, a name=<Name> label selector, and a single container
// exposing port 80 — applies the optional config settings to the pod
// template, and creates it via the config's client.
func (config *DeploymentConfig) create() error {
	By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
	deployment := &extensions.Deployment{
		ObjectMeta: api.ObjectMeta{
			Name: config.Name,
		},
		Spec: extensions.DeploymentSpec{
			Replicas: int32(config.Replicas),
			// Select pods by the same name label stamped on the template below.
			Selector: &unversioned.LabelSelector{
				MatchLabels: map[string]string{
					"name": config.Name,
				},
			},
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": config.Name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:    config.Name,
							Image:   config.Image,
							Command: config.Command,
							Ports:   []api.ContainerPort{{ContainerPort: 80}},
						},
					},
				},
			},
		},
	}
	// Layer on env, labels, ports, resources, volumes, etc. from config.
	config.applyTo(&deployment.Spec.Template)
	_, err := config.Client.Deployments(config.Namespace).Create(deployment)
	if err != nil {
		return fmt.Errorf("Error creating deployment: %v", err)
	}
	Logf("Created deployment with name: %v, namespace: %v, replica count: %v", deployment.Name, config.Namespace, deployment.Spec.Replicas)
	return nil
}
// RunReplicaSet launches (and verifies correctness) of a ReplicaSet
// and waits until all the pods it launches to reach the "Running" state.
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunReplicaSet(config ReplicaSetConfig) error {
	if err := config.create(); err != nil {
		return err
	}
	return config.start()
}
// create builds the ReplicaSet described by config — name, replica count,
// image, command, a name=<Name> label selector, and a single container
// exposing port 80 — applies the optional config settings to the pod
// template, and creates it via the config's client.
func (config *ReplicaSetConfig) create() error {
	By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
	rs := &extensions.ReplicaSet{
		ObjectMeta: api.ObjectMeta{
			Name: config.Name,
		},
		Spec: extensions.ReplicaSetSpec{
			Replicas: int32(config.Replicas),
			// Select pods by the same name label stamped on the template below.
			Selector: &unversioned.LabelSelector{
				MatchLabels: map[string]string{
					"name": config.Name,
				},
			},
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": config.Name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:    config.Name,
							Image:   config.Image,
							Command: config.Command,
							Ports:   []api.ContainerPort{{ContainerPort: 80}},
						},
					},
				},
			},
		},
	}
	// Layer on env, labels, ports, resources, volumes, etc. from config.
	config.applyTo(&rs.Spec.Template)
	_, err := config.Client.ReplicaSets(config.Namespace).Create(rs)
	if err != nil {
		return fmt.Errorf("Error creating replica set: %v", err)
	}
	Logf("Created replica set with name: %v, namespace: %v, replica count: %v", rs.Name, config.Namespace, rs.Spec.Replicas)
	return nil
}
// RunRC Launches (and verifies correctness) of a Replication Controller
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunRC(config RCConfig) error {
	if err := config.create(); err != nil {
		return err
	}
	return config.start()
}
// create builds the ReplicationController described by config and creates it
// via the config's client. A nil DNSPolicy is defaulted to DNSDefault
// (note: this mutates config).
func (config *RCConfig) create() error {
	By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
	dnsDefault := api.DNSDefault
	if config.DNSPolicy == nil {
		config.DNSPolicy = &dnsDefault
	}
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: config.Name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: int32(config.Replicas),
			// Select pods by the same name label stamped on the template below.
			Selector: map[string]string{
				"name": config.Name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": config.Name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:           config.Name,
							Image:          config.Image,
							Command:        config.Command,
							Ports:          []api.ContainerPort{{ContainerPort: 80}},
							ReadinessProbe: config.ReadinessProbe,
						},
					},
					DNSPolicy:    *config.DNSPolicy,
					NodeSelector: config.NodeSelector,
				},
			},
		},
	}
	// Layer on env, labels, ports, resources, volumes, etc. from config.
	config.applyTo(rc.Spec.Template)
	_, err := config.Client.ReplicationControllers(config.Namespace).Create(rc)
	if err != nil {
		return fmt.Errorf("Error creating replication controller: %v", err)
	}
	Logf("Created replication controller with name: %v, namespace: %v, replica count: %v", rc.Name, config.Namespace, rc.Spec.Replicas)
	return nil
}
// applyTo copies the optional settings from config — env vars, labels, node
// selector, ports, resource limits/requests, volumes — onto the pod template.
// All per-container settings target the template's first container.
func (config *RCConfig) applyTo(template *api.PodTemplateSpec) {
	// The first container receives every per-container setting.
	container := &template.Spec.Containers[0]
	// Ranging over a nil map is a no-op, so no nil guards are needed here.
	for k, v := range config.Env {
		container.Env = append(container.Env, api.EnvVar{Name: k, Value: v})
	}
	for k, v := range config.Labels {
		template.ObjectMeta.Labels[k] = v
	}
	if config.NodeSelector != nil {
		// Copy rather than alias the caller's map.
		template.Spec.NodeSelector = make(map[string]string)
		for k, v := range config.NodeSelector {
			template.Spec.NodeSelector[k] = v
		}
	}
	for k, v := range config.Ports {
		container.Ports = append(container.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v)})
	}
	for k, v := range config.HostPorts {
		container.Ports = append(container.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)})
	}
	if config.CpuLimit > 0 || config.MemLimit > 0 {
		container.Resources.Limits = api.ResourceList{}
	}
	if config.CpuLimit > 0 {
		container.Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI)
	}
	if config.MemLimit > 0 {
		container.Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
	}
	if config.CpuRequest > 0 || config.MemRequest > 0 {
		container.Resources.Requests = api.ResourceList{}
	}
	if config.CpuRequest > 0 {
		container.Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
	}
	if config.MemRequest > 0 {
		container.Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
	}
	if len(config.Volumes) > 0 {
		template.Spec.Volumes = config.Volumes
	}
	if len(config.VolumeMounts) > 0 {
		container.VolumeMounts = config.VolumeMounts
	}
}
// RCStartupStatus is a snapshot of how far a replication controller's pods
// have progressed toward running, as computed by ComputeRCStartupStatus.
type RCStartupStatus struct {
	Expected              int        // replica count we are waiting for
	Terminating           int        // pods with a deletion timestamp
	Running               int        // running AND ready pods
	RunningButNotReady    int        // running pods whose ready condition is not true
	Waiting               int        // pending pods not yet scheduled to a node
	Pending               int        // pending pods already scheduled to a node
	Unknown               int        // pods in PodUnknown phase
	Inactive              int        // pods that succeeded or failed
	FailedContainers      int        // total container restarts across failed containers
	Created               []*api.Pod // all non-terminating pods observed
	ContainerRestartNodes sets.String // nodes hosting pods with failed containers
}
// Print logs a one-line summary of the startup status, prefixed with name
// (typically the RC name).
func (s *RCStartupStatus) Print(name string) {
	Logf("%v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
		name, len(s.Created), s.Expected, s.Running, s.Pending, s.Waiting, s.Inactive, s.Terminating, s.Unknown, s.RunningButNotReady)
}
// ComputeRCStartupStatus classifies pods by phase/readiness into an
// RCStartupStatus snapshot, excluding terminating pods from Created.
func ComputeRCStartupStatus(pods []*api.Pod, expected int) RCStartupStatus {
	status := RCStartupStatus{
		Expected:              expected,
		Created:               make([]*api.Pod, 0, expected),
		ContainerRestartNodes: sets.NewString(),
	}
	for _, p := range pods {
		if p.DeletionTimestamp != nil {
			status.Terminating++
			continue
		}
		status.Created = append(status.Created, p)
		switch p.Status.Phase {
		case api.PodRunning:
			ready := false
			for _, c := range p.Status.Conditions {
				if c.Type == api.PodReady && c.Status == api.ConditionTrue {
					ready = true
					break
				}
			}
			// Only count a pod is running when it is also ready.
			if ready {
				status.Running++
			} else {
				status.RunningButNotReady++
			}
			// Track restarts and the nodes they happened on.
			for _, v := range FailedContainers(p) {
				status.FailedContainers += v.Restarts
				status.ContainerRestartNodes.Insert(p.Spec.NodeName)
			}
		case api.PodPending:
			// Unscheduled pods are "waiting"; scheduled-but-pending are "pending".
			if p.Spec.NodeName == "" {
				status.Waiting++
			} else {
				status.Pending++
			}
		case api.PodSucceeded, api.PodFailed:
			status.Inactive++
		case api.PodUnknown:
			status.Unknown++
		}
	}
	return status
}
// start polls the pod store until the config's replication controller has
// exactly config.Replicas running-and-ready pods. It fails when container
// failures exceed the allowed budget, when the set of reported pods shrinks
// (or overshoots the replica count), or when no progress is made within the
// timeout; debug info is dumped before returning an error.
func (config *RCConfig) start() error {
	// Don't force tests to fail if they don't care about containers restarting.
	var maxContainerFailures int
	if config.MaxContainerFailures == nil {
		maxContainerFailures = int(math.Max(1.0, float64(config.Replicas)*.01))
	} else {
		maxContainerFailures = *config.MaxContainerFailures
	}
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
	// Local variable, so use Go's lowerCamelCase (was PodStore).
	podStore := NewPodStore(config.Client, config.Namespace, label, fields.Everything())
	defer podStore.Stop()
	// Default: poll every 10s, give up after 5m without progress.
	interval := config.PollInterval
	if interval <= 0 {
		interval = 10 * time.Second
	}
	timeout := config.Timeout
	if timeout <= 0 {
		timeout = 5 * time.Minute
	}
	oldPods := make([]*api.Pod, 0)
	oldRunning := 0
	lastChange := time.Now()
	for oldRunning != config.Replicas {
		time.Sleep(interval)
		pods := podStore.List()
		startupStatus := ComputeRCStartupStatus(pods, config.Replicas)
		pods = startupStatus.Created
		if config.CreatedPods != nil {
			*config.CreatedPods = pods
		}
		if !config.Silent {
			startupStatus.Print(config.Name)
		}
		promPushRunningPending(startupStatus.Running, startupStatus.Pending)
		if config.PodStatusFile != nil {
			fmt.Fprintf(config.PodStatusFile, "%d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown, %d, runningButNotReady\n", startupStatus.Running, startupStatus.Pending, startupStatus.Waiting, startupStatus.Inactive, startupStatus.Unknown, startupStatus.RunningButNotReady)
		}
		if startupStatus.FailedContainers > maxContainerFailures {
			DumpNodeDebugInfo(config.Client, startupStatus.ContainerRestartNodes.List())
			// Get the logs from the failed containers to help diagnose what caused them to fail
			LogFailedContainers(config.Client, config.Namespace)
			return fmt.Errorf("%d containers failed which is more than allowed %d", startupStatus.FailedContainers, maxContainerFailures)
		}
		if len(pods) < len(oldPods) || len(pods) > config.Replicas {
			// This failure mode includes:
			// kubelet is dead, so node controller deleted pods and rc creates more
			// - diagnose by noting the pod diff below.
			// pod is unhealthy, so replication controller creates another to take its place
			// - diagnose by comparing the previous "2 Pod states" lines for inactive pods
			errorStr := fmt.Sprintf("Number of reported pods for %s changed: %d vs %d", config.Name, len(pods), len(oldPods))
			Logf("%v, pods that changed since the last iteration:", errorStr)
			Diff(oldPods, pods).Print(sets.NewString())
			// Use an explicit verb: fmt.Errorf(errorStr) is a go vet printf
			// violation and mangles the message if it ever contains '%'.
			return fmt.Errorf("%s", errorStr)
		}
		// Any growth in pod count or running count counts as progress.
		if len(pods) > len(oldPods) || startupStatus.Running > oldRunning {
			lastChange = time.Now()
		}
		oldPods = pods
		oldRunning = startupStatus.Running
		if time.Since(lastChange) > timeout {
			dumpPodDebugInfo(config.Client, pods)
			break
		}
	}
	if oldRunning != config.Replicas {
		// List only pods from a given replication controller.
		options := api.ListOptions{LabelSelector: label}
		if pods, err := config.Client.Pods(api.NamespaceAll).List(options); err == nil {
			for _, pod := range pods.Items {
				Logf("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp)
			}
		} else {
			Logf("Can't list pod debug info: %v", err)
		}
		return fmt.Errorf("Only %d pods started out of %d", oldRunning, config.Replicas)
	}
	return nil
}
// Simplified version of RunRC, that does not create RC, but creates plain Pods.
// Optionally waits for pods to start running (if waitForRunning == true).
// The number of replicas must be non-zero.
func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string, pod api.Pod, waitForRunning bool) {
	// no pod to start
	if replicas < 1 {
		panic("StartPods: number of replicas must be non-zero")
	}
	startPodsID := string(uuid.NewUUID()) // So that we can label and find them
	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.ObjectMeta.Labels["startPodsID"] = startPodsID
		pod.Spec.Containers[0].Name = podName
		_, err := c.Pods(namespace).Create(&pod)
		ExpectNoError(err)
	}
	if waitForRunning {
		// Only log the wait when we actually wait (the original logged
		// "Waiting for running..." unconditionally, which was misleading
		// when waitForRunning was false).
		Logf("Waiting for running...")
		label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
		err := WaitForPodsWithLabelRunning(c, namespace, label)
		ExpectNoError(err, "Error waiting for %d pods to be running - probably a timeout", replicas)
	}
}
// dumpPodDebugInfo logs scheduling/phase info for every pod that is not
// running, then dumps node debug info for each node hosting such a pod.
func dumpPodDebugInfo(c *client.Client, pods []*api.Pod) {
	problemNodes := sets.NewString()
	for _, pod := range pods {
		if pod.Status.Phase == api.PodRunning {
			continue
		}
		if pod.Spec.NodeName == "" {
			Logf("Pod %v still unassigned", pod.Name)
			continue
		}
		Logf("Pod %v assigned to host %v (IP: %v) in %v", pod.Name, pod.Spec.NodeName, pod.Status.HostIP, pod.Status.Phase)
		problemNodes.Insert(pod.Spec.NodeName)
	}
	DumpNodeDebugInfo(c, problemNodes.List())
}
// DumpAllNamespaceInfo logs all events in the given namespace (sorted by
// first timestamp) and, when the cluster is small enough, dumps pod and
// node debug info as well.
func DumpAllNamespaceInfo(c *client.Client, namespace string) {
	By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
	events, err := c.Events(namespace).List(api.ListOptions{})
	Expect(err).NotTo(HaveOccurred())

	// Sort events by their first timestamp so the log reads chronologically.
	sortedEvents := events.Items
	if len(sortedEvents) > 1 {
		sort.Sort(byFirstTimestamp(sortedEvents))
	}
	for _, e := range sortedEvents {
		Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
	}
	// Note that we don't wait for any Cleanup to propagate, which means
	// that if you delete a bunch of pods right before ending your test,
	// you may or may not see the killing/deletion/Cleanup events.

	// If the cluster is large, the following dumps are basically useless:
	// 1. it takes tens of minutes or hours to grab all of them
	// 2. there are so many of them that working with them is mostly impossible
	// So we dump them only if the cluster is relatively small.
	const maxNodesForDump = 20
	nodes, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		Logf("unable to fetch node list: %v", err)
		return
	}
	if len(nodes.Items) > maxNodesForDump {
		Logf("skipping dumping cluster info - cluster too large")
		return
	}
	dumpAllPodInfo(c)
	dumpAllNodeInfo(c)
}
// byFirstTimestamp sorts a slice of events by first timestamp, using their
// involvedObject's name as a tie breaker.
type byFirstTimestamp []api.Event

func (o byFirstTimestamp) Len() int      { return len(o) }
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

// Less orders events chronologically; events with equal timestamps fall
// back to the involved object's name so the ordering is deterministic.
func (o byFirstTimestamp) Less(i, j int) bool {
	if !o[i].FirstTimestamp.Equal(o[j].FirstTimestamp) {
		return o[i].FirstTimestamp.Before(o[j].FirstTimestamp)
	}
	return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
}
// dumpAllPodInfo logs the state of every pod in the cluster.
func dumpAllPodInfo(c *client.Client) {
	pods, err := c.Pods("").List(api.ListOptions{})
	if err != nil {
		// Fix: return after logging; previously execution fell through and
		// dereferenced the nil pod list below, panicking on a failed List.
		Logf("unable to fetch pod debug info: %v", err)
		return
	}
	logPodStates(pods.Items)
}
// dumpAllNodeInfo dumps debug info for every node in the cluster.
func dumpAllNodeInfo(c *client.Client) {
	// It should be OK to list unschedulable Nodes here.
	nodes, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		Logf("unable to fetch node list: %v", err)
		return
	}
	names := make([]string, 0, len(nodes.Items))
	for i := range nodes.Items {
		names = append(names, nodes.Items[i].Name)
	}
	DumpNodeDebugInfo(c, names)
}
// DumpNodeDebugInfo logs debug information for each named node: the node
// object itself, its kubelet events, the pods the kubelet believes are on
// it (with container restart counts), and high-latency kubelet operations.
func DumpNodeDebugInfo(c *client.Client, nodeNames []string) {
	for _, n := range nodeNames {
		Logf("\nLogging node info for node %v", n)
		node, err := c.Nodes().Get(n)
		if err != nil {
			// Best-effort: log and continue; `node` may be nil below, which
			// %v formatting tolerates.
			Logf("Error getting node info %v", err)
		}
		Logf("Node Info: %v", node)
		Logf("\nLogging kubelet events for node %v", n)
		for _, e := range getNodeEvents(c, n) {
			Logf("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
				e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
		}
		Logf("\nLogging pods the kubelet thinks is on node %v", n)
		podList, err := GetKubeletPods(c, n)
		if err != nil {
			// Without the kubelet's pod list there is nothing more to dump
			// for this node.
			Logf("Unable to retrieve kubelet pods for node %v", n)
			continue
		}
		for _, p := range podList.Items {
			Logf("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
			for _, c := range p.Status.InitContainerStatuses {
				Logf("\tInit container %v ready: %v, restart count %v",
					c.Name, c.Ready, c.RestartCount)
			}
			for _, c := range p.Status.ContainerStatuses {
				Logf("\tContainer %v ready: %v, restart count %v",
					c.Name, c.Ready, c.RestartCount)
			}
		}
		HighLatencyKubeletOperations(c, 10*time.Second, n)
		// TODO: Log node resource info
	}
}
// getNodeEvents returns kubelet events for the given node. This includes
// kubelet restart and node unhealthy events. Note that listing events like
// this will mess with latency metrics, beware of calling it during a test.
// (Previous comment misnamed this "logNodeEvents"; it returns events
// rather than logging them.)
func getNodeEvents(c *client.Client, nodeName string) []api.Event {
	// Restrict to events emitted by the kubelet about this specific node.
	selector := fields.Set{
		"involvedObject.kind":      "Node",
		"involvedObject.name":      nodeName,
		"involvedObject.namespace": api.NamespaceAll,
		"source":                   "kubelet",
	}.AsSelector()
	options := api.ListOptions{FieldSelector: selector}
	events, err := c.Events(api.NamespaceSystem).List(options)
	if err != nil {
		// Best-effort helper: return an empty slice instead of failing.
		Logf("Unexpected error retrieving node events %v", err)
		return []api.Event{}
	}
	return events.Items
}
// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
	var (
		nodes *api.NodeList
		err   error
	)
	selector := fields.Set{"spec.unschedulable": "false"}.AsSelector()
	pollErr := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
		nodes, err = c.Nodes().List(api.ListOptions{FieldSelector: selector})
		// Any error is retried until the poll times out.
		return err == nil, nil
	})
	if pollErr != nil {
		ExpectNoError(err, "Timed out while listing nodes for e2e cluster.")
	}
	return nodes
}
// isNodeSchedulable reports whether a node can accept new pods. A node is
// schedulable if:
// 1) it doesn't have the "unschedulable" field set,
// 2) its Ready condition is set to true, and
// 3) it doesn't have the NetworkUnavailable condition set to true.
func isNodeSchedulable(node *api.Node) bool {
	ready := IsNodeConditionSetAsExpected(node, api.NodeReady, true)
	networkOK := IsNodeConditionUnset(node, api.NodeNetworkUnavailable) ||
		IsNodeConditionSetAsExpectedSilent(node, api.NodeNetworkUnavailable, false)
	schedulable := !node.Spec.Unschedulable
	return schedulable && ready && networkOK
}
// GetReadySchedulableNodesOrDie addresses the common use case of getting
// nodes you can do work on.
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
func GetReadySchedulableNodesOrDie(c *client.Client) (nodes *api.NodeList) {
	nodes = waitListSchedulableNodesOrDie(c)
	// Previous tests may have caused failures of some nodes. Skip any node
	// that is not ready/schedulable — no need to fail the test over it.
	FilterNodes(nodes, func(n api.Node) bool {
		return isNodeSchedulable(&n)
	})
	return nodes
}
// WaitForAllNodesSchedulable polls (every 30s, for up to 4h) until every
// node that isn't explicitly marked unschedulable reports as schedulable.
func WaitForAllNodesSchedulable(c *client.Client) error {
	return wait.PollImmediate(30*time.Second, 4*time.Hour, func() (bool, error) {
		opts := api.ListOptions{
			ResourceVersion: "0",
			FieldSelector:   fields.Set{"spec.unschedulable": "false"}.AsSelector(),
		}
		nodes, err := c.Nodes().List(opts)
		if err != nil {
			Logf("Unexpected error listing nodes: %v", err)
			// Ignore the error here - it will be retried.
			return false, nil
		}
		schedulable := 0
		for i := range nodes.Items {
			if isNodeSchedulable(&nodes.Items[i]) {
				schedulable++
			}
		}
		if schedulable != len(nodes.Items) {
			Logf("%d/%d nodes schedulable (polling after 30s)", schedulable, len(nodes.Items))
			return false, nil
		}
		return true, nil
	})
}
// AddOrUpdateLabelOnNode patches the named node to set labelKey=labelValue.
// Update conflicts are retried up to UpdateRetries times; any other error
// fails the test immediately.
func AddOrUpdateLabelOnNode(c *client.Client, nodeName string, labelKey string, labelValue string) {
	patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, labelKey, labelValue)
	var err error
	for attempt := 0; attempt < UpdateRetries; attempt++ {
		err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
		if err == nil {
			break
		}
		if !apierrs.IsConflict(err) {
			ExpectNoError(err)
		} else {
			Logf("Conflict when trying to add a label %v:%v to %v", labelKey, labelValue, nodeName)
		}
		time.Sleep(100 * time.Millisecond)
	}
	// Fails if the retries were exhausted without a successful patch.
	ExpectNoError(err)
}
// ExpectNodeHasLabel asserts that the named node carries labelKey=labelValue.
func ExpectNodeHasLabel(c *client.Client, nodeName string, labelKey string, labelValue string) {
	By("verifying the node has the label " + labelKey + " " + labelValue)
	node, err := c.Nodes().Get(nodeName)
	ExpectNoError(err)
	Expect(node.Labels[labelKey]).To(Equal(labelValue))
}
// RemoveLabelOffNode is for cleaning up labels temporarily added to node,
// won't fail if target label doesn't exist or has been removed.
func RemoveLabelOffNode(c *client.Client, nodeName string, labelKey string) {
	By("removing the label " + labelKey + " off the node " + nodeName)
	var nodeUpdated *api.Node
	var node *api.Node
	var err error
	// Retry the read-modify-write cycle on update conflicts.
	for attempt := 0; attempt < UpdateRetries; attempt++ {
		node, err = c.Nodes().Get(nodeName)
		ExpectNoError(err)
		// Label already absent: nothing to clean up.
		if node.Labels == nil || len(node.Labels[labelKey]) == 0 {
			return
		}
		delete(node.Labels, labelKey)
		nodeUpdated, err = c.Nodes().Update(node)
		if err != nil {
			if !apierrs.IsConflict(err) {
				ExpectNoError(err)
			} else {
				Logf("Conflict when trying to remove a label %v from %v", labelKey, nodeName)
			}
		} else {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	// If all attempts conflicted, err is non-nil and this fails the test
	// before nodeUpdated (possibly nil) is dereferenced below.
	ExpectNoError(err)
	By("verifying the node doesn't have the label " + labelKey)
	if nodeUpdated.Labels != nil && len(nodeUpdated.Labels[labelKey]) != 0 {
		Failf("Failed removing label " + labelKey + " of the node " + nodeName)
	}
}
// AddOrUpdateTaintOnNode adds the given taint to the node's taint annotation,
// replacing any existing taint that matches it. Update conflicts are retried
// up to UpdateRetries times; other errors fail the test.
func AddOrUpdateTaintOnNode(c *client.Client, nodeName string, taint api.Taint) {
	for attempt := 0; attempt < UpdateRetries; attempt++ {
		node, err := c.Nodes().Get(nodeName)
		ExpectNoError(err)
		// Taints in this API version are stored as a JSON annotation.
		nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
		ExpectNoError(err)
		var newTaints []api.Taint
		updated := false
		// Rebuild the taint list, replacing a matching taint in place.
		for _, existingTaint := range nodeTaints {
			if taint.MatchTaint(existingTaint) {
				newTaints = append(newTaints, taint)
				updated = true
				continue
			}
			newTaints = append(newTaints, existingTaint)
		}
		// No match found: append the new taint.
		if !updated {
			newTaints = append(newTaints, taint)
		}
		taintsData, err := json.Marshal(newTaints)
		ExpectNoError(err)
		if node.Annotations == nil {
			node.Annotations = make(map[string]string)
		}
		node.Annotations[api.TaintsAnnotationKey] = string(taintsData)
		_, err = c.Nodes().Update(node)
		if err != nil {
			if !apierrs.IsConflict(err) {
				ExpectNoError(err)
			} else {
				Logf("Conflict when trying to add/update taint %v to %v", taint, nodeName)
			}
		} else {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
}
// taintExists reports whether taintToFind matches any taint in the slice.
func taintExists(taints []api.Taint, taintToFind api.Taint) bool {
	for i := range taints {
		if taints[i].MatchTaint(taintToFind) {
			return true
		}
	}
	return false
}
// ExpectNodeHasTaint asserts that the named node carries the given taint.
func ExpectNodeHasTaint(c *client.Client, nodeName string, taint api.Taint) {
	By("verifying the node has the taint " + taint.ToString())
	node, err := c.Nodes().Get(nodeName)
	ExpectNoError(err)
	nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
	ExpectNoError(err)
	if len(nodeTaints) == 0 || !taintExists(nodeTaints, taint) {
		Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
	}
}
// deleteTaint returns a copy of oldTaints with taintToDelete removed. It
// returns an error if the taint was not present.
func deleteTaint(oldTaints []api.Taint, taintToDelete api.Taint) ([]api.Taint, error) {
	newTaints := []api.Taint{}
	found := false
	for _, oldTaint := range oldTaints {
		if oldTaint.MatchTaint(taintToDelete) {
			found = true
			continue
		}
		// Fix: keep the existing taint. The previous code appended
		// taintToDelete here, replacing every retained taint with a copy
		// of the taint that was supposed to be removed.
		newTaints = append(newTaints, oldTaint)
	}
	if !found {
		return nil, fmt.Errorf("taint %s not found.", taintToDelete.ToString())
	}
	return newTaints, nil
}
// RemoveTaintOffNode is for cleaning up taints temporarily added to node,
// won't fail if target taint doesn't exist or has been removed.
func RemoveTaintOffNode(c *client.Client, nodeName string, taint api.Taint) {
	By("removing the taint " + taint.ToString() + " off the node " + nodeName)
	// Retry the read-modify-write cycle on update conflicts.
	for attempt := 0; attempt < UpdateRetries; attempt++ {
		node, err := c.Nodes().Get(nodeName)
		ExpectNoError(err)
		nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
		ExpectNoError(err)
		// Taint already absent: nothing to clean up.
		if len(nodeTaints) == 0 {
			return
		}
		if !taintExists(nodeTaints, taint) {
			return
		}
		newTaints, err := deleteTaint(nodeTaints, taint)
		ExpectNoError(err)
		taintsData, err := json.Marshal(newTaints)
		ExpectNoError(err)
		node.Annotations[api.TaintsAnnotationKey] = string(taintsData)
		_, err = c.Nodes().Update(node)
		if err != nil {
			if !apierrs.IsConflict(err) {
				ExpectNoError(err)
			} else {
				Logf("Conflict when trying to add/update taint %s to node %v", taint.ToString(), nodeName)
			}
		} else {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	// Re-fetch and verify the taint is really gone.
	nodeUpdated, err := c.Nodes().Get(nodeName)
	ExpectNoError(err)
	By("verifying the node doesn't have the taint " + taint.ToString())
	taintsGot, err := api.GetTaintsFromNodeAnnotations(nodeUpdated.Annotations)
	ExpectNoError(err)
	if taintExists(taintsGot, taint) {
		Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
	}
}
// ScaleRC scales the named replication controller to the given size and,
// when wait is true, blocks until all of its pods are running.
func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
	By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
	if err != nil {
		return err
	}
	retryParams := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
	replicasParams := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
	if err = scaler.Scale(ns, name, size, nil, retryParams, replicasParams); err != nil {
		return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
	}
	if !wait {
		return nil
	}
	return WaitForRCPodsRunning(c, ns, name)
}
// WaitForRCPodsRunning waits up to 10 minutes for all pods selected by the
// named replication controller to become Running.
func WaitForRCPodsRunning(c *client.Client, ns, rcName string) error {
	rc, err := c.ReplicationControllers(ns).Get(rcName)
	if err != nil {
		return err
	}
	selector := labels.SelectorFromSet(labels.Set(rc.Spec.Selector))
	if err := WaitForPodsWithLabelRunning(c, ns, selector); err != nil {
		return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", rcName, err)
	}
	return nil
}
// WaitForPodsWithLabelRunning waits up to 10 minutes until at least one pod
// matches the label selector and every matching pod is Running.
func WaitForPodsWithLabelRunning(c *client.Client, ns string, label labels.Selector) error {
	store := NewPodStore(c, ns, label, fields.Everything())
	defer store.Stop()
	for start := time.Now(); time.Since(start) < 10*time.Minute; time.Sleep(5 * time.Second) {
		pods := store.List()
		// Keep waiting until at least one matching pod exists.
		if len(pods) == 0 {
			continue
		}
		allRunning := true
		for _, p := range pods {
			if p.Status.Phase != api.PodRunning {
				allRunning = false
				break
			}
		}
		if allRunning {
			return nil
		}
	}
	return fmt.Errorf("Timeout while waiting for pods with labels %q to be running", label.String())
}
// podsWithLabelScheduled returns true if at least one pod matches the label
// selector and every matching pod has been assigned to a node.
func podsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (bool, error) {
	store := NewPodStore(c, ns, label, fields.Everything())
	defer store.Stop()
	pods := store.List()
	if len(pods) == 0 {
		return false, nil
	}
	for _, p := range pods {
		if p.Spec.NodeName == "" {
			return false, nil
		}
	}
	return true, nil
}
// WaitForPodsWithLabelScheduled waits until at least one pod matches the
// label selector and all matching pods are scheduled, returning them.
func WaitForPodsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) {
	err = wait.PollImmediate(Poll, podScheduledBeforeTimeout,
		func() (bool, error) {
			pods, err = WaitForPodsWithLabel(c, ns, label)
			if err != nil {
				return false, err
			}
			// Every matching pod must have a node assigned.
			for i := range pods.Items {
				if pods.Items[i].Spec.NodeName == "" {
					return false, nil
				}
			}
			return true, nil
		})
	return pods, err
}
// WaitForPodsWithLabel waits up to PodListTimeout until at least one pod
// matches the given label selector, returning the matching pod list.
func WaitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) {
	options := api.ListOptions{LabelSelector: label}
	for start := time.Now(); time.Since(start) < PodListTimeout; time.Sleep(Poll) {
		pods, err = c.Pods(ns).List(options)
		Expect(err).NotTo(HaveOccurred())
		if len(pods.Items) > 0 {
			break
		}
	}
	if pods == nil || len(pods.Items) == 0 {
		err = fmt.Errorf("Timeout while waiting for pods with label %v", label)
	}
	return
}
// DeleteRCAndPods deletes a Replication Controller and all pods it spawned,
// waiting for the pods to become inactive and then to be fully gone. Returns
// nil if the RC was already deleted.
func DeleteRCAndPods(c *client.Client, ns, name string) error {
	By(fmt.Sprintf("deleting replication controller %s in namespace %s", name, ns))
	rc, err := c.ReplicationControllers(ns).Get(name)
	if err != nil {
		if apierrs.IsNotFound(err) {
			Logf("RC %s was already deleted: %v", name, err)
			return nil
		}
		return err
	}
	reaper, err := kubectl.ReaperForReplicationController(c, 10*time.Minute)
	if err != nil {
		if apierrs.IsNotFound(err) {
			Logf("RC %s was already deleted: %v", name, err)
			return nil
		}
		return err
	}
	// Start watching the RC's pods before stopping it so we can observe
	// them going away.
	ps, err := podStoreForRC(c, rc)
	if err != nil {
		return err
	}
	defer ps.Stop()
	startTime := time.Now()
	err = reaper.Stop(ns, name, 0, nil)
	if apierrs.IsNotFound(err) {
		Logf("RC %s was already deleted: %v", name, err)
		return nil
	}
	if err != nil {
		return fmt.Errorf("error while stopping RC: %s: %v", name, err)
	}
	deleteRCTime := time.Now().Sub(startTime)
	Logf("Deleting RC %s took: %v", name, deleteRCTime)
	// First wait for the pods to stop being active...
	err = waitForPodsInactive(ps, 10*time.Millisecond, 10*time.Minute)
	if err != nil {
		return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
	}
	terminatePodTime := time.Now().Sub(startTime) - deleteRCTime
	Logf("Terminating RC %s pods took: %v", name, terminatePodTime)
	// ...then for them to disappear entirely. This is to relieve namespace
	// controller's pressure when deleting the namespace after a test.
	err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute)
	if err != nil {
		return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
	}
	return nil
}
// DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods.
func DeleteRCAndWaitForGC(c *client.Client, ns, name string) error {
	By(fmt.Sprintf("deleting replication controller %s in namespace %s, will wait for the garbage collector to delete the pods", name, ns))
	rc, err := c.ReplicationControllers(ns).Get(name)
	if err != nil {
		if apierrs.IsNotFound(err) {
			Logf("RC %s was already deleted: %v", name, err)
			return nil
		}
		return err
	}
	// Start watching the RC's pods before deleting it so we can observe
	// them going away.
	ps, err := podStoreForRC(c, rc)
	if err != nil {
		return err
	}
	defer ps.Stop()
	startTime := time.Now()
	// OrphanDependents=false lets the garbage collector delete the pods.
	falseVar := false
	deleteOption := &api.DeleteOptions{OrphanDependents: &falseVar}
	err = c.ReplicationControllers(ns).Delete(name, deleteOption)
	if err != nil && apierrs.IsNotFound(err) {
		Logf("RC %s was already deleted: %v", name, err)
		return nil
	}
	if err != nil {
		return err
	}
	deleteRCTime := time.Now().Sub(startTime)
	Logf("Deleting RC %s took: %v", name, deleteRCTime)
	// Scale the polling interval and timeout with the RC size so large RCs
	// neither hammer the apiserver nor time out prematurely.
	var interval, timeout time.Duration
	switch {
	case rc.Spec.Replicas < 100:
		interval = 100 * time.Millisecond
	case rc.Spec.Replicas < 1000:
		interval = 1 * time.Second
	default:
		interval = 10 * time.Second
	}
	if rc.Spec.Replicas < 5000 {
		timeout = 10 * time.Minute
	} else {
		timeout = time.Duration(rc.Spec.Replicas/gcThroughput) * time.Second
		// gcThroughput is pretty strict now, add a bit more to it
		timeout = timeout + 3*time.Minute
	}
	err = waitForPodsInactive(ps, interval, timeout)
	if err != nil {
		return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
	}
	terminatePodTime := time.Now().Sub(startTime) - deleteRCTime
	Logf("Terminating RC %s pods took: %v", name, terminatePodTime)
	// Finally wait for the pods to disappear from the store entirely.
	err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute)
	if err != nil {
		return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
	}
	return nil
}
// podStoreForRC creates a PodStore that monitors pods belonging to the rc.
// It waits until the reflector has completed an initial List() before
// returning.
func podStoreForRC(c *client.Client, rc *api.ReplicationController) (*PodStore, error) {
	selector := labels.SelectorFromSet(rc.Spec.Selector)
	ps := NewPodStore(c, rc.Namespace, selector, fields.Everything())
	err := wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
		// A non-empty resource version means the reflector has synced once.
		return len(ps.reflector.LastSyncResourceVersion()) != 0, nil
	})
	return ps, err
}
// waitForPodsInactive waits until there are no active pods left in the
// PodStore. This makes a fair comparison of deletion time between
// DeleteRCAndPods and DeleteRCAndWaitForGC, because the RC controller
// decreases status.replicas when a pod becomes inactive.
func waitForPodsInactive(ps *PodStore, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		for _, pod := range ps.List() {
			if controller.IsPodActive(pod) {
				return false, nil
			}
		}
		return true, nil
	})
}
// waitForPodsGone waits until there are no pods left in the PodStore.
func waitForPodsGone(ps *PodStore, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		return len(ps.List()) == 0, nil
	})
}
// DeleteReplicaSet deletes a ReplicaSet and all pods it spawned, waiting
// for the pods to be gone. Returns nil if the ReplicaSet was already gone.
func DeleteReplicaSet(c *client.Client, ns, name string) error {
	By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
	// Renamed local from the misleading `rc` — this is a ReplicaSet.
	rs, err := c.Extensions().ReplicaSets(ns).Get(name)
	if err != nil {
		if apierrs.IsNotFound(err) {
			Logf("ReplicaSet %s was already deleted: %v", name, err)
			return nil
		}
		return err
	}
	reaper, err := kubectl.ReaperFor(extensions.Kind("ReplicaSet"), c)
	if err != nil {
		if apierrs.IsNotFound(err) {
			Logf("ReplicaSet %s was already deleted: %v", name, err)
			return nil
		}
		return err
	}
	startTime := time.Now()
	err = reaper.Stop(ns, name, 0, nil)
	if apierrs.IsNotFound(err) {
		Logf("ReplicaSet %s was already deleted: %v", name, err)
		return nil
	}
	deleteRSTime := time.Now().Sub(startTime)
	Logf("Deleting RS %s took: %v", name, deleteRSTime)
	// Only wait for pods to go away when the stop itself succeeded.
	if err == nil {
		err = waitForReplicaSetPodsGone(c, rs)
	}
	terminatePodTime := time.Now().Sub(startTime) - deleteRSTime
	Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime)
	return err
}
// waitForReplicaSetPodsGone waits until there are no pods reported under a
// ReplicaSet selector (because the pods have completed termination).
func waitForReplicaSetPodsGone(c *client.Client, rs *extensions.ReplicaSet) error {
	return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
		selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
		ExpectNoError(err)
		options := api.ListOptions{LabelSelector: selector}
		pods, err := c.Pods(rs.Namespace).List(options)
		// List errors are treated as "not gone yet" and retried.
		if err == nil && len(pods.Items) == 0 {
			return true, nil
		}
		return false, nil
	})
}
// WaitForDeploymentStatusValid waits for the deployment status to become
// valid (i.e. max unavailable and max surge aren't violated anymore).
// Note that the status should stay valid at all times unless shortly after a
// scaling event or the deployment is just created.
// To verify that the deployment status is valid and wait for the rollout to
// finish, use WaitForDeploymentStatus instead.
func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment) error {
	// These are captured by the poll closure so the final failure path can
	// log the last observed state and reason.
	var (
		oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
		newRS                     *extensions.ReplicaSet
		deployment                *extensions.Deployment
		reason                    string
	)
	err := wait.Poll(Poll, 2*time.Minute, func() (bool, error) {
		var err error
		deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name)
		if err != nil {
			return false, err
		}
		oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c)
		if err != nil {
			return false, err
		}
		if newRS == nil {
			// New RC hasn't been created yet.
			reason = "new replica set hasn't been created yet"
			Logf(reason)
			return false, nil
		}
		allRSs = append(oldRSs, newRS)
		// The old/new ReplicaSets need to contain the pod-template-hash label
		for i := range allRSs {
			if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
				reason = "all replica sets need to contain the pod-template-hash label"
				Logf(reason)
				return false, nil
			}
		}
		totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
		totalAvailable, err := deploymentutil.GetAvailablePodsForDeployment(c, deployment)
		if err != nil {
			return false, err
		}
		// Max surge check: no more pods than replicas + maxSurge.
		maxCreated := deployment.Spec.Replicas + deploymentutil.MaxSurge(*deployment)
		if totalCreated > maxCreated {
			reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
			Logf(reason)
			return false, nil
		}
		// Max unavailable check: at least minAvailable pods available.
		minAvailable := deploymentutil.MinAvailable(deployment)
		if totalAvailable < minAvailable {
			reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", totalAvailable, minAvailable)
			Logf(reason)
			return false, nil
		}
		return true, nil
	})
	if err == wait.ErrWaitTimeout {
		// Surface the last recorded reason instead of a generic timeout.
		logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
		logPodsOfDeployment(c, deployment)
		err = fmt.Errorf("%s", reason)
	}
	if err != nil {
		return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
	}
	return nil
}
// WaitForDeploymentStatus waits for the deployment to reach desired state.
// Returns an error if the deployment's rolling update strategy (max
// unavailable or max surge) is broken at any time.
func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) error {
	// Captured by the poll closure so the timeout path can log the last
	// observed state.
	var (
		oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
		newRS                     *extensions.ReplicaSet
		deployment                *extensions.Deployment
	)
	err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		var err error
		deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name)
		if err != nil {
			return false, err
		}
		oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c)
		if err != nil {
			return false, err
		}
		if newRS == nil {
			// New RS hasn't been created yet.
			return false, nil
		}
		allRSs = append(oldRSs, newRS)
		// The old/new ReplicaSets need to contain the pod-template-hash label
		for i := range allRSs {
			if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
				return false, nil
			}
		}
		totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
		totalAvailable, err := deploymentutil.GetAvailablePodsForDeployment(c, deployment)
		if err != nil {
			return false, err
		}
		// A surge violation is a hard failure, not a retry: the rolling
		// update strategy has been broken.
		maxCreated := deployment.Spec.Replicas + deploymentutil.MaxSurge(*deployment)
		if totalCreated > maxCreated {
			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
			logPodsOfDeployment(c, deployment)
			return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
		}
		// Likewise for an availability violation.
		minAvailable := deploymentutil.MinAvailable(deployment)
		if totalAvailable < minAvailable {
			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
			logPodsOfDeployment(c, deployment)
			return false, fmt.Errorf("total pods available: %d, less than the min required: %d", totalAvailable, minAvailable)
		}
		// When the deployment status and its underlying resources reach the desired state, we're done
		if deployment.Status.Replicas == deployment.Spec.Replicas &&
			deployment.Status.UpdatedReplicas == deployment.Spec.Replicas &&
			deploymentutil.GetReplicaCountForReplicaSets(oldRSs) == 0 &&
			deploymentutil.GetReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}) == deployment.Spec.Replicas {
			return true, nil
		}
		return false, nil
	})
	if err == wait.ErrWaitTimeout {
		logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
		logPodsOfDeployment(c, deployment)
	}
	if err != nil {
		return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
	}
	return nil
}
// WaitForDeploymentUpdatedReplicasLTE waits for the given deployment to be
// observed by the controller (ObservedGeneration >= desiredGeneration) and
// to have at least minUpdatedReplicas updatedReplicas. (Despite the "LTE"
// in the name — kept for compatibility — this checks an at-least bound.)
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int, desiredGeneration int64) error {
	err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		if err != nil {
			return false, err
		}
		if deployment.Status.ObservedGeneration >= desiredGeneration && deployment.Status.UpdatedReplicas >= int32(minUpdatedReplicas) {
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		// Fix: error message previously misspelled "updpatedReplicas".
		return fmt.Errorf("error waiting for deployment %s to have at least %d updatedReplicas: %v", deploymentName, minUpdatedReplicas, err)
	}
	return nil
}
// WaitForDeploymentRollbackCleared waits until the given deployment either
// started rolling back or doesn't need to rollback (Spec.RollbackTo is nil).
// Rollback should be cleared shortly, so we only wait for 1 minute here to
// fail early.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
	err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		if err != nil {
			return false, err
		}
		// Rollback not set or is kicked off.
		return deployment.Spec.RollbackTo == nil, nil
	})
	if err != nil {
		return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err)
	}
	return nil
}
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
	// Captured outside the closure so the failure paths below can inspect
	// the last observed deployment/newRS.
	var deployment *extensions.Deployment
	var newRS *extensions.ReplicaSet
	err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
		var err error
		deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
		if err != nil {
			return false, err
		}
		// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
		newRS, err = deploymentutil.GetNewReplicaSet(deployment, c)
		if err != nil || newRS == nil || !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
			return false, err
		}
		// Check revision of this deployment, and of the new replica set of this deployment
		if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision ||
			newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision ||
			deployment.Spec.Template.Spec.Containers[0].Image != image || newRS.Spec.Template.Spec.Containers[0].Image != image {
			return false, nil
		}
		return true, nil
	})
	if err == wait.ErrWaitTimeout {
		logReplicaSetsOfDeployment(deployment, nil, newRS)
	}
	// The nil-newRS check comes first: it also covers the case where the
	// initial Get failed and deployment is nil, avoiding a nil dereference
	// in the detailed message below.
	if newRS == nil {
		return fmt.Errorf("deployment %s failed to create new RS: %v", deploymentName, err)
	}
	if err != nil {
		return fmt.Errorf("error waiting for deployment %s (got %s / %s) and new RS %s (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err)
	}
	return nil
}
// WaitForOverlappingAnnotationMatch waits up to 1 minute for the named
// deployment's overlap annotation to equal the expected value.
func WaitForOverlappingAnnotationMatch(c clientset.Interface, ns, deploymentName, expected string) error {
	return wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		if err != nil {
			return false, err
		}
		return deployment.Annotations[deploymentutil.OverlapAnnotation] == expected, nil
	})
}
// CheckNewRSAnnotations checks that the deployment's new ReplicaSet carries
// every expected annotation (revision annotations are not compared).
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	if err != nil {
		return err
	}
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
	if err != nil {
		return err
	}
	for key, want := range expectedAnnotations {
		// Skip checking revision annotations.
		if key == deploymentutil.RevisionAnnotation {
			continue
		}
		if newRS.Annotations[key] != want {
			return fmt.Errorf("Expected new RS annotations = %+v, got %+v", expectedAnnotations, newRS.Annotations)
		}
	}
	return nil
}
// WaitForPodsReady waits (up to 5 minutes) until every pod labeled
// name=<name> in ns counts as available per the deployment availability rule.
func WaitForPodsReady(c *clientset.Clientset, ns, name string, minReadySeconds int) error {
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	listOpts := api.ListOptions{LabelSelector: selector}
	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		podList, listErr := c.Pods(ns).List(listOpts)
		if listErr != nil {
			// Transient list failures just retry on the next tick.
			return false, nil
		}
		for i := range podList.Items {
			if !deploymentutil.IsPodAvailable(&podList.Items[i], int32(minReadySeconds), time.Now()) {
				return false, nil
			}
		}
		return true, nil
	})
}
// Waits for the deployment to clean up old rcs.
func WaitForDeploymentOldRSsNum(c *clientset.Clientset, ns, deploymentName string, desiredRSNum int) error {
	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		d, getErr := c.Extensions().Deployments(ns).Get(deploymentName)
		if getErr != nil {
			return false, getErr
		}
		_, oldRSs, rsErr := deploymentutil.GetOldReplicaSets(d, c)
		if rsErr != nil {
			return false, rsErr
		}
		return len(oldRSs) == desiredRSNum, nil
	})
}
// logReplicaSetsOfDeployment dumps the deployment, each of its old replica
// sets, and (if present) the new replica set for debugging.
func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
	Logf("Deployment: %+v. Selector = %+v", *deployment, deployment.Spec.Selector)
	for i, rs := range allOldRSs {
		Logf("All old ReplicaSets (%d/%d) of deployment %s: %+v. Selector = %+v", i+1, len(allOldRSs), deployment.Name, *rs, rs.Spec.Selector)
	}
	if newRS == nil {
		Logf("New ReplicaSet of deployment %s is nil.", deployment.Name)
		return
	}
	Logf("New ReplicaSet of deployment %s: %+v. Selector = %+v", deployment.Name, *newRS, newRS.Spec.Selector)
}
// WaitForObservedDeployment waits until the deployment's observed generation
// reaches desiredGeneration, polling once per Poll for up to a minute.
func WaitForObservedDeployment(c *clientset.Clientset, ns, deploymentName string, desiredGeneration int64) error {
	getDeployment := func() (*extensions.Deployment, error) {
		return c.Extensions().Deployments(ns).Get(deploymentName)
	}
	return deploymentutil.WaitForObservedDeployment(getDeployment, desiredGeneration, Poll, 1*time.Minute)
}
// logPodsOfDeployment logs every pod belonging to the deployment together
// with whether it currently counts as available.
func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment) {
	minReadySeconds := deployment.Spec.MinReadySeconds
	podList, err := deploymentutil.ListPods(deployment,
		func(namespace string, options api.ListOptions) (*api.PodList, error) {
			return c.Core().Pods(namespace).List(options)
		})
	if err != nil {
		Logf("Failed to list pods of deployment %s: %v", deployment.Name, err)
		return
	}
	// err is always nil past the early return above; the old redundant
	// `if err == nil` wrapper around this loop has been removed.
	for _, pod := range podList.Items {
		availability := "not available"
		if deploymentutil.IsPodAvailable(&pod, minReadySeconds, time.Now()) {
			availability = "available"
		}
		Logf("Pod %s is %s: %+v", pod.Name, availability, pod)
	}
}
// Waits for the number of events on the given object to reach a desired count.
func WaitForEvents(c *client.Client, ns string, objOrRef runtime.Object, desiredEventsCount int) error {
	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		events, searchErr := c.Events(ns).Search(objOrRef)
		if searchErr != nil {
			return false, fmt.Errorf("error in listing events: %s", searchErr)
		}
		switch found := len(events.Items); {
		case found == desiredEventsCount:
			return true, nil
		case found < desiredEventsCount:
			// Keep polling; more events may still arrive.
			return false, nil
		default:
			// Number of events has exceeded the desired count.
			return false, fmt.Errorf("number of events has exceeded the desired count, eventsCount: %d, desiredCount: %d", found, desiredEventsCount)
		}
	})
}
// Waits for the number of events on the given object to be at least a desired count.
func WaitForPartialEvents(c *client.Client, ns string, objOrRef runtime.Object, atLeastEventsCount int) error {
	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		events, searchErr := c.Events(ns).Search(objOrRef)
		if searchErr != nil {
			return false, fmt.Errorf("error in listing events: %s", searchErr)
		}
		return len(events.Items) >= atLeastEventsCount, nil
	})
}
// updateDeploymentFunc mutates a deployment in place before it is pushed
// back to the API server.
type updateDeploymentFunc func(d *extensions.Deployment)

// UpdateDeploymentWithRetries fetches the named deployment, applies
// applyUpdate to it, and attempts the update, retrying every 10ms for up to
// a minute (e.g. on conflicts). It returns the last deployment object seen
// and the poll error (wait.ErrWaitTimeout if no update attempt succeeded).
// Note the named results: the closure writes both of them on every attempt.
func UpdateDeploymentWithRetries(c *clientset.Clientset, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) {
	deployments := c.Extensions().Deployments(namespace)
	err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
		if deployment, err = deployments.Get(name); err != nil {
			return false, err
		}
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(deployment)
		if deployment, err = deployments.Update(deployment); err == nil {
			Logf("Updating deployment %s", name)
			return true, nil
		}
		// Update failed; refetch and retry on the next tick.
		return false, nil
	})
	return deployment, err
}
// FailedContainers inspects all containers in a pod and returns failure
// information for containers that have failed or been restarted.
// A map is returned where the key is the containerID and the value is a
// struct containing the restart and failure information
func FailedContainers(pod *api.Pod) map[string]ContainerFailures {
	statuses := pod.Status.ContainerStatuses
	if len(statuses) == 0 {
		return nil
	}
	// Idiom fix: the original wrapped this loop in an unnecessary `else`
	// after the early return and declared `state` at function scope.
	states := make(map[string]ContainerFailures)
	for _, status := range statuses {
		if status.State.Terminated != nil {
			states[status.ContainerID] = ContainerFailures{status: status.State.Terminated}
		} else if status.LastTerminationState.Terminated != nil {
			states[status.ContainerID] = ContainerFailures{status: status.LastTerminationState.Terminated}
		}
		if status.RestartCount > 0 {
			// Merge the restart count into any termination entry created
			// above, or start a fresh entry for a restarted container.
			state, ok := states[status.ContainerID]
			if !ok {
				state = ContainerFailures{}
			}
			state.Restarts = int(status.RestartCount)
			states[status.ContainerID] = state
		}
	}
	return states
}
// Prints the histogram of the events and returns the number of bad events.
func BadEvents(events []*api.Event) int {
	type histogramKey struct {
		reason string
		source string
	}
	histogram := make(map[histogramKey]int)
	for _, e := range events {
		histogram[histogramKey{reason: e.Reason, source: e.Source.Component}]++
	}
	for key, count := range histogram {
		Logf("- reason: %s, source: %s -> %d", key.reason, key.source, count)
	}
	bad := 0
	for key, count := range histogram {
		// An event is "bad" when its reason mentions killing or failing.
		for _, pattern := range []string{"kill", "fail"} {
			if strings.Contains(key.reason, pattern) {
				Logf("WARNING %d events from %s with reason: %s", count, key.source, key.reason)
				bad += count
				break
			}
		}
	}
	return bad
}
// NodeAddresses returns the first address of the given type of each node.
func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []string {
	hosts := []string{}
	for i := range nodelist.Items {
		// Use at most one address per node: the first one of the requested
		// type that appears in the node's status.
		// TODO(roberthbailey): Use the "preferred" address for the node, once
		// such a thing is defined (#2462).
		for _, addr := range nodelist.Items[i].Status.Addresses {
			if addr.Type == addrType {
				hosts = append(hosts, addr.Address)
				break
			}
		}
	}
	return hosts
}
// NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes master node.
// It returns an error if it can't find an external IP for every node, though it still returns all
// hosts that it found in that case.
func NodeSSHHosts(c *client.Client) ([]string, error) {
	nodelist := waitListSchedulableNodesOrDie(c)
	// TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
	hosts := NodeAddresses(nodelist, api.NodeExternalIP)
	// Error if any node didn't have an external IP; the port-less host list
	// is still handed back so callers can inspect what was found.
	if len(hosts) != len(nodelist.Items) {
		return hosts, fmt.Errorf(
			"only found %d external IPs on nodes, but found %d nodes. Nodelist: %v",
			len(hosts), len(nodelist.Items), nodelist)
	}
	sshHosts := make([]string, 0, len(hosts))
	for _, host := range hosts {
		sshHosts = append(sshHosts, net.JoinHostPort(host, "22"))
	}
	return sshHosts, nil
}
// SSHResult captures the outcome of one SSH command execution: who ran it,
// where, what was run, and the resulting output and exit code.
type SSHResult struct {
	User string
	Host string
	Cmd string
	Stdout string
	Stderr string
	Code int
}
// SSH synchronously SSHs to a node running on provider and runs cmd. If there
// is no error performing the SSH, the stdout, stderr, and exit code are
// returned.
func SSH(cmd, host, provider string) (SSHResult, error) {
	result := SSHResult{Host: host, Cmd: cmd}
	// Get a signer for the provider.
	signer, err := GetSigner(provider)
	if err != nil {
		return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err)
	}
	// RunSSHCommand will default to Getenv("USER") if user == "", but we're
	// defaulting here as well for logging clarity.
	if result.User = os.Getenv("KUBE_SSH_USER"); result.User == "" {
		result.User = os.Getenv("USER")
	}
	result.Stdout, result.Stderr, result.Code, err = sshutil.RunSSHCommand(cmd, result.User, host, signer)
	return result, err
}
// LogSSHResult writes the full SSH invocation and its outcome to the test log.
func LogSSHResult(result SSHResult) {
	remote := result.User + "@" + result.Host
	Logf("ssh %s: command: %s", remote, result.Cmd)
	Logf("ssh %s: stdout: %q", remote, result.Stdout)
	Logf("ssh %s: stderr: %q", remote, result.Stderr)
	Logf("ssh %s: exit code: %d", remote, result.Code)
}
// IssueSSHCommandWithResult runs cmd over SSH on the node's external IP and
// returns the result. It errors if the node has no external address, the SSH
// transport fails, or the command exits non-zero; in all those cases the
// returned result pointer is nil.
func IssueSSHCommandWithResult(cmd, provider string, node *api.Node) (*SSHResult, error) {
	Logf("Getting external IP address for %s", node.Name)
	var host string
	for _, addr := range node.Status.Addresses {
		if addr.Type == api.NodeExternalIP {
			host = addr.Address + ":22"
			break
		}
	}
	if host == "" {
		return nil, fmt.Errorf("couldn't find external IP address for node %s", node.Name)
	}
	Logf("SSH %q on %s(%s)", cmd, node.Name, host)
	result, err := SSH(cmd, host, provider)
	LogSSHResult(result)
	if result.Code != 0 || err != nil {
		return nil, fmt.Errorf("failed running %q: %v (exit code %d)",
			cmd, err, result.Code)
	}
	return &result, nil
}
// IssueSSHCommand runs cmd on the given node over SSH and returns an error if
// the command could not be run or exited non-zero.
func IssueSSHCommand(cmd, provider string, node *api.Node) error {
	result, err := IssueSSHCommandWithResult(cmd, provider, node)
	if result != nil {
		LogSSHResult(*result)
	}
	// BUG FIX: IssueSSHCommandWithResult returns (nil, err) on every failure,
	// so the old code's unconditional result.Code access dereferenced a nil
	// pointer whenever err != nil. Check err before touching result.
	if err != nil {
		return err
	}
	if result.Code != 0 {
		return fmt.Errorf("failed running %q: %v (exit code %d)",
			cmd, err, result.Code)
	}
	return nil
}
// NewHostExecPodSpec returns the pod spec of hostexec pod
func NewHostExecPodSpec(ns, name string) *api.Pod {
	hostexecContainer := api.Container{
		Name:            "hostexec",
		Image:           "gcr.io/google_containers/hostexec:1.2",
		ImagePullPolicy: api.PullIfNotPresent,
	}
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{hostexecContainer},
			// Host networking lets exec'd commands observe the node itself.
			SecurityContext: &api.PodSecurityContext{
				HostNetwork: true,
			},
		},
	}
}
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
	nsArg := fmt.Sprintf("--namespace=%v", ns)
	return RunKubectl("exec", nsArg, name, "--", "/bin/sh", "-c", cmd)
}
// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
	out, err := RunHostCmd(ns, name, cmd)
	Logf("stdout: %v", out)
	// Abort the test on any exec failure.
	ExpectNoError(err)
	return out
}
// LaunchHostExecPod launches a hostexec pod in the given namespace and waits
// until it's Running
func LaunchHostExecPod(client *client.Client, ns, name string) *api.Pod {
	spec := NewHostExecPodSpec(ns, name)
	pod, err := client.Pods(ns).Create(spec)
	ExpectNoError(err)
	// Block until the pod is scheduled and running.
	ExpectNoError(WaitForPodRunningInNamespace(client, pod))
	return pod
}
// GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be
// used to SSH to their nodes.
func GetSigner(provider string) (ssh.Signer, error) {
	// Get the directory in which SSH keys are located.
	keydir := filepath.Join(os.Getenv("HOME"), ".ssh")
	// Select the key itself to use. When implementing more providers here,
	// please also add them to any SSH tests that are disabled because of signer
	// support.
	keyfile := ""
	switch provider {
	case "gce", "gke", "kubemark":
		keyfile = "google_compute_engine"
	case "aws":
		// If there is an env. variable override, use that.
		// (Renamed from aws_keyfile: Go uses MixedCaps, not underscores.)
		awsKeyfile := os.Getenv("AWS_SSH_KEY")
		if len(awsKeyfile) != 0 {
			return sshutil.MakePrivateKeySignerFromFile(awsKeyfile)
		}
		// Otherwise revert to home dir
		keyfile = "kube_aws_rsa"
	default:
		return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider)
	}
	key := filepath.Join(keydir, keyfile)
	return sshutil.MakePrivateKeySignerFromFile(key)
}
// CheckPodsRunningReady returns whether all pods whose names are listed in
// podNames in namespace ns are running and ready, using c and waiting at most
// timeout.
func CheckPodsRunningReady(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
	const desc = "running and ready"
	return CheckPodsCondition(c, ns, podNames, timeout, PodRunningReady, desc)
}
// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
// listed in podNames in namespace ns are running and ready, or succeeded; use
// c and waiting at most timeout.
func CheckPodsRunningReadyOrSucceeded(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
	const desc = "running and ready, or succeeded"
	return CheckPodsCondition(c, ns, podNames, timeout, PodRunningReadyOrSucceeded, desc)
}
// CheckPodsCondition returns whether all pods whose names are listed in podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
func CheckPodsCondition(c *client.Client, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
	np := len(podNames)
	Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
	result := make(chan bool, np)
	// Check every pod concurrently; each checker reports one bool.
	for _, name := range podNames {
		go func(name string) {
			result <- waitForPodCondition(c, ns, name, desc, timeout, condition) == nil
		}(name)
	}
	// Collect exactly one answer per pod.
	success := true
	for _, podName := range podNames {
		if !<-result {
			Logf("Pod %[1]s failed to be %[2]s.", podName, desc)
			success = false
		}
	}
	Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
	return success
}
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c *client.Client, name string, timeout time.Duration) bool {
	const wantReady = true
	return WaitForNodeToBe(c, name, api.NodeReady, wantReady, timeout)
}
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c *client.Client, name string, timeout time.Duration) bool {
	const wantReady = false
	return WaitForNodeToBe(c, name, api.NodeReady, wantReady, timeout)
}
// isNodeConditionSetAsExpected reports whether the node condition of the
// given type matches wantTrue. Unless silent, mismatches and missing
// conditions are logged. (Restructured to drop the else-after-return.)
func isNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditionType, wantTrue, silent bool) bool {
	// Check the node readiness condition (logging all).
	for _, cond := range node.Status.Conditions {
		if cond.Type != conditionType {
			continue
		}
		// Ensure that the condition status matches as desired.
		if (cond.Status == api.ConditionTrue) == wantTrue {
			return true
		}
		if !silent {
			Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
				conditionType, node.Name, cond.Status == api.ConditionTrue, wantTrue, cond.Reason, cond.Message)
		}
		return false
	}
	if !silent {
		Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
	}
	return false
}
// IsNodeConditionSetAsExpected reports whether the node condition of the given
// type has the wanted truth value, logging any mismatch.
func IsNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditionType, wantTrue bool) bool {
	return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false)
}
// IsNodeConditionSetAsExpectedSilent is IsNodeConditionSetAsExpected without
// any logging on mismatch.
func IsNodeConditionSetAsExpectedSilent(node *api.Node, conditionType api.NodeConditionType, wantTrue bool) bool {
	return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true)
}
// IsNodeConditionUnset reports whether the node carries no condition of the
// given type at all.
func IsNodeConditionUnset(node *api.Node, conditionType api.NodeConditionType) bool {
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == conditionType {
			return false
		}
	}
	return true
}
// WaitForNodeToBe returns whether node "name's" condition state matches wantTrue
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitForNodeToBe(c *client.Client, name string, conditionType api.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
	Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		node, err := c.Nodes().Get(name)
		if err != nil {
			Logf("Couldn't get node %s", name)
		} else if IsNodeConditionSetAsExpected(node, conditionType, wantTrue) {
			return true
		}
		time.Sleep(Poll)
	}
	Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
	return false
}
// Checks whether not-ready nodes can be ignored while checking if all nodes are
// ready (we allow e.g. for incorrect provisioning of some small percentage of nodes
// while validating cluster, and those nodes may never become healthy).
// Currently we allow only for:
// - not present CNI plugins on node
// TODO: we should extend it for other reasons.
func allowedNotReadyReasons(nodes []*api.Node) bool {
	for _, node := range nodes {
		// GetNodeCondition reports index -1 when the Ready condition is
		// absent; the short-circuit below then prevents dereferencing the
		// missing condition.
		index, condition := api.GetNodeCondition(&node.Status, api.NodeReady)
		if index == -1 ||
			!strings.Contains(condition.Reason, "could not locate kubenet required CNI plugins") {
			return false
		}
	}
	return true
}
// Checks whether all registered nodes are ready.
// Up to TestContext.AllowedNotReadyNodes nodes may be non-ready, but only if
// each of their not-ready reasons is accepted by allowedNotReadyReasons.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c *client.Client, timeout time.Duration) error {
	Logf("Waiting up to %v for all nodes to be ready", timeout)
	var notReady []*api.Node
	err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
		notReady = nil
		// It should be OK to list unschedulable Nodes here.
		nodes, err := c.Nodes().List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		for i := range nodes.Items {
			node := &nodes.Items[i]
			if !IsNodeConditionSetAsExpected(node, api.NodeReady, true) {
				notReady = append(notReady, node)
			}
		}
		// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
		// to make it possible e.g. for incorrect deployment of some small percentage
		// of nodes (which we allow in cluster validation). Some nodes that are not
		// provisioned correctly at startup will never become ready (e.g. when something
		// won't install correctly), so we can't expect them to be ready at any point.
		//
		// However, we only allow non-ready nodes with some specific reasons.
		if len(notReady) > TestContext.AllowedNotReadyNodes {
			return false, nil
		}
		return allowedNotReadyReasons(notReady), nil
	})
	// A plain timeout is tolerated here: the notReady list captured by the
	// closure produces the meaningful error below.
	if err != nil && err != wait.ErrWaitTimeout {
		return err
	}
	if len(notReady) > 0 {
		return fmt.Errorf("Not ready nodes: %v", notReady)
	}
	return nil
}
// checks whether all registered nodes are ready and all required Pods are running on them.
// "Required" pods are the per-node system pods matched by requiredPerNodePods;
// master nodes are exempt from that check.
func WaitForAllNodesHealthy(c *client.Client, timeout time.Duration) error {
	Logf("Waiting up to %v for all nodes to be ready", timeout)
	var notReady []api.Node
	var missingPodsPerNode map[string][]string
	err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
		notReady = nil
		// It should be OK to list unschedulable Nodes here.
		nodes, err := c.Nodes().List(api.ListOptions{ResourceVersion: "0"})
		if err != nil {
			return false, err
		}
		// Phase 1: collect nodes whose Ready condition is not true.
		for _, node := range nodes.Items {
			if !IsNodeConditionSetAsExpected(&node, api.NodeReady, true) {
				notReady = append(notReady, node)
			}
		}
		pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{ResourceVersion: "0"})
		if err != nil {
			return false, err
		}
		// Phase 2: index the running kube-system pods by the node they run on.
		systemPodsPerNode := make(map[string][]string)
		for _, pod := range pods.Items {
			if pod.Namespace == api.NamespaceSystem && pod.Status.Phase == api.PodRunning {
				if pod.Spec.NodeName != "" {
					systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
				}
			}
		}
		// Phase 3: for every non-master node, record any required per-node
		// pod pattern with no running match.
		missingPodsPerNode = make(map[string][]string)
		for _, node := range nodes.Items {
			if !system.IsMasterNode(&node) {
				for _, requiredPod := range requiredPerNodePods {
					foundRequired := false
					for _, presentPod := range systemPodsPerNode[node.Name] {
						if requiredPod.MatchString(presentPod) {
							foundRequired = true
							break
						}
					}
					if !foundRequired {
						missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String())
					}
				}
			}
		}
		return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
	})
	// A timeout falls through so the captured state below yields a
	// descriptive error instead of a bare ErrWaitTimeout.
	if err != nil && err != wait.ErrWaitTimeout {
		return err
	}
	if len(notReady) > 0 {
		return fmt.Errorf("Not ready nodes: %v", notReady)
	}
	if len(missingPodsPerNode) > 0 {
		return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode)
	}
	return nil
}
// Filters nodes in NodeList in place, removing nodes that do not
// satisfy the given condition
// TODO: consider merging with pkg/client/cache.NodeLister
func FilterNodes(nodeList *api.NodeList, fn func(node api.Node) bool) {
	var kept []api.Node
	for i := range nodeList.Items {
		if fn(nodeList.Items[i]) {
			kept = append(kept, nodeList.Items[i])
		}
	}
	nodeList.Items = kept
}
// ParseKVLines parses output that looks like lines containing "<key>: <val>"
// and returns <val> if <key> is found. Otherwise, it returns the empty string.
func ParseKVLines(output, key string) string {
	const delim = ":"
	want := key + delim
	for _, line := range strings.Split(output, "\n") {
		// SplitAfterN keeps the delimiter attached to the key part, so a
		// matching line yields exactly key+":" as its first piece.
		parts := strings.SplitAfterN(line, delim, 2)
		if len(parts) != 2 {
			continue
		}
		if parts[0] == want {
			return strings.TrimSpace(parts[1])
		}
	}
	return ""
}
// RestartKubeProxy kills kube-proxy on the given host over SSH and waits up
// to a minute for the kubelet to restart it (kube-proxy runs as a static
// pod). Only implemented for gce/gke/aws.
func RestartKubeProxy(host string) error {
	// TODO: Make it work for all providers.
	if !ProviderIs("gce", "gke", "aws") {
		return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
	}
	// kubelet will restart the kube-proxy since it's running in a static pod
	Logf("Killing kube-proxy on node %v", host)
	result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider)
	if err != nil || result.Code != 0 {
		LogSSHResult(result)
		return fmt.Errorf("couldn't restart kube-proxy: %v", err)
	}
	// wait for kube-proxy to come back up
	sshCmd := "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'"
	err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		Logf("Waiting for kubeproxy to come back up with %v on %v", sshCmd, host)
		result, err := SSH(sshCmd, host, TestContext.Provider)
		if err != nil {
			return false, err
		}
		if result.Code != 0 {
			LogSSHResult(result)
			return false, fmt.Errorf("failed to run command, exited %d", result.Code)
		}
		// pgrep prints one pid per line; "0\n" means no process yet.
		if result.Stdout == "0\n" {
			return false, nil
		}
		Logf("kube-proxy is back up.")
		return true, nil
	})
	if err != nil {
		return fmt.Errorf("kube-proxy didn't recover: %v", err)
	}
	return nil
}
// RestartApiserver restarts the kube-apiserver: over SSH on gce/aws, or via a
// same-version master upgrade on gke (which has no SSH access).
func RestartApiserver(c *client.Client) error {
	// TODO: Make it work for all providers.
	if !ProviderIs("gce", "gke", "aws") {
		return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
	}
	if ProviderIs("gce", "aws") {
		return sshRestartMaster()
	}
	// GKE doesn't allow ssh access, so use a same-version master
	// upgrade to teardown/recreate master.
	v, err := c.ServerVersion()
	if err != nil {
		return err
	}
	return masterUpgradeGKE(v.GitVersion[1:]) // strip leading 'v'
}
// sshRestartMaster restarts the apiserver on the master over SSH: by killing
// its docker container on GCE, or via the init script on AWS.
func sshRestartMaster() error {
	if !ProviderIs("gce", "aws") {
		return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
	}
	command := "sudo /etc/init.d/kube-apiserver restart"
	if ProviderIs("gce") {
		command = "sudo docker ps | grep /kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill"
	}
	Logf("Restarting master via ssh, running: %v", command)
	result, err := SSH(command, GetMasterHost()+":22", TestContext.Provider)
	if err != nil || result.Code != 0 {
		LogSSHResult(result)
		return fmt.Errorf("couldn't restart apiserver: %v", err)
	}
	return nil
}
// WaitForApiserverUp polls /healthz every 5s until it answers "ok", for up to
// a minute.
func WaitForApiserverUp(c *client.Client) error {
	deadline := time.Now().Add(time.Minute)
	for time.Now().Before(deadline) {
		body, err := c.Get().AbsPath("/healthz").Do().Raw()
		if err == nil && string(body) == "ok" {
			return nil
		}
		time.Sleep(5 * time.Second)
	}
	return fmt.Errorf("waiting for apiserver timed out")
}
// WaitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it.
// By cluster size we mean number of Nodes excluding Master Node.
func WaitForClusterSize(c *client.Client, size int, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		// The unschedulable field selector excludes the master.
		nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector()})
		if err != nil {
			Logf("Failed to list nodes: %v", err)
			continue
		}
		numNodes := len(nodes.Items)
		// Filter out not-ready nodes.
		FilterNodes(nodes, func(node api.Node) bool {
			return IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
		})
		numReady := len(nodes.Items)
		// Success requires the exact size AND every node ready.
		if numNodes == size && numReady == size {
			Logf("Cluster has reached the desired size %d", size)
			return nil
		}
		Logf("Waiting for cluster size %d, current size %d, not ready nodes %d", size, numNodes, numNodes-numReady)
	}
	return fmt.Errorf("timeout waiting %v for cluster size to be %d", timeout, size)
}
// GetHostExternalAddress gets the node for a pod and returns the first External
// address. Returns an error if the node the pod is on doesn't have an External
// address.
func GetHostExternalAddress(client *client.Client, p *api.Pod) (externalAddress string, err error) {
	node, err := client.Nodes().Get(p.Spec.NodeName)
	if err != nil {
		return "", err
	}
	for _, address := range node.Status.Addresses {
		// Take the first non-empty external address.
		if address.Type == api.NodeExternalIP && address.Address != "" {
			return address.Address, nil
		}
	}
	return "", fmt.Errorf("No external address for pod %v on node %v",
		p.Name, p.Spec.NodeName)
}
// extractRT is a no-op http.RoundTripper that records the headers of the
// request it is handed, exposing what a wrapped client config would send.
type extractRT struct {
	http.Header
}

// RoundTrip captures req's headers and returns an empty response; no network
// I/O is performed.
func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
	rt.Header = req.Header
	return &http.Response{}, nil
}
// headersForConfig extracts any http client logic necessary for the provided
// config.
func headersForConfig(c *restclient.Config) (http.Header, error) {
	capture := &extractRT{}
	wrapped, err := restclient.HTTPWrappersForConfig(c, capture)
	if err != nil {
		return nil, err
	}
	// Push a dummy request through the wrappers so they stamp their headers
	// onto it; capture records whatever arrives.
	if _, err := wrapped.RoundTrip(&http.Request{}); err != nil {
		return nil, err
	}
	return capture.Header, nil
}
// OpenWebSocketForURL constructs a websocket connection to the provided URL, using the client
// config, with the specified protocols.
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) {
	tlsConfig, err := restclient.TLSConfigFor(config)
	if err != nil {
		return nil, fmt.Errorf("failed to create tls config: %v", err)
	}
	// Rewrite the scheme for websockets and append the matching default
	// port, keeping any explicit port the caller supplied.
	scheme, defaultPort := "ws", ":80"
	if tlsConfig != nil {
		scheme, defaultPort = "wss", ":443"
	}
	url.Scheme = scheme
	if !strings.Contains(url.Host, ":") {
		url.Host += defaultPort
	}
	headers, err := headersForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("failed to load http headers: %v", err)
	}
	cfg, err := websocket.NewConfig(url.String(), "http://localhost")
	if err != nil {
		return nil, fmt.Errorf("failed to create websocket config: %v", err)
	}
	cfg.Header = headers
	cfg.TlsConfig = tlsConfig
	cfg.Protocol = protocols
	return websocket.DialConfig(cfg)
}
// getIngressAddress returns the ips/hostnames associated with the Ingress.
func getIngressAddress(client *client.Client, ns, name string) ([]string, error) {
	ing, err := client.Extensions().Ingress(ns).Get(name)
	if err != nil {
		return nil, err
	}
	addresses := []string{}
	for _, entry := range ing.Status.LoadBalancer.Ingress {
		// An entry may carry an IP, a hostname, or both.
		if entry.IP != "" {
			addresses = append(addresses, entry.IP)
		}
		if entry.Hostname != "" {
			addresses = append(addresses, entry.Hostname)
		}
	}
	return addresses, nil
}
// WaitForIngressAddress waits for the Ingress to acquire an address.
func WaitForIngressAddress(c *client.Client, ns, ingName string, timeout time.Duration) (string, error) {
	var address string
	err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
		candidates, pollErr := getIngressAddress(c, ns, ingName)
		if pollErr != nil || len(candidates) == 0 {
			Logf("Waiting for Ingress %v to acquire IP, error %v", ingName, pollErr)
			return false, nil
		}
		// The first listed address is returned.
		address = candidates[0]
		return true, nil
	})
	return address, err
}
// Looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
	return LookForString(expectedString, timeout, func() string {
		nsFlag := fmt.Sprintf("--namespace=%v", ns)
		return RunKubectlOrDie("logs", podName, container, nsFlag)
	})
}
// Looks for the given string in a file in a specific pod container
func LookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) {
	return LookForString(expectedString, timeout, func() string {
		nsFlag := fmt.Sprintf("--namespace=%v", ns)
		return RunKubectlOrDie("exec", podName, "-c", container, nsFlag, "--", "cat", file)
	})
}
// Looks for the given string in the output of a command executed in a specific pod container
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
	return LookForString(expectedString, timeout, func() string {
		// use the first container
		args := append([]string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"}, command...)
		return RunKubectlOrDie(args...)
	})
}
// Looks for the given string in the output of fn, repeatedly calling fn until
// the timeout is reached or the string is found. Returns last log and possibly
// error if the string was not found.
func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		result = fn()
		if strings.Contains(result, expectedString) {
			return result, nil
		}
		time.Sleep(Poll)
	}
	err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
	return result, err
}
// getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client *client.Client, ns, name string, svcPort int) (int, error) {
	svc, err := client.Services(ns).Get(name)
	if err != nil {
		return 0, err
	}
	for _, port := range svc.Spec.Ports {
		// Only a matching service port with an assigned NodePort counts.
		if port.Port == int32(svcPort) && port.NodePort != 0 {
			return int(port.NodePort), nil
		}
	}
	return 0, fmt.Errorf(
		"No node port found for service %v, port %v", name, svcPort)
}
// GetNodePortURL returns the url to a nodeport Service.
// It resolves the service's NodePort, then finds a schedulable node with an
// external IP to address it through.
func GetNodePortURL(client *client.Client, ns, name string, svcPort int) (string, error) {
	nodePort, err := getSvcNodePort(client, ns, name, svcPort)
	if err != nil {
		return "", err
	}
	// This list of nodes must not include the master, which is marked
	// unschedulable, since the master doesn't run kube-proxy. Without
	// kube-proxy NodePorts won't work.
	var nodes *api.NodeList
	// The closure writes the outer `nodes` and `err`; on poll timeout the
	// last list error (if any) is what gets returned below.
	if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
		nodes, err = client.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector()})
		return err == nil, nil
	}) != nil {
		return "", err
	}
	if len(nodes.Items) == 0 {
		return "", fmt.Errorf("Unable to list nodes in cluster.")
	}
	// Use the first non-empty external IP found on any node.
	for _, node := range nodes.Items {
		for _, address := range node.Status.Addresses {
			if address.Type == api.NodeExternalIP {
				if address.Address != "" {
					return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
				}
			}
		}
	}
	return "", fmt.Errorf("Failed to find external address for service %v", name)
}
// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
// none are running, otherwise it does what a synchronous scale operation would do.
func ScaleRCByLabels(client *client.Client, ns string, l map[string]string, replicas uint) error {
	listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l))}
	rcs, err := client.ReplicationControllers(ns).List(listOpts)
	if err != nil {
		return err
	}
	if len(rcs.Items) == 0 {
		return fmt.Errorf("RC with labels %v not found in ns %v", l, ns)
	}
	Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
	for _, labelRC := range rcs.Items {
		name := labelRC.Name
		if err := ScaleRC(client, ns, name, replicas, false); err != nil {
			return err
		}
		rc, err := client.ReplicationControllers(ns).Get(name)
		if err != nil {
			return err
		}
		if replicas == 0 {
			ps, err := podStoreForRC(client, rc)
			if err != nil {
				return err
			}
			// NOTE(review): this defer is inside a loop, so every pod store
			// stays open until the function returns rather than per
			// iteration — consider extracting the loop body into a helper.
			defer ps.Stop()
			if err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute); err != nil {
				return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
			}
		} else {
			// Scaling up: wait for the RC's selected pods to be running.
			if err := WaitForPodsWithLabelRunning(
				client, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil {
				return err
			}
		}
	}
	return nil
}
// TODO(random-liu): Change this to be a member function of the framework.
// GetPodLogs returns the current logs of the named container in the given pod,
// or an error if the log subresource request fails.
func GetPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
	return getPodLogsInternal(c, namespace, podName, containerName, false)
}
// getPreviousPodLogs returns the logs of the previous instance of the named
// container (i.e. from before its last restart), or an error if unavailable.
func getPreviousPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
	return getPodLogsInternal(c, namespace, podName, containerName, true)
}
// getPodLogsInternal fetches the log subresource of a container. It is shaped
// as a utility function for gomega Eventually: it returns errors instead of
// asserting, so callers can poll until logs become retrievable.
//
// previous selects the logs of the previous container instance (post-restart)
// rather than the current one.
func getPodLogsInternal(c *client.Client, namespace, podName, containerName string, previous bool) (string, error) {
	logs, err := c.Get().
		Resource("pods").
		Namespace(namespace).
		Name(podName).SubResource("log").
		Param("container", containerName).
		Param("previous", strconv.FormatBool(previous)).
		Do().
		Raw()
	if err != nil {
		return "", err
	}
	// err is nil past this point, so the original `err == nil &&` in this
	// condition was dead; only the content check remains.
	if strings.Contains(string(logs), "Internal Error") {
		return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
	}
	return string(logs), nil
}
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
	switch TestContext.Provider {
	case "gce", "gke":
		return ensureGCELoadBalancerResourcesDeleted(ip, portRange)
	default:
		// No cleanup verification exists for other providers; treat as done.
		return nil
	}
}
// ensureGCELoadBalancerResourcesDeleted polls the GCE API until no forwarding
// rule matching the given ip and portRange remains in the test project/region,
// or until the 5 minute polling budget is exhausted.
func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
	gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
	if !ok {
		return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
	}
	project := TestContext.CloudConfig.ProjectID
	region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone)
	if err != nil {
		return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err)
	}
	return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
		service := gceCloud.GetComputeService()
		// List every forwarding rule in the region; the load balancer is
		// considered deleted once none matches our ip/portRange pair.
		list, err := service.ForwardingRules.List(project, region).Do()
		if err != nil {
			return false, err
		}
		for ix := range list.Items {
			item := list.Items[ix]
			if item.PortRange == portRange && item.IPAddress == ip {
				Logf("found a load balancer: %v", item)
				// Still present: keep polling.
				return false, nil
			}
		}
		return true, nil
	})
}
// The following helper functions can block/unblock network from source
// host to destination host by manipulating iptable rules.
// This function assumes it can ssh to the source host.
//
// Caution:
// Recommend to input IP instead of hostnames. Using hostnames will cause iptables to
// do a DNS lookup to resolve the name to an IP address, which will
// slow down the test and cause it to fail if DNS is absent or broken.
//
// Suggested usage pattern:
// func foo() {
// ...
// defer UnblockNetwork(from, to)
// BlockNetwork(from, to)
// ...
// }
//
// BlockNetwork inserts an iptables REJECT rule on host from, dropping all
// traffic destined for to. It fails the test if the SSH command cannot run or
// exits non-zero. Pair with a deferred UnblockNetwork (see usage pattern above).
func BlockNetwork(from string, to string) {
	Logf("block network traffic from %s to %s", from, to)
	iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
	dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule)
	if result, err := SSH(dropCmd, from, TestContext.Provider); result.Code != 0 || err != nil {
		LogSSHResult(result)
		Failf("Unexpected error: %v", err)
	}
}
// UnblockNetwork removes the iptables REJECT rule installed by BlockNetwork,
// retrying every 100ms for up to 30s, and fails the test (with instructions
// for manual cleanup) if the rule could not be removed.
func UnblockNetwork(from string, to string) {
	Logf("Unblock network traffic from %s to %s", from, to)
	iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
	undropCmd := fmt.Sprintf("sudo iptables --delete %s", iptablesRule)
	// Undrop command may fail if the rule has never been created.
	// In such case we just lose 30 seconds, but the cluster is healthy.
	// But if the rule had been created and removing it failed, the node is broken and
	// not coming back. Subsequent tests will run on fewer nodes (some of the tests
	// may fail). Manual intervention is required in such case (recreating the
	// cluster solves the problem too).
	err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) {
		result, err := SSH(undropCmd, from, TestContext.Provider)
		if result.Code == 0 && err == nil {
			return true, nil
		}
		LogSSHResult(result)
		if err != nil {
			Logf("Unexpected error: %v", err)
		}
		// Keep retrying until the poll budget runs out.
		return false, nil
	})
	if err != nil {
		Failf("Failed to remove the iptable REJECT rule. Manual intervention is "+
			"required on host %s: remove rule %s, if exists", from, iptablesRule)
	}
}
// isElementOf reports whether a pod with the given UID is present in pods.
func isElementOf(podUID types.UID, pods *api.PodList) bool {
	for i := range pods.Items {
		if pods.Items[i].UID == podUID {
			return true
		}
	}
	return false
}
// CheckRSHashLabel verifies the ReplicaSet carries the deployment's unique
// pod-template-hash label on its own labels, its selector, and its pod
// template, returning a descriptive error if any of the three is missing.
func CheckRSHashLabel(rs *extensions.ReplicaSet) error {
	key := extensions.DefaultDeploymentUniqueLabelKey
	hasAll := len(rs.Labels[key]) > 0 &&
		len(rs.Spec.Selector.MatchLabels[key]) > 0 &&
		len(rs.Spec.Template.Labels[key]) > 0
	if !hasAll {
		return fmt.Errorf("unexpected RS missing required pod-hash-template: %+v, selector = %+v, template = %+v", rs, rs.Spec.Selector, rs.Spec.Template)
	}
	return nil
}
// CheckPodHashLabel verifies that every pod in the list carries the
// deployment's unique pod-template-hash label; if any do not, it returns an
// error listing each offending pod.
func CheckPodHashLabel(pods *api.PodList) error {
	var msg string
	for _, pod := range pods.Items {
		if len(pod.Labels[extensions.DefaultDeploymentUniqueLabelKey]) != 0 {
			continue
		}
		// Lazily start the message on the first offender.
		if msg == "" {
			msg = "unexpected pods missing required pod-hash-template:"
		}
		msg = fmt.Sprintf("%s %+v;", msg, pod)
	}
	if msg != "" {
		return fmt.Errorf("%s", msg)
	}
	return nil
}
// timeout for proxy requests.
const proxyTimeout = 2 * time.Minute
// NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
func NodeProxyRequest(c *client.Client, node, endpoint string) (restclient.Result, error) {
	// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
	// This will leak a goroutine if proxy hangs. #22165
	subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c)
	if err != nil {
		return restclient.Result{}, err
	}
	var result restclient.Result
	finished := make(chan struct{})
	go func() {
		if subResourceProxyAvailable {
			// Newer servers expose the kubelet via the nodes/proxy subresource.
			result = c.Get().
				Resource("nodes").
				SubResource("proxy").
				Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
				Suffix(endpoint).
				Do()
		} else {
			// Older servers only support the legacy /proxy prefix form.
			result = c.Get().
				Prefix("proxy").
				Resource("nodes").
				Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
				Suffix(endpoint).
				Do()
		}
		finished <- struct{}{}
	}()
	select {
	case <-finished:
		return result, nil
	case <-time.After(proxyTimeout):
		// NOTE(review): a timeout returns a zero Result with a nil error, so
		// callers cannot distinguish timeout from success; the goroutine also
		// stays blocked on the unbuffered finished channel (see #22165 above).
		return restclient.Result{}, nil
	}
}
// GetKubeletPods retrieves the list of pods on the kubelet
func GetKubeletPods(c *client.Client, node string) (*api.PodList, error) {
	// The "pods" endpoint returns the pods the kubelet knows about.
	return getKubeletPods(c, node, "pods")
}
// GetKubeletRunningPods retrieves the list of running pods on the kubelet. The pods
// includes necessary information (e.g., UID, name, namespace for
// pods/containers), but do not contain the full spec.
func GetKubeletRunningPods(c *client.Client, node string) (*api.PodList, error) {
	return getKubeletPods(c, node, "runningpods")
}
// getKubeletPods queries a kubelet listing endpoint ("pods" or "runningpods")
// through the API server's node proxy and decodes the response into a PodList.
// On failure it returns an empty (non-nil) PodList along with the error.
func getKubeletPods(c *client.Client, node, resource string) (*api.PodList, error) {
	result := &api.PodList{}
	// Named res rather than client: the original local variable shadowed the
	// imported client package.
	res, err := NodeProxyRequest(c, node, resource)
	if err != nil {
		return &api.PodList{}, err
	}
	if err = res.Into(result); err != nil {
		return &api.PodList{}, err
	}
	return result, nil
}
// LaunchWebserverPod launches a pod serving http on port 8080 to act
// as the target for networking connectivity checks. The ip address
// of the created pod will be returned if the pod is launched
// successfully.
func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) {
	containerName := fmt.Sprintf("%s-container", podName)
	port := 8080
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name: containerName,
					Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
					// porter serves the value of SERVE_PORT_<port> on <port>.
					Env: []api.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
					Ports: []api.ContainerPort{{ContainerPort: int32(port)}},
				},
			},
			// Pin to the requested node so reachability is tested from there.
			NodeName: nodeName,
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	podClient := f.Client.Pods(f.Namespace.Name)
	_, err := podClient.Create(pod)
	ExpectNoError(err)
	ExpectNoError(f.WaitForPodRunning(podName))
	// Re-fetch the pod to learn the IP it was assigned once running.
	createdPod, err := podClient.Get(podName)
	ExpectNoError(err)
	ip = fmt.Sprintf("%s:%d", createdPod.Status.PodIP, port)
	Logf("Target pod IP:port is %s", ip)
	return
}
// CheckConnectivityToHost launches a pod running wget on the
// specified node to test connectivity to the specified host. An
// error will be returned if the host is not reachable from the pod.
func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, timeout int) error {
	contName := fmt.Sprintf("%s-container", podName)
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name: contName,
					Image: "gcr.io/google_containers/busybox:1.24",
					// NOTE(review): -s is presumably busybox wget's headers-only
					// ("spider") flag in this image — confirm; success means the
					// host answered.
					Command: []string{"wget", fmt.Sprintf("--timeout=%d", timeout), "-s", host},
				},
			},
			NodeName: nodeName,
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	podClient := f.Client.Pods(f.Namespace.Name)
	_, err := podClient.Create(pod)
	if err != nil {
		return err
	}
	// Best-effort cleanup of the probe pod.
	defer podClient.Delete(podName, nil)
	err = WaitForPodSuccessInNamespace(f.Client, podName, contName, f.Namespace.Name)
	if err != nil {
		// On failure, surface the wget output to aid debugging.
		logs, logErr := GetPodLogs(f.Client, f.Namespace.Name, pod.Name, contName)
		if logErr != nil {
			Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr)
		} else {
			Logf("pod %s/%s \"wget\" logs:\n%s", f.Namespace.Name, pod.Name, logs)
		}
	}
	return err
}
// CoreDump SSHs to the master and all nodes and dumps their logs into dir.
// It shells out to cluster/log-dump.sh to accomplish this.
func CoreDump(dir string) {
	script := path.Join(TestContext.RepoRoot, "cluster", "log-dump.sh")
	cmd := exec.Command(script, dir)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		Logf("Error running cluster/log-dump.sh: %v", err)
	}
}
// UpdatePodWithRetries fetches the named pod, applies update to it, and writes
// it back, retrying up to three times when the write fails with a conflict or
// server-timeout error. It returns the updated pod on success.
func UpdatePodWithRetries(client *client.Client, ns, name string, update func(*api.Pod)) (*api.Pod, error) {
	const attempts = 3
	for try := 0; try < attempts; try++ {
		pod, err := client.Pods(ns).Get(name)
		if err != nil {
			return nil, fmt.Errorf("Failed to get pod %q: %v", name, err)
		}
		update(pod)
		updated, err := client.Pods(ns).Update(pod)
		if err == nil {
			return updated, nil
		}
		// Only conflicts and server timeouts are worth retrying.
		if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
			return nil, fmt.Errorf("Failed to update pod %q: %v", name, err)
		}
	}
	return nil, fmt.Errorf("Too many retries updating Pod %q", name)
}
// GetPodsInNamespace lists all pods in ns, dropping any pod whose labels match
// ignoreLabels (no filtering when ignoreLabels is empty), and returns pointers
// to the remaining pods.
func GetPodsInNamespace(c *client.Client, ns string, ignoreLabels map[string]string) ([]*api.Pod, error) {
	pods, err := c.Pods(ns).List(api.ListOptions{})
	if err != nil {
		return []*api.Pod{}, err
	}
	ignoreSelector := labels.SelectorFromSet(ignoreLabels)
	filtered := []*api.Pod{}
	for i := range pods.Items {
		// Take the address of the slice element, not of the range variable:
		// the original appended &p where p was the loop variable, so every
		// returned pointer aliased the same Pod (the last one iterated).
		p := &pods.Items[i]
		if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) {
			continue
		}
		filtered = append(filtered, p)
	}
	return filtered, nil
}
// RunCmd runs command with args and returns its captured stdout and stderr.
// Output is additionally mirrored to the OS streams to aid debugging in case
// the command hangs and never returns before the test gets killed; this can
// produce ugly output because gcloud doesn't always provide newlines.
func RunCmd(command string, args ...string) (string, string, error) {
	Logf("Running %s %v", command, args)
	var outBuf, errBuf bytes.Buffer
	cmd := exec.Command(command, args...)
	cmd.Stdout = io.MultiWriter(os.Stdout, &outBuf)
	cmd.Stderr = io.MultiWriter(os.Stderr, &errBuf)
	runErr := cmd.Run()
	stdout := outBuf.String()
	stderr := errBuf.String()
	if runErr != nil {
		return "", "", fmt.Errorf("error running %s %v; got error %v, stdout %q, stderr %q",
			command, args, runErr, stdout, stderr)
	}
	return stdout, stderr, nil
}
// retryCmd runs cmd using args and retries it for up to SingleCallTimeout if
// it returns an error. It returns stdout and stderr.
func retryCmd(command string, args ...string) (string, string, error) {
	var err error
	stdout, stderr := "", ""
	// The Poll return value (timeout error) is deliberately discarded: after a
	// timeout, err still holds the last RunCmd failure, which is more useful.
	wait.Poll(Poll, SingleCallTimeout, func() (bool, error) {
		stdout, stderr, err = RunCmd(command, args...)
		if err != nil {
			Logf("Got %v", err)
			return false, nil
		}
		return true, nil
	})
	return stdout, stderr, err
}
// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods.
// Pods bound to master nodes are excluded entirely. A pod with a node assigned
// counts as scheduled; a pod without one counts as "not scheduled" only when
// the scheduler has marked it Unschedulable.
func GetPodsScheduled(masterNodes sets.String, pods *api.PodList) (scheduledPods, notScheduledPods []api.Pod) {
	for _, pod := range pods.Items {
		if !masterNodes.Has(pod.Spec.NodeName) {
			if pod.Spec.NodeName != "" {
				// Assigned: its PodScheduled condition must exist and be True.
				_, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled)
				Expect(scheduledCondition != nil).To(Equal(true))
				Expect(scheduledCondition.Status).To(Equal(api.ConditionTrue))
				scheduledPods = append(scheduledPods, pod)
			} else {
				// Unassigned: the condition must exist and be False.
				_, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled)
				Expect(scheduledCondition != nil).To(Equal(true))
				Expect(scheduledCondition.Status).To(Equal(api.ConditionFalse))
				if scheduledCondition.Reason == "Unschedulable" {
					notScheduledPods = append(notScheduledPods, pod)
				}
			}
		}
	}
	return
}
// WaitForStableCluster waits until all existing pods are scheduled and returns their amount.
// It polls every 2 seconds and fails the test after a 10 minute timeout.
func WaitForStableCluster(c *client.Client, masterNodes sets.String) int {
	timeout := 10 * time.Minute
	startTime := time.Now()
	allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
	ExpectNoError(err)
	// API server returns also Pods that succeeded. We need to filter them out.
	currentPods := make([]api.Pod, 0, len(allPods.Items))
	for _, pod := range allPods.Items {
		if pod.Status.Phase != api.PodSucceeded && pod.Status.Phase != api.PodFailed {
			currentPods = append(currentPods, pod)
		}
	}
	allPods.Items = currentPods
	scheduledPods, currentlyNotScheduledPods := GetPodsScheduled(masterNodes, allPods)
	for len(currentlyNotScheduledPods) != 0 {
		time.Sleep(2 * time.Second)
		allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
		ExpectNoError(err)
		// Apply the same terminal-phase filter on every re-list. The original
		// only filtered the first listing, so a pod reaching Succeeded/Failed
		// mid-wait could be miscounted by GetPodsScheduled.
		currentPods := make([]api.Pod, 0, len(allPods.Items))
		for _, pod := range allPods.Items {
			if pod.Status.Phase != api.PodSucceeded && pod.Status.Phase != api.PodFailed {
				currentPods = append(currentPods, pod)
			}
		}
		allPods.Items = currentPods
		scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods)
		if startTime.Add(timeout).Before(time.Now()) {
			Failf("Timed out after %v waiting for stable cluster.", timeout)
			break
		}
	}
	return len(scheduledPods)
}
// GetMasterAndWorkerNodesOrDie will return a list masters and schedulable worker nodes
func GetMasterAndWorkerNodesOrDie(c *client.Client) (sets.String, *api.NodeList) {
	nodes := &api.NodeList{}
	masters := sets.NewString()
	all, err := c.Nodes().List(api.ListOptions{})
	// "OrDie": fail the test on a listing error instead of silently ignoring
	// it — the original discarded the error and would nil-panic below.
	ExpectNoError(err)
	for i := range all.Items {
		n := &all.Items[i]
		if system.IsMasterNode(n) {
			masters.Insert(n.Name)
		} else if isNodeSchedulable(n) {
			nodes.Items = append(nodes.Items, *n)
		}
	}
	return masters, nodes
}
// CreateFileForGoBinData writes the embedded gobindata asset found at
// gobindataPath into TestContext.OutputDir/outputFilename, creating any
// missing parent directories along the way.
func CreateFileForGoBinData(gobindataPath, outputFilename string) error {
	data := ReadOrDie(gobindataPath)
	if len(data) == 0 {
		return fmt.Errorf("Failed to read gobindata from %v", gobindataPath)
	}
	fullPath := filepath.Join(TestContext.OutputDir, outputFilename)
	dir := filepath.Dir(fullPath)
	if err := os.MkdirAll(dir, 0777); err != nil {
		return fmt.Errorf("Error while creating directory %v: %v", dir, err)
	}
	if err := ioutil.WriteFile(fullPath, data, 0644); err != nil {
		return fmt.Errorf("Error while trying to write to file %v: %v", fullPath, err)
	}
	return nil
}
// ListNamespaceEvents logs every event currently recorded in the given
// namespace, returning an error only if the listing itself fails.
func ListNamespaceEvents(c *client.Client, ns string) error {
	evts, err := c.Events(ns).List(api.ListOptions{})
	if err != nil {
		return err
	}
	for i := range evts.Items {
		e := &evts.Items[i]
		glog.Infof("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message)
	}
	return nil
}
|
package main
import (
"context"
"log"
"os"
"github.com/nimona/go-nimona/blx"
"github.com/nimona/go-nimona/dht"
"github.com/nimona/go-nimona/mesh"
"github.com/nimona/go-nimona/net"
"github.com/nimona/go-nimona/net/protocol"
)
// main starts either the well-known bootstrap node (PEER_ID=bootstrap, fixed
// port 26801) or a regular peer that dials the bootstrap address.
func main() {
	peerID := os.Getenv("PEER_ID")
	if peerID == "" {
		log.Fatal("Missing PEER_ID")
	}
	// bs collects bootstrap addresses for non-bootstrap peers.
	bs := []string{}
	port := 0
	if peerID == "bootstrap" {
		port = 26801
	} else {
		bs = append(bs, "tcp:localhost:26801/router/wire")
	}
	ctx := context.Background()
	tcp := net.NewTransportTCP("0.0.0.0", port)
	// NOTE(review): this local shadows the imported net package for the rest
	// of the function.
	net := net.New(ctx)
	rtr := protocol.NewRouter()
	pbs, _ := mesh.NewPubSub()
	reg, _ := mesh.NewRegisty(peerID, pbs)
	msh, _ := mesh.NewMesh(net, pbs, reg)
	// NOTE(review): msg is assigned but never used — this will not compile.
	msg, _ := mesh.NewMessenger(msh)
	// NOTE(review): bsp is undefined; presumably the bs slice built above —
	// confirm and rename.
	dht.NewDHT(pbs, peerID, true, bsp...)
	blx.NewBlockExchange(pbs)
	// NOTE(review): wre is undefined here — no wire protocol instance is ever
	// constructed in this file; confirm what should be registered.
	net.AddProtocols(wre)
	rtr.AddRoute(wre)
	net.AddTransport(tcp, rtr)
}
Remove examples
|
// Package broadcast implements a generic system where a user can generate
// content which can then be consumed by multiple other users. This package
// mostly handles whether or not a user is broadcasting, and what id they are
// broadcasting to
//
// - A user can only have a single broadcast at a time
//
// - It must be periodically verified that a user is still broadcasting
//
// - A signature is given when starting a broadcast which can optionally be
// later used to authenticate a broadcast ID
//
package broadcast
import (
"crypto/hmac"
"crypto/rand"
"crypto/sha1"
"encoding/base64"
"strings"
"github.com/mediocregopher/mediocre-api/common"
"github.com/mediocregopher/mediocre-api/room"
"github.com/mediocregopher/radix.v2/redis"
"github.com/mediocregopher/radix.v2/util"
)
// Errors which can be expected from various methods in this package
var (
ErrUserIsBroadcasting = common.ExpectedErr{400, "user already broadcasting"}
ErrInvalidID = common.ExpectedErr{400, "invalid broadcast.ID"}
ErrBroadcastEnded = common.ExpectedErr{400, "broadcast already ended"}
)
// EXPIREEQUAL KEY SECONDS VALUE
// Sets the given key's expire time to the given seconds, but only if the key's
// current value is equal to VALUE. Returns 1 if set, 0 if not
var expireEqual = `
local v = redis.call('GET', KEYS[1])
if v == ARGV[2] then
redis.call('EXPIRE', KEYS[1], ARGV[1])
return 1
else
return 0
end
`
// DELEQUAL KEY VALUE
// Deletes the given key, but only if the key's current value is equal to VALUE.
// Returns 1 if the key was deleted, 0 otherwise
var delEqual = `
local v = redis.call('GET', KEYS[1])
if v == ARGV[1] then
return redis.call('DEL', KEYS[1])
else
return 0
end
`
// System holds on to a room.System and implements a broadcast system around it,
// using the room.System to track what users are in what broadcasts
type System struct {
	// c is the redis client/pool that every command in this package goes through
	c util.Cmder
	// Embedded so room.System's methods are available directly on System
	*room.System
	// When set a signature will be generated for broadcast IDs which can be
	// used to authenticate that they are legitimate
	Secret []byte
	// Prefix can be filled in on a System returned from New, and is used as
	// part of a prefix on all keys used by this system. Useful if you want to
	// have two broadcast Systems using the same Cmder
	Prefix string
	// This is the amount of seconds which is allowed to elapse with no
	// StillBroadcasting calls for a broadcast before it is considered dead.
	// Defaults to 30
	AlivenessPeriod int
}
// New returns a new initialized system backed by c, with the default
// AlivenessPeriod of 30 seconds.
func New(c util.Cmder) *System {
	s := &System{c: c}
	// Callers may override the liveness window after construction.
	s.AlivenessPeriod = 30
	return s
}
// ID represents the unique identifier for a broadcast. IDs have certain data
// embedded in them, and methods for retrieving that data
type ID string

// User returns the name of the user encoded into the id, or the empty string
// when the id is not valid base64 or contains no user component.
func (id ID) User() string {
	raw, err := base64.StdEncoding.DecodeString(string(id))
	if err != nil {
		return ""
	}
	decoded := string(raw)
	// The user is everything before the final ":"; the random suffix follows it.
	if i := strings.LastIndex(decoded, ":"); i >= 0 {
		return decoded[:i]
	}
	return ""
}
// NewID returns a new broadcast ID for the given user, along with a signature
// which can verify that the holder of the id is the true owner. This method
// makes no database changes, see StartBroadcast if that's what you're looking
// for. The signature will be empty string if Secret is not set on the System
func (s *System) NewID(user string) (ID, string) {
	// 16 bytes of crypto randomness make the id unguessable.
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	benc := base64.StdEncoding.EncodeToString(b)
	// The raw id is "<user>:<random>", then base64'd as a whole; ID.User()
	// later splits on the last ":" to recover the user.
	id := user + ":" + benc
	id64 := base64.StdEncoding.EncodeToString([]byte(id))
	var sig string
	if s.Secret != nil {
		// HMAC-SHA1 over the un-encoded id, keyed by Secret.
		h := hmac.New(sha1.New, s.Secret)
		h.Write([]byte(id))
		sig = base64.StdEncoding.EncodeToString(h.Sum(nil))
	}
	return ID(id64), sig
}
// Verify returns whether or not the given sig is the valid signature for the
// given ID, i.e. they were both returned from the same call to NewID or
// StartBroadcast. Returns false if Secret is not set on the System
func (s *System) Verify(id ID, sig string) bool {
	if s.Secret == nil {
		return false
	}
	idDec, err := base64.StdEncoding.DecodeString(string(id))
	if err != nil {
		return false
	}
	// Recompute the expected signature over the decoded id.
	h := hmac.New(sha1.New, s.Secret)
	h.Write(idDec)
	realSig := base64.StdEncoding.EncodeToString(h.Sum(nil))
	// hmac.Equal compares in constant time; the original plain string ==
	// leaked timing information on attacker-supplied signatures.
	return hmac.Equal([]byte(realSig), []byte(sig))
}
// userKey returns the redis key under which user's active broadcast id is
// stored. The braces make the user name a redis cluster hash tag, and Prefix
// namespaces keys when multiple Systems share one Cmder.
func (s *System) userKey(user string) string {
	return "broadcast:" + s.Prefix + ":user:{" + user + "}"
}
// StartBroadcast returns a unique broadcast id for the user to use, and the
// signature for that id which can be used to verify they are the real
// broadcaster. The signature will be empty string if Secret is not set on the
// System. This will error if the user is already broadcasting
func (s *System) StartBroadcast(user string) (ID, string, error) {
	id, sig := s.NewID(user)
	ukey := s.userKey(user)
	// SET ... NX only succeeds if the key is absent (user not broadcasting);
	// EX makes the claim expire after AlivenessPeriod seconds unless it is
	// refreshed by StillAlive.
	r := s.c.Cmd("SET", ukey, id, "EX", s.AlivenessPeriod, "NX")
	if r.Err != nil {
		return "", "", r.Err
	} else if r.IsType(redis.Nil) {
		// Nil reply means NX blocked the SET: a broadcast already exists.
		return "", "", ErrUserIsBroadcasting
	}
	return id, sig, nil
}
// StillAlive records that the broadcast is still actively going. This must be
// called periodically or the user will no longer be considered broadcasting,
// see AlivenessPeriod
func (s *System) StillAlive(id ID) error {
	user := id.User()
	if user == "" {
		return ErrInvalidID
	}
	key := s.userKey(user)
	// EXPIREEQUAL refreshes the TTL only if the stored id still matches, so a
	// stale id from an ended broadcast cannot resurrect the key.
	i, err := util.LuaEval(s.c, expireEqual, 1, key, s.AlivenessPeriod, string(id)).Int()
	if err != nil {
		return err
	}
	if i == 0 {
		// Key expired or holds a different id: this broadcast is over.
		return ErrBroadcastEnded
	}
	return nil
}
// Ended records that a broadcast has ended and that the user is no longer
// broadcasting
func (s *System) Ended(id ID) error {
	user := id.User()
	if user == "" {
		return ErrInvalidID
	}
	key := s.userKey(user)
	// DELEQUAL deletes only if the stored id matches, so an old id cannot tear
	// down a newer broadcast by the same user.
	i, err := util.LuaEval(s.c, delEqual, 1, key, string(id)).Int()
	if err != nil {
		return err
	}
	if i == 0 {
		return ErrBroadcastEnded
	}
	return nil
}
// GetBroadcastID returns the currently active broadcast id for the user, or
// empty string if they are not broadcasting. An error is only returned in the
// case of a database error
func (s *System) GetBroadcastID(user string) (ID, error) {
	key := s.userKey(user)
	r := s.c.Cmd("GET", key)
	if r.IsType(redis.Nil) {
		// No key: the user is not broadcasting.
		return "", nil
	}
	idStr, err := r.Str()
	if err != nil {
		return "", err
	}
	id := ID(idStr)
	if id.User() != user {
		// This isn't expected to happen, but I'd like to enforce that any ID
		// returned from this package is a valid one
		return "", ErrInvalidID
	}
	return id, nil
}
use base64.URLEncoding instead of base64.StdEncoding in room/broadcast, since std may include a forward slash, which makes using broadcast ids or sigs in urls basically impossible
// Package broadcast implements a generic system where a user can generate
// content which can then be consumed by multiple other users. This package
// mostly handles whether or not a user is broadcasting, and what id they are
// broadcasting to
//
// - A user can only have a single broadcast at a time
//
// - It must be periodically verified that a user is still broadcasting
//
// - A signature is given when starting a broadcast which can optionally be
// later used to authenticate a broadcast ID
//
package broadcast
import (
"crypto/hmac"
"crypto/rand"
"crypto/sha1"
"encoding/base64"
"strings"
"github.com/mediocregopher/mediocre-api/common"
"github.com/mediocregopher/mediocre-api/room"
"github.com/mediocregopher/radix.v2/redis"
"github.com/mediocregopher/radix.v2/util"
)
// Errors which can be expected from various methods in this package
var (
ErrUserIsBroadcasting = common.ExpectedErr{400, "user already broadcasting"}
ErrInvalidID = common.ExpectedErr{400, "invalid broadcast.ID"}
ErrBroadcastEnded = common.ExpectedErr{400, "broadcast already ended"}
)
// EXPIREEQUAL KEY SECONDS VALUE
// Sets the given key's expire time to the given seconds, but only if the key's
// current value is equal to VALUE. Returns 1 if set, 0 if not
var expireEqual = `
local v = redis.call('GET', KEYS[1])
if v == ARGV[2] then
redis.call('EXPIRE', KEYS[1], ARGV[1])
return 1
else
return 0
end
`
// DELEQUAL KEY VALUE
// Deletes the given key, but only if the key's current value is equal to VALUE.
// Returns 1 if the key was deleted, 0 otherwise
var delEqual = `
local v = redis.call('GET', KEYS[1])
if v == ARGV[1] then
return redis.call('DEL', KEYS[1])
else
return 0
end
`
// System holds on to a room.System and implements a broadcast system around it,
// using the room.System to track what users are in what broadcasts
type System struct {
	// c is the redis client/pool that every command in this package goes through
	c util.Cmder
	// Embedded so room.System's methods are available directly on System
	*room.System
	// When set a signature will be generated for broadcast IDs which can be
	// used to authenticate that they are legitimate
	Secret []byte
	// Prefix can be filled in on a System returned from New, and is used as
	// part of a prefix on all keys used by this system. Useful if you want to
	// have two broadcast Systems using the same Cmder
	Prefix string
	// This is the amount of seconds which is allowed to elapse with no
	// StillBroadcasting calls for a broadcast before it is considered dead.
	// Defaults to 30
	AlivenessPeriod int
}
// New returns a new initialized system backed by c, with the default
// AlivenessPeriod of 30 seconds.
func New(c util.Cmder) *System {
	s := &System{c: c}
	// Callers may override the liveness window after construction.
	s.AlivenessPeriod = 30
	return s
}
// ID represents the unique identifier for a broadcast. IDs have certain data
// embedded in them, and methods for retrieving that data
type ID string

// User returns the name of the user encoded into the id, or the empty string
// when the id is not valid URL-safe base64 or contains no user component.
func (id ID) User() string {
	raw, err := base64.URLEncoding.DecodeString(string(id))
	if err != nil {
		return ""
	}
	decoded := string(raw)
	// The user is everything before the final ":"; the random suffix follows it.
	if i := strings.LastIndex(decoded, ":"); i >= 0 {
		return decoded[:i]
	}
	return ""
}
// NewID returns a new broadcast ID for the given user, along with a signature
// which can verify that the holder of the id is the true owner. This method
// makes no database changes, see StartBroadcast if that's what you're looking
// for. The signature will be empty string if Secret is not set on the System
func (s *System) NewID(user string) (ID, string) {
	// 16 bytes of crypto randomness make the id unguessable.
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	benc := base64.URLEncoding.EncodeToString(b)
	// The raw id is "<user>:<random>", then base64'd as a whole; ID.User()
	// later splits on the last ":" to recover the user. URLEncoding keeps the
	// id and sig safe for use in URLs.
	id := user + ":" + benc
	id64 := base64.URLEncoding.EncodeToString([]byte(id))
	var sig string
	if s.Secret != nil {
		// HMAC-SHA1 over the un-encoded id, keyed by Secret.
		h := hmac.New(sha1.New, s.Secret)
		h.Write([]byte(id))
		sig = base64.URLEncoding.EncodeToString(h.Sum(nil))
	}
	return ID(id64), sig
}
// Verify returns whether or not the given sig is the valid signature for the
// given ID, i.e. they were both returned from the same call to NewID or
// StartBroadcast. Returns false if Secret is not set on the System
func (s *System) Verify(id ID, sig string) bool {
	if s.Secret == nil {
		return false
	}
	idDec, err := base64.URLEncoding.DecodeString(string(id))
	if err != nil {
		return false
	}
	// Recompute the expected signature over the decoded id.
	h := hmac.New(sha1.New, s.Secret)
	h.Write(idDec)
	realSig := base64.URLEncoding.EncodeToString(h.Sum(nil))
	// hmac.Equal compares in constant time; the original plain string ==
	// leaked timing information on attacker-supplied signatures.
	return hmac.Equal([]byte(realSig), []byte(sig))
}
// userKey returns the redis key under which user's active broadcast id is
// stored. The braces make the user name a redis cluster hash tag, and Prefix
// namespaces keys when multiple Systems share one Cmder.
func (s *System) userKey(user string) string {
	return "broadcast:" + s.Prefix + ":user:{" + user + "}"
}
// StartBroadcast returns a unique broadcast id for the user to use, and the
// signature for that id which can be used to verify they are the real
// broadcaster. The signature will be empty string if Secret is not set on the
// System. This will error if the user is already broadcasting
func (s *System) StartBroadcast(user string) (ID, string, error) {
	id, sig := s.NewID(user)
	ukey := s.userKey(user)
	// SET ... NX only succeeds if the key is absent (user not broadcasting);
	// EX makes the claim expire after AlivenessPeriod seconds unless it is
	// refreshed by StillAlive.
	r := s.c.Cmd("SET", ukey, id, "EX", s.AlivenessPeriod, "NX")
	if r.Err != nil {
		return "", "", r.Err
	} else if r.IsType(redis.Nil) {
		// Nil reply means NX blocked the SET: a broadcast already exists.
		return "", "", ErrUserIsBroadcasting
	}
	return id, sig, nil
}
// StillAlive records that the broadcast is still actively going. This must be
// called periodically or the user will no longer be considered broadcasting,
// see AlivenessPeriod
func (s *System) StillAlive(id ID) error {
	user := id.User()
	if user == "" {
		return ErrInvalidID
	}
	key := s.userKey(user)
	// EXPIREEQUAL refreshes the TTL only if the stored id still matches, so a
	// stale id from an ended broadcast cannot resurrect the key.
	i, err := util.LuaEval(s.c, expireEqual, 1, key, s.AlivenessPeriod, string(id)).Int()
	if err != nil {
		return err
	}
	if i == 0 {
		// Key expired or holds a different id: this broadcast is over.
		return ErrBroadcastEnded
	}
	return nil
}
// Ended records that a broadcast has ended and that the user is no longer
// broadcasting
func (s *System) Ended(id ID) error {
	user := id.User()
	if user == "" {
		return ErrInvalidID
	}
	key := s.userKey(user)
	// DELEQUAL deletes only if the stored id matches, so an old id cannot tear
	// down a newer broadcast by the same user.
	i, err := util.LuaEval(s.c, delEqual, 1, key, string(id)).Int()
	if err != nil {
		return err
	}
	if i == 0 {
		return ErrBroadcastEnded
	}
	return nil
}
// GetBroadcastID returns the currently active broadcast id for the user, or
// empty string if they are not broadcasting. An error is only returned in the
// case of a database error
func (s *System) GetBroadcastID(user string) (ID, error) {
	key := s.userKey(user)
	r := s.c.Cmd("GET", key)
	if r.IsType(redis.Nil) {
		// No key: the user is not broadcasting.
		return "", nil
	}
	idStr, err := r.Str()
	if err != nil {
		return "", err
	}
	id := ID(idStr)
	if id.User() != user {
		// This isn't expected to happen, but I'd like to enforce that any ID
		// returned from this package is a valid one
		return "", ErrInvalidID
	}
	return id, nil
}
|
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package feed
import (
"fmt"
"html"
"net/http"
"net/url"
"strconv"
"strings"
activities_model "code.gitea.io/gitea/models/activities"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/markup"
"code.gitea.io/gitea/modules/markup/markdown"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/templates"
"code.gitea.io/gitea/modules/util"
"github.com/gorilla/feeds"
)
// toBranchLink returns the absolute URL of the branch the action refers to.
func toBranchLink(act *activities_model.Action) string {
	return act.GetRepoAbsoluteLink() + "/src/branch/" + util.PathEscapeSegments(act.GetBranch())
}
// toTagLink returns the absolute URL of the tag the action refers to.
func toTagLink(act *activities_model.Action) string {
	return act.GetRepoAbsoluteLink() + "/src/tag/" + util.PathEscapeSegments(act.GetTag())
}
// toIssueLink returns the absolute URL of the issue referenced by the action.
func toIssueLink(act *activities_model.Action) string {
	return act.GetRepoAbsoluteLink() + "/issues/" + url.PathEscape(act.GetIssueInfos()[0])
}
// toPullLink returns the absolute URL of the pull request referenced by the action.
func toPullLink(act *activities_model.Action) string {
	return act.GetRepoAbsoluteLink() + "/pulls/" + url.PathEscape(act.GetIssueInfos()[0])
}
// toSrcLink returns the absolute URL of the source tree at the action's ref.
func toSrcLink(act *activities_model.Action) string {
	return act.GetRepoAbsoluteLink() + "/src/" + util.PathEscapeSegments(act.GetBranch())
}
// toReleaseLink returns the absolute URL of the release page for the tag.
// NOTE(review): uses GetBranch() for the tag segment — presumably release
// actions carry the tag name in the branch field; confirm against callers.
func toReleaseLink(act *activities_model.Action) string {
	return act.GetRepoAbsoluteLink() + "/releases/tag/" + util.PathEscapeSegments(act.GetBranch())
}
// renderMarkdown creates a minimal markdown render context from an action.
// If rendering fails, the original markdown text is returned
func renderMarkdown(ctx *context.Context, act *activities_model.Action, content string) string {
	markdownCtx := &markup.RenderContext{
		Ctx:       ctx,
		URLPrefix: act.GetRepoLink(),
		Type:      markdown.MarkupName,
		Metas: map[string]string{
			"user": act.GetRepoUserName(),
			"repo": act.GetRepoName(),
		},
	}
	// Named rendered rather than markdown: the original local shadowed the
	// imported markdown package on its own initializer line.
	rendered, err := markdown.RenderString(markdownCtx, content)
	if err != nil {
		// Fall back to the raw markdown source on render failure.
		return content
	}
	return rendered
}
// feedActionsToFeedItems converts gitea's Action feed to a list of feeds.Item.
// For each action it builds a localized HTML title, a permalink, and — where
// the action type carries one — a description/content body (commit lists or
// rendered markdown). An unknown action type aborts the conversion with an
// error.
func feedActionsToFeedItems(ctx *context.Context, actions activities_model.ActionList) (items []*feeds.Item, err error) {
	for _, act := range actions {
		// NOTE(review): LoadActUser's result is ignored here; act.ActUser is
		// dereferenced below — confirm the loader guarantees a non-nil user.
		act.LoadActUser()

		var content, desc, title string

		// GetCommentLink yields "#" when there is no comment; the switch
		// below replaces that placeholder with a more specific target.
		link := &feeds.Link{Href: act.GetCommentLink()}

		// title
		title = act.ActUser.DisplayName() + " "
		switch act.OpType {
		case activities_model.ActionCreateRepo:
			title += ctx.TrHTMLEscapeArgs("action.create_repo", act.GetRepoAbsoluteLink(), act.ShortRepoPath())
			link.Href = act.GetRepoAbsoluteLink()
		case activities_model.ActionRenameRepo:
			title += ctx.TrHTMLEscapeArgs("action.rename_repo", act.GetContent(), act.GetRepoAbsoluteLink(), act.ShortRepoPath())
			link.Href = act.GetRepoAbsoluteLink()
		case activities_model.ActionCommitRepo:
			link.Href = toBranchLink(act)
			if len(act.Content) != 0 {
				title += ctx.TrHTMLEscapeArgs("action.commit_repo", act.GetRepoAbsoluteLink(), link.Href, act.GetBranch(), act.ShortRepoPath())
			} else {
				title += ctx.TrHTMLEscapeArgs("action.create_branch", act.GetRepoAbsoluteLink(), link.Href, act.GetBranch(), act.ShortRepoPath())
			}
		case activities_model.ActionCreateIssue:
			link.Href = toIssueLink(act)
			title += ctx.TrHTMLEscapeArgs("action.create_issue", link.Href, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionCreatePullRequest:
			link.Href = toPullLink(act)
			title += ctx.TrHTMLEscapeArgs("action.create_pull_request", link.Href, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionTransferRepo:
			link.Href = act.GetRepoAbsoluteLink()
			title += ctx.TrHTMLEscapeArgs("action.transfer_repo", act.GetContent(), act.GetRepoAbsoluteLink(), act.ShortRepoPath())
		case activities_model.ActionPushTag:
			link.Href = toTagLink(act)
			title += ctx.TrHTMLEscapeArgs("action.push_tag", act.GetRepoAbsoluteLink(), link.Href, act.GetTag(), act.ShortRepoPath())
		case activities_model.ActionCommentIssue:
			issueLink := toIssueLink(act)
			if link.Href == "#" {
				link.Href = issueLink
			}
			title += ctx.TrHTMLEscapeArgs("action.comment_issue", issueLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionMergePullRequest:
			pullLink := toPullLink(act)
			if link.Href == "#" {
				link.Href = pullLink
			}
			title += ctx.TrHTMLEscapeArgs("action.merge_pull_request", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionCloseIssue:
			issueLink := toIssueLink(act)
			if link.Href == "#" {
				link.Href = issueLink
			}
			title += ctx.TrHTMLEscapeArgs("action.close_issue", issueLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionReopenIssue:
			issueLink := toIssueLink(act)
			if link.Href == "#" {
				link.Href = issueLink
			}
			title += ctx.TrHTMLEscapeArgs("action.reopen_issue", issueLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionClosePullRequest:
			pullLink := toPullLink(act)
			if link.Href == "#" {
				link.Href = pullLink
			}
			title += ctx.TrHTMLEscapeArgs("action.close_pull_request", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionReopenPullRequest:
			pullLink := toPullLink(act)
			if link.Href == "#" {
				link.Href = pullLink
			}
			title += ctx.TrHTMLEscapeArgs("action.reopen_pull_request", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionDeleteTag:
			link.Href = act.GetRepoAbsoluteLink()
			title += ctx.TrHTMLEscapeArgs("action.delete_tag", act.GetRepoAbsoluteLink(), act.GetTag(), act.ShortRepoPath())
		case activities_model.ActionDeleteBranch:
			link.Href = act.GetRepoAbsoluteLink()
			title += ctx.TrHTMLEscapeArgs("action.delete_branch", act.GetRepoAbsoluteLink(), html.EscapeString(act.GetBranch()), act.ShortRepoPath())
		case activities_model.ActionMirrorSyncPush:
			srcLink := toSrcLink(act)
			if link.Href == "#" {
				link.Href = srcLink
			}
			title += ctx.TrHTMLEscapeArgs("action.mirror_sync_push", act.GetRepoAbsoluteLink(), srcLink, act.GetBranch(), act.ShortRepoPath())
		case activities_model.ActionMirrorSyncCreate:
			srcLink := toSrcLink(act)
			if link.Href == "#" {
				link.Href = srcLink
			}
			title += ctx.TrHTMLEscapeArgs("action.mirror_sync_create", act.GetRepoAbsoluteLink(), srcLink, act.GetBranch(), act.ShortRepoPath())
		case activities_model.ActionMirrorSyncDelete:
			link.Href = act.GetRepoAbsoluteLink()
			title += ctx.TrHTMLEscapeArgs("action.mirror_sync_delete", act.GetRepoAbsoluteLink(), act.GetBranch(), act.ShortRepoPath())
		case activities_model.ActionApprovePullRequest:
			pullLink := toPullLink(act)
			title += ctx.TrHTMLEscapeArgs("action.approve_pull_request", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionRejectPullRequest:
			pullLink := toPullLink(act)
			title += ctx.TrHTMLEscapeArgs("action.reject_pull_request", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionCommentPull:
			pullLink := toPullLink(act)
			title += ctx.TrHTMLEscapeArgs("action.comment_pull", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionPublishRelease:
			releaseLink := toReleaseLink(act)
			if link.Href == "#" {
				link.Href = releaseLink
			}
			title += ctx.TrHTMLEscapeArgs("action.publish_release", act.GetRepoAbsoluteLink(), releaseLink, act.ShortRepoPath(), act.Content)
		case activities_model.ActionPullReviewDismissed:
			pullLink := toPullLink(act)
			title += ctx.TrHTMLEscapeArgs("action.review_dismissed", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath(), act.GetIssueInfos()[1])
		case activities_model.ActionStarRepo:
			link.Href = act.GetRepoAbsoluteLink()
			title += ctx.TrHTMLEscapeArgs("action.starred_repo", act.GetRepoAbsoluteLink(), act.GetRepoPath())
		case activities_model.ActionWatchRepo:
			link.Href = act.GetRepoAbsoluteLink()
			title += ctx.TrHTMLEscapeArgs("action.watched_repo", act.GetRepoAbsoluteLink(), act.GetRepoPath())
		default:
			return nil, fmt.Errorf("unknown action type: %v", act.OpType)
		}

		// description & content
		{
			switch act.OpType {
			case activities_model.ActionCommitRepo, activities_model.ActionMirrorSyncPush:
				push := templates.ActionContent2Commits(act)
				repoLink := act.GetRepoAbsoluteLink()
				for _, commit := range push.Commits {
					if len(desc) != 0 {
						desc += "\n\n"
					}
					desc += fmt.Sprintf("<a href=\"%s\">%s</a>\n%s",
						html.EscapeString(fmt.Sprintf("%s/commit/%s", act.GetRepoAbsoluteLink(), commit.Sha1)),
						commit.Sha1,
						templates.RenderCommitMessage(ctx, commit.Message, repoLink, nil),
					)
				}
				// Point the item at the compare view for multi-commit pushes,
				// or directly at the single commit.
				if push.Len > 1 {
					link = &feeds.Link{Href: fmt.Sprintf("%s/%s", setting.AppSubURL, push.CompareURL)}
				} else if push.Len == 1 {
					link = &feeds.Link{Href: fmt.Sprintf("%s/commit/%s", act.GetRepoAbsoluteLink(), push.Commits[0].Sha1)}
				}
			case activities_model.ActionCreateIssue, activities_model.ActionCreatePullRequest:
				desc = strings.Join(act.GetIssueInfos(), "#")
				content = renderMarkdown(ctx, act, act.GetIssueContent())
			case activities_model.ActionCommentIssue, activities_model.ActionApprovePullRequest, activities_model.ActionRejectPullRequest, activities_model.ActionCommentPull:
				desc = act.GetIssueTitle()
				comment := act.GetIssueInfos()[1]
				if len(comment) != 0 {
					desc += "\n\n" + renderMarkdown(ctx, act, comment)
				}
			case activities_model.ActionMergePullRequest:
				desc = act.GetIssueInfos()[1]
			case activities_model.ActionCloseIssue, activities_model.ActionReopenIssue, activities_model.ActionClosePullRequest, activities_model.ActionReopenPullRequest:
				desc = act.GetIssueTitle()
			case activities_model.ActionPullReviewDismissed:
				desc = ctx.Tr("action.review_dismissed_reason") + "\n\n" + act.GetIssueInfos()[2]
			}
		}
		if len(content) == 0 {
			content = desc
		}

		items = append(items, &feeds.Item{
			Title:       title,
			Link:        link,
			Description: desc,
			Author: &feeds.Author{
				Name:  act.ActUser.DisplayName(),
				Email: act.ActUser.GetEmail(),
			},
			// The RSS <guid> / Atom <id> must be globally unique, not merely
			// unique within one database: combine the action ID with the
			// item's link instead of emitting the bare numeric ID.
			Id:      fmt.Sprintf("%v: %v", strconv.FormatInt(act.ID, 10), link.Href),
			Created: act.CreatedUnix.AsTime(),
			Content: content,
		})
	}
	return items, err
}
// GetFeedType return if it is a feed request and altered name and feed type.
func GetFeedType(name string, req *http.Request) (bool, string, string) {
if strings.HasSuffix(name, ".rss") ||
strings.Contains(req.Header.Get("Accept"), "application/rss+xml") {
return true, strings.TrimSuffix(name, ".rss"), "rss"
}
if strings.HasSuffix(name, ".atom") ||
strings.Contains(req.Header.Get("Accept"), "application/atom+xml") {
return true, strings.TrimSuffix(name, ".atom"), "atom"
}
return false, name, ""
}
Make rss/atom identifier globally unique (#21550)
This field should be globally unique.
[RSS
reference](https://www.rssboard.org/rss-specification#ltguidgtSubelementOfLtitemgt).
### Before
```xml
<item>
<title>abc opened issue <a href="https://3000-yardenshoham-gitea-3pzuhkduf6t.ws-eu72.gitpod.io/abc/defg/issues/1">abc/defg#1</a></title>
<link>https://3000-yardenshoham-gitea-3pzuhkduf6t.ws-eu72.gitpod.io/abc/defg/issues/1</link>
<description>1#Colors</description>
<content:encoded><![CDATA[<p><code>#FF0000<span class="color-preview" style="background-color: #FF0000"></span></code></p>
]]></content:encoded>
<author>abc</author>
<guid>2</guid>
<pubDate>Mon, 17 Oct 2022 16:06:08 +0000</pubDate>
</item>
<item>
<title>abc created repository <a href="https://3000-yardenshoham-gitea-3pzuhkduf6t.ws-eu72.gitpod.io/abc/defg">abc/defg</a></title>
<link>https://3000-yardenshoham-gitea-3pzuhkduf6t.ws-eu72.gitpod.io/abc/defg</link>
<description></description>
<author>abc</author>
<guid>1</guid>
<pubDate>Mon, 17 Oct 2022 16:05:43 +0000</pubDate>
</item>
```
### After
```xml
<item>
<title>abc opened issue <a href="https://3000-yardenshoham-gitea-3pzuhkduf6t.ws-eu72.gitpod.io/abc/defg/issues/1">abc/defg#1</a></title>
<link>https://3000-yardenshoham-gitea-3pzuhkduf6t.ws-eu72.gitpod.io/abc/defg/issues/1</link>
<description>1#Colors</description>
<content:encoded><![CDATA[<p><code>#FF0000<span class="color-preview" style="background-color: #FF0000"></span></code></p>
]]></content:encoded>
<author>abc</author>
<guid>2: https://3000-yardenshoham-gitea-3pzuhkduf6t.ws-eu72.gitpod.io/abc/defg/issues/1</guid>
<pubDate>Mon, 17 Oct 2022 16:06:08 +0000</pubDate>
</item>
<item>
<title>abc created repository <a href="https://3000-yardenshoham-gitea-3pzuhkduf6t.ws-eu72.gitpod.io/abc/defg">abc/defg</a></title>
<link>https://3000-yardenshoham-gitea-3pzuhkduf6t.ws-eu72.gitpod.io/abc/defg</link>
<description></description>
<author>abc</author>
<guid>1: https://3000-yardenshoham-gitea-3pzuhkduf6t.ws-eu72.gitpod.io/abc/defg</guid>
<pubDate>Mon, 17 Oct 2022 16:05:43 +0000</pubDate>
</item>
```
* Fixes #21542
Signed-off-by: Yarden Shoham <0b3acf1045a60ccc22a11aaedae8f612108f28a6@gmail.com>
Co-authored-by: Lauris BH <f3041c4c693c07149804ea655e75ee8bb856bd26@nix.lv>
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package feed
import (
"fmt"
"html"
"net/http"
"net/url"
"strconv"
"strings"
activities_model "code.gitea.io/gitea/models/activities"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/markup"
"code.gitea.io/gitea/modules/markup/markdown"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/templates"
"code.gitea.io/gitea/modules/util"
"github.com/gorilla/feeds"
)
// toBranchLink returns the absolute URL of the branch the action refers to.
func toBranchLink(act *activities_model.Action) string {
	return act.GetRepoAbsoluteLink() + "/src/branch/" + util.PathEscapeSegments(act.GetBranch())
}

// toTagLink returns the absolute URL of the tag the action refers to.
func toTagLink(act *activities_model.Action) string {
	return act.GetRepoAbsoluteLink() + "/src/tag/" + util.PathEscapeSegments(act.GetTag())
}

// toIssueLink returns the absolute URL of the issue the action refers to
// (the issue index is the first entry of GetIssueInfos).
func toIssueLink(act *activities_model.Action) string {
	return act.GetRepoAbsoluteLink() + "/issues/" + url.PathEscape(act.GetIssueInfos()[0])
}

// toPullLink returns the absolute URL of the pull request the action refers to.
func toPullLink(act *activities_model.Action) string {
	return act.GetRepoAbsoluteLink() + "/pulls/" + url.PathEscape(act.GetIssueInfos()[0])
}

// toSrcLink returns the absolute URL of the source tree at the action's ref.
func toSrcLink(act *activities_model.Action) string {
	return act.GetRepoAbsoluteLink() + "/src/" + util.PathEscapeSegments(act.GetBranch())
}

// toReleaseLink returns the absolute URL of the release the action refers to.
// NOTE(review): this reads GetBranch() for the tag segment — presumably the
// stored ref is the tag name for release actions; confirm against the writer.
func toReleaseLink(act *activities_model.Action) string {
	return act.GetRepoAbsoluteLink() + "/releases/tag/" + util.PathEscapeSegments(act.GetBranch())
}
// renderMarkdown creates a minimal markdown render context from an action
// and renders content with it. The context carries the repository link plus
// user/repo metas so relative links and issue references resolve against the
// action's repository.
// If rendering fails, the original markdown text is returned unchanged so
// callers always have something to display.
func renderMarkdown(ctx *context.Context, act *activities_model.Action, content string) string {
	markdownCtx := &markup.RenderContext{
		Ctx:       ctx,
		URLPrefix: act.GetRepoLink(),
		Type:      markdown.MarkupName,
		Metas: map[string]string{
			"user": act.GetRepoUserName(),
			"repo": act.GetRepoName(),
		},
	}
	markdown, err := markdown.RenderString(markdownCtx, content)
	if err != nil {
		return content
	}
	return markdown
}
// feedActionsToFeedItems converts gitea's Action feed to a list of feeds.Item.
// For each action it builds a localized HTML title, a permalink, and — where
// the action type carries one — a description/content body (commit lists or
// rendered markdown). An unknown action type aborts the conversion with an
// error.
func feedActionsToFeedItems(ctx *context.Context, actions activities_model.ActionList) (items []*feeds.Item, err error) {
	for _, act := range actions {
		// NOTE(review): LoadActUser's result is ignored here; act.ActUser is
		// dereferenced below — confirm the loader guarantees a non-nil user.
		act.LoadActUser()
		var content, desc, title string
		// GetCommentLink yields "#" when there is no comment; the switch
		// below replaces that placeholder with a more specific target.
		link := &feeds.Link{Href: act.GetCommentLink()}
		// title
		title = act.ActUser.DisplayName() + " "
		switch act.OpType {
		case activities_model.ActionCreateRepo:
			title += ctx.TrHTMLEscapeArgs("action.create_repo", act.GetRepoAbsoluteLink(), act.ShortRepoPath())
			link.Href = act.GetRepoAbsoluteLink()
		case activities_model.ActionRenameRepo:
			title += ctx.TrHTMLEscapeArgs("action.rename_repo", act.GetContent(), act.GetRepoAbsoluteLink(), act.ShortRepoPath())
			link.Href = act.GetRepoAbsoluteLink()
		case activities_model.ActionCommitRepo:
			link.Href = toBranchLink(act)
			if len(act.Content) != 0 {
				title += ctx.TrHTMLEscapeArgs("action.commit_repo", act.GetRepoAbsoluteLink(), link.Href, act.GetBranch(), act.ShortRepoPath())
			} else {
				title += ctx.TrHTMLEscapeArgs("action.create_branch", act.GetRepoAbsoluteLink(), link.Href, act.GetBranch(), act.ShortRepoPath())
			}
		case activities_model.ActionCreateIssue:
			link.Href = toIssueLink(act)
			title += ctx.TrHTMLEscapeArgs("action.create_issue", link.Href, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionCreatePullRequest:
			link.Href = toPullLink(act)
			title += ctx.TrHTMLEscapeArgs("action.create_pull_request", link.Href, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionTransferRepo:
			link.Href = act.GetRepoAbsoluteLink()
			title += ctx.TrHTMLEscapeArgs("action.transfer_repo", act.GetContent(), act.GetRepoAbsoluteLink(), act.ShortRepoPath())
		case activities_model.ActionPushTag:
			link.Href = toTagLink(act)
			title += ctx.TrHTMLEscapeArgs("action.push_tag", act.GetRepoAbsoluteLink(), link.Href, act.GetTag(), act.ShortRepoPath())
		case activities_model.ActionCommentIssue:
			issueLink := toIssueLink(act)
			if link.Href == "#" {
				link.Href = issueLink
			}
			title += ctx.TrHTMLEscapeArgs("action.comment_issue", issueLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionMergePullRequest:
			pullLink := toPullLink(act)
			if link.Href == "#" {
				link.Href = pullLink
			}
			title += ctx.TrHTMLEscapeArgs("action.merge_pull_request", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionCloseIssue:
			issueLink := toIssueLink(act)
			if link.Href == "#" {
				link.Href = issueLink
			}
			title += ctx.TrHTMLEscapeArgs("action.close_issue", issueLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionReopenIssue:
			issueLink := toIssueLink(act)
			if link.Href == "#" {
				link.Href = issueLink
			}
			title += ctx.TrHTMLEscapeArgs("action.reopen_issue", issueLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionClosePullRequest:
			pullLink := toPullLink(act)
			if link.Href == "#" {
				link.Href = pullLink
			}
			title += ctx.TrHTMLEscapeArgs("action.close_pull_request", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionReopenPullRequest:
			pullLink := toPullLink(act)
			if link.Href == "#" {
				link.Href = pullLink
			}
			title += ctx.TrHTMLEscapeArgs("action.reopen_pull_request", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionDeleteTag:
			link.Href = act.GetRepoAbsoluteLink()
			title += ctx.TrHTMLEscapeArgs("action.delete_tag", act.GetRepoAbsoluteLink(), act.GetTag(), act.ShortRepoPath())
		case activities_model.ActionDeleteBranch:
			link.Href = act.GetRepoAbsoluteLink()
			title += ctx.TrHTMLEscapeArgs("action.delete_branch", act.GetRepoAbsoluteLink(), html.EscapeString(act.GetBranch()), act.ShortRepoPath())
		case activities_model.ActionMirrorSyncPush:
			srcLink := toSrcLink(act)
			if link.Href == "#" {
				link.Href = srcLink
			}
			title += ctx.TrHTMLEscapeArgs("action.mirror_sync_push", act.GetRepoAbsoluteLink(), srcLink, act.GetBranch(), act.ShortRepoPath())
		case activities_model.ActionMirrorSyncCreate:
			srcLink := toSrcLink(act)
			if link.Href == "#" {
				link.Href = srcLink
			}
			title += ctx.TrHTMLEscapeArgs("action.mirror_sync_create", act.GetRepoAbsoluteLink(), srcLink, act.GetBranch(), act.ShortRepoPath())
		case activities_model.ActionMirrorSyncDelete:
			link.Href = act.GetRepoAbsoluteLink()
			title += ctx.TrHTMLEscapeArgs("action.mirror_sync_delete", act.GetRepoAbsoluteLink(), act.GetBranch(), act.ShortRepoPath())
		case activities_model.ActionApprovePullRequest:
			pullLink := toPullLink(act)
			title += ctx.TrHTMLEscapeArgs("action.approve_pull_request", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionRejectPullRequest:
			pullLink := toPullLink(act)
			title += ctx.TrHTMLEscapeArgs("action.reject_pull_request", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionCommentPull:
			pullLink := toPullLink(act)
			title += ctx.TrHTMLEscapeArgs("action.comment_pull", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath())
		case activities_model.ActionPublishRelease:
			releaseLink := toReleaseLink(act)
			if link.Href == "#" {
				link.Href = releaseLink
			}
			title += ctx.TrHTMLEscapeArgs("action.publish_release", act.GetRepoAbsoluteLink(), releaseLink, act.ShortRepoPath(), act.Content)
		case activities_model.ActionPullReviewDismissed:
			pullLink := toPullLink(act)
			title += ctx.TrHTMLEscapeArgs("action.review_dismissed", pullLink, act.GetIssueInfos()[0], act.ShortRepoPath(), act.GetIssueInfos()[1])
		case activities_model.ActionStarRepo:
			link.Href = act.GetRepoAbsoluteLink()
			title += ctx.TrHTMLEscapeArgs("action.starred_repo", act.GetRepoAbsoluteLink(), act.GetRepoPath())
		case activities_model.ActionWatchRepo:
			link.Href = act.GetRepoAbsoluteLink()
			title += ctx.TrHTMLEscapeArgs("action.watched_repo", act.GetRepoAbsoluteLink(), act.GetRepoPath())
		default:
			return nil, fmt.Errorf("unknown action type: %v", act.OpType)
		}
		// description & content
		{
			switch act.OpType {
			case activities_model.ActionCommitRepo, activities_model.ActionMirrorSyncPush:
				push := templates.ActionContent2Commits(act)
				repoLink := act.GetRepoAbsoluteLink()
				for _, commit := range push.Commits {
					if len(desc) != 0 {
						desc += "\n\n"
					}
					desc += fmt.Sprintf("<a href=\"%s\">%s</a>\n%s",
						html.EscapeString(fmt.Sprintf("%s/commit/%s", act.GetRepoAbsoluteLink(), commit.Sha1)),
						commit.Sha1,
						templates.RenderCommitMessage(ctx, commit.Message, repoLink, nil),
					)
				}
				// Point the item at the compare view for multi-commit pushes,
				// or directly at the single commit.
				if push.Len > 1 {
					link = &feeds.Link{Href: fmt.Sprintf("%s/%s", setting.AppSubURL, push.CompareURL)}
				} else if push.Len == 1 {
					link = &feeds.Link{Href: fmt.Sprintf("%s/commit/%s", act.GetRepoAbsoluteLink(), push.Commits[0].Sha1)}
				}
			case activities_model.ActionCreateIssue, activities_model.ActionCreatePullRequest:
				desc = strings.Join(act.GetIssueInfos(), "#")
				content = renderMarkdown(ctx, act, act.GetIssueContent())
			case activities_model.ActionCommentIssue, activities_model.ActionApprovePullRequest, activities_model.ActionRejectPullRequest, activities_model.ActionCommentPull:
				desc = act.GetIssueTitle()
				comment := act.GetIssueInfos()[1]
				if len(comment) != 0 {
					desc += "\n\n" + renderMarkdown(ctx, act, comment)
				}
			case activities_model.ActionMergePullRequest:
				desc = act.GetIssueInfos()[1]
			case activities_model.ActionCloseIssue, activities_model.ActionReopenIssue, activities_model.ActionClosePullRequest, activities_model.ActionReopenPullRequest:
				desc = act.GetIssueTitle()
			case activities_model.ActionPullReviewDismissed:
				desc = ctx.Tr("action.review_dismissed_reason") + "\n\n" + act.GetIssueInfos()[2]
			}
		}
		if len(content) == 0 {
			content = desc
		}
		items = append(items, &feeds.Item{
			Title:       title,
			Link:        link,
			Description: desc,
			Author: &feeds.Author{
				Name:  act.ActUser.DisplayName(),
				Email: act.ActUser.GetEmail(),
			},
			// The RSS <guid> / Atom <id> must be globally unique, hence the
			// combination of the action ID with the item's link.
			Id:      fmt.Sprintf("%v: %v", strconv.FormatInt(act.ID, 10), link.Href),
			Created: act.CreatedUnix.AsTime(),
			Content: content,
		})
	}
	return items, err
}
// GetFeedType reports whether the request is a feed request, and returns the
// name stripped of its feed suffix together with the feed type ("rss" or
// "atom"). A request matches either by a ".rss"/".atom" suffix on name or by
// the corresponding MIME type in the Accept header; RSS is checked first.
func GetFeedType(name string, req *http.Request) (bool, string, string) {
	if strings.HasSuffix(name, ".rss") ||
		strings.Contains(req.Header.Get("Accept"), "application/rss+xml") {
		return true, strings.TrimSuffix(name, ".rss"), "rss"
	}
	if strings.HasSuffix(name, ".atom") ||
		strings.Contains(req.Header.Get("Accept"), "application/atom+xml") {
		return true, strings.TrimSuffix(name, ".atom"), "atom"
	}
	return false, name, ""
}
|
package echo
import (
"encoding/xml"
"fmt"
"html/template"
"strconv"
"sync"
)
// Package-level reusable zero values, plus the single RWMutex that guards
// every Store instance — note the lock is shared by all Stores, not
// per-instance (see Set/Get/Delete below).
var (
	mutex         sync.RWMutex
	emptyHTML     = template.HTML(``)
	emptyJS       = template.JS(``)
	emptyCSS      = template.CSS(``)
	emptyHTMLAttr = template.HTMLAttr(``)
	emptyStore    = Store{}
)

// Store is a string-keyed map with typed accessor methods and coarse-grained
// locking via the package-level mutex.
type Store map[string]interface{}
// Set stores value under key and returns the Store for call chaining.
// Writes are serialized through the package-global mutex.
func (s Store) Set(key string, value interface{}) Store {
	mutex.Lock()
	s[key] = value
	mutex.Unlock()
	return s
}

// Get returns the value stored under key. When the key is absent — or
// present but nil — the first of defaults (if any) is returned; otherwise
// nil.
func (s Store) Get(key string, defaults ...interface{}) interface{} {
	mutex.RLock()
	defer mutex.RUnlock()
	if v, y := s[key]; y {
		if v == nil && len(defaults) > 0 {
			return defaults[0]
		}
		return v
	}
	if len(defaults) > 0 {
		return defaults[0]
	}
	return nil
}
// String returns the value under key if it is a string; "" otherwise.
func (s Store) String(key string, defaults ...interface{}) string {
	if v, y := s.Get(key, defaults...).(string); y {
		return v
	}
	return ``
}

// HTML returns the value under key as template.HTML. Plain strings are
// converted directly.
// NOTE(review): converting an arbitrary string to template.HTML bypasses
// html/template auto-escaping — confirm callers only store trusted markup.
func (s Store) HTML(key string, defaults ...interface{}) template.HTML {
	val := s.Get(key, defaults...)
	if v, y := val.(template.HTML); y {
		return v
	}
	if v, y := val.(string); y {
		return template.HTML(v)
	}
	return emptyHTML
}

// HTMLAttr returns the value under key as template.HTMLAttr; plain strings
// are converted. Same trust caveat as HTML above.
func (s Store) HTMLAttr(key string, defaults ...interface{}) template.HTMLAttr {
	val := s.Get(key, defaults...)
	if v, y := val.(template.HTMLAttr); y {
		return v
	}
	if v, y := val.(string); y {
		return template.HTMLAttr(v)
	}
	return emptyHTMLAttr
}

// JS returns the value under key as template.JS; plain strings are
// converted. Same trust caveat as HTML above.
func (s Store) JS(key string, defaults ...interface{}) template.JS {
	val := s.Get(key, defaults...)
	if v, y := val.(template.JS); y {
		return v
	}
	if v, y := val.(string); y {
		return template.JS(v)
	}
	return emptyJS
}

// CSS returns the value under key as template.CSS; plain strings are
// converted. Same trust caveat as HTML above.
func (s Store) CSS(key string, defaults ...interface{}) template.CSS {
	val := s.Get(key, defaults...)
	if v, y := val.(template.CSS); y {
		return v
	}
	if v, y := val.(string); y {
		return template.CSS(v)
	}
	return emptyCSS
}

// Bool returns the value under key if it is a bool; false otherwise
// (no string parsing is attempted).
func (s Store) Bool(key string, defaults ...interface{}) bool {
	if v, y := s.Get(key, defaults...).(bool); y {
		return v
	}
	return false
}
// Float64 returns the value under key coerced to float64. Listed numeric
// types convert directly; strings — and any other type, via fmt.Sprint —
// are parsed with strconv.ParseFloat. Parse errors are discarded, so
// unparsable input yields 0.
func (s Store) Float64(key string, defaults ...interface{}) float64 {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case float64:
		return v
	case int64:
		return float64(v)
	case uint64:
		return float64(v)
	case float32:
		return float64(v)
	case int32:
		return float64(v)
	case uint32:
		return float64(v)
	case int:
		return float64(v)
	case uint:
		return float64(v)
	case string:
		i, _ := strconv.ParseFloat(v, 64)
		return i
	default:
		s := fmt.Sprint(v)
		i, _ := strconv.ParseFloat(s, 64)
		return i
	}
}

// Float32 coerces like Float64 but only handles float32/int32/uint32
// directly; everything else goes through string parsing (errors discarded).
func (s Store) Float32(key string, defaults ...interface{}) float32 {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case float32:
		return v
	case int32:
		return float32(v)
	case uint32:
		return float32(v)
	case string:
		f, _ := strconv.ParseFloat(v, 32)
		return float32(f)
	default:
		s := fmt.Sprint(val)
		f, _ := strconv.ParseFloat(s, 32)
		return float32(f)
	}
}

// Int8 returns the value under key coerced to int8 (parse errors yield 0).
func (s Store) Int8(key string, defaults ...interface{}) int8 {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case int8:
		return v
	case string:
		i, _ := strconv.ParseInt(v, 10, 8)
		return int8(i)
	default:
		s := fmt.Sprint(val)
		i, _ := strconv.ParseInt(s, 10, 8)
		return int8(i)
	}
}

// Int16 returns the value under key coerced to int16 (parse errors yield 0).
func (s Store) Int16(key string, defaults ...interface{}) int16 {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case int16:
		return v
	case string:
		i, _ := strconv.ParseInt(v, 10, 16)
		return int16(i)
	default:
		s := fmt.Sprint(v)
		i, _ := strconv.ParseInt(s, 10, 16)
		return int16(i)
	}
}

// Int returns the value under key coerced to int (parse errors yield 0).
func (s Store) Int(key string, defaults ...interface{}) int {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case int:
		return v
	case string:
		i, _ := strconv.Atoi(v)
		return i
	default:
		s := fmt.Sprint(v)
		i, _ := strconv.Atoi(s)
		return i
	}
}

// Int32 returns the value under key coerced to int32 (parse errors yield 0).
func (s Store) Int32(key string, defaults ...interface{}) int32 {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case int32:
		return v
	case string:
		i, _ := strconv.ParseInt(v, 10, 32)
		return int32(i)
	default:
		s := fmt.Sprint(v)
		i, _ := strconv.ParseInt(s, 10, 32)
		return int32(i)
	}
}

// Int64 returns the value under key coerced to int64. Smaller integer types
// convert directly; everything else is parsed (errors discarded, yielding 0).
func (s Store) Int64(key string, defaults ...interface{}) int64 {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case int64:
		return v
	case int32:
		return int64(v)
	case uint32:
		return int64(v)
	case int:
		return int64(v)
	case uint:
		return int64(v)
	case string:
		i, _ := strconv.ParseInt(v, 10, 64)
		return i
	default:
		s := fmt.Sprint(v)
		i, _ := strconv.ParseInt(s, 10, 64)
		return i
	}
}
// Decr decrements the int64 stored under key by n and returns the new value.
// A non-int64 (or missing) value counts as 0.
// NOTE(review): the read (Get) and write (Set) are two separately-locked
// operations, so concurrent Decr/Incr calls can lose updates — confirm
// whether callers need atomicity.
func (s Store) Decr(key string, n int64, defaults ...interface{}) int64 {
	v, _ := s.Get(key, defaults...).(int64)
	v -= n
	s.Set(key, v)
	return v
}

// Incr increments the int64 stored under key by n and returns the new value.
// Same non-atomicity caveat as Decr.
func (s Store) Incr(key string, n int64, defaults ...interface{}) int64 {
	v, _ := s.Get(key, defaults...).(int64)
	v += n
	s.Set(key, v)
	return v
}
// Uint8 returns the value under key coerced to uint8 (parse errors yield 0).
func (s Store) Uint8(key string, defaults ...interface{}) uint8 {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case uint8:
		return v
	case string:
		i, _ := strconv.ParseUint(v, 10, 8)
		return uint8(i)
	default:
		s := fmt.Sprint(v)
		i, _ := strconv.ParseUint(s, 10, 8)
		return uint8(i)
	}
}

// Uint16 returns the value under key coerced to uint16 (parse errors yield 0).
func (s Store) Uint16(key string, defaults ...interface{}) uint16 {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case uint16:
		return v
	case string:
		i, _ := strconv.ParseUint(v, 10, 16)
		return uint16(i)
	default:
		s := fmt.Sprint(v)
		i, _ := strconv.ParseUint(s, 10, 16)
		return uint16(i)
	}
}
// Uint returns the value under key coerced to uint. Strings — and any other
// type, via fmt.Sprint — are parsed with strconv.ParseUint using bitSize 0,
// which matches the platform's uint width. (The previous hard-coded bitSize
// of 32 silently truncated valid values above 2^32-1 on 64-bit builds.)
// Parse errors are discarded, so unparsable input yields 0.
func (s Store) Uint(key string, defaults ...interface{}) uint {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case uint:
		return v
	case string:
		i, _ := strconv.ParseUint(v, 10, 0)
		return uint(i)
	default:
		s := fmt.Sprint(v)
		i, _ := strconv.ParseUint(s, 10, 0)
		return uint(i)
	}
}
// Uint32 returns the value under key coerced to uint32 (parse errors yield 0).
func (s Store) Uint32(key string, defaults ...interface{}) uint32 {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case uint32:
		return v
	case string:
		i, _ := strconv.ParseUint(v, 10, 32)
		return uint32(i)
	default:
		s := fmt.Sprint(v)
		i, _ := strconv.ParseUint(s, 10, 32)
		return uint32(i)
	}
}

// Uint64 returns the value under key coerced to uint64 (parse errors yield 0).
func (s Store) Uint64(key string, defaults ...interface{}) uint64 {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case uint64:
		return v
	case string:
		i, _ := strconv.ParseUint(v, 10, 64)
		return i
	default:
		s := fmt.Sprint(v)
		i, _ := strconv.ParseUint(s, 10, 64)
		return i
	}
}
// Store returns the value under key as a nested Store. map[string]interface{}
// converts directly; maps with other homogeneous value types are copied
// element by element into a fresh Store.
// NOTE(review): the fallback returns the package-level emptyStore — a caller
// that calls Set on the result mutates that shared value; confirm this is
// intended before relying on it.
func (s Store) Store(key string, defaults ...interface{}) Store {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case Store:
		return v
	case map[string]interface{}:
		return Store(v)
	case map[string]uint64:
		r := Store{}
		for k, a := range v {
			r[k] = interface{}(a)
		}
		return r
	case map[string]int64:
		r := Store{}
		for k, a := range v {
			r[k] = interface{}(a)
		}
		return r
	case map[string]uint:
		r := Store{}
		for k, a := range v {
			r[k] = interface{}(a)
		}
		return r
	case map[string]int:
		r := Store{}
		for k, a := range v {
			r[k] = interface{}(a)
		}
		return r
	case map[string]uint32:
		r := Store{}
		for k, a := range v {
			r[k] = interface{}(a)
		}
		return r
	case map[string]int32:
		r := Store{}
		for k, a := range v {
			r[k] = interface{}(a)
		}
		return r
	case map[string]float32:
		r := Store{}
		for k, a := range v {
			r[k] = interface{}(a)
		}
		return r
	case map[string]float64:
		r := Store{}
		for k, a := range v {
			r[k] = interface{}(a)
		}
		return r
	case map[string]string:
		r := Store{}
		for k, a := range v {
			r[k] = interface{}(a)
		}
		return r
	default:
		return emptyStore
	}
}
// Delete removes the given keys from the Store under the package-global
// write lock. (The built-in delete is a no-op for absent keys, so no
// existence check is needed.)
func (s Store) Delete(keys ...string) {
	mutex.Lock()
	defer mutex.Unlock()
	for _, key := range keys {
		delete(s, key)
	}
}
// MarshalXML allows type Store to be used with xml.Marshal.
// The default element name "Store" is rewritten to "Map"; each map entry is
// emitted as a child element named after its key.
// NOTE(review): Go map iteration order is random, so the element order is
// nondeterministic between calls; keys are also assumed to be valid XML
// element names — confirm both are acceptable to consumers.
func (s Store) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if start.Name.Local == `Store` {
		start.Name.Local = `Map`
	}
	if err := e.EncodeToken(start); err != nil {
		return err
	}
	for key, value := range s {
		elem := xml.StartElement{
			Name: xml.Name{Space: ``, Local: key},
			Attr: []xml.Attr{},
		}
		if err := e.EncodeElement(value, elem); err != nil {
			return err
		}
	}
	return e.EncodeToken(xml.EndElement{Name: start.Name})
}
// ToData converts the Store into a *RawData by picking out the "Data",
// "Zone", "Info" and "Code" entries. "Code" accepts either an int or a
// State; any other type leaves the zero State. Missing keys leave the
// corresponding RawData field nil/zero.
func (s Store) ToData() *RawData {
	var info, zone, data interface{}
	if v, y := s["Data"]; y {
		data = v
	}
	if v, y := s["Zone"]; y {
		zone = v
	}
	if v, y := s["Info"]; y {
		info = v
	}
	var code State
	if v, y := s["Code"]; y {
		if c, y := v.(int); y {
			code = State(c)
		} else if c, y := v.(State); y {
			code = c
		}
	}
	return &RawData{
		Code: code,
		Info: info,
		Zone: zone,
		Data: data,
	}
}
// DeepMerge merges source into s in place. When both sides hold an H for
// the same key the two are merged recursively; in every other case the
// source value overwrites the destination.
func (s Store) DeepMerge(source Store) {
	for k, value := range source {
		destValue, ok := s[k]
		if !ok {
			s[k] = value
			continue
		}
		sourceM, sourceOk := value.(H)
		destM, destOk := destValue.(H)
		if sourceOk && destOk {
			destM.DeepMerge(sourceM)
		} else {
			s[k] = value
		}
	}
}
// Clone returns a copy of the Store. Nested Store values and []Store slices
// are cloned recursively; every other value is copied by assignment, so
// other reference types (maps, slices, pointers) remain shared with the
// original. Note that, unlike Get/Set, this reads the map without taking the
// package-global lock.
func (s Store) Clone() Store {
	r := make(Store)
	for k, value := range s {
		switch v := value.(type) {
		case Store:
			r[k] = v.Clone()
		case []Store:
			vCopy := make([]Store, len(v))
			for i, row := range v {
				vCopy[i] = row.Clone()
			}
			r[k] = vCopy
		default:
			r[k] = value
		}
	}
	return r
}
update
package echo
import (
"encoding/xml"
"fmt"
"html/template"
"strconv"
"sync"
)
// Package-level reusable zero values, plus the single RWMutex that guards
// every Store instance — note the lock is shared by all Stores, not
// per-instance (see Set/Has/Get below).
var (
	mutex         sync.RWMutex
	emptyHTML     = template.HTML(``)
	emptyJS       = template.JS(``)
	emptyCSS      = template.CSS(``)
	emptyHTMLAttr = template.HTMLAttr(``)
	emptyStore    = Store{}
)

// Store is a string-keyed map with typed accessor methods and coarse-grained
// locking via the package-level mutex.
type Store map[string]interface{}
// Set stores value under key and returns the Store for call chaining.
// Writes are serialized through the package-global mutex.
func (s Store) Set(key string, value interface{}) Store {
	mutex.Lock()
	s[key] = value
	mutex.Unlock()
	return s
}

// Has reports whether key is present (even with a nil value).
func (s Store) Has(key string) bool {
	mutex.RLock()
	defer mutex.RUnlock()
	_, y := s[key]
	return y
}

// Get returns the value stored under key. When the key is absent — or
// present but nil — the first of defaults (if any) is returned; otherwise
// nil.
func (s Store) Get(key string, defaults ...interface{}) interface{} {
	mutex.RLock()
	defer mutex.RUnlock()
	if v, y := s[key]; y {
		if v == nil && len(defaults) > 0 {
			return defaults[0]
		}
		return v
	}
	if len(defaults) > 0 {
		return defaults[0]
	}
	return nil
}
// String returns the value under key if it is a string; "" otherwise.
func (s Store) String(key string, defaults ...interface{}) string {
	if v, y := s.Get(key, defaults...).(string); y {
		return v
	}
	return ``
}

// HTML returns the value under key as template.HTML. Plain strings are
// converted directly.
// NOTE(review): converting an arbitrary string to template.HTML bypasses
// html/template auto-escaping — confirm callers only store trusted markup.
func (s Store) HTML(key string, defaults ...interface{}) template.HTML {
	val := s.Get(key, defaults...)
	if v, y := val.(template.HTML); y {
		return v
	}
	if v, y := val.(string); y {
		return template.HTML(v)
	}
	return emptyHTML
}

// HTMLAttr returns the value under key as template.HTMLAttr; plain strings
// are converted. Same trust caveat as HTML above.
func (s Store) HTMLAttr(key string, defaults ...interface{}) template.HTMLAttr {
	val := s.Get(key, defaults...)
	if v, y := val.(template.HTMLAttr); y {
		return v
	}
	if v, y := val.(string); y {
		return template.HTMLAttr(v)
	}
	return emptyHTMLAttr
}

// JS returns the value under key as template.JS; plain strings are
// converted. Same trust caveat as HTML above.
func (s Store) JS(key string, defaults ...interface{}) template.JS {
	val := s.Get(key, defaults...)
	if v, y := val.(template.JS); y {
		return v
	}
	if v, y := val.(string); y {
		return template.JS(v)
	}
	return emptyJS
}

// CSS returns the value under key as template.CSS; plain strings are
// converted. Same trust caveat as HTML above.
func (s Store) CSS(key string, defaults ...interface{}) template.CSS {
	val := s.Get(key, defaults...)
	if v, y := val.(template.CSS); y {
		return v
	}
	if v, y := val.(string); y {
		return template.CSS(v)
	}
	return emptyCSS
}

// Bool returns the value under key if it is a bool; false otherwise
// (no string parsing is attempted).
func (s Store) Bool(key string, defaults ...interface{}) bool {
	if v, y := s.Get(key, defaults...).(bool); y {
		return v
	}
	return false
}
func (s Store) Float64(key string, defaults ...interface{}) float64 {
val := s.Get(key, defaults...)
switch v := val.(type) {
case float64:
return v
case int64:
return float64(v)
case uint64:
return float64(v)
case float32:
return float64(v)
case int32:
return float64(v)
case uint32:
return float64(v)
case int:
return float64(v)
case uint:
return float64(v)
case string:
i, _ := strconv.ParseFloat(v, 64)
return i
default:
s := fmt.Sprint(v)
i, _ := strconv.ParseFloat(s, 64)
return i
}
}
func (s Store) Float32(key string, defaults ...interface{}) float32 {
val := s.Get(key, defaults...)
switch v := val.(type) {
case float32:
return v
case int32:
return float32(v)
case uint32:
return float32(v)
case string:
f, _ := strconv.ParseFloat(v, 32)
return float32(f)
default:
s := fmt.Sprint(val)
f, _ := strconv.ParseFloat(s, 32)
return float32(f)
}
}
// Int8 returns the value stored under key coerced to int8; non-int8
// values are stringified and parsed, with failures yielding 0.
func (s Store) Int8(key string, defaults ...interface{}) int8 {
	val := s.Get(key, defaults...)
	if v, ok := val.(int8); ok {
		return v
	}
	str, ok := val.(string)
	if !ok {
		str = fmt.Sprint(val)
	}
	i, _ := strconv.ParseInt(str, 10, 8)
	return int8(i)
}
// Int16 returns the value stored under key coerced to int16; non-int16
// values are stringified and parsed, with failures yielding 0.
func (s Store) Int16(key string, defaults ...interface{}) int16 {
	val := s.Get(key, defaults...)
	if v, ok := val.(int16); ok {
		return v
	}
	str, ok := val.(string)
	if !ok {
		str = fmt.Sprint(val)
	}
	i, _ := strconv.ParseInt(str, 10, 16)
	return int16(i)
}
// Int returns the value stored under key coerced to int; non-int values
// are stringified and parsed with strconv.Atoi, failures yielding 0.
func (s Store) Int(key string, defaults ...interface{}) int {
	val := s.Get(key, defaults...)
	if v, ok := val.(int); ok {
		return v
	}
	str, ok := val.(string)
	if !ok {
		str = fmt.Sprint(val)
	}
	i, _ := strconv.Atoi(str)
	return i
}
// Int32 returns the value stored under key coerced to int32; non-int32
// values are stringified and parsed, with failures yielding 0.
func (s Store) Int32(key string, defaults ...interface{}) int32 {
	val := s.Get(key, defaults...)
	if v, ok := val.(int32); ok {
		return v
	}
	str, ok := val.(string)
	if !ok {
		str = fmt.Sprint(val)
	}
	i, _ := strconv.ParseInt(str, 10, 32)
	return int32(i)
}
// Int64 returns the value stored under key coerced to int64. The common
// integer types are converted directly; everything else is stringified
// and parsed, with failures yielding 0.
func (s Store) Int64(key string, defaults ...interface{}) int64 {
	val := s.Get(key, defaults...)
	switch v := val.(type) {
	case int64:
		return v
	case int:
		return int64(v)
	case int32:
		return int64(v)
	case uint:
		return int64(v)
	case uint32:
		return int64(v)
	}
	str, ok := val.(string)
	if !ok {
		str = fmt.Sprint(val)
	}
	i, _ := strconv.ParseInt(str, 10, 64)
	return i
}
// Decr subtracts n from the int64 stored under key, stores the result
// back, and returns it. A missing or non-int64 value is treated as 0.
// NOTE(review): the Get/Set pair is not a single atomic operation —
// confirm callers don't rely on concurrent decrements being exact.
func (s Store) Decr(key string, n int64, defaults ...interface{}) int64 {
	cur, _ := s.Get(key, defaults...).(int64)
	cur -= n
	s.Set(key, cur)
	return cur
}
// Incr adds n to the int64 stored under key, stores the result back, and
// returns it. A missing or non-int64 value is treated as 0.
// NOTE(review): the Get/Set pair is not a single atomic operation —
// confirm callers don't rely on concurrent increments being exact.
func (s Store) Incr(key string, n int64, defaults ...interface{}) int64 {
	cur, _ := s.Get(key, defaults...).(int64)
	cur += n
	s.Set(key, cur)
	return cur
}
// Uint8 returns the value stored under key coerced to uint8; non-uint8
// values are stringified and parsed, with failures yielding 0.
func (s Store) Uint8(key string, defaults ...interface{}) uint8 {
	val := s.Get(key, defaults...)
	if v, ok := val.(uint8); ok {
		return v
	}
	str, ok := val.(string)
	if !ok {
		str = fmt.Sprint(val)
	}
	i, _ := strconv.ParseUint(str, 10, 8)
	return uint8(i)
}
// Uint16 returns the value stored under key coerced to uint16; non-uint16
// values are stringified and parsed, with failures yielding 0.
func (s Store) Uint16(key string, defaults ...interface{}) uint16 {
	val := s.Get(key, defaults...)
	if v, ok := val.(uint16); ok {
		return v
	}
	str, ok := val.(string)
	if !ok {
		str = fmt.Sprint(val)
	}
	i, _ := strconv.ParseUint(str, 10, 16)
	return uint16(i)
}
// Uint returns the value stored under key coerced to uint; non-uint
// values are stringified and parsed, with failures yielding 0.
// NOTE(review): parsing uses bitSize 32, so parsed values saturate at
// 2^32-1 even on 64-bit platforms — confirm this cap is intended
// (Uint64 exists for the full range).
func (s Store) Uint(key string, defaults ...interface{}) uint {
	val := s.Get(key, defaults...)
	if v, ok := val.(uint); ok {
		return v
	}
	str, ok := val.(string)
	if !ok {
		str = fmt.Sprint(val)
	}
	i, _ := strconv.ParseUint(str, 10, 32)
	return uint(i)
}
// Uint32 returns the value stored under key coerced to uint32; non-uint32
// values are stringified and parsed, with failures yielding 0.
func (s Store) Uint32(key string, defaults ...interface{}) uint32 {
	val := s.Get(key, defaults...)
	if v, ok := val.(uint32); ok {
		return v
	}
	str, ok := val.(string)
	if !ok {
		str = fmt.Sprint(val)
	}
	i, _ := strconv.ParseUint(str, 10, 32)
	return uint32(i)
}
// Uint64 returns the value stored under key coerced to uint64; non-uint64
// values are stringified and parsed, with failures yielding 0.
func (s Store) Uint64(key string, defaults ...interface{}) uint64 {
	val := s.Get(key, defaults...)
	if v, ok := val.(uint64); ok {
		return v
	}
	str, ok := val.(string)
	if !ok {
		str = fmt.Sprint(val)
	}
	i, _ := strconv.ParseUint(str, 10, 64)
	return i
}
// Store returns the value stored under key as a nested Store. A Store is
// returned as-is, a map[string]interface{} is converted directly, and
// maps with concrete value types (the common numerics and string) are
// copied key by key. Any other value yields the shared emptyStore.
func (s Store) Store(key string, defaults ...interface{}) Store {
	switch v := s.Get(key, defaults...).(type) {
	case Store:
		return v
	case map[string]interface{}:
		return Store(v)
	case map[string]uint64:
		out := Store{}
		for name, item := range v {
			out[name] = item
		}
		return out
	case map[string]int64:
		out := Store{}
		for name, item := range v {
			out[name] = item
		}
		return out
	case map[string]uint:
		out := Store{}
		for name, item := range v {
			out[name] = item
		}
		return out
	case map[string]int:
		out := Store{}
		for name, item := range v {
			out[name] = item
		}
		return out
	case map[string]uint32:
		out := Store{}
		for name, item := range v {
			out[name] = item
		}
		return out
	case map[string]int32:
		out := Store{}
		for name, item := range v {
			out[name] = item
		}
		return out
	case map[string]float32:
		out := Store{}
		for name, item := range v {
			out[name] = item
		}
		return out
	case map[string]float64:
		out := Store{}
		for name, item := range v {
			out[name] = item
		}
		return out
	case map[string]string:
		out := Store{}
		for name, item := range v {
			out[name] = item
		}
		return out
	default:
		return emptyStore
	}
}
// Delete removes the given keys from the store under the package mutex.
// Missing keys are ignored (the built-in delete is a no-op for them).
func (s Store) Delete(keys ...string) {
	mutex.Lock()
	defer mutex.Unlock()
	for _, key := range keys {
		delete(s, key)
	}
}
// MarshalXML implements xml.Marshaler for Store: every key/value pair is
// encoded as a child element named after the key. When the start element
// is named "Store" it is rewritten to "Map".
func (s Store) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if start.Name.Local == `Store` {
		start.Name.Local = `Map`
	}
	if err := e.EncodeToken(start); err != nil {
		return err
	}
	for key, value := range s {
		child := xml.StartElement{
			Name: xml.Name{Space: ``, Local: key},
			Attr: []xml.Attr{},
		}
		if err := e.EncodeElement(value, child); err != nil {
			return err
		}
	}
	return e.EncodeToken(xml.EndElement{Name: start.Name})
}
// ToData converts the store into a *RawData, picking the well-known keys
// "Data", "Zone", "Info" and "Code". Missing keys yield nil fields; the
// Code field accepts either an int or a State value.
func (s Store) ToData() *RawData {
	// A missing key reads as nil from the map, which is exactly the
	// zero value the RawData fields should carry in that case.
	data := s["Data"]
	zone := s["Zone"]
	info := s["Info"]
	var code State
	switch c := s["Code"].(type) {
	case int:
		code = State(c)
	case State:
		code = c
	}
	return &RawData{
		Code: code,
		Info: info,
		Zone: zone,
		Data: data,
	}
}
// DeepMerge merges source into s. When both sides hold an H under the
// same key the two are merged recursively; otherwise the source value
// overwrites (or introduces) the destination entry.
func (s Store) DeepMerge(source Store) {
	for key, val := range source {
		existing, found := s[key]
		if !found {
			s[key] = val
			continue
		}
		srcMap, srcIsMap := val.(H)
		dstMap, dstIsMap := existing.(H)
		if srcIsMap && dstIsMap {
			dstMap.DeepMerge(srcMap)
			continue
		}
		s[key] = val
	}
}
// Clone returns a deep copy of the store. Nested Store values and
// []Store slices are cloned recursively; every other value is copied by
// assignment (so shared references remain shared).
func (s Store) Clone() Store {
	out := make(Store)
	for key, value := range s {
		switch v := value.(type) {
		case Store:
			out[key] = v.Clone()
		case []Store:
			rows := make([]Store, 0, len(v))
			for _, row := range v {
				rows = append(rows, row.Clone())
			}
			out[key] = rows
		default:
			out[key] = value
		}
	}
	return out
}
|
// +build linux
// +build cgo
package shared
import (
"fmt"
"io"
"os"
"strings"
"github.com/gorilla/websocket"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/logger"
)
/*
#include "../shared/netns_getifaddrs.c"
*/
// #cgo CFLAGS: -std=gnu11 -Wvla
import "C"
// NetnsGetifaddrs returns the network state (state, type, MTU, addresses,
// counters and hardware address) of every interface visible in the network
// namespace of process initPID, keyed by interface name. initPID <= 0
// queries the caller's own namespace.
func NetnsGetifaddrs(initPID int32) (map[string]api.ContainerStateNetwork, error) {
	var netnsid_aware C.bool
	var ifaddrs *C.struct_netns_ifaddrs
	var netnsID C.__s32

	if initPID > 0 {
		f, err := os.Open(fmt.Sprintf("/proc/%d/ns/net", initPID))
		if err != nil {
			return nil, err
		}
		defer f.Close()

		netnsID = C.netns_get_nsid(C.__s32(f.Fd()))
		if netnsID < 0 {
			return nil, fmt.Errorf("Failed to retrieve network namespace id")
		}
	} else {
		netnsID = -1
	}

	ret := C.netns_getifaddrs(&ifaddrs, netnsID, &netnsid_aware)
	if ret < 0 {
		return nil, fmt.Errorf("Failed to retrieve network interfaces and addresses")
	}
	defer C.netns_freeifaddrs(ifaddrs)

	if netnsID >= 0 && !netnsid_aware {
		return nil, fmt.Errorf("Netlink requests are not fully network namespace id aware")
	}

	// We're using the interface name as key here but we should really
	// switch to the ifindex at some point to handle ip aliasing correctly.
	networks := map[string]api.ContainerStateNetwork{}

	for addr := ifaddrs; addr != nil; addr = addr.ifa_next {
		var address [C.INET6_ADDRSTRLEN]C.char
		addNetwork, networkExists := networks[C.GoString(addr.ifa_name)]
		if !networkExists {
			addNetwork = api.ContainerStateNetwork{
				Addresses: []api.ContainerStateNetworkAddress{},
				Counters:  api.ContainerStateNetworkCounters{},
			}
		}

		// Interface flags.
		// BUG FIX: this used to run only for entries carrying an
		// AF_INET/AF_INET6 address, so interfaces without an IP (e.g.
		// down interfaces) were reported with empty state/type/MTU.
		// Record them unconditionally for every netlink entry.
		netState := "down"
		netType := "unknown"
		if (addr.ifa_flags & C.IFF_BROADCAST) > 0 {
			netType = "broadcast"
		}
		if (addr.ifa_flags & C.IFF_LOOPBACK) > 0 {
			netType = "loopback"
		}
		if (addr.ifa_flags & C.IFF_POINTOPOINT) > 0 {
			netType = "point-to-point"
		}
		if (addr.ifa_flags & C.IFF_UP) > 0 {
			netState = "up"
		}
		addNetwork.State = netState
		addNetwork.Type = netType
		addNetwork.Mtu = int(addr.ifa_mtu)

		// Addresses
		if addr.ifa_addr != nil && (addr.ifa_addr.sa_family == C.AF_INET || addr.ifa_addr.sa_family == C.AF_INET6) {
			family := "inet"
			if addr.ifa_addr.sa_family == C.AF_INET6 {
				family = "inet6"
			}
			addr_ptr := C.get_addr_ptr(addr.ifa_addr)
			if addr_ptr == nil {
				return nil, fmt.Errorf("Failed to retrieve valid address pointer")
			}
			address_str := C.inet_ntop(C.int(addr.ifa_addr.sa_family), addr_ptr, &address[0], C.INET6_ADDRSTRLEN)
			if address_str == nil {
				return nil, fmt.Errorf("Failed to retrieve address string")
			}
			if addNetwork.Addresses == nil {
				addNetwork.Addresses = []api.ContainerStateNetworkAddress{}
			}
			goAddrString := C.GoString(address_str)
			// Classify the address scope from well-known prefixes.
			scope := "global"
			if strings.HasPrefix(goAddrString, "127") {
				scope = "local"
			}
			if goAddrString == "::1" {
				scope = "local"
			}
			if strings.HasPrefix(goAddrString, "169.254") {
				scope = "link"
			}
			if strings.HasPrefix(goAddrString, "fe80:") {
				scope = "link"
			}
			address := api.ContainerStateNetworkAddress{}
			address.Family = family
			address.Address = goAddrString
			address.Netmask = fmt.Sprintf("%d", int(addr.ifa_prefixlen))
			address.Scope = scope
			addNetwork.Addresses = append(addNetwork.Addresses, address)
		} else if addr.ifa_addr != nil && addr.ifa_addr.sa_family == C.AF_PACKET {
			// Hardware address (skipped for loopback devices).
			if (addr.ifa_flags & C.IFF_LOOPBACK) == 0 {
				var buf [1024]C.char
				hwaddr := C.get_packet_address(addr.ifa_addr, &buf[0], 1024)
				if hwaddr == nil {
					return nil, fmt.Errorf("Failed to retrieve hardware address")
				}
				addNetwork.Hwaddr = C.GoString(hwaddr)
			}
		}

		// Traffic counters, when 64-bit stats are available.
		if addr.ifa_stats_type == C.IFLA_STATS64 {
			addNetwork.Counters.BytesReceived = int64(addr.ifa_stats64.rx_bytes)
			addNetwork.Counters.BytesSent = int64(addr.ifa_stats64.tx_bytes)
			addNetwork.Counters.PacketsReceived = int64(addr.ifa_stats64.rx_packets)
			addNetwork.Counters.PacketsSent = int64(addr.ifa_stats64.tx_packets)
		}
		ifName := C.GoString(addr.ifa_name)
		networks[ifName] = addNetwork
	}

	return networks, nil
}
// WebsocketExecMirror mirrors an exec session over a websocket: data
// received on the websocket is copied into w (via defaultWriter), and data
// read from r is streamed out as binary websocket messages. fd and exited
// feed ExecReaderToChannel's exit detection. It returns two buffered
// channels that each receive one value when the read and write directions
// respectively finish.
func WebsocketExecMirror(conn *websocket.Conn, w io.WriteCloser, r io.ReadCloser, exited chan bool, fd int) (chan bool, chan bool) {
readDone := make(chan bool, 1)
writeDone := make(chan bool, 1)
// Websocket -> w direction.
go defaultWriter(conn, w, writeDone)
// r -> websocket direction.
go func(conn *websocket.Conn, r io.ReadCloser) {
in := ExecReaderToChannel(r, -1, exited, fd)
for {
buf, ok := <-in
if !ok {
// Channel closed: the reader is done. Send an empty text
// message as a write barrier so the peer knows output ended.
r.Close()
logger.Debugf("sending write barrier")
conn.WriteMessage(websocket.TextMessage, []byte{})
readDone <- true
return
}
w, err := conn.NextWriter(websocket.BinaryMessage)
if err != nil {
logger.Debugf("Got error getting next writer %s", err)
break
}
_, err = w.Write(buf)
w.Close()
if err != nil {
logger.Debugf("Got err writing %s", err)
break
}
}
// Error path only (loop exited via break): signal a normal close to
// the peer before reporting this direction as done.
closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
conn.WriteMessage(websocket.CloseMessage, closeMsg)
readDone <- true
r.Close()
}(conn, r)
return readDone, writeDone
}
shared/network: Fix reporting of down interfaces
Signed-off-by: Stéphane Graber <089afc6d81f66f1168a9849e15660feae286e024@ubuntu.com>
// +build linux
// +build cgo
package shared
import (
"fmt"
"io"
"os"
"strings"
"github.com/gorilla/websocket"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/logger"
)
/*
#include "../shared/netns_getifaddrs.c"
*/
// #cgo CFLAGS: -std=gnu11 -Wvla
import "C"
// NetnsGetifaddrs returns the network state (state, type, MTU, addresses,
// counters and hardware address) of every interface visible in the network
// namespace of process initPID, keyed by interface name. initPID <= 0
// queries the caller's own namespace.
func NetnsGetifaddrs(initPID int32) (map[string]api.ContainerStateNetwork, error) {
var netnsid_aware C.bool
var ifaddrs *C.struct_netns_ifaddrs
var netnsID C.__s32
if initPID > 0 {
// Resolve the target process' network namespace to a netns id.
f, err := os.Open(fmt.Sprintf("/proc/%d/ns/net", initPID))
if err != nil {
return nil, err
}
defer f.Close()
netnsID = C.netns_get_nsid(C.__s32(f.Fd()))
if netnsID < 0 {
return nil, fmt.Errorf("Failed to retrieve network namespace id")
}
} else {
netnsID = -1
}
ret := C.netns_getifaddrs(&ifaddrs, netnsID, &netnsid_aware)
if ret < 0 {
return nil, fmt.Errorf("Failed to retrieve network interfaces and addresses")
}
// The C list is heap-allocated by netns_getifaddrs; always free it.
defer C.netns_freeifaddrs(ifaddrs)
if netnsID >= 0 && !netnsid_aware {
return nil, fmt.Errorf("Netlink requests are not fully network namespace id aware")
}
// We're using the interface name as key here but we should really
// switch to the ifindex at some point to handle ip aliasing correctly.
networks := map[string]api.ContainerStateNetwork{}
for addr := ifaddrs; addr != nil; addr = addr.ifa_next {
var address [C.INET6_ADDRSTRLEN]C.char
addNetwork, networkExists := networks[C.GoString(addr.ifa_name)]
if !networkExists {
addNetwork = api.ContainerStateNetwork{
Addresses: []api.ContainerStateNetworkAddress{},
Counters: api.ContainerStateNetworkCounters{},
}
}
// Interface flags: recorded for every entry, so interfaces without
// any address still report their state/type/MTU.
netState := "down"
netType := "unknown"
if (addr.ifa_flags & C.IFF_BROADCAST) > 0 {
netType = "broadcast"
}
if (addr.ifa_flags & C.IFF_LOOPBACK) > 0 {
netType = "loopback"
}
if (addr.ifa_flags & C.IFF_POINTOPOINT) > 0 {
netType = "point-to-point"
}
if (addr.ifa_flags & C.IFF_UP) > 0 {
netState = "up"
}
addNetwork.State = netState
addNetwork.Type = netType
addNetwork.Mtu = int(addr.ifa_mtu)
// Addresses
if addr.ifa_addr != nil && (addr.ifa_addr.sa_family == C.AF_INET || addr.ifa_addr.sa_family == C.AF_INET6) {
family := "inet"
if addr.ifa_addr.sa_family == C.AF_INET6 {
family = "inet6"
}
addr_ptr := C.get_addr_ptr(addr.ifa_addr)
if addr_ptr == nil {
return nil, fmt.Errorf("Failed to retrieve valid address pointer")
}
address_str := C.inet_ntop(C.int(addr.ifa_addr.sa_family), addr_ptr, &address[0], C.INET6_ADDRSTRLEN)
if address_str == nil {
return nil, fmt.Errorf("Failed to retrieve address string")
}
if addNetwork.Addresses == nil {
addNetwork.Addresses = []api.ContainerStateNetworkAddress{}
}
goAddrString := C.GoString(address_str)
// Classify the address scope from well-known prefixes.
scope := "global"
if strings.HasPrefix(goAddrString, "127") {
scope = "local"
}
if goAddrString == "::1" {
scope = "local"
}
if strings.HasPrefix(goAddrString, "169.254") {
scope = "link"
}
if strings.HasPrefix(goAddrString, "fe80:") {
scope = "link"
}
address := api.ContainerStateNetworkAddress{}
address.Family = family
address.Address = goAddrString
address.Netmask = fmt.Sprintf("%d", int(addr.ifa_prefixlen))
address.Scope = scope
addNetwork.Addresses = append(addNetwork.Addresses, address)
} else if addr.ifa_addr != nil && addr.ifa_addr.sa_family == C.AF_PACKET {
// AF_PACKET entry carries the hardware address (except loopback).
if (addr.ifa_flags & C.IFF_LOOPBACK) == 0 {
var buf [1024]C.char
hwaddr := C.get_packet_address(addr.ifa_addr, &buf[0], 1024)
if hwaddr == nil {
return nil, fmt.Errorf("Failed to retrieve hardware address")
}
addNetwork.Hwaddr = C.GoString(hwaddr)
}
}
// Traffic counters, when 64-bit stats are available.
if addr.ifa_stats_type == C.IFLA_STATS64 {
addNetwork.Counters.BytesReceived = int64(addr.ifa_stats64.rx_bytes)
addNetwork.Counters.BytesSent = int64(addr.ifa_stats64.tx_bytes)
addNetwork.Counters.PacketsReceived = int64(addr.ifa_stats64.rx_packets)
addNetwork.Counters.PacketsSent = int64(addr.ifa_stats64.tx_packets)
}
ifName := C.GoString(addr.ifa_name)
networks[ifName] = addNetwork
}
return networks, nil
}
// WebsocketExecMirror mirrors an exec session over a websocket: data
// received on the websocket is copied into w (via defaultWriter), and data
// read from r is streamed out as binary websocket messages. fd and exited
// feed ExecReaderToChannel's exit detection. It returns two buffered
// channels that each receive one value when the read and write directions
// respectively finish.
func WebsocketExecMirror(conn *websocket.Conn, w io.WriteCloser, r io.ReadCloser, exited chan bool, fd int) (chan bool, chan bool) {
readDone := make(chan bool, 1)
writeDone := make(chan bool, 1)
// Websocket -> w direction.
go defaultWriter(conn, w, writeDone)
// r -> websocket direction.
go func(conn *websocket.Conn, r io.ReadCloser) {
in := ExecReaderToChannel(r, -1, exited, fd)
for {
buf, ok := <-in
if !ok {
// Channel closed: the reader is done. Send an empty text
// message as a write barrier so the peer knows output ended.
r.Close()
logger.Debugf("sending write barrier")
conn.WriteMessage(websocket.TextMessage, []byte{})
readDone <- true
return
}
w, err := conn.NextWriter(websocket.BinaryMessage)
if err != nil {
logger.Debugf("Got error getting next writer %s", err)
break
}
_, err = w.Write(buf)
w.Close()
if err != nil {
logger.Debugf("Got err writing %s", err)
break
}
}
// Error path only (loop exited via break): signal a normal close to
// the peer before reporting this direction as done.
closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
conn.WriteMessage(websocket.CloseMessage, closeMsg)
readDone <- true
r.Close()
}(conn, r)
return readDone, writeDone
}
|
package node
import (
"errors"
"github.com/MG-RAST/Shock/shock-server/conf"
"github.com/MG-RAST/Shock/shock-server/db"
"io/ioutil"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
"path/filepath"
)
// Initialize ensures the Nodes collection has a unique index on "id".
// BUG FIX: the copied mgo session was never closed, leaking a pooled
// connection on every call; defer guarantees it is returned.
// NOTE(review): the EnsureIndex error is still discarded, matching the
// original behavior — confirm whether startup should fail on it.
func Initialize() {
	session := db.Connection.Session.Copy()
	defer session.Close()
	DB := session.DB(conf.Conf["mongodb-database"]).C("Nodes")
	DB.EnsureIndex(mgo.Index{Key: []string{"id"}, Unique: true})
}
// dbDelete removes every node document matching query q and returns any
// database error. The copied session is released on return.
func dbDelete(q bson.M) (err error) {
	session := db.Connection.Session.Copy()
	defer session.Close()
	nodes := session.DB(conf.Conf["mongodb-database"]).C("Nodes")
	_, err = nodes.RemoveAll(q)
	return
}
// dbUpsert inserts or replaces the document for node n, keyed on its Id,
// and returns any database error. The copied session is released on return.
func dbUpsert(n *Node) (err error) {
	session := db.Connection.Session.Copy()
	defer session.Close()
	nodes := session.DB(conf.Conf["mongodb-database"]).C("Nodes")
	_, err = nodes.Upsert(bson.M{"id": n.Id}, &n)
	return
}
// dbFind loads all nodes matching q into results. options may carry
// "limit" and "offset" (which must be given together); in that case count
// is the total number of matches before paging. Without options the full
// result set is returned and count stays 0.
// BUG FIX: the copied session was only closed on the unpaged path — every
// paged query (and the options error path) leaked a pooled connection.
// defer now releases it on all paths.
func dbFind(q bson.M, results *Nodes, options map[string]int) (count int, err error) {
	session := db.Connection.Session.Copy()
	defer session.Close()
	DB := session.DB(conf.Conf["mongodb-database"]).C("Nodes")
	if limit, has := options["limit"]; has {
		offset, hasOffset := options["offset"]
		if !hasOffset {
			return 0, errors.New("store.db.Find options limit and offset must be used together")
		}
		query := DB.Find(q)
		if count, err = query.Count(); err != nil {
			return 0, err
		}
		err = query.Limit(limit).Skip(offset).All(results)
		return
	}
	err = DB.Find(q).All(results)
	return
}
// Load fetches the node with the given id and verifies that the user
// identified by uuid has read access to it. Returns the node, or an error
// when the lookup fails or access is denied.
// BUG FIX: the original session.Close() sat after returns in both
// branches and was unreachable, leaking a pooled connection per call;
// defer guarantees the session is released.
func Load(id string, uuid string) (n *Node, err error) {
	session := db.Connection.Session.Copy()
	defer session.Close()
	DB := session.DB(conf.Conf["mongodb-database"]).C("Nodes")
	n = new(Node)
	if err = DB.Find(bson.M{"id": id}).One(&n); err != nil {
		return nil, err
	}
	rights := n.Acl.Check(uuid)
	if !rights["read"] {
		return nil, errors.New("User Unauthorized")
	}
	return n, nil
}
// LoadUnauth fetches the node with the given id without any access-control
// check. Returns the node or the lookup error.
// BUG FIX: the original session.Close() sat after returns in both
// branches and was unreachable, leaking a pooled connection per call;
// defer guarantees the session is released.
func LoadUnauth(id string) (n *Node, err error) {
	session := db.Connection.Session.Copy()
	defer session.Close()
	DB := session.DB(conf.Conf["mongodb-database"]).C("Nodes")
	n = new(Node)
	if err = DB.Find(bson.M{"id": id}).One(&n); err != nil {
		return nil, err
	}
	return n, nil
}
// LoadNodes fetches every node whose id appears in ids (no paging, no
// access-control check). On error the partial result is discarded.
func LoadNodes(ids []string) (n Nodes, err error) {
	query := bson.M{"id": bson.M{"$in": ids}}
	if _, err = dbFind(query, &n, nil); err != nil {
		return nil, err
	}
	return n, nil
}
// ReloadFromDisk restores a node into the database from its on-disk BSON
// dump. path is the node's directory; the dump is expected at
// <path>/<id>.bson where id is the directory's base name.
// Improvement: builds the dump path with filepath.Join instead of manual
// "/" concatenation, which is the portable idiom (filepath is already
// imported by this file).
func ReloadFromDisk(path string) (err error) {
	id := filepath.Base(path)
	nbson, err := ioutil.ReadFile(filepath.Join(path, id+".bson"))
	if err != nil {
		return
	}
	node := new(Node)
	if err = bson.Unmarshal(nbson, &node); err != nil {
		return
	}
	return dbUpsert(node)
}
Added documentation for node initialize
package node
import (
"errors"
"github.com/MG-RAST/Shock/shock-server/conf"
"github.com/MG-RAST/Shock/shock-server/db"
"io/ioutil"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
"path/filepath"
)
// Initialize creates a copy of the mongodb connection and uses it to
// access the Nodes collection, ensuring a unique index exists on the id
// key (creating it if necessary).
// BUG FIX: the copied mgo session was never closed, leaking a pooled
// connection on every call; defer guarantees it is returned.
// NOTE(review): the EnsureIndex error is still discarded, matching the
// original behavior — confirm whether startup should fail on it.
func Initialize() {
	session := db.Connection.Session.Copy()
	defer session.Close()
	DB := session.DB(conf.Conf["mongodb-database"]).C("Nodes")
	DB.EnsureIndex(mgo.Index{Key: []string{"id"}, Unique: true})
}
// dbDelete removes every node document matching query q and returns any
// database error. The copied session is released on return.
func dbDelete(q bson.M) (err error) {
	session := db.Connection.Session.Copy()
	defer session.Close()
	nodes := session.DB(conf.Conf["mongodb-database"]).C("Nodes")
	_, err = nodes.RemoveAll(q)
	return
}
// dbUpsert inserts or replaces the document for node n, keyed on its Id,
// and returns any database error. The copied session is released on return.
func dbUpsert(n *Node) (err error) {
	session := db.Connection.Session.Copy()
	defer session.Close()
	nodes := session.DB(conf.Conf["mongodb-database"]).C("Nodes")
	_, err = nodes.Upsert(bson.M{"id": n.Id}, &n)
	return
}
// dbFind loads all nodes matching q into results. options may carry
// "limit" and "offset" (which must be given together); in that case count
// is the total number of matches before paging. Without options the full
// result set is returned and count stays 0.
// BUG FIX: the copied session was only closed on the unpaged path — every
// paged query (and the options error path) leaked a pooled connection.
// defer now releases it on all paths.
func dbFind(q bson.M, results *Nodes, options map[string]int) (count int, err error) {
	session := db.Connection.Session.Copy()
	defer session.Close()
	DB := session.DB(conf.Conf["mongodb-database"]).C("Nodes")
	if limit, has := options["limit"]; has {
		offset, hasOffset := options["offset"]
		if !hasOffset {
			return 0, errors.New("store.db.Find options limit and offset must be used together")
		}
		query := DB.Find(q)
		if count, err = query.Count(); err != nil {
			return 0, err
		}
		err = query.Limit(limit).Skip(offset).All(results)
		return
	}
	err = DB.Find(q).All(results)
	return
}
// Load fetches the node with the given id and verifies that the user
// identified by uuid has read access to it. Returns the node, or an error
// when the lookup fails or access is denied.
// BUG FIX: the original session.Close() sat after returns in both
// branches and was unreachable, leaking a pooled connection per call;
// defer guarantees the session is released.
func Load(id string, uuid string) (n *Node, err error) {
	session := db.Connection.Session.Copy()
	defer session.Close()
	DB := session.DB(conf.Conf["mongodb-database"]).C("Nodes")
	n = new(Node)
	if err = DB.Find(bson.M{"id": id}).One(&n); err != nil {
		return nil, err
	}
	rights := n.Acl.Check(uuid)
	if !rights["read"] {
		return nil, errors.New("User Unauthorized")
	}
	return n, nil
}
// LoadUnauth fetches the node with the given id without any access-control
// check. Returns the node or the lookup error.
// BUG FIX: the original session.Close() sat after returns in both
// branches and was unreachable, leaking a pooled connection per call;
// defer guarantees the session is released.
func LoadUnauth(id string) (n *Node, err error) {
	session := db.Connection.Session.Copy()
	defer session.Close()
	DB := session.DB(conf.Conf["mongodb-database"]).C("Nodes")
	n = new(Node)
	if err = DB.Find(bson.M{"id": id}).One(&n); err != nil {
		return nil, err
	}
	return n, nil
}
// LoadNodes fetches every node whose id appears in ids (no paging, no
// access-control check). On error the partial result is discarded.
func LoadNodes(ids []string) (n Nodes, err error) {
	query := bson.M{"id": bson.M{"$in": ids}}
	if _, err = dbFind(query, &n, nil); err != nil {
		return nil, err
	}
	return n, nil
}
// ReloadFromDisk restores a node into the database from its on-disk BSON
// dump. path is the node's directory; the dump is expected at
// <path>/<id>.bson where id is the directory's base name.
// Improvement: builds the dump path with filepath.Join instead of manual
// "/" concatenation, which is the portable idiom (filepath is already
// imported by this file).
func ReloadFromDisk(path string) (err error) {
	id := filepath.Base(path)
	nbson, err := ioutil.ReadFile(filepath.Join(path, id+".bson"))
	if err != nil {
		return
	}
	node := new(Node)
	if err = bson.Unmarshal(nbson, &node); err != nil {
		return
	}
	return dbUpsert(node)
}
|
package uic
import (
"encoding/base64"
"encoding/json"
"github.com/Cepave/fe/g"
"github.com/Cepave/fe/http/base"
. "github.com/Cepave/fe/model/uic"
"github.com/Cepave/fe/utils"
"github.com/toolkits/str"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"time"
)
// AuthController handles authentication: login/logout, registration and
// third-party token login. It embeds base.BaseController for the shared
// request/response helpers (GetString, ServeErrJson, Redirect, ...).
type AuthController struct {
base.BaseController
}
// Logout ends the current user's session: it revokes any third-party
// token via the configured logout API, clears the "token" and "sig"
// cookies (on "/" and on the configured cookie domain), removes the
// server-side session and redirects to the login page.
func (this *AuthController) Logout() {
	user := this.Ctx.Input.GetData("CurrentUser").(*User)
	if token := this.Ctx.GetCookie("token"); len(token) > 0 {
		logoutUrl := g.Config().Api.Logout + "/" + token
		log.Println("logout url =", logoutUrl)
		log.Println("logout result =", sendHttpGetRequest(logoutUrl))
		this.Ctx.SetCookie("token", "", 0, "/")
		this.Ctx.SetCookie("token", "", 0, "/", g.Config().Http.Cookie)
	}
	RemoveSessionByUid(user.Id)
	this.Ctx.SetCookie("sig", "", 0, "/")
	this.Ctx.SetCookie("sig", "", 0, "/", g.Config().Http.Cookie)
	this.Redirect("/auth/login", 302)
}
// LoginGet serves the login page, or redirects an already-authenticated
// user (valid, unexpired "sig" cookie) to the callback URL when both
// "sig" and "callback" query params are present, else to /me/info.
func (this *AuthController) LoginGet() {
	appSig := this.GetString("sig", "")
	callback := this.GetString("callback", "")

	// Any missing, unknown or expired session falls through to the page.
	if cookieSig := this.Ctx.GetCookie("sig"); cookieSig != "" {
		if sessionObj := ReadSessionBySig(cookieSig); sessionObj != nil {
			if int64(sessionObj.Expired) >= time.Now().Unix() {
				if appSig != "" && callback != "" {
					this.Redirect(callback, 302)
				} else {
					this.Redirect("/me/info", 302)
				}
				return
			}
			// Expired: drop the stale server-side session.
			RemoveSessionByUid(sessionObj.Uid)
		}
	}
	this.renderLoginPage(appSig, callback)
}
// LoginPost authenticates a user from the "name"/"password" form fields
// and creates a session on success, answering with JSON. When the "ldap"
// form flag is set the credentials are verified against LDAP and a local
// user record is auto-provisioned on first login; otherwise they are
// checked against the local user table (salted MD5).
func (this *AuthController) LoginPost() {
name := this.GetString("name", "")
password := this.GetString("password", "")
if name == "" || password == "" {
this.ServeErrJson("name or password is blank")
return
}
var u *User
ldapEnabled := this.MustGetBool("ldap", false)
if ldapEnabled {
// Bind as the user to verify the password.
sucess, err := utils.LdapBind(g.Config().Ldap.Addr,
g.Config().Ldap.BaseDN,
g.Config().Ldap.BindDN,
g.Config().Ldap.BindPasswd,
g.Config().Ldap.UserField,
name,
password)
if err != nil {
this.ServeErrJson(err.Error())
return
}
if !sucess {
this.ServeErrJson("name or password error")
return
}
// Fetch profile attributes; a failed search is tolerated and the
// corresponding fields stay empty.
user_attributes, err := utils.Ldapsearch(g.Config().Ldap.Addr,
g.Config().Ldap.BaseDN,
g.Config().Ldap.BindDN,
g.Config().Ldap.BindPasswd,
g.Config().Ldap.UserField,
name,
g.Config().Ldap.Attributes)
userSn := ""
userMail := ""
userTel := ""
if err == nil {
userSn = user_attributes["sn"]
userMail = user_attributes["mail"]
userTel = user_attributes["telephoneNumber"]
}
// "user@domain" logins keep the full string as email and the local
// part as username.
arr := strings.Split(name, "@")
var userName, userEmail string
if len(arr) == 2 {
userName = arr[0]
userEmail = name
} else {
userName = name
userEmail = userMail
}
u = ReadUserByName(userName)
if u == nil {
// The user does not exist yet: auto-provision a local record
// from the LDAP attributes (empty local password).
u = &User{
Name: userName,
Passwd: "",
Cnname: userSn,
Phone: userTel,
Email: userEmail,
}
_, err = u.Save()
if err != nil {
this.ServeErrJson("insert user fail " + err.Error())
return
}
}
} else {
// Local authentication against the salted MD5 password hash.
u = ReadUserByName(name)
if u == nil {
this.ServeErrJson("no such user")
return
}
if u.Passwd != str.Md5Encode(g.Config().Salt+password) {
this.ServeErrJson("password error")
return
}
}
// With both "sig" and "callback" present this is an SSO-style login for
// another app: attach the session to the supplied signature instead of
// minting a fresh cookie. Sessions last 30 days either way.
appSig := this.GetString("sig", "")
callback := this.GetString("callback", "")
if appSig != "" && callback != "" {
SaveSessionAttrs(u.Id, appSig, int(time.Now().Unix())+3600*24*30)
} else {
this.CreateSession(u.Id, 3600*24*30)
}
this.ServeDataJson(callback)
}
// renderLoginPage populates the template context (registration/LDAP
// availability, SSO signature and callback, shortcut links) and selects
// the login template.
func (this *AuthController) renderLoginPage(sig, callback string) {
	this.Data["Sig"] = sig
	this.Data["Callback"] = callback
	this.Data["CanRegister"] = g.Config().CanRegister
	this.Data["LdapEnabled"] = g.Config().Ldap.Enabled
	this.Data["Shortcut"] = g.Config().Shortcut
	this.TplName = "auth/login.html"
}
// RegisterGet serves the registration page with the registration flag and
// shortcut links from the configuration.
func (this *AuthController) RegisterGet() {
	this.Data["Shortcut"] = g.Config().Shortcut
	this.Data["CanRegister"] = g.Config().CanRegister
	this.TplName = "auth/register.html"
}
// RegisterPost creates a local account from the registration form
// (name/password/repeat_password/email), validating that registration is
// open, the passwords match, the name is well-formed and not taken. On
// success the new user gets a 30-day session and an OK JSON response.
// BUG FIX: the password-mismatch error message contained the typo
// "repeart"; corrected to "repeat".
func (this *AuthController) RegisterPost() {
	if !g.Config().CanRegister {
		this.ServeErrJson("registration system is not open")
		return
	}
	name := strings.TrimSpace(this.GetString("name", ""))
	password := strings.TrimSpace(this.GetString("password", ""))
	repeatPassword := strings.TrimSpace(this.GetString("repeat_password", ""))
	email := strings.TrimSpace(this.GetString("email", ""))
	if password != repeatPassword {
		this.ServeErrJson("password not equal the repeat one")
		return
	}
	if !utils.IsUsernameValid(name) {
		this.ServeErrJson("name pattern is invalid")
		return
	}
	if ReadUserIdByName(name) > 0 {
		this.ServeErrJson("name is already existent")
		return
	}
	// Store the salted MD5 of the password, matching LoginPost's check.
	lastId, err := InsertRegisterUser(name, str.Md5Encode(g.Config().Salt+password), email)
	if err != nil {
		this.ServeErrJson("insert user fail " + err.Error())
		return
	}
	this.CreateSession(lastId, 3600*24*30)
	this.ServeOKJson()
}
// CreateSession mints a fresh session signature for uid, valid for maxAge
// seconds, persists it server-side and mirrors it into the "sig" cookie
// on both "/" and the configured cookie domain. Returns the expiry
// timestamp (unix seconds).
func (this *AuthController) CreateSession(uid int64, maxAge int) int {
	expiry := int(time.Now().Unix()) + maxAge
	sig := utils.GenerateUUID()
	SaveSessionAttrs(uid, sig, expiry)
	this.Ctx.SetCookie("sig", sig, maxAge, "/")
	this.Ctx.SetCookie("sig", sig, maxAge, "/", g.Config().Http.Cookie)
	return expiry
}
// LoginThirdParty answers with the third-party login URL: the configured
// redirect target is base64-encoded, query-escaped, and appended to the
// configured login endpoint. (Originally introduced for issue OWL-206;
// routed from /auth/third-party in fe/http/uic/uic_routes.go.)
func (this *AuthController) LoginThirdParty() {
	encoded := base64.StdEncoding.EncodeToString([]byte(g.Config().Api.Redirect))
	loginUrl := g.Config().Api.Login + "/" + url.QueryEscape(encoded)
	this.ServeDataJson(loginUrl)
}
// sendHttpGetRequest GETs the given URL and decodes the response body as
// a JSON object, returning nil on any error (request construction,
// transport, or decoding). Errors are logged, not returned.
// (Originally introduced for issues OWL-206/OWL-159.)
// BUG FIXES: resp.Body was deferred-closed BEFORE checking the client.Do
// error — on a failed request resp is nil and the deferred Close panics;
// and a failed http.NewRequest fell through to client.Do(nil).
func sendHttpGetRequest(url string) map[string]interface{} {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		log.Println("Error =", err.Error())
		return nil
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Println("Error =", err.Error())
		return nil
	}
	// Only defer the close once we know resp is non-nil.
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	var nodes = make(map[string]interface{})
	if err := json.Unmarshal(body, &nodes); err != nil {
		log.Println("Error =", err.Error())
		return nil
	}
	return nodes
}
// setUserInfo copies username, email and access_key from a successful
// auth-API response (status == 1) into userInfo. Responses with a
// different status, or with missing/mistyped fields, leave userInfo
// untouched.
// BUG FIX: the original used unchecked type assertions on an untrusted
// HTTP response (nodes["status"].(float64), data fields .(string)), which
// panic when the upstream service returns an unexpected shape; all
// assertions now use the comma-ok form.
func setUserInfo(nodes map[string]interface{}, userInfo map[string]string) {
	status, ok := nodes["status"].(float64)
	if !ok || int(status) != 1 {
		return
	}
	data, ok := nodes["data"].(map[string]interface{})
	if !ok {
		return
	}
	access_key, okKey := data["access_key"].(string)
	username, okName := data["username"].(string)
	email, okMail := data["email"].(string)
	if !okKey || !okName || !okMail {
		return
	}
	log.Println("access_key =", access_key)
	userInfo["username"] = username
	userInfo["email"] = email
	userInfo["access_key"] = access_key
}
// getUserRole asks the role API for the permission associated with
// access_key and maps it to the numeric role used by the User model:
// admin=0, operator=1, observer=2, deny=3. Returns -1 when the request
// fails or the permission is unknown.
// BUG FIX: the original asserted nodes["status"].(float64) without
// checking that sendHttpGetRequest succeeded (it returns nil on error) or
// that the field is present — both cases panicked; now they return -1.
func getUserRole(access_key string) int {
	urlRole := g.Config().Api.Role + "/" + access_key
	nodes := sendHttpGetRequest(urlRole)
	role := -1
	if nodes == nil {
		return role
	}
	status, ok := nodes["status"].(float64)
	if !ok || int(status) != 1 {
		return role
	}
	permission := nodes["data"]
	log.Println("permission =", permission)
	if permission == "admin" {
		role = 0
	} else if permission == "operator" {
		role = 1
	} else if permission == "observer" {
		role = 2
	} else if permission == "deny" {
		role = 3
	}
	return role
}
/**
 * @function name:	func (this *AuthController) LoginWithToken()
 * @description:	This function logins user with third party token.
 * @related issues:	OWL-247, OWL-206
 * @param:			void
 * @return:			void
 * @author:			Don Hsieh
 * @since:			12/16/2015
 * @last modified: 	01/08/2016
 * @called by:		beego.Router("/auth/login/:token", &AuthController{}, "get:LoginWithToken")
 *					 in fe/http/uic/uic_routes.go
 */
// LoginWithToken exchanges the :token URL parameter with the access API
// for user info, auto-provisions a local user on first login, stores the
// access key in the "token" cookie, creates a session and redirects to
// /me/info. On failure it falls back to the login page.
func (this *AuthController) LoginWithToken() {
log.Println("func (this *AuthController) LoginWithToken()")
token := this.Ctx.Input.Param(":token")
log.Println("token =", token)
key := g.Config().Api.Key
authUrl := g.Config().Api.Access + "/" + token + "/" + key
nodes := sendHttpGetRequest(authUrl)
// One retry on failure (sendHttpGetRequest returns nil on any error).
if nodes == nil {
nodes = sendHttpGetRequest(authUrl)
}
log.Println("nodes =", nodes)
var userInfo = make(map[string]string)
userInfo["username"] = ""
userInfo["email"] = ""
userInfo["access_key"] = ""
if nodes != nil {
setUserInfo(nodes, userInfo)
}
log.Println("userInfo =", userInfo)
username := userInfo["username"]
if len(username) > 0 {
access_key := userInfo["access_key"]
user := ReadUserByName(username)
if user == nil { // create third party user
// NOTE(review): RegisterPost calls InsertRegisterUser with three
// arguments (name, password, email) but only two are passed here —
// confirm the function is variadic / has an optional email.
InsertRegisterUser(username, "")
user = ReadUserByName(username)
}
// Third-party users are recognized by an empty local password; their
// role and email are refreshed from the role API on each login.
if len(user.Passwd) == 0 {
role := getUserRole(access_key)
// NOTE(review): this retries when role < 1, which re-queries not
// only on failure (-1) but also when the role is admin (0) —
// confirm the intended retry condition.
if role < 1 {
role = getUserRole(access_key)
}
email := userInfo["email"]
user.Email = email
user.Role = role
user.Update()
}
// Mirror the access key into the "token" cookie for 30 days, on "/"
// and on the configured cookie domain.
maxAge := 3600 * 24 * 30
this.Ctx.SetCookie("token", access_key, maxAge, "/")
this.Ctx.SetCookie("token", access_key, maxAge, "/", g.Config().Http.Cookie)
// SSO-style login when "sig" and "callback" are both supplied.
appSig := this.GetString("sig", "")
callback := this.GetString("callback", "")
if appSig != "" && callback != "" {
SaveSessionAttrs(user.Id, appSig, int(time.Now().Unix())+3600*24*30)
} else {
this.CreateSession(user.Id, 3600*24*30)
}
this.Redirect("/me/info", 302)
} else {
// not logged in. redirect to login page.
appSig := this.GetString("sig", "")
callback := this.GetString("callback", "")
this.renderLoginPage(appSig, callback)
}
}
fixup
package uic
import (
"encoding/base64"
"encoding/json"
"github.com/Cepave/fe/g"
"github.com/Cepave/fe/http/base"
. "github.com/Cepave/fe/model/uic"
"github.com/Cepave/fe/utils"
"github.com/toolkits/str"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"time"
)
// AuthController handles authentication: login/logout, registration and
// third-party token login. It embeds base.BaseController for the shared
// request/response helpers (GetString, ServeErrJson, Redirect, ...).
type AuthController struct {
base.BaseController
}
// Logout ends the current user's session: it revokes any third-party
// token via the configured logout API, clears the "token" and "sig"
// cookies (on "/" and on the configured cookie domain), removes the
// server-side session and redirects to the login page.
func (this *AuthController) Logout() {
	user := this.Ctx.Input.GetData("CurrentUser").(*User)
	if token := this.Ctx.GetCookie("token"); len(token) > 0 {
		logoutUrl := g.Config().Api.Logout + "/" + token
		log.Println("logout url =", logoutUrl)
		log.Println("logout result =", sendHttpGetRequest(logoutUrl))
		this.Ctx.SetCookie("token", "", 0, "/")
		this.Ctx.SetCookie("token", "", 0, "/", g.Config().Http.Cookie)
	}
	RemoveSessionByUid(user.Id)
	this.Ctx.SetCookie("sig", "", 0, "/")
	this.Ctx.SetCookie("sig", "", 0, "/", g.Config().Http.Cookie)
	this.Redirect("/auth/login", 302)
}
// LoginGet serves the login page, or redirects an already-authenticated
// user (valid, unexpired "sig" cookie) to the callback URL when both
// "sig" and "callback" query params are present, else to /me/info.
func (this *AuthController) LoginGet() {
	appSig := this.GetString("sig", "")
	callback := this.GetString("callback", "")

	// Any missing, unknown or expired session falls through to the page.
	if cookieSig := this.Ctx.GetCookie("sig"); cookieSig != "" {
		if sessionObj := ReadSessionBySig(cookieSig); sessionObj != nil {
			if int64(sessionObj.Expired) >= time.Now().Unix() {
				if appSig != "" && callback != "" {
					this.Redirect(callback, 302)
				} else {
					this.Redirect("/me/info", 302)
				}
				return
			}
			// Expired: drop the stale server-side session.
			RemoveSessionByUid(sessionObj.Uid)
		}
	}
	this.renderLoginPage(appSig, callback)
}
// LoginPost authenticates a user from the "name"/"password" form fields
// and creates a session on success, answering with JSON. When the "ldap"
// form flag is set the credentials are verified against LDAP and a local
// user record is auto-provisioned on first login; otherwise they are
// checked against the local user table (salted MD5).
func (this *AuthController) LoginPost() {
name := this.GetString("name", "")
password := this.GetString("password", "")
if name == "" || password == "" {
this.ServeErrJson("name or password is blank")
return
}
var u *User
ldapEnabled := this.MustGetBool("ldap", false)
if ldapEnabled {
// Bind as the user to verify the password.
sucess, err := utils.LdapBind(g.Config().Ldap.Addr,
g.Config().Ldap.BaseDN,
g.Config().Ldap.BindDN,
g.Config().Ldap.BindPasswd,
g.Config().Ldap.UserField,
name,
password)
if err != nil {
this.ServeErrJson(err.Error())
return
}
if !sucess {
this.ServeErrJson("name or password error")
return
}
// Fetch profile attributes; a failed search is tolerated and the
// corresponding fields stay empty.
user_attributes, err := utils.Ldapsearch(g.Config().Ldap.Addr,
g.Config().Ldap.BaseDN,
g.Config().Ldap.BindDN,
g.Config().Ldap.BindPasswd,
g.Config().Ldap.UserField,
name,
g.Config().Ldap.Attributes)
userSn := ""
userMail := ""
userTel := ""
if err == nil {
userSn = user_attributes["sn"]
userMail = user_attributes["mail"]
userTel = user_attributes["telephoneNumber"]
}
// "user@domain" logins keep the full string as email and the local
// part as username.
arr := strings.Split(name, "@")
var userName, userEmail string
if len(arr) == 2 {
userName = arr[0]
userEmail = name
} else {
userName = name
userEmail = userMail
}
u = ReadUserByName(userName)
if u == nil {
// The user does not exist yet: auto-provision a local record
// from the LDAP attributes (empty local password).
u = &User{
Name: userName,
Passwd: "",
Cnname: userSn,
Phone: userTel,
Email: userEmail,
}
_, err = u.Save()
if err != nil {
this.ServeErrJson("insert user fail " + err.Error())
return
}
}
} else {
// Local authentication against the salted MD5 password hash.
u = ReadUserByName(name)
if u == nil {
this.ServeErrJson("no such user")
return
}
if u.Passwd != str.Md5Encode(g.Config().Salt+password) {
this.ServeErrJson("password error")
return
}
}
// With both "sig" and "callback" present this is an SSO-style login for
// another app: attach the session to the supplied signature instead of
// minting a fresh cookie. Sessions last 30 days either way.
appSig := this.GetString("sig", "")
callback := this.GetString("callback", "")
if appSig != "" && callback != "" {
SaveSessionAttrs(u.Id, appSig, int(time.Now().Unix())+3600*24*30)
} else {
this.CreateSession(u.Id, 3600*24*30)
}
this.ServeDataJson(callback)
}
// renderLoginPage populates the template data shared by every render of
// the login page and selects the login template.
func (this *AuthController) renderLoginPage(sig, callback string) {
	this.Data["CanRegister"] = g.Config().CanRegister
	this.Data["LdapEnabled"] = g.Config().Ldap.Enabled
	// Propagate third-party app parameters so the login form can echo them back.
	this.Data["Sig"] = sig
	this.Data["Callback"] = callback
	this.Data["Shortcut"] = g.Config().Shortcut
	this.TplName = "auth/login.html"
}
// RegisterGet renders the registration page; the template itself hides
// the form when registration is disabled (see CanRegister).
func (this *AuthController) RegisterGet() {
	this.Data["CanRegister"] = g.Config().CanRegister
	this.Data["Shortcut"] = g.Config().Shortcut
	this.TplName = "auth/register.html"
}
// RegisterPost creates a new local user account. Registration must be
// enabled in the configuration; the username must match the allowed
// pattern and be unused, and both password fields must agree. On
// success the new user is logged in with a 30-day session.
func (this *AuthController) RegisterPost() {
	if !g.Config().CanRegister {
		this.ServeErrJson("registration system is not open")
		return
	}
	name := strings.TrimSpace(this.GetString("name", ""))
	password := strings.TrimSpace(this.GetString("password", ""))
	repeatPassword := strings.TrimSpace(this.GetString("repeat_password", ""))
	email := strings.TrimSpace(this.GetString("email", ""))
	if password != repeatPassword {
		// Fixed typo in the user-facing message ("repeart" -> "repeat").
		this.ServeErrJson("password not equal the repeat one")
		return
	}
	if !utils.IsUsernameValid(name) {
		this.ServeErrJson("name pattern is invalid")
		return
	}
	if ReadUserIdByName(name) > 0 {
		this.ServeErrJson("name is already existent")
		return
	}
	// Passwords are stored as salted MD5, matching LoginPost's check.
	lastId, err := InsertRegisterUser(name, str.Md5Encode(g.Config().Salt+password), email)
	if err != nil {
		this.ServeErrJson("insert user fail " + err.Error())
		return
	}
	this.CreateSession(lastId, 3600*24*30)
	this.ServeOKJson()
}
// CreateSession issues a fresh session for uid lasting maxAge seconds,
// persists it, sets the "sig" cookie on both the bare path and the
// configured cookie domain, and returns the expiry as a unix timestamp.
func (this *AuthController) CreateSession(uid int64, maxAge int) int {
	sig := utils.GenerateUUID()
	expiresAt := int(time.Now().Unix()) + maxAge
	SaveSessionAttrs(uid, sig, expiresAt)
	this.Ctx.SetCookie("sig", sig, maxAge, "/")
	this.Ctx.SetCookie("sig", sig, maxAge, "/", g.Config().Http.Cookie)
	return expiresAt
}
/**
* @function name: func (this *AuthController) LoginThirdParty()
* @description: This function returns third party login URL.
* @related issues: OWL-206
* @param: void
* @return: void
* @author: Don Hsieh
* @since: 12/17/2015
* @last modified: 12/17/2015
* @called by: beego.Router("/auth/third-party", &AuthController{}, "post:LoginThirdParty")
* in fe/http/uic/uic_routes.go
*/
// LoginThirdParty serves the third-party login URL as JSON: the
// configured redirect target, base64- then URL-encoded, appended to the
// remote login endpoint.
func (this *AuthController) LoginThirdParty() {
	encoded := base64.StdEncoding.EncodeToString([]byte(g.Config().Api.Redirect))
	loginUrl := g.Config().Api.Login + "/" + url.QueryEscape(encoded)
	this.ServeDataJson(loginUrl)
}
/**
* @function name: func sendHttpGetRequest(url string) map[string]interface{}
* @description: This function sends GET request to given URL.
* @related issues: OWL-206, OWL-159
* @param: url string
* @return: map[string]interface{}
* @author: Don Hsieh
* @since: 12/17/2015
* @last modified: 12/17/2015
* @called by: func (this *AuthController) LoginWithToken()
* in fe/http/uic/auth_controller.go
*/
// sendHttpGetRequest performs a GET against url and decodes the JSON
// response body into a generic map. It returns nil on any failure
// (request construction, transport, or JSON decoding), logging the error.
//
// Fixes two defects in the original: (1) a missing return after the
// http.NewRequest error, which let execution continue with a nil request;
// (2) `defer resp.Body.Close()` was registered BEFORE the client.Do error
// check, so a transport error (resp == nil) caused a nil-pointer panic
// when the deferred Close ran.
func sendHttpGetRequest(url string) map[string]interface{} {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		log.Println("Error =", err.Error())
		return nil
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Println("Error =", err.Error())
		return nil
	}
	// Only close the body once we know the request actually succeeded.
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	var nodes = make(map[string]interface{})
	if err := json.Unmarshal(body, &nodes); err != nil {
		log.Println("Error =", err.Error())
		return nil
	}
	return nodes
}
// setUserInfo copies username, email and access_key out of a successful
// auth-API response (status == 1) into userInfo. Responses without a
// status field, or with status != 1, leave userInfo untouched.
func setUserInfo(nodes map[string]interface{}, userInfo map[string]string) {
	status, ok := nodes["status"]
	if !ok || int(status.(float64)) != 1 {
		return
	}
	data := nodes["data"].(map[string]interface{})
	accessKey := data["access_key"].(string)
	username := data["username"].(string)
	email := data["email"].(string)
	log.Println("access_key =", accessKey)
	userInfo["username"] = username
	userInfo["email"] = email
	userInfo["access_key"] = accessKey
}
// getUserRole asks the remote role API for the permission bound to
// access_key and maps it to the local role code: admin=0, operator=1,
// observer=2, deny=3. It returns -1 when the request fails, the response
// is malformed, or the permission is unrecognized.
//
// Fixes a crash in the original: sendHttpGetRequest returns nil on
// failure, and indexing plus the unchecked `.(float64)` assertion on
// nodes["status"] panicked in that case.
func getUserRole(access_key string) int {
	urlRole := g.Config().Api.Role + "/" + access_key
	nodes := sendHttpGetRequest(urlRole)
	role := -1
	if nodes == nil {
		return role
	}
	status, ok := nodes["status"].(float64)
	if !ok || int(status) != 1 {
		return role
	}
	permission := nodes["data"]
	log.Println("permission =", permission)
	switch permission {
	case "admin":
		role = 0
	case "operator":
		role = 1
	case "observer":
		role = 2
	case "deny":
		role = 3
	}
	return role
}
/**
* @function name: func (this *AuthController) LoginWithToken()
* @description: This function logins user with third party token.
* @related issues: OWL-247, OWL-206
* @param: void
* @return: void
* @author: Don Hsieh
* @since: 12/16/2015
* @last modified: 01/08/2016
* @called by: beego.Router("/auth/login/:token", &AuthController{}, "get:LoginWithToken")
* in fe/http/uic/uic_routes.go
*/
func (this *AuthController) LoginWithToken() {
	log.Println("func (this *AuthController) LoginWithToken()")
	token := this.Ctx.Input.Param(":token")
	log.Println("token =", token)
	key := g.Config().Api.Key
	authUrl := g.Config().Api.Access + "/" + token + "/" + key
	nodes := sendHttpGetRequest(authUrl)
	if nodes == nil {
		// One retry when the auth API call fails.
		nodes = sendHttpGetRequest(authUrl)
	}
	log.Println("nodes =", nodes)
	var userInfo = make(map[string]string)
	userInfo["username"] = ""
	userInfo["email"] = ""
	userInfo["access_key"] = ""
	if nodes != nil {
		setUserInfo(nodes, userInfo)
	}
	log.Println("userInfo =", userInfo)
	username := userInfo["username"]
	if len(username) > 0 {
		access_key := userInfo["access_key"]
		user := ReadUserByName(username)
		if user == nil { // create third party user
			InsertRegisterUser(username, "", "")
			user = ReadUserByName(username)
		}
		// Only passwordless (third-party managed) accounts get their
		// email/role refreshed from the remote service.
		if len(user.Passwd) == 0 {
			role := getUserRole(access_key)
			// NOTE(review): this retries whenever role < 1, which also
			// covers the valid "admin" role (0) — looks like it was meant
			// to be role < 0; confirm before changing.
			if role < 1 {
				role = getUserRole(access_key)
			}
			email := userInfo["email"]
			user.Email = email
			user.Role = role
			user.Update()
		}
		// 30-day token cookie, set on bare path and configured domain.
		maxAge := 3600 * 24 * 30
		this.Ctx.SetCookie("token", access_key, maxAge, "/")
		this.Ctx.SetCookie("token", access_key, maxAge, "/", g.Config().Http.Cookie)
		appSig := this.GetString("sig", "")
		callback := this.GetString("callback", "")
		if appSig != "" && callback != "" {
			SaveSessionAttrs(user.Id, appSig, int(time.Now().Unix())+3600*24*30)
		} else {
			this.CreateSession(user.Id, 3600*24*30)
		}
		this.Redirect("/me/info", 302)
	} else {
		// not logged in. redirect to login page.
		appSig := this.GetString("sig", "")
		callback := this.GetString("callback", "")
		this.renderLoginPage(appSig, callback)
	}
}
|
package hoverfly
import (
"testing"
)
// TestSetMetadataKey checks that a value stored with Set is returned by
// Get, using a randomly named bucket so tests do not interfere.
func TestSetMetadataKey(t *testing.T) {
	metaBucket := GetRandomName(10)
	md := NewBoltDBMetadata(TestDB, metaBucket)
	md.Set("foo", "bar")
	expect(t, md.Get("foo"), "bar")
}
more tests for metadata
package hoverfly
import (
"testing"
)
// TestSetMetadataKey checks that a value stored with Set is returned by
// Get without error, using a randomly named bucket for isolation.
func TestSetMetadataKey(t *testing.T) {
	metaBucket := GetRandomName(10)
	md := NewBoltDBMetadata(TestDB, metaBucket)
	md.Set("foo", "bar")
	val, err := md.Get("foo")
	expect(t, err, nil)
	expect(t, val, "bar")
}
// TestDeleteMetadataKey checks that Get reports an error once the key
// has been removed with Delete.
func TestDeleteMetadataKey(t *testing.T) {
	metaBucket := GetRandomName(10)
	md := NewBoltDBMetadata(TestDB, metaBucket)
	md.Set("foo", "bar")
	md.Delete("foo")
	_, err := md.Get("foo")
	refute(t, err, nil)
}
// TestGetAllValues checks that GetAll returns every stored key/value
// pair from the bucket.
func TestGetAllValues(t *testing.T) {
	metaBucket := GetRandomName(10)
	md := NewBoltDBMetadata(TestDB, metaBucket)
	md.Set("foo", "bar")
	md.Set("foo2", "bar2")
	md.Set("foo3", "bar3")
	values, err := md.GetAll()
	expect(t, err, nil)
	expect(t, len(values), 3)
	expect(t, values["foo"], "bar")
}
// TestDeleteAllData checks that DeleteData wipes the whole bucket,
// leaving GetAll with an empty result and no error.
func TestDeleteAllData(t *testing.T) {
	metaBucket := GetRandomName(10)
	md := NewBoltDBMetadata(TestDB, metaBucket)
	md.Set("foo", "bar")
	md.Set("foo2", "bar2")
	md.Set("foo3", "bar3")
	md.DeleteData()
	values, err := md.GetAll()
	expect(t, err, nil)
	expect(t, len(values), 0)
}
|
// Copyright © 2016 Zenly <hello@zen.ly>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protein
import (
"context"
"github.com/pkg/errors"
"github.com/rainycape/memcache"
"github.com/znly/protein/failure"
)
// -----------------------------------------------------------------------------
//type TranscoderGetter func(ctx context.Context, schemaUID string) ([]byte, error)
//type TranscoderSetter func(ctx context.Context, schemaUID string, payload []byte) error
// -----------------------------------------------------------------------------
/* Memcached */
// CreateTranscoderGetterMemcached returns a TranscoderGetter suitable for
// querying a binary blob from a memcached-compatible store.
//
// The specified context will be ignored.
func CreateTranscoderGetterMemcached(c *memcache.Client) TranscoderGetter {
	return func(ctx context.Context, schemaUID string) ([]byte, error) {
		item, err := c.Get(schemaUID)
		if err != nil {
			// Map a cache miss onto the package-level "schema not found"
			// failure so callers need not know about memcached internals.
			if err == memcache.ErrCacheMiss {
				return nil, errors.WithStack(failure.ErrSchemaNotFound)
			}
			return nil, errors.WithStack(err)
		}
		return item.Value, nil
	}
}
// CreateTranscoderSetterMemcached returns a TranscoderSetter suitable for
// setting a binary blob into a memcached-compatible store.
//
// The specified context will be ignored.
func CreateTranscoderSetterMemcached(c *memcache.Client) TranscoderSetter {
	return func(ctx context.Context, schemaUID string, payload []byte) error {
		// Stored with no expiration/flags; the schemaUID is the cache key.
		return c.Set(&memcache.Item{
			Key:   schemaUID,
			Value: payload,
		})
	}
}
// -----------------------------------------------------------------------------
/* Redis */
// -----------------------------------------------------------------------------
/* Cassandra */
transcoder:helpers > implemented redis TranscoderGetter/Setter
// Copyright © 2016 Zenly <hello@zen.ly>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protein
import (
"context"
"go.uber.org/zap"
"github.com/garyburd/redigo/redis"
"github.com/pkg/errors"
"github.com/rainycape/memcache"
"github.com/znly/protein/failure"
)
// -----------------------------------------------------------------------------
//type TranscoderGetter func(ctx context.Context, schemaUID string) ([]byte, error)
//type TranscoderSetter func(ctx context.Context, schemaUID string, payload []byte) error
// -----------------------------------------------------------------------------
/* Memcached */
// CreateTranscoderGetterMemcached returns a `TranscoderGetter` suitable for
// querying a binary blob from a memcached-compatible store.
//
// The specified context will be ignored.
func CreateTranscoderGetterMemcached(c *memcache.Client) TranscoderGetter {
	return func(ctx context.Context, schemaUID string) ([]byte, error) {
		item, err := c.Get(schemaUID)
		if err != nil {
			// Map a cache miss onto the package-level "schema not found"
			// failure so callers need not know about memcached internals.
			if err == memcache.ErrCacheMiss {
				return nil, errors.WithStack(failure.ErrSchemaNotFound)
			}
			return nil, errors.WithStack(err)
		}
		return item.Value, nil
	}
}
// CreateTranscoderSetterMemcached returns a `TranscoderSetter` suitable for
// setting a binary blob into a memcached-compatible store.
//
// The specified context will be ignored.
func CreateTranscoderSetterMemcached(c *memcache.Client) TranscoderSetter {
	return func(ctx context.Context, schemaUID string, payload []byte) error {
		// Stored with no expiration/flags; the schemaUID is the cache key.
		return c.Set(&memcache.Item{
			Key:   schemaUID,
			Value: payload,
		})
	}
}
// -----------------------------------------------------------------------------
/* Redis */
// CreateTranscoderGetterRedis returns a `TranscoderGetter` suitable for
// querying a binary blob from a redis-compatible store.
//
// The specified context will be ignored.
func CreateTranscoderGetterRedis(p *redis.Pool) TranscoderGetter {
	return func(ctx context.Context, schemaUID string) ([]byte, error) {
		c := p.Get() // avoid defer()
		b, err := redis.Bytes(c.Do("GET", schemaUID))
		// Return the connection to the pool immediately; the shadowed
		// close error is only logged so the GET result takes precedence.
		if err := c.Close(); err != nil {
			zap.L().Error(err.Error())
		}
		if err != nil {
			// redis.ErrNil means the key is absent -> schema not found.
			if err == redis.ErrNil {
				return nil, errors.WithStack(failure.ErrSchemaNotFound)
			}
			return nil, errors.WithStack(err)
		}
		return b, nil
	}
}
// CreateTranscoderSetterRedis returns a `TranscoderSetter` suitable for
// setting a binary blob into a redis-compatible store.
//
// The specified context will be ignored.
func CreateTranscoderSetterRedis(p *redis.Pool) TranscoderSetter {
	return func(ctx context.Context, schemaUID string, payload []byte) error {
		c := p.Get() // avoid defer()
		_, err := c.Do("SET", schemaUID, payload)
		// Close first so the connection goes back to the pool even when
		// SET failed; the close error is only logged.
		if err := c.Close(); err != nil {
			zap.L().Error(err.Error())
		}
		return errors.WithStack(err)
	}
}
// -----------------------------------------------------------------------------
/* Cassandra */
|
package translator
import (
"code.google.com/p/go.tools/go/exact"
"code.google.com/p/go.tools/go/types"
"fmt"
"go/ast"
"go/token"
"strings"
)
// ReservedKeywords lists JavaScript identifiers that generated variable
// names must never collide with; it seeds PkgContext.usedVarNames.
// "function" is included — omitting it would let a Go identifier named
// "function" produce invalid JavaScript.
var ReservedKeywords = []string{"arguments", "class", "delete", "eval", "export", "false", "function", "implements", "interface", "in", "let", "new", "package", "private", "protected", "public", "static", "this", "true", "try", "yield"}
// ErrorList aggregates type-checker errors while still satisfying the
// error interface.
type ErrorList []error

// Error returns the message of the first collected error only; callers
// wanting the full set should inspect the slice directly.
func (err ErrorList) Error() string {
	return err[0].Error()
}
// PkgContext carries all state needed while translating one Go package
// to JavaScript.
type PkgContext struct {
	pkg          *types.Package
	info         *types.Info
	pkgVars      map[string]string       // import path -> JS variable holding that package
	objectVars   map[types.Object]string // Go object -> generated JS name
	usedVarNames []string                // JS names taken so far; seeded with ReservedKeywords
	functionSig  *types.Signature
	resultNames  []ast.Expr
	postLoopStmt ast.Stmt
	output       []byte // generated JS accumulated via Write/Printf
	indentation  int
	delayedLines []byte // lines emitted right after the next Printf (see Delayed)
}
// Write appends b to the generated output. Implementing io.Writer lets
// fmt.Fprintf target the context directly (see Printf); it never fails.
func (c *PkgContext) Write(b []byte) (int, error) {
	c.output = append(c.output, b...)
	return len(b), nil
}
// Printf emits one line of JavaScript at the current indentation level,
// then flushes any lines queued via Delayed.
func (c *PkgContext) Printf(format string, values ...interface{}) {
	c.Write([]byte(strings.Repeat("\t", c.indentation)))
	fmt.Fprintf(c, format, values...)
	c.Write([]byte{'\n'})
	c.Write(c.delayedLines)
	c.delayedLines = nil
}
// Indent runs f with the indentation level increased by one.
func (c *PkgContext) Indent(f func()) {
	c.indentation += 1
	f()
	c.indentation -= 1
}
// CatchOutput runs f and returns everything it wrote to the context's
// output buffer, restoring the previous buffer afterwards.
func (c *PkgContext) CatchOutput(f func()) []byte {
	saved := c.output
	c.output = nil
	f()
	captured := c.output
	c.output = saved
	return captured
}
// Delayed records f's output to be emitted right after the next Printf
// line instead of immediately.
func (c *PkgContext) Delayed(f func()) {
	c.delayedLines = c.CatchOutput(f)
}
// TranslatePackage type-checks the given files and returns the
// JavaScript translation of the package. The emitted code assumes a
// surrounding runtime providing Go$packages, Go$Panic, Go$idCounter etc.
func TranslatePackage(importPath string, files []*ast.File, fileSet *token.FileSet, config *types.Config) ([]byte, error) {
	info := &types.Info{
		Types: make(map[ast.Expr]types.Type),
		Values: make(map[ast.Expr]exact.Value),
		Objects: make(map[*ast.Ident]types.Object),
		Implicits: make(map[ast.Node]types.Object),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	var errList ErrorList
	var previousErr error
	// Collect type-checker errors, collapsing consecutive duplicates.
	config.Error = func(err error) {
		if previousErr != nil && previousErr.Error() == err.Error() {
			return
		}
		errList = append(errList, err)
		previousErr = err
	}
	typesPkg, err := config.Check(importPath, fileSet, files, info)
	if errList != nil {
		return nil, errList
	}
	if err != nil {
		return nil, err
	}
	config.Packages[importPath] = typesPkg
	c := &PkgContext{
		pkg: typesPkg,
		info: info,
		pkgVars: make(map[string]string),
		objectVars: make(map[types.Object]string),
		usedVarNames: ReservedKeywords,
	}
	// First pass: index declarations. Methods are grouped by receiver
	// type (nil key = package-level functions) so they can be emitted
	// next to their type.
	functionsByType := make(map[types.Type][]*ast.FuncDecl)
	functionsByObject := make(map[types.Object]*ast.FuncDecl)
	var typeSpecs []*ast.TypeSpec
	var valueSpecs []*ast.ValueSpec
	for _, file := range files {
		for _, decl := range file.Decls {
			switch d := decl.(type) {
			case *ast.FuncDecl:
				sig := c.info.Objects[d.Name].(*types.Func).Type().(*types.Signature)
				var recvType types.Type
				if sig.Recv() != nil {
					recvType = sig.Recv().Type()
					if ptr, isPtr := recvType.(*types.Pointer); isPtr {
						recvType = ptr.Elem()
					}
				}
				functionsByType[recvType] = append(functionsByType[recvType], d)
				o := c.info.Objects[d.Name]
				functionsByObject[o] = d
				if sig.Recv() == nil {
					c.objectName(o) // register toplevel name
				}
			case *ast.GenDecl:
				switch d.Tok {
				case token.TYPE:
					for _, spec := range d.Specs {
						s := spec.(*ast.TypeSpec)
						typeSpecs = append(typeSpecs, s)
						c.objectName(c.info.Objects[s.Name]) // register toplevel name
					}
				case token.CONST, token.VAR:
					for _, spec := range d.Specs {
						s := spec.(*ast.ValueSpec)
						valueSpecs = append(valueSpecs, s)
						for _, name := range s.Names {
							if !isUnderscore(name) {
								c.objectName(c.info.Objects[name]) // register toplevel name
							}
						}
					}
				}
			}
		}
	}
	// Second pass: emit the package body inside one indentation level.
	return c.CatchOutput(func() {
		c.Indent(func() {
			c.Printf("var Go$pkg = {};")
			for _, importedPkg := range typesPkg.Imports() {
				varName := c.newVariable(importedPkg.Name())
				c.Printf(`var %s = Go$packages["%s"];`, varName, importedPkg.Path())
				c.pkgVars[importedPkg.Path()] = varName
			}
			// types and their functions
			for _, spec := range typeSpecs {
				obj := c.info.Objects[spec.Name]
				typeName := c.objectName(obj)
				c.Printf("var %s;", typeName)
				c.translateSpec(spec)
				for _, fun := range functionsByType[obj.Type()] {
					funName := fun.Name.Name
					// A constant named js_<Type>_<method> overrides the
					// method body with hand-written JavaScript.
					jsCode, _ := typesPkg.Scope().Lookup("js_" + typeName + "_" + funName).(*types.Const)
					if jsCode != nil {
						n := c.usedVarNames
						c.Printf("%s.prototype.%s = function(%s) {\n%s\n};", typeName, funName, c.translateParams(fun.Type), exact.StringVal(jsCode.Val()))
						c.usedVarNames = n
						continue
					}
					_, isStruct := obj.Type().Underlying().(*types.Struct)
					c.translateFunction(typeName, isStruct, fun)
				}
				c.Printf("Go$pkg.%s = %s;", typeName, typeName)
			}
			// package functions
			for _, fun := range functionsByType[nil] {
				name := fun.Name.Name
				jsCode, _ := typesPkg.Scope().Lookup("js_" + name).(*types.Const)
				if jsCode != nil {
					n := c.usedVarNames
					c.Printf("var %s = function(%s) {\n%s\n};", name, c.translateParams(fun.Type), exact.StringVal(jsCode.Val()))
					c.usedVarNames = n
					continue
				}
				if fun.Body == nil {
					c.Printf(`var %s = function() { throw new Go$Panic("Native function not implemented: %s"); };`, name, name)
					continue
				}
				funcLit := &ast.FuncLit{
					Type: fun.Type,
					Body: &ast.BlockStmt{
						List: fun.Body.List,
					},
				}
				funType := c.info.Objects[fun.Name].Type()
				c.info.Types[fun.Name] = funType
				c.info.Types[funcLit] = funType
				c.Printf("var %s = %s;", c.translateExpr(fun.Name), c.translateExpr(funcLit))
			}
			// constants and variables in dependency aware order
			var specs []*ast.ValueSpec
			pendingObjects := make(map[types.Object]bool)
			for _, spec := range valueSpecs {
				for i, name := range spec.Names {
					if strings.HasPrefix(name.Name, "js_") {
						continue
					}
					var values []ast.Expr
					switch o := c.info.Objects[name].(type) {
					case *types.Var:
						if i < len(spec.Values) {
							values = []ast.Expr{spec.Values[i]}
						}
					case *types.Const:
						// Constants are replaced by a synthetic ident
						// carrying the evaluated value.
						id := ast.NewIdent("")
						c.info.Types[id] = o.Type()
						c.info.Values[id] = o.Val()
						values = []ast.Expr{id}
					default:
						panic("")
					}
					specs = append(specs, &ast.ValueSpec{
						Names: []*ast.Ident{name},
						Type: spec.Type,
						Values: values,
					})
					pendingObjects[c.info.Objects[spec.Names[0]]] = true
				}
			}
			// Fixed-point loop: emit each spec only once everything it
			// references has already been emitted.
			complete := false
			for !complete {
				complete = true
				for i, spec := range specs {
					if spec == nil {
						continue
					}
					if spec.Values != nil {
						v := IsReadyVisitor{info: c.info, functions: functionsByObject, pendingObjects: pendingObjects, isReady: true}
						ast.Walk(&v, spec.Values[0])
						if !v.isReady {
							complete = false
							continue
						}
					}
					c.translateSpec(spec)
					delete(pendingObjects, c.info.Objects[spec.Names[0]])
					specs[i] = nil
				}
			}
			// native implementations
			if native, hasNative := natives[importPath]; hasNative {
				c.Write([]byte(strings.TrimSpace(native)))
				c.Write([]byte{'\n'})
			}
			// exports for package functions
			for _, fun := range functionsByType[nil] {
				name := fun.Name.Name
				if fun.Name.IsExported() || name == "init" || name == "main" {
					c.Printf("Go$pkg.%s = %s;", name, name)
				}
			}
			c.Printf("return Go$pkg;")
		})
	}), nil
}
// translateSpec emits JavaScript for a single value, type, or import
// spec. Value specs become assignment statements; type specs produce the
// JS constructor plus its Go$key/Go$Pointer/Go$NonPointer machinery.
func (c *PkgContext) translateSpec(spec ast.Spec) {
	switch s := spec.(type) {
	case *ast.ValueSpec:
		for _, name := range s.Names {
			c.info.Types[name] = c.info.Objects[name].Type()
		}
		// Walk the names, consuming one RHS value at a time; a tuple RHS
		// (multi-value call) consumes several names in one assignment.
		i := 0
		for i < len(s.Names) {
			var rhs ast.Expr
			n := 1
			if i < len(s.Values) {
				rhs = s.Values[i]
				if tuple, isTuple := c.info.Types[rhs].(*types.Tuple); isTuple {
					n = tuple.Len()
				}
			}
			lhs := make([]ast.Expr, n)
			for j := range lhs {
				if j >= len(s.Names) {
					lhs[j] = ast.NewIdent("_")
					continue
				}
				lhs[j] = s.Names[i+j]
			}
			c.translateStmt(&ast.AssignStmt{
				Lhs: lhs,
				Tok: token.DEFINE,
				Rhs: []ast.Expr{rhs},
			}, "")
			i += n
		}
	case *ast.TypeSpec:
		obj := c.info.Objects[s.Name]
		typeName := c.objectName(obj)
		// Wrapped types (see isWrapped) are boxed in a {v: ...} object.
		if isWrapped(obj.Type()) {
			c.Printf(`var %s = function(v) { this.v = v; };`, typeName)
			c.Printf(`%s.prototype.Go$key = function() { return "%s$" + this.v; };`, typeName, typeName)
			c.Printf("%s.Go$Pointer = function(getter, setter) { this.Go$get = getter; this.Go$set = setter; };", typeName)
			return
		}
		switch t := obj.Type().Underlying().(type) {
		case *types.Struct:
			// Constructor taking one trailing-underscore parameter per field.
			params := make([]string, t.NumFields())
			for i := 0; i < t.NumFields(); i++ {
				params[i] = t.Field(i).Name() + "_"
			}
			c.Printf("%s = function(%s) {", typeName, strings.Join(params, ", "))
			c.Indent(func() {
				c.Printf("this.Go$id = Go$idCounter++;")
				for i := 0; i < t.NumFields(); i++ {
					field := t.Field(i)
					c.Printf("this.%s = %s_ || %s;", field.Name(), field.Name(), c.zeroValue(field.Type()))
				}
			})
			c.Printf("};")
			c.Printf(`%s.Go$name = "%s";`, typeName, typeName)
			c.Printf(`%s.prototype.Go$key = function() { return this.Go$id; };`, typeName)
			c.Printf("%s.Go$NonPointer = function(v) { this.v = v; };", typeName)
			// Forward promoted methods from anonymous (embedded) fields.
			for i := 0; i < t.NumFields(); i++ {
				field := t.Field(i)
				if field.Anonymous() {
					fieldType := field.Type()
					_, isPointer := fieldType.(*types.Pointer)
					_, isUnderlyingInterface := fieldType.Underlying().(*types.Interface)
					if !isPointer && !isUnderlyingInterface {
						fieldType = types.NewPointer(fieldType) // strange, seems like a bug in go/types
					}
					methods := fieldType.MethodSet()
					for j := 0; j < methods.Len(); j++ {
						name := methods.At(j).Obj().Name()
						sig := methods.At(j).Type().(*types.Signature)
						params := make([]string, sig.Params().Len())
						for k := range params {
							params[k] = sig.Params().At(k).Name()
						}
						value := "this." + field.Name()
						if isWrapped(field.Type()) {
							value = fmt.Sprintf("new %s(%s)", field.Name(), value)
						}
						paramList := strings.Join(params, ", ")
						c.Printf("%s.prototype.%s = function(%s) { return %s.%s(%s); };", typeName, name, paramList, value, name, paramList)
						c.Printf("%s.Go$NonPointer.prototype.%s = function(%s) { return this.v.%s(%s); };", typeName, name, paramList, name, paramList)
					}
				}
			}
		case *types.Interface:
			c.Printf("%s = { Go$implementedBy: [] };", typeName)
		default:
			// Named non-struct, non-interface types delegate to their
			// underlying type's constructor.
			underlyingTypeName := c.typeName(t)
			c.Printf("%s = function() { %s.apply(this, arguments); };", typeName, underlyingTypeName)
			c.Printf("%s.Go$Pointer = function(getter, setter) { this.Go$get = getter; this.Go$set = setter; };", typeName)
			if _, isSlice := t.(*types.Slice); isSlice {
				c.Printf("%s.Go$nil = new %s({ isNil: true, length: 0 });", typeName, typeName)
			}
		}
	case *ast.ImportSpec:
		// ignored
	default:
		panic(fmt.Sprintf("Unhandled spec: %T\n", s))
	}
}
// translateFunction emits a method declaration onto the appropriate JS
// prototype(s) — the type itself, its Go$NonPointer wrapper, or its
// Go$Pointer — depending on whether the receiver is a struct and whether
// it is a pointer.
func (c *PkgContext) translateFunction(typeName string, isStruct bool, fun *ast.FuncDecl) {
	sig := c.info.Objects[fun.Name].(*types.Func).Type().(*types.Signature)
	recvType := sig.Recv().Type()
	ptr, isPointer := recvType.(*types.Pointer)
	body := fun.Body.List
	// A named receiver is rebound from JS `this` at the top of the body.
	if fun.Recv.List[0].Names != nil {
		recv := fun.Recv.List[0].Names[0]
		var this ast.Expr = ast.NewIdent("this")
		if isWrapped(recvType) {
			this = ast.NewIdent("this.v")
		}
		if _, isUnderlyingStruct := recvType.Underlying().(*types.Struct); isUnderlyingStruct {
			this = &ast.StarExpr{X: this}
		}
		c.info.Types[recv] = recvType
		c.info.Types[this] = recvType
		body = append([]ast.Stmt{
			&ast.AssignStmt{
				Lhs: []ast.Expr{recv},
				Tok: token.DEFINE,
				Rhs: []ast.Expr{this},
			},
		}, body...)
	}
	funcLit := &ast.FuncLit{
		Type: fun.Type,
		Body: &ast.BlockStmt{
			List: body,
		},
	}
	c.info.Types[funcLit] = c.info.Objects[fun.Name].Type()
	// translateParams registers names; restore usedVarNames afterwards
	// so the parameter names stay local to this method.
	n := c.usedVarNames
	params := c.translateParams(fun.Type)
	c.usedVarNames = n
	switch {
	case isStruct:
		c.Printf("%s.prototype.%s = %s;", typeName, fun.Name.Name, c.translateExpr(funcLit))
		c.Printf("%s.Go$NonPointer.prototype.%s = function(%s) { return this.v.%s(%s); };", typeName, fun.Name.Name, params, fun.Name.Name, params)
	case !isStruct && !isPointer:
		value := "this.Go$get()"
		if isWrapped(recvType) {
			value = fmt.Sprintf("new %s(%s)", typeName, value)
		}
		c.Printf("%s.prototype.%s = %s;", typeName, fun.Name.Name, c.translateExpr(funcLit))
		c.Printf("%s.Go$Pointer.prototype.%s = function(%s) { return %s.%s(%s); };", typeName, fun.Name.Name, params, value, fun.Name.Name, params)
	case !isStruct && isPointer:
		value := "this"
		if isWrapped(ptr.Elem()) {
			value = "this.v"
		}
		c.Printf("%s.prototype.%s = function(%s) { var obj = %s; return (new %s.Go$Pointer(function() { return obj; }, null)).%s(%s); };", typeName, fun.Name.Name, params, value, typeName, fun.Name.Name, params)
		c.Printf("%s.Go$Pointer.prototype.%s = %s;", typeName, fun.Name.Name, c.translateExpr(funcLit))
	}
}
// translateParams returns the comma-separated JS parameter list for a
// function type, inventing a fresh name for each blank ("_") parameter.
func (c *PkgContext) translateParams(t *ast.FuncType) string {
	params := make([]string, 0)
	for _, param := range t.Params.List {
		for _, ident := range param.Names {
			if isUnderscore(ident) {
				params = append(params, c.newVariable("param"))
				continue
			}
			params = append(params, c.objectName(c.info.Objects[ident]))
		}
	}
	return strings.Join(params, ", ")
}
// translateArgs translates a call's arguments, converting each to its
// parameter type. For a variadic callee called without "...", the
// trailing arguments are collected into one Go$Slice argument.
func (c *PkgContext) translateArgs(call *ast.CallExpr) []string {
	funType := c.info.Types[call.Fun].Underlying().(*types.Signature)
	args := make([]string, funType.Params().Len())
	for i := range args {
		if funType.IsVariadic() && i == len(args)-1 && !call.Ellipsis.IsValid() {
			varargType := funType.Params().At(i).Type().(*types.Slice).Elem()
			varargs := make([]string, len(call.Args)-i)
			for i, vararg := range call.Args[i:] {
				varargs[i] = c.translateExprToType(vararg, varargType)
			}
			args[i] = fmt.Sprintf("new Go$Slice(%s)", createListComposite(varargType, varargs))
			break
		}
		argType := funType.Params().At(i).Type()
		args[i] = c.translateExprToType(call.Args[i], argType)
	}
	return args
}
// zeroValue returns the JavaScript expression for the zero value of ty:
// "false"/"0"/`""` for basics, a zeroed typed array for arrays, the
// type's Go$nil for slices, a constructor call or field-by-field object
// literal for structs, and "null" for everything else.
func (c *PkgContext) zeroValue(ty types.Type) string {
	named, isNamed := ty.(*types.Named)
	switch t := ty.Underlying().(type) {
	case *types.Basic:
		// 64-bit integers are emulated with a two-word JS object.
		if is64Bit(t) {
			return fmt.Sprintf("new %s(0, 0)", c.typeName(ty))
		}
		if t.Info()&types.IsBoolean != 0 {
			return "false"
		}
		if t.Info()&types.IsNumeric != 0 {
			return "0"
		}
		if t.Info()&types.IsString != 0 {
			return `""`
		}
		if t.Kind() == types.UntypedNil {
			panic("Zero value for untyped nil.")
		}
	case *types.Array:
		return fmt.Sprintf("Go$clear(new %s(%d), %s)", toArrayType(t.Elem()), t.Len(), c.zeroValue(t.Elem()))
	case *types.Slice:
		return fmt.Sprintf("%s.Go$nil", c.typeName(ty))
	case *types.Struct:
		if isNamed {
			return fmt.Sprintf("new %s()", c.objectName(named.Obj()))
		}
		// Anonymous struct: emit an object literal with zeroed fields.
		fields := make([]string, t.NumFields())
		for i := range fields {
			field := t.Field(i)
			fields[i] = field.Name() + ": " + c.zeroValue(field.Type())
		}
		return fmt.Sprintf("{%s}", strings.Join(fields, ", "))
	}
	return "null"
}
// newVariable returns a JS identifier based on prefix that is not yet in
// usedVarNames, appending a numeric suffix until it is unique, and
// records the chosen name as used.
func (c *PkgContext) newVariable(prefix string) string {
	n := 0
	for {
		name := prefix
		// Prefixes containing bytes outside '0'..'z' (e.g. non-ASCII)
		// are replaced wholesale by a safe placeholder.
		// NOTE(review): "nonAasciiName" is misspelled ("nonAsciiName"),
		// but it is a runtime identifier in generated output — renaming
		// would change emitted code; confirm before fixing.
		for _, b := range []byte(name) {
			if b < '0' || b > 'z' {
				name = "nonAasciiName"
				break
			}
		}
		if n != 0 {
			name += fmt.Sprintf("%d", n)
		}
		used := false
		for _, usedName := range c.usedVarNames {
			if usedName == name {
				used = true
				break
			}
		}
		if !used {
			c.usedVarNames = append(c.usedVarNames, name)
			return name
		}
		n += 1
	}
}
// objectName returns (allocating on first use) the JS name for a Go
// object. Objects from other packages are qualified with that package's
// variable; package-scope vars/consts are qualified with "Go$pkg.".
func (c *PkgContext) objectName(o types.Object) string {
	if o.Name() == "error" {
		return "Go$error"
	}
	if o.Pkg() != nil && o.Pkg() != c.pkg {
		return c.pkgVars[o.Pkg().Path()] + "." + o.Name()
	}
	name, found := c.objectVars[o]
	if !found {
		name = c.newVariable(o.Name())
		c.objectVars[o] = name
	}
	switch o.(type) {
	case *types.Var, *types.Const:
		if o.Parent() == c.pkg.Scope() {
			return "Go$pkg." + name
		}
	}
	return name
}
// typeName maps a Go type to the name of its JavaScript representation
// (Go$Slice, Go$Map, a named type's constructor, etc.).
func (c *PkgContext) typeName(ty types.Type) string {
	switch t := ty.(type) {
	case *types.Basic:
		if t.Kind() == types.UntypedNil {
			return "null"
		}
		return "Go$" + toJavaScriptType(t)
	case *types.Named:
		// Named structs are referenced by value through their
		// Go$NonPointer wrapper.
		if _, isStruct := t.Underlying().(*types.Struct); isStruct {
			return c.objectName(t.Obj()) + ".Go$NonPointer"
		}
		return c.objectName(t.Obj())
	case *types.Pointer:
		if named, isNamed := t.Elem().(*types.Named); isNamed && named.Obj().Name() != "error" {
			switch t.Elem().Underlying().(type) {
			case *types.Struct:
				// Pointer-to-struct IS the struct constructor in JS.
				return c.objectName(named.Obj())
			case *types.Interface:
				return "Go$Pointer"
			default:
				return c.objectName(named.Obj()) + ".Go$Pointer"
			}
		}
		return "Go$Pointer"
	case *types.Array:
		return "Go$Array"
	case *types.Slice:
		return "Go$Slice"
	case *types.Map:
		return "Go$Map"
	case *types.Interface:
		return "Go$Interface"
	case *types.Chan:
		return "Go$Channel"
	case *types.Signature:
		return "Go$Func"
	default:
		panic(fmt.Sprintf("Unhandled type: %T\n", t))
	}
}
// toJavaScriptType returns the capitalized JS runtime suffix for a basic
// type, e.g. int -> "Int", uint8 -> "Uint8"; untyped ints map to "Int".
func toJavaScriptType(t *types.Basic) string {
	switch t.Kind() {
	case types.UntypedInt:
		return "Int"
	default:
		name := t.String()
		return strings.ToUpper(name[:1]) + name[1:]
	}
}
// is64Bit reports whether t is int64 or uint64, which JS cannot hold in
// a plain number and must emulate.
func is64Bit(t *types.Basic) bool {
	return t.Kind() == types.Int64 || t.Kind() == types.Uint64
}
// isComplex reports whether t is complex64 or complex128.
func isComplex(t *types.Basic) bool {
	return t.Kind() == types.Complex64 || t.Kind() == types.Complex128
}
// isTypedArray reports whether values of t can be stored in a JavaScript
// typed array: numeric, but not 64-bit or complex.
func isTypedArray(t types.Type) bool {
	basic, isBasic := t.(*types.Basic)
	return isBasic && basic.Info()&types.IsNumeric != 0 && !is64Bit(basic) && !isComplex(basic)
}
// toArrayType returns the JS array constructor name for element type t:
// a typed-array constructor where possible, otherwise Go$Array.
func toArrayType(t types.Type) string {
	if isTypedArray(t) {
		return "Go$" + toJavaScriptType(t.(*types.Basic)) + "Array"
	}
	return "Go$Array"
}
// createListComposite renders elements as a JavaScript array literal,
// wrapping it in a typed-array constructor for numeric element types.
func createListComposite(elementType types.Type, elements []string) string {
	list := strings.Join(elements, ", ")
	if isTypedArray(elementType) {
		return fmt.Sprintf("new %s([%s])", toArrayType(elementType), list)
	}
	return "[" + list + "]"
}
// isUnderscore reports whether expr is the blank identifier "_".
func isUnderscore(expr ast.Expr) bool {
	id, ok := expr.(*ast.Ident)
	return ok && id.Name == "_"
}
// hasId reports whether values of ty carry an identity usable as a map
// key object: 64-bit basics, pointers, and interfaces.
func hasId(ty types.Type) bool {
	switch t := ty.Underlying().(type) {
	case *types.Basic:
		return is64Bit(t)
	case *types.Pointer, *types.Interface:
		return true
	}
	return false
}
// isWrapped reports whether named values of ty are boxed in a {v: ...}
// wrapper object in the generated JS: most basics (not 64-bit, not
// untyped nil), arrays, and function signatures.
func isWrapped(ty types.Type) bool {
	switch t := ty.Underlying().(type) {
	case *types.Basic:
		return !is64Bit(t) && t.Kind() != types.UntypedNil
	case *types.Array, *types.Signature:
		return true
	}
	return false
}
// IsReadyVisitor walks an initializer expression and decides whether all
// objects it (transitively, through called functions) references have
// already been emitted; pendingObjects holds the not-yet-emitted ones.
type IsReadyVisitor struct {
	info *types.Info
	functions map[types.Object]*ast.FuncDecl
	pendingObjects map[types.Object]bool
	isReady bool
}
// Visit implements ast.Visitor: it clears isReady on the first reference
// to a pending object and recurses into referenced function bodies.
func (v *IsReadyVisitor) Visit(node ast.Node) (w ast.Visitor) {
	if !v.isReady {
		return nil
	}
	switch n := node.(type) {
	case *ast.Ident:
		o := v.info.Objects[n]
		if v.pendingObjects[o] {
			v.isReady = false
			return nil
		}
		if fun, found := v.functions[o]; found {
			// Temporarily remove the function to break recursion cycles.
			delete(v.functions, o)
			ast.Walk(v, fun)
			v.functions[o] = fun
		}
	}
	return v
}
Added "function" to list of keywords.
package translator
import (
"code.google.com/p/go.tools/go/exact"
"code.google.com/p/go.tools/go/types"
"fmt"
"go/ast"
"go/token"
"strings"
)
var ReservedKeywords = []string{"arguments", "class", "delete", "eval", "export", "false", "function", "implements", "interface", "in", "let", "new", "package", "private", "protected", "public", "static", "this", "true", "try", "yield"}
type ErrorList []error
func (err ErrorList) Error() string {
return err[0].Error()
}
// PkgContext carries all state needed while translating a single Go
// package to JavaScript.
type PkgContext struct {
	pkg          *types.Package
	info         *types.Info
	pkgVars      map[string]string       // import path -> JS variable holding the imported package object
	objectVars   map[types.Object]string // Go object -> generated JS identifier
	usedVarNames []string                // every identifier handed out so far (seeded with ReservedKeywords)
	functionSig  *types.Signature        // presumably the signature of the function being translated — set elsewhere, not visible in this chunk
	resultNames  []ast.Expr              // presumably named results of the current function — set elsewhere
	postLoopStmt ast.Stmt                // presumably a statement to re-emit at loop ends — set elsewhere
	output       []byte                  // generated JavaScript accumulated so far
	indentation  int                     // current indentation depth in tabs
	delayedLines []byte                  // output queued by Delayed, flushed by the next Printf
}
// Write appends b to the generated output. It implements io.Writer so
// fmt.Fprintf can target the context directly; it never fails.
func (c *PkgContext) Write(b []byte) (int, error) {
	n := len(b)
	c.output = append(c.output, b...)
	return n, nil
}
// Printf writes one indented, newline-terminated line of generated code,
// then flushes any lines queued via Delayed.
func (c *PkgContext) Printf(format string, values ...interface{}) {
	indent := strings.Repeat("\t", c.indentation)
	c.Write([]byte(indent))
	fmt.Fprintf(c, format, values...)
	c.Write([]byte("\n"))
	c.Write(c.delayedLines)
	c.delayedLines = nil
}
// Indent runs f with the output indentation level increased by one.
func (c *PkgContext) Indent(f func()) {
	c.indentation++
	f()
	c.indentation--
}
// CatchOutput runs f and returns everything it wrote to the context,
// leaving the previously accumulated output untouched.
func (c *PkgContext) CatchOutput(f func()) []byte {
	saved := c.output
	c.output = nil
	f()
	captured := c.output
	c.output = saved
	return captured
}
// Delayed records the output of f so that the next Printf call emits it
// immediately after its own line.
func (c *PkgContext) Delayed(f func()) {
	c.delayedLines = c.CatchOutput(f)
}
// TranslatePackage type-checks the given files as the package at
// importPath and returns the generated JavaScript source for it.
// Emission order: imported-package bindings, type declarations with their
// methods, package-level functions, constants/variables in a
// dependency-aware order, native overrides, and finally Go$pkg exports.
func TranslatePackage(importPath string, files []*ast.File, fileSet *token.FileSet, config *types.Config) ([]byte, error) {
	info := &types.Info{
		Types:      make(map[ast.Expr]types.Type),
		Values:     make(map[ast.Expr]exact.Value),
		Objects:    make(map[*ast.Ident]types.Object),
		Implicits:  make(map[ast.Node]types.Object),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	var errList ErrorList
	var previousErr error
	// Collect type-checker errors, collapsing immediately repeated duplicates.
	config.Error = func(err error) {
		if previousErr != nil && previousErr.Error() == err.Error() {
			return
		}
		errList = append(errList, err)
		previousErr = err
	}
	typesPkg, err := config.Check(importPath, fileSet, files, info)
	if errList != nil {
		return nil, errList
	}
	if err != nil {
		return nil, err
	}
	config.Packages[importPath] = typesPkg
	c := &PkgContext{
		pkg:          typesPkg,
		info:         info,
		pkgVars:      make(map[string]string),
		objectVars:   make(map[types.Object]string),
		usedVarNames: ReservedKeywords,
	}
	// First pass: index declarations so all toplevel names are registered
	// before any code is emitted.
	functionsByType := make(map[types.Type][]*ast.FuncDecl)
	functionsByObject := make(map[types.Object]*ast.FuncDecl)
	var typeSpecs []*ast.TypeSpec
	var valueSpecs []*ast.ValueSpec
	for _, file := range files {
		for _, decl := range file.Decls {
			switch d := decl.(type) {
			case *ast.FuncDecl:
				sig := c.info.Objects[d.Name].(*types.Func).Type().(*types.Signature)
				var recvType types.Type
				if sig.Recv() != nil {
					recvType = sig.Recv().Type()
					if ptr, isPtr := recvType.(*types.Pointer); isPtr {
						recvType = ptr.Elem()
					}
				}
				functionsByType[recvType] = append(functionsByType[recvType], d)
				o := c.info.Objects[d.Name]
				functionsByObject[o] = d
				if sig.Recv() == nil {
					c.objectName(o) // register toplevel name
				}
			case *ast.GenDecl:
				switch d.Tok {
				case token.TYPE:
					for _, spec := range d.Specs {
						s := spec.(*ast.TypeSpec)
						typeSpecs = append(typeSpecs, s)
						c.objectName(c.info.Objects[s.Name]) // register toplevel name
					}
				case token.CONST, token.VAR:
					for _, spec := range d.Specs {
						s := spec.(*ast.ValueSpec)
						valueSpecs = append(valueSpecs, s)
						for _, name := range s.Names {
							if !isUnderscore(name) {
								c.objectName(c.info.Objects[name]) // register toplevel name
							}
						}
					}
				}
			}
		}
	}
	return c.CatchOutput(func() {
		c.Indent(func() {
			c.Printf("var Go$pkg = {};")
			for _, importedPkg := range typesPkg.Imports() {
				varName := c.newVariable(importedPkg.Name())
				c.Printf(`var %s = Go$packages["%s"];`, varName, importedPkg.Path())
				c.pkgVars[importedPkg.Path()] = varName
			}
			// types and their functions
			for _, spec := range typeSpecs {
				obj := c.info.Objects[spec.Name]
				typeName := c.objectName(obj)
				c.Printf("var %s;", typeName)
				c.translateSpec(spec)
				for _, fun := range functionsByType[obj.Type()] {
					funName := fun.Name.Name
					// A js_<Type>_<Func> constant supplies a hand-written
					// JavaScript body that replaces the translated one.
					jsCode, _ := typesPkg.Scope().Lookup("js_" + typeName + "_" + funName).(*types.Const)
					if jsCode != nil {
						n := c.usedVarNames
						c.Printf("%s.prototype.%s = function(%s) {\n%s\n};", typeName, funName, c.translateParams(fun.Type), exact.StringVal(jsCode.Val()))
						c.usedVarNames = n
						continue
					}
					_, isStruct := obj.Type().Underlying().(*types.Struct)
					c.translateFunction(typeName, isStruct, fun)
				}
				c.Printf("Go$pkg.%s = %s;", typeName, typeName)
			}
			// package functions
			for _, fun := range functionsByType[nil] {
				name := fun.Name.Name
				jsCode, _ := typesPkg.Scope().Lookup("js_" + name).(*types.Const)
				if jsCode != nil {
					n := c.usedVarNames
					c.Printf("var %s = function(%s) {\n%s\n};", name, c.translateParams(fun.Type), exact.StringVal(jsCode.Val()))
					c.usedVarNames = n
					continue
				}
				if fun.Body == nil {
					c.Printf(`var %s = function() { throw new Go$Panic("Native function not implemented: %s"); };`, name, name)
					continue
				}
				// Translate the declaration body as a function literal of
				// the same type.
				funcLit := &ast.FuncLit{
					Type: fun.Type,
					Body: &ast.BlockStmt{
						List: fun.Body.List,
					},
				}
				funType := c.info.Objects[fun.Name].Type()
				c.info.Types[fun.Name] = funType
				c.info.Types[funcLit] = funType
				c.Printf("var %s = %s;", c.translateExpr(fun.Name), c.translateExpr(funcLit))
			}
			// constants and variables in dependency aware order
			var specs []*ast.ValueSpec
			pendingObjects := make(map[types.Object]bool)
			for _, spec := range valueSpecs {
				for i, name := range spec.Names {
					if strings.HasPrefix(name.Name, "js_") {
						continue
					}
					var values []ast.Expr
					switch o := c.info.Objects[name].(type) {
					case *types.Var:
						if i < len(spec.Values) {
							values = []ast.Expr{spec.Values[i]}
						}
					case *types.Const:
						// Materialize the constant's value as a synthetic
						// expression so it is emitted like a variable.
						id := ast.NewIdent("")
						c.info.Types[id] = o.Type()
						c.info.Values[id] = o.Val()
						values = []ast.Expr{id}
					default:
						panic("")
					}
					specs = append(specs, &ast.ValueSpec{
						Names:  []*ast.Ident{name},
						Type:   spec.Type,
						Values: values,
					})
					pendingObjects[c.info.Objects[spec.Names[0]]] = true
				}
			}
			// Repeatedly emit every spec whose dependencies are all ready
			// until none remain.
			complete := false
			for !complete {
				complete = true
				for i, spec := range specs {
					if spec == nil {
						continue
					}
					if spec.Values != nil {
						v := IsReadyVisitor{info: c.info, functions: functionsByObject, pendingObjects: pendingObjects, isReady: true}
						ast.Walk(&v, spec.Values[0])
						if !v.isReady {
							complete = false
							continue
						}
					}
					c.translateSpec(spec)
					delete(pendingObjects, c.info.Objects[spec.Names[0]])
					specs[i] = nil
				}
			}
			// native implementations
			if native, hasNative := natives[importPath]; hasNative {
				c.Write([]byte(strings.TrimSpace(native)))
				c.Write([]byte{'\n'})
			}
			// exports for package functions
			for _, fun := range functionsByType[nil] {
				name := fun.Name.Name
				if fun.Name.IsExported() || name == "init" || name == "main" {
					c.Printf("Go$pkg.%s = %s;", name, name)
				}
			}
			c.Printf("return Go$pkg;")
		})
	}), nil
}
// translateSpec emits JavaScript for a single declaration spec: value
// specs become assignment statements, type specs become constructor
// functions plus wrapper/pointer helpers, and import specs are ignored.
func (c *PkgContext) translateSpec(spec ast.Spec) {
	switch s := spec.(type) {
	case *ast.ValueSpec:
		for _, name := range s.Names {
			c.info.Types[name] = c.info.Objects[name].Type()
		}
		// Walk names/values in lockstep; a tuple-valued RHS consumes
		// several names in one assignment.
		i := 0
		for i < len(s.Names) {
			var rhs ast.Expr
			n := 1
			if i < len(s.Values) {
				rhs = s.Values[i]
				if tuple, isTuple := c.info.Types[rhs].(*types.Tuple); isTuple {
					n = tuple.Len()
				}
			}
			lhs := make([]ast.Expr, n)
			for j := range lhs {
				if j >= len(s.Names) {
					lhs[j] = ast.NewIdent("_")
					continue
				}
				lhs[j] = s.Names[i+j]
			}
			c.translateStmt(&ast.AssignStmt{
				Lhs: lhs,
				Tok: token.DEFINE,
				Rhs: []ast.Expr{rhs},
			}, "")
			i += n
		}
	case *ast.TypeSpec:
		obj := c.info.Objects[s.Name]
		typeName := c.objectName(obj)
		// Wrapped types are represented by a simple box holding .v.
		if isWrapped(obj.Type()) {
			c.Printf(`var %s = function(v) { this.v = v; };`, typeName)
			c.Printf(`%s.prototype.Go$key = function() { return "%s$" + this.v; };`, typeName, typeName)
			c.Printf("%s.Go$Pointer = function(getter, setter) { this.Go$get = getter; this.Go$set = setter; };", typeName)
			return
		}
		switch t := obj.Type().Underlying().(type) {
		case *types.Struct:
			params := make([]string, t.NumFields())
			for i := 0; i < t.NumFields(); i++ {
				params[i] = t.Field(i).Name() + "_"
			}
			c.Printf("%s = function(%s) {", typeName, strings.Join(params, ", "))
			c.Indent(func() {
				c.Printf("this.Go$id = Go$idCounter++;")
				for i := 0; i < t.NumFields(); i++ {
					field := t.Field(i)
					c.Printf("this.%s = %s_ || %s;", field.Name(), field.Name(), c.zeroValue(field.Type()))
				}
			})
			c.Printf("};")
			c.Printf(`%s.Go$name = "%s";`, typeName, typeName)
			c.Printf(`%s.prototype.Go$key = function() { return this.Go$id; };`, typeName)
			c.Printf("%s.Go$NonPointer = function(v) { this.v = v; };", typeName)
			// Promote the methods of embedded fields onto this struct and
			// its NonPointer box.
			for i := 0; i < t.NumFields(); i++ {
				field := t.Field(i)
				if field.Anonymous() {
					fieldType := field.Type()
					_, isPointer := fieldType.(*types.Pointer)
					_, isUnderlyingInterface := fieldType.Underlying().(*types.Interface)
					if !isPointer && !isUnderlyingInterface {
						fieldType = types.NewPointer(fieldType) // strange, seems like a bug in go/types
					}
					methods := fieldType.MethodSet()
					for j := 0; j < methods.Len(); j++ {
						name := methods.At(j).Obj().Name()
						sig := methods.At(j).Type().(*types.Signature)
						params := make([]string, sig.Params().Len())
						for k := range params {
							params[k] = sig.Params().At(k).Name()
						}
						value := "this." + field.Name()
						if isWrapped(field.Type()) {
							value = fmt.Sprintf("new %s(%s)", field.Name(), value)
						}
						paramList := strings.Join(params, ", ")
						c.Printf("%s.prototype.%s = function(%s) { return %s.%s(%s); };", typeName, name, paramList, value, name, paramList)
						c.Printf("%s.Go$NonPointer.prototype.%s = function(%s) { return this.v.%s(%s); };", typeName, name, paramList, name, paramList)
					}
				}
			}
		case *types.Interface:
			c.Printf("%s = { Go$implementedBy: [] };", typeName)
		default:
			// Other named types delegate to the constructor of their
			// underlying representation.
			underlyingTypeName := c.typeName(t)
			c.Printf("%s = function() { %s.apply(this, arguments); };", typeName, underlyingTypeName)
			c.Printf("%s.Go$Pointer = function(getter, setter) { this.Go$get = getter; this.Go$set = setter; };", typeName)
			if _, isSlice := t.(*types.Slice); isSlice {
				c.Printf("%s.Go$nil = new %s({ isNil: true, length: 0 });", typeName, typeName)
			}
		}
	case *ast.ImportSpec:
		// ignored
	default:
		panic(fmt.Sprintf("Unhandled spec: %T\n", s))
	}
}
// translateFunction emits the JavaScript method for fun on typeName,
// attaching it to the right prototype depending on whether the receiver
// is a struct and/or a pointer, and adding the complementary
// NonPointer/Pointer delegating wrapper.
func (c *PkgContext) translateFunction(typeName string, isStruct bool, fun *ast.FuncDecl) {
	sig := c.info.Objects[fun.Name].(*types.Func).Type().(*types.Signature)
	recvType := sig.Recv().Type()
	ptr, isPointer := recvType.(*types.Pointer)
	body := fun.Body.List
	// If the receiver is named, prepend "recv := this" (adjusted for
	// wrapped receivers and struct dereferencing) to the body.
	if fun.Recv.List[0].Names != nil {
		recv := fun.Recv.List[0].Names[0]
		var this ast.Expr = ast.NewIdent("this")
		if isWrapped(recvType) {
			this = ast.NewIdent("this.v")
		}
		if _, isUnderlyingStruct := recvType.Underlying().(*types.Struct); isUnderlyingStruct {
			this = &ast.StarExpr{X: this}
		}
		c.info.Types[recv] = recvType
		c.info.Types[this] = recvType
		body = append([]ast.Stmt{
			&ast.AssignStmt{
				Lhs: []ast.Expr{recv},
				Tok: token.DEFINE,
				Rhs: []ast.Expr{this},
			},
		}, body...)
	}
	funcLit := &ast.FuncLit{
		Type: fun.Type,
		Body: &ast.BlockStmt{
			List: body,
		},
	}
	c.info.Types[funcLit] = c.info.Objects[fun.Name].Type()
	// Render the parameter list without permanently claiming the names.
	n := c.usedVarNames
	params := c.translateParams(fun.Type)
	c.usedVarNames = n
	switch {
	case isStruct:
		c.Printf("%s.prototype.%s = %s;", typeName, fun.Name.Name, c.translateExpr(funcLit))
		c.Printf("%s.Go$NonPointer.prototype.%s = function(%s) { return this.v.%s(%s); };", typeName, fun.Name.Name, params, fun.Name.Name, params)
	case !isStruct && !isPointer:
		value := "this.Go$get()"
		if isWrapped(recvType) {
			value = fmt.Sprintf("new %s(%s)", typeName, value)
		}
		c.Printf("%s.prototype.%s = %s;", typeName, fun.Name.Name, c.translateExpr(funcLit))
		c.Printf("%s.Go$Pointer.prototype.%s = function(%s) { return %s.%s(%s); };", typeName, fun.Name.Name, params, value, fun.Name.Name, params)
	case !isStruct && isPointer:
		value := "this"
		if isWrapped(ptr.Elem()) {
			value = "this.v"
		}
		c.Printf("%s.prototype.%s = function(%s) { var obj = %s; return (new %s.Go$Pointer(function() { return obj; }, null)).%s(%s); };", typeName, fun.Name.Name, params, value, typeName, fun.Name.Name, params)
		c.Printf("%s.Go$Pointer.prototype.%s = %s;", typeName, fun.Name.Name, c.translateExpr(funcLit))
	}
}
// translateParams renders the parameter list of t as comma-separated
// JavaScript identifiers, substituting a fresh generated name for each
// blank "_" parameter.
func (c *PkgContext) translateParams(t *ast.FuncType) string {
	var names []string
	for _, field := range t.Params.List {
		for _, ident := range field.Names {
			if isUnderscore(ident) {
				names = append(names, c.newVariable("param"))
			} else {
				names = append(names, c.objectName(c.info.Objects[ident]))
			}
		}
	}
	return strings.Join(names, ", ")
}
// translateArgs renders the call's arguments as JavaScript expressions,
// converting each argument to its parameter type. For a variadic call
// without an explicit "..." the trailing arguments are collected into a
// freshly constructed Go$Slice.
func (c *PkgContext) translateArgs(call *ast.CallExpr) []string {
	funType := c.info.Types[call.Fun].Underlying().(*types.Signature)
	args := make([]string, funType.Params().Len())
	for i := range args {
		if funType.IsVariadic() && i == len(args)-1 && !call.Ellipsis.IsValid() {
			varargType := funType.Params().At(i).Type().(*types.Slice).Elem()
			varargs := make([]string, len(call.Args)-i)
			// NOTE: the inner i deliberately shadows the outer loop
			// variable; it indexes into the vararg tail only.
			for i, vararg := range call.Args[i:] {
				varargs[i] = c.translateExprToType(vararg, varargType)
			}
			args[i] = fmt.Sprintf("new Go$Slice(%s)", createListComposite(varargType, varargs))
			break
		}
		argType := funType.Params().At(i).Type()
		args[i] = c.translateExprToType(call.Args[i], argType)
	}
	return args
}
// zeroValue returns the JavaScript expression for the zero value of ty:
// "0"/"false"/`""` for plain basics, a zeroed constructor call for 64-bit
// integers, arrays, and named structs, the type's Go$nil for slices, and
// "null" for every type without a more specific representation.
func (c *PkgContext) zeroValue(ty types.Type) string {
	named, isNamed := ty.(*types.Named)
	switch t := ty.Underlying().(type) {
	case *types.Basic:
		if is64Bit(t) {
			return fmt.Sprintf("new %s(0, 0)", c.typeName(ty))
		}
		if t.Info()&types.IsBoolean != 0 {
			return "false"
		}
		if t.Info()&types.IsNumeric != 0 {
			return "0"
		}
		if t.Info()&types.IsString != 0 {
			return `""`
		}
		if t.Kind() == types.UntypedNil {
			panic("Zero value for untyped nil.")
		}
	case *types.Array:
		return fmt.Sprintf("Go$clear(new %s(%d), %s)", toArrayType(t.Elem()), t.Len(), c.zeroValue(t.Elem()))
	case *types.Slice:
		return fmt.Sprintf("%s.Go$nil", c.typeName(ty))
	case *types.Struct:
		if isNamed {
			return fmt.Sprintf("new %s()", c.objectName(named.Obj()))
		}
		// Anonymous struct: emit an object literal with zeroed fields.
		fields := make([]string, t.NumFields())
		for i := range fields {
			field := t.Field(i)
			fields[i] = field.Name() + ": " + c.zeroValue(field.Type())
		}
		return fmt.Sprintf("{%s}", strings.Join(fields, ", "))
	}
	return "null"
}
// newVariable returns a JavaScript-safe variable name derived from prefix
// that is not yet in use in this package. A prefix containing characters
// outside the ASCII range '0'..'z' is replaced by a fixed placeholder,
// and a numeric suffix is appended until the name is unique. The chosen
// name is recorded in usedVarNames.
func (c *PkgContext) newVariable(prefix string) string {
	// Sanitize once, outside the search loop — the result never changes
	// between iterations.
	sanitized := prefix
	for _, b := range []byte(prefix) {
		if b < '0' || b > 'z' {
			sanitized = "nonAsciiName" // fixed typo: was "nonAasciiName"
			break
		}
	}
	for n := 0; ; n++ {
		name := sanitized
		if n != 0 {
			name += fmt.Sprintf("%d", n)
		}
		used := false
		for _, usedName := range c.usedVarNames {
			if usedName == name {
				used = true
				break
			}
		}
		if !used {
			c.usedVarNames = append(c.usedVarNames, name)
			return name
		}
	}
}
// objectName returns the JavaScript identifier used for o, allocating and
// caching a fresh collision-free name on first use. Objects from other
// packages are qualified with that package's variable; package-scope vars
// and consts are addressed through Go$pkg so they are reachable as exports.
func (c *PkgContext) objectName(o types.Object) string {
	if o.Name() == "error" {
		return "Go$error"
	}
	if o.Pkg() != nil && o.Pkg() != c.pkg {
		return c.pkgVars[o.Pkg().Path()] + "." + o.Name()
	}
	name, found := c.objectVars[o]
	if !found {
		name = c.newVariable(o.Name())
		c.objectVars[o] = name
	}
	switch o.(type) {
	case *types.Var, *types.Const:
		if o.Parent() == c.pkg.Scope() {
			return "Go$pkg." + name
		}
	}
	return name
}
// typeName returns the JavaScript constructor/value representing ty:
// basics map to Go$<Kind>, named types to their registered identifier
// (structs via their Go$NonPointer box), and composite kinds to the
// corresponding Go$ runtime classes.
func (c *PkgContext) typeName(ty types.Type) string {
	switch t := ty.(type) {
	case *types.Basic:
		if t.Kind() == types.UntypedNil {
			return "null"
		}
		return "Go$" + toJavaScriptType(t)
	case *types.Named:
		if _, isStruct := t.Underlying().(*types.Struct); isStruct {
			return c.objectName(t.Obj()) + ".Go$NonPointer"
		}
		return c.objectName(t.Obj())
	case *types.Pointer:
		// Pointers to named types use the type's own pointer class,
		// except struct pointers (the struct class itself) and interface
		// pointers (the generic Go$Pointer).
		if named, isNamed := t.Elem().(*types.Named); isNamed && named.Obj().Name() != "error" {
			switch t.Elem().Underlying().(type) {
			case *types.Struct:
				return c.objectName(named.Obj())
			case *types.Interface:
				return "Go$Pointer"
			default:
				return c.objectName(named.Obj()) + ".Go$Pointer"
			}
		}
		return "Go$Pointer"
	case *types.Array:
		return "Go$Array"
	case *types.Slice:
		return "Go$Slice"
	case *types.Map:
		return "Go$Map"
	case *types.Interface:
		return "Go$Interface"
	case *types.Chan:
		return "Go$Channel"
	case *types.Signature:
		return "Go$Func"
	default:
		panic(fmt.Sprintf("Unhandled type: %T\n", t))
	}
}
func toJavaScriptType(t *types.Basic) string {
switch t.Kind() {
case types.UntypedInt:
return "Int"
default:
name := t.String()
return strings.ToUpper(name[:1]) + name[1:]
}
}
func is64Bit(t *types.Basic) bool {
return t.Kind() == types.Int64 || t.Kind() == types.Uint64
}
func isComplex(t *types.Basic) bool {
return t.Kind() == types.Complex64 || t.Kind() == types.Complex128
}
// isTypedArray reports whether values of t can live in a JavaScript typed
// array: numeric basics, excluding 64-bit integers and complex numbers.
func isTypedArray(t types.Type) bool {
	basic, ok := t.(*types.Basic)
	if !ok {
		return false
	}
	if basic.Info()&types.IsNumeric == 0 {
		return false
	}
	return !is64Bit(basic) && !isComplex(basic)
}
// toArrayType returns the JavaScript array constructor used for element
// type t: a typed array for plain numerics, the generic Go$Array otherwise.
func toArrayType(t types.Type) string {
	if basic, ok := t.(*types.Basic); ok && isTypedArray(t) {
		return "Go$" + toJavaScriptType(basic) + "Array"
	}
	return "Go$Array"
}
// createListComposite emits a JavaScript literal holding elements: a typed
// array when the element type allows it, a plain array literal otherwise.
func createListComposite(elementType types.Type, elements []string) string {
	joined := strings.Join(elements, ", ")
	if !isTypedArray(elementType) {
		return fmt.Sprintf("[%s]", joined)
	}
	return fmt.Sprintf("new %s([%s])", toArrayType(elementType), joined)
}
func isUnderscore(expr ast.Expr) bool {
if id, isIdent := expr.(*ast.Ident); isIdent {
return id.Name == "_"
}
return false
}
// hasId reports whether values of ty carry an identity usable as a map
// key in the generated code: 64-bit basics, pointers, and interfaces.
func hasId(ty types.Type) bool {
	switch t := ty.Underlying().(type) {
	case *types.Pointer, *types.Interface:
		return true
	case *types.Basic:
		return is64Bit(t)
	default:
		return false
	}
}
// isWrapped reports whether named types with this underlying type are
// represented by a wrapper object holding the value in .v in the
// generated JavaScript: small basics, arrays, and function signatures.
func isWrapped(ty types.Type) bool {
	switch t := ty.Underlying().(type) {
	case *types.Array, *types.Signature:
		return true
	case *types.Basic:
		return !is64Bit(t) && t.Kind() != types.UntypedNil
	default:
		return false
	}
}
// IsReadyVisitor walks an initializer expression to decide whether every
// package-level value it references has already been emitted.
type IsReadyVisitor struct {
	info           *types.Info
	functions      map[types.Object]*ast.FuncDecl // bodies to follow when an identifier names a function
	pendingObjects map[types.Object]bool          // package-level values not yet emitted
	isReady        bool                           // result; becomes false once a pending dependency is found
}
// Visit implements ast.Visitor. It marks the visitor not-ready when an
// identifier refers to a still-pending object, and recurses into the
// bodies of referenced functions to find indirect dependencies. A
// function is temporarily removed from v.functions while being walked so
// recursive functions do not loop forever.
func (v *IsReadyVisitor) Visit(node ast.Node) (w ast.Visitor) {
	if !v.isReady {
		return nil
	}
	switch n := node.(type) {
	case *ast.Ident:
		o := v.info.Objects[n]
		if v.pendingObjects[o] {
			v.isReady = false
			return nil
		}
		if fun, found := v.functions[o]; found {
			delete(v.functions, o)
			ast.Walk(v, fun)
			v.functions[o] = fun
		}
	}
	return v
}
|
package transports
import (
"fmt"
"github.com/headzoo/surf"
"github.com/headzoo/surf/browser"
)
// FacebookTransport delivers messages through a scripted login to the
// mobile Facebook site using the surf headless browser.
type FacebookTransport struct {
	*Transport
	Login         string           // account email used for the login form
	Password      string           // account password used for the login form
	Browser       *browser.Browser // headless browser session; created by Prepare
	Authenticated bool             // NOTE(review): reset in Prepare but never read here — confirm it is still needed
}
// Prepare initializes the headless browser session, clears the
// Authenticated flag, and immediately performs the Facebook login.
func (t *FacebookTransport) Prepare() {
	fmt.Println("FacebookTransport, Prepare()")
	t.Browser = surf.NewBrowser()
	t.Authenticated = false
	t.DoLogin()
}
// DoLogin opens the mobile Facebook login page, submits the credentials
// stored on the transport, and loads the profile page to verify the
// session. It panics on any navigation or form-submission failure.
func (t *FacebookTransport) DoLogin() {
	fmt.Println("FacebookTransport, Login()")
	err := t.Browser.Open("https://mobile.facebook.com/")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Browser.Body())
	// The login form is the second form on the page; index 1 is intentional.
	LoginForm := t.Browser.Forms()[1]
	LoginForm.Input("email", t.Login)
	LoginForm.Input("pass", t.Password)
	// BUG FIX: the original panicked with the stale (nil) err from the
	// earlier Open call; capture and report the actual Submit error.
	if err := LoginForm.Submit(); err != nil {
		panic(err)
	}
	err = t.Browser.Open("https://mobile.facebook.com/profile.php")
	if err != nil {
		panic(err)
	}
	fmt.Println("Logged in as", t.Browser.Title(), "?")
	// fmt.Println( t.Browser.Body() )
}
Removing authenticated field from struct
package transports
import (
"fmt"
"github.com/headzoo/surf"
"github.com/headzoo/surf/browser"
"errors"
)
// FacebookTransport delivers messages through a scripted login to the
// mobile Facebook site using the surf headless browser.
type FacebookTransport struct {
	*Transport
	Login    string           // account email used for the login form
	Password string           // account password used for the login form
	Browser  *browser.Browser // headless browser session; created by Prepare
}
// Prepare initializes the headless browser session and logs in to
// Facebook, panicking when authentication fails. (DoLogin currently
// panics on every failure itself, so the false branch is defensive.)
func (t *FacebookTransport) Prepare() {
	fmt.Println("FacebookTransport, Prepare()")
	t.Browser = surf.NewBrowser()
	if !t.DoLogin() {
		// gofmt fix (no space after paren) and inlined the temporary.
		panic(errors.New("Authentication error!"))
	}
}
// DoLogin opens the mobile Facebook login page, submits the stored
// credentials, and loads the profile page to verify the session. It
// returns true on success; every failure currently panics, so false is
// never actually returned by this implementation.
func (t *FacebookTransport) DoLogin() bool {
	fmt.Println("FacebookTransport, Login()")
	err := t.Browser.Open("https://mobile.facebook.com/")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Browser.Body())
	// The login form is the second form on the page; index 1 is intentional.
	LoginForm := t.Browser.Forms()[1]
	LoginForm.Input("email", t.Login)
	LoginForm.Input("pass", t.Password)
	// BUG FIX: the original panicked with the stale (nil) err from the
	// earlier Open call; capture and report the actual Submit error.
	if err := LoginForm.Submit(); err != nil {
		panic(err)
	}
	err = t.Browser.Open("https://mobile.facebook.com/profile.php")
	if err != nil {
		panic(err)
	}
	fmt.Println("Logged in as", t.Browser.Title(), "?")
	return true
}
|
package simra
import (
"testing"
"time"
)
// c is a minimal Collider stub whose bounding box is always empty.
type c struct{}

// l is a collision-listener stub that reports callbacks on onCollision.
type l struct{}

// onCollision receives a value each time l.OnCollision fires.
var onCollision = make(chan bool)
// GetXYWH implements the Collider interface, reporting a zero-sized
// bounding box at the origin.
func (c *c) GetXYWH() (x, y, w, h float32) {
	return 0, 0, 0, 0
}
// OnCollision implements the collision listener; it signals the test via
// onCollision from a goroutine so the notifier is never blocked.
func (l *l) OnCollision(c1, c2 Collider) {
	go func() {
		//time.Sleep(time.Millisecond * 500)
		onCollision <- true
	}()
}
// waitOnCollision waits up to 300ms for an OnCollision callback and fails
// the test when the observed behavior does not match shouldCallback.
func waitOnCollision(t *testing.T, shouldCallback bool) {
	timeout := time.After(time.Millisecond * 300)
	select {
	case <-onCollision:
		if !shouldCallback {
			t.Error("unexpected OnCollision.")
		}
	case <-timeout:
		if shouldCallback {
			t.Error("expected OnCollision but not fired.")
		}
	}
}
// TestAddCollisionListener verifies that registering listeners grows the
// collider map, that a registered pair is notified by
// collisionCheckAndNotify, and that RemoveAllCollisionListener empties
// the map again.
func TestAddCollisionListener(t *testing.T) {
	var c1, c2 c
	var l l
	simra := &simra{}
	simra.RemoveAllCollisionListener()
	if simra.comapLength() != 0 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.AddCollisionListener(&c1, &c2, &l)
	if simra.comapLength() != 1 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.collisionCheckAndNotify()
	waitOnCollision(t, true)
	if simra.comapLength() != 1 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	// Registering the same pair twice yields two entries.
	simra.AddCollisionListener(&c1, &c2, &l)
	if simra.comapLength() != 2 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.RemoveAllCollisionListener()
	if simra.comapLength() != 0 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
}
// TestRemoveCollisionListener verifies that a registered pair can be
// removed by first collider, by second collider, or by both, and that no
// callback fires after removal.
func TestRemoveCollisionListener(t *testing.T) {
	var c1, c2 c
	var l l
	simra := &simra{}
	simra.RemoveAllCollisionListener()
	if simra.comapLength() != 0 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.AddCollisionListener(&c1, &c2, &l)
	if simra.comapLength() != 1 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.collisionCheckAndNotify()
	waitOnCollision(t, true)
	if simra.comapLength() != 1 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	// Removal keyed by the first collider only.
	simra.RemoveCollisionListener(&c1, nil)
	simra.collisionCheckAndNotify()
	waitOnCollision(t, false)
	if simra.comapLength() != 0 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.AddCollisionListener(&c1, &c2, &l)
	if simra.comapLength() != 1 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	// Removal keyed by the second collider only.
	simra.RemoveCollisionListener(nil, &c2)
	simra.collisionCheckAndNotify()
	waitOnCollision(t, false)
	if simra.comapLength() != 0 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.AddCollisionListener(&c1, &c2, &l)
	if simra.comapLength() != 1 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	// Removal keyed by both colliders.
	simra.RemoveCollisionListener(&c1, &c2)
	simra.collisionCheckAndNotify()
	waitOnCollision(t, false)
	if simra.comapLength() != 0 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
}
[#122] revise variable name to avoid shadowing
package simra
import (
"testing"
"time"
)
// c is a minimal Collider stub whose bounding box is always empty.
type c struct{}

// l is a collision-listener stub that reports callbacks on onCollision.
type l struct{}

// onCollision receives a value each time l.OnCollision fires.
var onCollision = make(chan bool)
// GetXYWH implements the Collider interface, reporting a zero-sized
// bounding box at the origin.
func (c *c) GetXYWH() (x, y, w, h float32) {
	return 0, 0, 0, 0
}
// OnCollision implements the collision listener; it signals the test via
// onCollision from a goroutine so the notifier is never blocked.
func (l *l) OnCollision(c1, c2 Collider) {
	go func() {
		//time.Sleep(time.Millisecond * 500)
		onCollision <- true
	}()
}
// waitOnCollision waits up to 300ms for an OnCollision callback and fails
// the test when the observed behavior does not match shouldCallback.
func waitOnCollision(t *testing.T, shouldCallback bool) {
	timeout := time.After(time.Millisecond * 300)
	select {
	case <-onCollision:
		if !shouldCallback {
			t.Error("unexpected OnCollision.")
		}
	case <-timeout:
		if shouldCallback {
			t.Error("expected OnCollision but not fired.")
		}
	}
}
// TestAddCollisionListener verifies that registering listeners grows the
// collider map, that a registered pair is notified by
// collisionCheckAndNotify, and that RemoveAllCollisionListener empties
// the map again.
func TestAddCollisionListener(t *testing.T) {
	var c1, c2 c
	var li l
	simra := &simra{}
	simra.RemoveAllCollisionListener()
	if simra.comapLength() != 0 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.AddCollisionListener(&c1, &c2, &li)
	if simra.comapLength() != 1 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.collisionCheckAndNotify()
	waitOnCollision(t, true)
	if simra.comapLength() != 1 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	// Registering the same pair twice yields two entries.
	simra.AddCollisionListener(&c1, &c2, &li)
	if simra.comapLength() != 2 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.RemoveAllCollisionListener()
	if simra.comapLength() != 0 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
}
// TestRemoveCollisionListener verifies that a registered pair can be
// removed by first collider, by second collider, or by both, and that no
// callback fires after removal.
func TestRemoveCollisionListener(t *testing.T) {
	var c1, c2 c
	var li l
	simra := &simra{}
	simra.RemoveAllCollisionListener()
	if simra.comapLength() != 0 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.AddCollisionListener(&c1, &c2, &li)
	if simra.comapLength() != 1 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.collisionCheckAndNotify()
	waitOnCollision(t, true)
	if simra.comapLength() != 1 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	// Removal keyed by the first collider only.
	simra.RemoveCollisionListener(&c1, nil)
	simra.collisionCheckAndNotify()
	waitOnCollision(t, false)
	if simra.comapLength() != 0 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.AddCollisionListener(&c1, &c2, &li)
	if simra.comapLength() != 1 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	// Removal keyed by the second collider only.
	simra.RemoveCollisionListener(nil, &c2)
	simra.collisionCheckAndNotify()
	waitOnCollision(t, false)
	if simra.comapLength() != 0 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	simra.AddCollisionListener(&c1, &c2, &li)
	if simra.comapLength() != 1 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
	// Removal keyed by both colliders.
	simra.RemoveCollisionListener(&c1, &c2)
	simra.collisionCheckAndNotify()
	waitOnCollision(t, false)
	if simra.comapLength() != 0 {
		t.Error("unexpected comap length. comapLength() =", simra.comapLength())
	}
}
|
// Package http provides the HTTP server for accessing the distributed database.
// It also provides the endpoint for other nodes to join an existing cluster.
package http
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"expvar"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/pprof"
"os"
"runtime"
"strings"
"sync"
"time"
"github.com/rqlite/rqlite/auth"
"github.com/rqlite/rqlite/command"
"github.com/rqlite/rqlite/command/encoding"
"github.com/rqlite/rqlite/store"
)
var (
	// ErrLeaderNotFound is returned when a node cannot locate a leader.
	ErrLeaderNotFound = errors.New("leader not found")
)
// Database is the interface any queryable system must implement.
type Database interface {
	// Execute executes a slice of queries, each of which is not expected
	// to return rows. If timings is true, then timing information will
	// be returned. If tx is true, then either all queries will be executed
	// successfully or it will be as though none executed.
	Execute(er *command.ExecuteRequest) ([]*command.ExecuteResult, error)
	// Query executes a slice of queries, each of which returns rows. If
	// timings is true, then timing information will be returned. If tx
	// is true, then all queries will take place while a read transaction
	// is held on the database.
	Query(qr *command.QueryRequest) ([]*command.QueryRows, error)
}
// Store is the interface the Raft-based database must implement.
type Store interface {
	Database
	// Join joins the node with the given ID, reachable at addr, to this node.
	Join(id, addr string, voter bool) error
	// Remove removes the node, specified by id, from the cluster.
	Remove(id string) error
	// LeaderAddr returns the Raft address of the leader of the cluster.
	LeaderAddr() (string, error)
	// Stats returns stats on the Store.
	Stats() (map[string]interface{}, error)
	// Nodes returns the slice of store.Servers in the cluster.
	Nodes() ([]*store.Server, error)
	// Backup writes a backup of the node state to dst.
	Backup(leader bool, f store.BackupFormat, dst io.Writer) error
}
// Cluster is the interface node API services must provide.
type Cluster interface {
	// GetNodeAPIAddr returns the HTTP API URL for the node at the given Raft address.
	GetNodeAPIAddr(nodeAddr string, timeout time.Duration) (string, error)
	// Execute performs an Execute Request on a remote node.
	Execute(er *command.ExecuteRequest, nodeAddr string, timeout time.Duration) ([]*command.ExecuteResult, error)
	// Query performs a Query Request on a remote node.
	Query(qr *command.QueryRequest, nodeAddr string, timeout time.Duration) ([]*command.QueryRows, error)
	// Stats returns stats on the Cluster.
	Stats() (map[string]interface{}, error)
}
// CredentialStore is the interface credential stores must support.
type CredentialStore interface {
	// Check returns whether username and password are a valid combination.
	Check(username, password string) bool
	// HasPerm returns whether username has the given perm.
	HasPerm(username string, perm string) bool
	// HasAnyPerm returns whether username has any of the given perms.
	HasAnyPerm(username string, perm ...string) bool
}
// StatusReporter is the interface status providers must implement.
type StatusReporter interface {
	// Stats returns stats to include in the service's status output.
	Stats() (map[string]interface{}, error)
}
// DBResults stores either an Execute result or a Query result — never
// both; MarshalJSON serializes whichever field is set.
type DBResults struct {
	ExecuteResult []*command.ExecuteResult
	QueryRows     []*command.QueryRows
}
// MarshalJSON implements the JSON Marshaler interface. Exactly one of the
// two result fields is expected to be set; an error is returned when
// neither is.
func (d *DBResults) MarshalJSON() ([]byte, error) {
	switch {
	case d.ExecuteResult != nil:
		return encoding.JSONMarshal(d.ExecuteResult)
	case d.QueryRows != nil:
		return encoding.JSONMarshal(d.QueryRows)
	default:
		return nil, fmt.Errorf("no DB results set")
	}
}
// Response represents a response from the HTTP service.
type Response struct {
	Results *DBResults `json:"results,omitempty"`
	Error   string     `json:"error,omitempty"`
	Time    float64    `json:"time,omitempty"`
	start   time.Time // when the request began; set by NewResponse
	end     time.Time // when processing finished; consumed by SetTime
}
// stats captures stats for the HTTP service; created and zeroed in init.
var stats *expvar.Map
const (
	// Expvar counter names published under the "http" map.
	numLeaderNotFound   = "leader_not_found"
	numExecutions       = "executions"
	numQueries          = "queries"
	numRemoteExecutions = "remote_executions"
	numRemoteQueries    = "remote_queries"
	numBackups          = "backups"
	numLoad             = "loads"
	numJoins            = "joins"
	numAuthOK           = "authOK"
	numAuthFail         = "authFail"
	// Default timeout for cluster communications.
	defaultTimeout = 30 * time.Second
	// PermAll means all actions permitted.
	PermAll = "all"
	// PermJoin means user is permitted to join cluster.
	PermJoin = "join"
	// PermRemove means user is permitted to remove a node.
	PermRemove = "remove"
	// PermExecute means user can access execute endpoint.
	PermExecute = "execute"
	// PermQuery means user can access query endpoint.
	PermQuery = "query"
	// PermStatus means user can retrieve node status.
	PermStatus = "status"
	// PermReady means user can retrieve ready status.
	PermReady = "ready"
	// PermBackup means user can backup node.
	PermBackup = "backup"
	// PermLoad means user can load a SQLite dump into a node.
	PermLoad = "load"
	// VersionHTTPHeader is the HTTP header key for the version.
	VersionHTTPHeader = "X-RQLITE-VERSION"
	// ServedByHTTPHeader is the HTTP header used to report which
	// node (by node Raft address) actually served the request if
	// it wasn't served by this node.
	ServedByHTTPHeader = "X-RQLITE-SERVED-BY"
)
// init registers the expvar map and zeroes every counter so all keys are
// always present in /debug/vars output.
func init() {
	stats = expvar.NewMap("http")
	counters := []string{
		numLeaderNotFound,
		numExecutions,
		numQueries,
		numRemoteExecutions,
		numRemoteQueries,
		numBackups,
		numLoad,
		numJoins,
		numAuthOK,
		numAuthFail,
	}
	for _, name := range counters {
		stats.Add(name, 0)
	}
}
// SetTime sets the Time attribute of the response to the elapsed seconds
// between start and end, so it is present in the serialized JSON version.
func (r *Response) SetTime() {
	elapsed := r.end.Sub(r.start)
	r.Time = elapsed.Seconds()
}
// NewResponse returns a new instance of response, with the start time set
// to now and an empty results container.
func NewResponse() *Response {
	return &Response{
		Results: &DBResults{},
		start:   time.Now(),
	}
}
// Service provides HTTP service.
type Service struct {
	addr string       // Bind address of the HTTP service.
	ln   net.Listener // Service listener
	store   Store   // The Raft-backed database store.
	cluster Cluster // The Cluster service.
	start      time.Time // Start up time.
	lastBackup time.Time // Time of last successful backup.
	statusMu sync.RWMutex              // Guards statuses.
	statuses map[string]StatusReporter // Registered status providers; registration not visible in this chunk.
	CACertFile string // Path to root X.509 certificate.
	CertFile   string // Path to SSL certificate.
	KeyFile    string // Path to SSL private key.
	TLS1011    bool   // Whether older, deprecated TLS should be supported.
	credentialStore CredentialStore // Optional; nil disables authentication checks (see New).
	Expvar    bool                   // Whether to expose the expvar endpoint.
	Pprof     bool                   // Whether to expose the pprof endpoints.
	BuildInfo map[string]interface{} // Build metadata; presumably reported via status — confirm against handler code.
	logger *log.Logger // Service logger, prefixed with "[http]".
}
// New returns an uninitialized HTTP service. If credentials is nil, then
// the service performs no authentication and authorization checks. Call
// Start to begin listening.
func New(addr string, store Store, cluster Cluster, credentials CredentialStore) *Service {
	return &Service{
		addr:            addr,
		store:           store,
		cluster:         cluster,
		start:           time.Now(),
		statuses:        make(map[string]StatusReporter),
		credentialStore: credentials,
		logger:          log.New(os.Stderr, "[http] ", log.LstdFlags),
	}
}
// Start starts the service. It opens a plain TCP listener when no TLS
// material is configured, or a TLS listener otherwise, then serves in a
// background goroutine. Start returns once the listener is established.
func (s *Service) Start() error {
	server := http.Server{
		Handler: s,
	}
	var ln net.Listener
	var err error
	if s.CertFile == "" || s.KeyFile == "" {
		// No certificate/key pair configured: serve plain HTTP.
		ln, err = net.Listen("tcp", s.addr)
		if err != nil {
			return err
		}
	} else {
		config, err := createTLSConfig(s.CertFile, s.KeyFile, s.CACertFile, s.TLS1011)
		if err != nil {
			return err
		}
		ln, err = tls.Listen("tcp", s.addr, config)
		if err != nil {
			return err
		}
		s.logger.Printf("secure HTTPS server enabled with cert %s, key %s", s.CertFile, s.KeyFile)
	}
	s.ln = ln
	// Serve in the background; errors (including normal listener close)
	// are logged, not returned.
	go func() {
		err := server.Serve(s.ln)
		if err != nil {
			s.logger.Println("HTTP service Serve() returned:", err.Error())
		}
	}()
	s.logger.Println("service listening on", s.Addr())
	return nil
}
// Close closes the service by closing its listener, which causes the
// background Serve goroutine to exit. Any error from closing the
// listener is intentionally ignored; there is nothing useful to do with
// it at shutdown.
func (s *Service) Close() {
	s.ln.Close()
}
// HTTPS reports whether this service is using HTTPS, which requires both
// a certificate file and a key file to be configured.
func (s *Service) HTTPS() bool {
	if s.CertFile == "" {
		return false
	}
	return s.KeyFile != ""
}
// ServeHTTP allows Service to serve HTTP requests. It routes each request
// to the appropriate handler by URL-path prefix, bumping the relevant
// expvar counter for the endpoints that track one. The version header is
// added to every response, and unknown paths receive 404.
func (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	s.addBuildVersion(w)

	switch {
	case r.URL.Path == "/" || r.URL.Path == "":
		// Bare root redirects to the status page.
		http.Redirect(w, r, "/status", http.StatusFound)
	case strings.HasPrefix(r.URL.Path, "/db/execute"):
		stats.Add(numExecutions, 1)
		s.handleExecute(w, r)
	case strings.HasPrefix(r.URL.Path, "/db/query"):
		stats.Add(numQueries, 1)
		s.handleQuery(w, r)
	case strings.HasPrefix(r.URL.Path, "/db/backup"):
		stats.Add(numBackups, 1)
		s.handleBackup(w, r)
	case strings.HasPrefix(r.URL.Path, "/db/load"):
		stats.Add(numLoad, 1)
		s.handleLoad(w, r)
	case strings.HasPrefix(r.URL.Path, "/join"):
		stats.Add(numJoins, 1)
		s.handleJoin(w, r)
	case strings.HasPrefix(r.URL.Path, "/remove"):
		s.handleRemove(w, r)
	case strings.HasPrefix(r.URL.Path, "/status"):
		s.handleStatus(w, r)
	case strings.HasPrefix(r.URL.Path, "/nodes"):
		s.handleNodes(w, r)
	case strings.HasPrefix(r.URL.Path, "/readyz"):
		s.handleReadyz(w, r)
	case r.URL.Path == "/debug/vars" && s.Expvar:
		// Debug endpoints are only routed when explicitly enabled.
		s.handleExpvar(w, r)
	case strings.HasPrefix(r.URL.Path, "/debug/pprof") && s.Pprof:
		s.handlePprof(w, r)
	default:
		w.WriteHeader(http.StatusNotFound)
	}
}
// RegisterStatus allows other modules to register status for serving over
// HTTP. Registration fails if the key is already taken.
func (s *Service) RegisterStatus(key string, stat StatusReporter) error {
	s.statusMu.Lock()
	defer s.statusMu.Unlock()

	if _, taken := s.statuses[key]; taken {
		return fmt.Errorf("status already registered with key %s", key)
	}
	s.statuses[key] = stat

	return nil
}
// handleJoin handles cluster-join requests from other nodes. The request
// body is a JSON object with "id" and "addr" (both strings, required)
// and an optional boolean "voter" (defaults to true). If this node is
// not the leader, the caller is redirected to the leader when its API
// address is known.
func (s *Service) handleJoin(w http.ResponseWriter, r *http.Request) {
	if !s.CheckRequestPerm(r, PermJoin) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "POST" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	md := map[string]interface{}{}
	if err := json.Unmarshal(b, &md); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	// Validate field types explicitly. The previous code used bare type
	// assertions, which panicked the goroutine when a client sent, say,
	// a numeric "id". Malformed fields now yield 400 Bad Request.
	remoteID, ok := md["id"].(string)
	if !ok {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	remoteAddr, ok := md["addr"].(string)
	if !ok {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	voter := true
	if v, present := md["voter"]; present {
		if voter, ok = v.(bool); !ok {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
	}

	if err := s.store.Join(remoteID, remoteAddr, voter); err != nil {
		if err == store.ErrNotLeader {
			leaderAPIAddr := s.LeaderAPIAddr()
			if leaderAPIAddr == "" {
				stats.Add(numLeaderNotFound, 1)
				http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
				return
			}

			redirect := s.FormRedirect(r, leaderAPIAddr)
			http.Redirect(w, r, redirect, http.StatusMovedPermanently)
			return
		}

		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
// handleRemove handles cluster-remove requests. The DELETE body must be a
// JSON object containing exactly one key, "id", naming the node to remove.
// If this node is not the leader, the caller is redirected to the leader
// when its API address is known.
func (s *Service) handleRemove(w http.ResponseWriter, r *http.Request) {
	if !s.CheckRequestPerm(r, PermRemove) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "DELETE" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	m := map[string]string{}
	if err := json.Unmarshal(b, &m); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	// Exactly one key is expected; anything else is a malformed request.
	if len(m) != 1 {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	remoteID, ok := m["id"]
	if !ok {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	if err := s.store.Remove(remoteID); err != nil {
		// Only the leader can change membership; redirect if possible.
		if err == store.ErrNotLeader {
			leaderAPIAddr := s.LeaderAPIAddr()
			if leaderAPIAddr == "" {
				stats.Add(numLeaderNotFound, 1)
				http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
				return
			}

			redirect := s.FormRedirect(r, leaderAPIAddr)
			http.Redirect(w, r, redirect, http.StatusMovedPermanently)
			return
		}

		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
// handleBackup returns the consistent database snapshot, streamed directly
// into the response body. The "fmt" query parameter selects SQL-text or
// binary format, and "noleader" skips the leader check. On success the
// last-backup timestamp is recorded.
func (s *Service) handleBackup(w http.ResponseWriter, r *http.Request) {
	if !s.CheckRequestPerm(r, PermBackup) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "GET" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	noLeader, err := noLeader(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// backupFormat also sets the Content-Type header for the chosen format.
	bf, err := backupFormat(w, r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	err = s.store.Backup(!noLeader, bf, w)
	if err != nil {
		if err == store.ErrNotLeader {
			leaderAPIAddr := s.LeaderAPIAddr()
			if leaderAPIAddr == "" {
				stats.Add(numLeaderNotFound, 1)
				http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
				return
			}

			redirect := s.FormRedirect(r, leaderAPIAddr)
			http.Redirect(w, r, redirect, http.StatusMovedPermanently)
			return
		}
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	s.lastBackup = time.Now()
}
// handleLoad loads the state contained in a .dump output. This API is different
// from others in that it expects a raw file, not wrapped in any kind of JSON.
// The entire body is executed as a single statement string via the store.
func (s *Service) handleLoad(w http.ResponseWriter, r *http.Request) {
	if !s.CheckRequestPerm(r, PermLoad) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "POST" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	resp := NewResponse()

	timings, err := isTimings(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	r.Body.Close()

	// No JSON structure expected for this API.
	queries := []string{string(b)}
	er := executeRequestFromStrings(queries, timings, false)

	results, err := s.store.Execute(er)
	if err != nil {
		// Loads must go through the leader; redirect when possible.
		if err == store.ErrNotLeader {
			leaderAPIAddr := s.LeaderAPIAddr()
			if leaderAPIAddr == "" {
				stats.Add(numLeaderNotFound, 1)
				http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
				return
			}

			redirect := s.FormRedirect(r, leaderAPIAddr)
			http.Redirect(w, r, redirect, http.StatusMovedPermanently)
			return
		}
		// Non-leadership errors are reported in the JSON response body.
		resp.Error = err.Error()
	} else {
		resp.Results.ExecuteResult = results
	}
	resp.end = time.Now()
	s.writeResponse(w, r, resp)
}
// handleStatus returns status on the system, aggregated from the OS, the
// Go runtime, the store, the cluster, and any registered StatusReporters.
// The response is JSON, pretty-printed when the "pretty" query parameter
// is present.
func (s *Service) handleStatus(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")

	if !s.CheckRequestPerm(r, PermStatus) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "GET" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	storeStatus, err := s.store.Stats()
	if err != nil {
		http.Error(w, fmt.Sprintf("store stats: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}

	clusterStatus, err := s.cluster.Stats()
	if err != nil {
		http.Error(w, fmt.Sprintf("cluster stats: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}

	rt := map[string]interface{}{
		"GOARCH":        runtime.GOARCH,
		"GOOS":          runtime.GOOS,
		"GOMAXPROCS":    runtime.GOMAXPROCS(0),
		"num_cpu":       runtime.NumCPU(),
		"num_goroutine": runtime.NumGoroutine(),
		"version":       runtime.Version(),
	}

	oss := map[string]interface{}{
		"pid":       os.Getpid(),
		"ppid":      os.Getppid(),
		"page_size": os.Getpagesize(),
	}

	// Executable path and hostname are best-effort; omitted on error.
	executable, err := os.Executable()
	if err == nil {
		oss["executable"] = executable
	}
	hostname, err := os.Hostname()
	if err == nil {
		oss["hostname"] = hostname
	}

	httpStatus := map[string]interface{}{
		"bind_addr": s.Addr().String(),
		"auth":      prettyEnabled(s.credentialStore != nil),
		"cluster":   clusterStatus,
	}

	nodeStatus := map[string]interface{}{
		"start_time": s.start,
		"uptime":     time.Since(s.start).String(),
	}

	// Build the status response.
	status := map[string]interface{}{
		"os":      oss,
		"runtime": rt,
		"store":   storeStatus,
		"http":    httpStatus,
		"node":    nodeStatus,
	}
	if !s.lastBackup.IsZero() {
		status["last_backup_time"] = s.lastBackup
	}
	if s.BuildInfo != nil {
		status["build"] = s.BuildInfo
	}

	// Add any registered StatusReporters, under the read lock.
	// BUG FIX: previously a reporter error called http.Error from inside
	// the closure but only returned from the closure, so the handler then
	// appended the full JSON body to the error response. Propagate the
	// error out and abort the handler instead.
	if err := func() error {
		s.statusMu.RLock()
		defer s.statusMu.RUnlock()
		for k, v := range s.statuses {
			stat, err := v.Stats()
			if err != nil {
				return fmt.Errorf("registered stats: %s", err.Error())
			}
			status[k] = stat
		}
		return nil
	}(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	pretty, _ := isPretty(r)
	var b []byte
	if pretty {
		b, err = json.MarshalIndent(status, "", "    ")
	} else {
		b, err = json.Marshal(status)
	}
	if err != nil {
		http.Error(w, fmt.Sprintf("JSON marshal: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}
	_, err = w.Write(b)
	if err != nil {
		http.Error(w, fmt.Sprintf("write: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}
}
// handleNodes returns status on the other voting nodes in the system.
// This attempts to contact all the nodes in the cluster, so may take
// some time to return. Non-voters are excluded unless the "nonvoters"
// query parameter is present; "timeout" bounds each per-node check.
func (s *Service) handleNodes(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")

	if !s.CheckRequestPerm(r, PermStatus) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "GET" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	timeout, err := timeoutParam(r, defaultTimeout)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	includeNonVoters, err := nonVoters(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Get nodes in the cluster, and possibly filter out non-voters.
	nodes, err := s.store.Nodes()
	if err != nil {
		http.Error(w, fmt.Sprintf("store nodes: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}

	filteredNodes := make([]*store.Server, 0)
	for _, n := range nodes {
		if n.Suffrage != "Voter" && !includeNonVoters {
			continue
		}
		filteredNodes = append(filteredNodes, n)
	}

	// The leader's Raft address is used below to mark the leader node.
	lAddr, err := s.store.LeaderAddr()
	if err != nil {
		http.Error(w, fmt.Sprintf("leader address: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}

	// Probe each node's reachability in parallel.
	nodesResp, err := s.checkNodes(filteredNodes, timeout)
	if err != nil {
		http.Error(w, fmt.Sprintf("check nodes: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}

	resp := make(map[string]struct {
		APIAddr   string  `json:"api_addr,omitempty"`
		Addr      string  `json:"addr,omitempty"`
		Reachable bool    `json:"reachable"`
		Leader    bool    `json:"leader"`
		Time      float64 `json:"time,omitempty"`
		Error     string  `json:"error,omitempty"`
	})

	for _, n := range filteredNodes {
		nn := resp[n.ID]
		nn.Addr = n.Addr
		nn.Leader = nn.Addr == lAddr
		nn.APIAddr = nodesResp[n.ID].apiAddr
		nn.Reachable = nodesResp[n.ID].reachable
		nn.Time = nodesResp[n.ID].time.Seconds()
		nn.Error = nodesResp[n.ID].error
		resp[n.ID] = nn
	}

	pretty, _ := isPretty(r)
	var b []byte
	if pretty {
		b, err = json.MarshalIndent(resp, "", "    ")
	} else {
		b, err = json.Marshal(resp)
	}
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	_, err = w.Write(b)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
// handleReadyz returns whether the node is ready. Ready means a leader is
// known and its API address can be fetched within the (optionally
// overridden) timeout; otherwise 503 is returned.
func (s *Service) handleReadyz(w http.ResponseWriter, r *http.Request) {
	if !s.CheckRequestPerm(r, PermReady) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "GET" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	timeout, err := timeoutParam(r, defaultTimeout)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	lAddr, err := s.store.LeaderAddr()
	if err != nil {
		http.Error(w, fmt.Sprintf("leader address: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}

	if lAddr != "" {
		// Confirm the leader is actually responsive, not just known.
		if _, err := s.cluster.GetNodeAPIAddr(lAddr, timeout); err == nil {
			w.WriteHeader(http.StatusOK)
			w.Write([]byte("[+]leader ok"))
			return
		}
	}

	w.WriteHeader(http.StatusServiceUnavailable)
}
// handleExecute handles queries that modify the database. If this node is
// not the leader, the request is either redirected (when "redirect" is
// set) or forwarded transparently to the leader via the cluster service,
// with the serving node reported in a response header.
func (s *Service) handleExecute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")

	if !s.CheckRequestPerm(r, PermExecute) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "POST" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	resp := NewResponse()

	timeout, isTx, timings, redirect, err := reqParams(r, defaultTimeout)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	r.Body.Close()

	stmts, err := ParseRequest(b)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	er := &command.ExecuteRequest{
		Request: &command.Request{
			Transaction: isTx,
			Statements:  stmts,
		},
		Timings: timings,
	}

	// Try locally first; fall back to the leader on ErrNotLeader.
	results, resultsErr := s.store.Execute(er)
	if resultsErr != nil && resultsErr == store.ErrNotLeader {
		if redirect {
			leaderAPIAddr := s.LeaderAPIAddr()
			if leaderAPIAddr == "" {
				stats.Add(numLeaderNotFound, 1)
				http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
				return
			}
			loc := s.FormRedirect(r, leaderAPIAddr)
			http.Redirect(w, r, loc, http.StatusMovedPermanently)
			return
		}

		addr, err := s.store.LeaderAddr()
		if err != nil {
			http.Error(w, fmt.Sprintf("leader address: %s", err.Error()),
				http.StatusInternalServerError)
			return
		}
		if addr == "" {
			stats.Add(numLeaderNotFound, 1)
			http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
			return
		}

		// Forward the request to the leader and record who served it.
		results, resultsErr = s.cluster.Execute(er, addr, timeout)
		stats.Add(numRemoteExecutions, 1)
		w.Header().Add(ServedByHTTPHeader, addr)
	}

	if resultsErr != nil {
		resp.Error = resultsErr.Error()
	} else {
		resp.Results.ExecuteResult = results
	}
	resp.end = time.Now()
	s.writeResponse(w, r, resp)
}
// handleQuery handles queries that do not modify the database. GET passes
// the statement via the "q" parameter; POST carries statements in the
// body. Consistency level and freshness are honored, and non-leader
// handling mirrors handleExecute: redirect or transparent forwarding.
func (s *Service) handleQuery(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")

	if !s.CheckRequestPerm(r, PermQuery) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "GET" && r.Method != "POST" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	resp := NewResponse()

	timeout, isTx, timings, redirect, err := reqParams(r, defaultTimeout)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	lvl, err := level(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	frsh, err := freshness(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// Get the query statement(s), and do tx if necessary.
	queries, err := requestQueries(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	qr := &command.QueryRequest{
		Request: &command.Request{
			Transaction: isTx,
			Statements:  queries,
		},
		Timings:   timings,
		Level:     lvl,
		Freshness: frsh.Nanoseconds(),
	}

	// Try locally first; fall back to the leader on ErrNotLeader.
	results, resultsErr := s.store.Query(qr)
	if resultsErr != nil && resultsErr == store.ErrNotLeader {
		if redirect {
			leaderAPIAddr := s.LeaderAPIAddr()
			if leaderAPIAddr == "" {
				stats.Add(numLeaderNotFound, 1)
				http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
				return
			}
			loc := s.FormRedirect(r, leaderAPIAddr)
			http.Redirect(w, r, loc, http.StatusMovedPermanently)
			return
		}

		addr, err := s.store.LeaderAddr()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		if addr == "" {
			stats.Add(numLeaderNotFound, 1)
			http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
			return
		}

		// Forward the request to the leader and record who served it.
		results, resultsErr = s.cluster.Query(qr, addr, timeout)
		stats.Add(numRemoteQueries, 1)
		w.Header().Add(ServedByHTTPHeader, addr)
	}

	if resultsErr != nil {
		resp.Error = resultsErr.Error()
	} else {
		resp.Results.QueryRows = results
	}
	resp.end = time.Now()
	s.writeResponse(w, r, resp)
}
// handleExpvar serves registered expvar information over HTTP, emitting
// all published variables as one JSON object.
func (s *Service) handleExpvar(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")

	if !s.CheckRequestPerm(r, PermStatus) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	fmt.Fprintf(w, "{\n")
	// Entries after the first are preceded by a comma separator.
	sep := ""
	expvar.Do(func(kv expvar.KeyValue) {
		fmt.Fprintf(w, "%s%q: %s", sep, kv.Key, kv.Value)
		sep = ",\n"
	})
	fmt.Fprintf(w, "\n}\n")
}
// handlePprof serves pprof information over HTTP, dispatching on the
// request path; unrecognized pprof paths fall back to the index page.
func (s *Service) handlePprof(w http.ResponseWriter, r *http.Request) {
	if !s.CheckRequestPerm(r, PermStatus) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	handlers := map[string]http.HandlerFunc{
		"/debug/pprof/cmdline": pprof.Cmdline,
		"/debug/pprof/profile": pprof.Profile,
		"/debug/pprof/symbol":  pprof.Symbol,
	}
	h, ok := handlers[r.URL.Path]
	if !ok {
		h = pprof.Index
	}
	h(w, r)
}
// Addr returns the address on which the Service is listening. It must only
// be called after Start has set the listener.
func (s *Service) Addr() net.Addr {
	return s.ln.Addr()
}
// FormRedirect returns the value for the "Location" header for a 301
// response: the given base URL followed by the original request's path
// and query string, if any.
func (s *Service) FormRedirect(r *http.Request, url string) string {
	var rq string
	if q := r.URL.RawQuery; q != "" {
		rq = "?" + q
	}
	return url + r.URL.Path + rq
}
// CheckRequestPerm checks if the request is authenticated and authorized
// with the given Perm. The deferred function records the outcome in the
// auth counters, whichever return path is taken.
func (s *Service) CheckRequestPerm(r *http.Request, perm string) (b bool) {
	defer func() {
		if b {
			stats.Add(numAuthOK, 1)
		} else {
			stats.Add(numAuthFail, 1)
		}
	}()

	// No credential store? Auth is not even enabled.
	if s.credentialStore == nil {
		return true
	}

	// Is the required perm granted to all users, including anonymous users?
	if s.credentialStore.HasAnyPerm(auth.AllUsers, perm, PermAll) {
		return true
	}

	// At this point there needs to be BasicAuth information in the request.
	username, password, ok := r.BasicAuth()
	if !ok {
		return false
	}

	// Are the BasicAuth creds good?
	if !s.credentialStore.Check(username, password) {
		return false
	}

	// Is the specified user authorized?
	return s.credentialStore.HasAnyPerm(username, perm, PermAll)
}
// LeaderAPIAddr returns the API address of the leader, as known by this
// node. An empty string is returned when the leader, or its API address,
// cannot be determined.
func (s *Service) LeaderAPIAddr() string {
	nodeAddr, err := s.store.LeaderAddr()
	if err != nil {
		return ""
	}

	apiAddr, err := s.cluster.GetNodeAPIAddr(nodeAddr, defaultTimeout)
	if err == nil {
		return apiAddr
	}
	return ""
}
// checkNodesResponse records the outcome of probing a single node.
type checkNodesResponse struct {
	apiAddr   string        // HTTP API address reported by the node, if reachable.
	reachable bool          // Whether the node answered the probe.
	time      time.Duration // How long the probe took.
	error     string        // Probe error message, if any.
}
// checkNodes returns a map of node ID to node responsiveness, reachable
// being defined as the node responding to a simple request over the
// network. Each probe is bounded by timeout and the probes run in
// parallel.
func (s *Service) checkNodes(nodes []*store.Server, timeout time.Duration) (map[string]*checkNodesResponse, error) {
	var wg sync.WaitGroup
	var mu sync.Mutex

	resp := make(map[string]*checkNodesResponse)
	for _, n := range nodes {
		resp[n.ID] = &checkNodesResponse{}
	}

	// Now confirm, in parallel. BUG FIX: the mutex was previously acquired
	// before the network call, which serialized every probe (and its
	// timeout), defeating the point of the goroutines. The network call
	// now happens outside the lock; the lock protects only the map writes.
	for _, n := range nodes {
		wg.Add(1)
		go func(id, raftAddr string) {
			defer wg.Done()

			start := time.Now()
			apiAddr, err := s.cluster.GetNodeAPIAddr(raftAddr, timeout)
			elapsed := time.Since(start)

			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				resp[id].error = err.Error()
				return
			}
			resp[id].reachable = true
			resp[id].apiAddr = apiAddr
			resp[id].time = elapsed
		}(n.ID, n.Addr)
	}
	wg.Wait()

	return resp, nil
}
// addBuildVersion adds the build version to the HTTP response header,
// using "unknown" when no version is present in BuildInfo.
func (s *Service) addBuildVersion(w http.ResponseWriter) {
	version, ok := s.BuildInfo["version"].(string)
	if !ok {
		version = "unknown"
	}
	w.Header().Add(VersionHTTPHeader, version)
}
// writeResponse writes the given response to the given writer as JSON,
// honoring the request's "pretty" and "timings" query parameters. A
// marshaling failure produces a 500; a write failure is only logged,
// since headers have already been sent.
func (s *Service) writeResponse(w http.ResponseWriter, r *http.Request, j *Response) {
	pretty, _ := isPretty(r)
	timings, _ := isTimings(r)

	if timings {
		j.SetTime()
	}

	var enc []byte
	var err error
	if pretty {
		enc, err = json.MarshalIndent(j, "", "    ")
	} else {
		enc, err = json.Marshal(j)
	}
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	if _, err = w.Write(enc); err != nil {
		s.logger.Println("writing response failed:", err.Error())
	}
}
// requestQueries extracts the SQL statements from the request: for GET
// the single statement comes from the "q" URL parameter; for other
// methods the body is parsed as a statement request.
func requestQueries(r *http.Request) ([]*command.Statement, error) {
	if r.Method == "GET" {
		query, err := stmtParam(r)
		if err != nil || query == "" {
			return nil, errors.New("bad query GET request")
		}
		return []*command.Statement{
			{
				Sql: query,
			},
		}, nil
	}

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, errors.New("bad query POST request")
	}
	r.Body.Close()

	return ParseRequest(b)
}
// createTLSConfig returns a TLS config built from the given certificate
// and key files. If caCertFile is non-empty its PEM contents become the
// root CA pool. tls1011 lowers the minimum accepted TLS version to 1.0.
func createTLSConfig(certFile, keyFile, caCertFile string, tls1011 bool) (*tls.Config, error) {
	minTLS := uint16(tls.VersionTLS12)
	if tls1011 {
		minTLS = tls.VersionTLS10
	}

	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}

	config := &tls.Config{
		NextProtos:   []string{"h2", "http/1.1"},
		MinVersion:   minTLS,
		Certificates: []tls.Certificate{cert},
	}

	if caCertFile != "" {
		asn1Data, err := ioutil.ReadFile(caCertFile)
		if err != nil {
			return nil, err
		}
		config.RootCAs = x509.NewCertPool()
		if !config.RootCAs.AppendCertsFromPEM(asn1Data) {
			return nil, fmt.Errorf("failed to parse root certificate(s) in %q", caCertFile)
		}
	}

	return config, nil
}
// queryParam returns whether the given query param is present.
func queryParam(req *http.Request, param string) (bool, error) {
err := req.ParseForm()
if err != nil {
return false, err
}
if _, ok := req.Form[param]; ok {
return true, nil
}
return false, nil
}
// stmtParam returns the value for URL param 'q', if present.
func stmtParam(req *http.Request) (string, error) {
q := req.URL.Query()
stmt := strings.TrimSpace(q.Get("q"))
return stmt, nil
}
// fmtParam returns the value for URL param 'fmt', if present.
func fmtParam(req *http.Request) (string, error) {
q := req.URL.Query()
return strings.TrimSpace(q.Get("fmt")), nil
}
// isPretty returns whether the HTTP response body should be
// pretty-printed, as requested via the "pretty" query parameter.
func isPretty(req *http.Request) (bool, error) {
	return queryParam(req, "pretty")
}
// isRedirect returns whether the HTTP request is requesting an explicit
// redirect to the leader, if necessary, via the "redirect" query parameter.
func isRedirect(req *http.Request) (bool, error) {
	return queryParam(req, "redirect")
}
// timeoutParam returns the value, if any, set for timeout. If not set, it
// returns the value passed in as a default.
func timeoutParam(req *http.Request, def time.Duration) (time.Duration, error) {
q := req.URL.Query()
timeout := strings.TrimSpace(q.Get("timeout"))
if timeout == "" {
return def, nil
}
t, err := time.ParseDuration(timeout)
if err != nil {
return 0, err
}
return t, nil
}
// isTx returns whether the HTTP request is requesting a transaction, via
// the "transaction" query parameter.
func isTx(req *http.Request) (bool, error) {
	return queryParam(req, "transaction")
}
// reqParams is a convenience function to get a bunch of query params
// in one function call: timeout, transaction, timings, and redirect.
// On any parse failure all values are zero and the error is returned.
func reqParams(req *http.Request, def time.Duration) (timeout time.Duration, tx, timings, redirect bool, err error) {
	if timeout, err = timeoutParam(req, def); err != nil {
		return 0, false, false, false, err
	}
	if tx, err = isTx(req); err != nil {
		return 0, false, false, false, err
	}
	if timings, err = isTimings(req); err != nil {
		return 0, false, false, false, err
	}
	if redirect, err = isRedirect(req); err != nil {
		return 0, false, false, false, err
	}
	return timeout, tx, timings, redirect, nil
}
// noLeader returns whether processing should skip the leader check, via
// the "noleader" query parameter.
func noLeader(req *http.Request) (bool, error) {
	return queryParam(req, "noleader")
}
// nonVoters returns whether a query is requesting to include non-voter
// results, via the "nonvoters" query parameter.
func nonVoters(req *http.Request) (bool, error) {
	return queryParam(req, "nonvoters")
}
// isTimings returns whether timings are requested, via the "timings"
// query parameter.
func isTimings(req *http.Request) (bool, error) {
	return queryParam(req, "timings")
}
// level returns the requested consistency level for a query, parsed
// case-insensitively from the "level" URL parameter. Anything other than
// "none" or "strong" — including an absent parameter — maps to weak.
func level(req *http.Request) (command.QueryRequest_Level, error) {
	lvl := strings.ToLower(strings.TrimSpace(req.URL.Query().Get("level")))
	switch lvl {
	case "none":
		return command.QueryRequest_QUERY_REQUEST_LEVEL_NONE, nil
	case "strong":
		return command.QueryRequest_QUERY_REQUEST_LEVEL_STRONG, nil
	default:
		return command.QueryRequest_QUERY_REQUEST_LEVEL_WEAK, nil
	}
}
// freshness returns any freshness requested with a query.
func freshness(req *http.Request) (time.Duration, error) {
q := req.URL.Query()
f := strings.TrimSpace(q.Get("freshness"))
if f == "" {
return 0, nil
}
d, err := time.ParseDuration(f)
if err != nil {
return 0, err
}
return d, nil
}
// backupFormat returns the requested backup format, setting the response
// Content-Type header accordingly: "sql" yields an SQL-text backup, and
// everything else yields the binary format.
func backupFormat(w http.ResponseWriter, r *http.Request) (store.BackupFormat, error) {
	// Named "format" rather than "fmt", which shadowed the fmt package
	// in the original code.
	format, err := fmtParam(r)
	if err != nil {
		return store.BackupBinary, err
	}
	if format == "sql" {
		w.Header().Set("Content-Type", "application/sql")
		return store.BackupSQL, nil
	}
	w.Header().Set("Content-Type", "application/octet-stream")
	return store.BackupBinary, nil
}
// prettyEnabled renders a boolean as "enabled" or "disabled" for
// human-readable status output.
func prettyEnabled(e bool) string {
	if !e {
		return "disabled"
	}
	return "enabled"
}
// NormalizeAddr ensures that the given URL has a HTTP protocol prefix.
// If none is supplied, it prefixes the URL with "http://".
func NormalizeAddr(addr string) string {
	if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") {
		return addr
	}
	return "http://" + addr
}
// EnsureHTTPS modifies the given URL, ensuring it is using the HTTPS
// protocol: bare addresses are prefixed, http:// is upgraded, and
// https:// is left untouched.
func EnsureHTTPS(addr string) string {
	switch {
	case strings.HasPrefix(addr, "https://"):
		return addr
	case strings.HasPrefix(addr, "http://"):
		return strings.Replace(addr, "http://", "https://", 1)
	default:
		return "https://" + addr
	}
}
// CheckHTTPS returns true if the given URL uses HTTPS.
func CheckHTTPS(addr string) bool {
	const scheme = "https://"
	return len(addr) >= len(scheme) && addr[:len(scheme)] == scheme
}
// executeRequestFromStrings converts a slice of strings into a
// command.ExecuteRequest. (The original comment wrongly referred to
// queryRequestFromStrings.)
func executeRequestFromStrings(s []string, timings, tx bool) *command.ExecuteRequest {
	stmts := make([]*command.Statement, len(s))
	for i := range s {
		stmts[i] = &command.Statement{
			Sql: s[i],
		}
	}
	return &command.ExecuteRequest{
		Request: &command.Request{
			Statements:  stmts,
			Transaction: tx,
		},
		Timings: timings,
	}
}
// queryRequestFromStrings converts a slice of strings into a
// command.QueryRequest, one Statement per string.
func queryRequestFromStrings(s []string, timings, tx bool) *command.QueryRequest {
	stmts := make([]*command.Statement, 0, len(s))
	for _, sql := range s {
		stmts = append(stmts, &command.Statement{Sql: sql})
	}
	return &command.QueryRequest{
		Request: &command.Request{
			Statements:  stmts,
			Transaction: tx,
		},
		Timings: timings,
	}
}
Return a clearer response if no DB results
This shouldn't happen, but if it does, this change allows the underlying
error to be returned to the caller. The body of the response will now
look something like:
{
"results": [],
"error": "some error",
"time": 0.021976516
}
// Package http provides the HTTP server for accessing the distributed database.
// It also provides the endpoint for other nodes to join an existing cluster.
package http
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"expvar"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/pprof"
"os"
"runtime"
"strings"
"sync"
"time"
"github.com/rqlite/rqlite/auth"
"github.com/rqlite/rqlite/command"
"github.com/rqlite/rqlite/command/encoding"
"github.com/rqlite/rqlite/store"
)
var (
	// ErrLeaderNotFound is returned when a node cannot locate a leader.
	ErrLeaderNotFound = errors.New("leader not found")
)
// Database is the interface any queryable system must implement.
type Database interface {
	// Execute executes a slice of queries, each of which is not expected
	// to return rows. If timings is true, then timing information will
	// be returned. If tx is true, then either all queries will be executed
	// successfully or it will be as though none executed.
	Execute(er *command.ExecuteRequest) ([]*command.ExecuteResult, error)

	// Query executes a slice of queries, each of which returns rows. If
	// timings is true, then timing information will be returned. If tx
	// is true, then all queries will take place while a read transaction
	// is held on the database.
	Query(qr *command.QueryRequest) ([]*command.QueryRows, error)
}
// Store is the interface the Raft-based database must implement.
type Store interface {
	Database

	// Join joins the node with the given ID, reachable at addr, to this node.
	Join(id, addr string, voter bool) error

	// Remove removes the node, specified by id, from the cluster.
	Remove(id string) error

	// LeaderAddr returns the Raft address of the leader of the cluster.
	LeaderAddr() (string, error)

	// Stats returns stats on the Store.
	Stats() (map[string]interface{}, error)

	// Nodes returns the slice of store.Servers in the cluster.
	Nodes() ([]*store.Server, error)

	// Backup writes a backup of the node state to dst.
	Backup(leader bool, f store.BackupFormat, dst io.Writer) error
}
// Cluster is the interface node API services must provide.
type Cluster interface {
	// GetNodeAPIAddr returns the HTTP API URL for the node at the given Raft address.
	GetNodeAPIAddr(nodeAddr string, timeout time.Duration) (string, error)

	// Execute performs an Execute Request on a remote node.
	Execute(er *command.ExecuteRequest, nodeAddr string, timeout time.Duration) ([]*command.ExecuteResult, error)

	// Query performs a Query Request on a remote node.
	Query(qr *command.QueryRequest, nodeAddr string, timeout time.Duration) ([]*command.QueryRows, error)

	// Stats returns stats on the Cluster.
	Stats() (map[string]interface{}, error)
}
// CredentialStore is the interface credential stores must support.
type CredentialStore interface {
	// Check returns whether username and password are a valid combination.
	Check(username, password string) bool

	// HasPerm returns whether username has the given perm.
	HasPerm(username string, perm string) bool

	// HasAnyPerm returns whether username has any of the given perms.
	HasAnyPerm(username string, perm ...string) bool
}
// StatusReporter is the interface status providers must implement in
// order to be served by the status endpoint.
type StatusReporter interface {
	Stats() (map[string]interface{}, error)
}
// DBResults stores either an Execute result or a Query result; at most
// one of the two fields is expected to be populated.
type DBResults struct {
	ExecuteResult []*command.ExecuteResult
	QueryRows     []*command.QueryRows
}
// MarshalJSON implements the JSON Marshaler interface. Whichever result
// slice is populated is encoded; when neither is set, an empty JSON list
// is emitted so callers always see a "results" array.
func (d *DBResults) MarshalJSON() ([]byte, error) {
	if d.ExecuteResult != nil {
		return encoding.JSONMarshal(d.ExecuteResult)
	} else if d.QueryRows != nil {
		return encoding.JSONMarshal(d.QueryRows)
	}
	return json.Marshal(make([]interface{}, 0)) // An empty list.
}
// Response represents a response from the HTTP service.
type Response struct {
	Results *DBResults `json:"results,omitempty"` // DB results, if any.
	Error   string     `json:"error,omitempty"`   // Error message, if any.
	Time    float64    `json:"time,omitempty"`    // Elapsed seconds; set by SetTime.

	start time.Time // When the response was created.
	end   time.Time // When processing finished.
}
// stats captures stats for the HTTP service. It is created and seeded
// in init().
var stats *expvar.Map
const (
	// Names of the expvar counters maintained by this package.
	numLeaderNotFound   = "leader_not_found"
	numExecutions       = "executions"
	numQueries          = "queries"
	numRemoteExecutions = "remote_executions"
	numRemoteQueries    = "remote_queries"
	numBackups          = "backups"
	numLoad             = "loads"
	numJoins            = "joins"
	numAuthOK           = "authOK"
	numAuthFail         = "authFail"

	// Default timeout for cluster communications.
	defaultTimeout = 30 * time.Second

	// PermAll means all actions permitted.
	PermAll = "all"
	// PermJoin means user is permitted to join cluster.
	PermJoin = "join"
	// PermRemove means user is permitted to remove a node.
	PermRemove = "remove"
	// PermExecute means user can access execute endpoint.
	PermExecute = "execute"
	// PermQuery means user can access query endpoint.
	PermQuery = "query"
	// PermStatus means user can retrieve node status.
	PermStatus = "status"
	// PermReady means user can retrieve ready status.
	PermReady = "ready"
	// PermBackup means user can backup node.
	PermBackup = "backup"
	// PermLoad means user can load a SQLite dump into a node.
	PermLoad = "load"

	// VersionHTTPHeader is the HTTP header key for the version.
	VersionHTTPHeader = "X-RQLITE-VERSION"

	// ServedByHTTPHeader is the HTTP header used to report which
	// node (by node Raft address) actually served the request if
	// it wasn't served by this node.
	ServedByHTTPHeader = "X-RQLITE-SERVED-BY"
)
// init publishes the "http" expvar map and registers every counter at
// zero so all stats are visible before their first increment.
func init() {
	stats = expvar.NewMap("http")
	for _, counter := range []string{
		numLeaderNotFound,
		numExecutions,
		numQueries,
		numRemoteExecutions,
		numRemoteQueries,
		numBackups,
		numLoad,
		numJoins,
		numAuthOK,
		numAuthFail,
	} {
		stats.Add(counter, 0)
	}
}
// SetTime sets the Time attribute of the response. This way it will be present
// in the serialized JSON version. The value is the elapsed time between the
// response's start and end timestamps, in seconds.
func (r *Response) SetTime() {
	r.Time = r.end.Sub(r.start).Seconds()
}
// NewResponse returns a new instance of response. The start time is
// recorded now; callers set end when processing completes.
func NewResponse() *Response {
	return &Response{
		Results: &DBResults{},
		start:   time.Now(),
	}
}
// Service provides HTTP service.
type Service struct {
	addr string       // Bind address of the HTTP service.
	ln   net.Listener // Service listener

	store   Store   // The Raft-backed database store.
	cluster Cluster // The Cluster service.

	start      time.Time // Start up time.
	lastBackup time.Time // Time of last successful backup.

	// statusMu guards statuses, which maps registered keys to their
	// StatusReporter (see RegisterStatus).
	statusMu sync.RWMutex
	statuses map[string]StatusReporter

	CACertFile string // Path to root X.509 certificate.
	CertFile   string // Path to SSL certificate.
	KeyFile    string // Path to SSL private key.
	TLS1011    bool   // Whether older, deprecated TLS should be supported.

	credentialStore CredentialStore // nil disables authentication entirely.

	Expvar bool // Serve expvar data on /debug/vars.
	Pprof  bool // Serve pprof data on /debug/pprof.

	BuildInfo map[string]interface{} // Optional build metadata; "version" is reported in headers.

	logger *log.Logger
}
// New returns an uninitialized HTTP service. If credentials is nil, then
// the service performs no authentication and authorization checks.
// Call Start to begin serving.
func New(addr string, store Store, cluster Cluster, credentials CredentialStore) *Service {
	return &Service{
		addr:            addr,
		store:           store,
		cluster:         cluster,
		start:           time.Now(),
		statuses:        make(map[string]StatusReporter),
		credentialStore: credentials,
		logger:          log.New(os.Stderr, "[http] ", log.LstdFlags),
	}
}
// Start starts the service. It listens on the configured bind address,
// using TLS when both a certificate and key file are set, and serves
// in a background goroutine. Serve errors are logged, not returned.
// NOTE(review): the http.Server has no Read/Write timeouts — confirm
// this is intentional.
func (s *Service) Start() error {
	server := http.Server{
		Handler: s,
	}

	var ln net.Listener
	var err error
	// Plain TCP unless both a cert and key were supplied.
	if s.CertFile == "" || s.KeyFile == "" {
		ln, err = net.Listen("tcp", s.addr)
		if err != nil {
			return err
		}
	} else {
		config, err := createTLSConfig(s.CertFile, s.KeyFile, s.CACertFile, s.TLS1011)
		if err != nil {
			return err
		}

		ln, err = tls.Listen("tcp", s.addr, config)
		if err != nil {
			return err
		}
		s.logger.Printf("secure HTTPS server enabled with cert %s, key %s", s.CertFile, s.KeyFile)
	}
	s.ln = ln

	go func() {
		err := server.Serve(s.ln)
		if err != nil {
			s.logger.Println("HTTP service Serve() returned:", err.Error())
		}
	}()
	s.logger.Println("service listening on", s.Addr())

	return nil
}
// Close closes the service by closing its listener, which stops the
// serving goroutine started by Start. The listener's Close error is
// deliberately ignored; there is no caller that could act on it.
func (s *Service) Close() {
	s.ln.Close()
}
// HTTPS returns whether this service is using HTTPS, which is the case
// exactly when both a certificate file and a key file are configured.
func (s *Service) HTTPS() bool {
	if s.CertFile == "" {
		return false
	}
	return s.KeyFile != ""
}
// ServeHTTP allows Service to serve HTTP requests. It adds the build
// version header to every response, then routes by URL path prefix.
// NOTE(review): /remove does not bump a stats counter, unlike /join —
// confirm that asymmetry is intended.
func (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	s.addBuildVersion(w)

	switch {
	case r.URL.Path == "/" || r.URL.Path == "":
		http.Redirect(w, r, "/status", http.StatusFound)
	case strings.HasPrefix(r.URL.Path, "/db/execute"):
		stats.Add(numExecutions, 1)
		s.handleExecute(w, r)
	case strings.HasPrefix(r.URL.Path, "/db/query"):
		stats.Add(numQueries, 1)
		s.handleQuery(w, r)
	case strings.HasPrefix(r.URL.Path, "/db/backup"):
		stats.Add(numBackups, 1)
		s.handleBackup(w, r)
	case strings.HasPrefix(r.URL.Path, "/db/load"):
		stats.Add(numLoad, 1)
		s.handleLoad(w, r)
	case strings.HasPrefix(r.URL.Path, "/join"):
		stats.Add(numJoins, 1)
		s.handleJoin(w, r)
	case strings.HasPrefix(r.URL.Path, "/remove"):
		s.handleRemove(w, r)
	case strings.HasPrefix(r.URL.Path, "/status"):
		s.handleStatus(w, r)
	case strings.HasPrefix(r.URL.Path, "/nodes"):
		s.handleNodes(w, r)
	case strings.HasPrefix(r.URL.Path, "/readyz"):
		s.handleReadyz(w, r)
	// Debug endpoints are only routed when explicitly enabled.
	case r.URL.Path == "/debug/vars" && s.Expvar:
		s.handleExpvar(w, r)
	case strings.HasPrefix(r.URL.Path, "/debug/pprof") && s.Pprof:
		s.handlePprof(w, r)
	default:
		w.WriteHeader(http.StatusNotFound)
	}
}
// RegisterStatus allows other modules to register status for serving over HTTP.
// It returns an error when the key has already been registered; a key can be
// claimed exactly once.
func (s *Service) RegisterStatus(key string, stat StatusReporter) error {
	s.statusMu.Lock()
	defer s.statusMu.Unlock()

	_, taken := s.statuses[key]
	if taken {
		return fmt.Errorf("status already registered with key %s", key)
	}
	s.statuses[key] = stat

	return nil
}
// handleJoin handles cluster-join requests from other nodes. The request
// body is JSON with required string fields "id" and "addr", and an
// optional boolean "voter" (defaulting to true). Non-leader nodes
// redirect the caller to the leader when one is known.
func (s *Service) handleJoin(w http.ResponseWriter, r *http.Request) {
	if !s.CheckRequestPerm(r, PermJoin) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "POST" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	md := map[string]interface{}{}
	if err := json.Unmarshal(b, &md); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	// Checked type assertions: a missing key or a non-string value is a
	// client error. The previous unchecked assertions panicked on
	// malformed input such as {"id": 123}.
	remoteID, ok := md["id"].(string)
	if !ok {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	remoteAddr, ok := md["addr"].(string)
	if !ok {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	// "voter" is optional, defaulting to true, but when present it must
	// be a JSON boolean.
	voter := true
	if v, present := md["voter"]; present {
		voter, ok = v.(bool)
		if !ok {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
	}

	if err := s.store.Join(remoteID, remoteAddr, voter); err != nil {
		if err == store.ErrNotLeader {
			leaderAPIAddr := s.LeaderAPIAddr()
			if leaderAPIAddr == "" {
				stats.Add(numLeaderNotFound, 1)
				http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
				return
			}

			redirect := s.FormRedirect(r, leaderAPIAddr)
			http.Redirect(w, r, redirect, http.StatusMovedPermanently)
			return
		}

		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
// handleRemove handles cluster-remove requests. The request body is JSON
// containing exactly one key, "id", naming the node to remove. Non-leader
// nodes redirect the caller to the leader when one is known.
func (s *Service) handleRemove(w http.ResponseWriter, r *http.Request) {
	if !s.CheckRequestPerm(r, PermRemove) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "DELETE" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	m := map[string]string{}
	if err := json.Unmarshal(b, &m); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	// Exactly one key is expected; anything else is a malformed request.
	if len(m) != 1 {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	remoteID, ok := m["id"]
	if !ok {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	if err := s.store.Remove(remoteID); err != nil {
		if err == store.ErrNotLeader {
			leaderAPIAddr := s.LeaderAPIAddr()
			if leaderAPIAddr == "" {
				stats.Add(numLeaderNotFound, 1)
				http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
				return
			}

			redirect := s.FormRedirect(r, leaderAPIAddr)
			http.Redirect(w, r, redirect, http.StatusMovedPermanently)
			return
		}

		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
// handleBackup returns the consistent database snapshot. The snapshot is
// streamed directly to the response writer in the format selected by the
// "fmt" query parameter; "noleader" skips the leader check. On success
// the last-backup timestamp is recorded for /status reporting.
func (s *Service) handleBackup(w http.ResponseWriter, r *http.Request) {
	if !s.CheckRequestPerm(r, PermBackup) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "GET" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	noLeader, err := noLeader(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// backupFormat also sets the response Content-Type.
	bf, err := backupFormat(w, r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	err = s.store.Backup(!noLeader, bf, w)
	if err != nil {
		if err == store.ErrNotLeader {
			leaderAPIAddr := s.LeaderAPIAddr()
			if leaderAPIAddr == "" {
				stats.Add(numLeaderNotFound, 1)
				http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
				return
			}

			redirect := s.FormRedirect(r, leaderAPIAddr)
			http.Redirect(w, r, redirect, http.StatusMovedPermanently)
			return
		}

		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	s.lastBackup = time.Now()
}
// handleLoad loads the state contained in a .dump output. This API is different
// from others in that it expects a raw file, not wrapped in any kind of JSON.
// The entire body is executed as a single statement string; a not-leader
// error results in a redirect to the leader when one is known.
func (s *Service) handleLoad(w http.ResponseWriter, r *http.Request) {
	if !s.CheckRequestPerm(r, PermLoad) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "POST" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	resp := NewResponse()

	timings, err := isTimings(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	r.Body.Close()

	// No JSON structure expected for this API.
	queries := []string{string(b)}
	er := executeRequestFromStrings(queries, timings, false)

	results, err := s.store.Execute(er)
	if err != nil {
		if err == store.ErrNotLeader {
			leaderAPIAddr := s.LeaderAPIAddr()
			if leaderAPIAddr == "" {
				stats.Add(numLeaderNotFound, 1)
				http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
				return
			}

			redirect := s.FormRedirect(r, leaderAPIAddr)
			http.Redirect(w, r, redirect, http.StatusMovedPermanently)
			return
		}
		resp.Error = err.Error()
	} else {
		resp.Results.ExecuteResult = results
	}
	resp.end = time.Now()
	s.writeResponse(w, r, resp)
}
// handleStatus returns status on the system. It aggregates OS, runtime,
// store, cluster, and HTTP-service information, plus any registered
// StatusReporters, into a single JSON document.
func (s *Service) handleStatus(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")

	if !s.CheckRequestPerm(r, PermStatus) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "GET" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	storeStatus, err := s.store.Stats()
	if err != nil {
		http.Error(w, fmt.Sprintf("store stats: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}

	clusterStatus, err := s.cluster.Stats()
	if err != nil {
		http.Error(w, fmt.Sprintf("cluster stats: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}

	rt := map[string]interface{}{
		"GOARCH":        runtime.GOARCH,
		"GOOS":          runtime.GOOS,
		"GOMAXPROCS":    runtime.GOMAXPROCS(0),
		"num_cpu":       runtime.NumCPU(),
		"num_goroutine": runtime.NumGoroutine(),
		"version":       runtime.Version(),
	}

	oss := map[string]interface{}{
		"pid":       os.Getpid(),
		"ppid":      os.Getppid(),
		"page_size": os.Getpagesize(),
	}
	// Executable path and hostname are best-effort only.
	executable, err := os.Executable()
	if err == nil {
		oss["executable"] = executable
	}
	hostname, err := os.Hostname()
	if err == nil {
		oss["hostname"] = hostname
	}

	httpStatus := map[string]interface{}{
		"bind_addr": s.Addr().String(),
		"auth":      prettyEnabled(s.credentialStore != nil),
		"cluster":   clusterStatus,
	}

	nodeStatus := map[string]interface{}{
		"start_time": s.start,
		"uptime":     time.Since(s.start).String(),
	}

	// Build the status response.
	status := map[string]interface{}{
		"os":      oss,
		"runtime": rt,
		"store":   storeStatus,
		"http":    httpStatus,
		"node":    nodeStatus,
	}
	if !s.lastBackup.IsZero() {
		status["last_backup_time"] = s.lastBackup
	}
	if s.BuildInfo != nil {
		status["build"] = s.BuildInfo
	}

	// Add any registered StatusReporters. The error is captured and
	// handled after the closure returns: previously http.Error was
	// called inside the closure but the outer function kept running,
	// appending the JSON body after the error response.
	var reportErr error
	func() {
		s.statusMu.RLock()
		defer s.statusMu.RUnlock()
		for k, v := range s.statuses {
			stat, err := v.Stats()
			if err != nil {
				reportErr = err
				return
			}
			status[k] = stat
		}
	}()
	if reportErr != nil {
		http.Error(w, fmt.Sprintf("registered stats: %s", reportErr.Error()),
			http.StatusInternalServerError)
		return
	}

	pretty, _ := isPretty(r)
	var b []byte
	if pretty {
		b, err = json.MarshalIndent(status, "", "    ")
	} else {
		b, err = json.Marshal(status)
	}
	if err != nil {
		http.Error(w, fmt.Sprintf("JSON marshal: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}
	_, err = w.Write(b)
	if err != nil {
		http.Error(w, fmt.Sprintf("write: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}
}
// handleNodes returns status on the other voting nodes in the system.
// This attempts to contact all the nodes in the cluster, so may take
// some time to return. Non-voters are excluded unless the "nonvoters"
// query parameter is supplied.
func (s *Service) handleNodes(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")

	if !s.CheckRequestPerm(r, PermStatus) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "GET" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	timeout, err := timeoutParam(r, defaultTimeout)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	includeNonVoters, err := nonVoters(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Get nodes in the cluster, and possibly filter out non-voters.
	nodes, err := s.store.Nodes()
	if err != nil {
		http.Error(w, fmt.Sprintf("store nodes: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}

	filteredNodes := make([]*store.Server, 0)
	for _, n := range nodes {
		if n.Suffrage != "Voter" && !includeNonVoters {
			continue
		}
		filteredNodes = append(filteredNodes, n)
	}

	lAddr, err := s.store.LeaderAddr()
	if err != nil {
		http.Error(w, fmt.Sprintf("leader address: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}

	// Probe each node for reachability, its API address, and latency.
	nodesResp, err := s.checkNodes(filteredNodes, timeout)
	if err != nil {
		http.Error(w, fmt.Sprintf("check nodes: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}

	resp := make(map[string]struct {
		APIAddr   string  `json:"api_addr,omitempty"`
		Addr      string  `json:"addr,omitempty"`
		Reachable bool    `json:"reachable"`
		Leader    bool    `json:"leader"`
		Time      float64 `json:"time,omitempty"`
		Error     string  `json:"error,omitempty"`
	})

	for _, n := range filteredNodes {
		nn := resp[n.ID]
		nn.Addr = n.Addr
		nn.Leader = nn.Addr == lAddr
		nn.APIAddr = nodesResp[n.ID].apiAddr
		nn.Reachable = nodesResp[n.ID].reachable
		nn.Time = nodesResp[n.ID].time.Seconds()
		nn.Error = nodesResp[n.ID].error
		resp[n.ID] = nn
	}

	pretty, _ := isPretty(r)
	var b []byte
	if pretty {
		b, err = json.MarshalIndent(resp, "", "    ")
	} else {
		b, err = json.Marshal(resp)
	}
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	_, err = w.Write(b)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
// handleReadyz returns whether the node is ready. Ready means a leader is
// known and its API address can be fetched within the request timeout;
// otherwise 503 is returned.
func (s *Service) handleReadyz(w http.ResponseWriter, r *http.Request) {
	if !s.CheckRequestPerm(r, PermReady) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "GET" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	timeout, err := timeoutParam(r, defaultTimeout)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	lAddr, err := s.store.LeaderAddr()
	if err != nil {
		http.Error(w, fmt.Sprintf("leader address: %s", err.Error()),
			http.StatusInternalServerError)
		return
	}

	if lAddr != "" {
		// Confirm the leader is actually responsive, not merely known.
		if _, err := s.cluster.GetNodeAPIAddr(lAddr, timeout); err == nil {
			w.WriteHeader(http.StatusOK)
			w.Write([]byte("[+]leader ok"))
			return
		}
	}
	w.WriteHeader(http.StatusServiceUnavailable)
}
// handleExecute handles queries that modify the database. When this node
// is not the leader, the request is either redirected (if the "redirect"
// query parameter is set) or transparently forwarded to the leader over
// the cluster service.
func (s *Service) handleExecute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")

	if !s.CheckRequestPerm(r, PermExecute) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "POST" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	resp := NewResponse()

	timeout, isTx, timings, redirect, err := reqParams(r, defaultTimeout)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	r.Body.Close()

	stmts, err := ParseRequest(b)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	er := &command.ExecuteRequest{
		Request: &command.Request{
			Transaction: isTx,
			Statements:  stmts,
		},
		Timings: timings,
	}

	results, resultsErr := s.store.Execute(er)
	if resultsErr != nil && resultsErr == store.ErrNotLeader {
		if redirect {
			leaderAPIAddr := s.LeaderAPIAddr()
			if leaderAPIAddr == "" {
				stats.Add(numLeaderNotFound, 1)
				http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
				return
			}
			loc := s.FormRedirect(r, leaderAPIAddr)
			http.Redirect(w, r, loc, http.StatusMovedPermanently)
			return
		}

		// Forward the request to the leader on the caller's behalf.
		addr, err := s.store.LeaderAddr()
		if err != nil {
			http.Error(w, fmt.Sprintf("leader address: %s", err.Error()),
				http.StatusInternalServerError)
			return
		}
		if addr == "" {
			stats.Add(numLeaderNotFound, 1)
			http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
			return
		}

		results, resultsErr = s.cluster.Execute(er, addr, timeout)
		stats.Add(numRemoteExecutions, 1)
		w.Header().Add(ServedByHTTPHeader, addr)
	}

	if resultsErr != nil {
		resp.Error = resultsErr.Error()
	} else {
		resp.Results.ExecuteResult = results
	}
	resp.end = time.Now()
	s.writeResponse(w, r, resp)
}
// handleQuery handles queries that do not modify the database. GET
// requests supply the statement via the "q" parameter; POST requests
// carry a request body. Not-leader errors are handled like handleExecute:
// redirect when requested, otherwise forward to the leader.
func (s *Service) handleQuery(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")

	if !s.CheckRequestPerm(r, PermQuery) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	if r.Method != "GET" && r.Method != "POST" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	resp := NewResponse()

	timeout, isTx, timings, redirect, err := reqParams(r, defaultTimeout)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	lvl, err := level(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	frsh, err := freshness(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// Get the query statement(s), and do tx if necessary.
	queries, err := requestQueries(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	qr := &command.QueryRequest{
		Request: &command.Request{
			Transaction: isTx,
			Statements:  queries,
		},
		Timings:   timings,
		Level:     lvl,
		Freshness: frsh.Nanoseconds(),
	}

	results, resultsErr := s.store.Query(qr)
	if resultsErr != nil && resultsErr == store.ErrNotLeader {
		if redirect {
			leaderAPIAddr := s.LeaderAPIAddr()
			if leaderAPIAddr == "" {
				stats.Add(numLeaderNotFound, 1)
				http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
				return
			}
			loc := s.FormRedirect(r, leaderAPIAddr)
			http.Redirect(w, r, loc, http.StatusMovedPermanently)
			return
		}

		// Forward the query to the leader on the caller's behalf.
		addr, err := s.store.LeaderAddr()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		if addr == "" {
			stats.Add(numLeaderNotFound, 1)
			http.Error(w, ErrLeaderNotFound.Error(), http.StatusServiceUnavailable)
			return
		}

		results, resultsErr = s.cluster.Query(qr, addr, timeout)
		stats.Add(numRemoteQueries, 1)
		w.Header().Add(ServedByHTTPHeader, addr)
	}

	if resultsErr != nil {
		resp.Error = resultsErr.Error()
	} else {
		resp.Results.QueryRows = results
	}
	resp.end = time.Now()
	s.writeResponse(w, r, resp)
}
// handleExpvar serves registered expvar information over HTTP as a
// hand-assembled JSON object, one key per published variable.
func (s *Service) handleExpvar(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")

	if !s.CheckRequestPerm(r, PermStatus) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	fmt.Fprintf(w, "{\n")
	sep := ""
	expvar.Do(func(kv expvar.KeyValue) {
		// The separator is empty for the first entry only.
		fmt.Fprintf(w, "%s%q: %s", sep, kv.Key, kv.Value)
		sep = ",\n"
	})
	fmt.Fprintf(w, "\n}\n")
}
// handlePprof serves pprof information over HTTP, dispatching to the
// dedicated pprof handlers for cmdline, profile, and symbol, and to the
// index handler for everything else under /debug/pprof.
func (s *Service) handlePprof(w http.ResponseWriter, r *http.Request) {
	if !s.CheckRequestPerm(r, PermStatus) {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	switch r.URL.Path {
	case "/debug/pprof/cmdline":
		pprof.Cmdline(w, r)
	case "/debug/pprof/profile":
		pprof.Profile(w, r)
	case "/debug/pprof/symbol":
		pprof.Symbol(w, r)
	default:
		pprof.Index(w, r)
	}
}
// Addr returns the address on which the Service is listening. It must
// only be called after Start has set the listener.
func (s *Service) Addr() net.Addr {
	return s.ln.Addr()
}
// FormRedirect returns the value for the "Location" header for a 301
// response, preserving the original request's path and raw query string.
func (s *Service) FormRedirect(r *http.Request, url string) string {
	if rq := r.URL.RawQuery; rq != "" {
		return fmt.Sprintf("%s%s?%s", url, r.URL.Path, rq)
	}
	return url + r.URL.Path
}
// CheckRequestPerm checks if the request is authenticated and authorized
// with the given Perm. The deferred function records the outcome in the
// auth-OK/auth-fail counters regardless of which branch decides it.
func (s *Service) CheckRequestPerm(r *http.Request, perm string) (b bool) {
	defer func() {
		if b {
			stats.Add(numAuthOK, 1)
		} else {
			stats.Add(numAuthFail, 1)
		}
	}()

	// No credential store? Auth is not even enabled.
	if s.credentialStore == nil {
		return true
	}

	// Is the required perm granted to all users, including anonymous users?
	// PermAll acts as a wildcard matching any required perm.
	if s.credentialStore.HasAnyPerm(auth.AllUsers, perm, PermAll) {
		return true
	}

	// At this point there needs to be BasicAuth information in the request.
	username, password, ok := r.BasicAuth()
	if !ok {
		return false
	}

	// Are the BasicAuth creds good?
	if !s.credentialStore.Check(username, password) {
		return false
	}

	// Is the specified user authorized?
	return s.credentialStore.HasAnyPerm(username, perm, PermAll)
}
// LeaderAPIAddr returns the API address of the leader, as known by this node.
// An empty string is returned when the leader's Raft address or its API
// address cannot be determined; errors are deliberately collapsed into "".
func (s *Service) LeaderAPIAddr() string {
	nodeAddr, err := s.store.LeaderAddr()
	if err != nil {
		return ""
	}

	apiAddr, err := s.cluster.GetNodeAPIAddr(nodeAddr, defaultTimeout)

	if err != nil {
		return ""
	}

	return apiAddr
}
// checkNodesResponse holds the outcome of probing a single node.
type checkNodesResponse struct {
	apiAddr   string        // API address reported by the node, if reachable.
	reachable bool          // Whether the node responded at all.
	time      time.Duration // How long the probe took.
	error     string        // Probe error message, if any.
}
// checkNodes returns a map of node ID to node responsivness, reachable
// being defined as node responds to a simple request over the network.
// Nodes are probed concurrently, one goroutine per node; the call blocks
// until every probe has completed or timed out.
func (s *Service) checkNodes(nodes []*store.Server, timeout time.Duration) (map[string]*checkNodesResponse, error) {
	var wg sync.WaitGroup
	var mu sync.Mutex

	resp := make(map[string]*checkNodesResponse)
	for _, n := range nodes {
		resp[n.ID] = &checkNodesResponse{}
	}
	// Now confirm. The network call happens outside the mutex so the
	// probes actually run concurrently; previously the lock was taken
	// before GetNodeAPIAddr, serializing all the checks. The lock now
	// guards only the writes to the shared response entries.
	for _, n := range nodes {
		wg.Add(1)
		go func(id, raftAddr string) {
			defer wg.Done()
			start := time.Now()
			apiAddr, err := s.cluster.GetNodeAPIAddr(raftAddr, timeout)
			elapsed := time.Since(start)

			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				resp[id].error = err.Error()
				return
			}
			resp[id].reachable = true
			resp[id].apiAddr = apiAddr
			resp[id].time = elapsed
		}(n.ID, n.Addr)
	}
	wg.Wait()

	return resp, nil
}
// addBuildVersion adds the build version to the HTTP response, falling
// back to "unknown" when BuildInfo has no string "version" entry.
func (s *Service) addBuildVersion(w http.ResponseWriter) {
	// Add version header to every response, if available.
	version, ok := s.BuildInfo["version"].(string)
	if !ok {
		version = "unknown"
	}
	w.Header().Add(VersionHTTPHeader, version)
}
// writeResponse writes the given response to the given writer, honoring
// the request's "pretty" and "timings" query parameters. Marshal errors
// become a 500; write errors are only logged, since the status line has
// already been sent.
func (s *Service) writeResponse(w http.ResponseWriter, r *http.Request, j *Response) {
	pretty, _ := isPretty(r)
	if timings, _ := isTimings(r); timings {
		j.SetTime()
	}

	var (
		b   []byte
		err error
	)
	if pretty {
		b, err = json.MarshalIndent(j, "", "    ")
	} else {
		b, err = json.Marshal(j)
	}
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	if _, err = w.Write(b); err != nil {
		s.logger.Println("writing response failed:", err.Error())
	}
}
// requestQueries extracts the query statements from the request. GET
// requests supply a single statement via the "q" parameter; POST requests
// carry a parseable body.
func requestQueries(r *http.Request) ([]*command.Statement, error) {
	if r.Method == "GET" {
		query, err := stmtParam(r)
		if err != nil || query == "" {
			return nil, errors.New("bad query GET request")
		}
		return []*command.Statement{
			{
				Sql: query,
			},
		}, nil
	}

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, errors.New("bad query POST request")
	}
	r.Body.Close()

	return ParseRequest(b)
}
// createTLSConfig returns a TLS config from the given cert and key.
// TLS 1.2 is the minimum version unless tls1011 explicitly permits the
// older, deprecated TLS 1.0. An optional CA cert file is loaded into the
// pool of root certificates.
// NOTE(review): the CA certs are assigned to RootCAs, not ClientCAs —
// confirm this is the intended field for a server-side listener config.
func createTLSConfig(certFile, keyFile, caCertFile string, tls1011 bool) (*tls.Config, error) {
	var err error

	var minTLS = uint16(tls.VersionTLS12)
	if tls1011 {
		minTLS = tls.VersionTLS10
	}

	config := &tls.Config{
		NextProtos: []string{"h2", "http/1.1"},
		MinVersion: minTLS,
	}
	config.Certificates = make([]tls.Certificate, 1)
	config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	if caCertFile != "" {
		asn1Data, err := ioutil.ReadFile(caCertFile)
		if err != nil {
			return nil, err
		}
		config.RootCAs = x509.NewCertPool()
		ok := config.RootCAs.AppendCertsFromPEM(asn1Data)
		if !ok {
			return nil, fmt.Errorf("failed to parse root certificate(s) in %q", caCertFile)
		}
	}
	return config, nil
}
// queryParam returns whether the given query param is present.
func queryParam(req *http.Request, param string) (bool, error) {
err := req.ParseForm()
if err != nil {
return false, err
}
if _, ok := req.Form[param]; ok {
return true, nil
}
return false, nil
}
// stmtParam returns the value for URL param 'q', if present.
func stmtParam(req *http.Request) (string, error) {
q := req.URL.Query()
stmt := strings.TrimSpace(q.Get("q"))
return stmt, nil
}
// fmtParam returns the value for URL param 'fmt', if present.
func fmtParam(req *http.Request) (string, error) {
q := req.URL.Query()
return strings.TrimSpace(q.Get("fmt")), nil
}
// isPretty returns whether the HTTP response body should be pretty-printed,
// driven by the presence of the "pretty" query parameter.
func isPretty(req *http.Request) (bool, error) {
	return queryParam(req, "pretty")
}
// isRedirect returns whether the HTTP request is requesting a explicit
// redirect to the leader, if necessary, via the "redirect" query parameter.
func isRedirect(req *http.Request) (bool, error) {
	return queryParam(req, "redirect")
}
// timeoutParam returns the value, if any, set for timeout. If not set, it
// returns the value passed in as a default.
func timeoutParam(req *http.Request, def time.Duration) (time.Duration, error) {
q := req.URL.Query()
timeout := strings.TrimSpace(q.Get("timeout"))
if timeout == "" {
return def, nil
}
t, err := time.ParseDuration(timeout)
if err != nil {
return 0, err
}
return t, nil
}
// isTx returns whether the HTTP request is requesting a transaction,
// driven by the presence of the "transaction" query parameter.
func isTx(req *http.Request) (bool, error) {
	return queryParam(req, "transaction")
}
// reqParams is a convenience function to get a bunch of query params
// in one function call. On any parse error it returns zero values for
// every result along with the error.
func reqParams(req *http.Request, def time.Duration) (timeout time.Duration, tx, timings, redirect bool, err error) {
	timeout, err = timeoutParam(req, def)
	if err != nil {
		return 0, false, false, false, err
	}
	tx, err = isTx(req)
	if err != nil {
		return 0, false, false, false, err
	}
	timings, err = isTimings(req)
	if err != nil {
		return 0, false, false, false, err
	}
	redirect, err = isRedirect(req)
	if err != nil {
		return 0, false, false, false, err
	}
	return timeout, tx, timings, redirect, nil
}
// noLeader returns whether processing should skip the leader check,
// driven by the presence of the "noleader" query parameter.
func noLeader(req *http.Request) (bool, error) {
	return queryParam(req, "noleader")
}
// nonVoters returns whether a query is requesting to include non-voter results,
// driven by the presence of the "nonvoters" query parameter.
func nonVoters(req *http.Request) (bool, error) {
	return queryParam(req, "nonvoters")
}
// isTimings returns whether timings are requested, driven by the
// presence of the "timings" query parameter.
func isTimings(req *http.Request) (bool, error) {
	return queryParam(req, "timings")
}
// level returns the requested consistency level for a query. Unrecognized
// or absent values fall back to weak consistency. The error result is
// always nil; it exists for signature symmetry with the other helpers.
func level(req *http.Request) (command.QueryRequest_Level, error) {
	q := req.URL.Query()
	lvl := strings.TrimSpace(q.Get("level"))

	switch strings.ToLower(lvl) {
	case "none":
		return command.QueryRequest_QUERY_REQUEST_LEVEL_NONE, nil
	case "weak":
		return command.QueryRequest_QUERY_REQUEST_LEVEL_WEAK, nil
	case "strong":
		return command.QueryRequest_QUERY_REQUEST_LEVEL_STRONG, nil
	default:
		return command.QueryRequest_QUERY_REQUEST_LEVEL_WEAK, nil
	}
}
// freshness returns any freshness requested with a query.
func freshness(req *http.Request) (time.Duration, error) {
q := req.URL.Query()
f := strings.TrimSpace(q.Get("freshness"))
if f == "" {
return 0, nil
}
d, err := time.ParseDuration(f)
if err != nil {
return 0, err
}
return d, nil
}
// backupFormat returns the request backup format, setting the response header
// accordingly. "sql" selects a SQL text dump; anything else yields the
// binary format.
func backupFormat(w http.ResponseWriter, r *http.Request) (store.BackupFormat, error) {
	fmt, err := fmtParam(r)
	if err != nil {
		return store.BackupBinary, err
	}
	if fmt == "sql" {
		w.Header().Set("Content-Type", "application/sql")
		return store.BackupSQL, nil
	}
	w.Header().Set("Content-Type", "application/octet-stream")
	return store.BackupBinary, nil
}
// prettyEnabled maps a boolean to the human-readable strings
// "enabled"/"disabled" for status reporting.
func prettyEnabled(e bool) string {
	if !e {
		return "disabled"
	}
	return "enabled"
}
// NormalizeAddr ensures that the given URL has a HTTP protocol prefix.
// If none is supplied, it prefixes the URL with "http://".
func NormalizeAddr(addr string) string {
	if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") {
		return addr
	}
	return "http://" + addr
}
// EnsureHTTPS modifies the given URL, ensuring it is using the HTTPS protocol.
// An https URL is returned unchanged; an http prefix is swapped for https;
// a bare address gains an https prefix. Unlike the previous strings.Replace
// implementation, an "http://" substring appearing later in an already-https
// URL is left untouched.
func EnsureHTTPS(addr string) string {
	if strings.HasPrefix(addr, "https://") {
		return addr
	}
	return "https://" + strings.TrimPrefix(addr, "http://")
}
// CheckHTTPS returns true if the given URL uses HTTPS, determined by a
// simple "https://" prefix check.
func CheckHTTPS(addr string) bool {
	return strings.HasPrefix(addr, "https://")
}
// executeRequestFromStrings converts a slice of strings into a
// command.ExecuteRequest, one Statement per input string.
func executeRequestFromStrings(s []string, timings, tx bool) *command.ExecuteRequest {
	stmts := make([]*command.Statement, len(s))
	for i := range s {
		stmts[i] = &command.Statement{
			Sql: s[i],
		}

	}
	return &command.ExecuteRequest{
		Request: &command.Request{
			Statements:  stmts,
			Transaction: tx,
		},
		Timings: timings,
	}
}
// queryRequestFromStrings converts a slice of strings into a
// command.QueryRequest, one Statement per input string.
func queryRequestFromStrings(s []string, timings, tx bool) *command.QueryRequest {
	stmts := make([]*command.Statement, len(s))
	for i := range s {
		stmts[i] = &command.Statement{
			Sql: s[i],
		}

	}
	return &command.QueryRequest{
		Request: &command.Request{
			Statements:  stmts,
			Transaction: tx,
		},
		Timings: timings,
	}
}
|
package colly
import (
"errors"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"regexp"
"sync"
"time"
"github.com/gobwas/glob"
)
// httpBackend performs HTTP requests on behalf of the collector,
// applying per-domain rate limits.
type httpBackend struct {
	LimitRules []*LimitRule // Registered per-domain limits; first match wins.
	Client     *http.Client // Shared client with a cookie jar.
	lock       *sync.Mutex  // Guards LimitRules.
}
// LimitRule provides connection restrictions for domains.
// There can be two kind of limitations:
//  - Parallelism: Set limit for the number of concurrent requests to a domain
//  - Delay: Set rate limit for a domain (this means no parallelism on the matching domains)
type LimitRule struct {
	// DomainRegexp is a regular expression to match against domains
	DomainRegexp string
	// DomainGlob is a glob pattern to match against domains
	DomainGlob string
	// Delay is the duration to wait before creating a new request to the matching domains
	Delay time.Duration
	// Parallelism is the number of the maximum allowed concurrent requests of the matching domains
	Parallelism int
	// waitChan is a buffered channel acting as a semaphore of size
	// Parallelism (minimum 1); created by Init.
	waitChan chan bool
	// compiledRegexp/compiledGlob hold the compiled forms of the
	// patterns above; nil when the corresponding pattern is unset.
	compiledRegexp *regexp.Regexp
	compiledGlob   glob.Glob
}
// Init initializes the private members of LimitRule: it sizes the
// parallelism semaphore (minimum capacity 1) and compiles whichever of
// DomainRegexp/DomainGlob are set. At least one pattern is required.
func (r *LimitRule) Init() error {
	waitChanSize := 1
	if r.Parallelism > 1 {
		waitChanSize = r.Parallelism
	}
	r.waitChan = make(chan bool, waitChanSize)
	hasPattern := false
	if r.DomainRegexp != "" {
		c, err := regexp.Compile(r.DomainRegexp)
		if err != nil {
			return err
		}
		r.compiledRegexp = c
		hasPattern = true
	}
	if r.DomainGlob != "" {
		c, err := glob.Compile(r.DomainGlob)
		if err != nil {
			return err
		}
		r.compiledGlob = c
		hasPattern = true
	}
	if !hasPattern {
		return errors.New("No pattern defined in LimitRule")
	}
	return nil
}
// Init prepares the backend: an empty rule list, an HTTP client with a
// cookie jar, and the mutex guarding the rules.
func (h *httpBackend) Init() {
	h.LimitRules = make([]*LimitRule, 0, 8)
	// cookiejar.New with nil options is documented to never fail, so the
	// error is ignored.
	jar, _ := cookiejar.New(nil)
	h.Client = &http.Client{
		Jar: jar,
	}
	h.lock = &sync.Mutex{}
}
// Match checks that the domain parameter triggers the rule, i.e. that
// the domain matches the compiled regexp or the compiled glob.
func (r *LimitRule) Match(domain string) bool {
	if r.compiledRegexp != nil && r.compiledRegexp.MatchString(domain) {
		return true
	}
	return r.compiledGlob != nil && r.compiledGlob.Match(domain)
}
// GetMatchingRule returns the first LimitRule matching domain, or nil
// when no rule applies.
func (h *httpBackend) GetMatchingRule(domain string) *LimitRule {
	for _, r := range h.LimitRules {
		if r.Match(domain) {
			return r
		}
	}
	return nil
}
// Do executes request, honoring any LimitRule that matches the request's
// host, and returns the response with its body fully read. The
// parallelism slot is released after Delay on every exit path, so errors
// no longer leak slots; the Client.Do error is now checked, preventing
// the nil-pointer dereference that occurred when reading res.Body after
// a failed request.
func (h *httpBackend) Do(request *http.Request) (*Response, error) {
	r := h.GetMatchingRule(request.URL.Host)
	if r != nil {
		r.waitChan <- true
		defer func(r *LimitRule) {
			go func() {
				time.Sleep(r.Delay)
				<-r.waitChan
			}()
		}(r)
	}
	res, err := h.Client.Do(request)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	return &Response{
		StatusCode: res.StatusCode,
		Body:       body,
		Headers:    &res.Header,
	}, nil
}
// Limit validates and registers a new LimitRule on the backend.
// The rule is initialized before it is published to the shared rule list,
// so a rule with an invalid pattern is never registered, and concurrent
// readers can never observe a half-initialized rule. The original
// appended first and initialized after.
func (h *httpBackend) Limit(rule *LimitRule) error {
	if err := rule.Init(); err != nil {
		return err
	}
	h.lock.Lock()
	h.LimitRules = append(h.LimitRules, rule)
	h.lock.Unlock()
	return nil
}
// Limits registers each rule in rules, stopping at the first failure.
func (h *httpBackend) Limits(rules []*LimitRule) error {
	var err error
	for _, rule := range rules {
		if err = h.Limit(rule); err != nil {
			break
		}
	}
	return err
}
[fix] consistently decrement backend parallelism channel
package colly
import (
"errors"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"regexp"
"sync"
"time"
"github.com/gobwas/glob"
)
// httpBackend performs HTTP requests for the collector and enforces
// per-domain rate/parallelism limit rules.
type httpBackend struct {
	// LimitRules holds the registered rules; appends are guarded by lock.
	LimitRules []*LimitRule
	Client *http.Client
	lock *sync.Mutex
}
// LimitRule provides connection restrictions for domains.
// There can be two kind of limitations:
// - Parallelism: Set limit for the number of concurrent requests to a domain
// - Delay: Set rate limit for a domain (this means no parallelism on the matching domains)
type LimitRule struct {
	// DomainRegexp is a regular expression to match against domains
	DomainRegexp string
	// DomainGlob is a glob pattern to match against domains
	DomainGlob string
	// Delay is the duration to wait before creating a new request to the matching domains
	Delay time.Duration
	// Parallelism is the number of the maximum allowed concurrent requests of the matching domains
	Parallelism int
	// waitChan limits concurrency: one buffered slot per allowed parallel request.
	waitChan chan bool
	// compiledRegexp/compiledGlob are set by Init; nil when the pattern is unset.
	compiledRegexp *regexp.Regexp
	compiledGlob glob.Glob
}
// Init initializes the private members of LimitRule: it allocates the
// parallelism channel and compiles the configured patterns. At least one
// pattern must be set.
func (r *LimitRule) Init() error {
	size := r.Parallelism
	if size < 2 {
		size = 1
	}
	r.waitChan = make(chan bool, size)
	patternFound := false
	if r.DomainRegexp != "" {
		compiled, err := regexp.Compile(r.DomainRegexp)
		if err != nil {
			return err
		}
		r.compiledRegexp = compiled
		patternFound = true
	}
	if r.DomainGlob != "" {
		compiled, err := glob.Compile(r.DomainGlob)
		if err != nil {
			return err
		}
		r.compiledGlob = compiled
		patternFound = true
	}
	if patternFound {
		return nil
	}
	return errors.New("No pattern defined in LimitRule")
}
// Init sets up the backend with an empty rule set, a cookie-enabled HTTP
// client, and the mutex that guards LimitRules.
func (h *httpBackend) Init() {
	// cookiejar.New with nil options never fails; the error is discarded.
	jar, _ := cookiejar.New(nil)
	h.Client = &http.Client{Jar: jar}
	h.LimitRules = make([]*LimitRule, 0, 8)
	h.lock = &sync.Mutex{}
}
// Match checks that the domain parameter triggers the rule
func (r *LimitRule) Match(domain string) bool {
	match := false
	// A rule matches if either of its compiled patterns matches; patterns
	// that were not configured are left nil by Init and skipped here.
	if r.compiledRegexp != nil && r.compiledRegexp.MatchString(domain) {
		match = true
	}
	if r.compiledGlob != nil && r.compiledGlob.Match(domain) {
		match = true
	}
	return match
}
// GetMatchingRule returns the first registered rule whose pattern matches
// domain, or nil when no rule applies.
// NOTE(review): the rule list is read without holding h.lock while Limit
// appends under the lock — confirm the intended locking discipline.
func (h *httpBackend) GetMatchingRule(domain string) *LimitRule {
	for _, r := range h.LimitRules {
		if r.Match(domain) {
			return r
		}
	}
	return nil
}
// Do executes the request, honoring the LimitRule (if any) matching the
// request's host, and returns a Response with the body fully read.
func (h *httpBackend) Do(request *http.Request) (*Response, error) {
	r := h.GetMatchingRule(request.URL.Host)
	if r != nil {
		// Acquire a parallelism slot (blocks while the limit is saturated)
		// and release it on every exit path after the configured delay.
		r.waitChan <- true
		defer func(r *LimitRule) {
			time.Sleep(r.Delay)
			<-r.waitChan
		}(r)
	}
	res, err := h.Client.Do(request)
	if err != nil {
		// Must be checked before touching res: on transport error res is
		// nil and the ReadAll below would panic. The original shadowed
		// this error with the ReadAll error and never checked it.
		return nil, err
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	return &Response{
		StatusCode: res.StatusCode,
		Body:       body,
		Headers:    &res.Header,
	}, nil
}
// Limit validates and registers a new LimitRule on the backend.
// The rule is initialized before it is published to the shared rule list,
// so a rule with an invalid pattern is never registered, and concurrent
// readers can never observe a half-initialized rule. The original
// appended first and initialized after.
func (h *httpBackend) Limit(rule *LimitRule) error {
	if err := rule.Init(); err != nil {
		return err
	}
	h.lock.Lock()
	h.LimitRules = append(h.LimitRules, rule)
	h.lock.Unlock()
	return nil
}
// Limits registers every rule in rules, returning the first error.
func (h *httpBackend) Limits(rules []*LimitRule) error {
	for _, rule := range rules {
		err := h.Limit(rule)
		if err != nil {
			return err
		}
	}
	return nil
}
|
package spider
import "io"
// spinFunc is the callback invoked with the context of a request.
type spinFunc func(*Context) error
// spiderFunc describes a single HTTP request (method, url, optional body)
// together with the user callback to run against its context.
type spiderFunc struct {
	method string
	url string
	body io.Reader
	fn spinFunc
}
// Setup builds a fresh HTTP context for this spider's request.
// The parent context is intentionally unused; the parameter exists to
// satisfy the interface signature.
func (s *spiderFunc) Setup(parent *Context) (*Context, error) {
	return NewHTTPContext(s.method, s.url, s.body)
}
// Spin runs the user callback against the given context.
func (s *spiderFunc) Spin(ctx *Context) error { return s.fn(ctx) }
// NewHTTPSpider builds a spider that performs the described HTTP request
// and hands the resulting context to fn.
func NewHTTPSpider(method, url string, body io.Reader, fn spinFunc) *spiderFunc {
	s := new(spiderFunc)
	s.method = method
	s.url = url
	s.body = body
	s.fn = fn
	return s
}
// NewGETSpider builds a GET spider for url (no request body).
func NewGETSpider(url string, fn spinFunc) *spiderFunc {
	return &spiderFunc{method: "GET", url: url, fn: fn}
}
// NewPOSTSpider builds a POST spider for url with the given body.
func NewPOSTSpider(url string, body io.Reader, fn spinFunc) *spiderFunc {
	return &spiderFunc{method: "POST", url: url, body: body, fn: fn}
}
// NewPUTSpider builds a PUT spider for url with the given body.
func NewPUTSpider(url string, body io.Reader, fn spinFunc) *spiderFunc {
	return &spiderFunc{method: "PUT", url: url, body: body, fn: fn}
}
// NewDELETESpider builds a DELETE spider for url (no request body).
func NewDELETESpider(url string, fn spinFunc) *spiderFunc {
	return &spiderFunc{method: "DELETE", url: url, fn: fn}
}
Rename methods for creating spiders to be clearer and simpler
package spider
import "io"
// spinFunc is the callback invoked with the context of a request.
type spinFunc func(*Context) error
// spiderFunc describes a single HTTP request (method, url, optional body)
// together with the user callback to run against its context.
type spiderFunc struct {
	method string
	url string
	body io.Reader
	fn spinFunc
}
// Setup builds a fresh HTTP context for this spider's request.
// The parent context is intentionally unused; the parameter exists to
// satisfy the interface signature.
func (s *spiderFunc) Setup(parent *Context) (*Context, error) {
	return NewHTTPContext(s.method, s.url, s.body)
}
// Spin runs the user callback against the given context.
func (s *spiderFunc) Spin(ctx *Context) error { return s.fn(ctx) }
// NewHTTPSpider builds a spider that performs the described HTTP request
// and hands the resulting context to fn.
func NewHTTPSpider(method, url string, body io.Reader, fn spinFunc) *spiderFunc {
	return &spiderFunc{
		method: method,
		url: url,
		body: body,
		fn: fn,
	}
}
// Get builds a GET spider for url (no request body).
func Get(url string, fn spinFunc) *spiderFunc {
	return NewHTTPSpider("GET", url, nil, fn)
}
// Post builds a POST spider for url with the given body.
func Post(url string, body io.Reader, fn spinFunc) *spiderFunc {
	return NewHTTPSpider("POST", url, body, fn)
}
// Put builds a PUT spider for url with the given body.
func Put(url string, body io.Reader, fn spinFunc) *spiderFunc {
	return NewHTTPSpider("PUT", url, body, fn)
}
// Delete builds a DELETE spider for url (no request body).
func Delete(url string, fn spinFunc) *spiderFunc {
	return NewHTTPSpider("DELETE", url, nil, fn)
}
|
// Copyright 2017 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hugolib
import (
"errors"
"fmt"
"html/template"
"io"
"mime"
"net/url"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/gohugoio/hugo/common/maps"
"github.com/gohugoio/hugo/resource"
"github.com/gohugoio/hugo/langs"
src "github.com/gohugoio/hugo/source"
"golang.org/x/sync/errgroup"
"github.com/gohugoio/hugo/config"
"github.com/gohugoio/hugo/media"
"github.com/markbates/inflect"
"golang.org/x/net/context"
"github.com/fsnotify/fsnotify"
bp "github.com/gohugoio/hugo/bufferpool"
"github.com/gohugoio/hugo/deps"
"github.com/gohugoio/hugo/helpers"
"github.com/gohugoio/hugo/hugolib/pagemeta"
"github.com/gohugoio/hugo/output"
"github.com/gohugoio/hugo/parser"
"github.com/gohugoio/hugo/related"
"github.com/gohugoio/hugo/source"
"github.com/gohugoio/hugo/tpl"
"github.com/gohugoio/hugo/transform"
"github.com/spf13/afero"
"github.com/spf13/cast"
"github.com/spf13/nitro"
"github.com/spf13/viper"
)
// The blank assignment keeps the transform import referenced.
var _ = transform.AbsURL
// used to indicate if run as a test.
var testMode bool
// defaultTimer is the fallback nitro timer used by timerStep when a Site
// has no timer of its own.
var defaultTimer *nitro.B
// Site contains all the information relevant for constructing a static
// site. The basic flow of information is as follows:
//
// 1. A list of Files is parsed and then converted into Pages.
//
// 2. Pages contain sections (based on the file they were generated from),
// aliases and slugs (included in a pages frontmatter) which are the
// various targets that will get generated. There will be canonical
// listing. The canonical path can be overruled based on a pattern.
//
// 3. Taxonomies are created via configuration and will present some aspect of
// the final page and typically a perm url.
//
// 4. All Pages are passed through a template based on their desired
// layout based on numerous different elements.
//
// 5. The entire collection of files is written to disk.
type Site struct {
	// owner is the HugoSites collection this site belongs to.
	owner *HugoSites
	*PageCollections
	Taxonomies TaxonomyList
	// Plural is what we get in the folder, so keep track of this mapping
	// to get the singular form from that value.
	taxonomiesPluralSingular map[string]string
	// This is temporary, see https://github.com/gohugoio/hugo/issues/2835
	// Maps "actors-gerard-depardieu" to "Gérard Depardieu" when preserveTaxonomyNames
	// is set.
	taxonomiesOrigKey map[string]string
	Sections Taxonomy
	Info SiteInfo
	Menus Menus
	// timer collects per-build-step timings; see timerStep.
	timer *nitro.B
	layoutHandler *output.LayoutHandler
	// Counters for pages excluded from this build.
	draftCount int
	futureCount int
	expiredCount int
	// Data holds the merged contents of the data directory.
	Data map[string]interface{}
	Language *langs.Language
	// disabledKinds holds the page kinds (from "disableKinds") to skip.
	disabledKinds map[string]bool
	// Output formats defined in site config per Page Kind, or some defaults
	// if not set.
	// Output formats defined in Page front matter will override these.
	outputFormats map[string]output.Formats
	// All the output formats and media types available for this site.
	// These values will be merged from the Hugo defaults, the site config and,
	// finally, the language settings.
	outputFormatsConfig output.Formats
	mediaTypesConfig media.Types
	// How to handle page front matter.
	frontmatterHandler pagemeta.FrontMatterHandler
	// We render each site for all the relevant output formats in serial with
	// this rendering context pointing to the current one.
	rc *siteRenderingContext
	// The output formats that we need to render this site in. This slice
	// will be fixed once set.
	// This will be the union of Site.Pages' outputFormats.
	// This slice will be sorted.
	renderFormats output.Formats
	// Logger etc.
	*deps.Deps `json:"-"`
	// The func used to title case titles.
	titleFunc func(s string) string
	relatedDocsHandler *relatedDocsHandler
}
// siteRenderingContext carries the output format currently being rendered.
type siteRenderingContext struct {
	output.Format
}
// initRenderFormats computes the sorted union of all output formats used
// by this site's pages and stores it in s.renderFormats.
func (s *Site) initRenderFormats() {
	seen := make(map[string]bool)
	var formats output.Formats
	for _, p := range s.Pages {
		for _, f := range p.outputFormats {
			if seen[f.Name] {
				continue
			}
			seen[f.Name] = true
			formats = append(formats, f)
		}
	}
	sort.Sort(formats)
	s.renderFormats = formats
}
// isEnabled reports whether pages of the given kind should be built.
// kindUnknown indicates a programmer error, hence the panic.
func (s *Site) isEnabled(kind string) bool {
	if kind == kindUnknown {
		panic("Unknown kind")
	}
	return !s.disabledKinds[kind]
}
// reset returns a new Site prepared for rebuild.
// Configuration-derived state is carried over from the old site; the page
// collections start empty and are refilled by the rebuild.
func (s *Site) reset() *Site {
	return &Site{Deps: s.Deps,
		layoutHandler: output.NewLayoutHandler(),
		disabledKinds: s.disabledKinds,
		titleFunc: s.titleFunc,
		relatedDocsHandler: newSearchIndexHandler(s.relatedDocsHandler.cfg),
		outputFormats: s.outputFormats,
		rc: s.rc,
		outputFormatsConfig: s.outputFormatsConfig,
		frontmatterHandler: s.frontmatterHandler,
		mediaTypesConfig: s.mediaTypesConfig,
		Language: s.Language,
		owner: s.owner,
		PageCollections: newPageCollections()}
}
// newSite creates a new site with the given configuration.
// It decodes media types, output formats, related-content config and the
// front matter handler from the merged site/language configuration.
func newSite(cfg deps.DepsCfg) (*Site, error) {
	c := newPageCollections()
	if cfg.Language == nil {
		cfg.Language = langs.NewDefaultLanguage(cfg.Cfg)
	}
	// Collect the page kinds disabled via the "disableKinds" setting.
	disabledKinds := make(map[string]bool)
	for _, disabled := range cast.ToStringSlice(cfg.Language.Get("disableKinds")) {
		disabledKinds[disabled] = true
	}
	var (
		mediaTypesConfig []map[string]interface{}
		outputFormatsConfig []map[string]interface{}
		siteOutputFormatsConfig output.Formats
		siteMediaTypesConfig media.Types
		err error
	)
	// Add language last, if set, so it gets precedence.
	for _, cfg := range []config.Provider{cfg.Cfg, cfg.Language} {
		if cfg.IsSet("mediaTypes") {
			mediaTypesConfig = append(mediaTypesConfig, cfg.GetStringMap("mediaTypes"))
		}
		if cfg.IsSet("outputFormats") {
			outputFormatsConfig = append(outputFormatsConfig, cfg.GetStringMap("outputFormats"))
		}
	}
	// Media types must be decoded before output formats, which reference them.
	siteMediaTypesConfig, err = media.DecodeTypes(mediaTypesConfig...)
	if err != nil {
		return nil, err
	}
	siteOutputFormatsConfig, err = output.DecodeFormats(siteMediaTypesConfig, outputFormatsConfig...)
	if err != nil {
		return nil, err
	}
	outputFormats, err := createSiteOutputFormats(siteOutputFormatsConfig, cfg.Language)
	if err != nil {
		return nil, err
	}
	var relatedContentConfig related.Config
	if cfg.Language.IsSet("related") {
		relatedContentConfig, err = related.DecodeConfig(cfg.Language.Get("related"))
		if err != nil {
			return nil, err
		}
	} else {
		// No explicit config: use the defaults, and index "tags" if that
		// taxonomy is defined.
		relatedContentConfig = related.DefaultConfig
		taxonomies := cfg.Language.GetStringMapString("taxonomies")
		if _, found := taxonomies["tag"]; found {
			relatedContentConfig.Add(related.IndexConfig{Name: "tags", Weight: 80})
		}
	}
	titleFunc := helpers.GetTitleFunc(cfg.Language.GetString("titleCaseStyle"))
	frontMatterHandler, err := pagemeta.NewFrontmatterHandler(cfg.Logger, cfg.Cfg)
	if err != nil {
		return nil, err
	}
	s := &Site{
		PageCollections: c,
		layoutHandler: output.NewLayoutHandler(),
		Language: cfg.Language,
		disabledKinds: disabledKinds,
		titleFunc: titleFunc,
		relatedDocsHandler: newSearchIndexHandler(relatedContentConfig),
		outputFormats: outputFormats,
		rc: &siteRenderingContext{output.HTMLFormat},
		outputFormatsConfig: siteOutputFormatsConfig,
		mediaTypesConfig: siteMediaTypesConfig,
		frontmatterHandler: frontMatterHandler,
	}
	s.Info = newSiteInfo(siteBuilderCfg{s: s, pageCollections: c, language: s.Language})
	return s, nil
}
// NewSite creates a new site with the given dependency configuration.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
func NewSite(cfg deps.DepsCfg) (*Site, error) {
	site, err := newSite(cfg)
	if err != nil {
		return nil, err
	}
	err = applyDepsIfNeeded(cfg, site)
	if err != nil {
		return nil, err
	}
	return site, nil
}
// NewSiteDefaultLang creates a new site in the default language.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
// TODO(bep) test refactor -- remove
func NewSiteDefaultLang(withTemplate ...func(templ tpl.TemplateHandler) error) (*Site, error) {
	// Start from a fresh viper instance seeded with Hugo's defaults.
	v := viper.New()
	if err := loadDefaultSettingsFor(v); err != nil {
		return nil, err
	}
	return newSiteForLang(langs.NewDefaultLanguage(v), withTemplate...)
}
// NewEnglishSite creates a new site in English language.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
// TODO(bep) test refactor -- remove
func NewEnglishSite(withTemplate ...func(templ tpl.TemplateHandler) error) (*Site, error) {
	v := viper.New()
	if err := loadDefaultSettingsFor(v); err != nil {
		return nil, err
	}
	return newSiteForLang(langs.NewLanguage("en", v), withTemplate...)
}
// newSiteForLang creates a new site in the given language, applying each
// withTemplate function in order during template setup.
func newSiteForLang(lang *langs.Language, withTemplate ...func(templ tpl.TemplateHandler) error) (*Site, error) {
	applyAll := func(templ tpl.TemplateHandler) error {
		for _, wt := range withTemplate {
			if err := wt(templ); err != nil {
				return err
			}
		}
		return nil
	}
	return NewSiteForCfg(deps.DepsCfg{WithTemplate: applyAll, Language: lang, Cfg: lang})
}
// NewSiteForCfg creates a new site for the given configuration.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
func NewSiteForCfg(cfg deps.DepsCfg) (*Site, error) {
	site, err := newSite(cfg)
	if err != nil {
		return nil, err
	}
	if err = applyDepsIfNeeded(cfg, site); err != nil {
		return nil, err
	}
	return site, nil
}
// SiteInfos is an ordered collection of SiteInfo, one per language.
type SiteInfos []*SiteInfo
// First is a convenience method to get the first Site, i.e. the main language.
// It returns nil for an empty collection.
func (s SiteInfos) First() *SiteInfo {
	if len(s) > 0 {
		return s[0]
	}
	return nil
}
// SiteInfo is the site-level data made available to templates.
type SiteInfo struct {
	Taxonomies TaxonomyList
	Authors AuthorList
	Social SiteSocial
	*PageCollections
	Menus *Menus
	Hugo *HugoInfo
	Title string
	RSSLink string
	Author map[string]interface{}
	LanguageCode string
	Copyright string
	// LastChange is the newest Lastmod of any regular page; see setupSitePages.
	LastChange time.Time
	Permalinks PermalinkOverrides
	Params map[string]interface{}
	BuildDrafts bool
	// URL-shaping flags derived from the site configuration.
	canonifyURLs bool
	relativeURLs bool
	uglyURLs func(p *Page) bool
	preserveTaxonomyNames bool
	Data *map[string]interface{}
	Config SiteConfig
	// owner is the HugoSites collection; s is the backing Site.
	owner *HugoSites
	s *Site
	multilingual *Multilingual
	Language *langs.Language
	LanguagePrefix string
	Languages langs.Languages
	defaultContentLanguageInSubdir bool
	sectionPagesMenu string
}
// String returns a log-friendly identifier for the site.
func (s *SiteInfo) String() string {
	return fmt.Sprintf("Site(%q)", s.Title)
}
// BaseURL returns the site's base URL, typed for safe use in templates.
func (s *SiteInfo) BaseURL() template.URL {
	return template.URL(s.s.PathSpec.BaseURL.String())
}
// ServerPort returns the port part of the BaseURL, 0 if none found.
func (s *SiteInfo) ServerPort() int {
	portStr := s.s.PathSpec.BaseURL.URL().Port()
	if portStr == "" {
		return 0
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return 0
	}
	return port
}
// GoogleAnalytics is kept here for historic reasons.
// It exposes the configured Google Analytics ID from the services config.
func (s *SiteInfo) GoogleAnalytics() string {
	return s.Config.Services.GoogleAnalytics.ID
}
// DisqusShortname is kept here for historic reasons.
// It exposes the configured Disqus shortname from the services config.
func (s *SiteInfo) DisqusShortname() string {
	return s.Config.Services.Disqus.Shortname
}
// Used in tests.
// siteBuilderCfg bundles the pieces needed to construct a SiteInfo.
type siteBuilderCfg struct {
	language *langs.Language
	s *Site
	pageCollections *PageCollections
}
// TODO(bep) get rid of this
// newSiteInfo builds a minimal SiteInfo from cfg; uglyURLs defaults to
// always-false.
func newSiteInfo(cfg siteBuilderCfg) SiteInfo {
	return SiteInfo{
		s: cfg.s,
		multilingual: newMultiLingualForLanguage(cfg.language),
		PageCollections: cfg.pageCollections,
		Params: make(map[string]interface{}),
		uglyURLs: func(p *Page) bool {
			return false
		},
	}
}
// SiteSocial is a place to put social details on a site level. These are the
// standard keys that themes will expect to have available, but can be
// expanded to any others on a per site basis
// Conventional keys include:
// github
// facebook
// facebook_admin
// twitter
// twitter_domain
// googleplus
// pinterest
// instagram
// youtube
// linkedin
type SiteSocial map[string]string
// Param is a convenience method to do lookups in SiteInfo's Params map.
// The key is coerced to a lowercase string before lookup.
//
// This method is also implemented on Page and Node.
func (s *SiteInfo) Param(key interface{}) (interface{}, error) {
	k, err := cast.ToStringE(key)
	if err != nil {
		return nil, err
	}
	return s.Params[strings.ToLower(k)], nil
}
// IsMultiLingual reports whether the site is built in more than one language.
func (s *SiteInfo) IsMultiLingual() bool {
	return len(s.Languages) > 1
}
// IsServer reports whether Hugo is running in watch/server mode.
func (s *SiteInfo) IsServer() bool {
	return s.owner.running
}
// refLink resolves ref (a path and/or fragment) to a permalink for the
// target page, absolute or relative depending on the relative flag, and
// optionally in the given output format.
func (s *SiteInfo) refLink(ref string, page *Page, relative bool, outputFormat string) (string, error) {
	var refURL *url.URL
	var err error
	ref = filepath.ToSlash(ref)
	refURL, err = url.Parse(ref)
	if err != nil {
		return "", err
	}
	var target *Page
	var link string
	if refURL.Path != "" {
		// Assign to the outer target/err. The original used := here, which
		// shadowed target, so the fragment handling below always saw a nil
		// target and fell through to the wrong anchor-ID branch.
		target, err = s.getPageNew(page, refURL.Path)
		if err != nil {
			return "", err
		}
		if target == nil {
			return "", fmt.Errorf("No page found with path or logical name \"%s\".\n", refURL.Path)
		}
		var permalinker Permalinker = target
		if outputFormat != "" {
			o := target.OutputFormats().Get(outputFormat)
			if o == nil {
				return "", fmt.Errorf("Output format %q not found for page %q", outputFormat, refURL.Path)
			}
			permalinker = o
		}
		if relative {
			link = permalinker.RelPermalink()
		} else {
			link = permalinker.Permalink()
		}
	}
	if refURL.Fragment != "" {
		link = link + "#" + refURL.Fragment
		// Append the unique page ID to the anchor unless plain anchors are
		// configured, preferring the resolved target over the source page.
		if refURL.Path != "" && target != nil && !target.getRenderingConfig().PlainIDAnchors {
			link = link + ":" + target.UniqueID()
		} else if page != nil && !page.getRenderingConfig().PlainIDAnchors {
			link = link + ":" + page.UniqueID()
		}
	}
	return link, nil
}
// Ref will give an absolute URL to ref in the given Page.
// An optional first option selects the output format.
func (s *SiteInfo) Ref(ref string, page *Page, options ...string) (string, error) {
	outputFormat := ""
	if len(options) > 0 {
		outputFormat = options[0]
	}
	return s.refLink(ref, page, false, outputFormat)
}
// RelRef will give a relative URL to ref in the given Page.
// An optional first option selects the output format.
func (s *SiteInfo) RelRef(ref string, page *Page, options ...string) (string, error) {
	outputFormat := ""
	if len(options) > 0 {
		outputFormat = options[0]
	}
	return s.refLink(ref, page, true, outputFormat)
}
// running reports whether this site's owner is in watch/server mode.
func (s *Site) running() bool {
	return s.owner != nil && s.owner.running
}
func init() {
	// Set up the package-level fallback build timer.
	defaultTimer = nitro.Initalize()
}
// timerStep records a named build step on the site's timer, lazily
// falling back to the package-level default timer.
func (s *Site) timerStep(step string) {
	if s.timer == nil {
		s.timer = defaultTimer
	}
	s.timer.Step(step)
}
// whatChanged summarizes a partial rebuild: whether content source files
// changed, whether anything else (templates/data/i18n) changed, and the
// set of changed source file names.
type whatChanged struct {
	source bool
	other bool
	files map[string]bool
}
// RegisterMediaTypes will register the Site's media types in the mime
// package, so it will behave correctly with Hugo's built-in server.
func (s *Site) RegisterMediaTypes() {
	for _, mt := range s.mediaTypesConfig {
		contentType := mt.Type() + "; charset=utf-8"
		for _, suffix := range mt.Suffixes {
			// Registration failures are deliberately ignored.
			_ = mime.AddExtensionType(mt.Delimiter+suffix, contentType)
		}
	}
}
// filterFileEvents drops events that should not trigger a rebuild:
// duplicates, ignored files, and directories.
func (s *Site) filterFileEvents(events []fsnotify.Event) []fsnotify.Event {
	var filtered []fsnotify.Event
	seen := make(map[fsnotify.Event]bool)
	for _, ev := range events {
		// Avoid processing the same event twice.
		if seen[ev] {
			continue
		}
		seen[ev] = true
		if s.SourceSpec.IgnoreFile(ev.Name) {
			continue
		}
		// Throw away any directories
		isRegular, err := s.SourceSpec.IsRegularSourceFile(ev.Name)
		if err != nil && os.IsNotExist(err) && (ev.Op&fsnotify.Remove == fsnotify.Remove || ev.Op&fsnotify.Rename == fsnotify.Rename) {
			// Force keep of event: the file is gone so it cannot be
			// stat'ed, but removals/renames must still be processed.
			isRegular = true
		}
		if !isRegular {
			continue
		}
		filtered = append(filtered, ev)
	}
	return filtered
}
// translateFileEvents collapses multiple events for the same file into the
// most significant one, preferring Write over Create over the first seen.
func (s *Site) translateFileEvents(events []fsnotify.Event) []fsnotify.Event {
	var filtered []fsnotify.Event
	eventMap := make(map[string][]fsnotify.Event)
	// We often get a Remove etc. followed by a Create, a Create followed by a Write.
	// Remove the superfluous events to make the update logic simpler.
	for _, ev := range events {
		eventMap[ev.Name] = append(eventMap[ev.Name], ev)
	}
	for _, ev := range events {
		mapped := eventMap[ev.Name]
		// Keep one event per name.
		// NOTE(review): the outer loop runs once per original event, so a
		// name with N events appends the kept event N times — confirm
		// whether downstream tolerates these duplicates.
		found := false
		var kept fsnotify.Event
		for i, ev2 := range mapped {
			if i == 0 {
				kept = ev2
			}
			if ev2.Op&fsnotify.Write == fsnotify.Write {
				kept = ev2
				found = true
			}
			if !found && ev2.Op&fsnotify.Create == fsnotify.Create {
				kept = ev2
			}
		}
		filtered = append(filtered, kept)
	}
	return filtered
}
// processPartial partially rebuilds a site given the filesystem events.
// It returns what kind of changes the events caused (content source,
// templates/data/i18n, and the set of changed files).
// TODO(bep) clean up/rewrite this method.
func (s *Site) processPartial(events []fsnotify.Event) (whatChanged, error) {
	events = s.filterFileEvents(events)
	events = s.translateFileEvents(events)
	s.Log.DEBUG.Printf("Rebuild for events %q", events)
	h := s.owner
	s.timerStep("initialize rebuild")
	// First we need to determine what changed
	var (
		sourceChanged       = []fsnotify.Event{}
		sourceReallyChanged = []fsnotify.Event{}
		contentFilesChanged []string
		tmplChanged         = []fsnotify.Event{}
		dataChanged         = []fsnotify.Event{}
		i18nChanged         = []fsnotify.Event{}
		shortcodesChanged   = make(map[string]bool)
		sourceFilesChanged  = make(map[string]bool)
		// prevent spamming the log on changes
		logger = helpers.NewDistinctFeedbackLogger()
	)
	cachePartitions := make([]string, len(events))
	for i, ev := range events {
		cachePartitions[i] = resource.ResourceKeyPartition(ev.Name)
		if s.isContentDirEvent(ev) {
			logger.Println("Source changed", ev)
			sourceChanged = append(sourceChanged, ev)
		}
		if s.isLayoutDirEvent(ev) {
			logger.Println("Template changed", ev)
			tmplChanged = append(tmplChanged, ev)
			if strings.Contains(ev.Name, "shortcodes") {
				clearIsInnerShortcodeCache()
				shortcode := filepath.Base(ev.Name)
				shortcode = strings.TrimSuffix(shortcode, filepath.Ext(shortcode))
				shortcodesChanged[shortcode] = true
			}
		}
		if s.isDataDirEvent(ev) {
			logger.Println("Data changed", ev)
			dataChanged = append(dataChanged, ev)
		}
		if s.isI18nEvent(ev) {
			logger.Println("i18n changed", ev)
			// Fix: append to i18nChanged. The original appended to
			// dataChanged here, which both dropped previously collected
			// i18n events and polluted the i18n list with data events.
			i18nChanged = append(i18nChanged, ev)
		}
	}
	// These in memory resource caches will be rebuilt on demand.
	for _, site := range s.owner.Sites {
		site.ResourceSpec.ResourceCache.DeletePartitions(cachePartitions...)
	}
	if len(tmplChanged) > 0 || len(i18nChanged) > 0 {
		sites := s.owner.Sites
		first := sites[0]
		// TOD(bep) globals clean
		if err := first.Deps.LoadResources(); err != nil {
			return whatChanged{}, err
		}
		s.TemplateHandler().PrintErrors()
		// Propagate the reloaded resources to the other language sites.
		for i := 1; i < len(sites); i++ {
			site := sites[i]
			var err error
			depsCfg := deps.DepsCfg{
				Language:   site.Language,
				MediaTypes: site.mediaTypesConfig,
			}
			site.Deps, err = first.Deps.ForLanguage(depsCfg)
			if err != nil {
				return whatChanged{}, err
			}
		}
		s.timerStep("template prep")
	}
	if len(dataChanged) > 0 {
		if err := s.readDataFromSourceFS(); err != nil {
			s.Log.ERROR.Println(err)
		}
	}
	for _, ev := range sourceChanged {
		removed := false
		if ev.Op&fsnotify.Remove == fsnotify.Remove {
			removed = true
		}
		// Some editors (Vim) sometimes issue only a Rename operation when writing an existing file
		// Sometimes a rename operation means that file has been renamed other times it means
		// it's been updated
		if ev.Op&fsnotify.Rename == fsnotify.Rename {
			// If the file is still on disk, it's only been updated, if it's not, it's been moved
			if ex, err := afero.Exists(s.Fs.Source, ev.Name); !ex || err != nil {
				removed = true
			}
		}
		if removed && isContentFile(ev.Name) {
			h.removePageByFilename(ev.Name)
		}
		sourceReallyChanged = append(sourceReallyChanged, ev)
		sourceFilesChanged[ev.Name] = true
	}
	for shortcode := range shortcodesChanged {
		// There are certain scenarios that, when a shortcode changes,
		// it isn't sufficient to just rerender the already parsed shortcode.
		// One example is if the user adds a new shortcode to the content file first,
		// and then creates the shortcode on the file system.
		// To handle these scenarios, we must do a full reprocessing of the
		// pages that keeps a reference to the changed shortcode.
		pagesWithShortcode := h.findPagesByShortcode(shortcode)
		for _, p := range pagesWithShortcode {
			contentFilesChanged = append(contentFilesChanged, p.File.Filename())
		}
	}
	if len(sourceReallyChanged) > 0 || len(contentFilesChanged) > 0 {
		var filenamesChanged []string
		for _, e := range sourceReallyChanged {
			filenamesChanged = append(filenamesChanged, e.Name)
		}
		if len(contentFilesChanged) > 0 {
			filenamesChanged = append(filenamesChanged, contentFilesChanged...)
		}
		filenamesChanged = helpers.UniqueStrings(filenamesChanged)
		if err := s.readAndProcessContent(filenamesChanged...); err != nil {
			return whatChanged{}, err
		}
	}
	changed := whatChanged{
		source: len(sourceChanged) > 0,
		other:  len(tmplChanged) > 0 || len(i18nChanged) > 0 || len(dataChanged) > 0,
		files:  sourceFilesChanged,
	}
	return changed, nil
}
// loadData resets s.Data and fills it from every file found in the given
// data filesystem, delegating per-file handling to handleDataFile.
func (s *Site) loadData(fs afero.Fs) error {
	s.Data = make(map[string]interface{})
	sourceSpec := src.NewSourceSpec(s.PathSpec, fs)
	for _, r := range sourceSpec.NewFilesystem("").Files() {
		if err := s.handleDataFile(r); err != nil {
			return err
		}
	}
	return nil
}
// handleDataFile parses one data file and merges its contents into the
// s.Data tree at the position given by the file's directory path.
// Parse errors are logged and skipped; only open failures are returned.
func (s *Site) handleDataFile(r source.ReadableFile) error {
	var current map[string]interface{}
	f, err := r.Open()
	if err != nil {
		return fmt.Errorf("Failed to open data file %q: %s", r.LogicalName(), err)
	}
	defer f.Close()
	// Crawl in data tree to insert data
	current = s.Data
	keyParts := strings.Split(r.Dir(), helpers.FilePathSeparator)
	// The first path element is the virtual folder (typically theme name), which is
	// not part of the key.
	if len(keyParts) > 1 {
		for _, key := range keyParts[1:] {
			if key != "" {
				if _, ok := current[key]; !ok {
					current[key] = make(map[string]interface{})
				}
				current = current[key].(map[string]interface{})
			}
		}
	}
	data, err := s.readData(r)
	if err != nil {
		// Parse failures are reported but do not abort the whole load.
		s.Log.ERROR.Printf("Failed to read data from %s: %s", filepath.Join(r.Path(), r.LogicalName()), err)
		return nil
	}
	if data == nil {
		return nil
	}
	// filepath.Walk walks the files in lexical order, '/' comes before '.'
	// this warning could happen if
	// 1. A theme uses the same key; the main data folder wins
	// 2. A sub folder uses the same key: the sub folder wins
	higherPrecedentData := current[r.BaseFileName()]
	switch data.(type) {
	case nil:
		// hear the crickets?
	case map[string]interface{}:
		switch higherPrecedentData.(type) {
		case nil:
			current[r.BaseFileName()] = data
		case map[string]interface{}:
			// merge maps: insert entries from data for keys that
			// don't already exist in higherPrecedentData
			higherPrecedentMap := higherPrecedentData.(map[string]interface{})
			for key, value := range data.(map[string]interface{}) {
				if _, exists := higherPrecedentMap[key]; exists {
					s.Log.WARN.Printf("Data for key '%s' in path '%s' is overridden higher precedence data already in the data tree", key, r.Path())
				} else {
					higherPrecedentMap[key] = value
				}
			}
		default:
			// can't merge: higherPrecedentData is not a map
			s.Log.WARN.Printf("The %T data from '%s' overridden by "+
				"higher precedence %T data already in the data tree", data, r.Path(), higherPrecedentData)
		}
	case []interface{}:
		if higherPrecedentData == nil {
			current[r.BaseFileName()] = data
		} else {
			// we don't merge array data
			s.Log.WARN.Printf("The %T data from '%s' overridden by "+
				"higher precedence %T data already in the data tree", data, r.Path(), higherPrecedentData)
		}
	default:
		s.Log.ERROR.Printf("unexpected data type %T in file %s", data, r.LogicalName())
	}
	return nil
}
// readData opens and parses a single data file, dispatching on its
// extension to the YAML, JSON or TOML parser.
func (s *Site) readData(f source.ReadableFile) (interface{}, error) {
	file, err := f.Open()
	if err != nil {
		return nil, fmt.Errorf("readData: failed to open data file: %s", err)
	}
	defer file.Close()
	content := helpers.ReaderToBytes(file)
	switch f.Extension() {
	case "yaml", "yml":
		return parser.HandleYAMLData(content)
	case "json":
		return parser.HandleJSONData(content)
	case "toml":
		return parser.HandleTOMLMetaData(content)
	default:
		return nil, fmt.Errorf("Data not supported for extension '%s'", f.Extension())
	}
}
// readDataFromSourceFS (re)loads the data directory into s.Data and
// records the "load data" timing step.
func (s *Site) readDataFromSourceFS() error {
	err := s.loadData(s.PathSpec.BaseFs.Data.Fs)
	s.timerStep("load data")
	return err
}
// process runs the non-rendering half of a build: initialization, data
// loading, and reading/converting the content source.
func (s *Site) process(config BuildCfg) (err error) {
	if err = s.initialize(); err != nil {
		return
	}
	s.timerStep("initialize")
	if err = s.readDataFromSourceFS(); err != nil {
		return
	}
	// NOTE(review): this step label says "load i18n" but follows the data
	// read above (which records its own "load data" step) — confirm the
	// label is intentional.
	s.timerStep("load i18n")
	if err := s.readAndProcessContent(); err != nil {
		return err
	}
	s.timerStep("read and convert pages from source")
	return err
}
// setupSitePages wires Prev/Next between the regular pages and records
// the newest Lastmod as the site's LastChange.
func (s *Site) setupSitePages() {
	var lastChange time.Time
	pages := s.RegularPages
	for i, page := range pages {
		if i > 0 {
			page.Prev = pages[i-1]
		}
		if i < len(pages)-1 {
			page.Next = pages[i+1]
		}
		// Determine Site.Info.LastChange. The Lastmod selection logic has
		// already been applied per page, so this is *the* date to use; we
		// cannot just pick the last page in the default sort, because that
		// may not be ordered by date.
		if page.Lastmod.After(lastChange) {
			lastChange = page.Lastmod
		}
	}
	s.Info.LastChange = lastChange
}
// render writes the site out for one output format. Aliases, sitemap,
// robots.txt and the 404 page are only produced for the first format.
func (s *Site) render(config *BuildCfg, outFormatIdx int) (err error) {
	if outFormatIdx == 0 {
		if err = s.preparePages(); err != nil {
			return
		}
		s.timerStep("prepare pages")
		// Note that even if disableAliases is set, the aliases themselves are
		// preserved on page. The motivation with this is to be able to generate
		// 301 redirects in a .htaccess file and similar using a custom output format.
		if !s.Cfg.GetBool("disableAliases") {
			// Aliases must be rendered before pages.
			// Some sites, Hugo docs included, have faulty alias definitions that point
			// to itself or another real page. These will be overwritten in the next
			// step.
			if err = s.renderAliases(); err != nil {
				return
			}
			s.timerStep("render and write aliases")
		}
	}
	if err = s.renderPages(config); err != nil {
		return
	}
	s.timerStep("render and write pages")
	// TODO(bep) render consider this, ref. render404 etc.
	if outFormatIdx > 0 {
		return
	}
	if err = s.renderSitemap(); err != nil {
		return
	}
	s.timerStep("render and write Sitemap")
	if err = s.renderRobotsTXT(); err != nil {
		return
	}
	s.timerStep("render and write robots.txt")
	if err = s.render404(); err != nil {
		return
	}
	s.timerStep("render and write 404")
	return
}
// Initialise is the exported wrapper around initialize.
func (s *Site) Initialise() (err error) {
	return s.initialize()
}
// initialize resets the menus and rebuilds the SiteInfo from config.
func (s *Site) initialize() (err error) {
	s.Menus = Menus{}
	return s.initializeSiteInfo()
}
// HomeAbsURL is a convenience method giving the absolute URL to the home page.
// In multilingual builds the language code is used as the base path.
func (s *SiteInfo) HomeAbsURL() string {
	var base string
	if s.IsMultiLingual() {
		base = s.Language.Lang
	}
	return s.owner.AbsURL(base, false)
}
// SitemapAbsURL is a convenience method giving the absolute URL to the sitemap.
func (s *SiteInfo) SitemapAbsURL() string {
	sitemap := parseSitemap(s.s.Cfg.GetStringMap("sitemap"))
	link := s.HomeAbsURL()
	if !strings.HasSuffix(link, "/") {
		link += "/"
	}
	return link + sitemap.Filename
}
// initializeSiteInfo builds s.Info from the language and site
// configuration: permalink overrides, language prefix, uglyURLs policy,
// services config and the RSS link.
func (s *Site) initializeSiteInfo() error {
	var (
		lang = s.Language
		languages langs.Languages
	)
	if s.owner != nil && s.owner.multilingual != nil {
		languages = s.owner.multilingual.Languages
	}
	params := lang.Params()
	permalinks := make(PermalinkOverrides)
	for k, v := range s.Cfg.GetStringMapString("permalinks") {
		permalinks[k] = pathPattern(v)
	}
	defaultContentInSubDir := s.Cfg.GetBool("defaultContentLanguageInSubdir")
	defaultContentLanguage := s.Cfg.GetString("defaultContentLanguage")
	// Non-default languages (or all languages, if configured so) are
	// served under a "/<lang>" prefix.
	languagePrefix := ""
	if s.multilingualEnabled() && (defaultContentInSubDir || lang.Lang != defaultContentLanguage) {
		languagePrefix = "/" + lang.Lang
	}
	var multilingual *Multilingual
	if s.owner != nil {
		multilingual = s.owner.multilingual
	}
	// uglyURLs may be configured as a bool, a string (from the CLI flag),
	// or a per-section map; normalize to a predicate over pages.
	var uglyURLs = func(p *Page) bool {
		return false
	}
	v := s.Cfg.Get("uglyURLs")
	if v != nil {
		switch vv := v.(type) {
		case bool:
			uglyURLs = func(p *Page) bool {
				return vv
			}
		case string:
			// This is what we get from the CLI (--uglyURLs)
			vvv := cast.ToBool(vv)
			uglyURLs = func(p *Page) bool {
				return vvv
			}
		default:
			m := cast.ToStringMapBool(v)
			uglyURLs = func(p *Page) bool {
				return m[p.Section()]
			}
		}
	}
	siteConfig, err := loadSiteConfig(lang)
	if err != nil {
		return err
	}
	s.Info = SiteInfo{
		Title: lang.GetString("title"),
		Author: lang.GetStringMap("author"),
		Social: lang.GetStringMapString("social"),
		LanguageCode: lang.GetString("languageCode"),
		Copyright: lang.GetString("copyright"),
		multilingual: multilingual,
		Language: lang,
		LanguagePrefix: languagePrefix,
		Languages: languages,
		defaultContentLanguageInSubdir: defaultContentInSubDir,
		sectionPagesMenu: lang.GetString("sectionPagesMenu"),
		BuildDrafts: s.Cfg.GetBool("buildDrafts"),
		canonifyURLs: s.Cfg.GetBool("canonifyURLs"),
		relativeURLs: s.Cfg.GetBool("relativeURLs"),
		uglyURLs: uglyURLs,
		preserveTaxonomyNames: lang.GetBool("preserveTaxonomyNames"),
		PageCollections: s.PageCollections,
		Menus: &s.Menus,
		Params: params,
		Permalinks: permalinks,
		Data: &s.Data,
		owner: s.owner,
		s: s,
		Config: siteConfig,
		// TODO(bep) make this Menu and similar into delegate methods on SiteInfo
		Taxonomies: s.Taxonomies,
	}
	// Expose the home page's RSS link, if an RSS output format is enabled.
	rssOutputFormat, found := s.outputFormats[KindHome].GetByName(output.RSSFormat.Name)
	if found {
		s.Info.RSSLink = s.permalink(rssOutputFormat.BaseFilename())
	}
	return nil
}
// isI18nEvent reports whether the file event concerns an i18n (translation) file.
func (s *Site) isI18nEvent(e fsnotify.Event) bool {
	return s.BaseFs.SourceFilesystems.IsI18n(e.Name)
}
// isDataDirEvent reports whether the file event concerns a data directory file.
func (s *Site) isDataDirEvent(e fsnotify.Event) bool {
	return s.BaseFs.SourceFilesystems.IsData(e.Name)
}
// isLayoutDirEvent reports whether the file event concerns a layout/template file.
func (s *Site) isLayoutDirEvent(e fsnotify.Event) bool {
	return s.BaseFs.SourceFilesystems.IsLayout(e.Name)
}
// absContentDir returns the absolute path to the site's content directory.
func (s *Site) absContentDir() string {
	return s.PathSpec.AbsPathify(s.PathSpec.ContentDir)
}
// isContentDirEvent reports whether the file event concerns a content file.
func (s *Site) isContentDirEvent(e fsnotify.Event) bool {
	return s.BaseFs.IsContent(e.Name)
}
// contentCaptureResultHandler routes captured content files to the
// per-language site content processors.
type contentCaptureResultHandler struct {
	// defaultContentProcessor receives files whose language has no
	// registered processor.
	defaultContentProcessor *siteContentProcessor
	// contentProcessors maps language code to its processor.
	contentProcessors map[string]*siteContentProcessor
}
// getContentProcessor returns the processor registered for lang, falling
// back to the default-language processor when none is registered.
func (c *contentCaptureResultHandler) getContentProcessor(lang string) *siteContentProcessor {
	if processor, ok := c.contentProcessors[lang]; ok {
		return processor
	}
	return c.defaultContentProcessor
}
// handleSingles dispatches each single (non-bundle) content file to the
// processor for its language.
func (c *contentCaptureResultHandler) handleSingles(fis ...*fileInfo) {
	for _, info := range fis {
		c.getContentProcessor(info.Lang()).processSingle(info)
	}
}
// handleBundles dispatches each page bundle to the processor for its language.
func (c *contentCaptureResultHandler) handleBundles(d *bundleDirs) {
	for _, bundle := range d.bundles {
		c.getContentProcessor(bundle.fi.Lang()).processBundle(bundle)
	}
}
// handleCopyFiles forwards plain asset files to every registered language
// processor (assets are shared across languages).
func (c *contentCaptureResultHandler) handleCopyFiles(files ...pathLangFile) {
	for _, processor := range c.contentProcessors {
		processor.processAssets(files)
	}
}
// readAndProcessContent captures the content filesystem — or only the
// given filenames, on a partial rebuild — and feeds the captured files to
// one content-processor goroutine per enabled language.
func (s *Site) readAndProcessContent(filenames ...string) error {
	ctx := context.Background()
	g, ctx := errgroup.WithContext(ctx)
	defaultContentLanguage := s.SourceSpec.DefaultContentLanguage
	contentProcessors := make(map[string]*siteContentProcessor)
	var defaultContentProcessor *siteContentProcessor
	sites := s.owner.langSite()
	for k, v := range sites {
		if v.Language.Disabled {
			continue
		}
		// len(filenames) > 0 signals a partial (watch-triggered) rebuild.
		proc := newSiteContentProcessor(ctx, len(filenames) > 0, v)
		contentProcessors[k] = proc
		if k == defaultContentLanguage {
			defaultContentProcessor = proc
		}
		// proc is declared fresh each iteration, so capturing it in the
		// closure is safe.
		g.Go(func() error {
			return proc.process(ctx)
		})
	}
	var (
		handler captureResultHandler
		bundleMap *contentChangeMap
	)
	mainHandler := &contentCaptureResultHandler{contentProcessors: contentProcessors, defaultContentProcessor: defaultContentProcessor}
	sourceSpec := source.NewSourceSpec(s.PathSpec, s.BaseFs.Content.Fs)
	if s.running() {
		// Need to track changes.
		bundleMap = s.owner.ContentChanges
		handler = &captureResultHandlerChain{handlers: []captureBundlesHandler{mainHandler, bundleMap}}
	} else {
		handler = mainHandler
	}
	c := newCapturer(s.Log, sourceSpec, handler, bundleMap, filenames...)
	err1 := c.capture()
	// Close the processors' input channels so the worker goroutines can
	// finish, then wait for them. The capture error takes precedence.
	for _, proc := range contentProcessors {
		proc.closeInput()
	}
	err2 := g.Wait()
	if err1 != nil {
		return err1
	}
	return err2
}
// buildSiteMeta assembles the taxonomy collections and sets per-page
// kind-dependent values. It is a no-op when the site has no pages.
func (s *Site) buildSiteMeta() (err error) {
	defer s.timerStep("build Site meta")

	if len(s.Pages) == 0 {
		return
	}

	s.assembleTaxonomies()

	// setValuesForKind depends on the taxonomies assembled above.
	for _, page := range s.AllPages {
		page.setValuesForKind(s)
	}
	return
}
// getMenusFromConfig builds the menu collection declared in the site
// configuration (the "menu" section of the language config). Entries that
// cannot be decoded are logged and skipped.
func (s *Site) getMenusFromConfig() Menus {
	ret := Menus{}

	menus := s.Language.GetStringMap("menu")
	if menus == nil {
		return ret
	}

	for name, menu := range menus {
		entries, err := cast.ToSliceE(menu)
		if err != nil {
			s.Log.ERROR.Printf("unable to process menus in site config\n")
			s.Log.ERROR.Println(err)
			continue
		}

		for _, entry := range entries {
			s.Log.DEBUG.Printf("found menu: %q, in site config\n", name)

			menuEntry := MenuEntry{Menu: name}
			ime, err := cast.ToStringMapE(entry)
			if err != nil {
				s.Log.ERROR.Printf("unable to process menus in site config\n")
				s.Log.ERROR.Println(err)
			}
			// marshallMap is invoked even when decoding failed; ime is
			// nil in that case and the entry stays mostly empty.
			menuEntry.marshallMap(ime)
			menuEntry.URL = s.Info.createNodeMenuEntryURL(menuEntry.URL)

			if ret[name] == nil {
				ret[name] = &Menu{}
			}
			*ret[name] = ret[name].add(&menuEntry)
		}
	}

	return ret
}
// createNodeMenuEntryURL normalizes a menu entry URL so it matches the
// node URLs: site-relative entries (starting with "/") are urlized and,
// unless canonifyURLs is enabled, prefixed with the site's context root.
// Absolute/external entries are returned untouched.
func (s *SiteInfo) createNodeMenuEntryURL(in string) string {
	if !strings.HasPrefix(in, "/") {
		return in
	}

	// Make it match the node URLs.
	out := helpers.SanitizeURLKeepTrailingSlash(s.s.PathSpec.URLize(in))
	if !s.canonifyURLs {
		out = helpers.AddContextRoot(s.s.PathSpec.BaseURL.String(), out)
	}
	return out
}
// assembleMenus (re)builds s.Menus from three sources, in precedence
// order: explicit site-config entries, the optional auto-generated
// sectionPagesMenu, and entries declared in page front matter. Entries
// with a Parent are then attached beneath their parent.
func (s *Site) assembleMenus() {
	s.Menus = Menus{}
	// twoD keys the flat entry map by (menu name, entry key).
	type twoD struct {
		MenuName, EntryName string
	}
	flat := map[twoD]*MenuEntry{}
	children := map[twoD]Menu{}
	// add menu entries from config to flat hash
	menuConfig := s.getMenusFromConfig()
	for name, menu := range menuConfig {
		for _, me := range *menu {
			flat[twoD{name, me.KeyName()}] = me
		}
	}
	sectionPagesMenu := s.Info.sectionPagesMenu
	pages := s.Pages
	if sectionPagesMenu != "" {
		for _, p := range pages {
			if p.Kind == KindSection {
				// From Hugo 0.22 we have nested sections, but until we get a
				// feel of how that would work in this setting, let us keep
				// this menu for the top level only.
				id := p.Section()
				// An explicit config entry wins over the auto-generated one.
				if _, ok := flat[twoD{sectionPagesMenu, id}]; ok {
					continue
				}
				me := MenuEntry{Identifier: id,
					Name: p.LinkTitle(),
					Weight: p.Weight,
					URL: p.RelPermalink()}
				flat[twoD{sectionPagesMenu, me.KeyName()}] = &me
			}
		}
	}
	// Add menu entries provided by pages
	for _, p := range pages {
		for name, me := range p.Menus() {
			if _, ok := flat[twoD{name, me.KeyName()}]; ok {
				s.Log.ERROR.Printf("Two or more menu items have the same name/identifier in Menu %q: %q.\nRename or set an unique identifier.\n", name, me.KeyName())
				continue
			}
			flat[twoD{name, me.KeyName()}] = me
		}
	}
	// Create Children Menus First
	for _, e := range flat {
		if e.Parent != "" {
			children[twoD{e.Menu, e.Parent}] = children[twoD{e.Menu, e.Parent}].add(e)
		}
	}
	// Placing Children in Parents (in flat)
	for p, childmenu := range children {
		_, ok := flat[twoD{p.MenuName, p.EntryName}]
		if !ok {
			// if parent does not exist, create one without a URL
			flat[twoD{p.MenuName, p.EntryName}] = &MenuEntry{Name: p.EntryName, URL: ""}
		}
		flat[twoD{p.MenuName, p.EntryName}].Children = childmenu
	}
	// Assembling Top Level of Tree
	for menu, e := range flat {
		if e.Parent == "" {
			_, ok := s.Menus[menu.MenuName]
			if !ok {
				s.Menus[menu.MenuName] = &Menu{}
			}
			*s.Menus[menu.MenuName] = s.Menus[menu.MenuName].add(e)
		}
	}
}
// getTaxonomyKey returns the canonical key for a taxonomy value,
// path-sanitizing it unless preserveTaxonomyNames is configured.
func (s *Site) getTaxonomyKey(key string) string {
	if !s.Info.preserveTaxonomyNames {
		return s.PathSpec.MakePathSanitized(key)
	}
	// Keep as is.
	return key
}
// We need to create the top level taxonomy early in the build process
// to be able to determine the page Kind correctly.
func (s *Site) createTaxonomiesEntries() {
	s.Taxonomies = make(TaxonomyList)
	for _, plural := range s.Language.GetStringMapString("taxonomies") {
		s.Taxonomies[plural] = make(Taxonomy)
	}
}
// assembleTaxonomies walks all pages and groups them into the configured
// taxonomies (weighted by the "<plural>_weight" front matter param),
// records the plural→singular mapping, and — when preserveTaxonomyNames
// is set — remembers the original (unsanitized) term spellings.
func (s *Site) assembleTaxonomies() {
	s.taxonomiesPluralSingular = make(map[string]string)
	s.taxonomiesOrigKey = make(map[string]string)
	taxonomies := s.Language.GetStringMapString("taxonomies")
	s.Log.INFO.Printf("found taxonomies: %#v\n", taxonomies)
	for singular, plural := range taxonomies {
		s.taxonomiesPluralSingular[plural] = singular
		for _, p := range s.Pages {
			// The front matter value may be a single string or a list.
			vals := p.getParam(plural, !s.Info.preserveTaxonomyNames)
			weight := p.getParamToLower(plural + "_weight")
			if weight == nil {
				weight = 0
			}
			if vals != nil {
				if v, ok := vals.([]string); ok {
					for _, idx := range v {
						x := WeightedPage{weight.(int), p}
						s.Taxonomies[plural].add(s.getTaxonomyKey(idx), x)
						if s.Info.preserveTaxonomyNames {
							// Need to track the original
							s.taxonomiesOrigKey[fmt.Sprintf("%s-%s", plural, s.PathSpec.MakePathSanitized(idx))] = idx
						}
					}
				} else if v, ok := vals.(string); ok {
					x := WeightedPage{weight.(int), p}
					s.Taxonomies[plural].add(s.getTaxonomyKey(v), x)
					if s.Info.preserveTaxonomyNames {
						// Need to track the original
						s.taxonomiesOrigKey[fmt.Sprintf("%s-%s", plural, s.PathSpec.MakePathSanitized(v))] = v
					}
				} else {
					s.Log.ERROR.Printf("Invalid %s in %s\n", plural, p.File.Path())
				}
			}
		}
		// Sort each term's page list by weight.
		for k := range s.Taxonomies[plural] {
			s.Taxonomies[plural][k].Sort()
		}
	}
	s.Info.Taxonomies = s.Taxonomies
}
// Prepare site for a new full build: fresh related-content index and page
// collections, zeroed counters, and per-page derived state cleared.
func (s *Site) resetBuildState() {
	s.relatedDocsHandler = newSearchIndexHandler(s.relatedDocsHandler.cfg)
	s.PageCollections = newPageCollectionsFromPages(s.rawAllPages)
	// TODO(bep) get rid of this double
	s.Info.PageCollections = s.PageCollections
	s.draftCount = 0
	s.futureCount = 0
	s.expiredCount = 0
	// NOTE(review): spc is a package-level cache shared across sites —
	// resetting it here affects all sites; confirm intended on rebuild.
	spc = newPageCache()
	for _, p := range s.rawAllPages {
		// Clear state derived during the previous build.
		p.subSections = Pages{}
		p.parent = nil
		p.scratch = maps.NewScratch()
		p.mainPageOutput = nil
	}
}
// kindFromSections derives the page Kind from its section path: one of
// the two taxonomy kinds when the first section names a taxonomy,
// KindSection otherwise.
func (s *Site) kindFromSections(sections []string) string {
	if len(sections) == 0 {
		return KindSection
	}

	_, isTaxonomy := s.Taxonomies[sections[0]]
	if !isTaxonomy {
		return KindSection
	}

	// "/tags" is the terms page, "/tags/foo" a single taxonomy listing.
	if len(sections) == 1 {
		return KindTaxonomyTerm
	}
	return KindTaxonomy
}
// layouts returns the candidate layout names for the given page output,
// resolved by the site's layout handler.
func (s *Site) layouts(p *PageOutput) ([]string, error) {
	return s.layoutHandler.For(p.layoutDescriptor, p.outputFormat)
}
// preparePages resolves layouts and template data for every page,
// collecting all failures into one combined error.
func (s *Site) preparePages() error {
	var errs []error

	for _, page := range s.Pages {
		if err := page.prepareLayouts(); err != nil {
			errs = append(errs, err)
		}
		if err := page.prepareData(s); err != nil {
			errs = append(errs, err)
		}
	}

	if len(errs) == 0 {
		return nil
	}
	return fmt.Errorf("Prepare pages failed: %.100q…", errs)
}
func errorCollator(results <-chan error, errs chan<- error) {
errMsgs := []string{}
for err := range results {
if err != nil {
errMsgs = append(errMsgs, err.Error())
}
}
if len(errMsgs) == 0 {
errs <- nil
} else {
errs <- errors.New(strings.Join(errMsgs, "\n"))
}
close(errs)
}
// appendThemeTemplates expands the layout candidates for themed sites:
// project templates first, then the theme variants of the same names, and
// internal templates ("_internal/...") last. Without a theme the input is
// returned unchanged.
func (s *Site) appendThemeTemplates(in []string) []string {
	if !s.PathSpec.ThemeSet() {
		return in
	}

	isInternal := func(name string) bool {
		return strings.HasPrefix(name, "_internal/")
	}

	out := []string{}
	// First place all non internal templates.
	for _, name := range in {
		if !isInternal(name) {
			out = append(out, name)
		}
	}
	// Then place theme templates with the same names.
	for _, name := range in {
		if !isInternal(name) {
			out = append(out, "theme/"+name)
		}
	}
	// Lastly place internal templates.
	for _, name := range in {
		if isInternal(name) {
			out = append(out, name)
		}
	}
	return out
}
// GetPage looks up a page for the given ref.
// In Hugo <= 0.44 the first argument was the page Kind (section, home, ...)
// followed by either a unix-style path or separate path elements. With the
// Kind removed from the API, the common two-argument form
// {{ .Site.GetPage "section" "my/section" }} must keep working, so up to
// two (non-empty) arguments are still accepted.
func (s *SiteInfo) GetPage(ref ...string) (*Page, error) {
	// Filter the empty/root placeholders seen in the wild:
	// .Site.GetPage "home" "" and .Site.GetPage "home" "/".
	refs := make([]string, 0, len(ref))
	for _, r := range ref {
		if r != "" && r != "/" {
			refs = append(refs, r)
		}
	}

	if len(refs) > 2 {
		// This was allowed in Hugo <= 0.44, but we cannot support this with the
		// new API. This should be the most unusual case.
		return nil, fmt.Errorf(`too many arguments to .Site.GetPage: %v. Use lookups on the form {{ .Site.GetPage "/posts/mypage-md" }}`, ref)
	}

	var key string
	switch {
	case len(refs) == 0 || refs[0] == KindHome:
		key = "/"
	case len(refs) == 1:
		key = refs[0]
	default:
		key = refs[1]
	}

	key = filepath.ToSlash(key)
	if !strings.HasPrefix(key, "/") {
		key = "/" + key
	}

	return s.getPageNew(nil, key)
}
// permalinkForOutputFormat builds the absolute permalink for link,
// substituting the output format's protocol into the base URL when the
// format declares one (e.g. webcal:// for calendars).
func (s *Site) permalinkForOutputFormat(link string, f output.Format) (string, error) {
	baseURL := s.PathSpec.BaseURL.String()
	if f.Protocol != "" {
		withProtocol, err := s.PathSpec.BaseURL.WithProtocol(f.Protocol)
		if err != nil {
			return "", err
		}
		baseURL = withProtocol
	}
	return s.PathSpec.PermalinkForBaseURL(link, baseURL), nil
}
// permalink returns the absolute URL for link under the site's base URL.
func (s *Site) permalink(link string) string {
	return s.PathSpec.PermalinkForBaseURL(link, s.PathSpec.BaseURL.String())
}
// renderAndWriteXML renders d through the given layouts, prepends the XML
// declaration, absolutizes URLs in the output, and publishes the result to
// dest. Render errors are logged as warnings, not returned.
func (s *Site) renderAndWriteXML(statCounter *uint64, name string, dest string, d interface{}, layouts ...string) error {
	s.Log.DEBUG.Printf("Render XML for %q to %q", name, dest)
	renderBuffer := bp.GetBuffer()
	defer bp.PutBuffer(renderBuffer)
	renderBuffer.WriteString("<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\" ?>\n")
	if err := s.renderForLayouts(name, d, renderBuffer, layouts...); err != nil {
		// Template failures are warnings here; the site build continues.
		helpers.DistinctWarnLog.Println(err)
		return nil
	}
	outBuffer := bp.GetBuffer()
	defer bp.PutBuffer(outBuffer)
	// path is the prefix handed to the URL-absolutifying transformer: a
	// dotted relative path for relative URLs, the base URL otherwise.
	var path []byte
	if s.Info.relativeURLs {
		path = []byte(helpers.GetDottedRelativePath(dest))
	} else {
		s := s.PathSpec.BaseURL.String()
		if !strings.HasSuffix(s, "/") {
			s += "/"
		}
		path = []byte(s)
	}
	transformer := transform.NewChain(transform.AbsURLInXML)
	if err := transformer.Apply(outBuffer, renderBuffer, path); err != nil {
		s.DistinctErrorLog.Println(err)
		return nil
	}
	return s.publish(statCounter, dest, outBuffer)
}
// renderAndWritePage renders the page output through the given layouts,
// applies the HTML transform chain (absolute URLs, live-reload script,
// generator meta tag) where applicable, and publishes the result to dest.
// Render errors are logged as warnings, not returned; empty render output
// is silently skipped.
func (s *Site) renderAndWritePage(statCounter *uint64, name string, dest string, p *PageOutput, layouts ...string) error {
	renderBuffer := bp.GetBuffer()
	defer bp.PutBuffer(renderBuffer)
	if err := s.renderForLayouts(p.Kind, p, renderBuffer, layouts...); err != nil {
		helpers.DistinctWarnLog.Println(err)
		return nil
	}
	// Nothing rendered, nothing to write.
	if renderBuffer.Len() == 0 {
		return nil
	}
	outBuffer := bp.GetBuffer()
	defer bp.PutBuffer(outBuffer)
	transformLinks := transform.NewEmptyTransforms()
	isHTML := p.outputFormat.IsHTML
	if isHTML {
		if s.Info.relativeURLs || s.Info.canonifyURLs {
			transformLinks = append(transformLinks, transform.AbsURL)
		}
		// Inject the live-reload script when serving with watch enabled.
		if s.running() && s.Cfg.GetBool("watch") && !s.Cfg.GetBool("disableLiveReload") {
			transformLinks = append(transformLinks, transform.LiveReloadInject(s.Cfg.GetInt("liveReloadPort")))
		}
		// For performance reasons we only inject the Hugo generator tag on the home page.
		if p.IsHome() {
			if !s.Cfg.GetBool("disableHugoGeneratorInject") {
				transformLinks = append(transformLinks, transform.HugoGeneratorInject)
			}
		}
	}
	// path is the prefix handed to the AbsURL transformer; empty when
	// neither relative nor canonical URLs are requested.
	var path []byte
	if s.Info.relativeURLs {
		path = []byte(helpers.GetDottedRelativePath(dest))
	} else if s.Info.canonifyURLs {
		url := s.PathSpec.BaseURL.String()
		if !strings.HasSuffix(url, "/") {
			url += "/"
		}
		path = []byte(url)
	}
	transformer := transform.NewChain(transformLinks...)
	if err := transformer.Apply(outBuffer, renderBuffer, path); err != nil {
		s.DistinctErrorLog.Println(err)
		return nil
	}
	return s.publish(statCounter, dest, outBuffer)
}
// renderForLayouts executes the first template found in layouts against d,
// writing to w. Template panics are recovered and logged; execution errors
// are logged with the page's source path when available. Outside of server
// and test mode a failure aborts the process.
func (s *Site) renderForLayouts(name string, d interface{}, w io.Writer, layouts ...string) (err error) {
	var templ tpl.Template
	// Recover from panics raised inside template execution so one broken
	// template doesn't crash a server build without a trace.
	defer func() {
		if r := recover(); r != nil {
			templName := ""
			if templ != nil {
				templName = templ.Name()
			}
			s.DistinctErrorLog.Printf("Failed to render %q: %s", templName, r)
			s.DistinctErrorLog.Printf("Stack Trace:\n%s", stackTrace(1200))
			// TOD(bep) we really need to fix this. Also see below.
			if !s.running() && !testMode {
				os.Exit(-1)
			}
		}
	}()
	templ = s.findFirstTemplate(layouts...)
	if templ == nil {
		return fmt.Errorf("[%s] Unable to locate layout for %q: %s\n", s.Language.Lang, name, layouts)
	}
	if err = templ.Execute(w, d); err != nil {
		// Behavior here should be dependent on if running in server or watch mode.
		if p, ok := d.(*PageOutput); ok {
			if p.File != nil {
				s.DistinctErrorLog.Printf("Error while rendering %q in %q: %s", name, p.File.Dir(), err)
			} else {
				s.DistinctErrorLog.Printf("Error while rendering %q: %s", name, err)
			}
		} else {
			s.DistinctErrorLog.Printf("Error while rendering %q: %s", name, err)
		}
		if !s.running() && !testMode {
			// TODO(bep) check if this can be propagated
			os.Exit(-1)
		} else if testMode {
			return
		}
	}
	return
}
// findFirstTemplate returns the first name in layouts that resolves to a
// known template, or nil when none do.
func (s *Site) findFirstTemplate(layouts ...string) tpl.Template {
	for _, name := range layouts {
		templ, found := s.Tmpl.Lookup(name)
		if found {
			return templ
		}
	}
	return nil
}
// publish writes r to path on the site's publish filesystem, incrementing
// the given processing-stats counter.
func (s *Site) publish(statCounter *uint64, path string, r io.Reader) (err error) {
	s.PathSpec.ProcessingStats.Incr(statCounter)
	return helpers.WriteToDisk(filepath.Clean(path), r, s.BaseFs.PublishFs)
}
// getGoMaxProcs returns the processor count configured via the GOMAXPROCS
// environment variable, falling back to 1 when the variable is unset or
// not a valid integer.
func getGoMaxProcs() int {
	if gmp := os.Getenv("GOMAXPROCS"); gmp != "" {
		// Use the parsed value only when parsing succeeded. The previous
		// check was inverted (err != nil), which returned the zero value on
		// parse failure and ignored every valid setting.
		if p, err := strconv.Atoi(gmp); err == nil {
			return p
		}
	}
	return 1
}
// newNodePage creates a synthetic (non-content-file) page of the given
// kind — home, section, taxonomy, taxonomy terms — rooted at the given
// section path, wired to this site's language and output formats.
func (s *Site) newNodePage(typ string, sections ...string) *Page {
	p := &Page{
		language: s.Language,
		pageInit: &pageInit{},
		pageContentInit: &pageContentInit{},
		Kind: typ,
		// An empty FileInfo stands in for the missing source file.
		Source: Source{File: &source.FileInfo{}},
		data: make(map[string]interface{}),
		Site: &s.Info,
		sections: sections,
		s: s}
	p.outputFormats = p.s.outputFormats[p.Kind]
	return p
}
// newHomePage creates the synthetic home page node, titled with the site
// title and carrying an (initially empty) page list.
func (s *Site) newHomePage() *Page {
	home := s.newNodePage(KindHome)
	home.title = s.Info.Title

	pages := Pages{}
	home.Pages = pages
	home.data["Pages"] = pages
	return home
}
// newTaxonomyPage creates the node page listing the content for a single
// taxonomy term (e.g. one tag).
func (s *Site) newTaxonomyPage(plural, key string) *Page {
	p := s.newNodePage(KindTaxonomy, plural, key)
	if s.Info.preserveTaxonomyNames {
		// Keep (mostly) as is in the title
		// We make the first character upper case, mostly because
		// it is easier to reason about in the tests.
		p.title = helpers.FirstUpper(key)
		// NOTE(review): key is re-assigned here but never read again in
		// this function — looks like a leftover dead store; confirm before
		// removing.
		key = s.PathSpec.MakePathSanitized(key)
	} else {
		p.title = strings.Replace(s.titleFunc(key), "-", " ", -1)
	}
	return p
}
// newSectionPage creates the node page for a content section, pluralizing
// the title when pluralizeListTitles is enabled.
func (s *Site) newSectionPage(name string) *Page {
	p := s.newNodePage(KindSection, name)

	title := helpers.FirstUpper(name)
	if s.Cfg.GetBool("pluralizeListTitles") {
		title = inflect.Pluralize(title)
	}
	p.title = title
	return p
}
// newTaxonomyTermsPage creates the node page listing all terms of a
// taxonomy (e.g. the /tags/ overview).
func (s *Site) newTaxonomyTermsPage(plural string) *Page {
	p := s.newNodePage(KindTaxonomyTerm, plural)
	p.title = s.titleFunc(plural)
	return p
}
hugolib: Mark shortcode changes as content changes in server mode
This is unfortunate, but it is needed to re-create the taxonomy collections etc. that may be referenced from the shortcodes.
Fixes #4965
// Copyright 2017 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hugolib
import (
"errors"
"fmt"
"html/template"
"io"
"mime"
"net/url"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/gohugoio/hugo/common/maps"
"github.com/gohugoio/hugo/resource"
"github.com/gohugoio/hugo/langs"
src "github.com/gohugoio/hugo/source"
"golang.org/x/sync/errgroup"
"github.com/gohugoio/hugo/config"
"github.com/gohugoio/hugo/media"
"github.com/markbates/inflect"
"golang.org/x/net/context"
"github.com/fsnotify/fsnotify"
bp "github.com/gohugoio/hugo/bufferpool"
"github.com/gohugoio/hugo/deps"
"github.com/gohugoio/hugo/helpers"
"github.com/gohugoio/hugo/hugolib/pagemeta"
"github.com/gohugoio/hugo/output"
"github.com/gohugoio/hugo/parser"
"github.com/gohugoio/hugo/related"
"github.com/gohugoio/hugo/source"
"github.com/gohugoio/hugo/tpl"
"github.com/gohugoio/hugo/transform"
"github.com/spf13/afero"
"github.com/spf13/cast"
"github.com/spf13/nitro"
"github.com/spf13/viper"
)
// NOTE(review): presumably keeps the transform package referenced;
// confirm before removing.
var _ = transform.AbsURL
// used to indicate if run as a test.
var testMode bool
// defaultTimer is the fallback build timer used by Site.timerStep when a
// site has no timer of its own.
var defaultTimer *nitro.B
// Site contains all the information relevant for constructing a static
// site. The basic flow of information is as follows:
//
// 1. A list of Files is parsed and then converted into Pages.
//
// 2. Pages contain sections (based on the file they were generated from),
// aliases and slugs (included in a pages frontmatter) which are the
// various targets that will get generated. There will be canonical
// listing. The canonical path can be overruled based on a pattern.
//
// 3. Taxonomies are created via configuration and will present some aspect of
// the final page and typically a perm url.
//
// 4. All Pages are passed through a template based on their desired
// layout based on numerous different elements.
//
// 5. The entire collection of files is written to disk.
type Site struct {
	// owner is the multi-site container this site belongs to.
	owner *HugoSites
	*PageCollections
	Taxonomies TaxonomyList
	// Plural is what we get in the folder, so keep track of this mapping
	// to get the singular form from that value.
	taxonomiesPluralSingular map[string]string
	// This is temporary, see https://github.com/gohugoio/hugo/issues/2835
	// Maps "actors-gerard-depardieu" to "Gérard Depardieu" when preserveTaxonomyNames
	// is set.
	taxonomiesOrigKey map[string]string
	Sections Taxonomy
	// Info is the template-facing view of this site.
	Info SiteInfo
	Menus Menus
	// timer measures build steps; falls back to defaultTimer.
	timer *nitro.B
	layoutHandler *output.LayoutHandler
	// Counters of pages excluded from the build, for reporting.
	draftCount int
	futureCount int
	expiredCount int
	Data map[string]interface{}
	Language *langs.Language
	// disabledKinds holds the page kinds excluded via "disableKinds".
	disabledKinds map[string]bool
	// Output formats defined in site config per Page Kind, or some defaults
	// if not set.
	// Output formats defined in Page front matter will override these.
	outputFormats map[string]output.Formats
	// All the output formats and media types available for this site.
	// These values will be merged from the Hugo defaults, the site config and,
	// finally, the language settings.
	outputFormatsConfig output.Formats
	mediaTypesConfig media.Types
	// How to handle page front matter.
	frontmatterHandler pagemeta.FrontMatterHandler
	// We render each site for all the relevant output formats in serial with
	// this rendering context pointing to the current one.
	rc *siteRenderingContext
	// The output formats that we need to render this site in. This slice
	// will be fixed once set.
	// This will be the union of Site.Pages' outputFormats.
	// This slice will be sorted.
	renderFormats output.Formats
	// Logger etc.
	*deps.Deps `json:"-"`
	// The func used to title case titles.
	titleFunc func(s string) string
	// relatedDocsHandler indexes pages for related-content lookups.
	relatedDocsHandler *relatedDocsHandler
}
// siteRenderingContext carries the output format currently being rendered;
// the site renders its formats in serial with this pointing at the active one.
type siteRenderingContext struct {
	output.Format
}
// initRenderFormats computes the sorted union of all output formats used
// by the site's pages and stores it in s.renderFormats.
func (s *Site) initRenderFormats() {
	seen := make(map[string]bool)
	formats := output.Formats{}
	for _, p := range s.Pages {
		for _, f := range p.outputFormats {
			if seen[f.Name] {
				continue
			}
			seen[f.Name] = true
			formats = append(formats, f)
		}
	}
	sort.Sort(formats)
	s.renderFormats = formats
}
// isEnabled reports whether pages of the given kind should be rendered.
// It panics on the unknown kind, which would indicate a programmer error.
func (s *Site) isEnabled(kind string) bool {
	if kind == kindUnknown {
		panic("Unknown kind")
	}
	disabled := s.disabledKinds[kind]
	return !disabled
}
// reset returns a new Site prepared for rebuild. Configuration-derived
// state is carried over; page collections start empty.
func (s *Site) reset() *Site {
	return &Site{Deps: s.Deps,
		layoutHandler: output.NewLayoutHandler(),
		disabledKinds: s.disabledKinds,
		titleFunc: s.titleFunc,
		// Fresh related-content index, same configuration.
		relatedDocsHandler: newSearchIndexHandler(s.relatedDocsHandler.cfg),
		outputFormats: s.outputFormats,
		rc: s.rc,
		outputFormatsConfig: s.outputFormatsConfig,
		frontmatterHandler: s.frontmatterHandler,
		mediaTypesConfig: s.mediaTypesConfig,
		Language: s.Language,
		owner: s.owner,
		PageCollections: newPageCollections()}
}
// newSite creates a new site with the given configuration: it decodes
// media types and output formats (language settings taking precedence
// over site config), the related-content configuration, and the front
// matter handler, and assembles them into a Site.
func newSite(cfg deps.DepsCfg) (*Site, error) {
	c := newPageCollections()
	if cfg.Language == nil {
		cfg.Language = langs.NewDefaultLanguage(cfg.Cfg)
	}
	disabledKinds := make(map[string]bool)
	for _, disabled := range cast.ToStringSlice(cfg.Language.Get("disableKinds")) {
		disabledKinds[disabled] = true
	}
	var (
		mediaTypesConfig []map[string]interface{}
		outputFormatsConfig []map[string]interface{}
		siteOutputFormatsConfig output.Formats
		siteMediaTypesConfig media.Types
		err error
	)
	// Add language last, if set, so it gets precedence.
	for _, cfg := range []config.Provider{cfg.Cfg, cfg.Language} {
		if cfg.IsSet("mediaTypes") {
			mediaTypesConfig = append(mediaTypesConfig, cfg.GetStringMap("mediaTypes"))
		}
		if cfg.IsSet("outputFormats") {
			outputFormatsConfig = append(outputFormatsConfig, cfg.GetStringMap("outputFormats"))
		}
	}
	siteMediaTypesConfig, err = media.DecodeTypes(mediaTypesConfig...)
	if err != nil {
		return nil, err
	}
	// Output formats reference the media types decoded above.
	siteOutputFormatsConfig, err = output.DecodeFormats(siteMediaTypesConfig, outputFormatsConfig...)
	if err != nil {
		return nil, err
	}
	outputFormats, err := createSiteOutputFormats(siteOutputFormatsConfig, cfg.Language)
	if err != nil {
		return nil, err
	}
	var relatedContentConfig related.Config
	if cfg.Language.IsSet("related") {
		relatedContentConfig, err = related.DecodeConfig(cfg.Language.Get("related"))
		if err != nil {
			return nil, err
		}
	} else {
		relatedContentConfig = related.DefaultConfig
		// With no explicit config, index "tags" when that taxonomy exists.
		taxonomies := cfg.Language.GetStringMapString("taxonomies")
		if _, found := taxonomies["tag"]; found {
			relatedContentConfig.Add(related.IndexConfig{Name: "tags", Weight: 80})
		}
	}
	titleFunc := helpers.GetTitleFunc(cfg.Language.GetString("titleCaseStyle"))
	frontMatterHandler, err := pagemeta.NewFrontmatterHandler(cfg.Logger, cfg.Cfg)
	if err != nil {
		return nil, err
	}
	s := &Site{
		PageCollections: c,
		layoutHandler: output.NewLayoutHandler(),
		Language: cfg.Language,
		disabledKinds: disabledKinds,
		titleFunc: titleFunc,
		relatedDocsHandler: newSearchIndexHandler(relatedContentConfig),
		outputFormats: outputFormats,
		rc: &siteRenderingContext{output.HTMLFormat},
		outputFormatsConfig: siteOutputFormatsConfig,
		mediaTypesConfig: siteMediaTypesConfig,
		frontmatterHandler: frontMatterHandler,
	}
	s.Info = newSiteInfo(siteBuilderCfg{s: s, pageCollections: c, language: s.Language})
	return s, nil
}
// NewSite creates a new site with the given dependency configuration.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
func NewSite(cfg deps.DepsCfg) (*Site, error) {
	site, err := newSite(cfg)
	if err != nil {
		return nil, err
	}
	if err = applyDepsIfNeeded(cfg, site); err != nil {
		return nil, err
	}
	return site, nil
}
// NewSiteDefaultLang creates a new site in the default language.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
// TODO(bep) test refactor -- remove
func NewSiteDefaultLang(withTemplate ...func(templ tpl.TemplateHandler) error) (*Site, error) {
	// Start from a fresh viper instance populated with Hugo's defaults.
	v := viper.New()
	if err := loadDefaultSettingsFor(v); err != nil {
		return nil, err
	}
	return newSiteForLang(langs.NewDefaultLanguage(v), withTemplate...)
}
// NewEnglishSite creates a new site in English language.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
// TODO(bep) test refactor -- remove
func NewEnglishSite(withTemplate ...func(templ tpl.TemplateHandler) error) (*Site, error) {
	// Start from a fresh viper instance populated with Hugo's defaults.
	v := viper.New()
	if err := loadDefaultSettingsFor(v); err != nil {
		return nil, err
	}
	return newSiteForLang(langs.NewLanguage("en", v), withTemplate...)
}
// newSiteForLang creates a new site in the given language, applying each
// template-configuration callback in order.
func newSiteForLang(lang *langs.Language, withTemplate ...func(templ tpl.TemplateHandler) error) (*Site, error) {
	applyAll := func(templ tpl.TemplateHandler) error {
		for _, wt := range withTemplate {
			if err := wt(templ); err != nil {
				return err
			}
		}
		return nil
	}

	return NewSiteForCfg(deps.DepsCfg{WithTemplate: applyAll, Language: lang, Cfg: lang})
}
// NewSiteForCfg creates a new site for the given configuration.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
func NewSiteForCfg(cfg deps.DepsCfg) (*Site, error) {
	site, err := newSite(cfg)
	if err != nil {
		return nil, err
	}
	if err = applyDepsIfNeeded(cfg, site); err != nil {
		return nil, err
	}
	return site, nil
}
// SiteInfos is an ordered list of SiteInfo, one per language.
type SiteInfos []*SiteInfo

// First is a convenience method to get the first Site, i.e. the main language.
func (s SiteInfos) First() *SiteInfo {
	if len(s) > 0 {
		return s[0]
	}
	return nil
}
// SiteInfo is the template-facing view of a Site: the data reachable as
// .Site from templates.
type SiteInfo struct {
	Taxonomies TaxonomyList
	Authors AuthorList
	Social SiteSocial
	*PageCollections
	Menus *Menus
	Hugo *HugoInfo
	Title string
	// RSSLink is set only when the home page has an RSS output format.
	RSSLink string
	Author map[string]interface{}
	LanguageCode string
	Copyright string
	LastChange time.Time
	Permalinks PermalinkOverrides
	Params map[string]interface{}
	BuildDrafts bool
	// URL behavior flags mirrored from the site configuration.
	canonifyURLs bool
	relativeURLs bool
	// uglyURLs decides per page whether to use file-style URLs (.html).
	uglyURLs func(p *Page) bool
	preserveTaxonomyNames bool
	Data *map[string]interface{}
	Config SiteConfig
	// owner is the multi-site container; s the backing Site.
	owner *HugoSites
	s *Site
	multilingual *Multilingual
	Language *langs.Language
	// LanguagePrefix is the "/<lang>" URL prefix, empty for the default language.
	LanguagePrefix string
	Languages langs.Languages
	defaultContentLanguageInSubdir bool
	sectionPagesMenu string
}
// String implements fmt.Stringer, identifying the site by its title.
func (s *SiteInfo) String() string {
	return fmt.Sprintf("Site(%q)", s.Title)
}
// BaseURL returns the site's base URL as a template-safe URL value.
func (s *SiteInfo) BaseURL() template.URL {
	return template.URL(s.s.PathSpec.BaseURL.String())
}
// ServerPort returns the port part of the BaseURL, 0 if none found.
func (s *SiteInfo) ServerPort() int {
	portStr := s.s.PathSpec.BaseURL.URL().Port()
	if portStr == "" {
		return 0
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return 0
	}
	return port
}
// GoogleAnalytics is kept here for historic reasons.
// It returns the tracking ID from the site's services configuration.
func (s *SiteInfo) GoogleAnalytics() string {
	return s.Config.Services.GoogleAnalytics.ID
}
// DisqusShortname is kept here for historic reasons.
// It returns the shortname from the site's services configuration.
func (s *SiteInfo) DisqusShortname() string {
	return s.Config.Services.Disqus.Shortname
}
// siteBuilderCfg bundles the inputs for newSiteInfo. Used in tests.
type siteBuilderCfg struct {
	language *langs.Language
	s *Site
	pageCollections *PageCollections
}
// TODO(bep) get rid of this
// newSiteInfo builds a minimal SiteInfo for test use; ugly URLs default off.
func newSiteInfo(cfg siteBuilderCfg) SiteInfo {
	return SiteInfo{
		s: cfg.s,
		multilingual: newMultiLingualForLanguage(cfg.language),
		PageCollections: cfg.pageCollections,
		Params: make(map[string]interface{}),
		uglyURLs: func(p *Page) bool {
			return false
		},
	}
}
// SiteSocial is a place to put social details on a site level. These are the
// standard keys that themes will expect to have available, but can be
// expanded to any others on a per site basis. Standard keys:
//
//	github
//	facebook
//	facebook_admin
//	twitter
//	twitter_domain
//	googleplus
//	pinterest
//	instagram
//	youtube
//	linkedin
type SiteSocial map[string]string
// Param is a convenience method to do lookups in SiteInfo's Params map.
// The key is matched case-insensitively.
//
// This method is also implemented on Page and Node.
func (s *SiteInfo) Param(key interface{}) (interface{}, error) {
	keyStr, err := cast.ToStringE(key)
	if err != nil {
		return nil, err
	}
	return s.Params[strings.ToLower(keyStr)], nil
}
// IsMultiLingual reports whether the site is built in more than one language.
func (s *SiteInfo) IsMultiLingual() bool {
	return len(s.Languages) > 1
}
// IsServer reports whether the site is being served with "hugo server".
func (s *SiteInfo) IsServer() bool {
	return s.owner.running
}
// refLink resolves ref — a slash path with an optional #fragment — to a
// link for the given output format, relative or absolute per the relative
// flag. Unless PlainIDAnchors is configured, fragment anchors are suffixed
// with the target (or current) page's UniqueID to disambiguate them.
func (s *SiteInfo) refLink(ref string, page *Page, relative bool, outputFormat string) (string, error) {
	refURL, err := url.Parse(filepath.ToSlash(ref))
	if err != nil {
		return "", err
	}

	var target *Page
	var link string

	if refURL.Path != "" {
		// Assign to the outer target. The previous code shadowed it with
		// ":=", so the PlainIDAnchors branch below always saw target == nil
		// and never appended the target page's UniqueID.
		target, err = s.getPageNew(page, refURL.Path)
		if err != nil {
			return "", err
		}
		if target == nil {
			return "", fmt.Errorf("No page found with path or logical name \"%s\".\n", refURL.Path)
		}

		var permalinker Permalinker = target
		if outputFormat != "" {
			o := target.OutputFormats().Get(outputFormat)
			if o == nil {
				return "", fmt.Errorf("Output format %q not found for page %q", outputFormat, refURL.Path)
			}
			permalinker = o
		}

		if relative {
			link = permalinker.RelPermalink()
		} else {
			link = permalinker.Permalink()
		}
	}

	if refURL.Fragment != "" {
		link = link + "#" + refURL.Fragment
		if refURL.Path != "" && target != nil && !target.getRenderingConfig().PlainIDAnchors {
			link = link + ":" + target.UniqueID()
		} else if page != nil && !page.getRenderingConfig().PlainIDAnchors {
			link = link + ":" + page.UniqueID()
		}
	}

	return link, nil
}
// Ref will give an absolute URL to ref in the given Page.
// An optional first option selects the output format.
func (s *SiteInfo) Ref(ref string, page *Page, options ...string) (string, error) {
	var outputFormat string
	if len(options) > 0 {
		outputFormat = options[0]
	}
	return s.refLink(ref, page, false, outputFormat)
}
// RelRef will give a relative URL to ref in the given Page.
// An optional first option selects the output format.
func (s *SiteInfo) RelRef(ref string, page *Page, options ...string) (string, error) {
	var outputFormat string
	if len(options) > 0 {
		outputFormat = options[0]
	}
	return s.refLink(ref, page, true, outputFormat)
}
// running reports whether this site's owner is in watch/server mode.
func (s *Site) running() bool {
	return s.owner != nil && s.owner.running
}
// init sets up the package-level default build timer.
func init() {
	defaultTimer = nitro.Initalize()
}
// timerStep records a named step in the site's build timer, lazily falling
// back to the package default timer.
func (s *Site) timerStep(step string) {
	if s.timer == nil {
		s.timer = defaultTimer
	}
	s.timer.Step(step)
}
// whatChanged describes the outcome of a partial rebuild: whether the content
// source changed, whether anything else (templates, data, i18n) changed, and
// the set of affected source filenames.
type whatChanged struct {
	source bool
	other bool
	files map[string]bool
}
// RegisterMediaTypes will register the Site's media types in the mime
// package, so it will behave correctly with Hugo's built-in server.
func (s *Site) RegisterMediaTypes() {
	for _, mediaType := range s.mediaTypesConfig {
		for _, suffix := range mediaType.Suffixes {
			// Registration failures are not fatal; ignore the error.
			_ = mime.AddExtensionType(mediaType.Delimiter+suffix, mediaType.Type()+"; charset=utf-8")
		}
	}
}
// filterFileEvents drops duplicate events, ignored files and directories,
// keeping removes/renames of files that no longer exist on disk.
func (s *Site) filterFileEvents(events []fsnotify.Event) []fsnotify.Event {
	var keep []fsnotify.Event
	handled := make(map[fsnotify.Event]bool)

	for _, ev := range events {
		// Avoid processing the same event twice.
		if handled[ev] {
			continue
		}
		handled[ev] = true

		if s.SourceSpec.IgnoreFile(ev.Name) {
			continue
		}

		// Throw away any directories
		isRegular, err := s.SourceSpec.IsRegularSourceFile(ev.Name)
		if err != nil && os.IsNotExist(err) && (ev.Op&fsnotify.Remove == fsnotify.Remove || ev.Op&fsnotify.Rename == fsnotify.Rename) {
			// Force keep of event: the file is gone, which is exactly what a
			// remove/rename reports.
			isRegular = true
		}
		if !isRegular {
			continue
		}

		keep = append(keep, ev)
	}

	return keep
}
// translateFileEvents collapses the event stream so that at most one event is
// kept per filename: a Write wins over everything, otherwise the last Create,
// otherwise the first event seen for that name.
func (s *Site) translateFileEvents(events []fsnotify.Event) []fsnotify.Event {
	var filtered []fsnotify.Event

	eventMap := make(map[string][]fsnotify.Event)

	// We often get a Remove etc. followed by a Create, a Create followed by a Write.
	// Remove the superfluous events to make the update logic simpler.
	for _, ev := range events {
		eventMap[ev.Name] = append(eventMap[ev.Name], ev)
	}

	// Bugfix: the kept event was previously appended once per occurrence of
	// the filename, so a Create+Write pair produced two identical events
	// downstream. Track handled names so each name is emitted exactly once.
	handled := make(map[string]bool)

	for _, ev := range events {
		if handled[ev.Name] {
			continue
		}
		handled[ev.Name] = true

		mapped := eventMap[ev.Name]

		// Keep one
		found := false
		var kept fsnotify.Event
		for i, ev2 := range mapped {
			if i == 0 {
				kept = ev2
			}

			if ev2.Op&fsnotify.Write == fsnotify.Write {
				kept = ev2
				found = true
			}

			if !found && ev2.Op&fsnotify.Create == fsnotify.Create {
				kept = ev2
			}
		}

		filtered = append(filtered, kept)
	}

	return filtered
}
// processPartial partially rebuilds a site given the filesystem events.
// It returns whether the content source was changed.
// TODO(bep) clean up/rewrite this method.
func (s *Site) processPartial(events []fsnotify.Event) (whatChanged, error) {
	events = s.filterFileEvents(events)
	events = s.translateFileEvents(events)

	s.Log.DEBUG.Printf("Rebuild for events %q", events)

	h := s.owner

	s.timerStep("initialize rebuild")

	// First we need to determine what changed

	var (
		sourceChanged       = []fsnotify.Event{}
		sourceReallyChanged = []fsnotify.Event{}
		contentFilesChanged []string
		tmplChanged         = []fsnotify.Event{}
		dataChanged         = []fsnotify.Event{}
		i18nChanged         = []fsnotify.Event{}
		shortcodesChanged   = make(map[string]bool)
		sourceFilesChanged  = make(map[string]bool)

		// prevent spamming the log on changes
		logger = helpers.NewDistinctFeedbackLogger()
	)

	cachePartitions := make([]string, len(events))

	for i, ev := range events {
		cachePartitions[i] = resource.ResourceKeyPartition(ev.Name)

		if s.isContentDirEvent(ev) {
			logger.Println("Source changed", ev)
			sourceChanged = append(sourceChanged, ev)
		}
		if s.isLayoutDirEvent(ev) {
			logger.Println("Template changed", ev)
			tmplChanged = append(tmplChanged, ev)

			if strings.Contains(ev.Name, "shortcodes") {
				clearIsInnerShortcodeCache()
				shortcode := filepath.Base(ev.Name)
				shortcode = strings.TrimSuffix(shortcode, filepath.Ext(shortcode))
				shortcodesChanged[shortcode] = true
			}
		}
		if s.isDataDirEvent(ev) {
			logger.Println("Data changed", ev)
			dataChanged = append(dataChanged, ev)
		}
		if s.isI18nEvent(ev) {
			logger.Println("i18n changed", ev)
			// Bugfix: this previously appended to dataChanged, so an
			// i18n-only change was recorded against the wrong list.
			i18nChanged = append(i18nChanged, ev)
		}
	}

	// These in memory resource caches will be rebuilt on demand.
	for _, site := range s.owner.Sites {
		site.ResourceSpec.ResourceCache.DeletePartitions(cachePartitions...)
	}

	if len(tmplChanged) > 0 || len(i18nChanged) > 0 {
		sites := s.owner.Sites
		first := sites[0]

		// TOD(bep) globals clean
		if err := first.Deps.LoadResources(); err != nil {
			return whatChanged{}, err
		}

		s.TemplateHandler().PrintErrors()

		// The first site's deps are the base; derive the rest from it.
		for i := 1; i < len(sites); i++ {
			site := sites[i]
			var err error
			depsCfg := deps.DepsCfg{
				Language:   site.Language,
				MediaTypes: site.mediaTypesConfig,
			}
			site.Deps, err = first.Deps.ForLanguage(depsCfg)
			if err != nil {
				return whatChanged{}, err
			}
		}

		s.timerStep("template prep")
	}

	if len(dataChanged) > 0 {
		if err := s.readDataFromSourceFS(); err != nil {
			s.Log.ERROR.Println(err)
		}
	}

	for _, ev := range sourceChanged {
		removed := false

		if ev.Op&fsnotify.Remove == fsnotify.Remove {
			removed = true
		}

		// Some editors (Vim) sometimes issue only a Rename operation when writing an existing file
		// Sometimes a rename operation means that file has been renamed other times it means
		// it's been updated
		if ev.Op&fsnotify.Rename == fsnotify.Rename {
			// If the file is still on disk, it's only been updated, if it's not, it's been moved
			if ex, err := afero.Exists(s.Fs.Source, ev.Name); !ex || err != nil {
				removed = true
			}
		}
		if removed && isContentFile(ev.Name) {
			h.removePageByFilename(ev.Name)
		}

		sourceReallyChanged = append(sourceReallyChanged, ev)
		sourceFilesChanged[ev.Name] = true
	}

	for shortcode := range shortcodesChanged {
		// There are certain scenarios that, when a shortcode changes,
		// it isn't sufficient to just rerender the already parsed shortcode.
		// One example is if the user adds a new shortcode to the content file first,
		// and then creates the shortcode on the file system.
		// To handle these scenarios, we must do a full reprocessing of the
		// pages that keeps a reference to the changed shortcode.
		pagesWithShortcode := h.findPagesByShortcode(shortcode)
		for _, p := range pagesWithShortcode {
			contentFilesChanged = append(contentFilesChanged, p.File.Filename())
		}
	}

	if len(sourceReallyChanged) > 0 || len(contentFilesChanged) > 0 {
		var filenamesChanged []string
		for _, e := range sourceReallyChanged {
			filenamesChanged = append(filenamesChanged, e.Name)
		}
		if len(contentFilesChanged) > 0 {
			filenamesChanged = append(filenamesChanged, contentFilesChanged...)
		}
		filenamesChanged = helpers.UniqueStrings(filenamesChanged)

		if err := s.readAndProcessContent(filenamesChanged...); err != nil {
			return whatChanged{}, err
		}
	}

	changed := whatChanged{
		source: len(sourceChanged) > 0 || len(shortcodesChanged) > 0,
		other:  len(tmplChanged) > 0 || len(i18nChanged) > 0 || len(dataChanged) > 0,
		files:  sourceFilesChanged,
	}

	return changed, nil
}
// loadData walks the given filesystem and loads every data file found into
// s.Data, replacing any previously loaded data.
func (s *Site) loadData(fs afero.Fs) (err error) {
	spec := src.NewSourceSpec(s.PathSpec, fs)
	s.Data = make(map[string]interface{})
	for _, file := range spec.NewFilesystem("").Files() {
		if err = s.handleDataFile(file); err != nil {
			return
		}
	}
	return
}
// handleDataFile reads one data file and merges its content into s.Data,
// nesting by the file's directory path and keying the leaf by its base
// filename. Data already present in the tree takes precedence over data read
// later (e.g. theme data loses to the main data folder).
func (s *Site) handleDataFile(r source.ReadableFile) error {
	var current map[string]interface{}
	f, err := r.Open()
	if err != nil {
		return fmt.Errorf("Failed to open data file %q: %s", r.LogicalName(), err)
	}
	defer f.Close()
	// Crawl in data tree to insert data
	current = s.Data
	keyParts := strings.Split(r.Dir(), helpers.FilePathSeparator)
	// The first path element is the virtual folder (typically theme name), which is
	// not part of the key.
	if len(keyParts) > 1 {
		for _, key := range keyParts[1:] {
			if key != "" {
				if _, ok := current[key]; !ok {
					current[key] = make(map[string]interface{})
				}
				current = current[key].(map[string]interface{})
			}
		}
	}
	data, err := s.readData(r)
	if err != nil {
		// A malformed data file is logged but does not abort the build.
		s.Log.ERROR.Printf("Failed to read data from %s: %s", filepath.Join(r.Path(), r.LogicalName()), err)
		return nil
	}
	if data == nil {
		return nil
	}
	// filepath.Walk walks the files in lexical order, '/' comes before '.'
	// this warning could happen if
	// 1. A theme uses the same key; the main data folder wins
	// 2. A sub folder uses the same key: the sub folder wins
	higherPrecedentData := current[r.BaseFileName()]
	switch data.(type) {
	case nil:
		// hear the crickets?
	case map[string]interface{}:
		switch higherPrecedentData.(type) {
		case nil:
			// Nothing there yet; install the whole map.
			current[r.BaseFileName()] = data
		case map[string]interface{}:
			// merge maps: insert entries from data for keys that
			// don't already exist in higherPrecedentData
			higherPrecedentMap := higherPrecedentData.(map[string]interface{})
			for key, value := range data.(map[string]interface{}) {
				if _, exists := higherPrecedentMap[key]; exists {
					s.Log.WARN.Printf("Data for key '%s' in path '%s' is overridden higher precedence data already in the data tree", key, r.Path())
				} else {
					higherPrecedentMap[key] = value
				}
			}
		default:
			// can't merge: higherPrecedentData is not a map
			s.Log.WARN.Printf("The %T data from '%s' overridden by "+
				"higher precedence %T data already in the data tree", data, r.Path(), higherPrecedentData)
		}
	case []interface{}:
		if higherPrecedentData == nil {
			current[r.BaseFileName()] = data
		} else {
			// we don't merge array data
			s.Log.WARN.Printf("The %T data from '%s' overridden by "+
				"higher precedence %T data already in the data tree", data, r.Path(), higherPrecedentData)
		}
	default:
		s.Log.ERROR.Printf("unexpected data type %T in file %s", data, r.LogicalName())
	}
	return nil
}
// readData parses a single data file (YAML, JSON or TOML) into a generic
// value, returning an error for unsupported extensions.
func (s *Site) readData(f source.ReadableFile) (interface{}, error) {
	rc, err := f.Open()
	if err != nil {
		return nil, fmt.Errorf("readData: failed to open data file: %s", err)
	}
	defer rc.Close()

	content := helpers.ReaderToBytes(rc)

	switch f.Extension() {
	case "yaml", "yml":
		return parser.HandleYAMLData(content)
	case "json":
		return parser.HandleJSONData(content)
	case "toml":
		return parser.HandleTOMLMetaData(content)
	}
	return nil, fmt.Errorf("Data not supported for extension '%s'", f.Extension())
}
// readDataFromSourceFS loads all data files from the data filesystem into
// s.Data and records the time spent in the build timer.
func (s *Site) readDataFromSourceFS() error {
	err := s.loadData(s.PathSpec.BaseFs.Data.Fs)
	s.timerStep("load data")
	return err
}
// process runs the read phase of a build: initialization, data loading and
// reading/converting the content source.
func (s *Site) process(config BuildCfg) (err error) {
	if err = s.initialize(); err != nil {
		return
	}
	s.timerStep("initialize")
	if err = s.readDataFromSourceFS(); err != nil {
		return
	}
	// NOTE(review): this step is labeled "load i18n" but directly follows the
	// data read (which records its own "load data" step) — confirm the label.
	s.timerStep("load i18n")
	if err := s.readAndProcessContent(); err != nil {
		return err
	}
	s.timerStep("read and convert pages from source")
	return err
}
// setupSitePages links every regular page to its Next/Prev neighbors and
// records the newest Lastmod as the site's LastChange.
func (s *Site) setupSitePages() {
	var siteLastChange time.Time

	regular := s.RegularPages
	for i, page := range regular {
		if i > 0 {
			page.Prev = regular[i-1]
		}
		if i < len(regular)-1 {
			page.Next = regular[i+1]
		}

		// Determine Site.Info.LastChange
		// Note that the logic to determine which date to use for Lastmod
		// is already applied, so this is *the* date to use.
		// We cannot just pick the last page in the default sort, because
		// that may not be ordered by date.
		if page.Lastmod.After(siteLastChange) {
			siteLastChange = page.Lastmod
		}
	}

	s.Info.LastChange = siteLastChange
}
// render renders and writes the site for one output format. Page preparation,
// aliases, sitemap, robots.txt and the 404 page are only handled for the
// first output format (outFormatIdx == 0).
func (s *Site) render(config *BuildCfg, outFormatIdx int) (err error) {
	if outFormatIdx == 0 {
		if err = s.preparePages(); err != nil {
			return
		}
		s.timerStep("prepare pages")
		// Note that even if disableAliases is set, the aliases themselves are
		// preserved on page. The motivation with this is to be able to generate
		// 301 redirects in a .htacess file and similar using a custom output format.
		if !s.Cfg.GetBool("disableAliases") {
			// Aliases must be rendered before pages.
			// Some sites, Hugo docs included, have faulty alias definitions that point
			// to itself or another real page. These will be overwritten in the next
			// step.
			if err = s.renderAliases(); err != nil {
				return
			}
			s.timerStep("render and write aliases")
		}
	}
	if err = s.renderPages(config); err != nil {
		return
	}
	s.timerStep("render and write pages")
	// TODO(bep) render consider this, ref. render404 etc.
	if outFormatIdx > 0 {
		return
	}
	if err = s.renderSitemap(); err != nil {
		return
	}
	s.timerStep("render and write Sitemap")
	if err = s.renderRobotsTXT(); err != nil {
		return
	}
	s.timerStep("render and write robots.txt")
	if err = s.render404(); err != nil {
		return
	}
	s.timerStep("render and write 404")
	return
}
// Initialise is an exported wrapper around initialize, kept for external callers.
func (s *Site) Initialise() (err error) {
	return s.initialize()
}
// initialize resets the site menus and (re)builds the SiteInfo.
func (s *Site) initialize() (err error) {
	s.Menus = Menus{}
	return s.initializeSiteInfo()
}
// HomeAbsURL is a convenience method giving the absolute URL to the home page.
func (s *SiteInfo) HomeAbsURL() string {
	var base string
	if s.IsMultiLingual() {
		// Multilingual sites place each language under its language code.
		base = s.Language.Lang
	}
	return s.owner.AbsURL(base, false)
}
// SitemapAbsURL is a convenience method giving the absolute URL to the sitemap.
func (s *SiteInfo) SitemapAbsURL() string {
	sitemapDefault := parseSitemap(s.s.Cfg.GetStringMap("sitemap"))
	home := s.HomeAbsURL()
	if !strings.HasSuffix(home, "/") {
		home += "/"
	}
	return home + sitemapDefault.Filename
}
// initializeSiteInfo builds s.Info from the site's language and configuration:
// it resolves the language list and prefix, permalink overrides, the uglyURLs
// policy and the per-language site config, then wires everything into a fresh
// SiteInfo value.
func (s *Site) initializeSiteInfo() error {
	var (
		lang = s.Language
		languages langs.Languages
	)
	if s.owner != nil && s.owner.multilingual != nil {
		languages = s.owner.multilingual.Languages
	}
	params := lang.Params()
	permalinks := make(PermalinkOverrides)
	for k, v := range s.Cfg.GetStringMapString("permalinks") {
		permalinks[k] = pathPattern(v)
	}
	defaultContentInSubDir := s.Cfg.GetBool("defaultContentLanguageInSubdir")
	defaultContentLanguage := s.Cfg.GetString("defaultContentLanguage")
	languagePrefix := ""
	// Only non-default languages (or all languages when the default content
	// language renders into a subdir) get a "/lang" URL prefix.
	if s.multilingualEnabled() && (defaultContentInSubDir || lang.Lang != defaultContentLanguage) {
		languagePrefix = "/" + lang.Lang
	}
	var multilingual *Multilingual
	if s.owner != nil {
		multilingual = s.owner.multilingual
	}
	// uglyURLs may be configured as a bool, a string (from the CLI flag) or a
	// per-section map; normalize all three into a predicate on *Page.
	var uglyURLs = func(p *Page) bool {
		return false
	}
	v := s.Cfg.Get("uglyURLs")
	if v != nil {
		switch vv := v.(type) {
		case bool:
			uglyURLs = func(p *Page) bool {
				return vv
			}
		case string:
			// Is what be get from CLI (--uglyURLs)
			vvv := cast.ToBool(vv)
			uglyURLs = func(p *Page) bool {
				return vvv
			}
		default:
			m := cast.ToStringMapBool(v)
			uglyURLs = func(p *Page) bool {
				return m[p.Section()]
			}
		}
	}
	siteConfig, err := loadSiteConfig(lang)
	if err != nil {
		return err
	}
	s.Info = SiteInfo{
		Title: lang.GetString("title"),
		Author: lang.GetStringMap("author"),
		Social: lang.GetStringMapString("social"),
		LanguageCode: lang.GetString("languageCode"),
		Copyright: lang.GetString("copyright"),
		multilingual: multilingual,
		Language: lang,
		LanguagePrefix: languagePrefix,
		Languages: languages,
		defaultContentLanguageInSubdir: defaultContentInSubDir,
		sectionPagesMenu: lang.GetString("sectionPagesMenu"),
		BuildDrafts: s.Cfg.GetBool("buildDrafts"),
		canonifyURLs: s.Cfg.GetBool("canonifyURLs"),
		relativeURLs: s.Cfg.GetBool("relativeURLs"),
		uglyURLs: uglyURLs,
		preserveTaxonomyNames: lang.GetBool("preserveTaxonomyNames"),
		PageCollections: s.PageCollections,
		Menus: &s.Menus,
		Params: params,
		Permalinks: permalinks,
		Data: &s.Data,
		owner: s.owner,
		s: s,
		Config: siteConfig,
		// TODO(bep) make this Menu and similar into delegate methods on SiteInfo
		Taxonomies: s.Taxonomies,
	}
	// Expose the RSS link for the home page when RSS output is enabled.
	rssOutputFormat, found := s.outputFormats[KindHome].GetByName(output.RSSFormat.Name)
	if found {
		s.Info.RSSLink = s.permalink(rssOutputFormat.BaseFilename())
	}
	return nil
}
// isI18nEvent reports whether the event concerns a file in the i18n filesystem.
func (s *Site) isI18nEvent(e fsnotify.Event) bool {
	return s.BaseFs.SourceFilesystems.IsI18n(e.Name)
}
// isDataDirEvent reports whether the event concerns a file in the data filesystem.
func (s *Site) isDataDirEvent(e fsnotify.Event) bool {
	return s.BaseFs.SourceFilesystems.IsData(e.Name)
}
// isLayoutDirEvent reports whether the event concerns a file in the layouts filesystem.
func (s *Site) isLayoutDirEvent(e fsnotify.Event) bool {
	return s.BaseFs.SourceFilesystems.IsLayout(e.Name)
}
// absContentDir returns the absolute path of the site's content directory.
func (s *Site) absContentDir() string {
	return s.PathSpec.AbsPathify(s.PathSpec.ContentDir)
}
// isContentDirEvent reports whether the event concerns a file in the content filesystem.
func (s *Site) isContentDirEvent(e fsnotify.Event) bool {
	return s.BaseFs.IsContent(e.Name)
}
// contentCaptureResultHandler dispatches captured content files to the
// per-language content processors, falling back to the default content
// language's processor when no language-specific one exists.
type contentCaptureResultHandler struct {
	defaultContentProcessor *siteContentProcessor
	contentProcessors map[string]*siteContentProcessor
}
// getContentProcessor returns the processor for lang, or the default
// processor when that language has none.
func (c *contentCaptureResultHandler) getContentProcessor(lang string) *siteContentProcessor {
	if proc, found := c.contentProcessors[lang]; found {
		return proc
	}
	return c.defaultContentProcessor
}
// handleSingles routes each single (non-bundle) content file to the processor
// for its language.
func (c *contentCaptureResultHandler) handleSingles(fis ...*fileInfo) {
	for _, f := range fis {
		c.getContentProcessor(f.Lang()).processSingle(f)
	}
}
// handleBundles routes each captured bundle to the processor for the bundle's
// language.
func (c *contentCaptureResultHandler) handleBundles(d *bundleDirs) {
	for _, bundle := range d.bundles {
		c.getContentProcessor(bundle.fi.Lang()).processBundle(bundle)
	}
}
// handleCopyFiles forwards asset (copy-only) files to every language's
// content processor.
func (c *contentCaptureResultHandler) handleCopyFiles(files ...pathLangFile) {
	for _, proc := range c.contentProcessors {
		proc.processAssets(files)
	}
}
// readAndProcessContent captures the content source (all files, or only the
// given filenames on a partial rebuild) and feeds the results to one content
// processor per enabled language, each running in its own goroutine.
func (s *Site) readAndProcessContent(filenames ...string) error {
	ctx := context.Background()
	g, ctx := errgroup.WithContext(ctx)
	defaultContentLanguage := s.SourceSpec.DefaultContentLanguage
	contentProcessors := make(map[string]*siteContentProcessor)
	var defaultContentProcessor *siteContentProcessor
	sites := s.owner.langSite()
	for k, v := range sites {
		if v.Language.Disabled {
			continue
		}
		proc := newSiteContentProcessor(ctx, len(filenames) > 0, v)
		contentProcessors[k] = proc
		if k == defaultContentLanguage {
			defaultContentProcessor = proc
		}
		// proc is declared inside the loop, so each closure captures its own
		// language's processor.
		g.Go(func() error {
			return proc.process(ctx)
		})
	}
	var (
		handler captureResultHandler
		bundleMap *contentChangeMap
	)
	mainHandler := &contentCaptureResultHandler{contentProcessors: contentProcessors, defaultContentProcessor: defaultContentProcessor}
	sourceSpec := source.NewSourceSpec(s.PathSpec, s.BaseFs.Content.Fs)
	if s.running() {
		// Need to track changes.
		bundleMap = s.owner.ContentChanges
		handler = &captureResultHandlerChain{handlers: []captureBundlesHandler{mainHandler, bundleMap}}
	} else {
		handler = mainHandler
	}
	c := newCapturer(s.Log, sourceSpec, handler, bundleMap, filenames...)
	err1 := c.capture()
	// Close the processors' input so the goroutines above can drain and exit
	// before we wait on the group.
	for _, proc := range contentProcessors {
		proc.closeInput()
	}
	err2 := g.Wait()
	// The capture error takes precedence over any processing error.
	if err1 != nil {
		return err1
	}
	return err2
}
// buildSiteMeta assembles the taxonomies and then applies the per-kind values
// to every page; it is a no-op on a site with no pages.
func (s *Site) buildSiteMeta() (err error) {
	defer s.timerStep("build Site meta")

	if len(s.Pages) == 0 {
		return
	}

	s.assembleTaxonomies()

	// setValuesForKind depends on the taxonomies assembled above.
	for _, page := range s.AllPages {
		page.setValuesForKind(s)
	}
	return
}
// getMenusFromConfig builds the Menus defined in the site configuration's
// "menu" section; malformed entries are logged and skipped.
func (s *Site) getMenusFromConfig() Menus {
	ret := Menus{}

	menus := s.Language.GetStringMap("menu")
	if menus == nil {
		return ret
	}

	for name, menu := range menus {
		m, err := cast.ToSliceE(menu)
		if err != nil {
			s.Log.ERROR.Printf("unable to process menus in site config\n")
			s.Log.ERROR.Println(err)
			continue
		}
		for _, entry := range m {
			s.Log.DEBUG.Printf("found menu: %q, in site config\n", name)

			menuEntry := MenuEntry{Menu: name}
			ime, err := cast.ToStringMapE(entry)
			if err != nil {
				s.Log.ERROR.Printf("unable to process menus in site config\n")
				s.Log.ERROR.Println(err)
			}

			menuEntry.marshallMap(ime)
			menuEntry.URL = s.Info.createNodeMenuEntryURL(menuEntry.URL)

			if ret[name] == nil {
				ret[name] = &Menu{}
			}
			*ret[name] = ret[name].add(&menuEntry)
		}
	}

	return ret
}
// createNodeMenuEntryURL normalizes an absolute menu entry URL so it matches
// the node URLs; relative entries are returned untouched.
func (s *SiteInfo) createNodeMenuEntryURL(in string) string {
	if !strings.HasPrefix(in, "/") {
		return in
	}
	// make it match the nodes
	out := helpers.SanitizeURLKeepTrailingSlash(s.s.PathSpec.URLize(in))
	if !s.canonifyURLs {
		out = helpers.AddContextRoot(s.s.PathSpec.BaseURL.String(), out)
	}
	return out
}
// assembleMenus builds s.Menus from three sources — the site config, the
// optional sectionPagesMenu, and per-page menu front matter — then links
// children to parents and assembles the top level of each menu tree.
func (s *Site) assembleMenus() {
	s.Menus = Menus{}
	type twoD struct {
		MenuName, EntryName string
	}
	flat := map[twoD]*MenuEntry{}
	children := map[twoD]Menu{}
	// add menu entries from config to flat hash
	menuConfig := s.getMenusFromConfig()
	for name, menu := range menuConfig {
		for _, me := range *menu {
			flat[twoD{name, me.KeyName()}] = me
		}
	}
	sectionPagesMenu := s.Info.sectionPagesMenu
	pages := s.Pages
	if sectionPagesMenu != "" {
		for _, p := range pages {
			if p.Kind == KindSection {
				// From Hugo 0.22 we have nested sections, but until we get a
				// feel of how that would work in this setting, let us keep
				// this menu for the top level only.
				id := p.Section()
				// Config-defined entries win over auto-generated section entries.
				if _, ok := flat[twoD{sectionPagesMenu, id}]; ok {
					continue
				}
				me := MenuEntry{Identifier: id,
					Name: p.LinkTitle(),
					Weight: p.Weight,
					URL: p.RelPermalink()}
				flat[twoD{sectionPagesMenu, me.KeyName()}] = &me
			}
		}
	}
	// Add menu entries provided by pages
	for _, p := range pages {
		for name, me := range p.Menus() {
			if _, ok := flat[twoD{name, me.KeyName()}]; ok {
				s.Log.ERROR.Printf("Two or more menu items have the same name/identifier in Menu %q: %q.\nRename or set an unique identifier.\n", name, me.KeyName())
				continue
			}
			flat[twoD{name, me.KeyName()}] = me
		}
	}
	// Create Children Menus First
	for _, e := range flat {
		if e.Parent != "" {
			children[twoD{e.Menu, e.Parent}] = children[twoD{e.Menu, e.Parent}].add(e)
		}
	}
	// Placing Children in Parents (in flat)
	for p, childmenu := range children {
		_, ok := flat[twoD{p.MenuName, p.EntryName}]
		if !ok {
			// if parent does not exist, create one without a URL
			flat[twoD{p.MenuName, p.EntryName}] = &MenuEntry{Name: p.EntryName, URL: ""}
		}
		flat[twoD{p.MenuName, p.EntryName}].Children = childmenu
	}
	// Assembling Top Level of Tree
	for menu, e := range flat {
		if e.Parent == "" {
			_, ok := s.Menus[menu.MenuName]
			if !ok {
				s.Menus[menu.MenuName] = &Menu{}
			}
			*s.Menus[menu.MenuName] = s.Menus[menu.MenuName].add(e)
		}
	}
}
// getTaxonomyKey returns the canonical key for a taxonomy value: the value
// itself when taxonomy names are preserved, otherwise its sanitized path form.
func (s *Site) getTaxonomyKey(key string) string {
	if !s.Info.preserveTaxonomyNames {
		return s.PathSpec.MakePathSanitized(key)
	}
	// Keep as is
	return key
}
// We need to create the top level taxonomy early in the build process
// to be able to determine the page Kind correctly.
func (s *Site) createTaxonomiesEntries() {
	s.Taxonomies = make(TaxonomyList)
	for _, plural := range s.Language.GetStringMapString("taxonomies") {
		s.Taxonomies[plural] = make(Taxonomy)
	}
}
// assembleTaxonomies fills s.Taxonomies from each page's front matter params,
// weighting entries by the optional "<plural>_weight" param, sorting each
// term's pages and tracking original (unsanitized) keys when taxonomy names
// are preserved.
func (s *Site) assembleTaxonomies() {
	s.taxonomiesPluralSingular = make(map[string]string)
	s.taxonomiesOrigKey = make(map[string]string)
	taxonomies := s.Language.GetStringMapString("taxonomies")
	s.Log.INFO.Printf("found taxonomies: %#v\n", taxonomies)
	for singular, plural := range taxonomies {
		s.taxonomiesPluralSingular[plural] = singular
		for _, p := range s.Pages {
			vals := p.getParam(plural, !s.Info.preserveTaxonomyNames)
			weight := p.getParamToLower(plural + "_weight")
			if weight == nil {
				weight = 0
			}
			if vals != nil {
				// The param may be a list of values or a single string.
				if v, ok := vals.([]string); ok {
					for _, idx := range v {
						x := WeightedPage{weight.(int), p}
						s.Taxonomies[plural].add(s.getTaxonomyKey(idx), x)
						if s.Info.preserveTaxonomyNames {
							// Need to track the original
							s.taxonomiesOrigKey[fmt.Sprintf("%s-%s", plural, s.PathSpec.MakePathSanitized(idx))] = idx
						}
					}
				} else if v, ok := vals.(string); ok {
					x := WeightedPage{weight.(int), p}
					s.Taxonomies[plural].add(s.getTaxonomyKey(v), x)
					if s.Info.preserveTaxonomyNames {
						// Need to track the original
						s.taxonomiesOrigKey[fmt.Sprintf("%s-%s", plural, s.PathSpec.MakePathSanitized(v))] = v
					}
				} else {
					s.Log.ERROR.Printf("Invalid %s in %s\n", plural, p.File.Path())
				}
			}
		}
		for k := range s.Taxonomies[plural] {
			s.Taxonomies[plural][k].Sort()
		}
	}
	s.Info.Taxonomies = s.Taxonomies
}
// Prepare site for a new full build: reset the related-docs index, rebuild
// the page collections, zero the draft/future/expired counters, reset the
// global page cache and clear per-page section/parent/scratch state.
func (s *Site) resetBuildState() {
	s.relatedDocsHandler = newSearchIndexHandler(s.relatedDocsHandler.cfg)
	s.PageCollections = newPageCollectionsFromPages(s.rawAllPages)
	// TODO(bep) get rid of this double
	s.Info.PageCollections = s.PageCollections
	s.draftCount = 0
	s.futureCount = 0
	s.expiredCount = 0
	spc = newPageCache()
	for _, p := range s.rawAllPages {
		p.subSections = Pages{}
		p.parent = nil
		p.scratch = maps.NewScratch()
		p.mainPageOutput = nil
	}
}
// kindFromSections derives a page Kind from its section path: taxonomy pages
// live under a configured taxonomy's plural name, everything else is a section.
func (s *Site) kindFromSections(sections []string) string {
	if len(sections) == 0 {
		return KindSection
	}
	if _, isTaxonomy := s.Taxonomies[sections[0]]; isTaxonomy {
		// The taxonomy root lists terms; deeper paths are single-term pages.
		if len(sections) > 1 {
			return KindTaxonomy
		}
		return KindTaxonomyTerm
	}
	return KindSection
}
// layouts returns the candidate layout names for the given page output.
func (s *Site) layouts(p *PageOutput) ([]string, error) {
	return s.layoutHandler.For(p.layoutDescriptor, p.outputFormat)
}
// preparePages readies every page's layouts and data for rendering,
// collecting all failures into a single error.
func (s *Site) preparePages() error {
	var errs []error

	for _, p := range s.Pages {
		if err := p.prepareLayouts(); err != nil {
			errs = append(errs, err)
		}
		if err := p.prepareData(s); err != nil {
			errs = append(errs, err)
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf("Prepare pages failed: %.100q…", errs)
	}
	return nil
}
// errorCollator drains results, joins all non-nil error messages with
// newlines into a single error (nil when none occurred), sends it to errs and
// closes errs.
func errorCollator(results <-chan error, errs chan<- error) {
	var msgs []string
	for err := range results {
		if err != nil {
			msgs = append(msgs, err.Error())
		}
	}
	if len(msgs) > 0 {
		errs <- errors.New(strings.Join(msgs, "\n"))
	} else {
		errs <- nil
	}
	close(errs)
}
// appendThemeTemplates expands a layout list with theme variants: project
// templates first, then "theme/"-prefixed copies, then internal templates.
// Without a theme the input is returned unchanged.
func (s *Site) appendThemeTemplates(in []string) []string {
	if !s.PathSpec.ThemeSet() {
		return in
	}

	out := []string{}
	// First place all non internal templates
	for _, name := range in {
		if !strings.HasPrefix(name, "_internal/") {
			out = append(out, name)
		}
	}
	// Then place theme templates with the same names
	for _, name := range in {
		if !strings.HasPrefix(name, "_internal/") {
			out = append(out, "theme/"+name)
		}
	}
	// Lastly place internal templates
	for _, name := range in {
		if strings.HasPrefix(name, "_internal/") {
			out = append(out, name)
		}
	}
	return out
}
// GetPage looks up a page of a given type for the given ref.
// In Hugo <= 0.44 you had to add Page Kind (section, home) etc. as the first
// argument and then either a unix styled path (with or without a leading slash))
// or path elements separated.
// When we now remove the Kind from this API, we need to make the transition as painless
// as possible for existing sites. Most sites will use {{ .Site.GetPage "section" "my/section" }},
// i.e. 2 arguments, so we test for that.
func (s *SiteInfo) GetPage(ref ...string) (*Page, error) {
	var refs []string
	for _, r := range ref {
		// A common construct in the wild is
		// .Site.GetPage "home" "" or
		// .Site.GetPage "home" "/"
		if r != "" && r != "/" {
			refs = append(refs, r)
		}
	}

	if len(refs) > 2 {
		// This was allowed in Hugo <= 0.44, but we cannot support this with the
		// new API. This should be the most unusual case.
		return nil, fmt.Errorf(`too many arguments to .Site.GetPage: %v. Use lookups on the form {{ .Site.GetPage "/posts/mypage-md" }}`, ref)
	}

	var key string
	switch {
	case len(refs) == 0 || refs[0] == KindHome:
		key = "/"
	case len(refs) == 1:
		key = refs[0]
	default:
		// Two arguments: the first was the legacy Kind; use the path.
		key = refs[1]
	}

	key = filepath.ToSlash(key)
	if !strings.HasPrefix(key, "/") {
		key = "/" + key
	}

	return s.getPageNew(nil, key)
}
// permalinkForOutputFormat builds an absolute permalink for link, switching
// the base URL's protocol when the output format defines one.
func (s *Site) permalinkForOutputFormat(link string, f output.Format) (string, error) {
	var baseURL string
	if f.Protocol != "" {
		withProtocol, err := s.PathSpec.BaseURL.WithProtocol(f.Protocol)
		if err != nil {
			return "", err
		}
		baseURL = withProtocol
	} else {
		baseURL = s.PathSpec.BaseURL.String()
	}
	return s.PathSpec.PermalinkForBaseURL(link, baseURL), nil
}
// permalink turns a site-relative link into an absolute permalink using the
// configured base URL.
func (s *Site) permalink(link string) string {
	return s.PathSpec.PermalinkForBaseURL(link, s.PathSpec.BaseURL.String())
}
// renderAndWriteXML renders d with the given layouts, prepends the XML
// declaration, absolutizes URLs and publishes the result to dest.
// Render errors are logged as warnings and swallowed.
func (s *Site) renderAndWriteXML(statCounter *uint64, name string, dest string, d interface{}, layouts ...string) error {
	s.Log.DEBUG.Printf("Render XML for %q to %q", name, dest)
	renderBuffer := bp.GetBuffer()
	defer bp.PutBuffer(renderBuffer)
	renderBuffer.WriteString("<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\" ?>\n")

	if err := s.renderForLayouts(name, d, renderBuffer, layouts...); err != nil {
		helpers.DistinctWarnLog.Println(err)
		return nil
	}

	outBuffer := bp.GetBuffer()
	defer bp.PutBuffer(outBuffer)

	var path []byte
	if s.Info.relativeURLs {
		path = []byte(helpers.GetDottedRelativePath(dest))
	} else {
		// Absolute URLs need a trailing-slash-terminated base.
		base := s.PathSpec.BaseURL.String()
		if !strings.HasSuffix(base, "/") {
			base += "/"
		}
		path = []byte(base)
	}

	transformer := transform.NewChain(transform.AbsURLInXML)
	if err := transformer.Apply(outBuffer, renderBuffer, path); err != nil {
		s.DistinctErrorLog.Println(err)
		return nil
	}

	return s.publish(statCounter, dest, outBuffer)
}
// renderAndWritePage renders a single page output with the given layouts,
// applies HTML transforms (absolute URLs, live-reload and generator-tag
// injection) and publishes the result to dest. Render errors are logged as
// warnings and swallowed; an empty render publishes nothing.
func (s *Site) renderAndWritePage(statCounter *uint64, name string, dest string, p *PageOutput, layouts ...string) error {
	renderBuffer := bp.GetBuffer()
	defer bp.PutBuffer(renderBuffer)
	if err := s.renderForLayouts(p.Kind, p, renderBuffer, layouts...); err != nil {
		helpers.DistinctWarnLog.Println(err)
		return nil
	}
	if renderBuffer.Len() == 0 {
		return nil
	}
	outBuffer := bp.GetBuffer()
	defer bp.PutBuffer(outBuffer)
	transformLinks := transform.NewEmptyTransforms()
	isHTML := p.outputFormat.IsHTML
	if isHTML {
		if s.Info.relativeURLs || s.Info.canonifyURLs {
			transformLinks = append(transformLinks, transform.AbsURL)
		}
		if s.running() && s.Cfg.GetBool("watch") && !s.Cfg.GetBool("disableLiveReload") {
			transformLinks = append(transformLinks, transform.LiveReloadInject(s.Cfg.GetInt("liveReloadPort")))
		}
		// For performance reasons we only inject the Hugo generator tag on the home page.
		if p.IsHome() {
			if !s.Cfg.GetBool("disableHugoGeneratorInject") {
				transformLinks = append(transformLinks, transform.HugoGeneratorInject)
			}
		}
	}
	// The path passed to the URL transformer: relative dots for relative
	// URLs, the slash-terminated base URL when canonifying.
	var path []byte
	if s.Info.relativeURLs {
		path = []byte(helpers.GetDottedRelativePath(dest))
	} else if s.Info.canonifyURLs {
		url := s.PathSpec.BaseURL.String()
		if !strings.HasSuffix(url, "/") {
			url += "/"
		}
		path = []byte(url)
	}
	transformer := transform.NewChain(transformLinks...)
	if err := transformer.Apply(outBuffer, renderBuffer, path); err != nil {
		s.DistinctErrorLog.Println(err)
		return nil
	}
	return s.publish(statCounter, dest, outBuffer)
}
// renderForLayouts executes the first matching template from layouts against
// d, writing the output to w. Panics during execution are recovered and
// logged; outside server/test mode both panics and execution errors terminate
// the process.
func (s *Site) renderForLayouts(name string, d interface{}, w io.Writer, layouts ...string) (err error) {
	var templ tpl.Template
	defer func() {
		if r := recover(); r != nil {
			templName := ""
			if templ != nil {
				templName = templ.Name()
			}
			s.DistinctErrorLog.Printf("Failed to render %q: %s", templName, r)
			s.DistinctErrorLog.Printf("Stack Trace:\n%s", stackTrace(1200))
			// TOD(bep) we really need to fix this. Also see below.
			if !s.running() && !testMode {
				os.Exit(-1)
			}
		}
	}()
	templ = s.findFirstTemplate(layouts...)
	if templ == nil {
		return fmt.Errorf("[%s] Unable to locate layout for %q: %s\n", s.Language.Lang, name, layouts)
	}
	if err = templ.Execute(w, d); err != nil {
		// Behavior here should be dependent on if running in server or watch mode.
		if p, ok := d.(*PageOutput); ok {
			if p.File != nil {
				s.DistinctErrorLog.Printf("Error while rendering %q in %q: %s", name, p.File.Dir(), err)
			} else {
				s.DistinctErrorLog.Printf("Error while rendering %q: %s", name, err)
			}
		} else {
			s.DistinctErrorLog.Printf("Error while rendering %q: %s", name, err)
		}
		if !s.running() && !testMode {
			// TODO(bep) check if this can be propagated
			os.Exit(-1)
		} else if testMode {
			return
		}
	}
	return
}
// findFirstTemplate returns the first layout name that resolves to a
// registered template, or nil when none match.
func (s *Site) findFirstTemplate(layouts ...string) tpl.Template {
	for _, name := range layouts {
		if templ, found := s.Tmpl.Lookup(name); found {
			return templ
		}
	}
	return nil
}
// publish writes the rendered content to the publish filesystem and bumps the
// given processing-stats counter.
func (s *Site) publish(statCounter *uint64, path string, r io.Reader) (err error) {
	s.PathSpec.ProcessingStats.Incr(statCounter)
	return helpers.WriteToDisk(filepath.Clean(path), r, s.BaseFs.PublishFs)
}
// getGoMaxProcs returns the processor count configured via the GOMAXPROCS
// environment variable, falling back to 1 when it is unset, unparsable or
// non-positive.
func getGoMaxProcs() int {
	if gmp := os.Getenv("GOMAXPROCS"); gmp != "" {
		// Bugfix: the parsed value was previously returned when err != nil,
		// yielding 0 on bad input and ignoring valid values.
		if p, err := strconv.Atoi(gmp); err == nil && p > 0 {
			return p
		}
	}
	return 1
}
// newNodePage creates a new node (non-content-file) page of the given Kind
// for the given section path, wired to this site and its output formats.
func (s *Site) newNodePage(typ string, sections ...string) *Page {
	p := &Page{
		language: s.Language,
		pageInit: &pageInit{},
		pageContentInit: &pageContentInit{},
		Kind: typ,
		Source: Source{File: &source.FileInfo{}},
		data: make(map[string]interface{}),
		Site: &s.Info,
		sections: sections,
		s: s}
	p.outputFormats = p.s.outputFormats[p.Kind]
	return p
}
// newHomePage creates the home node page, titled with the site title and
// carrying an (initially empty) page list.
func (s *Site) newHomePage() *Page {
	home := s.newNodePage(KindHome)
	home.title = s.Info.Title
	pages := Pages{}
	home.data["Pages"] = pages
	home.Pages = pages
	return home
}
// newTaxonomyPage creates a node page for a single taxonomy term (e.g. one tag).
func (s *Site) newTaxonomyPage(plural, key string) *Page {
	p := s.newNodePage(KindTaxonomy, plural, key)
	if s.Info.preserveTaxonomyNames {
		// Keep (mostly) as is in the title
		// We make the first character upper case, mostly because
		// it is easier to reason about in the tests.
		p.title = helpers.FirstUpper(key)
		// NOTE(review): the sanitized key assigned here is never read before
		// the function returns — confirm whether it was meant to be used.
		key = s.PathSpec.MakePathSanitized(key)
	} else {
		p.title = strings.Replace(s.titleFunc(key), "-", " ", -1)
	}
	return p
}
// newSectionPage creates a node page for a content section, optionally
// pluralizing the title per the pluralizeListTitles setting.
func (s *Site) newSectionPage(name string) *Page {
	p := s.newNodePage(KindSection, name)

	title := helpers.FirstUpper(name)
	if s.Cfg.GetBool("pluralizeListTitles") {
		title = inflect.Pluralize(title)
	}
	p.title = title

	return p
}
// newTaxonomyTermsPage creates the terms list page for a taxonomy, e.g.
// /tags, titled via the site's configured title func.
func (s *Site) newTaxonomyTermsPage(plural string) *Page {
	p := s.newNodePage(KindTaxonomyTerm, plural)
	p.title = s.titleFunc(plural)
	return p
}
|
package notification
import (
"encoding/json"
"github.com/koding/logging"
"github.com/koding/rabbitmq"
"github.com/koding/worker"
"github.com/streadway/amqp"
"socialapi/models"
)
// Action is an event handler bound to a routing key; it receives the worker
// controller and the raw message payload.
type Action func(*NotificationWorkerController, []byte) error
// NotificationWorkerController consumes notification events from RabbitMQ
// and dispatches them to the handler registered for each event name.
type NotificationWorkerController struct {
	routes  map[string]Action // event name -> handler
	log     logging.Logger
	rmqConn *amqp.Connection
}
// DefaultErrHandler is invoked when processing a delivery fails: it logs the
// error and negatively acknowledges the message so it is redelivered.
func (n *NotificationWorkerController) DefaultErrHandler(delivery amqp.Delivery, err error) {
	// Fixed typo in the log message ("occured" -> "occurred").
	n.log.Error("an error occurred putting message back to queue", err)
	// Nack args: multiple=false (only this delivery), requeue=true.
	delivery.Nack(false, true)
}
// NewNotificationWorkerController connects to RabbitMQ and returns a
// controller with handlers registered for the events it knows about.
func NewNotificationWorkerController(rmq *rabbitmq.RabbitMQ, log logging.Logger) (*NotificationWorkerController, error) {
	rmqConn, err := rmq.Connect("NewNotificationWorkerController")
	if err != nil {
		return nil, err
	}
	nwc := &NotificationWorkerController{
		log:     log,
		rmqConn: rmqConn.Conn(),
	}
	// Method expressions: each route value has type Action.
	routes := map[string]Action{
		"channel_message_created": (*NotificationWorkerController).CreateReplyNotification,
		"interaction_created":     (*NotificationWorkerController).CreateInteractionNotification,
	}
	nwc.routes = routes
	return nwc, nil
}
// HandleEvent dispatches the raw payload to the handler registered for the
// given event name; unknown events yield worker.HandlerNotFoundErr.
// (Original note: copy/paste — duplicated from a sibling worker package.)
func (n *NotificationWorkerController) HandleEvent(event string, data []byte) error {
	n.log.Debug("New Event Received %s", event)
	handler, ok := n.routes[event]
	if !ok {
		return worker.HandlerNotFoundErr
	}
	return handler(n, data)
}
// CreateReplyNotification decodes a channel-message event, looks up the
// message it replies to and creates a reply notification targeting the
// parent message.
func (n *NotificationWorkerController) CreateReplyNotification(data []byte) error {
	cm, err := mapMessageToChannelMessage(data)
	if err != nil {
		return err
	}
	mr := models.NewMessageReply()
	mr.ReplyId = cm.Id
	if err := mr.FetchByReplyId(); err != nil {
		return err
	}
	rn := models.NewReplyNotification()
	rn.TargetId = mr.MessageId
	// hack it is
	// NOTE(review): only creates the notification when InitialChannelId is
	// zero — presumably filtering reply messages from direct channel posts;
	// confirm against the message producer.
	if cm.InitialChannelId == 0 {
		if err := models.CreateNotification(rn); err != nil {
			return err
		}
	}
	// TODO send notification message to user
	return nil
}
// CreateInteractionNotification decodes an interaction event (e.g. a like)
// and creates a notification targeting the interacted-with message.
func (n *NotificationWorkerController) CreateInteractionNotification(data []byte) error {
	i, err := mapMessageToInteraction(data)
	if err != nil {
		return err
	}
	// a bit error prone since we take interaction type as notification type
	in := models.NewInteractionNotification(i.TypeConstant)
	in.TargetId = i.MessageId
	if err := models.CreateNotification(in); err != nil {
		return err
	}
	// TODO send notification message to user
	return nil
}
// mapMessageToChannelMessage decodes a raw event payload into a
// ChannelMessage. (Originally copy/pasted from the realtime package.)
func mapMessageToChannelMessage(data []byte) (*models.ChannelMessage, error) {
	message := models.NewChannelMessage()
	err := json.Unmarshal(data, message)
	if err != nil {
		return nil, err
	}
	return message, nil
}
// mapMessageToInteraction decodes a raw event payload into an Interaction.
// (Originally copy/pasted from the realtime package.)
func mapMessageToInteraction(data []byte) (*models.Interaction, error) {
	interaction := models.NewInteraction()
	err := json.Unmarshal(data, interaction)
	if err != nil {
		return nil, err
	}
	return interaction, nil
}
SocialApi: message types are updated with api schema
package notification
import (
"encoding/json"
"github.com/koding/logging"
"github.com/koding/rabbitmq"
"github.com/koding/worker"
"github.com/streadway/amqp"
"socialapi/models"
)
// Action is an event handler bound to a routing key; it receives the worker
// controller and the raw message payload.
type Action func(*NotificationWorkerController, []byte) error
// NotificationWorkerController consumes notification events from RabbitMQ
// and dispatches them to the handler registered for each event name.
type NotificationWorkerController struct {
	routes  map[string]Action // event name -> handler
	log     logging.Logger
	rmqConn *amqp.Connection
}
// DefaultErrHandler is invoked when processing a delivery fails: it logs the
// error and negatively acknowledges the message so it is redelivered.
func (n *NotificationWorkerController) DefaultErrHandler(delivery amqp.Delivery, err error) {
	// Fixed typo in the log message ("occured" -> "occurred").
	n.log.Error("an error occurred putting message back to queue", err)
	// Nack args: multiple=false (only this delivery), requeue=true.
	delivery.Nack(false, true)
}
// NewNotificationWorkerController connects to RabbitMQ and returns a
// controller with handlers registered for the "api."-prefixed event names.
func NewNotificationWorkerController(rmq *rabbitmq.RabbitMQ, log logging.Logger) (*NotificationWorkerController, error) {
	rmqConn, err := rmq.Connect("NewNotificationWorkerController")
	if err != nil {
		return nil, err
	}
	nwc := &NotificationWorkerController{
		log:     log,
		rmqConn: rmqConn.Conn(),
	}
	// Method expressions: each route value has type Action.
	routes := map[string]Action{
		"api.message_reply_created": (*NotificationWorkerController).CreateReplyNotification,
		"api.interaction_created":   (*NotificationWorkerController).CreateInteractionNotification,
	}
	nwc.routes = routes
	return nwc, nil
}
// HandleEvent dispatches the raw payload to the handler registered for the
// given event name; unknown events yield worker.HandlerNotFoundErr.
// (Original note: copy/paste — duplicated from a sibling worker package.)
func (n *NotificationWorkerController) HandleEvent(event string, data []byte) error {
	n.log.Debug("New Event Received %s", event)
	handler, ok := n.routes[event]
	if !ok {
		return worker.HandlerNotFoundErr
	}
	return handler(n, data)
}
// CreateReplyNotification decodes a message-reply event, looks up the
// message it replies to and creates a reply notification targeting the
// parent message.
func (n *NotificationWorkerController) CreateReplyNotification(data []byte) error {
	cm, err := mapMessageToChannelMessage(data)
	if err != nil {
		return err
	}
	mr := models.NewMessageReply()
	mr.ReplyId = cm.Id
	if err := mr.FetchByReplyId(); err != nil {
		return err
	}
	rn := models.NewReplyNotification()
	rn.TargetId = mr.MessageId
	// hack it is
	// NOTE(review): only creates the notification when InitialChannelId is
	// zero — presumably filtering reply messages from direct channel posts;
	// confirm against the message producer.
	if cm.InitialChannelId == 0 {
		if err := models.CreateNotification(rn); err != nil {
			return err
		}
	}
	// TODO send notification message to user
	return nil
}
// CreateInteractionNotification decodes an interaction event (e.g. a like)
// and creates a notification targeting the interacted-with message.
func (n *NotificationWorkerController) CreateInteractionNotification(data []byte) error {
	i, err := mapMessageToInteraction(data)
	if err != nil {
		return err
	}
	// a bit error prone since we take interaction type as notification type
	in := models.NewInteractionNotification(i.TypeConstant)
	in.TargetId = i.MessageId
	if err := models.CreateNotification(in); err != nil {
		return err
	}
	// TODO send notification message to user
	return nil
}
// mapMessageToChannelMessage decodes a raw event payload into a
// ChannelMessage. (Copy/pasted from the realtime package.)
func mapMessageToChannelMessage(data []byte) (*models.ChannelMessage, error) {
	cm := models.NewChannelMessage()
	if err := json.Unmarshal(data, cm); err != nil {
		return nil, err
	}
	return cm, nil
}
// mapMessageToInteraction decodes a raw event payload into an Interaction.
// (Copy/pasted from the realtime package.)
func mapMessageToInteraction(data []byte) (*models.Interaction, error) {
	i := models.NewInteraction()
	if err := json.Unmarshal(data, i); err != nil {
		return nil, err
	}
	return i, nil
}
|
// Copyright 2019 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hugolib
import (
"fmt"
"html/template"
"io"
"log"
"mime"
"net/url"
"os"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/gohugoio/hugo/resources/resource"
"github.com/gohugoio/hugo/markup/converter"
"github.com/gohugoio/hugo/hugofs/files"
"github.com/gohugoio/hugo/common/maps"
"github.com/pkg/errors"
"github.com/gohugoio/hugo/common/text"
"github.com/gohugoio/hugo/common/hugo"
"github.com/gohugoio/hugo/publisher"
_errors "github.com/pkg/errors"
"github.com/gohugoio/hugo/langs"
"github.com/gohugoio/hugo/resources/page"
"github.com/gohugoio/hugo/config"
"github.com/gohugoio/hugo/lazy"
"github.com/gohugoio/hugo/media"
"github.com/fsnotify/fsnotify"
bp "github.com/gohugoio/hugo/bufferpool"
"github.com/gohugoio/hugo/deps"
"github.com/gohugoio/hugo/helpers"
"github.com/gohugoio/hugo/navigation"
"github.com/gohugoio/hugo/output"
"github.com/gohugoio/hugo/related"
"github.com/gohugoio/hugo/resources"
"github.com/gohugoio/hugo/resources/page/pagemeta"
"github.com/gohugoio/hugo/source"
"github.com/gohugoio/hugo/tpl"
"github.com/spf13/afero"
"github.com/spf13/cast"
"github.com/spf13/viper"
)
// Site contains all the information relevant for constructing a static
// site. The basic flow of information is as follows:
//
// 1. A list of Files is parsed and then converted into Pages.
//
// 2. Pages contain sections (based on the file they were generated from),
// aliases and slugs (included in a pages frontmatter) which are the
// various targets that will get generated. There will be canonical
// listing. The canonical path can be overruled based on a pattern.
//
// 3. Taxonomies are created via configuration and will present some aspect of
// the final page and typically a perm url.
//
// 4. All Pages are passed through a template based on their desired
// layout based on numerous different elements.
//
// 5. The entire collection of files is written to disk.
type Site struct {
	// The owning container. When multiple languages, there will be multiple
	// sites.
	h *HugoSites

	*PageCollections

	Taxonomies TaxonomyList

	Sections Taxonomy
	Info     SiteInfo

	layoutHandler *output.LayoutHandler

	language *langs.Language

	siteCfg siteConfigHolder

	// Page kinds (home, section, ...) that should not be rendered.
	disabledKinds map[string]bool

	enableInlineShortcodes bool

	// Output formats defined in site config per Page Kind, or some defaults
	// if not set.
	// Output formats defined in Page front matter will override these.
	outputFormats map[string]output.Formats

	// All the output formats and media types available for this site.
	// These values will be merged from the Hugo defaults, the site config and,
	// finally, the language settings.
	outputFormatsConfig output.Formats
	mediaTypesConfig    media.Types

	siteConfigConfig SiteConfig

	// How to handle page front matter.
	frontmatterHandler pagemeta.FrontMatterHandler

	// We render each site for all the relevant output formats in serial with
	// this rendering context pointing to the current one.
	rc *siteRenderingContext

	// The output formats that we need to render this site in. This slice
	// will be fixed once set.
	// This will be the union of Site.Pages' outputFormats.
	// This slice will be sorted.
	renderFormats output.Formats

	// Logger etc.
	*deps.Deps `json:"-"`

	// The func used to title case titles.
	titleFunc func(s string) string

	relatedDocsHandler *page.RelatedDocsHandler
	siteRefLinker

	publisher publisher.Publisher

	menus navigation.Menus

	// Shortcut to the home page. Note that this may be nil if
	// home page, for some odd reason, is disabled.
	home *pageState

	// The last modification date of this site.
	lastmod time.Time

	// Lazily loaded site dependencies
	init *siteInit
}
// siteConfigHolder groups the per-site settings read from configuration.
type siteConfigHolder struct {
	sitemap          config.Sitemap
	taxonomiesConfig map[string]string
	timeout          time.Duration // content render timeout
	hasCJKLanguage   bool
	enableEmoji      bool
}
// Lazily loaded site dependencies.
type siteInit struct {
	prevNext          *lazy.Init // wires Next/Prev for regular pages
	prevNextInSection *lazy.Init // wires Next/Prev within each section
	menus             *lazy.Init // assembles the site menus
}
// Reset marks all lazy site dependencies as not-yet-initialized so they are
// recomputed on next access (used on rebuilds).
func (init *siteInit) Reset() {
	init.prevNext.Reset()
	init.prevNextInSection.Reset()
	init.menus.Reset()
}
// initInit runs the given lazy initializer, reporting any error as a fatal
// error wrapped with the page context. It returns true on success.
func (s *Site) initInit(init *lazy.Init, pctx pageContext) bool {
	_, err := init.Do()
	if err != nil {
		s.h.FatalError(pctx.wrapError(err))
	}
	return err == nil
}
// prepareInits sets up the lazily evaluated site initializers: next/prev
// links for regular pages, next/prev within sections, and menus.
func (s *Site) prepareInits() {
	s.init = &siteInit{}

	var init lazy.Init

	s.init.prevNext = init.Branch(func() (interface{}, error) {
		regularPages := s.findWorkPagesByKind(page.KindPage)
		for i, p := range regularPages {
			if p.posNextPrev == nil {
				continue
			}
			p.posNextPrev.nextPage = nil
			p.posNextPrev.prevPage = nil
			// Note: "next" points at the element before it in the sorted
			// slice and "prev" at the one after it.
			if i > 0 {
				p.posNextPrev.nextPage = regularPages[i-1]
			}
			if i < len(regularPages)-1 {
				p.posNextPrev.prevPage = regularPages[i+1]
			}
		}
		return nil, nil
	})

	s.init.prevNextInSection = init.Branch(func() (interface{}, error) {
		var rootSection []int
		// TODO(bep) cm attach this to the bucket.
		for i, p1 := range s.workAllPages {
			// Pages directly in the content root are handled separately below.
			if p1.IsPage() && p1.Section() == "" {
				rootSection = append(rootSection, i)
			}
			if p1.IsSection() {
				sectionPages := p1.RegularPages()
				for i, p2 := range sectionPages {
					p2s := p2.(*pageState)
					if p2s.posNextPrevSection == nil {
						continue
					}
					p2s.posNextPrevSection.nextPage = nil
					p2s.posNextPrevSection.prevPage = nil
					if i > 0 {
						p2s.posNextPrevSection.nextPage = sectionPages[i-1]
					}
					if i < len(sectionPages)-1 {
						p2s.posNextPrevSection.prevPage = sectionPages[i+1]
					}
				}
			}
		}
		// Wire next/prev for the root-level pages collected above.
		for i, j := range rootSection {
			p := s.workAllPages[j]
			if i > 0 {
				p.posNextPrevSection.nextPage = s.workAllPages[rootSection[i-1]]
			}
			if i < len(rootSection)-1 {
				p.posNextPrevSection.prevPage = s.workAllPages[rootSection[i+1]]
			}
		}
		return nil, nil
	})

	s.init.menus = init.Branch(func() (interface{}, error) {
		s.assembleMenus()
		return nil, nil
	})
}
// siteRenderingContext carries the output format currently being rendered.
type siteRenderingContext struct {
	output.Format
}
// Menus returns the site menus, assembling them lazily on first access.
func (s *Site) Menus() navigation.Menus {
	s.init.menus.Do()
	return s.menus
}
// initRenderFormats computes the sorted union of all output formats needed
// by this site: those configured in page front matter plus the per-kind
// site defaults.
func (s *Site) initRenderFormats() {
	formatSet := make(map[string]bool)
	formats := output.Formats{}
	// Formats set in page front matter.
	for _, p := range s.workAllPages {
		for _, f := range p.m.configuredOutputFormats {
			if !formatSet[f.Name] {
				formats = append(formats, f)
				formatSet[f.Name] = true
			}
		}
	}

	// Add the per kind configured output formats
	for _, kind := range allKindsInPages {
		if siteFormats, found := s.outputFormats[kind]; found {
			for _, f := range siteFormats {
				if !formatSet[f.Name] {
					formats = append(formats, f)
					formatSet[f.Name] = true
				}
			}
		}
	}

	sort.Sort(formats)
	s.renderFormats = formats
}
// GetRelatedDocsHandler returns the handler used for related-content lookups.
func (s *Site) GetRelatedDocsHandler() *page.RelatedDocsHandler {
	return s.relatedDocsHandler
}
// Language returns the language this site is rendered in.
func (s *Site) Language() *langs.Language {
	return s.language
}
// isEnabled reports whether the given page kind should be rendered for this
// site. Passing the unknown kind is a programming error and panics.
func (s *Site) isEnabled(kind string) bool {
	if kind == kindUnknown {
		panic("Unknown kind")
	}
	disabled := s.disabledKinds[kind]
	return !disabled
}
// reset returns a new Site prepared for rebuild.
// Configuration-derived state is carried over; per-build state (render
// formats, taxonomies etc.) is left at its zero value.
func (s *Site) reset() *Site {
	return &Site{Deps: s.Deps,
		layoutHandler:          output.NewLayoutHandler(),
		disabledKinds:          s.disabledKinds,
		titleFunc:              s.titleFunc,
		relatedDocsHandler:     s.relatedDocsHandler.Clone(),
		siteRefLinker:          s.siteRefLinker,
		outputFormats:          s.outputFormats,
		rc:                     s.rc,
		outputFormatsConfig:    s.outputFormatsConfig,
		frontmatterHandler:     s.frontmatterHandler,
		mediaTypesConfig:       s.mediaTypesConfig,
		language:               s.language,
		h:                      s.h,
		publisher:              s.publisher,
		siteConfigConfig:       s.siteConfigConfig,
		enableInlineShortcodes: s.enableInlineShortcodes,
		init:                   s.init,
		PageCollections:        s.PageCollections,
		siteCfg:                s.siteCfg,
	}
}
// newSite creates a new site with the given configuration.
// It decodes media types, output formats, taxonomies, related-content and
// front matter configuration, honoring language-level overrides, and wires
// up the lazy initializers via prepareInits.
func newSite(cfg deps.DepsCfg) (*Site, error) {
	c := newPageCollections()

	if cfg.Language == nil {
		cfg.Language = langs.NewDefaultLanguage(cfg.Cfg)
	}

	disabledKinds := make(map[string]bool)
	for _, disabled := range cast.ToStringSlice(cfg.Language.Get("disableKinds")) {
		disabledKinds[disabled] = true
	}

	var (
		mediaTypesConfig        []map[string]interface{}
		outputFormatsConfig     []map[string]interface{}
		siteOutputFormatsConfig output.Formats
		siteMediaTypesConfig    media.Types
		err                     error
	)

	// Add language last, if set, so it gets precedence.
	for _, cfg := range []config.Provider{cfg.Cfg, cfg.Language} {
		if cfg.IsSet("mediaTypes") {
			mediaTypesConfig = append(mediaTypesConfig, cfg.GetStringMap("mediaTypes"))
		}
		if cfg.IsSet("outputFormats") {
			outputFormatsConfig = append(outputFormatsConfig, cfg.GetStringMap("outputFormats"))
		}
	}

	siteMediaTypesConfig, err = media.DecodeTypes(mediaTypesConfig...)
	if err != nil {
		return nil, err
	}

	siteOutputFormatsConfig, err = output.DecodeFormats(siteMediaTypesConfig, outputFormatsConfig...)
	if err != nil {
		return nil, err
	}

	outputFormats, err := createSiteOutputFormats(siteOutputFormatsConfig, cfg.Language)
	if err != nil {
		return nil, err
	}

	taxonomies := cfg.Language.GetStringMapString("taxonomies")

	var relatedContentConfig related.Config

	if cfg.Language.IsSet("related") {
		relatedContentConfig, err = related.DecodeConfig(cfg.Language.Get("related"))
		if err != nil {
			return nil, err
		}
	} else {
		// Default related-content config, with a tags index when the site
		// has a "tag" taxonomy.
		relatedContentConfig = related.DefaultConfig
		if _, found := taxonomies["tag"]; found {
			relatedContentConfig.Add(related.IndexConfig{Name: "tags", Weight: 80})
		}
	}

	titleFunc := helpers.GetTitleFunc(cfg.Language.GetString("titleCaseStyle"))

	frontMatterHandler, err := pagemeta.NewFrontmatterHandler(cfg.Logger, cfg.Cfg)
	if err != nil {
		return nil, err
	}

	// Content render timeout: integers are milliseconds, strings are parsed
	// as a time.Duration; invalid values keep the 30s default.
	timeout := 30 * time.Second
	if cfg.Language.IsSet("timeout") {
		switch v := cfg.Language.Get("timeout").(type) {
		case int64:
			timeout = time.Duration(v) * time.Millisecond
		case string:
			d, err := time.ParseDuration(v)
			if err == nil {
				timeout = d
			}
		}
	}

	siteConfig := siteConfigHolder{
		sitemap:          config.DecodeSitemap(config.Sitemap{Priority: -1, Filename: "sitemap.xml"}, cfg.Language.GetStringMap("sitemap")),
		taxonomiesConfig: taxonomies,
		timeout:          timeout,
		hasCJKLanguage:   cfg.Language.GetBool("hasCJKLanguage"),
		enableEmoji:      cfg.Language.Cfg.GetBool("enableEmoji"),
	}

	s := &Site{
		PageCollections:        c,
		layoutHandler:          output.NewLayoutHandler(),
		language:               cfg.Language,
		disabledKinds:          disabledKinds,
		titleFunc:              titleFunc,
		relatedDocsHandler:     page.NewRelatedDocsHandler(relatedContentConfig),
		outputFormats:          outputFormats,
		rc:                     &siteRenderingContext{output.HTMLFormat},
		outputFormatsConfig:    siteOutputFormatsConfig,
		mediaTypesConfig:       siteMediaTypesConfig,
		frontmatterHandler:     frontMatterHandler,
		enableInlineShortcodes: cfg.Language.GetBool("enableInlineShortcodes"),
		siteCfg:                siteConfig,
	}

	s.prepareInits()

	return s, nil
}
// NewSite creates a new site with the given dependency configuration.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
func NewSite(cfg deps.DepsCfg) (*Site, error) {
	s, err := newSite(cfg)
	if err != nil {
		return nil, err
	}

	if err = applyDeps(cfg, s); err != nil {
		return nil, err
	}

	return s, nil
}
// NewSiteDefaultLang creates a new site in the default language.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
// TODO(bep) test refactor -- remove
func NewSiteDefaultLang(withTemplate ...func(templ tpl.TemplateHandler) error) (*Site, error) {
	v := viper.New()
	if err := loadDefaultSettingsFor(v); err != nil {
		return nil, err
	}
	return newSiteForLang(langs.NewDefaultLanguage(v), withTemplate...)
}
// NewEnglishSite creates a new site in English language.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
// TODO(bep) test refactor -- remove
func NewEnglishSite(withTemplate ...func(templ tpl.TemplateHandler) error) (*Site, error) {
	v := viper.New()
	if err := loadDefaultSettingsFor(v); err != nil {
		return nil, err
	}
	return newSiteForLang(langs.NewLanguage("en", v), withTemplate...)
}
// newSiteForLang creates a new site in the given language, applying the
// given template-customization callbacks in order.
func newSiteForLang(lang *langs.Language, withTemplate ...func(templ tpl.TemplateHandler) error) (*Site, error) {
	withTemplates := func(templ tpl.TemplateHandler) error {
		for _, wt := range withTemplate {
			if err := wt(templ); err != nil {
				return err
			}
		}
		return nil
	}

	cfg := deps.DepsCfg{WithTemplate: withTemplates, Cfg: lang}

	return NewSiteForCfg(cfg)
}
// NewSiteForCfg creates a new site for the given configuration.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
func NewSiteForCfg(cfg deps.DepsCfg) (*Site, error) {
	h, err := NewHugoSites(cfg)
	if err != nil {
		return nil, err
	}
	// Single-site use: return the first (and only) site.
	return h.Sites[0], nil
}
// SiteInfo is the template-facing view of a Site, exposing site-wide
// configuration and metadata to templates.
type SiteInfo struct {
	Authors  page.AuthorList
	Social   SiteSocial
	hugoInfo hugo.Info
	title    string
	RSSLink  string
	Author   map[string]interface{}
	// TODO(bep) multilingual
	LanguageCode   string
	Copyright      string
	permalinks     map[string]string
	LanguagePrefix string
	Languages      langs.Languages

	BuildDrafts bool

	canonifyURLs bool
	relativeURLs bool
	uglyURLs     func(p page.Page) bool

	owner                          *HugoSites
	s                              *Site
	language                       *langs.Language
	defaultContentLanguageInSubdir bool
	sectionPagesMenu               string
}
// Pages returns all pages for this site's language.
func (s *SiteInfo) Pages() page.Pages {
	return s.s.Pages()
}
// RegularPages returns the regular (content) pages for this site's language.
func (s *SiteInfo) RegularPages() page.Pages {
	return s.s.RegularPages()
}
// AllPages returns all pages across all of the site's languages.
func (s *SiteInfo) AllPages() page.Pages {
	return s.s.AllPages()
}
// AllRegularPages returns the regular pages across all of the site's languages.
func (s *SiteInfo) AllRegularPages() page.Pages {
	return s.s.AllRegularPages()
}
// Permalinks returns the configured permalink patterns.
// Deprecated: scheduled for removal in 0.57.
func (s *SiteInfo) Permalinks() map[string]string {
	// Remove in 0.57
	helpers.Deprecated(".Site.Permalinks", "", false)
	return s.permalinks
}
// LastChange returns the last modification date of the site's content.
func (s *SiteInfo) LastChange() time.Time {
	return s.s.lastmod
}
// Title returns the site title.
func (s *SiteInfo) Title() string {
	return s.title
}
// Site returns itself, satisfying the page.Site interface.
func (s *SiteInfo) Site() page.Site {
	return s
}
// Menus returns the site menus.
func (s *SiteInfo) Menus() navigation.Menus {
	return s.s.Menus()
}
// Taxonomies returns the site's taxonomy list.
// TODO(bep) type
func (s *SiteInfo) Taxonomies() interface{} {
	return s.s.Taxonomies
}
// Params returns the site parameters for the current language.
func (s *SiteInfo) Params() maps.Params {
	return s.s.Language().Params()
}
// Data returns the site's data files as a map.
func (s *SiteInfo) Data() map[string]interface{} {
	return s.s.h.Data()
}
// Language returns the language of this site.
func (s *SiteInfo) Language() *langs.Language {
	return s.language
}
// Config returns the decoded site configuration (services etc.).
func (s *SiteInfo) Config() SiteConfig {
	return s.s.siteConfigConfig
}
// Hugo returns information about the Hugo build (version, environment).
func (s *SiteInfo) Hugo() hugo.Info {
	return s.hugoInfo
}
// Sites is a convenience method to get all the Hugo sites/languages configured.
func (s *SiteInfo) Sites() page.Sites {
	return s.s.h.siteInfos()
}
// String implements fmt.Stringer, identifying the site by its title.
func (s *SiteInfo) String() string {
	return fmt.Sprintf("Site(%q)", s.title)
}
// BaseURL returns the site's base URL as a template-safe URL.
func (s *SiteInfo) BaseURL() template.URL {
	return template.URL(s.s.PathSpec.BaseURL.String())
}
// ServerPort returns the port part of the BaseURL, 0 if none found.
func (s *SiteInfo) ServerPort() int {
	ps := s.s.PathSpec.BaseURL.URL().Port()
	if ps == "" {
		return 0
	}
	p, err := strconv.Atoi(ps)
	if err != nil {
		// Non-numeric port in the URL; treat as not found.
		return 0
	}
	return p
}
// GoogleAnalytics is kept here for historic reasons.
// It returns the Google Analytics ID from the services configuration.
func (s *SiteInfo) GoogleAnalytics() string {
	return s.Config().Services.GoogleAnalytics.ID
}
// DisqusShortname is kept here for historic reasons.
// It returns the Disqus shortname from the services configuration.
func (s *SiteInfo) DisqusShortname() string {
	return s.Config().Services.Disqus.Shortname
}
// SiteSocial is a place to put social details on a site level. These are the
// standard keys that themes will expect to have available, but can be
// expanded to any others on a per site basis
// github
// facebook
// facebook_admin
// twitter
// twitter_domain
// pinterest
// instagram
// youtube
// linkedin
type SiteSocial map[string]string
// Param is a convenience method to do lookups in SiteInfo's Params map.
//
// This method is also implemented on Page.
func (s *SiteInfo) Param(key interface{}) (interface{}, error) {
	return resource.Param(s, nil, key)
}
// IsMultiLingual reports whether the site is configured with more than one language.
func (s *SiteInfo) IsMultiLingual() bool {
	return len(s.Languages) > 1
}
// IsServer reports whether Hugo is running in server (watch) mode.
func (s *SiteInfo) IsServer() bool {
	return s.owner.running
}
// siteRefLinker resolves ref/relref lookups for a site, logging failed
// lookups and returning a configurable not-found URL.
type siteRefLinker struct {
	s *Site

	errorLogger *log.Logger
	notFoundURL string
}
// newSiteRefLinker creates a ref linker from configuration, downgrading
// failed-lookup logging to warnings when refLinksErrorLevel is "warning".
// NOTE(review): the error return is always nil in this implementation.
func newSiteRefLinker(cfg config.Provider, s *Site) (siteRefLinker, error) {
	logger := s.Log.ERROR

	notFoundURL := cfg.GetString("refLinksNotFoundURL")
	errLevel := cfg.GetString("refLinksErrorLevel")
	if strings.EqualFold(errLevel, "warning") {
		logger = s.Log.WARN
	}
	return siteRefLinker{s: s, errorLogger: logger, notFoundURL: notFoundURL}, nil
}
// logNotFound logs a failed ref lookup, including the source position or
// page path when available.
func (s siteRefLinker) logNotFound(ref, what string, p page.Page, position text.Position) {
	if position.IsValid() {
		s.errorLogger.Printf("[%s] REF_NOT_FOUND: Ref %q: %s: %s", s.s.Lang(), ref, position.String(), what)
	} else if p == nil {
		s.errorLogger.Printf("[%s] REF_NOT_FOUND: Ref %q: %s", s.s.Lang(), ref, what)
	} else {
		s.errorLogger.Printf("[%s] REF_NOT_FOUND: Ref %q from page %q: %s", s.s.Lang(), ref, p.Path(), what)
	}
}
// refLink resolves a ref/relref target to a permalink (relative when
// relative is true), optionally for a specific output format. Failed
// lookups are logged and the configured not-found URL is returned with a
// nil error.
func (s *siteRefLinker) refLink(ref string, source interface{}, relative bool, outputFormat string) (string, error) {
	p, err := unwrapPage(source)
	if err != nil {
		return "", err
	}

	var refURL *url.URL

	ref = filepath.ToSlash(ref)

	refURL, err = url.Parse(ref)

	if err != nil {
		return s.notFoundURL, err
	}

	var target page.Page
	var link string

	if refURL.Path != "" {
		var err error
		target, err = s.s.getPageNew(p, refURL.Path)
		var pos text.Position
		if err != nil || target == nil {
			if p, ok := source.(text.Positioner); ok {
				pos = p.Position()
			}
		}

		if err != nil {
			s.logNotFound(refURL.Path, err.Error(), p, pos)
			return s.notFoundURL, nil
		}

		if target == nil {
			s.logNotFound(refURL.Path, "page not found", p, pos)
			return s.notFoundURL, nil
		}

		var permalinker Permalinker = target

		if outputFormat != "" {
			o := target.OutputFormats().Get(outputFormat)

			if o == nil {
				s.logNotFound(refURL.Path, fmt.Sprintf("output format %q", outputFormat), p, pos)
				return s.notFoundURL, nil
			}
			permalinker = o
		}

		if relative {
			link = permalinker.RelPermalink()
		} else {
			link = permalinker.Permalink()
		}
	}

	if refURL.Fragment != "" {
		_ = target
		link = link + "#" + refURL.Fragment

		// Append any converter-specific anchor suffix (e.g. for some
		// Markdown engines), derived from the target page when we have one,
		// else from the source page.
		if pctx, ok := target.(pageContext); ok {
			if refURL.Path != "" {
				if di, ok := pctx.getContentConverter().(converter.DocumentInfo); ok {
					link = link + di.AnchorSuffix()
				}
			}
		} else if pctx, ok := p.(pageContext); ok {
			if di, ok := pctx.getContentConverter().(converter.DocumentInfo); ok {
				link = link + di.AnchorSuffix()
			}
		}
	}

	return link, nil
}
// running reports whether the owning HugoSites is in server/watch mode.
func (s *Site) running() bool {
	return s.h != nil && s.h.running
}
// multilingual returns the owning container's multilingual configuration.
func (s *Site) multilingual() *Multilingual {
	return s.h.multilingual
}
// whatChanged classifies a set of filesystem changes for a partial rebuild.
type whatChanged struct {
	source bool            // content changed
	other  bool            // templates, data or i18n changed
	files  map[string]bool // the changed source filenames
}
// RegisterMediaTypes will register the Site's media types in the mime
// package, so it will behave correctly with Hugo's built-in server.
func (s *Site) RegisterMediaTypes() {
	for _, mt := range s.mediaTypesConfig {
		for _, suffix := range mt.Suffixes {
			_ = mime.AddExtensionType(mt.Delimiter+suffix, mt.Type()+"; charset=utf-8")
		}
	}
}
// filterFileEvents drops duplicate events, ignored files and directories
// from the given filesystem events, keeping remove/rename events for files
// that no longer exist on disk.
func (s *Site) filterFileEvents(events []fsnotify.Event) []fsnotify.Event {
	var filtered []fsnotify.Event
	seen := make(map[fsnotify.Event]bool)

	for _, ev := range events {
		// Avoid processing the same event twice.
		if seen[ev] {
			continue
		}
		seen[ev] = true

		if s.SourceSpec.IgnoreFile(ev.Name) {
			continue
		}

		// Throw away any directories
		isRegular, err := s.SourceSpec.IsRegularSourceFile(ev.Name)
		if err != nil && os.IsNotExist(err) && (ev.Op&fsnotify.Remove == fsnotify.Remove || ev.Op&fsnotify.Rename == fsnotify.Rename) {
			// Force keep of event
			isRegular = true
		}
		if !isRegular {
			continue
		}

		filtered = append(filtered, ev)
	}

	return filtered
}
// translateFileEvents collapses multiple events for the same file into one,
// preferring a Write, then a Create, then the first event seen.
func (s *Site) translateFileEvents(events []fsnotify.Event) []fsnotify.Event {
	var filtered []fsnotify.Event

	eventMap := make(map[string][]fsnotify.Event)

	// We often get a Remove etc. followed by a Create, a Create followed by a Write.
	// Remove the superfluous events to make the update logic simpler.
	for _, ev := range events {
		eventMap[ev.Name] = append(eventMap[ev.Name], ev)
	}

	for _, ev := range events {
		mapped := eventMap[ev.Name]

		// Keep one
		found := false
		var kept fsnotify.Event
		for i, ev2 := range mapped {
			if i == 0 {
				kept = ev2
			}

			if ev2.Op&fsnotify.Write == fsnotify.Write {
				kept = ev2
				found = true
			}

			if !found && ev2.Op&fsnotify.Create == fsnotify.Create {
				kept = ev2
			}
		}

		filtered = append(filtered, kept)
	}

	return filtered
}
// processPartial partially rebuilds a site given the filesystem events: it
// classifies each event (assets, content, templates, data, i18n), resets
// the affected caches and dependencies, and re-reads the changed content.
// TODO(bep) clean up/rewrite this method.
func (s *Site) processPartial(config *BuildCfg, init func(config *BuildCfg) error, events []fsnotify.Event) error {
	events = s.filterFileEvents(events)
	events = s.translateFileEvents(events)

	s.Log.DEBUG.Printf("Rebuild for events %q", events)

	h := s.h

	// First we need to determine what changed

	var (
		sourceChanged       = []fsnotify.Event{}
		sourceReallyChanged = []fsnotify.Event{}
		contentFilesChanged []string
		tmplChanged         = []fsnotify.Event{}
		dataChanged         = []fsnotify.Event{}
		i18nChanged         = []fsnotify.Event{}
		shortcodesChanged   = make(map[string]bool)
		sourceFilesChanged  = make(map[string]bool)

		// prevent spamming the log on changes
		logger = helpers.NewDistinctFeedbackLogger()
	)

	var cachePartitions []string

	for _, ev := range events {
		if assetsFilename := s.BaseFs.Assets.MakePathRelative(ev.Name); assetsFilename != "" {
			cachePartitions = append(cachePartitions, resources.ResourceKeyPartitions(assetsFilename)...)
		}

		if s.isContentDirEvent(ev) {
			logger.Println("Source changed", ev)
			sourceChanged = append(sourceChanged, ev)
		}
		if s.isLayoutDirEvent(ev) {
			logger.Println("Template changed", ev)
			tmplChanged = append(tmplChanged, ev)

			if strings.Contains(ev.Name, "shortcodes") {
				shortcode := filepath.Base(ev.Name)
				shortcode = strings.TrimSuffix(shortcode, filepath.Ext(shortcode))
				shortcodesChanged[shortcode] = true
			}
		}
		if s.isDataDirEvent(ev) {
			logger.Println("Data changed", ev)
			dataChanged = append(dataChanged, ev)
		}
		if s.isI18nEvent(ev) {
			logger.Println("i18n changed", ev)
			// Bug fix: this previously appended to dataChanged's slice
			// (i18nChanged = append(dataChanged, ev)), so i18nChanged also
			// carried all data events.
			i18nChanged = append(i18nChanged, ev)
		}
	}

	changed := &whatChanged{
		source: len(sourceChanged) > 0 || len(shortcodesChanged) > 0,
		other:  len(tmplChanged) > 0 || len(i18nChanged) > 0 || len(dataChanged) > 0,
		files:  sourceFilesChanged,
	}

	config.whatChanged = changed

	if err := init(config); err != nil {
		return err
	}

	// These in memory resource caches will be rebuilt on demand.
	for _, site := range s.h.Sites {
		site.ResourceSpec.ResourceCache.DeletePartitions(cachePartitions...)
	}

	if len(tmplChanged) > 0 || len(i18nChanged) > 0 {
		sites := s.h.Sites
		first := sites[0]

		s.h.init.Reset()

		// TODO(bep) globals clean
		if err := first.Deps.LoadResources(); err != nil {
			return err
		}

		// Propagate the reloaded dependencies to the other language sites.
		for i := 1; i < len(sites); i++ {
			site := sites[i]
			var err error
			depsCfg := deps.DepsCfg{
				Language:      site.language,
				MediaTypes:    site.mediaTypesConfig,
				OutputFormats: site.outputFormatsConfig,
			}
			site.Deps, err = first.Deps.ForLanguage(depsCfg, func(d *deps.Deps) error {
				d.Site = &site.Info
				return nil
			})
			if err != nil {
				return err
			}
		}
	}

	if len(dataChanged) > 0 {
		s.h.init.data.Reset()
	}

	for _, ev := range sourceChanged {
		removed := false

		if ev.Op&fsnotify.Remove == fsnotify.Remove {
			removed = true
		}

		// Some editors (Vim) sometimes issue only a Rename operation when writing an existing file
		// Sometimes a rename operation means that file has been renamed other times it means
		// it's been updated
		if ev.Op&fsnotify.Rename == fsnotify.Rename {
			// If the file is still on disk, it's only been updated, if it's not, it's been moved
			if ex, err := afero.Exists(s.Fs.Source, ev.Name); !ex || err != nil {
				removed = true
			}
		}

		if removed && files.IsContentFile(ev.Name) {
			h.removePageByFilename(ev.Name)
		}

		sourceReallyChanged = append(sourceReallyChanged, ev)
		sourceFilesChanged[ev.Name] = true
	}

	for shortcode := range shortcodesChanged {
		// There are certain scenarios that, when a shortcode changes,
		// it isn't sufficient to just rerender the already parsed shortcode.
		// One example is if the user adds a new shortcode to the content file first,
		// and then creates the shortcode on the file system.
		// To handle these scenarios, we must do a full reprocessing of the
		// pages that keeps a reference to the changed shortcode.
		pagesWithShortcode := h.findPagesByShortcode(shortcode)
		for _, p := range pagesWithShortcode {
			contentFilesChanged = append(contentFilesChanged, p.File().Filename())
		}
	}

	if len(sourceReallyChanged) > 0 || len(contentFilesChanged) > 0 {
		var filenamesChanged []string
		for _, e := range sourceReallyChanged {
			filenamesChanged = append(filenamesChanged, e.Name)
		}
		if len(contentFilesChanged) > 0 {
			filenamesChanged = append(filenamesChanged, contentFilesChanged...)
		}
		filenamesChanged = helpers.UniqueStringsReuse(filenamesChanged)

		if err := s.readAndProcessContent(filenamesChanged...); err != nil {
			return err
		}
	}

	return nil
}
// process initializes the site and reads and processes all content,
// wrapping errors with the failing step's name.
func (s *Site) process(config BuildCfg) (err error) {
	if err = s.initialize(); err != nil {
		err = errors.Wrap(err, "initialize")
		return
	}
	if err = s.readAndProcessContent(); err != nil {
		err = errors.Wrap(err, "readAndProcessContent")
		return
	}
	return err
}
// render writes the site for the output format in ctx: pages plus, for the
// first output format only, aliases, sitemap, robots.txt and the 404 page,
// and finally the main-language redirect when applicable.
func (s *Site) render(ctx *siteRenderContext) (err error) {
	if err := page.Clear(); err != nil {
		return err
	}

	if ctx.outIdx == 0 {
		// Note that even if disableAliases is set, the aliases themselves are
		// preserved on page. The motivation with this is to be able to generate
		// 301 redirects in a .htaccess file and similar using a custom output format.
		if !s.Cfg.GetBool("disableAliases") {
			// Aliases must be rendered before pages.
			// Some sites, Hugo docs included, have faulty alias definitions that point
			// to itself or another real page. These will be overwritten in the next
			// step.
			if err = s.renderAliases(); err != nil {
				return
			}
		}
	}

	if err = s.renderPages(ctx); err != nil {
		return
	}

	if ctx.outIdx == 0 {
		if err = s.renderSitemap(); err != nil {
			return
		}

		if err = s.renderRobotsTXT(); err != nil {
			return
		}

		if err = s.render404(); err != nil {
			return
		}
	}

	if !ctx.renderSingletonPages() {
		return
	}

	if err = s.renderMainLanguageRedirect(); err != nil {
		return
	}

	return
}
// Initialise is an exported wrapper around initialize.
func (s *Site) Initialise() (err error) {
	return s.initialize()
}
// initialize prepares the site for building by populating SiteInfo.
func (s *Site) initialize() (err error) {
	return s.initializeSiteInfo()
}
// HomeAbsURL is a convenience method giving the absolute URL to the home page.
// For multilingual sites the language code is included in the path.
func (s *SiteInfo) HomeAbsURL() string {
	base := ""
	if s.IsMultiLingual() {
		base = s.Language().Lang
	}
	return s.owner.AbsURL(base, false)
}
// SitemapAbsURL is a convenience method giving the absolute URL to the sitemap.
func (s *SiteInfo) SitemapAbsURL() string {
	base := s.HomeAbsURL()
	if !strings.HasSuffix(base, "/") {
		base += "/"
	}
	return base + s.s.siteCfg.sitemap.Filename
}
// initializeSiteInfo builds this site's SiteInfo from the site and
// language configuration: title/author/social metadata, URL behavior
// (ugly URLs, canonification, language prefix), menus and RSS link.
func (s *Site) initializeSiteInfo() error {
	var (
		lang      = s.language
		languages langs.Languages
	)
	if s.h != nil && s.h.multilingual != nil {
		languages = s.h.multilingual.Languages
	}
	permalinks := s.Cfg.GetStringMapString("permalinks")
	defaultContentInSubDir := s.Cfg.GetBool("defaultContentLanguageInSubdir")
	defaultContentLanguage := s.Cfg.GetString("defaultContentLanguage")
	// The language prefix is only applied for multilingual sites, and for
	// the default language only when it is published in a subdirectory.
	languagePrefix := ""
	if s.multilingualEnabled() && (defaultContentInSubDir || lang.Lang != defaultContentLanguage) {
		languagePrefix = "/" + lang.Lang
	}
	// uglyURLs may be configured as a bool, a string (from the CLI) or a
	// per-section map; normalize all three into a predicate on the page.
	var uglyURLs = func(p page.Page) bool {
		return false
	}
	v := s.Cfg.Get("uglyURLs")
	if v != nil {
		switch vv := v.(type) {
		case bool:
			uglyURLs = func(p page.Page) bool {
				return vv
			}
		case string:
			// This is what we get from the CLI (--uglyURLs).
			vvv := cast.ToBool(vv)
			uglyURLs = func(p page.Page) bool {
				return vvv
			}
		default:
			m := cast.ToStringMapBool(v)
			uglyURLs = func(p page.Page) bool {
				return m[p.Section()]
			}
		}
	}
	s.Info = SiteInfo{
		title:                          lang.GetString("title"),
		Author:                         lang.GetStringMap("author"),
		Social:                         lang.GetStringMapString("social"),
		LanguageCode:                   lang.GetString("languageCode"),
		Copyright:                      lang.GetString("copyright"),
		language:                       lang,
		LanguagePrefix:                 languagePrefix,
		Languages:                      languages,
		defaultContentLanguageInSubdir: defaultContentInSubDir,
		sectionPagesMenu:               lang.GetString("sectionPagesMenu"),
		BuildDrafts:                    s.Cfg.GetBool("buildDrafts"),
		canonifyURLs:                   s.Cfg.GetBool("canonifyURLs"),
		relativeURLs:                   s.Cfg.GetBool("relativeURLs"),
		uglyURLs:                       uglyURLs,
		permalinks:                     permalinks,
		owner:                          s.h,
		s:                              s,
		hugoInfo:                       hugo.NewInfo(s.Cfg.GetString("environment")),
	}
	// Expose the RSS link only if an RSS output format is configured for home.
	rssOutputFormat, found := s.outputFormats[page.KindHome].GetByName(output.RSSFormat.Name)
	if found {
		s.Info.RSSLink = s.permalink(rssOutputFormat.BaseFilename())
	}
	return nil
}
// isI18nEvent reports whether the file event concerns a translation (i18n) file.
func (s *Site) isI18nEvent(e fsnotify.Event) bool {
	return s.BaseFs.SourceFilesystems.IsI18n(e.Name)
}

// isDataDirEvent reports whether the file event concerns a file in the data dir.
func (s *Site) isDataDirEvent(e fsnotify.Event) bool {
	return s.BaseFs.SourceFilesystems.IsData(e.Name)
}

// isLayoutDirEvent reports whether the file event concerns a layout/template file.
func (s *Site) isLayoutDirEvent(e fsnotify.Event) bool {
	return s.BaseFs.SourceFilesystems.IsLayout(e.Name)
}

// isContentDirEvent reports whether the file event concerns a content file.
func (s *Site) isContentDirEvent(e fsnotify.Event) bool {
	return s.BaseFs.IsContent(e.Name)
}
// readAndProcessContent collects the content source files — all of them,
// or only the given filenames on a partial rebuild — and sends them
// through the pages processor.
func (s *Site) readAndProcessContent(filenames ...string) error {
	sourceSpec := source.NewSourceSpec(s.PathSpec, s.BaseFs.Content.Fs)
	// A non-empty filenames list marks this as a partial (re)build.
	proc := newPagesProcessor(s.h, sourceSpec, len(filenames) > 0)
	c := newPagesCollector(sourceSpec, s.Log, s.h.ContentChanges, proc, filenames...)
	return c.Collect()
}
// getMenusFromConfig reads the menu definitions from the language
// configuration key "menus" and converts them into navigation.Menus.
// Menus that fail to convert are logged and skipped; individual entries
// with bad maps are logged but still marshalled from what was parsed.
//
// Compared to the previous version, the redundant duplicated
// `return ret` inside the config branch has been removed and the
// error path converted to a guard + continue, flattening the nesting.
// Behavior is unchanged.
func (s *Site) getMenusFromConfig() navigation.Menus {
	ret := navigation.Menus{}
	menus := s.language.GetStringMap("menus")
	if menus == nil {
		return ret
	}
	for name, menu := range menus {
		m, err := cast.ToSliceE(menu)
		if err != nil {
			s.Log.ERROR.Printf("unable to process menus in site config\n")
			s.Log.ERROR.Println(err)
			continue
		}
		for _, entry := range m {
			s.Log.DEBUG.Printf("found menu: %q, in site config\n", name)
			menuEntry := navigation.MenuEntry{Menu: name}
			ime, err := maps.ToStringMapE(entry)
			if err != nil {
				s.Log.ERROR.Printf("unable to process menus in site config\n")
				s.Log.ERROR.Println(err)
			}
			menuEntry.MarshallMap(ime)
			// TODO(bep) clean up all of this
			menuEntry.ConfiguredURL = s.Info.createNodeMenuEntryURL(menuEntry.ConfiguredURL)
			if ret[name] == nil {
				ret[name] = navigation.Menu{}
			}
			ret[name] = ret[name].Add(&menuEntry)
		}
	}
	return ret
}
// createNodeMenuEntryURL normalizes a menu entry URL from site config.
// Only site-relative URLs (leading "/") are rewritten; anything else is
// returned untouched.
func (s *SiteInfo) createNodeMenuEntryURL(in string) string {
	if !strings.HasPrefix(in, "/") {
		return in
	}
	// Make it match the nodes.
	out := helpers.SanitizeURLKeepTrailingSlash(s.s.PathSpec.URLize(in))
	if s.canonifyURLs {
		return out
	}
	return helpers.AddContextRoot(s.s.PathSpec.BaseURL.String(), out)
}
// assembleMenus builds s.menus from three sources — site config, the
// optional sectionPagesMenu, and per-page front matter — then links
// children to parents and assembles the top level of each menu tree.
func (s *Site) assembleMenus() {
	s.menus = make(navigation.Menus)
	// Key for the flat entry index: (menu name, entry key).
	type twoD struct {
		MenuName, EntryName string
	}
	flat := map[twoD]*navigation.MenuEntry{}
	children := map[twoD]navigation.Menu{}
	// add menu entries from config to flat hash
	menuConfig := s.getMenusFromConfig()
	for name, menu := range menuConfig {
		for _, me := range menu {
			flat[twoD{name, me.KeyName()}] = me
		}
	}
	// Optionally add one entry per top-level section; config entries win.
	sectionPagesMenu := s.Info.sectionPagesMenu
	if sectionPagesMenu != "" {
		for _, p := range s.workAllPages {
			if p.Kind() == page.KindSection {
				// From Hugo 0.22 we have nested sections, but until we get a
				// feel of how that would work in this setting, let us keep
				// this menu for the top level only.
				id := p.Section()
				if _, ok := flat[twoD{sectionPagesMenu, id}]; ok {
					continue
				}
				me := navigation.MenuEntry{Identifier: id,
					Name:   p.LinkTitle(),
					Weight: p.Weight(),
					Page:   p}
				flat[twoD{sectionPagesMenu, me.KeyName()}] = &me
			}
		}
	}
	// Add menu entries provided by pages
	for _, p := range s.workAllPages {
		for name, me := range p.pageMenus.menus() {
			if _, ok := flat[twoD{name, me.KeyName()}]; ok {
				s.SendError(p.wrapError(errors.Errorf("duplicate menu entry with identifier %q in menu %q", me.KeyName(), name)))
				continue
			}
			flat[twoD{name, me.KeyName()}] = me
		}
	}
	// Create Children Menus First
	for _, e := range flat {
		if e.Parent != "" {
			children[twoD{e.Menu, e.Parent}] = children[twoD{e.Menu, e.Parent}].Add(e)
		}
	}
	// Placing Children in Parents (in flat)
	for p, childmenu := range children {
		_, ok := flat[twoD{p.MenuName, p.EntryName}]
		if !ok {
			// if parent does not exist, create one without a URL
			flat[twoD{p.MenuName, p.EntryName}] = &navigation.MenuEntry{Name: p.EntryName}
		}
		flat[twoD{p.MenuName, p.EntryName}].Children = childmenu
	}
	// Assembling Top Level of Tree
	for menu, e := range flat {
		if e.Parent == "" {
			_, ok := s.menus[menu.MenuName]
			if !ok {
				s.menus[menu.MenuName] = navigation.Menu{}
			}
			s.menus[menu.MenuName] = s.menus[menu.MenuName].Add(e)
		}
	}
}
// getLanguageTargetPathLang returns any language code to prefix the target file path with.
// In multihost mode every site gets its own root, so the language code is
// always used.
func (s *Site) getLanguageTargetPathLang(alwaysInSubDir bool) string {
	if s.h.IsMultihost() {
		return s.Language().Lang
	}
	return s.getLanguagePermalinkLang(alwaysInSubDir)
}

// getLanguagePermalinkLang returns any language code to prefix the relative permalink with.
// It returns "" for single-language and multihost setups, and for the
// default content language unless defaultContentLanguageInSubdir is set.
func (s *Site) getLanguagePermalinkLang(alwaysInSubDir bool) string {
	if !s.Info.IsMultiLingual() || s.h.IsMultihost() {
		return ""
	}
	if alwaysInSubDir {
		return s.Language().Lang
	}
	isDefault := s.Language().Lang == s.multilingual().DefaultLang.Lang
	if !isDefault || s.Info.defaultContentLanguageInSubdir {
		return s.Language().Lang
	}
	return ""
}
// getTaxonomyKey normalizes a taxonomy key into its path form,
// lower-casing it unless disablePathToLower is configured.
func (s *Site) getTaxonomyKey(key string) string {
	madePath := s.PathSpec.MakePath(key)
	if s.PathSpec.DisablePathToLower {
		return madePath
	}
	return strings.ToLower(madePath)
}
// resetBuildState prepares the site for a new full build. If the content
// source changed, the page collections are rebuilt from the raw pages and
// per-page relationships are cleared; otherwise only per-page scratch
// data is reset.
func (s *Site) resetBuildState(sourceChanged bool) {
	s.relatedDocsHandler = s.relatedDocsHandler.Clone()
	s.init.Reset()
	if sourceChanged {
		s.PageCollections = newPageCollectionsFromPages(s.rawAllPages)
		for _, p := range s.rawAllPages {
			p.pagePages = &pagePages{}
			p.parent = nil
			p.Scratcher = maps.NewScratcher()
		}
	} else {
		s.pagesMap.withEveryPage(func(p *pageState) {
			p.Scratcher = maps.NewScratcher()
		})
	}
}
// errorCollator drains the results channel, collects any errors, then
// sends a single representative error (the rest are logged) on errs and
// closes it. It is meant to run in its own goroutine.
//
// Fix: the local slice was previously named `errors`, shadowing the
// imported errors package inside this function; renamed to `collected`.
func (s *Site) errorCollator(results <-chan error, errs chan<- error) {
	var collected []error
	for e := range results {
		collected = append(collected, e)
	}
	errs <- s.h.pickOneAndLogTheRest(collected)
	close(errs)
}
// GetPage looks up a page of a given type for the given ref.
// In Hugo <= 0.44 you had to add Page Kind (section, home) etc. as the first
// argument and then either a unix styled path (with or without a leading slash))
// or path elements separated.
// When we now remove the Kind from this API, we need to make the transition as painless
// as possible for existing sites. Most sites will use {{ .Site.GetPage "section" "my/section" }},
// i.e. 2 arguments, so we test for that.
func (s *SiteInfo) GetPage(ref ...string) (page.Page, error) {
	p, err := s.s.getPageOldVersion(ref...)
	if p == nil {
		// The nil struct has meaning in some situations, mostly to avoid breaking
		// existing sites doing $nilpage.IsDescendant($p), which will always return
		// false.
		p = page.NilPage
	}
	return p, err
}
// permalink returns the absolute URL for the given site-relative link.
func (s *Site) permalink(link string) string {
	return s.PathSpec.PermalinkForBaseURL(link, s.PathSpec.BaseURL.String())
}
// renderAndWriteXML renders d to XML using the given layouts and
// publishes the result to targetPath, incrementing statCounter.
//
// Fix: the base-URL string was previously assigned to a local variable
// named `s`, shadowing the receiver; renamed to baseURL.
func (s *Site) renderAndWriteXML(statCounter *uint64, name string, targetPath string, d interface{}, layouts ...string) error {
	s.Log.DEBUG.Printf("Render XML for %q to %q", name, targetPath)
	renderBuffer := bp.GetBuffer()
	defer bp.PutBuffer(renderBuffer)
	if err := s.renderForLayouts(name, "", d, renderBuffer, layouts...); err != nil {
		return err
	}
	// The AbsURLPath is either a dotted relative path or the absolute base URL.
	var path string
	if s.Info.relativeURLs {
		path = helpers.GetDottedRelativePath(targetPath)
	} else {
		baseURL := s.PathSpec.BaseURL.String()
		if !strings.HasSuffix(baseURL, "/") {
			baseURL += "/"
		}
		path = baseURL
	}
	pd := publisher.Descriptor{
		Src:         renderBuffer,
		TargetPath:  targetPath,
		StatCounter: statCounter,
		// For the minification part of XML,
		// we currently only use the MIME type.
		OutputFormat: output.RSSFormat,
		AbsURLPath:   path,
	}
	return s.publisher.Publish(pd)
}
// renderAndWritePage renders p through the given layouts and publishes
// the result to targetPath, applying URL rewriting (relative/canonical),
// live-reload injection and the Hugo generator tag as configured.
func (s *Site) renderAndWritePage(statCounter *uint64, name string, targetPath string, p *pageState, layouts ...string) error {
	renderBuffer := bp.GetBuffer()
	defer bp.PutBuffer(renderBuffer)
	of := p.outputFormat()
	if err := s.renderForLayouts(p.Kind(), of.Name, p, renderBuffer, layouts...); err != nil {
		return err
	}
	// Nothing rendered — nothing to publish.
	if renderBuffer.Len() == 0 {
		return nil
	}
	isHTML := of.IsHTML
	isRSS := of.Name == "RSS"
	// path carries the URL prefix used for URL rewriting below.
	var path string
	if s.Info.relativeURLs {
		path = helpers.GetDottedRelativePath(targetPath)
	} else if isRSS || s.Info.canonifyURLs {
		url := s.PathSpec.BaseURL.String()
		if !strings.HasSuffix(url, "/") {
			url += "/"
		}
		path = url
	}
	pd := publisher.Descriptor{
		Src:          renderBuffer,
		TargetPath:   targetPath,
		StatCounter:  statCounter,
		OutputFormat: p.outputFormat(),
	}
	if isRSS {
		// Always canonify URLs in RSS
		pd.AbsURLPath = path
	} else if isHTML {
		if s.Info.relativeURLs || s.Info.canonifyURLs {
			pd.AbsURLPath = path
		}
		if s.running() && s.Cfg.GetBool("watch") && !s.Cfg.GetBool("disableLiveReload") {
			pd.LiveReloadPort = s.Cfg.GetInt("liveReloadPort")
		}
		// For performance reasons we only inject the Hugo generator tag on the home page.
		if p.IsHome() {
			pd.AddHugoGeneratorTag = !s.Cfg.GetBool("disableHugoGeneratorInject")
		}
	}
	return s.publisher.Publish(pd)
}
// infoOnMissingLayout lists layout names whose absence is logged at INFO
// rather than WARN level.
var infoOnMissingLayout = map[string]bool{
	// The 404 layout is very much optional in Hugo, but we do look for it.
	"404": true,
}
// renderForLayouts executes the first template found among layouts with d
// as its data, writing the result to w. A missing template is not an
// error: a message is logged (INFO for optional layouts such as 404,
// WARN otherwise) and nil is returned.
func (s *Site) renderForLayouts(name, outputFormat string, d interface{}, w io.Writer, layouts ...string) (err error) {
	templ := s.findFirstTemplate(layouts...)
	if templ == nil {
		log := s.Log.WARN
		if infoOnMissingLayout[name] {
			log = s.Log.INFO
		}
		// Build the log message incrementally so the format verbs line up
		// with the collected args.
		errMsg := "You should create a template file which matches Hugo Layouts Lookup Rules for this combination."
		var args []interface{}
		msg := "found no layout file for"
		if outputFormat != "" {
			msg += " %q"
			args = append(args, outputFormat)
		}
		if name != "" {
			msg += " for %q"
			args = append(args, name)
		}
		msg += ": " + errMsg
		log.Printf(msg, args...)
		return nil
	}
	if err = templ.Execute(w, d); err != nil {
		return _errors.Wrapf(err, "render of %q failed", name)
	}
	return
}
// findFirstTemplate returns the first template that resolves among the
// given layout names, or nil if none of them does.
func (s *Site) findFirstTemplate(layouts ...string) tpl.Template {
	for i := range layouts {
		templ, found := s.Tmpl.Lookup(layouts[i])
		if found {
			return templ
		}
	}
	return nil
}
// publish writes the content of r to path in the publish filesystem,
// incrementing the given processing-stats counter.
func (s *Site) publish(statCounter *uint64, path string, r io.Reader) (err error) {
	s.PathSpec.ProcessingStats.Incr(statCounter)
	return helpers.WriteToDisk(filepath.Clean(path), r, s.BaseFs.PublishFs)
}
// kindFromFileInfoOrSections determines the page kind from the source
// file: "_index" files become home/section/taxonomy listings depending on
// where they live; everything else is a regular page.
func (s *Site) kindFromFileInfoOrSections(fi *fileInfo, sections []string) string {
	if fi.TranslationBaseName() == "_index" {
		if fi.Dir() == "" {
			return page.KindHome
		}
		return s.kindFromSections(sections)
	}
	return page.KindPage
}

// kindFromSections maps a section path (as path elements) to a page kind.
func (s *Site) kindFromSections(sections []string) string {
	if len(sections) == 0 {
		return page.KindHome
	}
	return s.kindFromSectionPath(path.Join(sections...))
}
// kindFromSectionPath maps a joined section path to a page kind, treating
// configured taxonomy paths specially: an exact match is a taxonomy-term
// listing, a path under a taxonomy is a taxonomy listing.
func (s *Site) kindFromSectionPath(sectionPath string) string {
	for _, plural := range s.siteCfg.taxonomiesConfig {
		if plural == sectionPath {
			return page.KindTaxonomyTerm
		}
		// NOTE(review): a plain prefix match also matches sections that merely
		// share a name prefix with a taxonomy (e.g. "tagsfoo" vs "tags") —
		// confirm this is intended.
		if strings.HasPrefix(sectionPath, plural) {
			return page.KindTaxonomy
		}
	}
	return page.KindSection
}
// newTaxonomyPage creates a taxonomy page with the given title and
// section path. It panics on invalid page metadata (programmer error).
func (s *Site) newTaxonomyPage(title string, sections ...string) *pageState {
	p, err := newPageFromMeta(
		map[string]interface{}{"title": title},
		&pageMeta{
			s:        s,
			kind:     page.KindTaxonomy,
			sections: sections,
		})
	if err != nil {
		panic(err)
	}
	return p
}

// newPage creates a page of the given kind and section path with empty
// front matter. It panics on invalid page metadata (programmer error).
func (s *Site) newPage(kind string, sections ...string) *pageState {
	p, err := newPageFromMeta(
		map[string]interface{}{},
		&pageMeta{
			s:        s,
			kind:     kind,
			sections: sections,
		})
	if err != nil {
		panic(err)
	}
	return p
}
// shouldBuild reports whether the given page should be included in this
// site's build, honoring the buildFuture/buildExpired/buildDrafts settings.
func (s *Site) shouldBuild(p page.Page) bool {
	return shouldBuild(s.BuildFuture, s.BuildExpired,
		s.BuildDrafts, p.Draft(), p.PublishDate(), p.ExpiryDate())
}
func shouldBuild(buildFuture bool, buildExpired bool, buildDrafts bool, Draft bool,
publishDate time.Time, expiryDate time.Time) bool {
if !(buildDrafts || !Draft) {
return false
}
if !buildFuture && !publishDate.IsZero() && publishDate.After(time.Now()) {
return false
}
if !buildExpired && !expiryDate.IsZero() && expiryDate.Before(time.Now()) {
return false
}
return true
}
hugolib: Adjust .Site.Permalinks deprecation level
// Copyright 2019 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hugolib
import (
"fmt"
"html/template"
"io"
"log"
"mime"
"net/url"
"os"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/gohugoio/hugo/resources/resource"
"github.com/gohugoio/hugo/markup/converter"
"github.com/gohugoio/hugo/hugofs/files"
"github.com/gohugoio/hugo/common/maps"
"github.com/pkg/errors"
"github.com/gohugoio/hugo/common/text"
"github.com/gohugoio/hugo/common/hugo"
"github.com/gohugoio/hugo/publisher"
_errors "github.com/pkg/errors"
"github.com/gohugoio/hugo/langs"
"github.com/gohugoio/hugo/resources/page"
"github.com/gohugoio/hugo/config"
"github.com/gohugoio/hugo/lazy"
"github.com/gohugoio/hugo/media"
"github.com/fsnotify/fsnotify"
bp "github.com/gohugoio/hugo/bufferpool"
"github.com/gohugoio/hugo/deps"
"github.com/gohugoio/hugo/helpers"
"github.com/gohugoio/hugo/navigation"
"github.com/gohugoio/hugo/output"
"github.com/gohugoio/hugo/related"
"github.com/gohugoio/hugo/resources"
"github.com/gohugoio/hugo/resources/page/pagemeta"
"github.com/gohugoio/hugo/source"
"github.com/gohugoio/hugo/tpl"
"github.com/spf13/afero"
"github.com/spf13/cast"
"github.com/spf13/viper"
)
// Site contains all the information relevant for constructing a static
// site. The basic flow of information is as follows:
//
// 1. A list of Files is parsed and then converted into Pages.
//
// 2. Pages contain sections (based on the file they were generated from),
//    aliases and slugs (included in a pages frontmatter) which are the
//    various targets that will get generated. There will be canonical
//    listing. The canonical path can be overruled based on a pattern.
//
// 3. Taxonomies are created via configuration and will present some aspect of
//    the final page and typically a perm url.
//
// 4. All Pages are passed through a template based on their desired
//    layout based on numerous different elements.
//
// 5. The entire collection of files is written to disk.
type Site struct {
	// The owning container. When multiple languages, there will be multiple
	// sites.
	h *HugoSites

	*PageCollections

	// Taxonomy and section listings built for this site.
	Taxonomies TaxonomyList
	Sections   Taxonomy

	// Public, template-facing site metadata.
	Info SiteInfo

	layoutHandler *output.LayoutHandler

	// The language this site is built for.
	language *langs.Language

	// Site-level config values read once at creation.
	siteCfg siteConfigHolder

	// Page kinds disabled via the disableKinds setting.
	disabledKinds map[string]bool

	enableInlineShortcodes bool

	// Output formats defined in site config per Page Kind, or some defaults
	// if not set.
	// Output formats defined in Page front matter will override these.
	outputFormats map[string]output.Formats

	// All the output formats and media types available for this site.
	// These values will be merged from the Hugo defaults, the site config and,
	// finally, the language settings.
	outputFormatsConfig output.Formats
	mediaTypesConfig    media.Types

	siteConfigConfig SiteConfig

	// How to handle page front matter.
	frontmatterHandler pagemeta.FrontMatterHandler

	// We render each site for all the relevant output formats in serial with
	// this rendering context pointing to the current one.
	rc *siteRenderingContext

	// The output formats that we need to render this site in. This slice
	// will be fixed once set.
	// This will be the union of Site.Pages' outputFormats.
	// This slice will be sorted.
	renderFormats output.Formats

	// Logger etc.
	*deps.Deps `json:"-"`

	// The func used to title case titles.
	titleFunc func(s string) string

	relatedDocsHandler *page.RelatedDocsHandler
	siteRefLinker

	publisher publisher.Publisher

	menus navigation.Menus

	// Shortcut to the home page. Note that this may be nil if
	// home page, for some odd reason, is disabled.
	home *pageState

	// The last modification date of this site.
	lastmod time.Time

	// Lazily loaded site dependencies
	init *siteInit
}
// siteConfigHolder holds a small set of site-level config values that are
// read once when the site is created.
type siteConfigHolder struct {
	sitemap          config.Sitemap
	taxonomiesConfig map[string]string
	timeout          time.Duration
	hasCJKLanguage   bool
	enableEmoji      bool
}
// Lazily loaded site dependencies.
type siteInit struct {
	prevNext          *lazy.Init
	prevNextInSection *lazy.Init
	menus             *lazy.Init
}

// Reset marks all lazy initializers as not-done so they will run again on
// next use (e.g. after a rebuild).
func (init *siteInit) Reset() {
	init.prevNext.Reset()
	init.prevNextInSection.Reset()
	init.menus.Reset()
}
// initInit runs the given lazy initializer and reports whether it
// succeeded. On failure the error is wrapped with the page context and
// reported as fatal to the owning HugoSites.
func (s *Site) initInit(init *lazy.Init, pctx pageContext) bool {
	_, err := init.Do()
	if err != nil {
		s.h.FatalError(pctx.wrapError(err))
	}
	return err == nil
}
// prepareInits sets up the site's lazy initializers: next/prev links for
// regular pages, next/prev links within sections, and menu assembly.
func (s *Site) prepareInits() {
	s.init = &siteInit{}
	var init lazy.Init
	s.init.prevNext = init.Branch(func() (interface{}, error) {
		regularPages := s.findWorkPagesByKind(page.KindPage)
		// NOTE(review): nextPage points to the element BEFORE i and prevPage
		// to the one after — this presumes regularPages is sorted
		// newest-first; confirm the ordering guarantee.
		for i, p := range regularPages {
			if p.posNextPrev == nil {
				continue
			}
			p.posNextPrev.nextPage = nil
			p.posNextPrev.prevPage = nil
			if i > 0 {
				p.posNextPrev.nextPage = regularPages[i-1]
			}
			if i < len(regularPages)-1 {
				p.posNextPrev.prevPage = regularPages[i+1]
			}
		}
		return nil, nil
	})
	s.init.prevNextInSection = init.Branch(func() (interface{}, error) {
		var rootSection []int
		// TODO(bep) cm attach this to the bucket.
		for i, p1 := range s.workAllPages {
			// Collect root-level regular pages; they are linked together below.
			if p1.IsPage() && p1.Section() == "" {
				rootSection = append(rootSection, i)
			}
			if p1.IsSection() {
				sectionPages := p1.RegularPages()
				for i, p2 := range sectionPages {
					p2s := p2.(*pageState)
					if p2s.posNextPrevSection == nil {
						continue
					}
					p2s.posNextPrevSection.nextPage = nil
					p2s.posNextPrevSection.prevPage = nil
					if i > 0 {
						p2s.posNextPrevSection.nextPage = sectionPages[i-1]
					}
					if i < len(sectionPages)-1 {
						p2s.posNextPrevSection.prevPage = sectionPages[i+1]
					}
				}
			}
		}
		// Link the root-level pages to each other the same way.
		for i, j := range rootSection {
			p := s.workAllPages[j]
			if i > 0 {
				p.posNextPrevSection.nextPage = s.workAllPages[rootSection[i-1]]
			}
			if i < len(rootSection)-1 {
				p.posNextPrevSection.prevPage = s.workAllPages[rootSection[i+1]]
			}
		}
		return nil, nil
	})
	s.init.menus = init.Branch(func() (interface{}, error) {
		s.assembleMenus()
		return nil, nil
	})
}
// siteRenderingContext carries the output format currently being rendered.
type siteRenderingContext struct {
	output.Format
}

// Menus returns the site's menus, assembling them lazily on first use.
func (s *Site) Menus() navigation.Menus {
	s.init.menus.Do()
	return s.menus
}
// initRenderFormats computes the sorted union of output formats this site
// must render: formats configured in page front matter plus the per-kind
// site defaults.
func (s *Site) initRenderFormats() {
	formatSet := make(map[string]bool)
	formats := output.Formats{}
	// Formats requested by individual pages.
	for _, p := range s.workAllPages {
		for _, f := range p.m.configuredOutputFormats {
			if !formatSet[f.Name] {
				formats = append(formats, f)
				formatSet[f.Name] = true
			}
		}
	}
	// Add the per kind configured output formats
	for _, kind := range allKindsInPages {
		if siteFormats, found := s.outputFormats[kind]; found {
			for _, f := range siteFormats {
				if !formatSet[f.Name] {
					formats = append(formats, f)
					formatSet[f.Name] = true
				}
			}
		}
	}
	sort.Sort(formats)
	s.renderFormats = formats
}
// GetRelatedDocsHandler returns the handler used for related-content lookups.
func (s *Site) GetRelatedDocsHandler() *page.RelatedDocsHandler {
	return s.relatedDocsHandler
}

// Language returns the language this site is built for.
func (s *Site) Language() *langs.Language {
	return s.language
}

// isEnabled reports whether the given page kind is not disabled via the
// disableKinds configuration. It panics on kindUnknown (programmer error).
func (s *Site) isEnabled(kind string) bool {
	if kind == kindUnknown {
		panic("Unknown kind")
	}
	return !s.disabledKinds[kind]
}
// reset returns a new Site prepared for rebuild. Configuration-derived
// state is carried over; derived render state is left to be rebuilt.
func (s *Site) reset() *Site {
	return &Site{Deps: s.Deps,
		layoutHandler:          output.NewLayoutHandler(),
		disabledKinds:          s.disabledKinds,
		titleFunc:              s.titleFunc,
		relatedDocsHandler:     s.relatedDocsHandler.Clone(),
		siteRefLinker:          s.siteRefLinker,
		outputFormats:          s.outputFormats,
		rc:                     s.rc,
		outputFormatsConfig:    s.outputFormatsConfig,
		frontmatterHandler:     s.frontmatterHandler,
		mediaTypesConfig:       s.mediaTypesConfig,
		language:               s.language,
		h:                      s.h,
		publisher:              s.publisher,
		siteConfigConfig:       s.siteConfigConfig,
		enableInlineShortcodes: s.enableInlineShortcodes,
		init:                   s.init,
		PageCollections:        s.PageCollections,
		siteCfg:                s.siteCfg,
	}
}
// newSite creates a new site with the given configuration.
func newSite(cfg deps.DepsCfg) (*Site, error) {
	c := newPageCollections()
	if cfg.Language == nil {
		cfg.Language = langs.NewDefaultLanguage(cfg.Cfg)
	}
	// Collect the page kinds disabled via configuration.
	disabledKinds := make(map[string]bool)
	for _, disabled := range cast.ToStringSlice(cfg.Language.Get("disableKinds")) {
		disabledKinds[disabled] = true
	}
	var (
		mediaTypesConfig        []map[string]interface{}
		outputFormatsConfig     []map[string]interface{}
		siteOutputFormatsConfig output.Formats
		siteMediaTypesConfig    media.Types
		err                     error
	)
	// Add language last, if set, so it gets precedence.
	for _, cfg := range []config.Provider{cfg.Cfg, cfg.Language} {
		if cfg.IsSet("mediaTypes") {
			mediaTypesConfig = append(mediaTypesConfig, cfg.GetStringMap("mediaTypes"))
		}
		if cfg.IsSet("outputFormats") {
			outputFormatsConfig = append(outputFormatsConfig, cfg.GetStringMap("outputFormats"))
		}
	}
	siteMediaTypesConfig, err = media.DecodeTypes(mediaTypesConfig...)
	if err != nil {
		return nil, err
	}
	siteOutputFormatsConfig, err = output.DecodeFormats(siteMediaTypesConfig, outputFormatsConfig...)
	if err != nil {
		return nil, err
	}
	outputFormats, err := createSiteOutputFormats(siteOutputFormatsConfig, cfg.Language)
	if err != nil {
		return nil, err
	}
	taxonomies := cfg.Language.GetStringMapString("taxonomies")
	// Related-content config: use the site's, or the default (weighting
	// "tags" when that taxonomy exists).
	var relatedContentConfig related.Config
	if cfg.Language.IsSet("related") {
		relatedContentConfig, err = related.DecodeConfig(cfg.Language.Get("related"))
		if err != nil {
			return nil, err
		}
	} else {
		relatedContentConfig = related.DefaultConfig
		if _, found := taxonomies["tag"]; found {
			relatedContentConfig.Add(related.IndexConfig{Name: "tags", Weight: 80})
		}
	}
	titleFunc := helpers.GetTitleFunc(cfg.Language.GetString("titleCaseStyle"))
	frontMatterHandler, err := pagemeta.NewFrontmatterHandler(cfg.Logger, cfg.Cfg)
	if err != nil {
		return nil, err
	}
	// Build timeout: defaults to 30s; accepts an integer (milliseconds) or
	// a duration string. Unparseable values silently keep the default.
	timeout := 30 * time.Second
	if cfg.Language.IsSet("timeout") {
		switch v := cfg.Language.Get("timeout").(type) {
		case int64:
			timeout = time.Duration(v) * time.Millisecond
		case string:
			d, err := time.ParseDuration(v)
			if err == nil {
				timeout = d
			}
		}
	}
	siteConfig := siteConfigHolder{
		sitemap:          config.DecodeSitemap(config.Sitemap{Priority: -1, Filename: "sitemap.xml"}, cfg.Language.GetStringMap("sitemap")),
		taxonomiesConfig: taxonomies,
		timeout:          timeout,
		hasCJKLanguage:   cfg.Language.GetBool("hasCJKLanguage"),
		enableEmoji:      cfg.Language.Cfg.GetBool("enableEmoji"),
	}
	s := &Site{
		PageCollections:        c,
		layoutHandler:          output.NewLayoutHandler(),
		language:               cfg.Language,
		disabledKinds:          disabledKinds,
		titleFunc:              titleFunc,
		relatedDocsHandler:     page.NewRelatedDocsHandler(relatedContentConfig),
		outputFormats:          outputFormats,
		rc:                     &siteRenderingContext{output.HTMLFormat},
		outputFormatsConfig:    siteOutputFormatsConfig,
		mediaTypesConfig:       siteMediaTypesConfig,
		frontmatterHandler:     frontMatterHandler,
		enableInlineShortcodes: cfg.Language.GetBool("enableInlineShortcodes"),
		siteCfg:                siteConfig,
	}
	s.prepareInits()
	return s, nil
}
// NewSite creates a new site with the given dependency configuration.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
func NewSite(cfg deps.DepsCfg) (*Site, error) {
	s, err := newSite(cfg)
	if err != nil {
		return nil, err
	}
	// Wire up the dependency container (filesystems, templates, logging).
	if err = applyDeps(cfg, s); err != nil {
		return nil, err
	}
	return s, nil
}
// NewSiteDefaultLang creates a new site in the default language.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
// TODO(bep) test refactor -- remove
func NewSiteDefaultLang(withTemplate ...func(templ tpl.TemplateHandler) error) (*Site, error) {
	v := viper.New()
	if err := loadDefaultSettingsFor(v); err != nil {
		return nil, err
	}
	return newSiteForLang(langs.NewDefaultLanguage(v), withTemplate...)
}

// NewEnglishSite creates a new site in English language.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
// TODO(bep) test refactor -- remove
func NewEnglishSite(withTemplate ...func(templ tpl.TemplateHandler) error) (*Site, error) {
	v := viper.New()
	if err := loadDefaultSettingsFor(v); err != nil {
		return nil, err
	}
	return newSiteForLang(langs.NewLanguage("en", v), withTemplate...)
}
// newSiteForLang creates a new site in the given language, composing the
// provided template configuration funcs into a single WithTemplate hook.
func newSiteForLang(lang *langs.Language, withTemplate ...func(templ tpl.TemplateHandler) error) (*Site, error) {
	withTemplates := func(templ tpl.TemplateHandler) error {
		for _, wt := range withTemplate {
			if err := wt(templ); err != nil {
				return err
			}
		}
		return nil
	}
	cfg := deps.DepsCfg{WithTemplate: withTemplates, Cfg: lang}
	return NewSiteForCfg(cfg)
}

// NewSiteForCfg creates a new site for the given configuration.
// The site will have a template system loaded and ready to use.
// Note: This is mainly used in single site tests.
func NewSiteForCfg(cfg deps.DepsCfg) (*Site, error) {
	h, err := NewHugoSites(cfg)
	if err != nil {
		return nil, err
	}
	// A single-config setup produces exactly one site.
	return h.Sites[0], nil
}
// SiteInfo is the template-facing view of a Site: metadata from config
// (title, author, social, copyright) plus URL behavior and language info.
type SiteInfo struct {
	Authors  page.AuthorList
	Social   SiteSocial
	hugoInfo hugo.Info
	title    string
	RSSLink  string
	Author   map[string]interface{}

	LanguageCode string
	Copyright    string

	// Configured permalink patterns; exposed via the deprecated Permalinks method.
	permalinks map[string]string

	LanguagePrefix string
	Languages      langs.Languages

	BuildDrafts bool

	canonifyURLs bool
	relativeURLs bool
	// Per-page predicate for the uglyURLs setting (bool, CLI string or map).
	uglyURLs func(p page.Page) bool

	owner    *HugoSites
	s        *Site
	language *langs.Language
	// Whether the default content language is published in a subdirectory.
	defaultContentLanguageInSubdir bool
	sectionPagesMenu               string
}
// Pages returns the site's pages.
func (s *SiteInfo) Pages() page.Pages {
	return s.s.Pages()
}

// RegularPages returns the site's regular pages.
func (s *SiteInfo) RegularPages() page.Pages {
	return s.s.RegularPages()
}

// AllPages returns all pages for all languages.
func (s *SiteInfo) AllPages() page.Pages {
	return s.s.AllPages()
}

// AllRegularPages returns all regular pages for all languages.
func (s *SiteInfo) AllRegularPages() page.Pages {
	return s.s.AllRegularPages()
}

// Permalinks returns the configured permalink patterns.
//
// Deprecated: this logs a deprecation warning on use.
// Remove in 0.61
func (s *SiteInfo) Permalinks() map[string]string {
	helpers.Deprecated(".Site.Permalinks", "", true)
	return s.permalinks
}

// LastChange returns the last modification date of the site.
func (s *SiteInfo) LastChange() time.Time {
	return s.s.lastmod
}

// Title returns the site title.
func (s *SiteInfo) Title() string {
	return s.title
}

// Site returns itself, satisfying the page.Site interface.
func (s *SiteInfo) Site() page.Site {
	return s
}

// Menus returns the site's menus.
func (s *SiteInfo) Menus() navigation.Menus {
	return s.s.Menus()
}

// Taxonomies returns the site's taxonomies.
// TODO(bep) type
func (s *SiteInfo) Taxonomies() interface{} {
	return s.s.Taxonomies
}

// Params returns the language-level site params.
func (s *SiteInfo) Params() maps.Params {
	return s.s.Language().Params()
}

// Data returns the site's data files as a map.
func (s *SiteInfo) Data() map[string]interface{} {
	return s.s.h.Data()
}

// Language returns the site's language.
func (s *SiteInfo) Language() *langs.Language {
	return s.language
}

// Config returns the site services configuration.
func (s *SiteInfo) Config() SiteConfig {
	return s.s.siteConfigConfig
}

// Hugo returns info about the Hugo build (version, environment).
func (s *SiteInfo) Hugo() hugo.Info {
	return s.hugoInfo
}

// Sites is a convenience method to get all the Hugo sites/languages configured.
func (s *SiteInfo) Sites() page.Sites {
	return s.s.h.siteInfos()
}

// String implements fmt.Stringer.
func (s *SiteInfo) String() string {
	return fmt.Sprintf("Site(%q)", s.title)
}

// BaseURL returns the site's base URL.
func (s *SiteInfo) BaseURL() template.URL {
	return template.URL(s.s.PathSpec.BaseURL.String())
}
// ServerPort returns the port part of the BaseURL, 0 if none found.
func (s *SiteInfo) ServerPort() int {
	// Atoi fails on the empty string, so both "no port" and "bad port"
	// fall through to 0.
	if port, err := strconv.Atoi(s.s.PathSpec.BaseURL.URL().Port()); err == nil {
		return port
	}
	return 0
}
// GoogleAnalytics is kept here for historic reasons.
// It returns the configured Google Analytics ID.
func (s *SiteInfo) GoogleAnalytics() string {
	return s.Config().Services.GoogleAnalytics.ID
}

// DisqusShortname is kept here for historic reasons.
// It returns the configured Disqus shortname.
func (s *SiteInfo) DisqusShortname() string {
	return s.Config().Services.Disqus.Shortname
}
// SiteSocial is a place to put social details on a site level. These are the
// standard keys that themes will expect to have available, but can be
// expanded to any others on a per site basis
// github
// facebook
// facebook_admin
// twitter
// twitter_domain
// pinterest
// instagram
// youtube
// linkedin
type SiteSocial map[string]string

// Param is a convenience method to do lookups in SiteInfo's Params map.
//
// This method is also implemented on Page.
func (s *SiteInfo) Param(key interface{}) (interface{}, error) {
	return resource.Param(s, nil, key)
}

// IsMultiLingual reports whether the site is configured with more than one language.
func (s *SiteInfo) IsMultiLingual() bool {
	return len(s.Languages) > 1
}

// IsServer reports whether Hugo is running in server (watch) mode.
func (s *SiteInfo) IsServer() bool {
	return s.owner.running
}
// siteRefLinker resolves ref/relref lookups for a site.
type siteRefLinker struct {
	s *Site

	// errorLogger logs unresolved refs; notFoundURL is returned for them.
	errorLogger *log.Logger
	notFoundURL string
}

// newSiteRefLinker creates a siteRefLinker configured from the
// refLinksNotFoundURL and refLinksErrorLevel settings.
func newSiteRefLinker(cfg config.Provider, s *Site) (siteRefLinker, error) {
	logger := s.Log.ERROR
	notFoundURL := cfg.GetString("refLinksNotFoundURL")
	errLevel := cfg.GetString("refLinksErrorLevel")
	if strings.EqualFold(errLevel, "warning") {
		logger = s.Log.WARN
	}
	return siteRefLinker{s: s, errorLogger: logger, notFoundURL: notFoundURL}, nil
}
// logNotFound logs a REF_NOT_FOUND message for a failed ref/relref
// lookup, including source position or page information when available.
func (s siteRefLinker) logNotFound(ref, what string, p page.Page, position text.Position) {
	switch {
	case position.IsValid():
		s.errorLogger.Printf("[%s] REF_NOT_FOUND: Ref %q: %s: %s", s.s.Lang(), ref, position.String(), what)
	case p == nil:
		s.errorLogger.Printf("[%s] REF_NOT_FOUND: Ref %q: %s", s.s.Lang(), ref, what)
	default:
		s.errorLogger.Printf("[%s] REF_NOT_FOUND: Ref %q from page %q: %s", s.s.Lang(), ref, p.Path(), what)
	}
}
// refLink resolves a ref/relref to a permalink for the given source page,
// optionally for a specific output format. Unresolvable refs are logged
// and the configured notFoundURL is returned instead of an error.
func (s *siteRefLinker) refLink(ref string, source interface{}, relative bool, outputFormat string) (string, error) {
	p, err := unwrapPage(source)
	if err != nil {
		return "", err
	}
	var refURL *url.URL
	ref = filepath.ToSlash(ref)
	refURL, err = url.Parse(ref)
	if err != nil {
		return s.notFoundURL, err
	}
	var target page.Page
	var link string
	if refURL.Path != "" {
		var err error
		target, err = s.s.getPageNew(p, refURL.Path)
		var pos text.Position
		if err != nil || target == nil {
			// Only compute the position when we are going to log.
			if p, ok := source.(text.Positioner); ok {
				pos = p.Position()
			}
		}
		if err != nil {
			s.logNotFound(refURL.Path, err.Error(), p, pos)
			return s.notFoundURL, nil
		}
		if target == nil {
			s.logNotFound(refURL.Path, "page not found", p, pos)
			return s.notFoundURL, nil
		}
		// Link either via the page itself or a specific output format of it.
		var permalinker Permalinker = target
		if outputFormat != "" {
			o := target.OutputFormats().Get(outputFormat)
			if o == nil {
				s.logNotFound(refURL.Path, fmt.Sprintf("output format %q", outputFormat), p, pos)
				return s.notFoundURL, nil
			}
			permalinker = o
		}
		if relative {
			link = permalinker.RelPermalink()
		} else {
			link = permalinker.Permalink()
		}
	}
	if refURL.Fragment != "" {
		_ = target
		link = link + "#" + refURL.Fragment
		// Append any content-format specific anchor suffix (e.g. for
		// auto-generated heading IDs), from the target page if resolved,
		// otherwise from the source page.
		if pctx, ok := target.(pageContext); ok {
			if refURL.Path != "" {
				if di, ok := pctx.getContentConverter().(converter.DocumentInfo); ok {
					link = link + di.AnchorSuffix()
				}
			}
		} else if pctx, ok := p.(pageContext); ok {
			if di, ok := pctx.getContentConverter().(converter.DocumentInfo); ok {
				link = link + di.AnchorSuffix()
			}
		}
	}
	return link, nil
}
// running reports whether Hugo is running in watch/server mode.
func (s *Site) running() bool {
	return s.h != nil && s.h.running
}

// multilingual returns the owning HugoSites' multilingual configuration.
func (s *Site) multilingual() *Multilingual {
	return s.h.multilingual
}

// whatChanged describes the outcome of a filesystem-event analysis:
// whether content source files changed, whether anything else changed,
// and the set of affected file names.
type whatChanged struct {
	source bool
	other  bool
	files  map[string]bool
}
// RegisterMediaTypes will register the Site's media types in the mime
// package, so it will behave correctly with Hugo's built-in server.
func (s *Site) RegisterMediaTypes() {
	for _, mt := range s.mediaTypesConfig {
		for _, suffix := range mt.Suffixes {
			// Errors from AddExtensionType are deliberately ignored;
			// registration is best-effort.
			_ = mime.AddExtensionType(mt.Delimiter+suffix, mt.Type()+"; charset=utf-8")
		}
	}
}
// filterFileEvents drops duplicate events, events for ignored files, and
// events for anything that is not a regular source file. Remove/Rename
// events whose file no longer exists on disk are kept: the file cannot be
// stat'ed, but the event still matters for the rebuild.
func (s *Site) filterFileEvents(events []fsnotify.Event) []fsnotify.Event {
	var kept []fsnotify.Event
	seen := make(map[fsnotify.Event]bool)
	for _, ev := range events {
		// Avoid processing the same event twice.
		if seen[ev] {
			continue
		}
		seen[ev] = true
		if s.SourceSpec.IgnoreFile(ev.Name) {
			continue
		}
		// Throw away any directories.
		isRegular, err := s.SourceSpec.IsRegularSourceFile(ev.Name)
		if err != nil && os.IsNotExist(err) && (ev.Op&fsnotify.Remove == fsnotify.Remove || ev.Op&fsnotify.Rename == fsnotify.Rename) {
			// The file is gone: force keep of event.
			isRegular = true
		}
		if isRegular {
			kept = append(kept, ev)
		}
	}
	return kept
}
// translateFileEvents collapses the events for each filename into a single
// representative event: the last Write if any, else the last Create, else
// the first event seen.
//
// Fix: the previous version appended one kept event per occurrence of a
// filename, so a Create+Write pair for the same file produced the same kept
// event twice; a handled-set now guarantees one output event per filename.
// (Also fixed comment typos: "superflous", "mage".)
func (s *Site) translateFileEvents(events []fsnotify.Event) []fsnotify.Event {
	var filtered []fsnotify.Event
	eventMap := make(map[string][]fsnotify.Event)
	// We often get a Remove etc. followed by a Create, a Create followed by a Write.
	// Remove the superfluous events to make the update logic simpler.
	for _, ev := range events {
		eventMap[ev.Name] = append(eventMap[ev.Name], ev)
	}
	handled := make(map[string]bool)
	for _, ev := range events {
		if handled[ev.Name] {
			// This filename is already collapsed; don't emit duplicates.
			continue
		}
		handled[ev.Name] = true
		mapped := eventMap[ev.Name]
		// Keep one event per filename.
		found := false
		var kept fsnotify.Event
		for i, ev2 := range mapped {
			if i == 0 {
				kept = ev2
			}
			if ev2.Op&fsnotify.Write == fsnotify.Write {
				kept = ev2
				found = true
			}
			if !found && ev2.Op&fsnotify.Create == fsnotify.Create {
				kept = ev2
			}
		}
		filtered = append(filtered, kept)
	}
	return filtered
}
// processPartial partially rebuilds a site given the filesystem events.
// It returns whether the content source was changed.
// TODO(bep) clean up/rewrite this method.
//
// Fix: the i18n branch previously did `i18nChanged = append(dataChanged, ev)`,
// appending to the wrong slice so i18nChanged carried all data events too.
// Also renamed the loop variable that shadowed the receiver s.
func (s *Site) processPartial(config *BuildCfg, init func(config *BuildCfg) error, events []fsnotify.Event) error {
	events = s.filterFileEvents(events)
	events = s.translateFileEvents(events)
	s.Log.DEBUG.Printf("Rebuild for events %q", events)
	h := s.h
	// First we need to determine what changed
	var (
		sourceChanged       = []fsnotify.Event{}
		sourceReallyChanged = []fsnotify.Event{}
		contentFilesChanged []string
		tmplChanged         = []fsnotify.Event{}
		dataChanged         = []fsnotify.Event{}
		i18nChanged         = []fsnotify.Event{}
		shortcodesChanged   = make(map[string]bool)
		sourceFilesChanged  = make(map[string]bool)
		// prevent spamming the log on changes
		logger = helpers.NewDistinctFeedbackLogger()
	)
	var cachePartitions []string
	// Classify each event; one event can land in several buckets.
	for _, ev := range events {
		if assetsFilename := s.BaseFs.Assets.MakePathRelative(ev.Name); assetsFilename != "" {
			cachePartitions = append(cachePartitions, resources.ResourceKeyPartitions(assetsFilename)...)
		}
		if s.isContentDirEvent(ev) {
			logger.Println("Source changed", ev)
			sourceChanged = append(sourceChanged, ev)
		}
		if s.isLayoutDirEvent(ev) {
			logger.Println("Template changed", ev)
			tmplChanged = append(tmplChanged, ev)
			if strings.Contains(ev.Name, "shortcodes") {
				shortcode := filepath.Base(ev.Name)
				shortcode = strings.TrimSuffix(shortcode, filepath.Ext(shortcode))
				shortcodesChanged[shortcode] = true
			}
		}
		if s.isDataDirEvent(ev) {
			logger.Println("Data changed", ev)
			dataChanged = append(dataChanged, ev)
		}
		if s.isI18nEvent(ev) {
			logger.Println("i18n changed", ev)
			// Was append(dataChanged, ev) — wrong destination slice.
			i18nChanged = append(i18nChanged, ev)
		}
	}
	changed := &whatChanged{
		source: len(sourceChanged) > 0 || len(shortcodesChanged) > 0,
		other:  len(tmplChanged) > 0 || len(i18nChanged) > 0 || len(dataChanged) > 0,
		files:  sourceFilesChanged,
	}
	config.whatChanged = changed
	if err := init(config); err != nil {
		return err
	}
	// These in memory resource caches will be rebuilt on demand.
	for _, site := range s.h.Sites {
		site.ResourceSpec.ResourceCache.DeletePartitions(cachePartitions...)
	}
	if len(tmplChanged) > 0 || len(i18nChanged) > 0 {
		sites := s.h.Sites
		first := sites[0]
		s.h.init.Reset()
		// TODO(bep) globals clean
		if err := first.Deps.LoadResources(); err != nil {
			return err
		}
		// Rebuild the per-language Deps from the first site's.
		for i := 1; i < len(sites); i++ {
			site := sites[i]
			var err error
			depsCfg := deps.DepsCfg{
				Language:      site.language,
				MediaTypes:    site.mediaTypesConfig,
				OutputFormats: site.outputFormatsConfig,
			}
			site.Deps, err = first.Deps.ForLanguage(depsCfg, func(d *deps.Deps) error {
				d.Site = &site.Info
				return nil
			})
			if err != nil {
				return err
			}
		}
	}
	if len(dataChanged) > 0 {
		s.h.init.data.Reset()
	}
	for _, ev := range sourceChanged {
		removed := false
		if ev.Op&fsnotify.Remove == fsnotify.Remove {
			removed = true
		}
		// Some editors (Vim) sometimes issue only a Rename operation when writing an existing file
		// Sometimes a rename operation means that file has been renamed other times it means
		// it's been updated
		if ev.Op&fsnotify.Rename == fsnotify.Rename {
			// If the file is still on disk, it's only been updated, if it's not, it's been moved
			if ex, err := afero.Exists(s.Fs.Source, ev.Name); !ex || err != nil {
				removed = true
			}
		}
		if removed && files.IsContentFile(ev.Name) {
			h.removePageByFilename(ev.Name)
		}
		sourceReallyChanged = append(sourceReallyChanged, ev)
		sourceFilesChanged[ev.Name] = true
	}
	for shortcode := range shortcodesChanged {
		// There are certain scenarios that, when a shortcode changes,
		// it isn't sufficient to just rerender the already parsed shortcode.
		// One example is if the user adds a new shortcode to the content file first,
		// and then creates the shortcode on the file system.
		// To handle these scenarios, we must do a full reprocessing of the
		// pages that keeps a reference to the changed shortcode.
		pagesWithShortcode := h.findPagesByShortcode(shortcode)
		for _, p := range pagesWithShortcode {
			contentFilesChanged = append(contentFilesChanged, p.File().Filename())
		}
	}
	if len(sourceReallyChanged) > 0 || len(contentFilesChanged) > 0 {
		var filenamesChanged []string
		for _, e := range sourceReallyChanged {
			filenamesChanged = append(filenamesChanged, e.Name)
		}
		if len(contentFilesChanged) > 0 {
			filenamesChanged = append(filenamesChanged, contentFilesChanged...)
		}
		filenamesChanged = helpers.UniqueStringsReuse(filenamesChanged)
		if err := s.readAndProcessContent(filenamesChanged...); err != nil {
			return err
		}
	}
	return nil
}
// process initializes the site and then reads and processes its content.
func (s *Site) process(config BuildCfg) error {
	if err := s.initialize(); err != nil {
		return errors.Wrap(err, "initialize")
	}
	if err := s.readAndProcessContent(); err != nil {
		return errors.Wrap(err, "readAndProcessContent")
	}
	return nil
}
// render renders the site for one output format pass (ctx.outIdx). Aliases,
// sitemap, robots.txt and the 404 page are rendered only on the first pass.
func (s *Site) render(ctx *siteRenderContext) (err error) {
	if err := page.Clear(); err != nil {
		return err
	}
	if ctx.outIdx == 0 {
		// Note that even if disableAliases is set, the aliases themselves are
		// preserved on page. The motivation with this is to be able to generate
		// 301 redirects in a .htacess file and similar using a custom output format.
		if !s.Cfg.GetBool("disableAliases") {
			// Aliases must be rendered before pages.
			// Some sites, Hugo docs included, have faulty alias definitions that point
			// to itself or another real page. These will be overwritten in the next
			// step.
			if err = s.renderAliases(); err != nil {
				return
			}
		}
	}
	if err = s.renderPages(ctx); err != nil {
		return
	}
	if ctx.outIdx == 0 {
		if err = s.renderSitemap(); err != nil {
			return
		}
		if err = s.renderRobotsTXT(); err != nil {
			return
		}
		if err = s.render404(); err != nil {
			return
		}
	}
	// Only continue when this pass should render singleton pages.
	if !ctx.renderSingletonPages() {
		return
	}
	if err = s.renderMainLanguageRedirect(); err != nil {
		return
	}
	return
}
// Initialise is an exported alias for initialize, kept for external callers.
func (s *Site) Initialise() (err error) {
	return s.initialize()
}

// initialize prepares the site for building by setting up its SiteInfo.
func (s *Site) initialize() (err error) {
	return s.initializeSiteInfo()
}
// HomeAbsURL is a convenience method giving the absolute URL to the home page.
// In a multilingual setup the language code is used as the base path.
func (s *SiteInfo) HomeAbsURL() string {
	if s.IsMultiLingual() {
		return s.owner.AbsURL(s.Language().Lang, false)
	}
	return s.owner.AbsURL("", false)
}
// SitemapAbsURL is a convenience method giving the absolute URL to the sitemap.
func (s *SiteInfo) SitemapAbsURL() string {
	base := s.HomeAbsURL()
	if !strings.HasSuffix(base, "/") {
		base += "/"
	}
	return base + s.s.siteCfg.sitemap.Filename
}
// initializeSiteInfo builds this site's SiteInfo from the language and site
// configuration: language prefix, ugly-URL policy, metadata fields, and the
// home page RSS link when an RSS output format exists.
func (s *Site) initializeSiteInfo() error {
	var (
		lang      = s.language
		languages langs.Languages
	)
	if s.h != nil && s.h.multilingual != nil {
		languages = s.h.multilingual.Languages
	}
	permalinks := s.Cfg.GetStringMapString("permalinks")
	defaultContentInSubDir := s.Cfg.GetBool("defaultContentLanguageInSubdir")
	defaultContentLanguage := s.Cfg.GetString("defaultContentLanguage")
	languagePrefix := ""
	if s.multilingualEnabled() && (defaultContentInSubDir || lang.Lang != defaultContentLanguage) {
		languagePrefix = "/" + lang.Lang
	}
	// uglyURLs can be configured as a bool, a CLI string, or a per-section map.
	var uglyURLs = func(p page.Page) bool {
		return false
	}
	v := s.Cfg.Get("uglyURLs")
	if v != nil {
		switch vv := v.(type) {
		case bool:
			uglyURLs = func(p page.Page) bool {
				return vv
			}
		case string:
			// This is what we get from the CLI (--uglyURLs).
			vvv := cast.ToBool(vv)
			uglyURLs = func(p page.Page) bool {
				return vvv
			}
		default:
			m := cast.ToStringMapBool(v)
			uglyURLs = func(p page.Page) bool {
				return m[p.Section()]
			}
		}
	}
	s.Info = SiteInfo{
		title:                          lang.GetString("title"),
		Author:                         lang.GetStringMap("author"),
		Social:                         lang.GetStringMapString("social"),
		LanguageCode:                   lang.GetString("languageCode"),
		Copyright:                      lang.GetString("copyright"),
		language:                       lang,
		LanguagePrefix:                 languagePrefix,
		Languages:                      languages,
		defaultContentLanguageInSubdir: defaultContentInSubDir,
		sectionPagesMenu:               lang.GetString("sectionPagesMenu"),
		BuildDrafts:                    s.Cfg.GetBool("buildDrafts"),
		canonifyURLs:                   s.Cfg.GetBool("canonifyURLs"),
		relativeURLs:                   s.Cfg.GetBool("relativeURLs"),
		uglyURLs:                       uglyURLs,
		permalinks:                     permalinks,
		owner:                          s.h,
		s:                              s,
		hugoInfo:                       hugo.NewInfo(s.Cfg.GetString("environment")),
	}
	rssOutputFormat, found := s.outputFormats[page.KindHome].GetByName(output.RSSFormat.Name)
	if found {
		// Only set the RSS link when the home page has an RSS output format.
		s.Info.RSSLink = s.permalink(rssOutputFormat.BaseFilename())
	}
	return nil
}
// isI18nEvent reports whether the event touches an i18n source file.
func (s *Site) isI18nEvent(e fsnotify.Event) bool {
	return s.BaseFs.SourceFilesystems.IsI18n(e.Name)
}

// isDataDirEvent reports whether the event touches a data directory file.
func (s *Site) isDataDirEvent(e fsnotify.Event) bool {
	return s.BaseFs.SourceFilesystems.IsData(e.Name)
}

// isLayoutDirEvent reports whether the event touches a layout file.
func (s *Site) isLayoutDirEvent(e fsnotify.Event) bool {
	return s.BaseFs.SourceFilesystems.IsLayout(e.Name)
}

// isContentDirEvent reports whether the event touches a content file.
func (s *Site) isContentDirEvent(e fsnotify.Event) bool {
	return s.BaseFs.IsContent(e.Name)
}
// readAndProcessContent collects and processes the site's content pages.
// When filenames are given, only those files are processed (partial rebuild).
func (s *Site) readAndProcessContent(filenames ...string) error {
	spec := source.NewSourceSpec(s.PathSpec, s.BaseFs.Content.Fs)
	partialBuild := len(filenames) > 0
	processor := newPagesProcessor(s.h, spec, partialBuild)
	collector := newPagesCollector(spec, s.Log, s.h.ContentChanges, processor, filenames...)
	return collector.Collect()
}
// getMenusFromConfig reads the "menus" map from the language configuration
// and builds navigation.Menus from it. Entries that cannot be converted are
// logged and skipped.
//
// Fix: removed a redundant `return ret` that sat after the loop inside the
// nil-check branch (the final return already returned the same value), and
// flattened the nesting with guard clauses.
func (s *Site) getMenusFromConfig() navigation.Menus {
	ret := navigation.Menus{}
	menus := s.language.GetStringMap("menus")
	if menus == nil {
		return ret
	}
	for name, menu := range menus {
		m, err := cast.ToSliceE(menu)
		if err != nil {
			s.Log.ERROR.Printf("unable to process menus in site config\n")
			s.Log.ERROR.Println(err)
			continue
		}
		for _, entry := range m {
			s.Log.DEBUG.Printf("found menu: %q, in site config\n", name)
			menuEntry := navigation.MenuEntry{Menu: name}
			ime, err := maps.ToStringMapE(entry)
			if err != nil {
				// Log but still marshal whatever we got, matching previous behavior.
				s.Log.ERROR.Printf("unable to process menus in site config\n")
				s.Log.ERROR.Println(err)
			}
			menuEntry.MarshallMap(ime)
			// TODO(bep) clean up all of this
			menuEntry.ConfiguredURL = s.Info.createNodeMenuEntryURL(menuEntry.ConfiguredURL)
			if ret[name] == nil {
				ret[name] = navigation.Menu{}
			}
			ret[name] = ret[name].Add(&menuEntry)
		}
	}
	return ret
}
// createNodeMenuEntryURL normalizes a config-provided menu URL so it matches
// node URLs: relative values pass through untouched; absolute ones are
// URLized and, unless URLs are canonified, prefixed with the context root.
func (s *SiteInfo) createNodeMenuEntryURL(in string) string {
	if !strings.HasPrefix(in, "/") {
		return in
	}
	// make it match the nodes
	out := helpers.SanitizeURLKeepTrailingSlash(s.s.PathSpec.URLize(in))
	if !s.canonifyURLs {
		out = helpers.AddContextRoot(s.s.PathSpec.BaseURL.String(), out)
	}
	return out
}
// assembleMenus builds s.menus from three sources: menu entries in the site
// config, the optional sectionPagesMenu, and menus declared on pages. Child
// entries are attached to their parents before the top level is assembled.
func (s *Site) assembleMenus() {
	s.menus = make(navigation.Menus)
	// twoD keys entries by (menu name, entry key).
	type twoD struct {
		MenuName, EntryName string
	}
	flat := map[twoD]*navigation.MenuEntry{}
	children := map[twoD]navigation.Menu{}
	// add menu entries from config to flat hash
	menuConfig := s.getMenusFromConfig()
	for name, menu := range menuConfig {
		for _, me := range menu {
			flat[twoD{name, me.KeyName()}] = me
		}
	}
	sectionPagesMenu := s.Info.sectionPagesMenu
	if sectionPagesMenu != "" {
		for _, p := range s.workAllPages {
			if p.Kind() == page.KindSection {
				// From Hugo 0.22 we have nested sections, but until we get a
				// feel of how that would work in this setting, let us keep
				// this menu for the top level only.
				id := p.Section()
				if _, ok := flat[twoD{sectionPagesMenu, id}]; ok {
					// A config-defined entry wins over the auto-generated one.
					continue
				}
				me := navigation.MenuEntry{Identifier: id,
					Name:   p.LinkTitle(),
					Weight: p.Weight(),
					Page:   p}
				flat[twoD{sectionPagesMenu, me.KeyName()}] = &me
			}
		}
	}
	// Add menu entries provided by pages
	for _, p := range s.workAllPages {
		for name, me := range p.pageMenus.menus() {
			if _, ok := flat[twoD{name, me.KeyName()}]; ok {
				s.SendError(p.wrapError(errors.Errorf("duplicate menu entry with identifier %q in menu %q", me.KeyName(), name)))
				continue
			}
			flat[twoD{name, me.KeyName()}] = me
		}
	}
	// Create Children Menus First
	for _, e := range flat {
		if e.Parent != "" {
			children[twoD{e.Menu, e.Parent}] = children[twoD{e.Menu, e.Parent}].Add(e)
		}
	}
	// Placing Children in Parents (in flat)
	for p, childmenu := range children {
		_, ok := flat[twoD{p.MenuName, p.EntryName}]
		if !ok {
			// if parent does not exist, create one without a URL
			flat[twoD{p.MenuName, p.EntryName}] = &navigation.MenuEntry{Name: p.EntryName}
		}
		flat[twoD{p.MenuName, p.EntryName}].Children = childmenu
	}
	// Assembling Top Level of Tree
	for menu, e := range flat {
		if e.Parent == "" {
			_, ok := s.menus[menu.MenuName]
			if !ok {
				s.menus[menu.MenuName] = navigation.Menu{}
			}
			s.menus[menu.MenuName] = s.menus[menu.MenuName].Add(e)
		}
	}
}
// getLanguageTargetPathLang returns any language code to prefix the target
// file path with. In multihost mode each language always gets its own root.
func (s *Site) getLanguageTargetPathLang(alwaysInSubDir bool) string {
	if s.h.IsMultihost() {
		return s.Language().Lang
	}
	return s.getLanguagePermalinkLang(alwaysInSubDir)
}
// getLanguagePermalinkLang returns any language code to prefix the relative
// permalink with, or "" when no prefix applies (single-language or multihost
// sites, or the default language when it lives at the site root).
func (s *Site) getLanguagePermalinkLang(alwaysInSubDir bool) string {
	if !s.Info.IsMultiLingual() || s.h.IsMultihost() {
		return ""
	}
	lang := s.Language().Lang
	if alwaysInSubDir {
		return lang
	}
	if lang != s.multilingual().DefaultLang.Lang || s.Info.defaultContentLanguageInSubdir {
		return lang
	}
	return ""
}
// getTaxonomyKey creates the path key for a taxonomy value, lowercasing it
// unless DisablePathToLower is configured.
func (s *Site) getTaxonomyKey(key string) string {
	made := s.PathSpec.MakePath(key)
	if s.PathSpec.DisablePathToLower {
		return made
	}
	return strings.ToLower(made)
}
// Prepare site for a new full build.
func (s *Site) resetBuildState(sourceChanged bool) {
	s.relatedDocsHandler = s.relatedDocsHandler.Clone()
	s.init.Reset()
	if sourceChanged {
		// Content changed: rebuild page collections and reset per-page state.
		s.PageCollections = newPageCollectionsFromPages(s.rawAllPages)
		for _, p := range s.rawAllPages {
			p.pagePages = &pagePages{}
			p.parent = nil
			p.Scratcher = maps.NewScratcher()
		}
	} else {
		// Content unchanged: only the page scratch state needs resetting.
		s.pagesMap.withEveryPage(func(p *pageState) {
			p.Scratcher = maps.NewScratcher()
		})
	}
}
// errorCollator drains results, then sends a single representative error
// (with the rest logged) on errs and closes errs.
//
// Fix: the collected slice was named `errors`, shadowing the errors package
// inside this function; renamed to `collected`.
func (s *Site) errorCollator(results <-chan error, errs chan<- error) {
	var collected []error
	for e := range results {
		collected = append(collected, e)
	}
	errs <- s.h.pickOneAndLogTheRest(collected)
	close(errs)
}
// GetPage looks up a page of a given type for the given ref.
// In Hugo <= 0.44 you had to add Page Kind (section, home) etc. as the first
// argument and then either a unix styled path (with or without a leading slash))
// or path elements separated.
// When we now remove the Kind from this API, we need to make the transition as painless
// as possible for existing sites. Most sites will use {{ .Site.GetPage "section" "my/section" }},
// i.e. 2 arguments, so we test for that.
func (s *SiteInfo) GetPage(ref ...string) (page.Page, error) {
	p, err := s.s.getPageOldVersion(ref...)
	if p == nil {
		// The nil struct has meaning in some situations, mostly to avoid breaking
		// existing sites doing $nilpage.IsDescendant($p), which will always return
		// false.
		p = page.NilPage
	}
	// Any lookup error is returned alongside the NilPage fallback.
	return p, err
}
// permalink makes link absolute against the site's configured base URL.
func (s *Site) permalink(link string) string {
	return s.PathSpec.PermalinkForBaseURL(link, s.PathSpec.BaseURL.String())
}
// renderAndWriteXML renders d through the given layouts into a buffer and
// publishes the result (as the RSS output format, used here only for its
// MIME type) to targetPath, rewriting URLs relative or absolute per config.
//
// Fix: the base-URL temporary was named `s`, shadowing the receiver; renamed
// to baseURL.
func (s *Site) renderAndWriteXML(statCounter *uint64, name string, targetPath string, d interface{}, layouts ...string) error {
	s.Log.DEBUG.Printf("Render XML for %q to %q", name, targetPath)
	renderBuffer := bp.GetBuffer()
	defer bp.PutBuffer(renderBuffer)
	if err := s.renderForLayouts(name, "", d, renderBuffer, layouts...); err != nil {
		return err
	}
	var path string
	if s.Info.relativeURLs {
		path = helpers.GetDottedRelativePath(targetPath)
	} else {
		baseURL := s.PathSpec.BaseURL.String()
		if !strings.HasSuffix(baseURL, "/") {
			baseURL += "/"
		}
		path = baseURL
	}
	pd := publisher.Descriptor{
		Src:         renderBuffer,
		TargetPath:  targetPath,
		StatCounter: statCounter,
		// For the minification part of XML,
		// we currently only use the MIME type.
		OutputFormat: output.RSSFormat,
		AbsURLPath:   path,
	}
	return s.publisher.Publish(pd)
}
// renderAndWritePage renders p through the given layouts into a buffer and
// publishes it to targetPath, applying URL rewriting, live-reload injection
// and the Hugo generator tag where applicable. An empty render is skipped.
func (s *Site) renderAndWritePage(statCounter *uint64, name string, targetPath string, p *pageState, layouts ...string) error {
	renderBuffer := bp.GetBuffer()
	defer bp.PutBuffer(renderBuffer)
	of := p.outputFormat()
	if err := s.renderForLayouts(p.Kind(), of.Name, p, renderBuffer, layouts...); err != nil {
		return err
	}
	if renderBuffer.Len() == 0 {
		// Nothing rendered: do not publish an empty file.
		return nil
	}
	isHTML := of.IsHTML
	isRSS := of.Name == "RSS"
	// path is the URL rewrite base: dotted relative path for relativeURLs,
	// the site base URL for RSS or canonified URLs, empty otherwise.
	var path string
	if s.Info.relativeURLs {
		path = helpers.GetDottedRelativePath(targetPath)
	} else if isRSS || s.Info.canonifyURLs {
		url := s.PathSpec.BaseURL.String()
		if !strings.HasSuffix(url, "/") {
			url += "/"
		}
		path = url
	}
	pd := publisher.Descriptor{
		Src:          renderBuffer,
		TargetPath:   targetPath,
		StatCounter:  statCounter,
		OutputFormat: p.outputFormat(),
	}
	if isRSS {
		// Always canonify URLs in RSS
		pd.AbsURLPath = path
	} else if isHTML {
		if s.Info.relativeURLs || s.Info.canonifyURLs {
			pd.AbsURLPath = path
		}
		if s.running() && s.Cfg.GetBool("watch") && !s.Cfg.GetBool("disableLiveReload") {
			pd.LiveReloadPort = s.Cfg.GetInt("liveReloadPort")
		}
		// For performance reasons we only inject the Hugo generator tag on the home page.
		if p.IsHome() {
			pd.AddHugoGeneratorTag = !s.Cfg.GetBool("disableHugoGeneratorInject")
		}
	}
	return s.publisher.Publish(pd)
}
// infoOnMissingLayout lists page names whose missing layout is logged at
// INFO level instead of WARN.
var infoOnMissingLayout = map[string]bool{
	// The 404 layout is very much optional in Hugo, but we do look for it.
	"404": true,
}

// renderForLayouts executes the first template found among layouts with d as
// data, writing the output to w. A missing layout is logged, not an error.
func (s *Site) renderForLayouts(name, outputFormat string, d interface{}, w io.Writer, layouts ...string) (err error) {
	templ := s.findFirstTemplate(layouts...)
	if templ == nil {
		log := s.Log.WARN
		if infoOnMissingLayout[name] {
			log = s.Log.INFO
		}
		// Build the log message incrementally so the format verbs match the
		// optional outputFormat/name arguments.
		errMsg := "You should create a template file which matches Hugo Layouts Lookup Rules for this combination."
		var args []interface{}
		msg := "found no layout file for"
		if outputFormat != "" {
			msg += " %q"
			args = append(args, outputFormat)
		}
		if name != "" {
			msg += " for %q"
			args = append(args, name)
		}
		msg += ": " + errMsg
		log.Printf(msg, args...)
		return nil
	}
	if err = templ.Execute(w, d); err != nil {
		return _errors.Wrapf(err, "render of %q failed", name)
	}
	return
}
// findFirstTemplate returns the first of the given layout names that
// resolves to a template, or nil when none do.
func (s *Site) findFirstTemplate(layouts ...string) tpl.Template {
	for _, name := range layouts {
		templ, found := s.Tmpl.Lookup(name)
		if found {
			return templ
		}
	}
	return nil
}
// publish writes r to path under the publish filesystem, incrementing the
// given processing-stats counter.
func (s *Site) publish(statCounter *uint64, path string, r io.Reader) (err error) {
	s.PathSpec.ProcessingStats.Incr(statCounter)
	return helpers.WriteToDisk(filepath.Clean(path), r, s.BaseFs.PublishFs)
}
// kindFromFileInfoOrSections decides the page kind for a content file: a
// root-level _index file is the home page, other _index files take their
// kind from the section path, and everything else is a regular page.
func (s *Site) kindFromFileInfoOrSections(fi *fileInfo, sections []string) string {
	if fi.TranslationBaseName() != "_index" {
		return page.KindPage
	}
	if fi.Dir() == "" {
		return page.KindHome
	}
	return s.kindFromSections(sections)
}
// kindFromSections maps a section list to a page kind; an empty list means
// the home page.
func (s *Site) kindFromSections(sections []string) string {
	if len(sections) == 0 {
		return page.KindHome
	}
	return s.kindFromSectionPath(path.Join(sections...))
}
// kindFromSectionPath classifies a section path against the configured
// taxonomies: an exact match is a taxonomy-terms page, a prefix match a
// taxonomy page, anything else a plain section.
// NOTE(review): HasPrefix also matches sections that merely share a prefix
// with a taxonomy name (e.g. "tagsfoo" vs "tags") — confirm this is intended.
func (s *Site) kindFromSectionPath(sectionPath string) string {
	for _, plural := range s.siteCfg.taxonomiesConfig {
		if plural == sectionPath {
			return page.KindTaxonomyTerm
		}
		if strings.HasPrefix(sectionPath, plural) {
			return page.KindTaxonomy
		}
	}
	return page.KindSection
}
// newTaxonomyPage creates a taxonomy page for the given title and section
// path. It panics on failure, which indicates a programming error.
func (s *Site) newTaxonomyPage(title string, sections ...string) *pageState {
	meta := &pageMeta{
		s:        s,
		kind:     page.KindTaxonomy,
		sections: sections,
	}
	front := map[string]interface{}{"title": title}
	p, err := newPageFromMeta(front, meta)
	if err != nil {
		panic(err)
	}
	return p
}
// newPage creates a standalone page of the given kind for the given section
// path. It panics on failure, which indicates a programming error.
func (s *Site) newPage(kind string, sections ...string) *pageState {
	meta := &pageMeta{
		s:        s,
		kind:     kind,
		sections: sections,
	}
	p, err := newPageFromMeta(map[string]interface{}{}, meta)
	if err != nil {
		panic(err)
	}
	return p
}
// shouldBuild reports whether the page should be included in this build,
// based on the site's draft/future/expired settings and the page's dates.
func (s *Site) shouldBuild(p page.Page) bool {
	return shouldBuild(s.BuildFuture, s.BuildExpired,
		s.BuildDrafts, p.Draft(), p.PublishDate(), p.ExpiryDate())
}
func shouldBuild(buildFuture bool, buildExpired bool, buildDrafts bool, Draft bool,
publishDate time.Time, expiryDate time.Time) bool {
if !(buildDrafts || !Draft) {
return false
}
if !buildFuture && !publishDate.IsZero() && publishDate.After(time.Now()) {
return false
}
if !buildExpired && !expiryDate.IsZero() && expiryDate.Before(time.Now()) {
return false
}
return true
}
|
package broker

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"reflect"

	"github.com/jinzhu/gorm"
	"github.com/streadway/amqp"
	"labix.org/v2/mgo"
)

// ErrHandlerNotFoundErr is returned when no handler exists for an event.
// NOTE(review): the name stutters ("Err" prefix and suffix); renaming would
// break callers, so it is only flagged here.
var ErrHandlerNotFoundErr = errors.New("handler does not exist")

// Handler processes a raw event payload for a given event type.
// NOTE(review): SubscriptionHandler.HandleEvent below takes a reflect.Value
// first argument, not a string — confirm which implementations satisfy this
// interface.
type Handler interface {
	HandleEvent(string, []byte) error
	ErrHandler
}

// ErrHandler decides what to do with a delivery whose processing failed.
type ErrHandler interface {
	// bool is whether publishing the message to maintenance qeueue or not
	DefaultErrHandler(amqp.Delivery, error) bool
}
// Start returns the delivery callback used by the AMQP consumer. It looks up
// the handlers registered for the delivery type, invokes each one, and acks
// or republishes the message based on the resulting error.
func (c *Consumer) Start() func(delivery amqp.Delivery) {
	c.Log.Info("Broker sarted to consume")
	return func(delivery amqp.Delivery) {
		if _, ok := c.handlers[delivery.Type]; !ok {
			// if no handler found, just ack message
			c.Log.Debug("No handler for %s", delivery.Type)
			delivery.Ack(false)
			return
		}
		for _, handler := range c.handlers[delivery.Type] {
			err := handler.HandleEvent(c.contextValue, delivery.Body)
			// Known "record is gone / handler missing" errors ack the message
			// so it is removed from the queue; anything else goes through the
			// default error handler.
			switch err {
			case nil:
				delivery.Ack(false)
			case ErrHandlerNotFoundErr:
				c.Log.Debug("unknown event type (%s) recieved, deleting message from RMQ", delivery.Type)
				delivery.Ack(false)
			case gorm.RecordNotFound:
				c.Log.Warning("Record not found in our db (%s) recieved, deleting message from RMQ", string(delivery.Body))
				delivery.Ack(false)
			case mgo.ErrNotFound:
				c.Log.Warning("Record not found in our mongo db (%s) recieved, deleting message from RMQ", string(delivery.Body))
				delivery.Ack(false)
			default:
				// default err handler should handle the ack process
				if c.context.DefaultErrHandler(delivery, err) {
					// NOTE(review): true appears to request republishing to
					// the maintenance queue — confirm.
					if c.MaintenancePublisher == nil {
						continue
					}
					data, err := json.Marshal(delivery)
					if err != nil {
						continue
					}
					msg := amqp.Publishing{
						Body:  []byte(data),
						AppId: c.WorkerName,
					}
					c.MaintenancePublisher.Publish(msg)
				}
			}
		}
	}
}
// SubscriptionHandler wraps a handler function value so it can be invoked
// reflectively with a decoded event payload.
type SubscriptionHandler struct {
	v reflect.Value
}

// NewSubscriptionHandler validates that i is a function of shape
// func(context, payload) error and wraps it; otherwise it returns an error
// describing the mismatch.
func NewSubscriptionHandler(i interface{}) (*SubscriptionHandler, error) {
	typ := reflect.TypeOf(i)
	switch {
	case typ.Kind() != reflect.Func:
		return nil, fmt.Errorf("kind was %v, not Func", typ.Kind())
	case typ.NumIn() != 2:
		// check input parameter count
		return nil, fmt.Errorf("input arity was %v, not 2", typ.NumIn())
	case typ.NumOut() != 1:
		// check output parameter count
		return nil, fmt.Errorf("output arity was %v, not 1", typ.NumOut())
	case typ.Out(0).String() != "error":
		// output should be an error
		return nil, fmt.Errorf("type of return value was %v, not error", typ.Out(0))
	}
	return &SubscriptionHandler{v: reflect.ValueOf(i)}, nil
}
// nilParameter is passed to handlers whose payload parameter is an empty
// interface (or that take no meaningful payload at all).
var nilParameter = reflect.ValueOf((*interface{})(nil))

// HandleEvent JSON-decodes data into a freshly allocated value matching the
// wrapped handler's second parameter type, then calls the handler with
// (controllerValue, payload) via reflection, returning the handler's error.
func (m *SubscriptionHandler) HandleEvent(controllerValue reflect.Value, data []byte) error {
	var parameter reflect.Value
	if m.v.Type().NumIn() == 2 {
		in2 := m.v.Type().In(1)
		// if incoming paramter is an empty interface
		if reflect.Interface == in2.Kind() && in2.NumMethod() == 0 {
			parameter = nilParameter
			// if incoming parameter is a slice or a map
		} else if reflect.Slice == in2.Kind() || reflect.Map == in2.Kind() {
			// non-pointer maps/slices require special treatment because
			// json.Unmarshal won't work on a non-pointer destination. We
			// add a level indirection here, then deref it before .Call()
			parameter = reflect.New(in2)
		} else {
			// if it is a struct
			// NOTE(review): this assumes in2 is a pointer type; Elem() on a
			// non-pointer kind would panic — confirm registrations guarantee it.
			parameter = reflect.New(in2.Elem())
		}
	} else {
		// if handler doesnt have any incoming paramters
		parameter = nilParameter
	}
	// this is where magic happens:)
	// first read incoming []byte data into a io.reader then
	// put this data into a decoder(create a decoder out of it)
	// finally decode this data into given
	decoder := reflect.ValueOf(json.NewDecoder(bytes.NewReader(data)))
	res := decoder.MethodByName("Decode").Call([]reflect.Value{parameter})
	if len(res) > 0 && !res[0].IsNil() {
		return res[0].Interface().(error)
	}
	// Remove the extra indirection added above for slices/maps.
	if reflect.Slice == parameter.Elem().Kind() || reflect.Map == parameter.Elem().Kind() {
		parameter = parameter.Elem()
	}
	var out []reflect.Value
	switch m.v.Type().NumIn() {
	case 2:
		out = m.v.Call([]reflect.Value{
			controllerValue,
			parameter,
		})
	default:
		return fmt.Errorf("unknown signature %s", m.v.Type())
	}
	if len(out) > 0 && !out[0].IsNil() {
		return out[0].Interface().(error)
	}
	return nil
}
Social: add metrics into consumer
package broker

import (
	"bytes"
	"encoding/json"
	"fmt"
	"reflect"

	"github.com/jinzhu/gorm"
	"github.com/streadway/amqp"
	"labix.org/v2/mgo"
)

// Handler processes a raw event payload for a given event type.
type Handler interface {
	HandleEvent(string, []byte) error
	ErrHandler
}

// ErrHandler decides what to do with a delivery whose processing failed.
type ErrHandler interface {
	// bool is whether publishing the message to maintenance qeueue or not
	DefaultErrHandler(amqp.Delivery, error) bool
}
// withMetrics invokes the handler for the delivery, recording a "message"
// counter and a per-event-type timer when metrics are enabled.
func (c *Consumer) withMetrics(handler *SubscriptionHandler, delivery amqp.Delivery) error {
	if c.Metrics == nil {
		// Metrics disabled: plain invocation.
		return handler.HandleEvent(c.contextValue, delivery.Body)
	}
	c.Metrics.GetCounter("message").Inc(1)
	var handlerErr error
	c.Metrics.GetTimer(delivery.Type).Time(func() {
		handlerErr = handler.HandleEvent(c.contextValue, delivery.Body)
	})
	return handlerErr
}
// withCounter runs fn, incrementing the named counter first when metrics are
// enabled.
//
// Fix: when c.Metrics was nil the previous version called fn(), then fell
// through to dereference the nil Metrics (panic) — and would have invoked fn
// twice. The nil branch now returns after the single invocation.
func (c *Consumer) withCounter(counterName string, fn func()) {
	if c.Metrics == nil {
		fn()
		return
	}
	c.Metrics.GetCounter(counterName).Inc(1)
	fn()
}
// Start returns the delivery callback that dispatches consumed messages to
// the handlers registered for the delivery type, stopping at the first
// handler error and routing the outcome through handleError.
func (c *Consumer) Start() func(delivery amqp.Delivery) {
	c.Log.Info("Broker sarted to consume")
	return func(delivery amqp.Delivery) {
		registered, ok := c.handlers[delivery.Type]
		if !ok {
			// No handler registered: count it and ack the message away.
			c.Log.Debug("No handler for %s", delivery.Type)
			c.withCounter("nohandlerforevent", func() { delivery.Ack(false) })
			return
		}
		var err error
		for _, h := range registered {
			// do not continue, if one of the handler gives error
			if err = c.withMetrics(h, delivery); err != nil {
				break
			}
		}
		c.handleError(err, delivery)
	}
}
// handleError acks or republishes the delivery based on the handler error,
// bumping a per-outcome counter in every case.
func (c *Consumer) handleError(err error, delivery amqp.Delivery) {
	switch err {
	case nil:
		c.withCounter("success", func() { delivery.Ack(false) })
	case ErrNoHandlerFound:
		// NOTE(review): ErrNoHandlerFound is not declared in this file's
		// visible scope — confirm it exists elsewhere in the package.
		c.withCounter("handlernotfound", func() { delivery.Ack(false) })
		c.Log.Debug("unknown event type (%s) recieved, deleting message from RMQ", delivery.Type)
	case gorm.RecordNotFound:
		c.withCounter("gormrecordnotfound", func() { delivery.Ack(false) })
		c.Log.Warning("Record not found in our db (%s) recieved, deleting message from RMQ", string(delivery.Body))
	case mgo.ErrNotFound:
		c.withCounter("mgorecordnotfound", func() { delivery.Ack(false) })
		c.Log.Warning("Record not found in our mongo db (%s) recieved, deleting message from RMQ", string(delivery.Body))
	default:
		c.withCounter("othererror", func() {
			// default err handler should handle the ack process
			if c.context.DefaultErrHandler(delivery, err) {
				if c.MaintenancePublisher == nil {
					return
				}
				data, err := json.Marshal(delivery)
				if err != nil {
					// Marshal failed: silently drop the republish attempt.
					return
				}
				msg := amqp.Publishing{
					Body:  []byte(data),
					AppId: c.WorkerName,
				}
				c.withCounter("publishedtomaintenancequeue", func() {
					c.MaintenancePublisher.Publish(msg)
				})
			}
		})
	}
}
// SubscriptionHandler holds a reflected handler function to be invoked for
// decoded event payloads.
type SubscriptionHandler struct {
	v reflect.Value
}

// NewSubscriptionHandler checks that i is a function of shape
// func(context, payload) error and wraps it for reflective dispatch.
func NewSubscriptionHandler(i interface{}) (*SubscriptionHandler, error) {
	fnType := reflect.TypeOf(i)
	if kind := fnType.Kind(); kind != reflect.Func {
		return nil, fmt.Errorf("kind was %v, not Func", kind)
	}
	// check input parameter count
	if in := fnType.NumIn(); in != 2 {
		return nil, fmt.Errorf("input arity was %v, not 2", in)
	}
	// check output parameter count
	if out := fnType.NumOut(); out != 1 {
		return nil, fmt.Errorf("output arity was %v, not 1", out)
	}
	// the single return value must be an error
	if fnType.Out(0).String() != "error" {
		return nil, fmt.Errorf("type of return value was %v, not error", fnType.Out(0))
	}
	return &SubscriptionHandler{v: reflect.ValueOf(i)}, nil
}
// nilParameter is passed to handlers whose payload parameter is an empty
// interface (or that take no meaningful payload at all).
var nilParameter = reflect.ValueOf((*interface{})(nil))

// HandleEvent JSON-decodes data into a freshly allocated value matching the
// wrapped handler's second parameter type, then calls the handler with
// (controllerValue, payload) via reflection, returning the handler's error.
func (m *SubscriptionHandler) HandleEvent(controllerValue reflect.Value, data []byte) error {
	var parameter reflect.Value
	if m.v.Type().NumIn() == 2 {
		in2 := m.v.Type().In(1)
		// if incoming paramter is an empty interface
		if reflect.Interface == in2.Kind() && in2.NumMethod() == 0 {
			parameter = nilParameter
			// if incoming parameter is a slice or a map
		} else if reflect.Slice == in2.Kind() || reflect.Map == in2.Kind() {
			// non-pointer maps/slices require special treatment because
			// json.Unmarshal won't work on a non-pointer destination. We
			// add a level indirection here, then deref it before .Call()
			parameter = reflect.New(in2)
		} else {
			// if it is a struct
			// NOTE(review): this assumes in2 is a pointer type; Elem() on a
			// non-pointer kind would panic — confirm registrations guarantee it.
			parameter = reflect.New(in2.Elem())
		}
	} else {
		// if handler doesnt have any incoming paramters
		parameter = nilParameter
	}
	// this is where magic happens:)
	// first read incoming []byte data into a io.reader then
	// put this data into a decoder(create a decoder out of it)
	// finally decode this data into given
	decoder := reflect.ValueOf(json.NewDecoder(bytes.NewReader(data)))
	res := decoder.MethodByName("Decode").Call([]reflect.Value{parameter})
	if len(res) > 0 && !res[0].IsNil() {
		return res[0].Interface().(error)
	}
	// Remove the extra indirection added above for slices/maps.
	if reflect.Slice == parameter.Elem().Kind() || reflect.Map == parameter.Elem().Kind() {
		parameter = parameter.Elem()
	}
	var out []reflect.Value
	switch m.v.Type().NumIn() {
	case 2:
		out = m.v.Call([]reflect.Value{
			controllerValue,
			parameter,
		})
	default:
		return fmt.Errorf("unknown signature %s", m.v.Type())
	}
	if len(out) > 0 && !out[0].IsNil() {
		return out[0].Interface().(error)
	}
	return nil
}
|
// Package mux wires socialapi HTTP handlers onto a tigertonic trie mux with
// namespace, metrics and logging support.
package mux

import (
	"fmt"
	"koding/artifact"
	"net/http"
	"socialapi/models"
	"socialapi/workers/common/handler"

	"github.com/koding/logging"
	"github.com/koding/metrics"
	"github.com/rcrowley/go-tigertonic"
)
// Config holds the HTTP mux settings: service name, listen host/port, and
// whether request logging is enabled.
type Config struct {
	Name  string
	Host  string
	Port  string
	Debug bool
}

// NewConfig creates a Config for the named service listening on host:port.
// Debug is left at its zero value (false).
func NewConfig(name, host string, port string) *Config {
	c := new(Config)
	c.Name = name
	c.Host = host
	c.Port = port
	return c
}
// Mux bundles the raw and namespaced tigertonic trie muxes, the HTTP server,
// configuration, logger and optional metrics.
type Mux struct {
	Metrics *metrics.Metrics

	mux    *tigertonic.TrieServeMux
	nsMux  *tigertonic.TrieServeMux
	server *tigertonic.Server
	config *Config
	log    logging.Logger
}
// New builds a Mux whose "" and "/1.0" namespaces both route to the same
// underlying trie, wires in the logger and config, and registers the
// default version/healthCheck/root handlers.
func New(mc *Config, log logging.Logger) *Mux {
	root := tigertonic.NewTrieServeMux()
	namespaced := tigertonic.NewTrieServeMux()
	// add namespace support into all handlers
	namespaced.HandleNamespace("", root)
	namespaced.HandleNamespace("/1.0", root)
	tigertonic.SnakeCaseHTTPEquivErrors = true
	m := &Mux{
		mux:    root,
		nsMux:  namespaced,
		log:    log,
		config: mc,
	}
	m.addDefaultHandlers()
	return m
}
// AddHandler registers a session-scoped endpoint: the handler is wrapped
// with metrics and with the context-building middleware.
func (m *Mux) AddHandler(request handler.Request) {
	request.Metrics = m.Metrics
	wrapped := handler.BuildHandlerWithContext(handler.Wrapper(request))
	m.mux.Handle(request.Type, request.Endpoint, wrapped)
}
// AddSessionlessHandler registers an endpoint that gets metrics wrapping
// but no session-context middleware.
func (m *Mux) AddSessionlessHandler(request handler.Request) {
	request.Metrics = m.Metrics
	hHandler := handler.Wrapper(request)
	m.mux.Handle(request.Type, request.Endpoint, hHandler)
}
// AddUnscopedHandler registers a raw http.HandlerFunc without any wrapping.
// NOTE(review): the type assertion panics when request.Handler is not a
// func(http.ResponseWriter, *http.Request) — confirm callers guarantee this.
func (m *Mux) AddUnscopedHandler(request handler.Request) {
	m.mux.HandleFunc(request.Type, request.Endpoint, request.Handler.(func(http.ResponseWriter, *http.Request)))
}
// addDefaultHandlers registers the built-in endpoints: /version,
// /healthCheck and a hello-world root, returning the underlying mux.
func (m *Mux) addDefaultHandlers() *tigertonic.TrieServeMux {
	m.AddUnscopedHandler(handler.Request{
		Type:     handler.GetRequest,
		Endpoint: "/version",
		Handler:  artifact.VersionHandler(),
	})
	m.AddUnscopedHandler(handler.Request{
		Type:     handler.GetRequest,
		Endpoint: "/healthCheck",
		Handler:  artifact.HealthCheckHandler(m.config.Name),
	})
	m.AddUnscopedHandler(handler.Request{
		Type:     handler.GetRequest,
		Endpoint: "/",
		Handler: func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintf(w, "Hello from %s", m.config.Name)
		},
	})
	return m.mux
}
// Listen builds the handler chain (context injection plus optional request
// logging in debug mode), creates the server for the configured host:port,
// and starts serving in a background goroutine.
func (m *Mux) Listen() {
	// go metrics.Log(
	// 	metrics.DefaultRegistry,
	// 	60e9,
	// 	stdlog.New(os.Stderr, "metrics ", stdlog.Lmicroseconds),
	// )
	var handler http.Handler
	handler = tigertonic.WithContext(m.nsMux, models.Context{})
	if m.config.Debug {
		// Wrap with a request logger only when debugging is enabled.
		h := tigertonic.Logged(handler, nil)
		h.Logger = NewTigerTonicLogger(m.log)
		handler = h
	}
	addr := fmt.Sprintf("%s:%s", m.config.Host, m.config.Port)
	m.server = tigertonic.NewServer(addr, handler)
	go m.listener()
}
// Close shuts down the underlying server. Listen must have been called
// first; otherwise m.server is nil and this panics.
func (m *Mux) Close() {
	m.server.Close()
}
// listener blocks in ListenAndServe and panics on any serve error.
// NOTE(review): depending on tigertonic's Close semantics this may
// also panic when the server is shut down via Close — confirm.
func (m *Mux) listener() {
	if err := m.server.ListenAndServe(); err != nil {
		panic(err)
	}
}
mux: override Handler method to support tigertonic http mocking
package mux
import (
"fmt"
"koding/artifact"
"net/http"
"socialapi/models"
"socialapi/workers/common/handler"
"github.com/koding/logging"
"github.com/koding/metrics"
"github.com/rcrowley/go-tigertonic"
)
// Config holds the settings needed to run a Mux.
type Config struct {
	Name  string // service name, used by /healthCheck and the root greeting
	Host  string // interface to bind
	Port  string // TCP port to bind, kept as a string for address formatting
	Debug bool   // enables per-request logging in Listen
}
// NewConfig returns a Config for the given service name, host and
// port. Debug defaults to false; set it explicitly when needed.
func NewConfig(name, host string, port string) *Config {
	c := new(Config)
	c.Name = name
	c.Host = host
	c.Port = port
	return c
}
// Mux couples the namespaced front mux with the inner mux handlers are
// registered on, plus the server lifecycle and shared dependencies.
type Mux struct {
	Metrics *metrics.Metrics // attached to every scoped handler at registration time

	mux    *tigertonic.TrieServeMux // all handlers live here
	nsMux  *tigertonic.TrieServeMux // maps "" and "/1.0" onto mux
	server *tigertonic.Server       // created by Listen, stopped by Close

	config *Config
	log    logging.Logger
}
// New builds a Mux around two tigertonic trie muxes. All handlers are
// registered on the inner mux; nsMux maps both the empty namespace and
// the "/1.0" prefix onto it, so every endpoint answers with or without
// the API version prefix.
//
// NOTE(review): this mutates the package-global
// tigertonic.SnakeCaseHTTPEquivErrors, which affects every tigertonic
// consumer in the process, not just this Mux.
func New(mc *Config, log logging.Logger) *Mux {
	m := &Mux{
		mux:   tigertonic.NewTrieServeMux(),
		nsMux: tigertonic.NewTrieServeMux(),
	}

	// add namespace support into
	// all handlers
	m.nsMux.HandleNamespace("", m.mux)
	m.nsMux.HandleNamespace("/1.0", m.mux)

	tigertonic.SnakeCaseHTTPEquivErrors = true

	m.log = log
	m.config = mc

	// register the built-in /version, /healthCheck and root endpoints
	m.addDefaultHandlers()

	return m
}
// AddHandler registers a session-aware handler: the request is wrapped
// for metrics, then with the context-building middleware, before being
// mounted on the inner mux.
func (m *Mux) AddHandler(request handler.Request) {
	request.Metrics = m.Metrics

	hHandler := handler.Wrapper(request)
	// context middleware on top of the plain wrapper
	hHandler = handler.BuildHandlerWithContext(hHandler)

	m.mux.Handle(request.Type, request.Endpoint, hHandler)
}
// AddSessionlessHandler registers a handler that is wrapped for metrics
// but skips the context middleware that AddHandler applies.
func (m *Mux) AddSessionlessHandler(request handler.Request) {
	request.Metrics = m.Metrics
	hHandler := handler.Wrapper(request)
	m.mux.Handle(request.Type, request.Endpoint, hHandler)
}
// AddUnscopedHandler mounts a raw handler function with no wrapping at
// all. The type assertion panics unless request.Handler is exactly a
// func(http.ResponseWriter, *http.Request) — callers must pass one.
func (m *Mux) AddUnscopedHandler(request handler.Request) {
	m.mux.HandleFunc(request.Type, request.Endpoint, request.Handler.(func(http.ResponseWriter, *http.Request)))
}
// addDefaultHandlers wires the built-in endpoints every service
// exposes: /version, /healthCheck and a root greeting. It returns the
// inner mux for convenience; New ignores the return value.
func (m *Mux) addDefaultHandlers() *tigertonic.TrieServeMux {
	m.AddUnscopedHandler(handler.Request{
		Type:     handler.GetRequest,
		Endpoint: "/version",
		Handler:  artifact.VersionHandler(),
	})

	m.AddUnscopedHandler(handler.Request{
		Type:     handler.GetRequest,
		Endpoint: "/healthCheck",
		Handler:  artifact.HealthCheckHandler(m.config.Name),
	})

	m.AddUnscopedHandler(handler.Request{
		Type:     handler.GetRequest,
		Endpoint: "/",
		Handler: func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintf(w, "Hello from %s", m.config.Name)
		},
	})

	return m.mux
}
// Listen builds the final handler chain (context injection, plus
// request logging when Debug is set), creates the tigertonic server on
// config.Host:config.Port and starts serving on a background
// goroutine. It returns immediately; use Close to stop the server.
func (m *Mux) Listen() {
	// go metrics.Log(
	// 	metrics.DefaultRegistry,
	// 	60e9,
	// 	stdlog.New(os.Stderr, "metrics ", stdlog.Lmicroseconds),
	// )

	var handler http.Handler
	handler = tigertonic.WithContext(m.nsMux, models.Context{})
	if m.config.Debug {
		h := tigertonic.Logged(handler, nil)
		h.Logger = NewTigerTonicLogger(m.log)
		handler = h
	}

	addr := fmt.Sprintf("%s:%s", m.config.Host, m.config.Port)
	m.server = tigertonic.NewServer(addr, handler)

	go m.listener()
}
// Handler exposes the inner mux's route resolution (handler plus
// matched pattern) so tests can dispatch requests without a running
// server.
func (m *Mux) Handler(r *http.Request) (http.Handler, string) {
	return m.mux.Handler(r)
}
// Close shuts down the underlying server. Listen must have been called
// first; otherwise m.server is nil and this panics.
func (m *Mux) Close() {
	m.server.Close()
}
// listener blocks in ListenAndServe and panics on any serve error.
// NOTE(review): depending on tigertonic's Close semantics this may
// also panic when the server is shut down via Close — confirm.
func (m *Mux) listener() {
	if err := m.server.ListenAndServe(); err != nil {
		panic(err)
	}
}
|
// Tool generates JSON document using monster productions.
// * productions are defined for `default`, `users` and `projects` bucket.
// * parallel load can be generated using `-par` switch.
// * `-count` switch specify no. of documents to be generated by each routine.
package main
import (
"flag"
"fmt"
"log"
"math/rand"
"net/url"
"os"
"path"
"runtime"
"strings"
"time"
"github.com/couchbase/indexing/secondary/dcp"
"github.com/prataprc/monster"
)
// options carries the parsed command-line flags shared by all loader
// goroutines.
var options struct {
	seed     int      // seed for monster tool
	buckets  []string // buckets to populate
	parallel int      // number of parallel routines per bucket
	count    int      // number of documents to be generated per routine
	expiry   int      // set expiry for the document, in seconds
}
// testDir is the testdata directory resolved in argParse relative to
// this source file; bagDir mirrors it and is handed to monster.
var testDir string
var bagDir string

// bucketProds maps bucket name -> production file path; paths are
// filled in by argParse.
var bucketProds = map[string]string{ // name -> production file path
	"default":  "",
	"users":    "",
	"projects": "",
}

// done receives one signal per generator goroutine; main drains it.
var done = make(chan bool, 16)
// argParse parses flags, resolves the production-file paths under the
// package's testdata directory and returns the cluster address (the
// first positional argument). Exits with usage on a missing argument.
//
// NOTE(review): the default seed is only the current second (0-59), so
// runs started within the same second generate identical documents.
func argParse() string {
	var buckets string

	seed := time.Now().UTC().Second()
	flag.IntVar(&options.seed, "seed", seed,
		"seed for monster tool")
	flag.StringVar(&buckets, "buckets", "default",
		"buckets to populate")
	flag.IntVar(&options.parallel, "par", 1,
		"number of parallel routines per bucket")
	flag.IntVar(&options.count, "count", 0,
		"number of documents to be generated per routine")
	flag.IntVar(&options.expiry, "expiry", 0,
		"expiry duration for a document (TTL)")

	flag.Parse()

	options.buckets = strings.Split(buckets, ",")

	// collect production files.
	// Caller(1) yields the path of the file calling argParse, so
	// testDir resolves relative to this tool's source tree.
	_, filename, _, _ := runtime.Caller(1)
	testDir = path.Join(path.Dir(path.Dir(path.Dir(filename))), "testdata")
	bagDir = testDir
	bucketProds["default"] = path.Join(testDir, "users.prod")
	bucketProds["users"] = path.Join(testDir, "users.prod")
	bucketProds["projects"] = path.Join(testDir, "projects.prod")

	args := flag.Args()
	if len(args) < 1 {
		usage()
		os.Exit(1)
	}
	return args[0]
}
// usage prints the command synopsis and flag help to stderr.
func usage() {
	prog := os.Args[0]
	fmt.Fprintf(os.Stderr, "Usage : %s [OPTIONS] <cluster-addr> \n", prog)
	flag.PrintDefaults()
}
// main connects to the cluster named on the command line, fans out
// document generation across the requested buckets and waits for every
// generator goroutine to report completion on done.
func main() {
	cluster := argParse()
	if !strings.HasPrefix(cluster, "http://") {
		cluster = "http://" + cluster
	}

	n := 0
	for _, bucket := range options.buckets {
		prodfile, ok := bucketProds[bucket]
		if !ok || prodfile == "" {
			// Fail fast with a clear message instead of letting
			// monster.Parse choke on an empty path later.
			log.Fatalf("no production file known for bucket %q", bucket)
		}
		n += loadBucket(cluster, bucket, prodfile, options.count)
	}
	// each generator goroutine signals done exactly once
	for n > 0 {
		<-done
		n--
	}
}
// loadBucket opens options.parallel connections to the given bucket
// and starts one generator goroutine per connection, each producing
// count documents from prodfile. It returns the number of goroutines
// started so main knows how many done-signals to await.
func loadBucket(cluster, bucket, prodfile string, count int) int {
	u, err := url.Parse(cluster)
	mf(err, "parse")

	c, err := couchbase.Connect(u.String())
	mf(err, "connect - "+u.String())

	p, err := c.GetPool("default")
	mf(err, "pool")

	// keep every bucket handle alive for the lifetime of its goroutine
	bs := make([]*couchbase.Bucket, 0, options.parallel)
	for i := 0; i < options.parallel; i++ {
		b, err := p.GetBucket(bucket)
		mf(err, "bucket")
		bs = append(bs, b)
		// BUG FIX: the count parameter was silently ignored and the
		// global options.count re-read instead; honour the parameter.
		go genDocuments(b, prodfile, i+1, count)
	}
	return options.parallel
}
// genDocuments generates n documents from prodfile and stores them in
// bucket b under keys "<bucket>-<idx>-<i>"; idx distinguishes the
// parallel routines for the same bucket. Signals done exactly once.
//
// NOTE(review): every routine seeds its RNG with the same
// options.seed, so parallel routines produce identical document
// streams — confirm whether that is intended.
func genDocuments(b *couchbase.Bucket, prodfile string, idx, n int) {
	conf := make(map[string]interface{})
	start, err := monster.Parse(prodfile, conf)
	mf(err, "monster - ")
	nonterminals, root := monster.Build(start)
	c := map[string]interface{}{
		"_nonterminals": nonterminals,
		// rand.Rand is not thread safe.
		"_random":   rand.New(rand.NewSource(int64(options.seed))),
		"_bagdir":   bagDir,
		"_prodfile": prodfile,
	}
	msg := fmt.Sprintf("%s - set", b.Name)
	for i := 0; i < n; i++ {
		monster.Initialize(c)
		doc := root.Generate(c)
		key := fmt.Sprintf("%s-%v-%v", b.Name, idx, i+1)
		err = b.SetRaw(key, options.expiry, []byte(doc))
		if err != nil {
			fmt.Printf("%T %v\n", err, err)
		}
		// mf exits the process on a persistent store error
		mf(err, msg)
	}
	fmt.Printf("routine %v generated %v documents for %q\n", idx, n, b.Name)
	done <- true
}
// mf aborts the process with msg and the error when err is non-nil;
// it is a no-op otherwise.
func mf(err error, msg string) {
	if err == nil {
		return
	}
	log.Fatalf("%v: %v", msg, err)
}
Add a switch in loadgen to accept production file.
Change-Id: I784a86454c1eed33dc5261f6defbd26adf6efbd6
// Tool generates JSON document using monster productions.
// * productions are defined for `default`, `users` and `projects` bucket.
// * parallel load can be generated using `-par` switch.
// * `-count` switch specify no. of documents to be generated by each routine.
package main
import (
"flag"
"fmt"
"log"
"math/rand"
"net/url"
"os"
"path"
"runtime"
"strings"
"time"
"github.com/couchbase/indexing/secondary/dcp"
"github.com/prataprc/monster"
)
// options carries the parsed command-line flags shared by all loader
// goroutines.
var options struct {
	seed     int      // seed for monster tool
	buckets  []string // buckets to populate
	prods    []string // production files, paired with buckets by index
	parallel int      // number of parallel routines per bucket
	count    int      // number of documents to be generated per routine
	expiry   int      // set expiry for the document, in seconds
}
// testDir is the testdata directory resolved in argParse relative to
// this source file; bagDir mirrors it and is handed to monster.
var testDir string
var bagDir string

// done receives one signal per generator goroutine; main drains it.
var done = make(chan bool, 16)
// argParse parses command-line flags, resolves testDir to the
// package's testdata directory and returns the cluster address (the
// first positional argument). Exits with usage on a missing argument.
//
// NOTE(review): the default seed is only the current second (0-59), so
// runs started within the same second generate identical documents.
func argParse() string {
	var buckets, prods string

	seed := time.Now().UTC().Second()
	flag.IntVar(&options.seed, "seed", seed,
		"seed for monster tool")
	flag.StringVar(&buckets, "buckets", "default",
		"buckets to populate")
	// BUG FIX: help text read "command separated".
	flag.StringVar(&prods, "prods", "users.prod",
		"comma separated list of production files for each bucket")
	flag.IntVar(&options.parallel, "par", 1,
		"number of parallel routines per bucket")
	flag.IntVar(&options.count, "count", 0,
		"number of documents to be generated per routine")
	flag.IntVar(&options.expiry, "expiry", 0,
		"expiry duration for a document (TTL)")

	flag.Parse()

	options.buckets = strings.Split(buckets, ",")
	options.prods = strings.Split(prods, ",")

	// collect production files relative to this tool's source tree.
	_, filename, _, _ := runtime.Caller(1)
	testDir = path.Join(path.Dir(path.Dir(path.Dir(filename))), "testdata")
	bagDir = testDir

	args := flag.Args()
	if len(args) < 1 {
		usage()
		os.Exit(1)
	}
	return args[0]
}
// usage prints the command synopsis and flag help to stderr.
func usage() {
	prog := os.Args[0]
	fmt.Fprintf(os.Stderr, "Usage : %s [OPTIONS] <cluster-addr> \n", prog)
	flag.PrintDefaults()
}
// main connects to the cluster given on the command line, starts
// document generation for each requested bucket — pairing buckets with
// production files by position — and waits for all generator
// goroutines to finish.
func main() {
	cluster := argParse()
	if !strings.HasPrefix(cluster, "http://") {
		cluster = "http://" + cluster
	}

	// BUG FIX: options.prods[i] panicked with index-out-of-range when
	// fewer -prods entries than -buckets were supplied; fail with a
	// clear message instead.
	if len(options.prods) < len(options.buckets) {
		log.Fatalf("%v buckets but only %v production files; "+
			"pass one -prods entry per bucket",
			len(options.buckets), len(options.prods))
	}

	n := 0
	for i, bucket := range options.buckets {
		prodfile := getProdfilePath(options.prods[i])
		n += loadBucket(cluster, bucket, prodfile, options.count)
	}
	// each generator goroutine signals done exactly once
	for n > 0 {
		<-done
		n--
	}
}
// loadBucket opens options.parallel connections to the given bucket
// and starts one generator goroutine per connection, each producing
// count documents from prodfile. It returns the number of goroutines
// started so main knows how many done-signals to await.
func loadBucket(cluster, bucket, prodfile string, count int) int {
	u, err := url.Parse(cluster)
	mf(err, "parse")

	c, err := couchbase.Connect(u.String())
	mf(err, "connect - "+u.String())

	p, err := c.GetPool("default")
	mf(err, "pool")

	// keep every bucket handle alive for the lifetime of its goroutine
	bs := make([]*couchbase.Bucket, 0, options.parallel)
	for i := 0; i < options.parallel; i++ {
		b, err := p.GetBucket(bucket)
		mf(err, "bucket")
		bs = append(bs, b)
		// BUG FIX: the count parameter was silently ignored and the
		// global options.count re-read instead; honour the parameter.
		go genDocuments(b, prodfile, i+1, count)
	}
	return options.parallel
}
// genDocuments generates n documents from prodfile and stores them in
// bucket b under keys "<bucket>-<idx>-<i>"; idx distinguishes the
// parallel routines for the same bucket. Signals done exactly once.
//
// NOTE(review): every routine seeds its RNG with the same
// options.seed, so parallel routines produce identical document
// streams — confirm whether that is intended.
func genDocuments(b *couchbase.Bucket, prodfile string, idx, n int) {
	conf := make(map[string]interface{})
	start, err := monster.Parse(prodfile, conf)
	mf(err, "monster - ")
	nonterminals, root := monster.Build(start)
	c := map[string]interface{}{
		"_nonterminals": nonterminals,
		// rand.Rand is not thread safe.
		"_random":   rand.New(rand.NewSource(int64(options.seed))),
		"_bagdir":   bagDir,
		"_prodfile": prodfile,
	}
	msg := fmt.Sprintf("%s - set", b.Name)
	for i := 0; i < n; i++ {
		monster.Initialize(c)
		doc := root.Generate(c)
		key := fmt.Sprintf("%s-%v-%v", b.Name, idx, i+1)
		err = b.SetRaw(key, options.expiry, []byte(doc))
		if err != nil {
			fmt.Printf("%T %v\n", err, err)
		}
		// mf exits the process on a persistent store error
		mf(err, msg)
	}
	fmt.Printf("routine %v generated %v documents for %q\n", idx, n, b.Name)
	done <- true
}
// mf aborts the process with msg and the error when err is non-nil;
// it is a no-op otherwise.
func mf(err error, msg string) {
	if err == nil {
		return
	}
	log.Fatalf("%v: %v", msg, err)
}
// getProdfilePath resolves a production-file name (as passed via
// -prods) against the testdata directory computed in argParse.
func getProdfilePath(name string) string {
	return path.Join(testDir, name)
}
|
package cassandra
import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/gocql/gocql"
"github.com/intelsdi-x/swan/pkg/cassandra"
. "github.com/smartystreets/goconvey/convey"
"math/rand"
"testing"
"time"
)
// insertDataIntoCassandra creates the snap.metrics table if needed and
// inserts a single row built from metrics. Returns the first error
// encountered.
func insertDataIntoCassandra(session *gocql.Session, metrics *cassandra.Metrics) error {
	// TODO(CD): Consider getting schema from the cassandra publisher plugin
	err := session.Query(`CREATE TABLE IF NOT EXISTS snap.metrics (
		ns text,
		ver int,
		host text,
		time timestamp,
		valtype text,
		doubleVal double,
		boolVal boolean,
		strVal text,
		tags map<text,text>,
		PRIMARY KEY ((ns, ver, host), time)
	) WITH CLUSTERING ORDER BY (time DESC);`,
	).Exec()
	// BUG FIX: the CREATE TABLE error was silently discarded, so a
	// failing DDL only surfaced later at insert time.
	if err != nil {
		return err
	}

	return session.Query(`insert into snap.metrics(
		ns, ver, host, time, boolval,
		doubleval, strval, tags, valtype) values
		(?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		metrics.Namespace(), metrics.Version(), metrics.Host(), metrics.Time(), metrics.Boolval(),
		metrics.Doubleval(), metrics.Strval(), metrics.Tags(), metrics.Valtype(),
	).Exec()
}
// TestValuesGatherer is an integration test: it requires a Cassandra
// node on 127.0.0.1 with the "snap" keyspace available. It inserts one
// metrics row tagged with a random experiment ID and verifies the
// round trip through GetValuesForGivenExperiment.
func TestValuesGatherer(t *testing.T) {
	// Create fake experiment ID.
	rand.Seed(int64(time.Now().Nanosecond()))
	value := rand.Int()
	experimentID := fmt.Sprintf("%d", value)
	expectedTagsMap := map[string]string{"swan_experiment": experimentID, "swan_phase": "p2", "swan_repetition": "2"}

	// Create Metrics struct that will be inserted into cassandra.
	metrics := cassandra.NewMetrics(experimentID, 1, "abc", time.Now(), false, 10, "c", expectedTagsMap, "boolval")
	logrus.SetLevel(logrus.ErrorLevel)
	Convey("While connecting to Cassandra with proper parameters", t, func() {
		cassandraConfig, err := cassandra.CreateConfigWithSession("127.0.0.1", "snap")
		So(err, ShouldBeNil)
		session := cassandraConfig.CassandraSession()
		Convey("I should receive not empty session", func() {
			So(session, ShouldNotBeNil)
			So(err, ShouldBeNil)
			Convey("I should be able to insert data into cassandra", func() {
				err := insertDataIntoCassandra(session, metrics)
				So(err, ShouldBeNil)
				Convey("and I should be able to receive expected values and close session", func() {
					metricsList, err := cassandraConfig.GetValuesForGivenExperiment(experimentID)
					So(len(metricsList), ShouldBeGreaterThan, 0)
					So(err, ShouldBeNil)
					resultedMetrics := metricsList[0]
					// Check values of metrics.
					So(resultedMetrics.Namespace(), ShouldEqual, metrics.Namespace())
					So(resultedMetrics.Version(), ShouldEqual, metrics.Version())
					So(resultedMetrics.Host(), ShouldEqual, metrics.Host())
					// Cassandra stores time values in UTC by default. So, we
					// convert the expected time value to UTC to avoid discrepancies
					// in the interpreted calendar date and the test flakiness
					// that could cause. For completeness, we also pre-emptively
					// convert the result time to UTC in case the database is
					// configured to use a non-default TZ.
					_, _, resultedDay := resultedMetrics.Time().UTC().Date()
					_, _, expectedDay := metrics.Time().UTC().Date()
					So(resultedDay, ShouldEqual, expectedDay)
					So(resultedMetrics.Boolval(), ShouldEqual, metrics.Boolval())
					So(resultedMetrics.Doubleval(), ShouldEqual, metrics.Doubleval())
					So(resultedMetrics.Strval(), ShouldEqual, metrics.Strval())
					So(resultedMetrics.Tags()["swan_experiment"], ShouldEqual,
						metrics.Tags()["swan_experiment"])
					So(resultedMetrics.Tags()["swan_phase"], ShouldEqual,
						metrics.Tags()["swan_phase"])
					So(resultedMetrics.Tags()["swan_repetition"], ShouldEqual,
						metrics.Tags()["swan_repetition"])
					So(resultedMetrics.Valtype(), ShouldEqual, metrics.Valtype())
					err = cassandraConfig.CloseSession()
					So(err, ShouldBeNil)
				})
			})
		})
	})
}
Added function for creating keyspace. (#169)
package cassandra
import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/gocql/gocql"
"github.com/intelsdi-x/swan/pkg/cassandra"
. "github.com/smartystreets/goconvey/convey"
"math/rand"
"testing"
"time"
)
// createKeyspace connects to the Cassandra node at ip and creates the
// "snap" keyspace (SimpleStrategy, replication factor 1) if it does
// not exist yet.
func createKeyspace(ip string) error {
	cluster := gocql.NewCluster(ip)
	cluster.ProtoVersion = 4
	cluster.Consistency = gocql.All

	session, err := cluster.CreateSession()
	if err != nil {
		return err
	}
	// BUG FIX: the session leaked when the query failed (Close was
	// only reached on the success path); defer closes it on all paths.
	defer session.Close()

	return session.Query(`CREATE KEYSPACE IF NOT EXISTS snap WITH replication = {
		'class': 'SimpleStrategy','replication_factor':1}`).Exec()
}
// insertDataIntoCassandra creates the snap.metrics table if needed and
// inserts a single row built from metrics. Returns the first error
// encountered.
func insertDataIntoCassandra(session *gocql.Session, metrics *cassandra.Metrics) error {
	// TODO(CD): Consider getting schema from the cassandra publisher plugin
	err := session.Query(`CREATE TABLE IF NOT EXISTS snap.metrics (
		ns text,
		ver int,
		host text,
		time timestamp,
		valtype text,
		doubleVal double,
		boolVal boolean,
		strVal text,
		tags map<text,text>,
		PRIMARY KEY ((ns, ver, host), time)
	) WITH CLUSTERING ORDER BY (time DESC);`,
	).Exec()
	// BUG FIX: the CREATE TABLE error was silently discarded, so a
	// failing DDL only surfaced later at insert time.
	if err != nil {
		return err
	}

	return session.Query(`insert into snap.metrics(
		ns, ver, host, time, boolval,
		doubleval, strval, tags, valtype) values
		(?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		metrics.Namespace(), metrics.Version(), metrics.Host(), metrics.Time(), metrics.Boolval(),
		metrics.Doubleval(), metrics.Strval(), metrics.Tags(), metrics.Valtype(),
	).Exec()
}
// TestValuesGatherer is an integration test: it requires a Cassandra
// node on 127.0.0.1. It first creates the "snap" keyspace, then
// inserts one metrics row tagged with a random experiment ID and
// verifies the round trip through GetValuesForGivenExperiment.
func TestValuesGatherer(t *testing.T) {
	ip := "127.0.0.1"
	Convey("While creating keyspace I should receive no error", t, func() {
		err := createKeyspace(ip)
		So(err, ShouldBeNil)

		// Create fake experiment ID.
		rand.Seed(int64(time.Now().Nanosecond()))
		value := rand.Int()
		experimentID := fmt.Sprintf("%d", value)
		expectedTagsMap := map[string]string{"swan_experiment": experimentID, "swan_phase": "p2", "swan_repetition": "2"}

		// Create Metrics struct that will be inserted into cassandra.
		metrics := cassandra.NewMetrics(experimentID, 1, "abc", time.Now(), false, 10, "c", expectedTagsMap, "boolval")
		logrus.SetLevel(logrus.ErrorLevel)
		Convey("While connecting to Cassandra with proper parameters", func() {
			cassandraConfig, err := cassandra.CreateConfigWithSession(ip, "snap")
			So(err, ShouldBeNil)
			session := cassandraConfig.CassandraSession()
			Convey("I should receive not empty session", func() {
				So(session, ShouldNotBeNil)
				So(err, ShouldBeNil)
				Convey("I should be able to insert data into cassandra", func() {
					err := insertDataIntoCassandra(session, metrics)
					So(err, ShouldBeNil)
					Convey("and I should be able to receive expected values and close session", func() {
						metricsList, err := cassandraConfig.GetValuesForGivenExperiment(experimentID)
						So(len(metricsList), ShouldBeGreaterThan, 0)
						So(err, ShouldBeNil)
						resultedMetrics := metricsList[0]
						// Check values of metrics.
						So(resultedMetrics.Namespace(), ShouldEqual, metrics.Namespace())
						So(resultedMetrics.Version(), ShouldEqual, metrics.Version())
						So(resultedMetrics.Host(), ShouldEqual, metrics.Host())
						// Cassandra stores time values in UTC by default. So, we
						// convert the expected time value to UTC to avoid discrepancies
						// in the interpreted calendar date and the test flakiness
						// that could cause. For completeness, we also pre-emptively
						// convert the result time to UTC in case the database is
						// configured to use a non-default TZ.
						_, _, resultedDay := resultedMetrics.Time().UTC().Date()
						_, _, expectedDay := metrics.Time().UTC().Date()
						So(resultedDay, ShouldEqual, expectedDay)
						So(resultedMetrics.Boolval(), ShouldEqual, metrics.Boolval())
						So(resultedMetrics.Doubleval(), ShouldEqual, metrics.Doubleval())
						So(resultedMetrics.Strval(), ShouldEqual, metrics.Strval())
						So(resultedMetrics.Tags()["swan_experiment"], ShouldEqual,
							metrics.Tags()["swan_experiment"])
						So(resultedMetrics.Tags()["swan_phase"], ShouldEqual,
							metrics.Tags()["swan_phase"])
						So(resultedMetrics.Tags()["swan_repetition"], ShouldEqual,
							metrics.Tags()["swan_repetition"])
						So(resultedMetrics.Valtype(), ShouldEqual, metrics.Valtype())
						err = cassandraConfig.CloseSession()
						So(err, ShouldBeNil)
					})
				})
			})
		})
	})
}
|
Restore defer
|
// Copyright 2014 slowfei And The Contributors All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Create on 2014-06-16
// Update on 2014-07-08
// Email slowfei#foxmail.com
// Home http://www.slowfei.com
//
// reflect router
//
package LVRouter
import (
"fmt"
. "github.com/slowfei/leafveingo"
"reflect"
)
var (
	// CONTROLLER_DEFAULT_METHOD is the method invoked for a bare
	// router-key request such as GET http://localhost:8080/.
	CONTROLLER_DEFAULT_METHOD = "Index"
)
//
// reflect router other option
//
// ReflectRouterOption carries the per-router matching options
// (scheme/host) for a reflect router; it embeds the generic
// ControllerOption and adds fluent value-receiver setters.
type ReflectRouterOption struct {
	ControllerOption
}
/**
 * default option
 *
 * Returns an option with empty (wildcard) scheme and host, so the
 * router matches any request.
 */
func DefaultReflectRouterOption() ReflectRouterOption {
	var option ReflectRouterOption
	option.ControllerOption.SetHost("")
	option.ControllerOption.SetScheme("")
	return option
}
/**
 * set scheme
 * "http" || "https" || ""(wildcard)
 *
 * Value receiver: mutates and returns a copy, allowing fluent
 * chaining without touching the original option.
 */
func (o ReflectRouterOption) SetScheme(scheme string) ReflectRouterOption {
	o.ControllerOption.SetScheme(scheme)
	return o
}
/**
 * set host
 * "svn.slowfei.com" || "wwww.slowfei.com" || ""(wildcard)
 *
 * Value receiver: mutates and returns a copy, allowing fluent
 * chaining without touching the original option.
 */
func (o ReflectRouterOption) SetHost(host string) ReflectRouterOption {
	o.ControllerOption.SetHost(host)
	return o
}
/**
 * checked params
 *
 * Validates the embedded ControllerOption in place (pointer receiver,
 * unlike the fluent value-receiver setters above).
 */
func (o *ReflectRouterOption) Checked() {
	o.ControllerOption.Checked()
}
//
// reflect router
//
// default template path: [host]/[routerKey]/[funcName].[TemplateSuffix]
// "[host]/" multi-project use, leafveinServer.SetMultiProjectHosts("slowfei.com","svn.slowfei.com")
// rule:
//
// router key = "/"
// URL = GET http://localhost:8080/
// func name = Index
// template path = [host]/Index.tpl
//
// router key = "/"
// URL = POST http://localhost:8080/
// func name = PostIndex
// template path = [host]/PostIndex.tpl
//
// router key = "/"
// URL = Get http://localhost:8080/user#!list
// func name = UserList
// template path = [host]/UserList.tpl
//
// router key = "/"
// URL = Post http://localhost:8080/user[^a-zA-Z]+list[^a-zA-Z]+auto
// func name = PostUserListAuto
// template path = [host]/PostUserListAuto.tpl
//
// router key = "/admin/"
// URL = GET http://localhost:8080/admin/login
// func name = Login
// template path = [host]/admin/Login.tpl
//
// router key = "/admin/"
// URL = POST http://localhost:8080/admin/login
// func name = PostLogin
// template path = [host]/admin/PostLogin.tpl
//
//
// 控制器分的指针传递和值传递
// 值传递:
// CreateReflectController("/pointer/struct/", PointerController{})
// 每次请求(http://localhost:8080/pointer/struct/) 都会根据设置的控制器类型新建立一个对象进行处理,直到一次请求周期结束。
//
// 指针传递:
// CreateReflectController("/pointer/", new(PointerController))
// 跟值传递相反,每次请求时都会使用设置的控制器地址进行处理,应用结束也不会改变,每次请求控制器都不会改变内存地址
// 这里涉及到并发时同时使用一个内存地址处理的问题,使用时需要注意
//
type ReflectRouter struct {
	routerKey     string                // router key, e.g. "/" or "/admin/"
	beforeAfter   BeforeAfterController // set only for pointer controllers implementing the interface
	adeRouter     AdeRouterController   // set only for pointer controllers implementing the interface
	isBeforeAfter bool                  // controller type implements BeforeAfterController
	isAdeRouter   bool                  // controller type implements AdeRouterController
	ctlRefVal     reflect.Value         // controller reflect value (pointer or value kind)
	checkFuncName map[string]int        // exported method name -> reflect method index
	option        ReflectRouterOption
	info          string // human-readable description, see Info()
	typestr       string // controller type string, used in error messages
}
/**
 * create reflect router controller with the default (wildcard) option
 *
 * @param routerKey "/" || "/home/" || "/admin/"
 * @param controller controller value or pointer; see the pointer vs
 *        value semantics documented above ReflectRouter
 */
func CreateReflectController(routerKey string, controller interface{}) IRouter {
	return CreateReflectControllerWithOption(routerKey, controller, DefaultReflectRouterOption())
}
/**
 * create reflect router controller with option
 *
 * Builds the exported-method index for dispatch, detects the optional
 * AdeRouterController / BeforeAfterController interfaces, and records
 * whether the controller was passed by pointer (shared instance) or
 * by value (fresh instance per request).
 *
 * @param option other params option
 */
func CreateReflectControllerWithOption(routerKey string, controller interface{}, option ReflectRouterOption) IRouter {
	option.Checked()

	strBeforeAfter := ""
	strAde := ""

	refRouter := new(ReflectRouter)
	refRouter.routerKey = routerKey
	refRouter.checkFuncName = make(map[string]int)
	refRouter.ctlRefVal = reflect.ValueOf(controller)
	refRouter.option = option
	refRouter.isAdeRouter = false
	refRouter.isBeforeAfter = false

	// Use the pointer type to enumerate methods; a non-pointer type
	// only exposes its value-receiver methods.
	refType := reflect.New(reflect.Indirect(refRouter.ctlRefVal).Type()).Type()

	if refType.Implements(RefTypeAdeRouterController) {
		// only a pointer controller can be stored directly; a value
		// controller gets a fresh instance per request instead
		if reflect.Ptr == refRouter.ctlRefVal.Kind() {
			refRouter.adeRouter = controller.(AdeRouterController)
		}
		refRouter.isAdeRouter = true
		strAde = "(Implemented AdeRouterController)"
	}

	if refType.Implements(RefTypeBeforeAfterController) {
		if reflect.Ptr == refRouter.ctlRefVal.Kind() {
			refRouter.beforeAfter = controller.(BeforeAfterController)
		}
		refRouter.isBeforeAfter = true
		strBeforeAfter = "(Implemented BeforeAfterController)"
	}

	// index every exported method (leading upper-case ASCII letter)
	for i := 0; i < refType.NumMethod(); i++ {
		refMet := refType.Method(i)
		funcName := refMet.Name
		if funcName[0] >= 'A' && funcName[0] <= 'Z' {
			refRouter.checkFuncName[funcName] = i
		}
	}

	refRouter.typestr = refRouter.ctlRefVal.Type().String()
	refRouter.info = fmt.Sprintf("ReflectRouter(%v) %v%v", refRouter.ctlRefVal.Type(), strBeforeAfter, strAde)

	return refRouter
}
/**
 * func name suffix handle
 *
 * When the request URL carries a suffix (option.UrlSuffix), try
 * appending its capitalized form to funcName; the combined name is
 * used only if the controller actually has such a method, otherwise
 * the base name is returned unchanged.
 *
 * @param funcName base method name
 * @param option   router option carrying the URL suffix
 */
func (r *ReflectRouter) funcNameSuffixHandle(funcName string, option *RouterOption) string {
	urlSuffix := option.UrlSuffix
	if 0 == len(urlSuffix) {
		return funcName
	}

	// capitalize the first letter of the suffix before appending
	firstc := urlSuffix[0]
	if firstc >= 'a' && firstc <= 'z' {
		firstc -= 'a' - 'A'
	}
	candidate := funcName + string(firstc) + urlSuffix[1:]

	if _, ok := r.checkFuncName[candidate]; ok {
		return candidate
	}
	return funcName
}
/**
 * get func params
 *
 * Builds the argument list for a controller method by injecting a
 * value for each parameter based on its type: the request, URL,
 * HttpContext (pointer or value), the raw request body ([]uint8),
 * the ResponseWriter, the session, or — for any other type — a
 * struct packed from the request form. Unresolvable parameters fall
 * back to their zero value.
 *
 * @param funcType the method's reflect.Type
 * @param context  current request context
 */
func (r *ReflectRouter) getFuncArgs(funcType reflect.Type, context *HttpContext) []reflect.Value {
	argsNum := funcType.NumIn()
	args := make([]reflect.Value, argsNum, argsNum)

	for i := 0; i < argsNum; i++ {
		in := funcType.In(i)
		typeString := in.String()

		var argsValue reflect.Value

		switch typeString {
		case "*http.Request":
			argsValue = reflect.ValueOf(context.Request)
		case "http.Request":
			argsValue = reflect.ValueOf(context.Request).Elem()
		case "*url.URL":
			argsValue = reflect.ValueOf(context.Request.URL)
		case "url.URL":
			argsValue = reflect.ValueOf(context.Request.URL).Elem()
		case "*leafveingo.HttpContext":
			argsValue = reflect.ValueOf(context)
		case "leafveingo.HttpContext":
			argsValue = reflect.ValueOf(context).Elem()
		case "[]uint8":
			// raw request body; may be nil
			body := context.RequestBody()
			if nil != body {
				argsValue = reflect.ValueOf(body)
			} else {
				argsValue = reflect.Zero(in)
			}
		case "http.ResponseWriter":
			argsValue = reflect.ValueOf(context.RespWrite)
		case "LVSession.HttpSession":
			// false: do not create a session if none exists yet
			session, _ := context.Session(false)
			if nil != session {
				argsValue = reflect.ValueOf(session)
			} else {
				argsValue = reflect.Zero(in)
			}
		default:
			// any other type: try to pack it from the request form
			val, err := context.PackStructFormByRefType(in)
			if nil == err {
				argsValue = val
			} else {
				context.LVServer().Log().Debug(err.Error())
			}
		}

		// fall back to the zero value for anything left unresolved
		if reflect.Invalid == argsValue.Kind() {
			argsValue = reflect.Zero(in)
		}

		args[i] = argsValue
	}

	return args
}
//# mark ReflectRouter override IRouter -------------------------------------------------------------------------------------------
// AfterRouterParse validates the request scheme against the router's
// option and, for value (non-pointer) controllers, allocates a fresh
// controller instance for this request in option.RouterDataRefVal.
func (r *ReflectRouter) AfterRouterParse(context *HttpContext, option *RouterOption) HttpStatus {
	statusCode := Status200

	scheme := r.option.Scheme()
	if 0 != len(scheme) && scheme != context.RequestScheme() {
		return Status404
	}

	if reflect.Ptr != r.ctlRefVal.Kind() {
		// value-passed controller: a new instance per request
		option.RouterDataRefVal = reflect.New(r.ctlRefVal.Type())
	}

	return statusCode
}
// ParseFuncName maps the request (method + router path) onto a
// controller method name.
//
// With an AdeRouterController the controller performs its own mapping
// and may inject extra query parameters. Otherwise the name is derived
// from the path: a non-GET method contributes a capitalized prefix
// ("Post", ...), each path segment is capitalized and concatenated,
// and funcNameSuffixHandle may append the URL suffix. An empty path
// maps to CONTROLLER_DEFAULT_METHOD.
func (r *ReflectRouter) ParseFuncName(context *HttpContext, option *RouterOption) (funcName string, statusCode HttpStatus, err error) {
	/* advanced router: delegate the mapping to the controller */
	if r.isAdeRouter {
		var params map[string]string = nil

		if reflect.Invalid != option.RouterDataRefVal.Kind() {
			adeRouter := option.RouterDataRefVal.Interface().(AdeRouterController)
			funcName, params = adeRouter.RouterMethodParse(option)
		} else if nil != r.adeRouter {
			funcName, params = r.adeRouter.RouterMethodParse(option)
		}

		if 0 == len(funcName) {
			statusCode = Status404
		} else {
			statusCode = Status200
		}

		// merge controller-supplied params into the URL query
		if 0 != len(params) {
			values := context.Request.URL.Query()
			for k, v := range params {
				values.Set(k, v)
			}
			context.Request.URL.RawQuery = values.Encode()
		}
		return
	}

	statusCode = Status404
	method := option.RequestMethod
	reqPath := option.RouterPath

	/* parse func name prefix: "get" is implicit, anything else is
	   capitalized and prepended (post -> Post) */
	funcNamePrefix := ""
	if "get" != method {
		firstc := method[0]
		if firstc >= 'a' && firstc <= 'z' {
			firstc -= 'a' - 'A'
		}
		funcNamePrefix = string(firstc) + method[1:]
	}

	/* parse func name */
	// reqPath "" or "/" maps to the default method (Index)
	if 0 == len(reqPath) || (1 == len(reqPath) && '/' == reqPath[0]) {
		statusCode = Status200
		funcName = r.funcNameSuffixHandle(funcNamePrefix+CONTROLLER_DEFAULT_METHOD, option)
		return
	}

	// Camel-case the path: letters and digits are kept; any other
	// character is dropped and capitalizes the following letter, e.g.
	// "list/user" -> "ListUser".
	//
	// BUG FIX: separator characters themselves were previously copied
	// into the name ("list/user" -> "List/User"), which can never
	// match a Go method name and so always produced a 404, contrary
	// to the mapping documented above ReflectRouter.
	nameByte := make([]byte, len(reqPath))
	isUpper := true
	writeIdx := 0
	count := len(reqPath)
	for i := 0; i < count; i++ {
		c := reqPath[i]
		AZ := c >= 'A' && c <= 'Z'
		az := c >= 'a' && c <= 'z'
		digit := c >= '0' && c <= '9'
		if AZ || az {
			if isUpper {
				isUpper = false
				if az {
					c -= 'a' - 'A'
				}
			}
		} else if digit {
			// digits are kept and capitalize the following letter
			isUpper = true
		} else {
			// separator: skip it, capitalize what follows
			isUpper = true
			continue
		}
		nameByte[writeIdx] = c
		writeIdx++
	}

	if 0 != writeIdx {
		funcName = r.funcNameSuffixHandle(funcNamePrefix+string(nameByte[:writeIdx]), option)
		statusCode = Status200
	} else {
		statusCode = Status404
	}
	return
}
// CallFuncBefore runs the controller's Before hook when the controller
// implements BeforeAfterController, preferring the per-request
// instance over the shared pointer controller. Returns Status200 when
// no hook applies.
func (r *ReflectRouter) CallFuncBefore(context *HttpContext, option *RouterOption) HttpStatus {
	if !r.isBeforeAfter {
		return Status200
	}

	if reflect.Invalid != option.RouterDataRefVal.Kind() {
		beforeAfter := option.RouterDataRefVal.Interface().(BeforeAfterController)
		return beforeAfter.Before(context, option)
	}
	if nil != r.beforeAfter {
		return r.beforeAfter.Before(context, option)
	}
	return Status200
}
// CallFunc invokes the controller method named funcName (looked up in
// the index built at router creation) with arguments assembled by
// getFuncArgs. The per-request controller instance is preferred over
// the shared one. The method's first return value, if any, becomes
// returnValue; an unknown name yields Status404 and an error.
func (r *ReflectRouter) CallFunc(context *HttpContext, funcName string, option *RouterOption) (returnValue interface{}, statusCode HttpStatus, err error) {
	if index, ok := r.checkFuncName[funcName]; ok {
		statusCode = Status200

		var controller reflect.Value
		if reflect.Invalid != option.RouterDataRefVal.Kind() {
			controller = option.RouterDataRefVal
		} else {
			controller = r.ctlRefVal
		}

		refMet := controller.Method(index)

		// get params
		args := r.getFuncArgs(refMet.Type(), context)

		// call method
		reVals := refMet.Call(args)

		if 0 != len(reVals) {
			returnValue = reVals[0].Interface()
		}
	} else {
		statusCode = Status404
		err = NewLeafveinError("(" + r.typestr + ") not found func name: " + funcName)
	}
	return
}
// ParseTemplatePath builds the template path for a handled request:
// [host]/[routerKey]/[funcName][TemplateSuffix], with slashes trimmed
// from both ends of routerKey and funcName. Returns "" when funcName
// is empty.
func (r *ReflectRouter) ParseTemplatePath(context *HttpContext, funcName string, option *RouterOption) string {
	if 0 == len(funcName) {
		return ""
	}

	// trim leading/trailing '/' from the router key
	path := r.routerKey
	if 0 != len(path) && '/' == path[0] {
		path = path[1:]
	}
	// BUG FIX: for routerKey "/" the trim above leaves an empty
	// string, and the previously unguarded path[len(path)-1] access
	// panicked with index out of range.
	if 0 != len(path) && '/' == path[len(path)-1] {
		path = path[:len(path)-1]
	}

	// trim leading/trailing '/' from the func name
	name := funcName
	if '/' == name[0] {
		name = name[1:]
	}
	if 0 != len(name) && '/' == name[len(name)-1] {
		name = name[:len(name)-1]
	}

	hostPath := ""
	host := context.RequestHost()
	if 0 != len(host) {
		hostPath = host + "/"
	}

	return hostPath + path + "/" + name + context.LVServer().TemplateSuffix()
}
// CallFuncAfter runs the controller's After hook, mirroring
// CallFuncBefore: per-request instance first, then the shared pointer
// controller. No-op when the controller does not implement
// BeforeAfterController.
func (r *ReflectRouter) CallFuncAfter(context *HttpContext, option *RouterOption) {
	if r.isBeforeAfter {
		if reflect.Invalid != option.RouterDataRefVal.Kind() {
			beforeAfter := option.RouterDataRefVal.Interface().(BeforeAfterController)
			beforeAfter.After(context, option)
		} else if nil != r.beforeAfter {
			r.beforeAfter.After(context, option)
		}
	}
}
// RouterKey returns the key this router was registered under.
func (r *ReflectRouter) RouterKey() string {
	return r.routerKey
}
// ControllerOption exposes the embedded generic controller option.
func (r *ReflectRouter) ControllerOption() ControllerOption {
	return r.option.ControllerOption
}
// Info returns the human-readable router description built at
// creation time.
func (r *ReflectRouter) Info() string {
	return r.info
}
修正函数中带的数字名称
// Copyright 2014 slowfei And The Contributors All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Create on 2014-06-16
// Update on 2014-07-10
// Email slowfei#foxmail.com
// Home http://www.slowfei.com
//
// reflect router
//
package LVRouter
import (
"fmt"
. "github.com/slowfei/leafveingo"
"reflect"
)
var (
	// CONTROLLER_DEFAULT_METHOD is the method invoked for a bare
	// router-key request such as GET http://localhost:8080/.
	CONTROLLER_DEFAULT_METHOD = "Index"
)
//
// reflect router other option
//
// ReflectRouterOption carries the per-router matching options
// (scheme/host) for a reflect router; it embeds the generic
// ControllerOption and adds fluent value-receiver setters.
type ReflectRouterOption struct {
	ControllerOption
}
/**
 * default option
 *
 * Returns an option with empty (wildcard) scheme and host, so the
 * router matches any request.
 */
func DefaultReflectRouterOption() ReflectRouterOption {
	var option ReflectRouterOption
	option.ControllerOption.SetHost("")
	option.ControllerOption.SetScheme("")
	return option
}
/**
 * set scheme
 * "http" || "https" || ""(wildcard)
 *
 * Value receiver: mutates and returns a copy, allowing fluent
 * chaining without touching the original option.
 */
func (o ReflectRouterOption) SetScheme(scheme string) ReflectRouterOption {
	o.ControllerOption.SetScheme(scheme)
	return o
}
/**
 * set host
 * "svn.slowfei.com" || "wwww.slowfei.com" || ""(wildcard)
 *
 * Value receiver: mutates and returns a copy, allowing fluent
 * chaining without touching the original option.
 */
func (o ReflectRouterOption) SetHost(host string) ReflectRouterOption {
	o.ControllerOption.SetHost(host)
	return o
}
/**
 * checked params
 *
 * Validates the embedded ControllerOption in place (pointer receiver,
 * unlike the fluent value-receiver setters above).
 */
func (o *ReflectRouterOption) Checked() {
	o.ControllerOption.Checked()
}
//
// reflect router
//
// default template path: [host]/[routerKey]/[funcName].[TemplateSuffix]
// "[host]/" multi-project use, leafveinServer.SetMultiProjectHosts("slowfei.com","svn.slowfei.com")
// rule:
//
// router key = "/"
// URL = GET http://localhost:8080/
// func name = Index
// template path = [host]/Index.tpl
//
// router key = "/"
// URL = POST http://localhost:8080/
// func name = PostIndex
// template path = [host]/PostIndex.tpl
//
// router key = "/"
// URL = Get http://localhost:8080/user#!list
// func name = UserList
// template path = [host]/UserList.tpl
//
// router key = "/"
// URL = Post http://localhost:8080/user[^a-zA-Z]+list[^a-zA-Z]+auto
// func name = PostUserListAuto
// template path = [host]/PostUserListAuto.tpl
//
// router key = "/admin/"
// URL = GET http://localhost:8080/admin/login
// func name = Login
// template path = [host]/admin/Login.tpl
//
// router key = "/admin/"
// URL = POST http://localhost:8080/admin/login
// func name = PostLogin
// template path = [host]/admin/PostLogin.tpl
//
//
// 控制器分的指针传递和值传递
// 值传递:
// CreateReflectController("/pointer/struct/", PointerController{})
// 每次请求(http://localhost:8080/pointer/struct/) 都会根据设置的控制器类型新建立一个对象进行处理,直到一次请求周期结束。
//
// 指针传递:
// CreateReflectController("/pointer/", new(PointerController))
// 跟值传递相反,每次请求时都会使用设置的控制器地址进行处理,应用结束也不会改变,每次请求控制器都不会改变内存地址
// 这里涉及到并发时同时使用一个内存地址处理的问题,使用时需要注意
//
type ReflectRouter struct {
routerKey string // router key
beforeAfter BeforeAfterController // implement interface
adeRouter AdeRouterController // implement interface
isBeforeAfter bool
isAdeRouter bool
ctlRefVal reflect.Value // controller reflect value
checkFuncName map[string]int // check func name map
option ReflectRouterOption
info string
typestr string
}
/**
 * CreateReflectController creates a reflect router controller with the
 * default option (wildcard scheme/host).
 *
 * @param routerKey "/" || "/home/" || "/admin/"
 * @param controller controller value or pointer (see ReflectRouter doc)
 */
func CreateReflectController(routerKey string, controller interface{}) IRouter {
	return CreateReflectControllerWithOption(routerKey, controller, DefaultReflectRouterOption())
}
/**
 * CreateReflectControllerWithOption creates a reflect router controller.
 * Inspects the controller type once up front: records which optional
 * interfaces it implements and indexes all exported methods.
 *
 * @param routerKey  "/" || "/home/" || "/admin/"
 * @param controller controller value or pointer
 * @param option     other params option
 */
func CreateReflectControllerWithOption(routerKey string, controller interface{}, option ReflectRouterOption) IRouter {
	option.Checked()
	strBeforeAfter := ""
	strAde := ""
	refRouter := new(ReflectRouter)
	refRouter.routerKey = routerKey
	refRouter.checkFuncName = make(map[string]int)
	refRouter.ctlRefVal = reflect.ValueOf(controller)
	refRouter.option = option
	refRouter.isAdeRouter = false
	refRouter.isBeforeAfter = false
	// Use the pointer type to collect methods; a non-pointer type would
	// only expose its value-receiver methods.
	refType := reflect.New(reflect.Indirect(refRouter.ctlRefVal).Type()).Type()
	if refType.Implements(RefTypeAdeRouterController) {
		// keep the shared instance only for pointer controllers; value
		// controllers get a fresh per-request instance later
		if reflect.Ptr == refRouter.ctlRefVal.Kind() {
			refRouter.adeRouter = controller.(AdeRouterController)
		}
		refRouter.isAdeRouter = true
		strAde = "(Implemented AdeRouterController)"
	}
	if refType.Implements(RefTypeBeforeAfterController) {
		if reflect.Ptr == refRouter.ctlRefVal.Kind() {
			refRouter.beforeAfter = controller.(BeforeAfterController)
		}
		refRouter.isBeforeAfter = true
		strBeforeAfter = "(Implemented BeforeAfterController)"
	}
	// index exported methods (name starts with an upper-case letter)
	for i := 0; i < refType.NumMethod(); i++ {
		refMet := refType.Method(i)
		funcName := refMet.Name
		if funcName[0] >= 'A' && funcName[0] <= 'Z' {
			refRouter.checkFuncName[funcName] = i
		}
	}
	refRouter.typestr = refRouter.ctlRefVal.Type().String()
	refRouter.info = fmt.Sprintf("ReflectRouter(%v) %v%v", refRouter.ctlRefVal.Type(), strBeforeAfter, strAde)
	return refRouter
}
/**
 * funcNameSuffixHandle appends the capitalized request URL suffix to the
 * func name when a controller method with that longer name exists;
 * otherwise the base func name is returned unchanged.
 *
 * @param funcName base func name parsed from the request path
 * @param option   current router option (supplies UrlSuffix)
 */
func (r *ReflectRouter) funcNameSuffixHandle(funcName string, option *RouterOption) string {
	suffix := option.UrlSuffix
	if 0 == len(suffix) {
		return funcName
	}
	// upper-case the suffix's first letter before appending
	head := suffix[0]
	if 'a' <= head && head <= 'z' {
		head -= 'a' - 'A'
	}
	candidate := funcName + string(head) + suffix[1:]
	if _, exists := r.checkFuncName[candidate]; exists {
		return candidate
	}
	return funcName
}
/**
 * getFuncArgs builds the argument list for a controller method by
 * matching each parameter type against the supported injectable types
 * (request, URL, context, body, response writer, session); any other
 * type is packed from the request form. Unresolvable params are passed
 * as their zero value so the reflect call never panics on an invalid
 * Value.
 *
 * @param funcType the controller method's reflect type
 * @param context  current request context
 */
func (r *ReflectRouter) getFuncArgs(funcType reflect.Type, context *HttpContext) []reflect.Value {
	argsNum := funcType.NumIn()
	args := make([]reflect.Value, argsNum, argsNum)
	for i := 0; i < argsNum; i++ {
		in := funcType.In(i)
		typeString := in.String()
		var argsValue reflect.Value
		switch typeString {
		case "*http.Request":
			argsValue = reflect.ValueOf(context.Request)
		case "http.Request":
			argsValue = reflect.ValueOf(context.Request).Elem()
		case "*url.URL":
			argsValue = reflect.ValueOf(context.Request.URL)
		case "url.URL":
			argsValue = reflect.ValueOf(context.Request.URL).Elem()
		case "*leafveingo.HttpContext":
			argsValue = reflect.ValueOf(context)
		case "leafveingo.HttpContext":
			argsValue = reflect.ValueOf(context).Elem()
		case "[]uint8":
			// raw request body bytes; may be nil
			body := context.RequestBody()
			if nil != body {
				argsValue = reflect.ValueOf(body)
			} else {
				argsValue = reflect.Zero(in)
			}
		case "http.ResponseWriter":
			argsValue = reflect.ValueOf(context.RespWrite)
		case "LVSession.HttpSession":
			// false: do not create a session if none exists yet
			session, _ := context.Session(false)
			if nil != session {
				argsValue = reflect.ValueOf(session)
			} else {
				argsValue = reflect.Zero(in)
			}
		default:
			// try to bind request form values onto a struct of type `in`
			val, err := context.PackStructFormByRefType(in)
			if nil == err {
				argsValue = val
			} else {
				context.LVServer().Log().Debug(err.Error())
			}
		}
		// fall back to the zero value for anything still unresolved
		if reflect.Invalid == argsValue.Kind() {
			argsValue = reflect.Zero(in)
		}
		args[i] = argsValue
	}
	return args
}
//# mark ReflectRouter override IRouter -------------------------------------------------------------------------------------------

// AfterRouterParse runs after the router key matched: enforces the
// optional scheme restriction and, for value controllers, creates the
// per-request controller instance in option.RouterDataRefVal.
func (r *ReflectRouter) AfterRouterParse(context *HttpContext, option *RouterOption) HttpStatus {
	statusCode := Status200
	scheme := r.option.Scheme()
	if 0 != len(scheme) && scheme != context.RequestScheme() {
		return Status404
	}
	// value controller: fresh instance per request
	if reflect.Ptr != r.ctlRefVal.Kind() {
		option.RouterDataRefVal = reflect.New(r.ctlRefVal.Type())
	}
	return statusCode
}
// ParseFuncName derives the controller func name from the request
// method and path.
//
// If the controller implements AdeRouterController the parse is
// delegated to it and any params it returns are merged into the URL
// query string. Otherwise the name is built as
// [capitalized method prefix, omitted for GET] + CamelCase(path),
// e.g. POST /user/list -> "PostUserList".
func (r *ReflectRouter) ParseFuncName(context *HttpContext, option *RouterOption) (funcName string, statusCode HttpStatus, err error) {
	/* advanced router (AdeRouterController) handling */
	if r.isAdeRouter {
		var params map[string]string = nil
		if reflect.Invalid != option.RouterDataRefVal.Kind() {
			adeRouter := option.RouterDataRefVal.Interface().(AdeRouterController)
			funcName, params = adeRouter.RouterMethodParse(option)
		} else if nil != r.adeRouter {
			funcName, params = r.adeRouter.RouterMethodParse(option)
		}
		if 0 == len(funcName) {
			statusCode = Status404
		} else {
			statusCode = Status200
		}
		// merge returned params into the URL query string
		if 0 != len(params) {
			values := context.Request.URL.Query()
			for k, v := range params {
				values.Set(k, v)
			}
			context.Request.URL.RawQuery = values.Encode()
		}
		return
	}
	statusCode = Status404
	method := option.RequestMethod
	reqPath := option.RouterPath
	/* parse func name prefix: empty for GET, capitalized method otherwise */
	funcNamePrefix := ""
	if "get" != method {
		firstc := method[0]
		if firstc >= 'a' && firstc <= 'z' {
			firstc -= 'a' - 'A'
		}
		first := string(firstc)
		funcNamePrefix = first + method[1:]
	}
	/* parse func name */
	// url = "http://localhost:8080/router/" router key = "/router/" || "/router"
	// reqPath = "" || "/" to Default func name
	if 0 == len(reqPath) || (1 == len(reqPath) && '/' == reqPath[0]) {
		statusCode = Status200
		funcName = r.funcNameSuffixHandle(funcNamePrefix+CONTROLLER_DEFAULT_METHOD, option)
		return
	}
	// url = "http://localhost:8080/router/[reqPath]" router key = "/router/"
	// reqPath = "list"                                  funcName = "List"
	// reqPath = "list#!json" || "list[^a-zA-Z]*json"    funcName = "ListJson"
	// reqPath = "list/user"                             funcName = "ListUser"
	// reqPath = "list/user/auto"                        funcName = "ListUserAuto"
	//
	// CamelCase conversion: alphanumeric runs are kept and each run's
	// first letter upper-cased; every other byte acts as a separator.
	nameByte := make([]byte, len(reqPath))
	isUpper := true
	writeIdx := 0
	count := len(reqPath)
	for i := 0; i < count; i++ {
		c := reqPath[i]
		AZ := c >= 'A' && c <= 'Z'
		az := c >= 'a' && c <= 'z'
		number := c >= '0' && c <= '9'
		if AZ || az || number {
			if isUpper {
				isUpper = false
				if az {
					c -= 'a' - 'A'
				}
			}
			nameByte[writeIdx] = c
			writeIdx++
		} else {
			isUpper = true
		}
	}
	if 0 != writeIdx {
		funcName = r.funcNameSuffixHandle(funcNamePrefix+string(nameByte[:writeIdx]), option)
		statusCode = Status200
	} else {
		// path contained no alphanumeric characters at all
		statusCode = Status404
	}
	return
}
// CallFuncBefore invokes the controller's Before hook (when the
// controller implements BeforeAfterController) and returns its status.
// The per-request controller instance (option.RouterDataRefVal) takes
// precedence over the shared pointer controller.
//
// Fix: renamed the misspelled local variable "statucCode" to
// "statusCode" (naming consistency with the rest of the file).
func (r *ReflectRouter) CallFuncBefore(context *HttpContext, option *RouterOption) HttpStatus {
	statusCode := Status200
	if r.isBeforeAfter {
		if reflect.Invalid != option.RouterDataRefVal.Kind() {
			beforeAfter := option.RouterDataRefVal.Interface().(BeforeAfterController)
			statusCode = beforeAfter.Before(context, option)
		} else if nil != r.beforeAfter {
			statusCode = r.beforeAfter.Before(context, option)
		}
	}
	return statusCode
}
// CallFunc invokes the named controller method via reflection and
// returns its first return value (if any) as the response value.
// The per-request controller instance (option.RouterDataRefVal) takes
// precedence over the shared controller value.
// Returns Status404 and an error when the func name is not indexed.
func (r *ReflectRouter) CallFunc(context *HttpContext, funcName string, option *RouterOption) (returnValue interface{}, statusCode HttpStatus, err error) {
	if index, ok := r.checkFuncName[funcName]; ok {
		statusCode = Status200
		var controller reflect.Value
		if reflect.Invalid != option.RouterDataRefVal.Kind() {
			controller = option.RouterDataRefVal
		} else {
			controller = r.ctlRefVal
		}
		refMet := controller.Method(index)
		// get params
		args := r.getFuncArgs(refMet.Type(), context)
		// call method
		reVals := refMet.Call(args)
		// only the first return value is used as the response
		if 0 != len(reVals) {
			returnValue = reVals[0].Interface()
		}
	} else {
		statusCode = Status404
		err = NewLeafveinError("(" + r.typestr + ") not found func name: " + funcName)
	}
	return
}
// ParseTemplatePath builds the template path for the handled request:
// [host/]<routerKey>/<funcName><TemplateSuffix>, with leading and
// trailing '/' stripped from both router key and func name.
// Returns "" when funcName is empty.
//
// Fix: the original indexed path[0]/path[pathLen-1] (and the name
// equivalents) without re-checking length after trimming, which panics
// for routerKey "" or "/" (trimming the leading '/' leaves an empty
// string, so the trailing-slash check indexes position -1); likewise
// for funcName "/".
func (r *ReflectRouter) ParseTemplatePath(context *HttpContext, funcName string, option *RouterOption) string {
	if 0 == len(funcName) {
		return ""
	}
	path := r.routerKey
	if 0 < len(path) && '/' == path[0] {
		path = path[1:]
	}
	if 0 < len(path) && '/' == path[len(path)-1] {
		path = path[:len(path)-1]
	}
	name := funcName
	if '/' == name[0] {
		name = name[1:]
	}
	if 0 < len(name) && '/' == name[len(name)-1] {
		name = name[:len(name)-1]
	}
	// "[host]/" prefix is only present for multi-project hosting
	hostPath := ""
	host := context.RequestHost()
	if 0 != len(host) {
		hostPath = host + "/"
	}
	return hostPath + path + "/" + name + context.LVServer().TemplateSuffix()
}
// CallFuncAfter invokes the controller's After hook (when the
// controller implements BeforeAfterController). The per-request
// controller instance takes precedence over the shared pointer one.
func (r *ReflectRouter) CallFuncAfter(context *HttpContext, option *RouterOption) {
	if r.isBeforeAfter {
		if reflect.Invalid != option.RouterDataRefVal.Kind() {
			beforeAfter := option.RouterDataRefVal.Interface().(BeforeAfterController)
			beforeAfter.After(context, option)
		} else if nil != r.beforeAfter {
			r.beforeAfter.After(context, option)
		}
	}
}

// RouterKey returns the registered router key, e.g. "/admin/".
func (r *ReflectRouter) RouterKey() string {
	return r.routerKey
}

// ControllerOption returns the scheme/host restriction option.
func (r *ReflectRouter) ControllerOption() ControllerOption {
	return r.option.ControllerOption
}

// Info returns a human-readable description of this router.
func (r *ReflectRouter) Info() string {
	return r.info
}
|
// Copyright 2014 slowfei And The Contributors All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Create on 2014-06-30
// Update on 2014-07-08
// Email slowfei#foxmail.com
// Home http://www.slowfei.com
//
// RESTful router
//
package LVRouter
import (
"fmt"
. "github.com/slowfei/leafveingo"
"reflect"
)
//
// RESTfulController is implemented by RESTful controllers; each method
// handles the matching HTTP request method. Return values are handled
// as response bodies (see response_body.go).
//
type RESTfulController interface {
	// Get handles HTTP GET requests.
	Get(context *HttpContext) interface{}
	// Post handles HTTP POST requests.
	Post(context *HttpContext) interface{}
	// Put handles HTTP PUT requests.
	Put(context *HttpContext) interface{}
	// Delete handles HTTP DELETE requests.
	Delete(context *HttpContext) interface{}
	// Header handles requests whose parsed func name is "header".
	// NOTE(review): the HTTP method is spelled "HEAD"; a HEAD request
	// would be dispatched to Other rather than here — confirm the
	// intended mapping.
	Header(context *HttpContext) interface{}
	// Options handles HTTP OPTIONS requests.
	Options(context *HttpContext) interface{}
	// Other handles any request method not listed above.
	Other(context *HttpContext) interface{}
}
//
// RESTfulRouterOption carries the optional settings (scheme/host
// restrictions) for a RESTful router.
//
type RESTfulRouterOption struct {
	ControllerOption
}

/**
 * DefaultRESTfulRouterOption returns an option with wildcard ("")
 * scheme and host, i.e. no request filtering.
 */
func DefaultRESTfulRouterOption() RESTfulRouterOption {
	option := RESTfulRouterOption{}
	option.ControllerOption.SetHost("")
	option.ControllerOption.SetScheme("")
	return option
}

/**
 * SetScheme restricts the router to a request scheme:
 * "http" || "https" || ""(wildcard)
 * Value receiver: returns the modified copy for chaining.
 */
func (o RESTfulRouterOption) SetScheme(scheme string) RESTfulRouterOption {
	o.ControllerOption.SetScheme(scheme)
	return o
}

/**
 * SetHost restricts the router to a request host:
 * "svn.slowfei.com" || "www.slowfei.com" || ""(wildcard)
 * Value receiver: returns the modified copy for chaining.
 */
func (o RESTfulRouterOption) SetHost(host string) RESTfulRouterOption {
	o.ControllerOption.SetHost(host)
	return o
}

/**
 * Checked validates/normalizes the option params (delegates to the
 * embedded ControllerOption).
 */
func (o *RESTfulRouterOption) Checked() {
	o.ControllerOption.Checked()
}
//
// RESTful router
//
// default template parh: [host]/[routerKey]/[funcNme].[TemplateSuffix]
// "[host]/" multi-project use, lefveinServer.SetMultiProjectHosts("slowfei.com","svn.slowfei.com")
// rule:
//
// router key = "/api/object"
// URL = GET http://localhost:8080/api/object
// func name = get
// template path = [host]/api/object/get.tpl
//
// router key = "/api/object"
// URL = POST http://localhost:8080/api/object
// func name = post
// template path = [host]/api/object/post.tpl
//
// router key = "/api/object"
// URL = PUT http://localhost:8080/api/object
// func name = put
// template path = [host]/api/object/put.tpl
//
// router key = "/api/object"
// URL = DELETE http://localhost:8080/api/object
// func name = delete
// template path = [host]/api/object/delete.tpl
//
//
// url params: implement AdeRouterController interface resolve on their own
//
// 控制器分的指针传递和值传递
// 值传递:
// CreateReflectController("/pointer/struct/", PointerController{})
// 每次请求(http://localhost:8080/pointer/struct/) 都会根据设置的控制器类型新建立一个对象进行处理,直到一次请求周期结束。
//
// 指针传递:
// CreateReflectController("/pointer/", new(PointerController))
// 跟值传递相反,每次请求时都会使用设置的控制器地址进行处理,应用结束也不会改变,每次请求控制器都不会改变内存地址
// 这里涉及到并发时同时使用一个内存地址处理的问题,使用时需要注意
//
type RESTfulRouter struct {
	routerKey     string                // router key, e.g. "/api/object"
	beforeAfter   BeforeAfterController // set when a pointer controller implements the interface
	adeRouter     AdeRouterController   // set when a pointer controller implements the interface
	isBeforeAfter bool                  // controller type implements BeforeAfterController
	isAdeRouter   bool                  // controller type implements AdeRouterController
	controller    RESTfulController     // shared controller instance
	ctlType       reflect.Type          // controller type (pointer or value)
	option        RESTfulRouterOption
	info          string // human-readable router description
}
/**
 * CreateRESTfulController creates a RESTful router controller with the
 * default option (wildcard scheme/host).
 *
 * @param routerKey "/" || "/home/" || "/admin/"
 * @param controller RESTfulController implementation (value or pointer)
 */
func CreateRESTfulController(routerKey string, controller RESTfulController) IRouter {
	return CreateRESTfulControllerWithOption(routerKey, controller, DefaultRESTfulRouterOption())
}
/**
 * CreateRESTfulControllerWithOption creates a RESTful router
 * controller, recording which optional interfaces (AdeRouterController,
 * BeforeAfterController) the controller type implements.
 *
 * @param routerKey  "/" || "/home/" || "/admin/"
 * @param controller RESTfulController implementation (value or pointer)
 * @param option     other params option
 */
func CreateRESTfulControllerWithOption(routerKey string, controller RESTfulController, option RESTfulRouterOption) IRouter {
	option.Checked()
	strBeforeAfter := ""
	strAde := ""
	router := new(RESTfulRouter)
	router.routerKey = routerKey
	router.option = option
	router.controller = controller
	router.ctlType = reflect.TypeOf(controller)
	// Use the pointer type to collect methods; a non-pointer type would
	// only expose its value-receiver methods.
	refType := reflect.New(reflect.Indirect(reflect.ValueOf(controller)).Type()).Type()
	if refType.Implements(RefTypeAdeRouterController) {
		// keep the shared instance only for pointer controllers
		if reflect.Ptr == router.ctlType.Kind() {
			router.adeRouter = controller.(AdeRouterController)
		}
		router.isAdeRouter = true
		strAde = "(Implemented AdeRouterController)"
	}
	if refType.Implements(RefTypeBeforeAfterController) {
		if reflect.Ptr == router.ctlType.Kind() {
			router.beforeAfter = controller.(BeforeAfterController)
		}
		router.isBeforeAfter = true
		strBeforeAfter = "(Implemented BeforeAfterController)"
	}
	router.info = fmt.Sprintf("RESTfulRouter(%v) %v%v", router.ctlType, strBeforeAfter, strAde)
	return router
}
//# mark RESTfulRouter override IRouter -------------------------------------------------------------------------------------------

// AfterRouterParse runs after the router key matched: enforces the
// optional scheme restriction and, for value controllers, creates the
// per-request controller instance in option.RouterData.
//
// NOTE(review): the default return here is Status404 while the
// equivalent ReflectRouter method defaults to Status200, which also
// makes the explicit Status404 return below redundant — confirm
// whether 404 is intended or how callers interpret this status.
func (r *RESTfulRouter) AfterRouterParse(context *HttpContext, option *RouterOption) HttpStatus {
	statusCode := Status404
	scheme := r.option.Scheme()
	if 0 != len(scheme) && scheme != context.RequestScheme() {
		return Status404
	}
	// value controller: fresh instance per request
	if reflect.Ptr != r.ctlType.Kind() {
		option.RouterData = reflect.New(r.ctlType).Interface()
	}
	return statusCode
}
// ParseFuncName derives the handler func name for the request.
// With an AdeRouterController the parse is delegated to it and any
// returned params are merged into the URL query string; otherwise the
// lowercase request method ("get", "post", ...) is used directly.
func (r *RESTfulRouter) ParseFuncName(context *HttpContext, option *RouterOption) (funcName string, statusCode HttpStatus, err error) {
	/* advanced router (AdeRouterController) handling */
	if r.isAdeRouter {
		var params map[string]string = nil
		if nil != option.RouterData {
			adeRouter := option.RouterData.(AdeRouterController)
			funcName, params = adeRouter.RouterMethodParse(option)
		} else if nil != r.adeRouter {
			funcName, params = r.adeRouter.RouterMethodParse(option)
		}
		if 0 == len(funcName) {
			statusCode = Status404
		} else {
			statusCode = Status200
		}
		// merge returned params into the URL query string
		if 0 != len(params) {
			values := context.Request.URL.Query()
			for k, v := range params {
				values.Set(k, v)
			}
			context.Request.URL.RawQuery = values.Encode()
		}
		return
	}
	funcName = option.RequestMethod
	statusCode = Status200
	return
}
// CallFuncBefore invokes the controller's Before hook (when the
// controller implements BeforeAfterController) and returns its status.
// The per-request controller instance (option.RouterData) takes
// precedence over the shared one.
//
// Fix: renamed the misspelled local variable "statucCode" to
// "statusCode" (naming consistency with the rest of the file).
func (r *RESTfulRouter) CallFuncBefore(context *HttpContext, option *RouterOption) HttpStatus {
	statusCode := Status200
	if r.isBeforeAfter {
		if nil != option.RouterData {
			beforeAfter := option.RouterData.(BeforeAfterController)
			statusCode = beforeAfter.Before(context, option)
		} else if nil != r.beforeAfter {
			statusCode = r.beforeAfter.Before(context, option)
		}
	}
	return statusCode
}
// CallFunc dispatches the request to the controller method matching
// the (lowercase) func name; unknown names go to Other. The per-request
// controller instance (option.RouterData) takes precedence over the
// shared one.
func (r *RESTfulRouter) CallFunc(context *HttpContext, funcName string, option *RouterOption) (returnValue interface{}, statusCode HttpStatus, err error) {
	var controller RESTfulController = nil
	if nil != option.RouterData {
		controller = option.RouterData.(RESTfulController)
	} else {
		controller = r.controller
	}
	switch funcName {
	case "get":
		returnValue = controller.Get(context)
	case "post":
		returnValue = controller.Post(context)
	case "put":
		returnValue = controller.Put(context)
	case "delete":
		returnValue = controller.Delete(context)
	case "header":
		returnValue = controller.Header(context)
	case "options":
		returnValue = controller.Options(context)
	default:
		returnValue = controller.Other(context)
	}
	statusCode = Status200
	return
}
// ParseTemplatePath builds the template path for the handled request:
// [host/]<routerKey>/<funcName><TemplateSuffix>, with leading and
// trailing '/' stripped from both router key and func name.
// Returns "" when funcName is empty.
//
// Fix: the original indexed path[0]/path[pathLen-1] (and the name
// equivalents) without re-checking length after trimming, which panics
// for routerKey "" or "/" (trimming the leading '/' leaves an empty
// string, so the trailing-slash check indexes position -1); likewise
// for funcName "/".
func (r *RESTfulRouter) ParseTemplatePath(context *HttpContext, funcName string, option *RouterOption) string {
	if 0 == len(funcName) {
		return ""
	}
	path := r.routerKey
	if 0 < len(path) && '/' == path[0] {
		path = path[1:]
	}
	if 0 < len(path) && '/' == path[len(path)-1] {
		path = path[:len(path)-1]
	}
	name := funcName
	if '/' == name[0] {
		name = name[1:]
	}
	if 0 < len(name) && '/' == name[len(name)-1] {
		name = name[:len(name)-1]
	}
	// "[host]/" prefix is only present for multi-project hosting
	hostPath := ""
	host := context.RequestHost()
	if 0 != len(host) {
		hostPath = host + "/"
	}
	return hostPath + path + "/" + name + context.LVServer().TemplateSuffix()
}
// CallFuncAfter invokes the controller's After hook (when the
// controller implements BeforeAfterController). The per-request
// controller instance takes precedence over the shared one.
func (r *RESTfulRouter) CallFuncAfter(context *HttpContext, option *RouterOption) {
	if r.isBeforeAfter {
		if nil != option.RouterData {
			beforeAfter := option.RouterData.(BeforeAfterController)
			beforeAfter.After(context, option)
		} else if nil != r.beforeAfter {
			r.beforeAfter.After(context, option)
		}
	}
}

// RouterKey returns the registered router key, e.g. "/api/object".
func (r *RESTfulRouter) RouterKey() string {
	return r.routerKey
}

// ControllerOption returns the scheme/host restriction option.
func (r *RESTfulRouter) ControllerOption() ControllerOption {
	return r.option.ControllerOption
}

// Info returns a human-readable description of this router.
func (r *RESTfulRouter) Info() string {
	return r.info
}
修正CreateRESTfulController值控制器的接收
// Copyright 2014 slowfei And The Contributors All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Create on 2014-06-30
// Update on 2014-07-08
// Email slowfei#foxmail.com
// Home http://www.slowfei.com
//
// RESTful router
//
package LVRouter
import (
"errors"
"fmt"
. "github.com/slowfei/leafveingo"
"reflect"
)
//
// RESTfulController is implemented by RESTful controllers; each method
// handles the matching HTTP request method. Return values are handled
// as response bodies (see response_body.go).
//
type RESTfulController interface {
	// Get handles HTTP GET requests.
	Get(context *HttpContext) interface{}
	// Post handles HTTP POST requests.
	Post(context *HttpContext) interface{}
	// Put handles HTTP PUT requests.
	Put(context *HttpContext) interface{}
	// Delete handles HTTP DELETE requests.
	Delete(context *HttpContext) interface{}
	// Header handles requests whose parsed func name is "header".
	// NOTE(review): the HTTP method is spelled "HEAD"; a HEAD request
	// would be dispatched to Other rather than here — confirm the
	// intended mapping.
	Header(context *HttpContext) interface{}
	// Options handles HTTP OPTIONS requests.
	Options(context *HttpContext) interface{}
	// Other handles any request method not listed above.
	Other(context *HttpContext) interface{}
}
//
// RESTfulRouterOption carries the optional settings (scheme/host
// restrictions) for a RESTful router.
//
type RESTfulRouterOption struct {
	ControllerOption
}

/**
 * DefaultRESTfulRouterOption returns an option with wildcard ("")
 * scheme and host, i.e. no request filtering.
 */
func DefaultRESTfulRouterOption() RESTfulRouterOption {
	option := RESTfulRouterOption{}
	option.ControllerOption.SetHost("")
	option.ControllerOption.SetScheme("")
	return option
}

/**
 * SetScheme restricts the router to a request scheme:
 * "http" || "https" || ""(wildcard)
 * Value receiver: returns the modified copy for chaining.
 */
func (o RESTfulRouterOption) SetScheme(scheme string) RESTfulRouterOption {
	o.ControllerOption.SetScheme(scheme)
	return o
}

/**
 * SetHost restricts the router to a request host:
 * "svn.slowfei.com" || "www.slowfei.com" || ""(wildcard)
 * Value receiver: returns the modified copy for chaining.
 */
func (o RESTfulRouterOption) SetHost(host string) RESTfulRouterOption {
	o.ControllerOption.SetHost(host)
	return o
}

/**
 * Checked validates/normalizes the option params (delegates to the
 * embedded ControllerOption).
 */
func (o *RESTfulRouterOption) Checked() {
	o.ControllerOption.Checked()
}
//
// RESTful router
//
// default template parh: [host]/[routerKey]/[funcNme].[TemplateSuffix]
// "[host]/" multi-project use, lefveinServer.SetMultiProjectHosts("slowfei.com","svn.slowfei.com")
// rule:
//
// router key = "/api/object"
// URL = GET http://localhost:8080/api/object
// func name = get
// template path = [host]/api/object/get.tpl
//
// router key = "/api/object"
// URL = POST http://localhost:8080/api/object
// func name = post
// template path = [host]/api/object/post.tpl
//
// router key = "/api/object"
// URL = PUT http://localhost:8080/api/object
// func name = put
// template path = [host]/api/object/put.tpl
//
// router key = "/api/object"
// URL = DELETE http://localhost:8080/api/object
// func name = delete
// template path = [host]/api/object/delete.tpl
//
//
// url params: implement AdeRouterController interface resolve on their own
//
// 控制器分的指针传递和值传递
// 值传递:
// CreateReflectController("/pointer/struct/", PointerController{})
// 每次请求(http://localhost:8080/pointer/struct/) 都会根据设置的控制器类型新建立一个对象进行处理,直到一次请求周期结束。
//
// 指针传递:
// CreateReflectController("/pointer/", new(PointerController))
// 跟值传递相反,每次请求时都会使用设置的控制器地址进行处理,应用结束也不会改变,每次请求控制器都不会改变内存地址
// 这里涉及到并发时同时使用一个内存地址处理的问题,使用时需要注意
//
type RESTfulRouter struct {
	routerKey     string                // router key, e.g. "/api/object"
	beforeAfter   BeforeAfterController // set when a pointer controller implements the interface
	adeRouter     AdeRouterController   // set when a pointer controller implements the interface
	isBeforeAfter bool                  // controller type implements BeforeAfterController
	isAdeRouter   bool                  // controller type implements AdeRouterController
	controller    RESTfulController     // shared controller instance (see CreateRESTfulControllerWithOption)
	ctlType       reflect.Type          // controller type (pointer or value)
	option        RESTfulRouterOption
	info          string // human-readable router description
}
/**
 * CreateRESTfulController creates a RESTful router controller with the
 * default option (wildcard scheme/host). Accepts interface{} so that
 * value controllers whose methods use pointer receivers can also be
 * registered (see CreateRESTfulControllerWithOption).
 *
 * @param routerKey "/" || "/home/" || "/admin/"
 * @param controller controller value or pointer
 */
func CreateRESTfulController(routerKey string, controller interface{}) IRouter {
	return CreateRESTfulControllerWithOption(routerKey, controller, DefaultRESTfulRouterOption())
}
/**
 * CreateRESTfulControllerWithOption creates a RESTful router controller.
 *
 * A fresh pointer instance of the controller's underlying type is
 * created and must satisfy RESTfulController (panics otherwise); using
 * a pointer lets pointer-receiver methods satisfy the interface, so
 * value controllers can be registered too.
 *
 * NOTE(review): router.controller is the freshly created zero-value
 * instance, not the instance the caller passed in, so state set on the
 * caller's controller is not seen by dispatch (the original instance is
 * still used for the AdeRouter/BeforeAfter pointer hooks below) —
 * confirm this is intended.
 *
 * @param option other params option
 */
func CreateRESTfulControllerWithOption(routerKey string, controller interface{}, option RESTfulRouterOption) IRouter {
	option.Checked()
	strBeforeAfter := ""
	strAde := ""
	ok := false
	// fresh pointer instance of the controller's underlying type
	newRefController := reflect.New(reflect.Indirect(reflect.ValueOf(controller)).Type())
	router := new(RESTfulRouter)
	router.routerKey = routerKey
	router.option = option
	router.ctlType = reflect.TypeOf(controller)
	router.controller, ok = newRefController.Interface().(RESTfulController)
	if !ok {
		panic(errors.New(fmt.Sprintf("%v does not implement RESTfulController method has pointer receiver", router.ctlType.String())))
	}
	// Use the pointer type to collect methods; a non-pointer type would
	// only expose its value-receiver methods.
	refType := newRefController.Type()
	if refType.Implements(RefTypeAdeRouterController) {
		// keep the caller's instance only for pointer controllers
		if reflect.Ptr == router.ctlType.Kind() {
			router.adeRouter = controller.(AdeRouterController)
		}
		router.isAdeRouter = true
		strAde = "(Implemented AdeRouterController)"
	}
	if refType.Implements(RefTypeBeforeAfterController) {
		if reflect.Ptr == router.ctlType.Kind() {
			router.beforeAfter = controller.(BeforeAfterController)
		}
		router.isBeforeAfter = true
		strBeforeAfter = "(Implemented BeforeAfterController)"
	}
	router.info = fmt.Sprintf("RESTfulRouter(%v) %v%v", router.ctlType, strBeforeAfter, strAde)
	return router
}
//# mark RESTfulRouter override IRouter -------------------------------------------------------------------------------------------

// AfterRouterParse runs after the router key matched: enforces the
// optional scheme restriction and, for value controllers, creates the
// per-request controller instance in option.RouterData.
//
// NOTE(review): the default return here is Status404 while the
// equivalent ReflectRouter method defaults to Status200, which also
// makes the explicit Status404 return below redundant — confirm
// whether 404 is intended or how callers interpret this status.
func (r *RESTfulRouter) AfterRouterParse(context *HttpContext, option *RouterOption) HttpStatus {
	statusCode := Status404
	scheme := r.option.Scheme()
	if 0 != len(scheme) && scheme != context.RequestScheme() {
		return Status404
	}
	// value controller: fresh instance per request
	if reflect.Ptr != r.ctlType.Kind() {
		option.RouterData = reflect.New(r.ctlType).Interface()
	}
	return statusCode
}
// ParseFuncName derives the handler func name for the request.
// With an AdeRouterController the parse is delegated to it and any
// returned params are merged into the URL query string; otherwise the
// lowercase request method ("get", "post", ...) is used directly.
func (r *RESTfulRouter) ParseFuncName(context *HttpContext, option *RouterOption) (funcName string, statusCode HttpStatus, err error) {
	/* advanced router (AdeRouterController) handling */
	if r.isAdeRouter {
		var params map[string]string = nil
		if nil != option.RouterData {
			adeRouter := option.RouterData.(AdeRouterController)
			funcName, params = adeRouter.RouterMethodParse(option)
		} else if nil != r.adeRouter {
			funcName, params = r.adeRouter.RouterMethodParse(option)
		}
		if 0 == len(funcName) {
			statusCode = Status404
		} else {
			statusCode = Status200
		}
		// merge returned params into the URL query string
		if 0 != len(params) {
			values := context.Request.URL.Query()
			for k, v := range params {
				values.Set(k, v)
			}
			context.Request.URL.RawQuery = values.Encode()
		}
		return
	}
	funcName = option.RequestMethod
	statusCode = Status200
	return
}
// CallFuncBefore invokes the controller's Before hook (when the
// controller implements BeforeAfterController) and returns its status.
// The per-request controller instance (option.RouterData) takes
// precedence over the shared one.
//
// Fix: renamed the misspelled local variable "statucCode" to
// "statusCode" (naming consistency with the rest of the file).
func (r *RESTfulRouter) CallFuncBefore(context *HttpContext, option *RouterOption) HttpStatus {
	statusCode := Status200
	if r.isBeforeAfter {
		if nil != option.RouterData {
			beforeAfter := option.RouterData.(BeforeAfterController)
			statusCode = beforeAfter.Before(context, option)
		} else if nil != r.beforeAfter {
			statusCode = r.beforeAfter.Before(context, option)
		}
	}
	return statusCode
}
// CallFunc dispatches the request to the controller method matching
// the (lowercase) func name; unknown names go to Other. The per-request
// controller instance (option.RouterData) takes precedence over the
// shared one.
func (r *RESTfulRouter) CallFunc(context *HttpContext, funcName string, option *RouterOption) (returnValue interface{}, statusCode HttpStatus, err error) {
	var controller RESTfulController = nil
	if nil != option.RouterData {
		controller = option.RouterData.(RESTfulController)
	} else {
		controller = r.controller
	}
	switch funcName {
	case "get":
		returnValue = controller.Get(context)
	case "post":
		returnValue = controller.Post(context)
	case "put":
		returnValue = controller.Put(context)
	case "delete":
		returnValue = controller.Delete(context)
	case "header":
		returnValue = controller.Header(context)
	case "options":
		returnValue = controller.Options(context)
	default:
		returnValue = controller.Other(context)
	}
	statusCode = Status200
	return
}
// ParseTemplatePath builds the template path for the handled request:
// [host/]<routerKey>/<funcName><TemplateSuffix>, with leading and
// trailing '/' stripped from both router key and func name.
// Returns "" when funcName is empty.
//
// Fix: the original indexed path[0]/path[pathLen-1] (and the name
// equivalents) without re-checking length after trimming, which panics
// for routerKey "" or "/" (trimming the leading '/' leaves an empty
// string, so the trailing-slash check indexes position -1); likewise
// for funcName "/".
func (r *RESTfulRouter) ParseTemplatePath(context *HttpContext, funcName string, option *RouterOption) string {
	if 0 == len(funcName) {
		return ""
	}
	path := r.routerKey
	if 0 < len(path) && '/' == path[0] {
		path = path[1:]
	}
	if 0 < len(path) && '/' == path[len(path)-1] {
		path = path[:len(path)-1]
	}
	name := funcName
	if '/' == name[0] {
		name = name[1:]
	}
	if 0 < len(name) && '/' == name[len(name)-1] {
		name = name[:len(name)-1]
	}
	// "[host]/" prefix is only present for multi-project hosting
	hostPath := ""
	host := context.RequestHost()
	if 0 != len(host) {
		hostPath = host + "/"
	}
	return hostPath + path + "/" + name + context.LVServer().TemplateSuffix()
}
// CallFuncAfter invokes the controller's After hook (when the
// controller implements BeforeAfterController). The per-request
// controller instance takes precedence over the shared one.
func (r *RESTfulRouter) CallFuncAfter(context *HttpContext, option *RouterOption) {
	if r.isBeforeAfter {
		if nil != option.RouterData {
			beforeAfter := option.RouterData.(BeforeAfterController)
			beforeAfter.After(context, option)
		} else if nil != r.beforeAfter {
			r.beforeAfter.After(context, option)
		}
	}
}

// RouterKey returns the registered router key, e.g. "/api/object".
func (r *RESTfulRouter) RouterKey() string {
	return r.routerKey
}

// ControllerOption returns the scheme/host restriction option.
func (r *RESTfulRouter) ControllerOption() ControllerOption {
	return r.option.ControllerOption
}

// Info returns a human-readable description of this router.
func (r *RESTfulRouter) Info() string {
	return r.info
}
|
package rpcclient
import (
"context"
"encoding/json"
"fmt"
"math"
"net"
"net/http"
"sync"
"time"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
metrics "github.com/rcrowley/go-metrics"
types "github.com/tendermint/tendermint/rpc/lib/types"
cmn "github.com/tendermint/tmlibs/common"
)
const (
	// writeWait is the time allowed to write a message to the peer.
	writeWait = 10 * time.Second
	// pongWait is the time allowed to read the next pong message from
	// the server.
	pongWait = 30 * time.Second
	// pingPeriod is the interval for sending pings to the server.
	// Must be less than pongWait.
	pingPeriod = (pongWait * 9) / 10
	// maxReconnectAttempts bounds the exponential-backoff redial loop.
	maxReconnectAttempts = 25
)
// WSClient is a JSON-RPC websocket client that reconnects automatically
// on connection failure.
type WSClient struct {
	cmn.BaseService

	conn *websocket.Conn

	Address  string // IP:PORT or /path/to/socket
	Endpoint string // /websocket/url/endpoint
	Dialer   func(string, string) (net.Conn, error)

	// PingPongLatencyTimer records ping/pong round-trip latency samples.
	PingPongLatencyTimer metrics.Timer
	sentLastPingAt       time.Time // when the last ping was written

	// user facing channels, closed only when the client is being stopped.
	ResultsCh chan json.RawMessage
	ErrorsCh  chan error

	// internal channels
	send               chan types.RPCRequest // user requests
	backlog            chan types.RPCRequest // stores a single user request received during a conn failure
	reconnectAfter     chan error            // reconnect requests
	receiveRoutineQuit chan struct{}         // a way for receiveRoutine to close writeRoutine

	// reconnecting is true while reconnect() is running.
	// NOTE(review): read and written without holding mtx in the visible
	// code — possible data race; confirm intended synchronization.
	reconnecting bool

	wg  sync.WaitGroup
	mtx sync.RWMutex
}
// NewWSClient returns a new client for the given remote address (IP:PORT
// or /path/to/socket) and websocket endpoint. Call Start to connect.
func NewWSClient(remoteAddr, endpoint string) *WSClient {
	addr, dialer := makeHTTPDialer(remoteAddr)
	wsClient := &WSClient{
		Address:              addr,
		Dialer:               dialer,
		Endpoint:             endpoint,
		PingPongLatencyTimer: metrics.NewTimer(),
	}
	wsClient.BaseService = *cmn.NewBaseService(nil, "WSClient", wsClient)
	return wsClient
}

// String returns WS client full address, e.g. "localhost:46657 (/websocket)".
func (c *WSClient) String() string {
	return fmt.Sprintf("%s (%s)", c.Address, c.Endpoint)
}
// OnStart implements cmn.Service by dialing a server and creating read and
// write routines. Channels are (re)created here so the routines always
// see fresh ones on (re)start.
func (c *WSClient) OnStart() error {
	err := c.dial()
	if err != nil {
		return err
	}
	c.ResultsCh = make(chan json.RawMessage)
	c.ErrorsCh = make(chan error)
	c.send = make(chan types.RPCRequest)
	// 1 additional error may come from the read/write
	// goroutine depending on which failed first.
	c.reconnectAfter = make(chan error, 1)
	// capacity for 1 request. a user won't be able to send more because the send
	// channel is unbuffered.
	c.backlog = make(chan types.RPCRequest, 1)
	c.startReadWriteRoutines()
	go c.reconnectRoutine()
	return nil
}
// OnStop implements cmn.Service. Cleanup happens in Stop instead.
func (c *WSClient) OnStop() {}

// Stop overrides cmn.Service#Stop. There is no other way to wait until Quit
// channel is closed. Waits for the read/write routines to finish before
// closing the user-facing channels, so nothing can write to them afterwards.
func (c *WSClient) Stop() bool {
	success := c.BaseService.Stop()
	// only close user-facing channels when we can't write to them
	c.wg.Wait()
	close(c.ResultsCh)
	close(c.ErrorsCh)
	return success
}

// IsReconnecting returns true if the client is reconnecting right now.
func (c *WSClient) IsReconnecting() bool {
	return c.reconnecting
}

// IsActive returns true if the client is running and not reconnecting.
func (c *WSClient) IsActive() bool {
	return c.IsRunning() && !c.IsReconnecting()
}
// Send asynchronously sends the given RPCRequest to the server. Results will
// be available on ResultsCh, errors, if any, on ErrorsCh. Blocks until the
// request is accepted by the write routine or ctx is done.
func (c *WSClient) Send(ctx context.Context, request types.RPCRequest) error {
	select {
	case c.send <- request:
		c.Logger.Info("sent a request", "req", request)
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// Call asynchronously calls a given method with map params by sending an
// RPCRequest to the server. Results will be available on ResultsCh, errors,
// if any, on ErrorsCh.
func (c *WSClient) Call(ctx context.Context, method string, params map[string]interface{}) error {
	request, err := types.MapToRequest("", method, params)
	if err != nil {
		return err
	}
	return c.Send(ctx, request)
}

// CallWithArrayParams asynchronously calls a given method with positional
// (array) params by sending an RPCRequest to the server. Results will be
// available on ResultsCh, errors, if any, on ErrorsCh.
func (c *WSClient) CallWithArrayParams(ctx context.Context, method string, params []interface{}) error {
	request, err := types.ArrayToRequest("", method, params)
	if err != nil {
		return err
	}
	return c.Send(ctx, request)
}
///////////////////////////////////////////////////////////////////////////////
// Private methods

// dial opens the websocket connection ("ws://" + Address + Endpoint)
// using the configured net dialer and stores it in c.conn.
func (c *WSClient) dial() error {
	dialer := &websocket.Dialer{
		NetDial: c.Dialer,
		Proxy:   http.ProxyFromEnvironment,
	}
	rHeader := http.Header{}
	conn, _, err := dialer.Dial("ws://"+c.Address+c.Endpoint, rHeader)
	if err != nil {
		return err
	}
	c.conn = conn
	return nil
}
// reconnect tries to redial up to maxReconnectAttempts with exponential
// backoff. It sleeps 2^attempt seconds before each try (including the first),
// returns nil on success, or the last dial error wrapped once the attempt
// budget is exhausted.
// NOTE(review): c.reconnecting is written without holding c.mtx while
// IsReconnecting reads it — possible data race; confirm.
func (c *WSClient) reconnect() error {
attempt := 0
c.reconnecting = true
defer func() {
c.reconnecting = false
}()
for {
c.Logger.Info("reconnecting", "attempt", attempt+1)
// backoff: 2^attempt seconds
d := time.Duration(math.Exp2(float64(attempt)))
time.Sleep(d * time.Second)
err := c.dial()
if err != nil {
c.Logger.Error("failed to redial", "err", err)
} else {
c.Logger.Info("reconnected")
return nil
}
attempt++
if attempt > maxReconnectAttempts {
return errors.Wrap(err, "reached maximum reconnect attempts")
}
}
}
// startReadWriteRoutines registers both I/O goroutines with the WaitGroup
// and (re)creates receiveRoutineQuit so each new connection gets a fresh
// shutdown signal for writeRoutine.
func (c *WSClient) startReadWriteRoutines() {
c.wg.Add(2)
c.receiveRoutineQuit = make(chan struct{})
go c.receiveRoutine()
go c.writeRoutine()
}
// reconnectRoutine serializes reconnect requests: on an error from
// reconnectAfter it waits for both I/O routines to exit, reconnects, drains
// stale errors, and restarts the routines. A failed reconnect stops the
// whole client.
//
// Fix: the routine must NOT return after a successful reconnect — it has to
// keep looping (until Quit) so that future connection failures are handled
// too. Previously it returned here, leaving later failures unserviced.
func (c *WSClient) reconnectRoutine() {
	for {
		select {
		case originalError := <-c.reconnectAfter:
			// wait until writeRoutine and receiveRoutine finish
			c.wg.Wait()
			if err := c.reconnect(); err != nil {
				c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError)
				c.Stop()
				return
			}
			// drain reconnectAfter: errors queued while reconnecting refer
			// to the old, already-replaced connection
		LOOP:
			for {
				select {
				case <-c.reconnectAfter:
				default:
					break LOOP
				}
			}
			c.startReadWriteRoutines()
			// no return: keep serving future reconnect requests
		case <-c.Quit:
			return
		}
	}
}
// The client ensures that there is at most one writer to a connection by
// executing all writes from this goroutine.
//
// writeRoutine multiplexes three write sources onto the connection: the
// one-slot backlog (a request that failed on the previous connection),
// fresh user requests from c.send, and periodic pings. On any write error it
// signals reconnectAfter and exits; receiveRoutineQuit and Quit also stop it.
func (c *WSClient) writeRoutine() {
ticker := time.NewTicker(pingPeriod)
defer func() {
ticker.Stop()
c.conn.Close()
c.wg.Done()
}()
for {
select {
case request := <-c.backlog:
c.conn.SetWriteDeadline(time.Now().Add(writeWait))
err := c.conn.WriteJSON(request)
if err != nil {
c.Logger.Error("failed to resend request", "err", err)
c.reconnectAfter <- err
// add request to the backlog, so we don't lose it
c.backlog <- request
return
}
c.Logger.Info("resend a request", "req", request)
case request := <-c.send:
c.conn.SetWriteDeadline(time.Now().Add(writeWait))
err := c.conn.WriteJSON(request)
if err != nil {
c.Logger.Error("failed to send request", "err", err)
c.reconnectAfter <- err
// add request to the backlog, so we don't lose it
// NOTE(review): if the 1-slot backlog already holds a request, this
// send blocks forever — confirm a second failure before the backlog
// drains is impossible.
c.backlog <- request
return
}
case <-ticker.C:
c.conn.SetWriteDeadline(time.Now().Add(writeWait))
err := c.conn.WriteMessage(websocket.PingMessage, []byte{})
if err != nil {
c.Logger.Error("failed to write ping", "err", err)
c.reconnectAfter <- err
return
}
// record the ping time so the pong handler can compute latency
c.mtx.Lock()
c.sentLastPingAt = time.Now()
c.mtx.Unlock()
c.Logger.Debug("sent ping")
case <-c.receiveRoutineQuit:
return
case <-c.Quit:
// best-effort close frame; the error is deliberately ignored on shutdown
c.conn.WriteMessage(websocket.CloseMessage, []byte{})
return
}
}
}
// The client ensures that there is at most one reader to a connection by
// executing all reads from this goroutine.
//
// receiveRoutine reads messages until an error occurs. An error that is not
// an unexpected close terminates the routine quietly; unexpected closes stop
// writeRoutine (via receiveRoutineQuit) and request a reconnect. Parsed
// results go to ResultsCh, errors to ErrorsCh.
func (c *WSClient) receiveRoutine() {
	defer func() {
		c.conn.Close()
		c.wg.Done()
	}()
	c.conn.SetReadDeadline(time.Now().Add(pongWait))
	c.conn.SetPongHandler(func(string) error {
		// every pong extends the read deadline and updates the latency metric
		c.conn.SetReadDeadline(time.Now().Add(pongWait))
		c.mtx.RLock()
		c.PingPongLatencyTimer.UpdateSince(c.sentLastPingAt)
		c.mtx.RUnlock()
		c.Logger.Debug("got pong")
		return nil
	})
	for {
		_, data, err := c.conn.ReadMessage()
		if err != nil {
			if !websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) {
				return
			}
			c.Logger.Error("failed to read response", "err", err)
			close(c.receiveRoutineQuit)
			c.reconnectAfter <- err
			return
		}
		var response types.RPCResponse
		err = json.Unmarshal(data, &response)
		if err != nil {
			c.Logger.Error("failed to parse response", "err", err, "data", string(data))
			c.ErrorsCh <- err
			continue
		}
		if response.Error != "" {
			// fix: use errors.New, not errors.Errorf — the server-sent text is
			// not a format string and may contain '%' verbs
			c.ErrorsCh <- errors.New(response.Error)
			continue
		}
		c.Logger.Info("got response", "resp", response.Result)
		// NOTE(review): assumes response.Result is non-nil whenever
		// response.Error is empty — confirm against the server contract.
		c.ResultsCh <- *response.Result
	}
}
///////////////////////////////////////////////////////////////////////////////
// Predefined methods

// Subscribe to an event. Note the server must have a "subscribe" route
// defined.
func (c *WSClient) Subscribe(ctx context.Context, eventType string) error {
	return c.Call(ctx, "subscribe", map[string]interface{}{"event": eventType})
}
// Unsubscribe from an event. Note the server must have a "unsubscribe" route
// defined.
func (c *WSClient) Unsubscribe(ctx context.Context, eventType string) error {
	return c.Call(ctx, "unsubscribe", map[string]interface{}{"event": eventType})
}
// UnsubscribeAll from all. Note the server must have a "unsubscribe_all" route
// defined.
func (c *WSClient) UnsubscribeAll(ctx context.Context) error {
	return c.Call(ctx, "unsubscribe_all", map[string]interface{}{})
}
Do not exit from reconnectRoutine after a successful reconnect — it must keep looping so that future connection failures are handled too!
package rpcclient
import (
"context"
"encoding/json"
"fmt"
"math"
"net"
"net/http"
"sync"
"time"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
metrics "github.com/rcrowley/go-metrics"
types "github.com/tendermint/tendermint/rpc/lib/types"
cmn "github.com/tendermint/tmlibs/common"
)
const (
// Time allowed to write a message to the peer.
writeWait = 10 * time.Second
// Time allowed to read the next pong message from the server.
pongWait = 30 * time.Second
// Send pings to server with this period. Must be less than pongWait.
pingPeriod = (pongWait * 9) / 10
// Maximum number of reconnect attempts before the client gives up.
maxReconnectAttempts = 25
)
// WSClient is a websocket JSON-RPC client. It keeps a single connection,
// pings the server periodically, and reconnects (with backoff) on failure.
type WSClient struct {
cmn.BaseService
conn *websocket.Conn
Address string // IP:PORT or /path/to/socket
Endpoint string // /websocket/url/endpoint
Dialer func(string, string) (net.Conn, error)
// PingPongLatencyTimer records the observed ping/pong round-trip latency.
PingPongLatencyTimer metrics.Timer
// sentLastPingAt is written by writeRoutine and read by the pong handler,
// both under mtx.
sentLastPingAt time.Time
// user facing channels, closed only when the client is being stopped.
ResultsCh chan json.RawMessage
ErrorsCh chan error
// internal channels
send chan types.RPCRequest // user requests
backlog chan types.RPCRequest // stores a single user request received during a conn failure
reconnectAfter chan error // reconnect requests
receiveRoutineQuit chan struct{} // a way for receiveRoutine to close writeRoutine
reconnecting bool
wg sync.WaitGroup
mtx sync.RWMutex
}
// NewWSClient returns a new client for the given remote address
// (IP:PORT or /path/to/socket) and websocket endpoint.
func NewWSClient(remoteAddr, endpoint string) *WSClient {
	addr, dialer := makeHTTPDialer(remoteAddr)
	c := &WSClient{
		Address:              addr,
		Dialer:               dialer,
		Endpoint:             endpoint,
		PingPongLatencyTimer: metrics.NewTimer(),
	}
	c.BaseService = *cmn.NewBaseService(nil, "WSClient", c)
	return c
}
// String returns the WS client's full address in the form "address (endpoint)".
func (c *WSClient) String() string {
	return c.Address + " (" + c.Endpoint + ")"
}
// OnStart implements cmn.Service by dialing a server and creating read and
// write routines.
func (c *WSClient) OnStart() error {
err := c.dial()
if err != nil {
return err
}
c.ResultsCh = make(chan json.RawMessage)
c.ErrorsCh = make(chan error)
c.send = make(chan types.RPCRequest)
// Buffered with capacity 1: at most one additional error may arrive from
// the read/write goroutines, depending on which one failed first.
c.reconnectAfter = make(chan error, 1)
// Capacity for exactly 1 request: a user cannot queue more because the
// send channel is unbuffered.
c.backlog = make(chan types.RPCRequest, 1)
c.startReadWriteRoutines()
go c.reconnectRoutine()
return nil
}
// OnStop implements cmn.Service. Nothing to do: teardown is handled by Stop,
// which waits for the I/O routines and closes the user-facing channels.
func (c *WSClient) OnStop() {}
// Stop overrides cmn.Service#Stop. There is no other way to wait until Quit
// channel is closed.
func (c *WSClient) Stop() bool {
success := c.BaseService.Stop()
// Close the user-facing channels only after wg.Wait(), i.e. once both I/O
// routines have exited and can no longer write to them.
c.wg.Wait()
close(c.ResultsCh)
close(c.ErrorsCh)
return success
}
// IsReconnecting returns true if the client is reconnecting right now.
// NOTE(review): reconnecting is read here without holding mtx while
// reconnect() writes it — looks like a data race; confirm and guard if so.
func (c *WSClient) IsReconnecting() bool {
return c.reconnecting
}
// IsActive returns true if the client is running and not reconnecting.
func (c *WSClient) IsActive() bool {
	if c.IsReconnecting() {
		return false
	}
	return c.IsRunning()
}
// Send asynchronously sends the given RPCRequest to the server. Results will
// be available on ResultsCh, errors, if any, on ErrorsCh. Blocks until the
// write routine accepts the request or ctx is done.
func (c *WSClient) Send(ctx context.Context, request types.RPCRequest) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case c.send <- request:
		c.Logger.Info("sent a request", "req", request)
		return nil
	}
}
// Call asynchronously calls a given method by sending an RPCRequest to the
// server. Results will be available on ResultsCh, errors, if any, on ErrorsCh.
func (c *WSClient) Call(ctx context.Context, method string, params map[string]interface{}) error {
	req, err := types.MapToRequest("", method, params)
	if err != nil {
		return err
	}
	return c.Send(ctx, req)
}
// CallWithArrayParams asynchronously calls a given method by sending an
// RPCRequest to the server. Results will be available on ResultsCh, errors, if
// any, on ErrorsCh.
func (c *WSClient) CallWithArrayParams(ctx context.Context, method string, params []interface{}) error {
	req, err := types.ArrayToRequest("", method, params)
	if err != nil {
		return err
	}
	return c.Send(ctx, req)
}
///////////////////////////////////////////////////////////////////////////////
// Private methods

// dial opens the websocket connection to Address+Endpoint and stores it on
// the client. No retries; see reconnect for the retrying variant.
func (c *WSClient) dial() error {
	wsDialer := &websocket.Dialer{
		NetDial: c.Dialer,
		Proxy:   http.ProxyFromEnvironment,
	}
	conn, _, err := wsDialer.Dial("ws://"+c.Address+c.Endpoint, http.Header{})
	if err != nil {
		return err
	}
	c.conn = conn
	return nil
}
// reconnect tries to redial up to maxReconnectAttempts with exponential
// backoff: it sleeps 2^attempt seconds before each try (including the first),
// returns nil on success, or the last dial error wrapped once the attempt
// budget is exhausted. The reconnecting flag is set for the whole duration.
func (c *WSClient) reconnect() error {
attempt := 0
c.reconnecting = true
defer func() {
c.reconnecting = false
}()
for {
c.Logger.Info("reconnecting", "attempt", attempt+1)
// backoff: 2^attempt seconds
d := time.Duration(math.Exp2(float64(attempt)))
time.Sleep(d * time.Second)
err := c.dial()
if err != nil {
c.Logger.Error("failed to redial", "err", err)
} else {
c.Logger.Info("reconnected")
return nil
}
attempt++
if attempt > maxReconnectAttempts {
return errors.Wrap(err, "reached maximum reconnect attempts")
}
}
}
// startReadWriteRoutines (re)creates the quit channel receiveRoutine uses to
// stop writeRoutine, registers both goroutines with the WaitGroup, and
// launches them.
func (c *WSClient) startReadWriteRoutines() {
	c.receiveRoutineQuit = make(chan struct{})
	c.wg.Add(2)
	go c.writeRoutine()
	go c.receiveRoutine()
}
// reconnectRoutine serializes reconnect requests: on an error from
// reconnectAfter it waits for both I/O routines to exit, reconnects, drains
// any stale errors, and restarts the routines. It loops until Quit so that
// later connection failures are handled too; a failed reconnect stops the
// whole client.
func (c *WSClient) reconnectRoutine() {
for {
select {
case originalError := <-c.reconnectAfter:
// wait until writeRoutine and receiveRoutine finish
c.wg.Wait()
err := c.reconnect()
if err != nil {
c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError)
c.Stop()
return
} else {
// drain reconnectAfter: errors queued while reconnecting refer to
// the old, already-replaced connection
LOOP:
for {
select {
case <-c.reconnectAfter:
default:
break LOOP
}
}
c.startReadWriteRoutines()
}
case <-c.Quit:
return
}
}
}
// The client ensures that there is at most one writer to a connection by
// executing all writes from this goroutine.
//
// writeRoutine multiplexes three write sources onto the connection: the
// one-slot backlog (a request that failed on the previous connection),
// fresh user requests from c.send, and periodic pings. On any write error it
// signals reconnectAfter and exits; receiveRoutineQuit and Quit also stop it.
func (c *WSClient) writeRoutine() {
ticker := time.NewTicker(pingPeriod)
defer func() {
ticker.Stop()
c.conn.Close()
c.wg.Done()
}()
for {
select {
case request := <-c.backlog:
c.conn.SetWriteDeadline(time.Now().Add(writeWait))
err := c.conn.WriteJSON(request)
if err != nil {
c.Logger.Error("failed to resend request", "err", err)
c.reconnectAfter <- err
// add request to the backlog, so we don't lose it
c.backlog <- request
return
}
c.Logger.Info("resend a request", "req", request)
case request := <-c.send:
c.conn.SetWriteDeadline(time.Now().Add(writeWait))
err := c.conn.WriteJSON(request)
if err != nil {
c.Logger.Error("failed to send request", "err", err)
c.reconnectAfter <- err
// add request to the backlog, so we don't lose it
// NOTE(review): if the 1-slot backlog already holds a request, this
// send blocks forever — confirm a second failure before the backlog
// drains is impossible.
c.backlog <- request
return
}
case <-ticker.C:
c.conn.SetWriteDeadline(time.Now().Add(writeWait))
err := c.conn.WriteMessage(websocket.PingMessage, []byte{})
if err != nil {
c.Logger.Error("failed to write ping", "err", err)
c.reconnectAfter <- err
return
}
// record the ping time so the pong handler can compute latency
c.mtx.Lock()
c.sentLastPingAt = time.Now()
c.mtx.Unlock()
c.Logger.Debug("sent ping")
case <-c.receiveRoutineQuit:
return
case <-c.Quit:
// best-effort close frame; the error is deliberately ignored on shutdown
c.conn.WriteMessage(websocket.CloseMessage, []byte{})
return
}
}
}
// The client ensures that there is at most one reader to a connection by
// executing all reads from this goroutine.
//
// receiveRoutine reads messages until an error occurs. An error that is not
// an unexpected close terminates the routine quietly; unexpected closes stop
// writeRoutine (via receiveRoutineQuit) and request a reconnect. Parsed
// results go to ResultsCh, errors to ErrorsCh.
func (c *WSClient) receiveRoutine() {
	defer func() {
		c.conn.Close()
		c.wg.Done()
	}()
	c.conn.SetReadDeadline(time.Now().Add(pongWait))
	c.conn.SetPongHandler(func(string) error {
		// every pong extends the read deadline and updates the latency metric
		c.conn.SetReadDeadline(time.Now().Add(pongWait))
		c.mtx.RLock()
		c.PingPongLatencyTimer.UpdateSince(c.sentLastPingAt)
		c.mtx.RUnlock()
		c.Logger.Debug("got pong")
		return nil
	})
	for {
		_, data, err := c.conn.ReadMessage()
		if err != nil {
			if !websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) {
				return
			}
			c.Logger.Error("failed to read response", "err", err)
			close(c.receiveRoutineQuit)
			c.reconnectAfter <- err
			return
		}
		var response types.RPCResponse
		err = json.Unmarshal(data, &response)
		if err != nil {
			c.Logger.Error("failed to parse response", "err", err, "data", string(data))
			c.ErrorsCh <- err
			continue
		}
		if response.Error != "" {
			// fix: use errors.New, not errors.Errorf — the server-sent text is
			// not a format string and may contain '%' verbs
			c.ErrorsCh <- errors.New(response.Error)
			continue
		}
		c.Logger.Info("got response", "resp", response.Result)
		// NOTE(review): assumes response.Result is non-nil whenever
		// response.Error is empty — confirm against the server contract.
		c.ResultsCh <- *response.Result
	}
}
///////////////////////////////////////////////////////////////////////////////
// Predefined methods

// Subscribe to an event. Note the server must have a "subscribe" route
// defined.
func (c *WSClient) Subscribe(ctx context.Context, eventType string) error {
	return c.Call(ctx, "subscribe", map[string]interface{}{"event": eventType})
}
// Unsubscribe from an event. Note the server must have a "unsubscribe" route
// defined.
func (c *WSClient) Unsubscribe(ctx context.Context, eventType string) error {
	return c.Call(ctx, "unsubscribe", map[string]interface{}{"event": eventType})
}
// UnsubscribeAll from all. Note the server must have a "unsubscribe_all" route
// defined.
func (c *WSClient) UnsubscribeAll(ctx context.Context) error {
	return c.Call(ctx, "unsubscribe_all", map[string]interface{}{})
}
|
package phraseapp
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"os"
"path/filepath"
"strconv"
"time"
)
// The following types are JSON-mapped data models for the PhraseApp API.
// Field-to-wire mapping is given by the json struct tags. "*Details" and
// "*With*" variants embed their base type and add fields; "*Preview" types
// are reduced views used inside other models.
type AffectedCount struct {
RecordsAffected int64 `json:"records_affected"`
}
type AffectedResources struct {
RecordsAffected int64 `json:"records_affected"`
}
type Authorization struct {
CreatedAt *time.Time `json:"created_at"`
ExpiresAt *time.Time `json:"expires_at"`
HashedToken string `json:"hashed_token"`
ID string `json:"id"`
Note string `json:"note"`
Scopes []string `json:"scopes"`
TokenLastEight string `json:"token_last_eight"`
UpdatedAt *time.Time `json:"updated_at"`
}
// AuthorizationWithToken embeds Authorization and additionally carries the
// token itself.
type AuthorizationWithToken struct {
Authorization
Token string `json:"token"`
}
type BlacklistedKey struct {
CreatedAt *time.Time `json:"created_at"`
ID string `json:"id"`
Name string `json:"name"`
UpdatedAt *time.Time `json:"updated_at"`
}
type Comment struct {
CreatedAt *time.Time `json:"created_at"`
ID string `json:"id"`
Message string `json:"message"`
UpdatedAt *time.Time `json:"updated_at"`
User *UserPreview `json:"user"`
}
type Format struct {
ApiName string `json:"api_name"`
DefaultEncoding string `json:"default_encoding"`
DefaultFile string `json:"default_file"`
Description string `json:"description"`
Exportable bool `json:"exportable"`
Extension string `json:"extension"`
Importable bool `json:"importable"`
Name string `json:"name"`
}
type KeyPreview struct {
ID string `json:"id"`
Name string `json:"name"`
Plural bool `json:"plural"`
}
type Locale struct {
Code string `json:"code"`
CreatedAt *time.Time `json:"created_at"`
Default bool `json:"default"`
ID string `json:"id"`
Main bool `json:"main"`
Name string `json:"name"`
PluralForms []string `json:"plural_forms"`
Rtl bool `json:"rtl"`
SourceLocale *LocalePreview `json:"source_locale"`
UpdatedAt *time.Time `json:"updated_at"`
}
// LocaleDetails embeds Locale and adds per-locale statistics.
type LocaleDetails struct {
Locale
Statistics *LocaleStatistics `json:"statistics"`
}
type LocalePreview struct {
Code string `json:"code"`
ID string `json:"id"`
Name string `json:"name"`
}
type LocaleStatistics struct {
KeysTotalCount int64 `json:"keys_total_count"`
KeysUntranslatedCount int64 `json:"keys_untranslated_count"`
MissingWordsCount int64 `json:"missing_words_count"`
TranslationsCompletedCount int64 `json:"translations_completed_count"`
TranslationsUnverifiedCount int64 `json:"translations_unverified_count"`
UnverifiedWordsCount int64 `json:"unverified_words_count"`
WordsTotalCount int64 `json:"words_total_count"`
}
type Project struct {
CreatedAt *time.Time `json:"created_at"`
ID string `json:"id"`
MainFormat string `json:"main_format"`
Name string `json:"name"`
UpdatedAt *time.Time `json:"updated_at"`
}
// ProjectDetails embeds Project and adds the translation-memory flag.
type ProjectDetails struct {
Project
SharesTranslationMemory bool `json:"shares_translation_memory"`
}
type StatisticsListItem struct {
Locale *LocalePreview `json:"locale"`
Statistics StatisticsType `json:"statistics"`
}
type StatisticsType struct {
KeysTotalCount int64 `json:"keys_total_count"`
KeysUntranslatedCount int64 `json:"keys_untranslated_count"`
TranslationsCompletedCount int64 `json:"translations_completed_count"`
TranslationsUnverifiedCount int64 `json:"translations_unverified_count"`
}
type Styleguide struct {
CreatedAt *time.Time `json:"created_at"`
ID string `json:"id"`
Title string `json:"title"`
UpdatedAt *time.Time `json:"updated_at"`
}
// StyleguideDetails embeds Styleguide and adds the full set of style fields.
type StyleguideDetails struct {
Styleguide
Audience string `json:"audience"`
Business string `json:"business"`
CompanyBranding string `json:"company_branding"`
Formatting string `json:"formatting"`
GlossaryTerms string `json:"glossary_terms"`
GrammarConsistency string `json:"grammar_consistency"`
GrammaticalPerson string `json:"grammatical_person"`
LiteralTranslation string `json:"literal_translation"`
OverallTone string `json:"overall_tone"`
PublicUrl string `json:"public_url"`
Samples string `json:"samples"`
TargetAudience string `json:"target_audience"`
VocabularyType string `json:"vocabulary_type"`
}
type StyleguidePreview struct {
ID string `json:"id"`
Title string `json:"title"`
}
type SummaryType struct {
LocalesCreated int64 `json:"locales_created"`
TagsCreated int64 `json:"tags_created"`
TranslationKeysCreated int64 `json:"translation_keys_created"`
TranslationsCreated int64 `json:"translations_created"`
TranslationsUpdated int64 `json:"translations_updated"`
}
type Tag struct {
CreatedAt *time.Time `json:"created_at"`
KeysCount int64 `json:"keys_count"`
Name string `json:"name"`
UpdatedAt *time.Time `json:"updated_at"`
}
// TagWithStats embeds Tag and adds per-locale statistics.
type TagWithStats struct {
Tag
Statistics []*StatisticsListItem `json:"statistics"`
}
type Translation struct {
Content string `json:"content"`
CreatedAt *time.Time `json:"created_at"`
Excluded bool `json:"excluded"`
ID string `json:"id"`
Key *KeyPreview `json:"key"`
Locale *LocalePreview `json:"locale"`
Placeholders []string `json:"placeholders"`
PluralSuffix string `json:"plural_suffix"`
Unverified bool `json:"unverified"`
UpdatedAt *time.Time `json:"updated_at"`
}
// TranslationDetails embeds Translation and adds author and word count.
type TranslationDetails struct {
Translation
User *UserPreview `json:"user"`
WordCount int64 `json:"word_count"`
}
type TranslationKey struct {
CreatedAt *time.Time `json:"created_at"`
DataType string `json:"data_type"`
Description string `json:"description"`
ID string `json:"id"`
Name string `json:"name"`
NameHash string `json:"name_hash"`
Plural bool `json:"plural"`
Tags []string `json:"tags"`
UpdatedAt *time.Time `json:"updated_at"`
}
// TranslationKeyDetails embeds TranslationKey and adds extended attributes.
type TranslationKeyDetails struct {
TranslationKey
CommentsCount int64 `json:"comments_count"`
FormatValueType string `json:"format_value_type"`
MaxCharactersAllowed int64 `json:"max_characters_allowed"`
NamePlural string `json:"name_plural"`
OriginalFile string `json:"original_file"`
ScreenshotUrl string `json:"screenshot_url"`
Unformatted bool `json:"unformatted"`
XmlSpacePreserve bool `json:"xml_space_preserve"`
}
type TranslationOrder struct {
AmountInCents int64 `json:"amount_in_cents"`
CreatedAt *time.Time `json:"created_at"`
Currency string `json:"currency"`
ID string `json:"id"`
Lsp string `json:"lsp"`
Message string `json:"message"`
Priority bool `json:"priority"`
ProgressPercent int64 `json:"progress_percent"`
Quality bool `json:"quality"`
SourceLocale *LocalePreview `json:"source_locale"`
State string `json:"state"`
Styleguide *StyleguidePreview `json:"styleguide"`
Tag string `json:"tag"`
TargetLocales []*LocalePreview `json:"target_locales"`
TranslationType string `json:"translation_type"`
UnverifyTranslationsUponDelivery bool `json:"unverify_translations_upon_delivery"`
UpdatedAt *time.Time `json:"updated_at"`
}
type TranslationVersion struct {
ChangedAt *time.Time `json:"changed_at"`
Content string `json:"content"`
CreatedAt *time.Time `json:"created_at"`
ID string `json:"id"`
Key *KeyPreview `json:"key"`
Locale *LocalePreview `json:"locale"`
PluralSuffix string `json:"plural_suffix"`
UpdatedAt *time.Time `json:"updated_at"`
}
// TranslationVersionWithUser embeds TranslationVersion and adds the author.
type TranslationVersionWithUser struct {
TranslationVersion
User *UserPreview `json:"user"`
}
type Upload struct {
CreatedAt *time.Time `json:"created_at"`
Filename string `json:"filename"`
Format string `json:"format"`
ID string `json:"id"`
State string `json:"state"`
Summary SummaryType `json:"summary"`
UpdatedAt *time.Time `json:"updated_at"`
}
type User struct {
CreatedAt *time.Time `json:"created_at"`
Email string `json:"email"`
ID string `json:"id"`
Name string `json:"name"`
Position string `json:"position"`
UpdatedAt *time.Time `json:"updated_at"`
Username string `json:"username"`
}
type UserPreview struct {
ID string `json:"id"`
Name string `json:"name"`
Username string `json:"username"`
}
type Webhook struct {
Active bool `json:"active"`
CallbackUrl string `json:"callback_url"`
CreatedAt *time.Time `json:"created_at"`
Description string `json:"description"`
Events []string `json:"events"`
ID string `json:"id"`
UpdatedAt *time.Time `json:"updated_at"`
}
// AuthorizationParams are the JSON-encodable request parameters for
// authorization endpoints; nil fields are omitted from the payload.
type AuthorizationParams struct {
	ExpiresAt **time.Time `json:"expires_at,omitempty"`
	Note *string `json:"note,omitempty"`
	Scopes []string `json:"scopes,omitempty"`
}

// ApplyDefaults decodes the given defaults map into a fresh AuthorizationParams.
// On a marshal/unmarshal error the receiver is returned unchanged with the error.
// NOTE(review): on success the receiver's own fields are discarded — the result
// holds only the decoded defaults; confirm this is intended.
func (params *AuthorizationParams) ApplyDefaults(defaults map[string]interface{}) (*AuthorizationParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(AuthorizationParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// BlacklistedKeyParams are the JSON-encodable request parameters for
// blacklisted-key endpoints; nil fields are omitted from the payload.
type BlacklistedKeyParams struct {
	Name *string `json:"name,omitempty"`
}

// ApplyDefaults decodes the given defaults map into a fresh BlacklistedKeyParams.
// On a marshal/unmarshal error the receiver is returned unchanged with the error.
// NOTE(review): on success the receiver's own fields are discarded.
func (params *BlacklistedKeyParams) ApplyDefaults(defaults map[string]interface{}) (*BlacklistedKeyParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(BlacklistedKeyParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// CommentParams are the JSON-encodable request parameters for comment
// endpoints; nil fields are omitted from the payload.
type CommentParams struct {
	Message *string `json:"message,omitempty"`
}

// ApplyDefaults decodes the given defaults map into a fresh CommentParams.
// On a marshal/unmarshal error the receiver is returned unchanged with the error.
// NOTE(review): on success the receiver's own fields are discarded.
func (params *CommentParams) ApplyDefaults(defaults map[string]interface{}) (*CommentParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(CommentParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// TranslationKeyParams are the JSON-encodable request parameters for
// translation-key endpoints; nil fields are omitted from the payload.
type TranslationKeyParams struct {
	DataType *string `json:"data_type,omitempty"`
	Description *string `json:"description,omitempty"`
	LocalizedFormatKey *string `json:"localized_format_key,omitempty"`
	LocalizedFormatString *string `json:"localized_format_string,omitempty"`
	MaxCharactersAllowed *int64 `json:"max_characters_allowed,omitempty"`
	Name *string `json:"name,omitempty"`
	NamePlural *string `json:"name_plural,omitempty"`
	OriginalFile *string `json:"original_file,omitempty"`
	Plural *bool `json:"plural,omitempty"`
	RemoveScreenshot *bool `json:"remove_screenshot,omitempty"`
	Screenshot *string `json:"screenshot,omitempty"`
	Tags *string `json:"tags,omitempty"`
	Unformatted *bool `json:"unformatted,omitempty"`
	XmlSpacePreserve *bool `json:"xml_space_preserve,omitempty"`
}

// ApplyDefaults decodes the given defaults map into a fresh TranslationKeyParams.
// On a marshal/unmarshal error the receiver is returned unchanged with the error.
// NOTE(review): on success the receiver's own fields are discarded.
func (params *TranslationKeyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationKeyParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(TranslationKeyParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// LocaleParams are the JSON-encodable request parameters for locale
// endpoints; nil fields are omitted from the payload.
type LocaleParams struct {
	Code *string `json:"code,omitempty"`
	Default *bool `json:"default,omitempty"`
	Main *bool `json:"main,omitempty"`
	Name *string `json:"name,omitempty"`
	Rtl *bool `json:"rtl,omitempty"`
	SourceLocaleID *string `json:"source_locale_id,omitempty"`
}

// ApplyDefaults decodes the given defaults map into a fresh LocaleParams.
// On a marshal/unmarshal error the receiver is returned unchanged with the error.
// NOTE(review): on success the receiver's own fields are discarded.
func (params *LocaleParams) ApplyDefaults(defaults map[string]interface{}) (*LocaleParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(LocaleParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// TranslationOrderParams are the JSON-encodable request parameters for
// translation-order endpoints; nil fields are omitted from the payload.
type TranslationOrderParams struct {
	Category *string `json:"category,omitempty"`
	IncludeUntranslatedKeys *bool `json:"include_untranslated_keys,omitempty"`
	IncludeUnverifiedTranslations *bool `json:"include_unverified_translations,omitempty"`
	Lsp *string `json:"lsp,omitempty"`
	Message *string `json:"message,omitempty"`
	Priority *bool `json:"priority,omitempty"`
	Quality *bool `json:"quality,omitempty"`
	SourceLocaleID *string `json:"source_locale_id,omitempty"`
	StyleguideID *string `json:"styleguide_id,omitempty"`
	Tag *string `json:"tag,omitempty"`
	TargetLocaleIDs []string `json:"target_locale_ids,omitempty"`
	TranslationType *string `json:"translation_type,omitempty"`
	UnverifyTranslationsUponDelivery *bool `json:"unverify_translations_upon_delivery,omitempty"`
}

// ApplyDefaults decodes the given defaults map into a fresh TranslationOrderParams.
// On a marshal/unmarshal error the receiver is returned unchanged with the error.
// NOTE(review): on success the receiver's own fields are discarded.
func (params *TranslationOrderParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationOrderParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(TranslationOrderParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// ProjectParams are the JSON-encodable request parameters for project
// endpoints; nil fields are omitted from the payload.
type ProjectParams struct {
	MainFormat *string `json:"main_format,omitempty"`
	Name *string `json:"name,omitempty"`
	SharesTranslationMemory *bool `json:"shares_translation_memory,omitempty"`
}

// ApplyDefaults decodes the given defaults map into a fresh ProjectParams.
// On a marshal/unmarshal error the receiver is returned unchanged with the error.
// NOTE(review): on success the receiver's own fields are discarded.
func (params *ProjectParams) ApplyDefaults(defaults map[string]interface{}) (*ProjectParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(ProjectParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// StyleguideParams are the JSON-encodable request parameters for styleguide
// endpoints; nil fields are omitted from the payload.
type StyleguideParams struct {
	Audience *string `json:"audience,omitempty"`
	Business *string `json:"business,omitempty"`
	CompanyBranding *string `json:"company_branding,omitempty"`
	Formatting *string `json:"formatting,omitempty"`
	GlossaryTerms *string `json:"glossary_terms,omitempty"`
	GrammarConsistency *string `json:"grammar_consistency,omitempty"`
	GrammaticalPerson *string `json:"grammatical_person,omitempty"`
	LiteralTranslation *string `json:"literal_translation,omitempty"`
	OverallTone *string `json:"overall_tone,omitempty"`
	Samples *string `json:"samples,omitempty"`
	TargetAudience *string `json:"target_audience,omitempty"`
	Title *string `json:"title,omitempty"`
	VocabularyType *string `json:"vocabulary_type,omitempty"`
}

// ApplyDefaults decodes the given defaults map into a fresh StyleguideParams.
// On a marshal/unmarshal error the receiver is returned unchanged with the error.
// NOTE(review): on success the receiver's own fields are discarded.
func (params *StyleguideParams) ApplyDefaults(defaults map[string]interface{}) (*StyleguideParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(StyleguideParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// TagParams are the JSON-encodable request parameters for tag endpoints;
// nil fields are omitted from the payload.
type TagParams struct {
	Name *string `json:"name,omitempty"`
}

// ApplyDefaults decodes the given defaults map into a fresh TagParams.
// On a marshal/unmarshal error the receiver is returned unchanged with the error.
// NOTE(review): on success the receiver's own fields are discarded.
func (params *TagParams) ApplyDefaults(defaults map[string]interface{}) (*TagParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(TagParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// TranslationParams are the JSON-encodable request parameters for
// translation endpoints; nil fields are omitted from the payload.
type TranslationParams struct {
	Content *string `json:"content,omitempty"`
	Excluded *bool `json:"excluded,omitempty"`
	KeyID *string `json:"key_id,omitempty"`
	LocaleID *string `json:"locale_id,omitempty"`
	PluralSuffix *string `json:"plural_suffix,omitempty"`
	Unverified *bool `json:"unverified,omitempty"`
}

// ApplyDefaults decodes the given defaults map into a fresh TranslationParams.
// On a marshal/unmarshal error the receiver is returned unchanged with the error.
// NOTE(review): on success the receiver's own fields are discarded.
func (params *TranslationParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(TranslationParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// UploadParams are the JSON-encodable request parameters for upload
// endpoints; nil fields are omitted from the payload.
type UploadParams struct {
	ConvertEmoji *bool `json:"convert_emoji,omitempty"`
	File *string `json:"file,omitempty"`
	FileEncoding *string `json:"file_encoding,omitempty"`
	FileFormat *string `json:"file_format,omitempty"`
	LocaleID *string `json:"locale_id,omitempty"`
	SkipUnverification *bool `json:"skip_unverification,omitempty"`
	SkipUploadTags *bool `json:"skip_upload_tags,omitempty"`
	Tags *string `json:"tags,omitempty"`
	UpdateTranslations *bool `json:"update_translations,omitempty"`
}

// ApplyDefaults decodes the given defaults map into a fresh UploadParams.
// On a marshal/unmarshal error the receiver is returned unchanged with the error.
// NOTE(review): on success the receiver's own fields are discarded.
func (params *UploadParams) ApplyDefaults(defaults map[string]interface{}) (*UploadParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(UploadParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// WebhookParams are the JSON-encodable request parameters for webhook
// endpoints; nil fields are omitted from the payload.
type WebhookParams struct {
	Active *bool `json:"active,omitempty"`
	CallbackUrl *string `json:"callback_url,omitempty"`
	Description *string `json:"description,omitempty"`
	Events *string `json:"events,omitempty"`
}

// ApplyDefaults decodes the given defaults map into a fresh WebhookParams.
// On a marshal/unmarshal error the receiver is returned unchanged with the error.
// NOTE(review): on success the receiver's own fields are discarded.
func (params *WebhookParams) ApplyDefaults(defaults map[string]interface{}) (*WebhookParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(WebhookParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// Create a new authorization.
func (client *Client) AuthorizationCreate(params *AuthorizationParams) (*AuthorizationWithToken, error) {
retVal := new(AuthorizationWithToken)
err := func() error {
url := fmt.Sprintf("/v2/authorizations")
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Delete an existing authorization. API calls using that token will stop working.
// A 204 response is treated as success; the empty body is closed and discarded.
func (client *Client) AuthorizationDelete(id string) error {
	url := fmt.Sprintf("/v2/authorizations/%s", id)
	rc, err := client.sendRequest("DELETE", url, "", nil, 204)
	if err != nil {
		return err
	}
	defer rc.Close()
	return nil
}
// Get details on a single authorization.
// A 200 response is expected; the body is JSON-decoded into an Authorization.
func (client *Client) AuthorizationShow(id string) (*Authorization, error) {
	retVal := new(Authorization)
	url := fmt.Sprintf("/v2/authorizations/%s", id)
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	reader := io.Reader(rc)
	if Debug {
		// mirror the response body to stderr for debugging
		reader = io.TeeReader(rc, os.Stderr)
	}
	return retVal, json.NewDecoder(reader).Decode(&retVal)
}
// Update an existing authorization.
func (client *Client) AuthorizationUpdate(id string, params *AuthorizationParams) (*Authorization, error) {
retVal := new(Authorization)
err := func() error {
url := fmt.Sprintf("/v2/authorizations/%s", id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// AuthorizationsList lists all your authorizations, paginated with page and
// perPage. A 200 status is expected; the body is decoded into a slice.
func (client *Client) AuthorizationsList(page, perPage int) ([]*Authorization, error) {
	retVal := []*Authorization{}
	err := func() error {
		// fixed: constant path needs no fmt.Sprintf (vet: no-arg format call)
		url := "/v2/authorizations"
		rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
		if err != nil {
			return err
		}
		defer rc.Close()
		var reader io.Reader
		if Debug {
			// mirror the response body to stderr for debugging
			reader = io.TeeReader(rc, os.Stderr)
		} else {
			reader = rc
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}()
	return retVal, err
}
// Create a new rule for blacklisting keys.
func (client *Client) BlacklistedKeyCreate(project_id string, params *BlacklistedKeyParams) (*BlacklistedKey, error) {
retVal := new(BlacklistedKey)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Delete an existing rule for blacklisting keys.
func (client *Client) BlacklistedKeyDelete(project_id, id string) error {
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys/%s", project_id, id)
rc, err := client.sendRequest("DELETE", url, "", nil, 204)
if err != nil {
return err
}
defer rc.Close()
return nil
}()
return err
}
// Get details on a single rule for blacklisting keys for a given project.
func (client *Client) BlacklistedKeyShow(project_id, id string) (*BlacklistedKey, error) {
retVal := new(BlacklistedKey)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys/%s", project_id, id)
rc, err := client.sendRequest("GET", url, "", nil, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Update an existing rule for blacklisting keys.
func (client *Client) BlacklistedKeyUpdate(project_id, id string, params *BlacklistedKeyParams) (*BlacklistedKey, error) {
retVal := new(BlacklistedKey)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// List all rules for blacklisting keys for the given project.
func (client *Client) BlacklistedKeysList(project_id string, page, perPage int) ([]*BlacklistedKey, error) {
retVal := []*BlacklistedKey{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys", project_id)
rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Create a new comment for a key.
func (client *Client) CommentCreate(project_id, key_id string, params *CommentParams) (*Comment, error) {
retVal := new(Comment)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments", project_id, key_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Delete an existing comment.
func (client *Client) CommentDelete(project_id, key_id, id string) error {
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s", project_id, key_id, id)
rc, err := client.sendRequest("DELETE", url, "", nil, 204)
if err != nil {
return err
}
defer rc.Close()
return nil
}()
return err
}
// Check if comment was marked as read. Returns 204 if read, 404 if unread.
func (client *Client) CommentMarkCheck(project_id, key_id, id string) error {
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s/read", project_id, key_id, id)
rc, err := client.sendRequest("GET", url, "", nil, 204)
if err != nil {
return err
}
defer rc.Close()
return nil
}()
return err
}
// Mark a comment as read.
func (client *Client) CommentMarkRead(project_id, key_id, id string) error {
	path := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s/read", project_id, key_id, id)
	body, err := client.sendRequest("PATCH", path, "", nil, 204)
	if err != nil {
		return err
	}
	// A 204 response carries no payload; the close error is ignored,
	// matching the original deferred Close.
	_ = body.Close()
	return nil
}
// Mark a comment as unread.
func (client *Client) CommentMarkUnread(project_id, key_id, id string) error {
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s/read", project_id, key_id, id)
rc, err := client.sendRequest("DELETE", url, "", nil, 204)
if err != nil {
return err
}
defer rc.Close()
return nil
}()
return err
}
// Get details on a single comment.
func (client *Client) CommentShow(project_id, key_id, id string) (*Comment, error) {
retVal := new(Comment)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s", project_id, key_id, id)
rc, err := client.sendRequest("GET", url, "", nil, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Update an existing comment.
func (client *Client) CommentUpdate(project_id, key_id, id string, params *CommentParams) (*Comment, error) {
retVal := new(Comment)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s", project_id, key_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// List all comments for a key.
func (client *Client) CommentsList(project_id, key_id string, page, perPage int) ([]*Comment, error) {
retVal := []*Comment{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments", project_id, key_id)
rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Get a handy list of all localization file formats supported in PhraseApp.
func (client *Client) FormatsList(page, perPage int) ([]*Format, error) {
	retVal := []*Format{}
	err := func() error {
		// The path is constant; fmt.Sprintf with no verbs was redundant
		// (flagged by go vet / staticcheck S1039).
		url := "/v2/formats"
		rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
		if err != nil {
			return err
		}
		defer rc.Close()
		// In debug mode the response body is mirrored to stderr while decoding.
		var reader io.Reader
		if Debug {
			reader = io.TeeReader(rc, os.Stderr)
		} else {
			reader = rc
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}()
	return retVal, err
}
// Create a new key.
//
// The payload is sent as multipart/form-data because it may include a
// screenshot file upload alongside the scalar fields.
func (client *Client) KeyCreate(project_id string, params *TranslationKeyParams) (*TranslationKeyDetails, error) {
	retVal := new(TranslationKeyDetails)
	err := func() error {
		url := fmt.Sprintf("/v2/projects/%s/keys", project_id)
		paramsBuf := bytes.NewBuffer(nil)
		writer := multipart.NewWriter(paramsBuf)
		ctype := writer.FormDataContentType()
		if params.DataType != nil {
			err := writer.WriteField("data_type", *params.DataType)
			if err != nil {
				return err
			}
		}
		if params.Description != nil {
			err := writer.WriteField("description", *params.Description)
			if err != nil {
				return err
			}
		}
		if params.LocalizedFormatKey != nil {
			err := writer.WriteField("localized_format_key", *params.LocalizedFormatKey)
			if err != nil {
				return err
			}
		}
		if params.LocalizedFormatString != nil {
			err := writer.WriteField("localized_format_string", *params.LocalizedFormatString)
			if err != nil {
				return err
			}
		}
		if params.MaxCharactersAllowed != nil {
			err := writer.WriteField("max_characters_allowed", strconv.FormatInt(*params.MaxCharactersAllowed, 10))
			if err != nil {
				return err
			}
		}
		if params.Name != nil {
			err := writer.WriteField("name", *params.Name)
			if err != nil {
				return err
			}
		}
		if params.NamePlural != nil {
			err := writer.WriteField("name_plural", *params.NamePlural)
			if err != nil {
				return err
			}
		}
		if params.OriginalFile != nil {
			err := writer.WriteField("original_file", *params.OriginalFile)
			if err != nil {
				return err
			}
		}
		if params.Plural != nil {
			err := writer.WriteField("plural", strconv.FormatBool(*params.Plural))
			if err != nil {
				return err
			}
		}
		if params.RemoveScreenshot != nil {
			err := writer.WriteField("remove_screenshot", strconv.FormatBool(*params.RemoveScreenshot))
			if err != nil {
				return err
			}
		}
		if params.Screenshot != nil {
			part, err := writer.CreateFormFile("screenshot", filepath.Base(*params.Screenshot))
			if err != nil {
				return err
			}
			file, err := os.Open(*params.Screenshot)
			if err != nil {
				return err
			}
			_, err = io.Copy(part, file)
			// BUG FIX: previously the file was not closed when io.Copy
			// failed, leaking the descriptor. Close unconditionally and
			// report whichever error occurred first.
			if cerr := file.Close(); err == nil {
				err = cerr
			}
			if err != nil {
				return err
			}
		}
		if params.Tags != nil {
			err := writer.WriteField("tags", *params.Tags)
			if err != nil {
				return err
			}
		}
		if params.Unformatted != nil {
			err := writer.WriteField("unformatted", strconv.FormatBool(*params.Unformatted))
			if err != nil {
				return err
			}
		}
		if params.XmlSpacePreserve != nil {
			err := writer.WriteField("xml_space_preserve", strconv.FormatBool(*params.XmlSpacePreserve))
			if err != nil {
				return err
			}
		}
		// BUG FIX: the error from this WriteField was previously ignored.
		if err := writer.WriteField("utf8", "✓"); err != nil {
			return err
		}
		// BUG FIX: Close flushes the terminating multipart boundary; its
		// error was previously discarded, which could send a truncated body.
		if err := writer.Close(); err != nil {
			return err
		}
		rc, err := client.sendRequest("POST", url, ctype, paramsBuf, 201)
		if err != nil {
			return err
		}
		defer rc.Close()
		// In debug mode the response body is mirrored to stderr while decoding.
		var reader io.Reader
		if Debug {
			reader = io.TeeReader(rc, os.Stderr)
		} else {
			reader = rc
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}()
	return retVal, err
}
// Delete an existing key.
func (client *Client) KeyDelete(project_id, id string) error {
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s", project_id, id)
rc, err := client.sendRequest("DELETE", url, "", nil, 204)
if err != nil {
return err
}
defer rc.Close()
return nil
}()
return err
}
// Get details on a single key for a given project.
func (client *Client) KeyShow(project_id, id string) (*TranslationKeyDetails, error) {
retVal := new(TranslationKeyDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s", project_id, id)
rc, err := client.sendRequest("GET", url, "", nil, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Update an existing key.
//
// The payload is sent as multipart/form-data because it may include a
// screenshot file upload alongside the scalar fields.
func (client *Client) KeyUpdate(project_id, id string, params *TranslationKeyParams) (*TranslationKeyDetails, error) {
	retVal := new(TranslationKeyDetails)
	err := func() error {
		url := fmt.Sprintf("/v2/projects/%s/keys/%s", project_id, id)
		paramsBuf := bytes.NewBuffer(nil)
		writer := multipart.NewWriter(paramsBuf)
		ctype := writer.FormDataContentType()
		if params.DataType != nil {
			err := writer.WriteField("data_type", *params.DataType)
			if err != nil {
				return err
			}
		}
		if params.Description != nil {
			err := writer.WriteField("description", *params.Description)
			if err != nil {
				return err
			}
		}
		if params.LocalizedFormatKey != nil {
			err := writer.WriteField("localized_format_key", *params.LocalizedFormatKey)
			if err != nil {
				return err
			}
		}
		if params.LocalizedFormatString != nil {
			err := writer.WriteField("localized_format_string", *params.LocalizedFormatString)
			if err != nil {
				return err
			}
		}
		if params.MaxCharactersAllowed != nil {
			err := writer.WriteField("max_characters_allowed", strconv.FormatInt(*params.MaxCharactersAllowed, 10))
			if err != nil {
				return err
			}
		}
		if params.Name != nil {
			err := writer.WriteField("name", *params.Name)
			if err != nil {
				return err
			}
		}
		if params.NamePlural != nil {
			err := writer.WriteField("name_plural", *params.NamePlural)
			if err != nil {
				return err
			}
		}
		if params.OriginalFile != nil {
			err := writer.WriteField("original_file", *params.OriginalFile)
			if err != nil {
				return err
			}
		}
		if params.Plural != nil {
			err := writer.WriteField("plural", strconv.FormatBool(*params.Plural))
			if err != nil {
				return err
			}
		}
		if params.RemoveScreenshot != nil {
			err := writer.WriteField("remove_screenshot", strconv.FormatBool(*params.RemoveScreenshot))
			if err != nil {
				return err
			}
		}
		if params.Screenshot != nil {
			part, err := writer.CreateFormFile("screenshot", filepath.Base(*params.Screenshot))
			if err != nil {
				return err
			}
			file, err := os.Open(*params.Screenshot)
			if err != nil {
				return err
			}
			_, err = io.Copy(part, file)
			// BUG FIX: previously the file was not closed when io.Copy
			// failed, leaking the descriptor. Close unconditionally and
			// report whichever error occurred first.
			if cerr := file.Close(); err == nil {
				err = cerr
			}
			if err != nil {
				return err
			}
		}
		if params.Tags != nil {
			err := writer.WriteField("tags", *params.Tags)
			if err != nil {
				return err
			}
		}
		if params.Unformatted != nil {
			err := writer.WriteField("unformatted", strconv.FormatBool(*params.Unformatted))
			if err != nil {
				return err
			}
		}
		if params.XmlSpacePreserve != nil {
			err := writer.WriteField("xml_space_preserve", strconv.FormatBool(*params.XmlSpacePreserve))
			if err != nil {
				return err
			}
		}
		// BUG FIX: the error from this WriteField was previously ignored.
		if err := writer.WriteField("utf8", "✓"); err != nil {
			return err
		}
		// BUG FIX: Close flushes the terminating multipart boundary; its
		// error was previously discarded, which could send a truncated body.
		if err := writer.Close(); err != nil {
			return err
		}
		rc, err := client.sendRequest("PATCH", url, ctype, paramsBuf, 200)
		if err != nil {
			return err
		}
		defer rc.Close()
		// In debug mode the response body is mirrored to stderr while decoding.
		var reader io.Reader
		if Debug {
			reader = io.TeeReader(rc, os.Stderr)
		} else {
			reader = rc
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}()
	return retVal, err
}
// KeysDeleteParams narrows which keys a KeysDelete call affects.
// The sibling comment on KeysDelete notes it obeys the same constraints
// as the keys list endpoint.
type KeysDeleteParams struct {
	// LocaleID restricts matching to a single locale.
	LocaleID *string `json:"locale_id,omitempty"`
	// Q is the search query used to select keys.
	Q *string `json:"q,omitempty"`
}
// ApplyDefaults builds a KeysDeleteParams from the given defaults map by
// round-tripping it through JSON.
//
// NOTE(review): the receiver's own field values are never merged in — the
// result reflects only the defaults map; confirm this is intended.
func (params *KeysDeleteParams) ApplyDefaults(defaults map[string]interface{}) (*KeysDeleteParams, error) {
	encoded, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(KeysDeleteParams)
	if err := json.Unmarshal(encoded, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// Delete all keys matching query. Same constraints as list.
func (client *Client) KeysDelete(project_id string, params *KeysDeleteParams) (*AffectedResources, error) {
retVal := new(AffectedResources)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("DELETE", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type KeysListParams struct {
LocaleID *string `json:"locale_id,omitempty"`
Order *string `json:"order,omitempty"`
Q *string `json:"q,omitempty"`
Sort *string `json:"sort,omitempty"`
}
func (params *KeysListParams) ApplyDefaults(defaults map[string]interface{}) (*KeysListParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(KeysListParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// List all keys for the given project. Alternatively you can POST requests to /search.
func (client *Client) KeysList(project_id string, page, perPage int, params *KeysListParams) ([]*TranslationKey, error) {
retVal := []*TranslationKey{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type KeysSearchParams struct {
LocaleID *string `json:"locale_id,omitempty"`
Order *string `json:"order,omitempty"`
Q *string `json:"q,omitempty"`
Sort *string `json:"sort,omitempty"`
}
func (params *KeysSearchParams) ApplyDefaults(defaults map[string]interface{}) (*KeysSearchParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(KeysSearchParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Search keys for the given project matching query.
func (client *Client) KeysSearch(project_id string, page, perPage int, params *KeysSearchParams) ([]*TranslationKey, error) {
retVal := []*TranslationKey{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/search", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("POST", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type KeysTagParams struct {
LocaleID *string `json:"locale_id,omitempty"`
Q *string `json:"q,omitempty"`
Tags *string `json:"tags,omitempty"`
}
func (params *KeysTagParams) ApplyDefaults(defaults map[string]interface{}) (*KeysTagParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(KeysTagParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Tags all keys matching query. Same constraints as list.
func (client *Client) KeysTag(project_id string, params *KeysTagParams) (*AffectedResources, error) {
retVal := new(AffectedResources)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/tag", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type KeysUntagParams struct {
LocaleID *string `json:"locale_id,omitempty"`
Q *string `json:"q,omitempty"`
Tags *string `json:"tags,omitempty"`
}
func (params *KeysUntagParams) ApplyDefaults(defaults map[string]interface{}) (*KeysUntagParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(KeysUntagParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Removes specified tags from keys matching query.
func (client *Client) KeysUntag(project_id string, params *KeysUntagParams) (*AffectedResources, error) {
retVal := new(AffectedResources)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/untag", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Create a new locale.
func (client *Client) LocaleCreate(project_id string, params *LocaleParams) (*LocaleDetails, error) {
retVal := new(LocaleDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Delete an existing locale.
func (client *Client) LocaleDelete(project_id, id string) error {
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s", project_id, id)
rc, err := client.sendRequest("DELETE", url, "", nil, 204)
if err != nil {
return err
}
defer rc.Close()
return nil
}()
return err
}
type LocaleDownloadParams struct {
ConvertEmoji bool `json:"convert_emoji,omitempty"`
Encoding *string `json:"encoding,omitempty"`
FallbackLocaleID *string `json:"fallback_locale_id,omitempty"`
FileFormat *string `json:"file_format,omitempty"`
FormatOptions *map[string]interface{} `json:"format_options,omitempty"`
IncludeEmptyTranslations bool `json:"include_empty_translations,omitempty"`
KeepNotranslateTags bool `json:"keep_notranslate_tags,omitempty"`
SkipUnverifiedTranslations bool `json:"skip_unverified_translations,omitempty"`
Tag *string `json:"tag,omitempty"`
}
func (params *LocaleDownloadParams) ApplyDefaults(defaults map[string]interface{}) (*LocaleDownloadParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(LocaleDownloadParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Download a locale in a specific file format.
func (client *Client) LocaleDownload(project_id, id string, params *LocaleDownloadParams) ([]byte, error) {
retVal := []byte{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s/download", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("GET", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
retVal, err = ioutil.ReadAll(reader)
return err
}()
return retVal, err
}
// Get details on a single locale for a given project.
func (client *Client) LocaleShow(project_id, id string) (*LocaleDetails, error) {
retVal := new(LocaleDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s", project_id, id)
rc, err := client.sendRequest("GET", url, "", nil, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Update an existing locale.
func (client *Client) LocaleUpdate(project_id, id string, params *LocaleParams) (*LocaleDetails, error) {
retVal := new(LocaleDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// List all locales for the given project.
func (client *Client) LocalesList(project_id string, page, perPage int) ([]*Locale, error) {
retVal := []*Locale{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales", project_id)
rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Confirm an existing order and send it to the provider for translation. Same constraints as for create.
func (client *Client) OrderConfirm(project_id, id string) (*TranslationOrder, error) {
retVal := new(TranslationOrder)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/orders/%s/confirm", project_id, id)
rc, err := client.sendRequest("PATCH", url, "", nil, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Create a new order. Access token scope must include <code>orders.create</code>.
func (client *Client) OrderCreate(project_id string, params *TranslationOrderParams) (*TranslationOrder, error) {
retVal := new(TranslationOrder)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/orders", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Cancel an existing order. Must not yet be confirmed.
func (client *Client) OrderDelete(project_id, id string) error {
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/orders/%s", project_id, id)
rc, err := client.sendRequest("DELETE", url, "", nil, 204)
if err != nil {
return err
}
defer rc.Close()
return nil
}()
return err
}
// Get details on a single order.
func (client *Client) OrderShow(project_id, id string) (*TranslationOrder, error) {
retVal := new(TranslationOrder)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/orders/%s", project_id, id)
rc, err := client.sendRequest("GET", url, "", nil, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// List all orders for the given project.
func (client *Client) OrdersList(project_id string, page, perPage int) ([]*TranslationOrder, error) {
retVal := []*TranslationOrder{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/orders", project_id)
rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Create a new project.
func (client *Client) ProjectCreate(params *ProjectParams) (*ProjectDetails, error) {
retVal := new(ProjectDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects")
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Delete an existing project.
func (client *Client) ProjectDelete(id string) error {
	path := fmt.Sprintf("/v2/projects/%s", id)
	body, err := client.sendRequest("DELETE", path, "", nil, 204)
	if err != nil {
		return err
	}
	// A 204 response carries no payload; the close error is ignored,
	// matching the original deferred Close.
	_ = body.Close()
	return nil
}
// Get details on a single project.
func (client *Client) ProjectShow(id string) (*ProjectDetails, error) {
retVal := new(ProjectDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s", id)
rc, err := client.sendRequest("GET", url, "", nil, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Update an existing project.
func (client *Client) ProjectUpdate(id string, params *ProjectParams) (*ProjectDetails, error) {
retVal := new(ProjectDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s", id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// List all projects the current user has access to.
func (client *Client) ProjectsList(page, perPage int) ([]*Project, error) {
retVal := []*Project{}
err := func() error {
url := fmt.Sprintf("/v2/projects")
rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Show details for current User.
func (client *Client) ShowUser() (*User, error) {
retVal := new(User)
err := func() error {
url := fmt.Sprintf("/v2/user")
rc, err := client.sendRequest("GET", url, "", nil, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Create a new style guide.
func (client *Client) StyleguideCreate(project_id string, params *StyleguideParams) (*StyleguideDetails, error) {
retVal := new(StyleguideDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/styleguides", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Delete an existing style guide.
func (client *Client) StyleguideDelete(project_id, id string) error {
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/styleguides/%s", project_id, id)
rc, err := client.sendRequest("DELETE", url, "", nil, 204)
if err != nil {
return err
}
defer rc.Close()
return nil
}()
return err
}
// Get details on a single style guide.
func (client *Client) StyleguideShow(project_id, id string) (*StyleguideDetails, error) {
retVal := new(StyleguideDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/styleguides/%s", project_id, id)
rc, err := client.sendRequest("GET", url, "", nil, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Update an existing style guide.
func (client *Client) StyleguideUpdate(project_id, id string, params *StyleguideParams) (*StyleguideDetails, error) {
retVal := new(StyleguideDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/styleguides/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// StyleguidesList returns one page of the project's style guides.
func (client *Client) StyleguidesList(project_id string, page, perPage int) ([]*Styleguide, error) {
	retVal := []*Styleguide{}
	endpoint := fmt.Sprintf("/v2/projects/%s/styleguides", project_id)
	rc, err := client.sendRequestPaginated("GET", endpoint, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// Create a new tag.
func (client *Client) TagCreate(project_id string, params *TagParams) (*TagWithStats, error) {
retVal := new(TagWithStats)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/tags", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TagDelete removes an existing tag. A 204 response is treated as success.
func (client *Client) TagDelete(project_id, name string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/tags/%s", project_id, name)
	rc, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	// No body to read; close so the connection can be reused.
	rc.Close()
	return nil
}
// TagShow retrieves details and progress information for one tag of a project.
func (client *Client) TagShow(project_id, name string) (*TagWithStats, error) {
	retVal := new(TagWithStats)
	endpoint := fmt.Sprintf("/v2/projects/%s/tags/%s", project_id, name)
	rc, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// TagsList returns one page of the project's tags.
func (client *Client) TagsList(project_id string, page, perPage int) ([]*Tag, error) {
	retVal := []*Tag{}
	endpoint := fmt.Sprintf("/v2/projects/%s/tags", project_id)
	rc, err := client.sendRequestPaginated("GET", endpoint, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// Create a translation.
func (client *Client) TranslationCreate(project_id string, params *TranslationParams) (*TranslationDetails, error) {
retVal := new(TranslationDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationShow fetches the details of a single translation.
func (client *Client) TranslationShow(project_id, id string) (*TranslationDetails, error) {
	retVal := new(TranslationDetails)
	endpoint := fmt.Sprintf("/v2/projects/%s/translations/%s", project_id, id)
	rc, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// TranslationUpdateParams are the fields accepted by TranslationUpdate.
// Nil pointers are omitted from the request body.
type TranslationUpdateParams struct {
	Content      *string `json:"content,omitempty"`
	Excluded     *bool   `json:"excluded,omitempty"`
	PluralSuffix *string `json:"plural_suffix,omitempty"`
	Unverified   *bool   `json:"unverified,omitempty"`
}

// ApplyDefaults builds a TranslationUpdateParams from the given default map.
// NOTE(review): the result is populated from defaults only; the receiver's
// own field values are not merged in — confirm this precedence is intended.
func (params *TranslationUpdateParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationUpdateParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TranslationUpdateParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// Update an existing translation.
func (client *Client) TranslationUpdate(project_id, id string, params *TranslationUpdateParams) (*TranslationDetails, error) {
retVal := new(TranslationDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsByKeyParams are the query options accepted by TranslationsByKey.
type TranslationsByKeyParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a TranslationsByKeyParams from the given default map.
// NOTE(review): the result is populated from defaults only; the receiver's
// own field values are not merged in — confirm this precedence is intended.
func (params *TranslationsByKeyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsByKeyParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TranslationsByKeyParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// List translations for a specific key.
func (client *Client) TranslationsByKey(project_id, key_id string, page, perPage int, params *TranslationsByKeyParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/translations", project_id, key_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsByLocaleParams are the query options accepted by TranslationsByLocale.
type TranslationsByLocaleParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a TranslationsByLocaleParams from the given default map.
// NOTE(review): the result is populated from defaults only; the receiver's
// own field values are not merged in — confirm this precedence is intended.
func (params *TranslationsByLocaleParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsByLocaleParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TranslationsByLocaleParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// List translations for a specific locale. If you want to download all translations for one locale we recommend to use the <code>locales#download</code> endpoint.
func (client *Client) TranslationsByLocale(project_id, locale_id string, page, perPage int, params *TranslationsByLocaleParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s/translations", project_id, locale_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsExcludeParams select which translations TranslationsExclude affects.
type TranslationsExcludeParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a TranslationsExcludeParams from the given default map.
// NOTE(review): the result is populated from defaults only; the receiver's
// own field values are not merged in — confirm this precedence is intended.
func (params *TranslationsExcludeParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsExcludeParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TranslationsExcludeParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// Exclude translations matching query from locale export.
func (client *Client) TranslationsExclude(project_id string, params *TranslationsExcludeParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/exclude", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsIncludeParams select which translations TranslationsInclude affects.
type TranslationsIncludeParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a TranslationsIncludeParams from the given default map.
// NOTE(review): the result is populated from defaults only; the receiver's
// own field values are not merged in — confirm this precedence is intended.
func (params *TranslationsIncludeParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsIncludeParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TranslationsIncludeParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// Include translations matching query in locale export.
func (client *Client) TranslationsInclude(project_id string, params *TranslationsIncludeParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/include", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsListParams are the query options accepted by TranslationsList.
type TranslationsListParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a TranslationsListParams from the given default map.
// NOTE(review): the result is populated from defaults only; the receiver's
// own field values are not merged in — confirm this precedence is intended.
func (params *TranslationsListParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsListParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TranslationsListParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// List translations for the given project. If you want to download all translations for one locale we recommend to use the <code>locales#download</code> endpoint.
func (client *Client) TranslationsList(project_id string, page, perPage int, params *TranslationsListParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsSearchParams are the query options accepted by TranslationsSearch.
type TranslationsSearchParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a TranslationsSearchParams from the given default map.
// NOTE(review): the result is populated from defaults only; the receiver's
// own field values are not merged in — confirm this precedence is intended.
func (params *TranslationsSearchParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsSearchParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TranslationsSearchParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// List translations for the given project if you exceed GET request limitations on translations list. If you want to download all translations for one locale we recommend to use the <code>locales#download</code> endpoint.
func (client *Client) TranslationsSearch(project_id string, page, perPage int, params *TranslationsSearchParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/search", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("POST", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsUnverifyParams select which translations TranslationsUnverify affects.
type TranslationsUnverifyParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a TranslationsUnverifyParams from the given default map.
// NOTE(review): the result is populated from defaults only; the receiver's
// own field values are not merged in — confirm this precedence is intended.
func (params *TranslationsUnverifyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsUnverifyParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TranslationsUnverifyParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// Mark translations matching query as unverified.
func (client *Client) TranslationsUnverify(project_id string, params *TranslationsUnverifyParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/unverify", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsVerifyParams select which translations TranslationsVerify affects.
type TranslationsVerifyParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a TranslationsVerifyParams from the given default map.
// NOTE(review): the result is populated from defaults only; the receiver's
// own field values are not merged in — confirm this precedence is intended.
func (params *TranslationsVerifyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsVerifyParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TranslationsVerifyParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// Verify translations matching query.
func (client *Client) TranslationsVerify(project_id string, params *TranslationsVerifyParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/verify", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// UploadCreate uploads a new language file as a multipart form and creates
// the necessary resources in the project.
//
// Fixes: the error from the final "utf8" field write was ignored, the
// multipart writer's Close error was ignored, and the opened file leaked
// when io.Copy failed.
func (client *Client) UploadCreate(project_id string, params *UploadParams) (*Upload, error) {
	retVal := new(Upload)
	url := fmt.Sprintf("/v2/projects/%s/uploads", project_id)
	paramsBuf := bytes.NewBuffer(nil)
	writer := multipart.NewWriter(paramsBuf)
	ctype := writer.FormDataContentType()

	// addField / addBoolField write a form field only when the param is set.
	addField := func(name string, value *string) error {
		if value == nil {
			return nil
		}
		return writer.WriteField(name, *value)
	}
	addBoolField := func(name string, value *bool) error {
		if value == nil {
			return nil
		}
		return writer.WriteField(name, strconv.FormatBool(*value))
	}

	if err := addBoolField("convert_emoji", params.ConvertEmoji); err != nil {
		return retVal, err
	}
	if params.File != nil {
		part, err := writer.CreateFormFile("file", filepath.Base(*params.File))
		if err != nil {
			return retVal, err
		}
		file, err := os.Open(*params.File)
		if err != nil {
			return retVal, err
		}
		if _, err = io.Copy(part, file); err != nil {
			file.Close() // fix: the file used to leak on a failed copy
			return retVal, err
		}
		if err = file.Close(); err != nil {
			return retVal, err
		}
	}
	if err := addField("file_encoding", params.FileEncoding); err != nil {
		return retVal, err
	}
	if err := addField("file_format", params.FileFormat); err != nil {
		return retVal, err
	}
	if err := addField("locale_id", params.LocaleID); err != nil {
		return retVal, err
	}
	if err := addBoolField("skip_unverification", params.SkipUnverification); err != nil {
		return retVal, err
	}
	if err := addBoolField("skip_upload_tags", params.SkipUploadTags); err != nil {
		return retVal, err
	}
	if err := addField("tags", params.Tags); err != nil {
		return retVal, err
	}
	if err := addBoolField("update_translations", params.UpdateTranslations); err != nil {
		return retVal, err
	}
	// Marker telling the server the form data is UTF-8 encoded.
	if err := writer.WriteField("utf8", "✓"); err != nil {
		return retVal, err
	}
	// Close finalizes the multipart body; its error was previously dropped.
	if err := writer.Close(); err != nil {
		return retVal, err
	}

	rc, err := client.sendRequest("POST", url, ctype, paramsBuf, 201)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// UploadShow fetches details and the import summary for a single upload.
func (client *Client) UploadShow(project_id, id string) (*Upload, error) {
	retVal := new(Upload)
	endpoint := fmt.Sprintf("/v2/projects/%s/uploads/%s", project_id, id)
	rc, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// UploadsList returns one page of the project's uploads.
func (client *Client) UploadsList(project_id string, page, perPage int) ([]*Upload, error) {
	retVal := []*Upload{}
	endpoint := fmt.Sprintf("/v2/projects/%s/uploads", project_id)
	rc, err := client.sendRequestPaginated("GET", endpoint, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// VersionShow fetches the details of a single translation version.
func (client *Client) VersionShow(project_id, translation_id, id string) (*TranslationVersionWithUser, error) {
	retVal := new(TranslationVersionWithUser)
	endpoint := fmt.Sprintf("/v2/projects/%s/translations/%s/versions/%s", project_id, translation_id, id)
	rc, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// VersionsList returns one page of versions for the given translation.
func (client *Client) VersionsList(project_id, translation_id string, page, perPage int) ([]*TranslationVersion, error) {
	retVal := []*TranslationVersion{}
	endpoint := fmt.Sprintf("/v2/projects/%s/translations/%s/versions", project_id, translation_id)
	rc, err := client.sendRequestPaginated("GET", endpoint, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// Create a new webhook.
func (client *Client) WebhookCreate(project_id string, params *WebhookParams) (*Webhook, error) {
retVal := new(Webhook)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/webhooks", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// WebhookDelete removes an existing webhook. A 204 response is treated as success.
func (client *Client) WebhookDelete(project_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/webhooks/%s", project_id, id)
	rc, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	// No body to read; close so the connection can be reused.
	rc.Close()
	return nil
}
// WebhookShow fetches the details of a single webhook.
func (client *Client) WebhookShow(project_id, id string) (*Webhook, error) {
	retVal := new(Webhook)
	endpoint := fmt.Sprintf("/v2/projects/%s/webhooks/%s", project_id, id)
	rc, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// WebhookTest triggers a test request for the given webhook.
func (client *Client) WebhookTest(project_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/webhooks/%s/test", project_id, id)
	rc, err := client.sendRequest("POST", endpoint, "", nil, 200)
	if err != nil {
		return err
	}
	// The response body is not inspected; close it right away.
	rc.Close()
	return nil
}
// Update an existing webhook.
func (client *Client) WebhookUpdate(project_id, id string, params *WebhookParams) (*Webhook, error) {
retVal := new(Webhook)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/webhooks/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// WebhooksList returns one page of the project's webhooks.
func (client *Client) WebhooksList(project_id string, page, perPage int) ([]*Webhook, error) {
	retVal := []*Webhook{}
	endpoint := fmt.Sprintf("/v2/projects/%s/webhooks", project_id)
	rc, err := client.sendRequestPaginated("GET", endpoint, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// GetUserAgent returns the User-Agent value identifying this client library
// and its version. NOTE(review): the version literal is hard-coded and must
// be kept in sync with the released library version.
func GetUserAgent() string {
	const clientVersion = "1.1.6"
	return "PhraseApp go (" + clientVersion + ")"
}
1.1.7
Added support for the `fallback_locale_id` parameter for pull targets.
package phraseapp
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"os"
"path/filepath"
"strconv"
"time"
)
// AffectedCount reports how many records an operation changed.
type AffectedCount struct {
	RecordsAffected int64 `json:"records_affected"`
}

// AffectedResources reports how many resources an operation changed.
type AffectedResources struct {
	RecordsAffected int64 `json:"records_affected"`
}

// Authorization describes an API authorization token: its scopes, note,
// expiry, hashed value and the last eight characters of the token.
type Authorization struct {
	CreatedAt *time.Time `json:"created_at"`
	ExpiresAt *time.Time `json:"expires_at"`
	HashedToken string `json:"hashed_token"`
	ID string `json:"id"`
	Note string `json:"note"`
	Scopes []string `json:"scopes"`
	TokenLastEight string `json:"token_last_eight"`
	UpdatedAt *time.Time `json:"updated_at"`
}

// AuthorizationWithToken is an Authorization that also carries the
// plaintext token value.
type AuthorizationWithToken struct {
	Authorization
	Token string `json:"token"`
}

// BlacklistedKey is a blacklisted key name with its timestamps.
type BlacklistedKey struct {
	CreatedAt *time.Time `json:"created_at"`
	ID string `json:"id"`
	Name string `json:"name"`
	UpdatedAt *time.Time `json:"updated_at"`
}

// Comment is a message attached by a user, with timestamps.
type Comment struct {
	CreatedAt *time.Time `json:"created_at"`
	ID string `json:"id"`
	Message string `json:"message"`
	UpdatedAt *time.Time `json:"updated_at"`
	User *UserPreview `json:"user"`
}

// Format describes a localization file format and its capabilities.
type Format struct {
	ApiName string `json:"api_name"`
	DefaultEncoding string `json:"default_encoding"`
	DefaultFile string `json:"default_file"`
	Description string `json:"description"`
	Exportable bool `json:"exportable"`
	Extension string `json:"extension"`
	Importable bool `json:"importable"`
	Name string `json:"name"`
}

// KeyPreview is a minimal representation of a translation key.
type KeyPreview struct {
	ID string `json:"id"`
	Name string `json:"name"`
	Plural bool `json:"plural"`
}

// Locale is a project locale, optionally linked to a source locale.
type Locale struct {
	Code string `json:"code"`
	CreatedAt *time.Time `json:"created_at"`
	Default bool `json:"default"`
	ID string `json:"id"`
	Main bool `json:"main"`
	Name string `json:"name"`
	PluralForms []string `json:"plural_forms"`
	Rtl bool `json:"rtl"`
	SourceLocale *LocalePreview `json:"source_locale"`
	UpdatedAt *time.Time `json:"updated_at"`
}

// LocaleDetails is a Locale together with its translation statistics.
type LocaleDetails struct {
	Locale
	Statistics *LocaleStatistics `json:"statistics"`
}

// LocalePreview is a minimal representation of a locale.
type LocalePreview struct {
	Code string `json:"code"`
	ID string `json:"id"`
	Name string `json:"name"`
}

// LocaleStatistics holds translation-progress counters for one locale.
type LocaleStatistics struct {
	KeysTotalCount int64 `json:"keys_total_count"`
	KeysUntranslatedCount int64 `json:"keys_untranslated_count"`
	MissingWordsCount int64 `json:"missing_words_count"`
	TranslationsCompletedCount int64 `json:"translations_completed_count"`
	TranslationsUnverifiedCount int64 `json:"translations_unverified_count"`
	UnverifiedWordsCount int64 `json:"unverified_words_count"`
	WordsTotalCount int64 `json:"words_total_count"`
}

// Project is a project record with its main file format.
type Project struct {
	CreatedAt *time.Time `json:"created_at"`
	ID string `json:"id"`
	MainFormat string `json:"main_format"`
	Name string `json:"name"`
	UpdatedAt *time.Time `json:"updated_at"`
}

// ProjectDetails is a Project plus its translation-memory sharing flag.
type ProjectDetails struct {
	Project
	SharesTranslationMemory bool `json:"shares_translation_memory"`
}

// StatisticsListItem pairs a locale preview with its statistics.
type StatisticsListItem struct {
	Locale *LocalePreview `json:"locale"`
	Statistics StatisticsType `json:"statistics"`
}

// StatisticsType holds per-locale progress counters.
type StatisticsType struct {
	KeysTotalCount int64 `json:"keys_total_count"`
	KeysUntranslatedCount int64 `json:"keys_untranslated_count"`
	TranslationsCompletedCount int64 `json:"translations_completed_count"`
	TranslationsUnverifiedCount int64 `json:"translations_unverified_count"`
}

// Styleguide is a style guide summary (title and timestamps).
type Styleguide struct {
	CreatedAt *time.Time `json:"created_at"`
	ID string `json:"id"`
	Title string `json:"title"`
	UpdatedAt *time.Time `json:"updated_at"`
}

// StyleguideDetails is a Styleguide with all of its content sections.
type StyleguideDetails struct {
	Styleguide
	Audience string `json:"audience"`
	Business string `json:"business"`
	CompanyBranding string `json:"company_branding"`
	Formatting string `json:"formatting"`
	GlossaryTerms string `json:"glossary_terms"`
	GrammarConsistency string `json:"grammar_consistency"`
	GrammaticalPerson string `json:"grammatical_person"`
	LiteralTranslation string `json:"literal_translation"`
	OverallTone string `json:"overall_tone"`
	PublicUrl string `json:"public_url"`
	Samples string `json:"samples"`
	TargetAudience string `json:"target_audience"`
	VocabularyType string `json:"vocabulary_type"`
}

// StyleguidePreview is a minimal representation of a style guide.
type StyleguidePreview struct {
	ID string `json:"id"`
	Title string `json:"title"`
}

// SummaryType counts the resources created or updated by an upload.
type SummaryType struct {
	LocalesCreated int64 `json:"locales_created"`
	TagsCreated int64 `json:"tags_created"`
	TranslationKeysCreated int64 `json:"translation_keys_created"`
	TranslationsCreated int64 `json:"translations_created"`
	TranslationsUpdated int64 `json:"translations_updated"`
}

// Tag is a tag name with its key count and timestamps.
type Tag struct {
	CreatedAt *time.Time `json:"created_at"`
	KeysCount int64 `json:"keys_count"`
	Name string `json:"name"`
	UpdatedAt *time.Time `json:"updated_at"`
}

// TagWithStats is a Tag plus per-locale statistics.
type TagWithStats struct {
	Tag
	Statistics []*StatisticsListItem `json:"statistics"`
}

// Translation is a single translation of a key in one locale.
type Translation struct {
	Content string `json:"content"`
	CreatedAt *time.Time `json:"created_at"`
	Excluded bool `json:"excluded"`
	ID string `json:"id"`
	Key *KeyPreview `json:"key"`
	Locale *LocalePreview `json:"locale"`
	Placeholders []string `json:"placeholders"`
	PluralSuffix string `json:"plural_suffix"`
	Unverified bool `json:"unverified"`
	UpdatedAt *time.Time `json:"updated_at"`
}

// TranslationDetails is a Translation plus its author and word count.
type TranslationDetails struct {
	Translation
	User *UserPreview `json:"user"`
	WordCount int64 `json:"word_count"`
}

// TranslationKey is a translation key with its metadata and tags.
type TranslationKey struct {
	CreatedAt *time.Time `json:"created_at"`
	DataType string `json:"data_type"`
	Description string `json:"description"`
	ID string `json:"id"`
	Name string `json:"name"`
	NameHash string `json:"name_hash"`
	Plural bool `json:"plural"`
	Tags []string `json:"tags"`
	UpdatedAt *time.Time `json:"updated_at"`
}

// TranslationKeyDetails is a TranslationKey plus extended attributes.
type TranslationKeyDetails struct {
	TranslationKey
	CommentsCount int64 `json:"comments_count"`
	FormatValueType string `json:"format_value_type"`
	MaxCharactersAllowed int64 `json:"max_characters_allowed"`
	NamePlural string `json:"name_plural"`
	OriginalFile string `json:"original_file"`
	ScreenshotUrl string `json:"screenshot_url"`
	Unformatted bool `json:"unformatted"`
	XmlSpacePreserve bool `json:"xml_space_preserve"`
}

// TranslationOrder is a professional translation order and its progress.
type TranslationOrder struct {
	AmountInCents int64 `json:"amount_in_cents"`
	CreatedAt *time.Time `json:"created_at"`
	Currency string `json:"currency"`
	ID string `json:"id"`
	Lsp string `json:"lsp"`
	Message string `json:"message"`
	Priority bool `json:"priority"`
	ProgressPercent int64 `json:"progress_percent"`
	Quality bool `json:"quality"`
	SourceLocale *LocalePreview `json:"source_locale"`
	State string `json:"state"`
	Styleguide *StyleguidePreview `json:"styleguide"`
	Tag string `json:"tag"`
	TargetLocales []*LocalePreview `json:"target_locales"`
	TranslationType string `json:"translation_type"`
	UnverifyTranslationsUponDelivery bool `json:"unverify_translations_upon_delivery"`
	UpdatedAt *time.Time `json:"updated_at"`
}

// TranslationVersion is a historical version of a translation's content.
type TranslationVersion struct {
	ChangedAt *time.Time `json:"changed_at"`
	Content string `json:"content"`
	CreatedAt *time.Time `json:"created_at"`
	ID string `json:"id"`
	Key *KeyPreview `json:"key"`
	Locale *LocalePreview `json:"locale"`
	PluralSuffix string `json:"plural_suffix"`
	UpdatedAt *time.Time `json:"updated_at"`
}

// TranslationVersionWithUser is a TranslationVersion plus its author.
type TranslationVersionWithUser struct {
	TranslationVersion
	User *UserPreview `json:"user"`
}

// Upload is an uploaded language file and the summary of its import.
type Upload struct {
	CreatedAt *time.Time `json:"created_at"`
	Filename string `json:"filename"`
	Format string `json:"format"`
	ID string `json:"id"`
	State string `json:"state"`
	Summary SummaryType `json:"summary"`
	UpdatedAt *time.Time `json:"updated_at"`
}

// User is a full account user record.
type User struct {
	CreatedAt *time.Time `json:"created_at"`
	Email string `json:"email"`
	ID string `json:"id"`
	Name string `json:"name"`
	Position string `json:"position"`
	UpdatedAt *time.Time `json:"updated_at"`
	Username string `json:"username"`
}

// UserPreview is a minimal representation of a user.
type UserPreview struct {
	ID string `json:"id"`
	Name string `json:"name"`
	Username string `json:"username"`
}

// Webhook is a webhook configuration: callback URL and subscribed events.
type Webhook struct {
	Active bool `json:"active"`
	CallbackUrl string `json:"callback_url"`
	CreatedAt *time.Time `json:"created_at"`
	Description string `json:"description"`
	Events []string `json:"events"`
	ID string `json:"id"`
	UpdatedAt *time.Time `json:"updated_at"`
}
// AuthorizationParams are the fields accepted when creating or updating an
// authorization. Nil pointers are omitted from the request body.
type AuthorizationParams struct {
	ExpiresAt **time.Time `json:"expires_at,omitempty"`
	Note      *string     `json:"note,omitempty"`
	Scopes    []string    `json:"scopes,omitempty"`
}

// ApplyDefaults builds an AuthorizationParams from the given default map.
// NOTE(review): the result is populated from defaults only; the receiver's
// own field values are not merged in — confirm this precedence is intended.
func (params *AuthorizationParams) ApplyDefaults(defaults map[string]interface{}) (*AuthorizationParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(AuthorizationParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// BlacklistedKeyParams are the fields accepted for a blacklisted key.
type BlacklistedKeyParams struct {
	Name *string `json:"name,omitempty"`
}

// ApplyDefaults builds a BlacklistedKeyParams from the given default map.
// NOTE(review): the result is populated from defaults only; the receiver's
// own field values are not merged in — confirm this precedence is intended.
func (params *BlacklistedKeyParams) ApplyDefaults(defaults map[string]interface{}) (*BlacklistedKeyParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(BlacklistedKeyParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// CommentParams are the fields accepted when creating or updating a comment.
type CommentParams struct {
	Message *string `json:"message,omitempty"`
}

// ApplyDefaults builds a CommentParams from the given default map.
// NOTE(review): the result is populated from defaults only; the receiver's
// own field values are not merged in — confirm this precedence is intended.
func (params *CommentParams) ApplyDefaults(defaults map[string]interface{}) (*CommentParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(CommentParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// TranslationKeyParams are the fields accepted when creating or updating a
// translation key. Nil pointers are omitted from the request body.
type TranslationKeyParams struct {
	DataType              *string `json:"data_type,omitempty"`
	Description           *string `json:"description,omitempty"`
	LocalizedFormatKey    *string `json:"localized_format_key,omitempty"`
	LocalizedFormatString *string `json:"localized_format_string,omitempty"`
	MaxCharactersAllowed  *int64  `json:"max_characters_allowed,omitempty"`
	Name                  *string `json:"name,omitempty"`
	NamePlural            *string `json:"name_plural,omitempty"`
	OriginalFile          *string `json:"original_file,omitempty"`
	Plural                *bool   `json:"plural,omitempty"`
	RemoveScreenshot      *bool   `json:"remove_screenshot,omitempty"`
	Screenshot            *string `json:"screenshot,omitempty"`
	Tags                  *string `json:"tags,omitempty"`
	Unformatted           *bool   `json:"unformatted,omitempty"`
	XmlSpacePreserve      *bool   `json:"xml_space_preserve,omitempty"`
}

// ApplyDefaults builds a TranslationKeyParams from the given default map.
// NOTE(review): the result is populated from defaults only; the receiver's
// own field values are not merged in — confirm this precedence is intended.
func (params *TranslationKeyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationKeyParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TranslationKeyParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// LocaleParams holds the attributes accepted when creating or updating a
// locale.
type LocaleParams struct {
	Code *string `json:"code,omitempty"`
	Default *bool `json:"default,omitempty"`
	Main *bool `json:"main,omitempty"`
	Name *string `json:"name,omitempty"`
	Rtl *bool `json:"rtl,omitempty"`
	SourceLocaleID *string `json:"source_locale_id,omitempty"`
}

// ApplyDefaults builds a fresh LocaleParams from the given defaults map via
// a JSON round-trip; on error the receiver is returned unchanged.
func (params *LocaleParams) ApplyDefaults(defaults map[string]interface{}) (*LocaleParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(LocaleParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// TranslationOrderParams holds the attributes accepted when creating a
// translation order.
type TranslationOrderParams struct {
	Category *string `json:"category,omitempty"`
	IncludeUntranslatedKeys *bool `json:"include_untranslated_keys,omitempty"`
	IncludeUnverifiedTranslations *bool `json:"include_unverified_translations,omitempty"`
	Lsp *string `json:"lsp,omitempty"`
	Message *string `json:"message,omitempty"`
	Priority *bool `json:"priority,omitempty"`
	Quality *bool `json:"quality,omitempty"`
	SourceLocaleID *string `json:"source_locale_id,omitempty"`
	StyleguideID *string `json:"styleguide_id,omitempty"`
	Tag *string `json:"tag,omitempty"`
	TargetLocaleIDs []string `json:"target_locale_ids,omitempty"`
	TranslationType *string `json:"translation_type,omitempty"`
	UnverifyTranslationsUponDelivery *bool `json:"unverify_translations_upon_delivery,omitempty"`
}

// ApplyDefaults builds a fresh TranslationOrderParams from the given
// defaults map via a JSON round-trip; on error the receiver is returned
// unchanged.
func (params *TranslationOrderParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationOrderParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(TranslationOrderParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// ProjectParams holds the attributes accepted when creating or updating a
// project.
type ProjectParams struct {
	MainFormat *string `json:"main_format,omitempty"`
	Name *string `json:"name,omitempty"`
	SharesTranslationMemory *bool `json:"shares_translation_memory,omitempty"`
}

// ApplyDefaults builds a fresh ProjectParams from the given defaults map via
// a JSON round-trip; on error the receiver is returned unchanged.
func (params *ProjectParams) ApplyDefaults(defaults map[string]interface{}) (*ProjectParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(ProjectParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// StyleguideParams holds the attributes accepted when creating or updating
// a styleguide.
type StyleguideParams struct {
	Audience *string `json:"audience,omitempty"`
	Business *string `json:"business,omitempty"`
	CompanyBranding *string `json:"company_branding,omitempty"`
	Formatting *string `json:"formatting,omitempty"`
	GlossaryTerms *string `json:"glossary_terms,omitempty"`
	GrammarConsistency *string `json:"grammar_consistency,omitempty"`
	GrammaticalPerson *string `json:"grammatical_person,omitempty"`
	LiteralTranslation *string `json:"literal_translation,omitempty"`
	OverallTone *string `json:"overall_tone,omitempty"`
	Samples *string `json:"samples,omitempty"`
	TargetAudience *string `json:"target_audience,omitempty"`
	Title *string `json:"title,omitempty"`
	VocabularyType *string `json:"vocabulary_type,omitempty"`
}

// ApplyDefaults builds a fresh StyleguideParams from the given defaults map
// via a JSON round-trip; on error the receiver is returned unchanged.
func (params *StyleguideParams) ApplyDefaults(defaults map[string]interface{}) (*StyleguideParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(StyleguideParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// TagParams holds the attributes accepted when creating a tag.
type TagParams struct {
	Name *string `json:"name,omitempty"`
}

// ApplyDefaults builds a fresh TagParams from the given defaults map via a
// JSON round-trip; on error the receiver is returned unchanged.
func (params *TagParams) ApplyDefaults(defaults map[string]interface{}) (*TagParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(TagParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// TranslationParams holds the attributes accepted when creating or updating
// a translation.
type TranslationParams struct {
	Content *string `json:"content,omitempty"`
	Excluded *bool `json:"excluded,omitempty"`
	KeyID *string `json:"key_id,omitempty"`
	LocaleID *string `json:"locale_id,omitempty"`
	PluralSuffix *string `json:"plural_suffix,omitempty"`
	Unverified *bool `json:"unverified,omitempty"`
}

// ApplyDefaults builds a fresh TranslationParams from the given defaults map
// via a JSON round-trip; on error the receiver is returned unchanged.
func (params *TranslationParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(TranslationParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// UploadParams holds the attributes accepted when uploading a localization
// file.
type UploadParams struct {
	ConvertEmoji *bool `json:"convert_emoji,omitempty"`
	File *string `json:"file,omitempty"`
	FileEncoding *string `json:"file_encoding,omitempty"`
	FileFormat *string `json:"file_format,omitempty"`
	LocaleID *string `json:"locale_id,omitempty"`
	SkipUnverification *bool `json:"skip_unverification,omitempty"`
	SkipUploadTags *bool `json:"skip_upload_tags,omitempty"`
	Tags *string `json:"tags,omitempty"`
	UpdateTranslations *bool `json:"update_translations,omitempty"`
}

// ApplyDefaults builds a fresh UploadParams from the given defaults map via
// a JSON round-trip; on error the receiver is returned unchanged.
func (params *UploadParams) ApplyDefaults(defaults map[string]interface{}) (*UploadParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(UploadParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// WebhookParams holds the attributes accepted when creating or updating a
// webhook.
type WebhookParams struct {
	Active *bool `json:"active,omitempty"`
	CallbackUrl *string `json:"callback_url,omitempty"`
	Description *string `json:"description,omitempty"`
	Events *string `json:"events,omitempty"`
}

// ApplyDefaults builds a fresh WebhookParams from the given defaults map via
// a JSON round-trip; on error the receiver is returned unchanged.
func (params *WebhookParams) ApplyDefaults(defaults map[string]interface{}) (*WebhookParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(WebhookParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// Create a new authorization.
func (client *Client) AuthorizationCreate(params *AuthorizationParams) (*AuthorizationWithToken, error) {
retVal := new(AuthorizationWithToken)
err := func() error {
url := fmt.Sprintf("/v2/authorizations")
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// AuthorizationDelete deletes an existing authorization. API calls using
// that token will stop working.
func (client *Client) AuthorizationDelete(id string) error {
	endpoint := fmt.Sprintf("/v2/authorizations/%s", id)
	rc, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	// Close error is intentionally ignored, matching the rest of the client.
	rc.Close()
	return nil
}
// AuthorizationShow fetches details on a single authorization.
func (client *Client) AuthorizationShow(id string) (*Authorization, error) {
	authorization := new(Authorization)
	rc, err := client.sendRequest("GET", fmt.Sprintf("/v2/authorizations/%s", id), "", nil, 200)
	if err != nil {
		return authorization, err
	}
	defer rc.Close()
	var src io.Reader = rc
	if Debug {
		// Mirror the raw response to stderr while decoding.
		src = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(src).Decode(&authorization)
	return authorization, err
}
// Update an existing authorization.
func (client *Client) AuthorizationUpdate(id string, params *AuthorizationParams) (*Authorization, error) {
retVal := new(Authorization)
err := func() error {
url := fmt.Sprintf("/v2/authorizations/%s", id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// AuthorizationsList retrieves one page of the account's authorizations.
func (client *Client) AuthorizationsList(page, perPage int) ([]*Authorization, error) {
	authorizations := []*Authorization{}
	rc, err := client.sendRequestPaginated("GET", "/v2/authorizations", "", nil, 200, page, perPage)
	if err != nil {
		return authorizations, err
	}
	defer rc.Close()
	var src io.Reader = rc
	if Debug {
		src = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(src).Decode(&authorizations)
	return authorizations, err
}
// Create a new rule for blacklisting keys.
func (client *Client) BlacklistedKeyCreate(project_id string, params *BlacklistedKeyParams) (*BlacklistedKey, error) {
retVal := new(BlacklistedKey)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// BlacklistedKeyDelete deletes an existing rule for blacklisting keys.
func (client *Client) BlacklistedKeyDelete(project_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/blacklisted_keys/%s", project_id, id)
	rc, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	// Close error is intentionally ignored, matching the rest of the client.
	rc.Close()
	return nil
}
// BlacklistedKeyShow fetches a single blacklisting rule of a project.
func (client *Client) BlacklistedKeyShow(project_id, id string) (*BlacklistedKey, error) {
	rule := new(BlacklistedKey)
	rc, err := client.sendRequest("GET", fmt.Sprintf("/v2/projects/%s/blacklisted_keys/%s", project_id, id), "", nil, 200)
	if err != nil {
		return rule, err
	}
	defer rc.Close()
	var src io.Reader = rc
	if Debug {
		src = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(src).Decode(&rule)
	return rule, err
}
// Update an existing rule for blacklisting keys.
func (client *Client) BlacklistedKeyUpdate(project_id, id string, params *BlacklistedKeyParams) (*BlacklistedKey, error) {
retVal := new(BlacklistedKey)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// BlacklistedKeysList retrieves one page of the project's blacklisting rules.
func (client *Client) BlacklistedKeysList(project_id string, page, perPage int) ([]*BlacklistedKey, error) {
	rules := []*BlacklistedKey{}
	rc, err := client.sendRequestPaginated("GET", fmt.Sprintf("/v2/projects/%s/blacklisted_keys", project_id), "", nil, 200, page, perPage)
	if err != nil {
		return rules, err
	}
	defer rc.Close()
	var src io.Reader = rc
	if Debug {
		src = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(src).Decode(&rules)
	return rules, err
}
// Create a new comment for a key.
func (client *Client) CommentCreate(project_id, key_id string, params *CommentParams) (*Comment, error) {
retVal := new(Comment)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments", project_id, key_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// CommentDelete deletes an existing comment.
func (client *Client) CommentDelete(project_id, key_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s", project_id, key_id, id)
	rc, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	// Close error is intentionally ignored, matching the rest of the client.
	rc.Close()
	return nil
}
// CommentMarkCheck checks whether a comment was marked as read: the API
// answers 204 if read and 404 if unread (the latter surfaces as an error).
func (client *Client) CommentMarkCheck(project_id, key_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s/read", project_id, key_id, id)
	rc, err := client.sendRequest("GET", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	// Close error is intentionally ignored, matching the rest of the client.
	rc.Close()
	return nil
}
// CommentMarkRead marks a comment as read.
func (client *Client) CommentMarkRead(project_id, key_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s/read", project_id, key_id, id)
	rc, err := client.sendRequest("PATCH", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	// Close error is intentionally ignored, matching the rest of the client.
	rc.Close()
	return nil
}
// CommentMarkUnread marks a comment as unread.
func (client *Client) CommentMarkUnread(project_id, key_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s/read", project_id, key_id, id)
	rc, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	// Close error is intentionally ignored, matching the rest of the client.
	rc.Close()
	return nil
}
// CommentShow fetches details on a single comment.
func (client *Client) CommentShow(project_id, key_id, id string) (*Comment, error) {
	comment := new(Comment)
	rc, err := client.sendRequest("GET", fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s", project_id, key_id, id), "", nil, 200)
	if err != nil {
		return comment, err
	}
	defer rc.Close()
	var src io.Reader = rc
	if Debug {
		src = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(src).Decode(&comment)
	return comment, err
}
// Update an existing comment.
func (client *Client) CommentUpdate(project_id, key_id, id string, params *CommentParams) (*Comment, error) {
retVal := new(Comment)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s", project_id, key_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// CommentsList retrieves one page of the comments on a key.
func (client *Client) CommentsList(project_id, key_id string, page, perPage int) ([]*Comment, error) {
	comments := []*Comment{}
	rc, err := client.sendRequestPaginated("GET", fmt.Sprintf("/v2/projects/%s/keys/%s/comments", project_id, key_id), "", nil, 200, page, perPage)
	if err != nil {
		return comments, err
	}
	defer rc.Close()
	var src io.Reader = rc
	if Debug {
		src = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(src).Decode(&comments)
	return comments, err
}
// FormatsList retrieves one page of the localization file formats supported
// by PhraseApp.
func (client *Client) FormatsList(page, perPage int) ([]*Format, error) {
	formats := []*Format{}
	rc, err := client.sendRequestPaginated("GET", "/v2/formats", "", nil, 200, page, perPage)
	if err != nil {
		return formats, err
	}
	defer rc.Close()
	var src io.Reader = rc
	if Debug {
		src = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(src).Decode(&formats)
	return formats, err
}
// KeyCreate creates a new translation key in the given project. The params
// are sent as a multipart form so that an optional screenshot file can be
// attached.
//
// Fixes over the generated original: the error from WriteField("utf8", ...)
// was assigned but never checked; the error from writer.Close() (which
// writes the terminating multipart boundary) was ignored; and the
// screenshot file handle leaked when io.Copy failed.
func (client *Client) KeyCreate(project_id string, params *TranslationKeyParams) (*TranslationKeyDetails, error) {
	retVal := new(TranslationKeyDetails)
	err := func() error {
		url := fmt.Sprintf("/v2/projects/%s/keys", project_id)
		paramsBuf := bytes.NewBuffer(nil)
		writer := multipart.NewWriter(paramsBuf)
		ctype := writer.FormDataContentType()
		if params.DataType != nil {
			if err := writer.WriteField("data_type", *params.DataType); err != nil {
				return err
			}
		}
		if params.Description != nil {
			if err := writer.WriteField("description", *params.Description); err != nil {
				return err
			}
		}
		if params.LocalizedFormatKey != nil {
			if err := writer.WriteField("localized_format_key", *params.LocalizedFormatKey); err != nil {
				return err
			}
		}
		if params.LocalizedFormatString != nil {
			if err := writer.WriteField("localized_format_string", *params.LocalizedFormatString); err != nil {
				return err
			}
		}
		if params.MaxCharactersAllowed != nil {
			if err := writer.WriteField("max_characters_allowed", strconv.FormatInt(*params.MaxCharactersAllowed, 10)); err != nil {
				return err
			}
		}
		if params.Name != nil {
			if err := writer.WriteField("name", *params.Name); err != nil {
				return err
			}
		}
		if params.NamePlural != nil {
			if err := writer.WriteField("name_plural", *params.NamePlural); err != nil {
				return err
			}
		}
		if params.OriginalFile != nil {
			if err := writer.WriteField("original_file", *params.OriginalFile); err != nil {
				return err
			}
		}
		if params.Plural != nil {
			if err := writer.WriteField("plural", strconv.FormatBool(*params.Plural)); err != nil {
				return err
			}
		}
		if params.RemoveScreenshot != nil {
			if err := writer.WriteField("remove_screenshot", strconv.FormatBool(*params.RemoveScreenshot)); err != nil {
				return err
			}
		}
		if params.Screenshot != nil {
			part, err := writer.CreateFormFile("screenshot", filepath.Base(*params.Screenshot))
			if err != nil {
				return err
			}
			file, err := os.Open(*params.Screenshot)
			if err != nil {
				return err
			}
			if _, err := io.Copy(part, file); err != nil {
				file.Close() // fix: don't leak the handle when the copy fails
				return err
			}
			if err := file.Close(); err != nil {
				return err
			}
		}
		if params.Tags != nil {
			if err := writer.WriteField("tags", *params.Tags); err != nil {
				return err
			}
		}
		if params.Unformatted != nil {
			if err := writer.WriteField("unformatted", strconv.FormatBool(*params.Unformatted)); err != nil {
				return err
			}
		}
		if params.XmlSpacePreserve != nil {
			if err := writer.WriteField("xml_space_preserve", strconv.FormatBool(*params.XmlSpacePreserve)); err != nil {
				return err
			}
		}
		// fix: this error was previously assigned but never checked.
		if err := writer.WriteField("utf8", "✓"); err != nil {
			return err
		}
		// fix: Close writes the terminating boundary; check its error so a
		// truncated body is not silently sent.
		if err := writer.Close(); err != nil {
			return err
		}
		rc, err := client.sendRequest("POST", url, ctype, paramsBuf, 201)
		if err != nil {
			return err
		}
		defer rc.Close()
		var reader io.Reader = rc
		if Debug {
			reader = io.TeeReader(rc, os.Stderr)
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}()
	return retVal, err
}
// KeyDelete deletes an existing key.
func (client *Client) KeyDelete(project_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/keys/%s", project_id, id)
	rc, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	// Close error is intentionally ignored, matching the rest of the client.
	rc.Close()
	return nil
}
// KeyShow fetches details on a single key of a project.
func (client *Client) KeyShow(project_id, id string) (*TranslationKeyDetails, error) {
	key := new(TranslationKeyDetails)
	rc, err := client.sendRequest("GET", fmt.Sprintf("/v2/projects/%s/keys/%s", project_id, id), "", nil, 200)
	if err != nil {
		return key, err
	}
	defer rc.Close()
	var src io.Reader = rc
	if Debug {
		src = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(src).Decode(&key)
	return key, err
}
// KeyUpdate updates an existing translation key. The params are sent as a
// multipart form so that an optional screenshot file can be attached.
//
// Fixes over the generated original: the error from WriteField("utf8", ...)
// was assigned but never checked; the error from writer.Close() (which
// writes the terminating multipart boundary) was ignored; and the
// screenshot file handle leaked when io.Copy failed.
func (client *Client) KeyUpdate(project_id, id string, params *TranslationKeyParams) (*TranslationKeyDetails, error) {
	retVal := new(TranslationKeyDetails)
	err := func() error {
		url := fmt.Sprintf("/v2/projects/%s/keys/%s", project_id, id)
		paramsBuf := bytes.NewBuffer(nil)
		writer := multipart.NewWriter(paramsBuf)
		ctype := writer.FormDataContentType()
		if params.DataType != nil {
			if err := writer.WriteField("data_type", *params.DataType); err != nil {
				return err
			}
		}
		if params.Description != nil {
			if err := writer.WriteField("description", *params.Description); err != nil {
				return err
			}
		}
		if params.LocalizedFormatKey != nil {
			if err := writer.WriteField("localized_format_key", *params.LocalizedFormatKey); err != nil {
				return err
			}
		}
		if params.LocalizedFormatString != nil {
			if err := writer.WriteField("localized_format_string", *params.LocalizedFormatString); err != nil {
				return err
			}
		}
		if params.MaxCharactersAllowed != nil {
			if err := writer.WriteField("max_characters_allowed", strconv.FormatInt(*params.MaxCharactersAllowed, 10)); err != nil {
				return err
			}
		}
		if params.Name != nil {
			if err := writer.WriteField("name", *params.Name); err != nil {
				return err
			}
		}
		if params.NamePlural != nil {
			if err := writer.WriteField("name_plural", *params.NamePlural); err != nil {
				return err
			}
		}
		if params.OriginalFile != nil {
			if err := writer.WriteField("original_file", *params.OriginalFile); err != nil {
				return err
			}
		}
		if params.Plural != nil {
			if err := writer.WriteField("plural", strconv.FormatBool(*params.Plural)); err != nil {
				return err
			}
		}
		if params.RemoveScreenshot != nil {
			if err := writer.WriteField("remove_screenshot", strconv.FormatBool(*params.RemoveScreenshot)); err != nil {
				return err
			}
		}
		if params.Screenshot != nil {
			part, err := writer.CreateFormFile("screenshot", filepath.Base(*params.Screenshot))
			if err != nil {
				return err
			}
			file, err := os.Open(*params.Screenshot)
			if err != nil {
				return err
			}
			if _, err := io.Copy(part, file); err != nil {
				file.Close() // fix: don't leak the handle when the copy fails
				return err
			}
			if err := file.Close(); err != nil {
				return err
			}
		}
		if params.Tags != nil {
			if err := writer.WriteField("tags", *params.Tags); err != nil {
				return err
			}
		}
		if params.Unformatted != nil {
			if err := writer.WriteField("unformatted", strconv.FormatBool(*params.Unformatted)); err != nil {
				return err
			}
		}
		if params.XmlSpacePreserve != nil {
			if err := writer.WriteField("xml_space_preserve", strconv.FormatBool(*params.XmlSpacePreserve)); err != nil {
				return err
			}
		}
		// fix: this error was previously assigned but never checked.
		if err := writer.WriteField("utf8", "✓"); err != nil {
			return err
		}
		// fix: Close writes the terminating boundary; check its error so a
		// truncated body is not silently sent.
		if err := writer.Close(); err != nil {
			return err
		}
		rc, err := client.sendRequest("PATCH", url, ctype, paramsBuf, 200)
		if err != nil {
			return err
		}
		defer rc.Close()
		var reader io.Reader = rc
		if Debug {
			reader = io.TeeReader(rc, os.Stderr)
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}()
	return retVal, err
}
// KeysDeleteParams narrows which keys a bulk delete affects.
type KeysDeleteParams struct {
	LocaleID *string `json:"locale_id,omitempty"`
	Q *string `json:"q,omitempty"`
}

// ApplyDefaults builds a fresh KeysDeleteParams from the given defaults map
// via a JSON round-trip; on error the receiver is returned unchanged.
func (params *KeysDeleteParams) ApplyDefaults(defaults map[string]interface{}) (*KeysDeleteParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(KeysDeleteParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// Delete all keys matching query. Same constraints as list.
func (client *Client) KeysDelete(project_id string, params *KeysDeleteParams) (*AffectedResources, error) {
retVal := new(AffectedResources)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("DELETE", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// KeysListParams filters and orders a key listing.
type KeysListParams struct {
	LocaleID *string `json:"locale_id,omitempty"`
	Order *string `json:"order,omitempty"`
	Q *string `json:"q,omitempty"`
	Sort *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a fresh KeysListParams from the given defaults map
// via a JSON round-trip; on error the receiver is returned unchanged.
func (params *KeysListParams) ApplyDefaults(defaults map[string]interface{}) (*KeysListParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(KeysListParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// List all keys for the given project. Alternatively you can POST requests to /search.
func (client *Client) KeysList(project_id string, page, perPage int, params *KeysListParams) ([]*TranslationKey, error) {
retVal := []*TranslationKey{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// KeysSearchParams filters and orders a key search.
type KeysSearchParams struct {
	LocaleID *string `json:"locale_id,omitempty"`
	Order *string `json:"order,omitempty"`
	Q *string `json:"q,omitempty"`
	Sort *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a fresh KeysSearchParams from the given defaults map
// via a JSON round-trip; on error the receiver is returned unchanged.
func (params *KeysSearchParams) ApplyDefaults(defaults map[string]interface{}) (*KeysSearchParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(KeysSearchParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// Search keys for the given project matching query.
func (client *Client) KeysSearch(project_id string, page, perPage int, params *KeysSearchParams) ([]*TranslationKey, error) {
retVal := []*TranslationKey{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/search", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("POST", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// KeysTagParams selects which keys a bulk tag operation affects and which
// tags to apply.
type KeysTagParams struct {
	LocaleID *string `json:"locale_id,omitempty"`
	Q *string `json:"q,omitempty"`
	Tags *string `json:"tags,omitempty"`
}

// ApplyDefaults builds a fresh KeysTagParams from the given defaults map via
// a JSON round-trip; on error the receiver is returned unchanged.
func (params *KeysTagParams) ApplyDefaults(defaults map[string]interface{}) (*KeysTagParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(KeysTagParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// Tags all keys matching query. Same constraints as list.
func (client *Client) KeysTag(project_id string, params *KeysTagParams) (*AffectedResources, error) {
retVal := new(AffectedResources)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/tag", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// KeysUntagParams selects which keys a bulk untag operation affects and
// which tags to remove.
type KeysUntagParams struct {
	LocaleID *string `json:"locale_id,omitempty"`
	Q *string `json:"q,omitempty"`
	Tags *string `json:"tags,omitempty"`
}

// ApplyDefaults builds a fresh KeysUntagParams from the given defaults map
// via a JSON round-trip; on error the receiver is returned unchanged.
func (params *KeysUntagParams) ApplyDefaults(defaults map[string]interface{}) (*KeysUntagParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(KeysUntagParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// Removes specified tags from keys matching query.
func (client *Client) KeysUntag(project_id string, params *KeysUntagParams) (*AffectedResources, error) {
retVal := new(AffectedResources)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/untag", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Create a new locale.
func (client *Client) LocaleCreate(project_id string, params *LocaleParams) (*LocaleDetails, error) {
retVal := new(LocaleDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// LocaleDelete deletes an existing locale.
func (client *Client) LocaleDelete(project_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/locales/%s", project_id, id)
	rc, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	// Close error is intentionally ignored, matching the rest of the client.
	rc.Close()
	return nil
}
// LocaleDownloadParams configures how a locale is rendered when downloaded.
type LocaleDownloadParams struct {
	ConvertEmoji bool `json:"convert_emoji,omitempty"`
	Encoding *string `json:"encoding,omitempty"`
	FallbackLocaleID *string `json:"fallback_locale_id,omitempty"`
	FileFormat *string `json:"file_format,omitempty"`
	FormatOptions *map[string]interface{} `json:"format_options,omitempty"`
	IncludeEmptyTranslations bool `json:"include_empty_translations,omitempty"`
	KeepNotranslateTags bool `json:"keep_notranslate_tags,omitempty"`
	SkipUnverifiedTranslations bool `json:"skip_unverified_translations,omitempty"`
	Tag *string `json:"tag,omitempty"`
}

// ApplyDefaults builds a fresh LocaleDownloadParams from the given defaults
// map via a JSON round-trip; on error the receiver is returned unchanged.
func (params *LocaleDownloadParams) ApplyDefaults(defaults map[string]interface{}) (*LocaleDownloadParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	merged := new(LocaleDownloadParams)
	if err := json.Unmarshal(raw, merged); err != nil {
		return params, err
	}
	return merged, nil
}
// Download a locale in a specific file format.
func (client *Client) LocaleDownload(project_id, id string, params *LocaleDownloadParams) ([]byte, error) {
retVal := []byte{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s/download", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("GET", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
retVal, err = ioutil.ReadAll(reader)
return err
}()
return retVal, err
}
// LocaleShow fetches details on a single locale of a project.
func (client *Client) LocaleShow(project_id, id string) (*LocaleDetails, error) {
	locale := new(LocaleDetails)
	rc, err := client.sendRequest("GET", fmt.Sprintf("/v2/projects/%s/locales/%s", project_id, id), "", nil, 200)
	if err != nil {
		return locale, err
	}
	defer rc.Close()
	var src io.Reader = rc
	if Debug {
		src = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(src).Decode(&locale)
	return locale, err
}
// Update an existing locale.
func (client *Client) LocaleUpdate(project_id, id string, params *LocaleParams) (*LocaleDetails, error) {
retVal := new(LocaleDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// LocalesList retrieves one page of the project's locales.
func (client *Client) LocalesList(project_id string, page, perPage int) ([]*Locale, error) {
	locales := []*Locale{}
	rc, err := client.sendRequestPaginated("GET", fmt.Sprintf("/v2/projects/%s/locales", project_id), "", nil, 200, page, perPage)
	if err != nil {
		return locales, err
	}
	defer rc.Close()
	var src io.Reader = rc
	if Debug {
		src = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(src).Decode(&locales)
	return locales, err
}
// OrderConfirm confirms an existing order and sends it to the provider for
// translation; the same constraints apply as for order creation.
func (client *Client) OrderConfirm(project_id, id string) (*TranslationOrder, error) {
	order := new(TranslationOrder)
	path := fmt.Sprintf("/v2/projects/%s/orders/%s/confirm", project_id, id)
	rc, err := client.sendRequest("PATCH", path, "", nil, 200)
	if err != nil {
		return order, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&order)
	return order, err
}
// Create a new order. Access token scope must include <code>orders.create</code>.
func (client *Client) OrderCreate(project_id string, params *TranslationOrderParams) (*TranslationOrder, error) {
retVal := new(TranslationOrder)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/orders", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// OrderDelete cancels an existing order; it must not yet be confirmed.
func (client *Client) OrderDelete(project_id, id string) error {
	path := fmt.Sprintf("/v2/projects/%s/orders/%s", project_id, id)
	rc, err := client.sendRequest("DELETE", path, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// OrderShow fetches details on a single order.
func (client *Client) OrderShow(project_id, id string) (*TranslationOrder, error) {
	order := new(TranslationOrder)
	path := fmt.Sprintf("/v2/projects/%s/orders/%s", project_id, id)
	rc, err := client.sendRequest("GET", path, "", nil, 200)
	if err != nil {
		return order, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&order)
	return order, err
}
// OrdersList returns one page of orders for the given project.
func (client *Client) OrdersList(project_id string, page, perPage int) ([]*TranslationOrder, error) {
	orders := []*TranslationOrder{}
	path := fmt.Sprintf("/v2/projects/%s/orders", project_id)
	rc, err := client.sendRequestPaginated("GET", path, "", nil, 200, page, perPage)
	if err != nil {
		return orders, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&orders)
	return orders, err
}
// Create a new project.
func (client *Client) ProjectCreate(params *ProjectParams) (*ProjectDetails, error) {
retVal := new(ProjectDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects")
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// ProjectDelete deletes an existing project.
func (client *Client) ProjectDelete(id string) error {
	path := fmt.Sprintf("/v2/projects/%s", id)
	rc, err := client.sendRequest("DELETE", path, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// ProjectShow fetches details on a single project.
func (client *Client) ProjectShow(id string) (*ProjectDetails, error) {
	project := new(ProjectDetails)
	path := fmt.Sprintf("/v2/projects/%s", id)
	rc, err := client.sendRequest("GET", path, "", nil, 200)
	if err != nil {
		return project, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&project)
	return project, err
}
// Update an existing project.
func (client *Client) ProjectUpdate(id string, params *ProjectParams) (*ProjectDetails, error) {
retVal := new(ProjectDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s", id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// ProjectsList returns one page of the projects the current user can access.
func (client *Client) ProjectsList(page, perPage int) ([]*Project, error) {
	retVal := []*Project{}
	// fmt.Sprintf with a constant format and no verbs is pointless; use the
	// literal path.
	url := "/v2/projects"
	rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	return retVal, json.NewDecoder(reader).Decode(&retVal)
}
// ShowUser fetches details for the current user.
func (client *Client) ShowUser() (*User, error) {
	retVal := new(User)
	// fmt.Sprintf with a constant format and no verbs is pointless; use the
	// literal path.
	url := "/v2/user"
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	return retVal, json.NewDecoder(reader).Decode(&retVal)
}
// Create a new style guide.
func (client *Client) StyleguideCreate(project_id string, params *StyleguideParams) (*StyleguideDetails, error) {
retVal := new(StyleguideDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/styleguides", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// StyleguideDelete deletes an existing style guide.
func (client *Client) StyleguideDelete(project_id, id string) error {
	path := fmt.Sprintf("/v2/projects/%s/styleguides/%s", project_id, id)
	rc, err := client.sendRequest("DELETE", path, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// StyleguideShow fetches details on a single style guide.
func (client *Client) StyleguideShow(project_id, id string) (*StyleguideDetails, error) {
	guide := new(StyleguideDetails)
	path := fmt.Sprintf("/v2/projects/%s/styleguides/%s", project_id, id)
	rc, err := client.sendRequest("GET", path, "", nil, 200)
	if err != nil {
		return guide, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&guide)
	return guide, err
}
// Update an existing style guide.
func (client *Client) StyleguideUpdate(project_id, id string, params *StyleguideParams) (*StyleguideDetails, error) {
retVal := new(StyleguideDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/styleguides/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// StyleguidesList returns one page of style guides for the given project.
func (client *Client) StyleguidesList(project_id string, page, perPage int) ([]*Styleguide, error) {
	guides := []*Styleguide{}
	path := fmt.Sprintf("/v2/projects/%s/styleguides", project_id)
	rc, err := client.sendRequestPaginated("GET", path, "", nil, 200, page, perPage)
	if err != nil {
		return guides, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&guides)
	return guides, err
}
// Create a new tag.
func (client *Client) TagCreate(project_id string, params *TagParams) (*TagWithStats, error) {
retVal := new(TagWithStats)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/tags", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TagDelete deletes an existing tag.
func (client *Client) TagDelete(project_id, name string) error {
	path := fmt.Sprintf("/v2/projects/%s/tags/%s", project_id, name)
	rc, err := client.sendRequest("DELETE", path, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// TagShow fetches details and progress information on a single tag of the
// given project.
func (client *Client) TagShow(project_id, name string) (*TagWithStats, error) {
	tag := new(TagWithStats)
	path := fmt.Sprintf("/v2/projects/%s/tags/%s", project_id, name)
	rc, err := client.sendRequest("GET", path, "", nil, 200)
	if err != nil {
		return tag, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&tag)
	return tag, err
}
// TagsList returns one page of tags for the given project.
func (client *Client) TagsList(project_id string, page, perPage int) ([]*Tag, error) {
	tags := []*Tag{}
	path := fmt.Sprintf("/v2/projects/%s/tags", project_id)
	rc, err := client.sendRequestPaginated("GET", path, "", nil, 200, page, perPage)
	if err != nil {
		return tags, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&tags)
	return tags, err
}
// Create a translation.
func (client *Client) TranslationCreate(project_id string, params *TranslationParams) (*TranslationDetails, error) {
retVal := new(TranslationDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationShow fetches details on a single translation.
func (client *Client) TranslationShow(project_id, id string) (*TranslationDetails, error) {
	translation := new(TranslationDetails)
	path := fmt.Sprintf("/v2/projects/%s/translations/%s", project_id, id)
	rc, err := client.sendRequest("GET", path, "", nil, 200)
	if err != nil {
		return translation, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&translation)
	return translation, err
}
// TranslationUpdateParams holds the optional fields for updating a
// translation; nil fields are omitted from the request payload.
type TranslationUpdateParams struct {
	Content      *string `json:"content,omitempty"`
	Excluded     *bool   `json:"excluded,omitempty"`
	PluralSuffix *string `json:"plural_suffix,omitempty"`
	Unverified   *bool   `json:"unverified,omitempty"`
}

// ApplyDefaults builds a parameter set from the given defaults map by
// round-tripping it through JSON. On success only the defaults are returned;
// on failure the receiver is returned together with the error.
func (params *TranslationUpdateParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationUpdateParams, error) {
	encoded, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	fromDefaults := new(TranslationUpdateParams)
	if err := json.Unmarshal(encoded, fromDefaults); err != nil {
		return params, err
	}
	return fromDefaults, nil
}
// Update an existing translation.
func (client *Client) TranslationUpdate(project_id, id string, params *TranslationUpdateParams) (*TranslationDetails, error) {
retVal := new(TranslationDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsByKeyParams holds the optional filter/sort options for listing
// translations of a key; nil fields are omitted from the request payload.
type TranslationsByKeyParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a parameter set from the given defaults map by
// round-tripping it through JSON. On success only the defaults are returned;
// on failure the receiver is returned together with the error.
func (params *TranslationsByKeyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsByKeyParams, error) {
	encoded, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	fromDefaults := new(TranslationsByKeyParams)
	if err := json.Unmarshal(encoded, fromDefaults); err != nil {
		return params, err
	}
	return fromDefaults, nil
}
// List translations for a specific key.
func (client *Client) TranslationsByKey(project_id, key_id string, page, perPage int, params *TranslationsByKeyParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/translations", project_id, key_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsByLocaleParams holds the optional filter/sort options for
// listing translations of a locale; nil fields are omitted from the payload.
type TranslationsByLocaleParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a parameter set from the given defaults map by
// round-tripping it through JSON. On success only the defaults are returned;
// on failure the receiver is returned together with the error.
func (params *TranslationsByLocaleParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsByLocaleParams, error) {
	encoded, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	fromDefaults := new(TranslationsByLocaleParams)
	if err := json.Unmarshal(encoded, fromDefaults); err != nil {
		return params, err
	}
	return fromDefaults, nil
}
// List translations for a specific locale. If you want to download all translations for one locale we recommend to use the <code>locales#download</code> endpoint.
func (client *Client) TranslationsByLocale(project_id, locale_id string, page, perPage int, params *TranslationsByLocaleParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s/translations", project_id, locale_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsExcludeParams holds the optional query options selecting which
// translations to exclude; nil fields are omitted from the payload.
type TranslationsExcludeParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a parameter set from the given defaults map by
// round-tripping it through JSON. On success only the defaults are returned;
// on failure the receiver is returned together with the error.
func (params *TranslationsExcludeParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsExcludeParams, error) {
	encoded, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	fromDefaults := new(TranslationsExcludeParams)
	if err := json.Unmarshal(encoded, fromDefaults); err != nil {
		return params, err
	}
	return fromDefaults, nil
}
// Exclude translations matching query from locale export.
func (client *Client) TranslationsExclude(project_id string, params *TranslationsExcludeParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/exclude", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsIncludeParams holds the optional query options selecting which
// translations to include; nil fields are omitted from the payload.
type TranslationsIncludeParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a parameter set from the given defaults map by
// round-tripping it through JSON. On success only the defaults are returned;
// on failure the receiver is returned together with the error.
func (params *TranslationsIncludeParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsIncludeParams, error) {
	encoded, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	fromDefaults := new(TranslationsIncludeParams)
	if err := json.Unmarshal(encoded, fromDefaults); err != nil {
		return params, err
	}
	return fromDefaults, nil
}
// Include translations matching query in locale export.
func (client *Client) TranslationsInclude(project_id string, params *TranslationsIncludeParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/include", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsListParams holds the optional filter/sort options for listing
// translations; nil fields are omitted from the payload.
type TranslationsListParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a parameter set from the given defaults map by
// round-tripping it through JSON. On success only the defaults are returned;
// on failure the receiver is returned together with the error.
func (params *TranslationsListParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsListParams, error) {
	encoded, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	fromDefaults := new(TranslationsListParams)
	if err := json.Unmarshal(encoded, fromDefaults); err != nil {
		return params, err
	}
	return fromDefaults, nil
}
// List translations for the given project. If you want to download all translations for one locale we recommend to use the <code>locales#download</code> endpoint.
func (client *Client) TranslationsList(project_id string, page, perPage int, params *TranslationsListParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsSearchParams holds the optional query options for searching
// translations; nil fields are omitted from the payload.
type TranslationsSearchParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a parameter set from the given defaults map by
// round-tripping it through JSON. On success only the defaults are returned;
// on failure the receiver is returned together with the error.
func (params *TranslationsSearchParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsSearchParams, error) {
	encoded, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	fromDefaults := new(TranslationsSearchParams)
	if err := json.Unmarshal(encoded, fromDefaults); err != nil {
		return params, err
	}
	return fromDefaults, nil
}
// List translations for the given project if you exceed GET request limitations on translations list. If you want to download all translations for one locale we recommend to use the <code>locales#download</code> endpoint.
func (client *Client) TranslationsSearch(project_id string, page, perPage int, params *TranslationsSearchParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/search", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("POST", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsUnverifyParams holds the optional query options selecting
// which translations to mark unverified; nil fields are omitted.
type TranslationsUnverifyParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a parameter set from the given defaults map by
// round-tripping it through JSON. On success only the defaults are returned;
// on failure the receiver is returned together with the error.
func (params *TranslationsUnverifyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsUnverifyParams, error) {
	encoded, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	fromDefaults := new(TranslationsUnverifyParams)
	if err := json.Unmarshal(encoded, fromDefaults); err != nil {
		return params, err
	}
	return fromDefaults, nil
}
// Mark translations matching query as unverified.
func (client *Client) TranslationsUnverify(project_id string, params *TranslationsUnverifyParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/unverify", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsVerifyParams holds the optional query options selecting which
// translations to verify; nil fields are omitted.
type TranslationsVerifyParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a parameter set from the given defaults map by
// round-tripping it through JSON. On success only the defaults are returned;
// on failure the receiver is returned together with the error.
func (params *TranslationsVerifyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsVerifyParams, error) {
	encoded, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	fromDefaults := new(TranslationsVerifyParams)
	if err := json.Unmarshal(encoded, fromDefaults); err != nil {
		return params, err
	}
	return fromDefaults, nil
}
// Verify translations matching query.
func (client *Client) TranslationsVerify(project_id string, params *TranslationsVerifyParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/verify", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// UploadCreate uploads a new language file and creates the necessary
// resources in the project. All set params fields are written into a
// multipart form; the file named by params.File (if any) is copied into the
// request body.
func (client *Client) UploadCreate(project_id string, params *UploadParams) (*Upload, error) {
	retVal := new(Upload)
	err := func() error {
		url := fmt.Sprintf("/v2/projects/%s/uploads", project_id)
		paramsBuf := bytes.NewBuffer(nil)
		writer := multipart.NewWriter(paramsBuf)
		ctype := writer.FormDataContentType()
		if params.ConvertEmoji != nil {
			if err := writer.WriteField("convert_emoji", strconv.FormatBool(*params.ConvertEmoji)); err != nil {
				return err
			}
		}
		if params.File != nil {
			part, err := writer.CreateFormFile("file", filepath.Base(*params.File))
			if err != nil {
				return err
			}
			file, err := os.Open(*params.File)
			if err != nil {
				return err
			}
			if _, err := io.Copy(part, file); err != nil {
				// Fix: the original leaked the file handle on this path.
				file.Close()
				return err
			}
			if err := file.Close(); err != nil {
				return err
			}
		}
		if params.FileEncoding != nil {
			if err := writer.WriteField("file_encoding", *params.FileEncoding); err != nil {
				return err
			}
		}
		if params.FileFormat != nil {
			if err := writer.WriteField("file_format", *params.FileFormat); err != nil {
				return err
			}
		}
		if params.LocaleID != nil {
			if err := writer.WriteField("locale_id", *params.LocaleID); err != nil {
				return err
			}
		}
		if params.SkipUnverification != nil {
			if err := writer.WriteField("skip_unverification", strconv.FormatBool(*params.SkipUnverification)); err != nil {
				return err
			}
		}
		if params.SkipUploadTags != nil {
			if err := writer.WriteField("skip_upload_tags", strconv.FormatBool(*params.SkipUploadTags)); err != nil {
				return err
			}
		}
		if params.Tags != nil {
			if err := writer.WriteField("tags", *params.Tags); err != nil {
				return err
			}
		}
		if params.UpdateTranslations != nil {
			if err := writer.WriteField("update_translations", strconv.FormatBool(*params.UpdateTranslations)); err != nil {
				return err
			}
		}
		// Fix: the original silently discarded the errors of this WriteField
		// and of writer.Close; both finalize the multipart body.
		if err := writer.WriteField("utf8", "✓"); err != nil {
			return err
		}
		if err := writer.Close(); err != nil {
			return err
		}
		rc, err := client.sendRequest("POST", url, ctype, paramsBuf, 201)
		if err != nil {
			return err
		}
		defer rc.Close()
		// In debug mode, mirror the response body to stderr while decoding.
		var reader io.Reader = rc
		if Debug {
			reader = io.TeeReader(rc, os.Stderr)
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}()
	return retVal, err
}
// UploadShow fetches details and a summary for a single upload.
func (client *Client) UploadShow(project_id, id string) (*Upload, error) {
	upload := new(Upload)
	path := fmt.Sprintf("/v2/projects/%s/uploads/%s", project_id, id)
	rc, err := client.sendRequest("GET", path, "", nil, 200)
	if err != nil {
		return upload, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&upload)
	return upload, err
}
// UploadsList returns one page of uploads for the given project.
func (client *Client) UploadsList(project_id string, page, perPage int) ([]*Upload, error) {
	uploads := []*Upload{}
	path := fmt.Sprintf("/v2/projects/%s/uploads", project_id)
	rc, err := client.sendRequestPaginated("GET", path, "", nil, 200, page, perPage)
	if err != nil {
		return uploads, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&uploads)
	return uploads, err
}
// VersionShow fetches details on a single version of a translation.
func (client *Client) VersionShow(project_id, translation_id, id string) (*TranslationVersionWithUser, error) {
	version := new(TranslationVersionWithUser)
	path := fmt.Sprintf("/v2/projects/%s/translations/%s/versions/%s", project_id, translation_id, id)
	rc, err := client.sendRequest("GET", path, "", nil, 200)
	if err != nil {
		return version, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&version)
	return version, err
}
// VersionsList returns one page of versions for the given translation.
func (client *Client) VersionsList(project_id, translation_id string, page, perPage int) ([]*TranslationVersion, error) {
	versions := []*TranslationVersion{}
	path := fmt.Sprintf("/v2/projects/%s/translations/%s/versions", project_id, translation_id)
	rc, err := client.sendRequestPaginated("GET", path, "", nil, 200, page, perPage)
	if err != nil {
		return versions, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&versions)
	return versions, err
}
// Create a new webhook.
func (client *Client) WebhookCreate(project_id string, params *WebhookParams) (*Webhook, error) {
retVal := new(Webhook)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/webhooks", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// WebhookDelete deletes an existing webhook.
func (client *Client) WebhookDelete(project_id, id string) error {
	path := fmt.Sprintf("/v2/projects/%s/webhooks/%s", project_id, id)
	rc, err := client.sendRequest("DELETE", path, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// WebhookShow fetches details on a single webhook.
func (client *Client) WebhookShow(project_id, id string) (*Webhook, error) {
	hook := new(Webhook)
	path := fmt.Sprintf("/v2/projects/%s/webhooks/%s", project_id, id)
	rc, err := client.sendRequest("GET", path, "", nil, 200)
	if err != nil {
		return hook, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&hook)
	return hook, err
}
// WebhookTest performs a test request for a webhook.
func (client *Client) WebhookTest(project_id, id string) error {
	path := fmt.Sprintf("/v2/projects/%s/webhooks/%s/test", project_id, id)
	rc, err := client.sendRequest("POST", path, "", nil, 200)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// Update an existing webhook.
func (client *Client) WebhookUpdate(project_id, id string, params *WebhookParams) (*Webhook, error) {
retVal := new(Webhook)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/webhooks/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// WebhooksList returns one page of webhooks for the given project.
func (client *Client) WebhooksList(project_id string, page, perPage int) ([]*Webhook, error) {
	hooks := []*Webhook{}
	path := fmt.Sprintf("/v2/projects/%s/webhooks", project_id)
	rc, err := client.sendRequestPaginated("GET", path, "", nil, 200, page, perPage)
	if err != nil {
		return hooks, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&hooks)
	return hooks, err
}
// GetUserAgent reports the user-agent string this client identifies itself
// with. (Name kept for API compatibility despite the Get prefix.)
func GetUserAgent() string {
	const userAgent = "PhraseApp go (1.1.7)"
	return userAgent
}
|
package phraseapp
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"os"
"path/filepath"
"strconv"
"time"
)
// AffectedCount reports how many records a bulk operation touched.
type AffectedCount struct {
	RecordsAffected int64 `json:"records_affected"`
}

// AffectedResources reports how many resources a bulk operation touched.
type AffectedResources struct {
	RecordsAffected int64 `json:"records_affected"`
}

// Authorization describes an API access token (without the secret itself;
// only a hash and the last eight characters are exposed).
type Authorization struct {
	CreatedAt time.Time `json:"created_at"`
	ExpiresAt time.Time `json:"expires_at"`
	HashedToken string `json:"hashed_token"`
	ID string `json:"id"`
	Note string `json:"note"`
	Scopes []string `json:"scopes"`
	TokenLastEight string `json:"token_last_eight"`
	UpdatedAt time.Time `json:"updated_at"`
}

// AuthorizationWithToken is an Authorization that additionally carries the
// plain token; returned only once, on creation.
type AuthorizationWithToken struct {
	Authorization
	Token string `json:"token"`
}

// BlacklistedKey is a key name excluded from a project.
type BlacklistedKey struct {
	CreatedAt time.Time `json:"created_at"`
	ID string `json:"id"`
	Name string `json:"name"`
	UpdatedAt time.Time `json:"updated_at"`
}

// Comment is a comment attached to a translation key.
type Comment struct {
	CreatedAt time.Time `json:"created_at"`
	ID string `json:"id"`
	Message string `json:"message"`
	UpdatedAt time.Time `json:"updated_at"`
	User *UserPreview `json:"user"`
}

// ExcludeRule names a pattern of keys excluded from a project.
type ExcludeRule struct {
	CreatedAt time.Time `json:"created_at"`
	ID string `json:"id"`
	Name string `json:"name"`
	UpdatedAt time.Time `json:"updated_at"`
}

// Format describes a localization file format supported by the service.
type Format struct {
	ApiName string `json:"api_name"`
	DefaultEncoding string `json:"default_encoding"`
	DefaultFile string `json:"default_file"`
	Description string `json:"description"`
	Exportable bool `json:"exportable"`
	Extension string `json:"extension"`
	Importable bool `json:"importable"`
	Name string `json:"name"`
}

// KeyPreview is a minimal reference to a translation key.
type KeyPreview struct {
	ID string `json:"id"`
	Name string `json:"name"`
	Plural bool `json:"plural"`
}

// Locale is a project locale (language/region) with its metadata.
type Locale struct {
	Code string `json:"code"`
	CreatedAt time.Time `json:"created_at"`
	Default bool `json:"default"`
	ID string `json:"id"`
	Main bool `json:"main"`
	Name string `json:"name"`
	PluralForms []string `json:"plural_forms"`
	Rtl bool `json:"rtl"`
	SourceLocale *LocalePreview `json:"source_locale"`
	UpdatedAt time.Time `json:"updated_at"`
}

// LocaleDetails is a Locale extended with translation statistics.
type LocaleDetails struct {
	Locale
	Statistics *LocaleStatistics `json:"statistics"`
}

// LocaleFileImport describes an upload of a locale file.
type LocaleFileImport struct {
	CreatedAt time.Time `json:"created_at"`
	FileFormat string `json:"file_format"`
	ID string `json:"id"`
	State string `json:"state"`
	UpdatedAt time.Time `json:"updated_at"`
}

// LocaleFileImportWithSummary is a LocaleFileImport plus counts of what
// the import created or updated.
type LocaleFileImportWithSummary struct {
	LocaleFileImport
	Summary SummaryType `json:"summary"`
}

// LocalePreview is a minimal reference to a locale.
type LocalePreview struct {
	Code string `json:"code"`
	ID string `json:"id"`
	Name string `json:"name"`
}

// LocaleStatistics aggregates translation progress counters for a locale.
type LocaleStatistics struct {
	KeysTotalCount int64 `json:"keys_total_count"`
	KeysUntranslatedCount int64 `json:"keys_untranslated_count"`
	MissingWordsCount int64 `json:"missing_words_count"`
	TranslationsCompletedCount int64 `json:"translations_completed_count"`
	TranslationsUnverifiedCount int64 `json:"translations_unverified_count"`
	UnverifiedWordsCount int64 `json:"unverified_words_count"`
	WordsTotalCount int64 `json:"words_total_count"`
}
// Project is a PhraseApp project.
type Project struct {
	CreatedAt time.Time `json:"created_at"`
	ID string `json:"id"`
	MainFormat string `json:"main_format"`
	Name string `json:"name"`
	UpdatedAt time.Time `json:"updated_at"`
}

// ProjectDetails is a Project with additional settings.
type ProjectDetails struct {
	Project
	SharesTranslationMemory bool `json:"shares_translation_memory"`
}

// StatisticsListItem pairs a locale with its statistics.
type StatisticsListItem struct {
	Locale *LocalePreview `json:"locale"`
	Statistics StatisticsType `json:"statistics"`
}

// StatisticsType holds per-locale translation progress counters.
type StatisticsType struct {
	KeysTotalCount int64 `json:"keys_total_count"`
	KeysUntranslatedCount int64 `json:"keys_untranslated_count"`
	TranslationsCompletedCount int64 `json:"translations_completed_count"`
	TranslationsUnverifiedCount int64 `json:"translations_unverified_count"`
}

// Styleguide is a style guide attached to a project.
type Styleguide struct {
	CreatedAt time.Time `json:"created_at"`
	ID string `json:"id"`
	Title string `json:"title"`
	UpdatedAt time.Time `json:"updated_at"`
}

// StyleguideDetails is a Styleguide with its full set of guidance fields.
type StyleguideDetails struct {
	Styleguide
	Audience string `json:"audience"`
	Business string `json:"business"`
	CompanyBranding string `json:"company_branding"`
	Formatting string `json:"formatting"`
	GlossaryTerms string `json:"glossary_terms"`
	GrammarConsistency string `json:"grammar_consistency"`
	GrammaticalPerson string `json:"grammatical_person"`
	LiteralTranslation string `json:"literal_translation"`
	OverallTone string `json:"overall_tone"`
	PublicUrl string `json:"public_url"`
	Samples string `json:"samples"`
	TargetAudience string `json:"target_audience"`
	VocabularyType string `json:"vocabulary_type"`
}

// StyleguidePreview is a minimal reference to a style guide.
type StyleguidePreview struct {
	ID string `json:"id"`
	PublicUrl string `json:"public_url"`
}

// SummaryType counts the entities created/updated by a locale file import.
type SummaryType struct {
	LocalesCreated int64 `json:"locales_created"`
	TagsCreated int64 `json:"tags_created"`
	TranslationKeysCreated int64 `json:"translation_keys_created"`
	TranslationsCreated int64 `json:"translations_created"`
	TranslationsUpdated int64 `json:"translations_updated"`
}

// Tag is a label applied to translation keys.
type Tag struct {
	CreatedAt time.Time `json:"created_at"`
	KeysCount int64 `json:"keys_count"`
	Name string `json:"name"`
	UpdatedAt time.Time `json:"updated_at"`
}

// TagWithStats is a Tag plus per-locale statistics.
type TagWithStats struct {
	Tag
	Statistics []*StatisticsListItem `json:"statistics"`
}

// Translation is a single translation of a key into a locale.
type Translation struct {
	Content string `json:"content"`
	CreatedAt time.Time `json:"created_at"`
	Excluded bool `json:"excluded"`
	ID string `json:"id"`
	Key *KeyPreview `json:"key"`
	Locale *LocalePreview `json:"locale"`
	Placeholders []string `json:"placeholders"`
	PluralSuffix string `json:"plural_suffix"`
	Unverified bool `json:"unverified"`
	UpdatedAt time.Time `json:"updated_at"`
}

// TranslationDetails is a Translation with author and word count.
type TranslationDetails struct {
	Translation
	User *UserPreview `json:"user"`
	WordCount int64 `json:"word_count"`
}

// TranslationKey is a translatable key within a project.
type TranslationKey struct {
	CreatedAt time.Time `json:"created_at"`
	DataType string `json:"data_type"`
	Description string `json:"description"`
	ID string `json:"id"`
	Name string `json:"name"`
	NameHash string `json:"name_hash"`
	Plural bool `json:"plural"`
	Tags []string `json:"tags"`
	UpdatedAt time.Time `json:"updated_at"`
}

// TranslationKeyDetails is a TranslationKey with extended attributes.
type TranslationKeyDetails struct {
	TranslationKey
	CommentsCount int64 `json:"comments_count"`
	FormatValueType string `json:"format_value_type"`
	MaxCharactersAllowed int64 `json:"max_characters_allowed"`
	NamePlural string `json:"name_plural"`
	OriginalFile string `json:"original_file"`
	ScreenshotUrl string `json:"screenshot_url"`
	Unformatted bool `json:"unformatted"`
	XmlSpacePreserve bool `json:"xml_space_preserve"`
}

// TranslationOrder is a professional translation order.
type TranslationOrder struct {
	AmountInCents int64 `json:"amount_in_cents"`
	CreatedAt time.Time `json:"created_at"`
	Currency string `json:"currency"`
	ID string `json:"id"`
	Lsp string `json:"lsp"`
	Message string `json:"message"`
	Priority bool `json:"priority"`
	ProgressPercent int64 `json:"progress_percent"`
	Quality bool `json:"quality"`
	SourceLocale *LocalePreview `json:"source_locale"`
	State string `json:"state"`
	Styleguide *StyleguidePreview `json:"styleguide"`
	Tag string `json:"tag"`
	TargetLocales []*LocalePreview `json:"target_locales"`
	TranslationType string `json:"translation_type"`
	UnverifyTranslationsUponDelivery bool `json:"unverify_translations_upon_delivery"`
	UpdatedAt time.Time `json:"updated_at"`
}

// TranslationVersion is a historical revision of a translation.
type TranslationVersion struct {
	ChangedAt time.Time `json:"changed_at"`
	Content string `json:"content"`
	CreatedAt time.Time `json:"created_at"`
	ID string `json:"id"`
	Key *KeyPreview `json:"key"`
	Locale *LocalePreview `json:"locale"`
	PluralSuffix string `json:"plural_suffix"`
	UpdatedAt time.Time `json:"updated_at"`
}

// TranslationVersionWithUser is a TranslationVersion plus its author.
type TranslationVersionWithUser struct {
	TranslationVersion
	User *UserPreview `json:"user"`
}

// User is a PhraseApp account.
type User struct {
	CreatedAt time.Time `json:"created_at"`
	Email string `json:"email"`
	ID string `json:"id"`
	Name string `json:"name"`
	Position string `json:"position"`
	UpdatedAt time.Time `json:"updated_at"`
	Username string `json:"username"`
}

// UserPreview is a minimal reference to a user.
type UserPreview struct {
	ID string `json:"id"`
	Name string `json:"name"`
	Username string `json:"username"`
}
// AuthorizationParams is the request payload for authorization endpoints.
type AuthorizationParams struct {
	ExpiresAt *time.Time `json:"expires_at,omitempty"`
	Note string `json:"note"`
	Scopes []string `json:"scopes,omitempty"`
}

// ApplyDefaults builds a fresh AuthorizationParams from the defaults map via
// a JSON round-trip. Note: the receiver's own values are not merged in — the
// result carries only what defaults supplies. On a marshalling error the
// receiver is returned unchanged together with the error.
func (params *AuthorizationParams) ApplyDefaults(defaults map[string]interface{}) (*AuthorizationParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(AuthorizationParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// CommentParams is the request payload for comment endpoints.
type CommentParams struct {
	Message string `json:"message"`
}

// ApplyDefaults builds a fresh CommentParams from the defaults map via a
// JSON round-trip; the receiver's own values are not merged in. On error
// the receiver is returned alongside the error.
func (params *CommentParams) ApplyDefaults(defaults map[string]interface{}) (*CommentParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(CommentParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// ExcludeRuleParams is the request payload for blacklisted-key endpoints.
type ExcludeRuleParams struct {
	Name string `json:"name"`
}

// ApplyDefaults builds a fresh ExcludeRuleParams from the defaults map via a
// JSON round-trip; the receiver's own values are not merged in. On error the
// receiver is returned alongside the error.
func (params *ExcludeRuleParams) ApplyDefaults(defaults map[string]interface{}) (*ExcludeRuleParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(ExcludeRuleParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// TranslationKeyParams is the request payload for key endpoints. Pointer
// fields are optional and omitted from the multipart/JSON body when nil.
type TranslationKeyParams struct {
	DataType *string `json:"data_type,omitempty"`
	Description *string `json:"description,omitempty"`
	LocalizedFormatKey *string `json:"localized_format_key,omitempty"`
	LocalizedFormatString *string `json:"localized_format_string,omitempty"`
	MaxCharactersAllowed *int64 `json:"max_characters_allowed,omitempty"`
	Name string `json:"name"`
	NamePlural *string `json:"name_plural,omitempty"`
	OriginalFile *string `json:"original_file,omitempty"`
	Plural *bool `json:"plural,omitempty"`
	RemoveScreenshot *bool `json:"remove_screenshot,omitempty"`
	Screenshot *string `json:"screenshot,omitempty"`
	Tags *string `json:"tags,omitempty"`
	Unformatted *bool `json:"unformatted,omitempty"`
	XmlSpacePreserve *bool `json:"xml_space_preserve,omitempty"`
}

// ApplyDefaults builds a fresh TranslationKeyParams from the defaults map via
// a JSON round-trip; the receiver's own values are not merged in. On error
// the receiver is returned alongside the error.
func (params *TranslationKeyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationKeyParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TranslationKeyParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// LocaleParams is the request payload for locale endpoints.
type LocaleParams struct {
	Code string `json:"code"`
	Default *bool `json:"default,omitempty"`
	Main *bool `json:"main,omitempty"`
	Name string `json:"name"`
	Rtl *bool `json:"rtl,omitempty"`
	SourceLocaleID *string `json:"source_locale_id,omitempty"`
}

// ApplyDefaults builds a fresh LocaleParams from the defaults map via a JSON
// round-trip; the receiver's own values are not merged in. On error the
// receiver is returned alongside the error.
func (params *LocaleParams) ApplyDefaults(defaults map[string]interface{}) (*LocaleParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(LocaleParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// TranslationOrderParams is the request payload for translation orders.
type TranslationOrderParams struct {
	Category string `json:"category"`
	IncludeUntranslatedKeys *bool `json:"include_untranslated_keys,omitempty"`
	IncludeUnverifiedTranslations *bool `json:"include_unverified_translations,omitempty"`
	Lsp string `json:"lsp"`
	Message *string `json:"message,omitempty"`
	Priority *bool `json:"priority,omitempty"`
	Quality *bool `json:"quality,omitempty"`
	SourceLocaleID string `json:"source_locale_id"`
	StyleguideID *string `json:"styleguide_id,omitempty"`
	Tag *string `json:"tag,omitempty"`
	TargetLocaleIDs []string `json:"target_locale_ids"`
	TranslationType string `json:"translation_type"`
	UnverifyTranslationsUponDelivery *bool `json:"unverify_translations_upon_delivery,omitempty"`
}

// ApplyDefaults builds a fresh TranslationOrderParams from the defaults map
// via a JSON round-trip; the receiver's own values are not merged in. On
// error the receiver is returned alongside the error.
func (params *TranslationOrderParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationOrderParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TranslationOrderParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// ProjectParams is the request payload for project endpoints.
type ProjectParams struct {
	MainFormat *string `json:"main_format,omitempty"`
	Name string `json:"name"`
	SharesTranslationMemory *bool `json:"shares_translation_memory,omitempty"`
}

// ApplyDefaults builds a fresh ProjectParams from the defaults map via a
// JSON round-trip; the receiver's own values are not merged in. On error the
// receiver is returned alongside the error.
func (params *ProjectParams) ApplyDefaults(defaults map[string]interface{}) (*ProjectParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(ProjectParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// StyleguideParams is the request payload for style guide endpoints.
type StyleguideParams struct {
	Audience *string `json:"audience,omitempty"`
	Business *string `json:"business,omitempty"`
	CompanyBranding *string `json:"company_branding,omitempty"`
	Formatting *string `json:"formatting,omitempty"`
	GlossaryTerms *string `json:"glossary_terms,omitempty"`
	GrammarConsistency *string `json:"grammar_consistency,omitempty"`
	GrammaticalPerson *string `json:"grammatical_person,omitempty"`
	LiteralTranslation *string `json:"literal_translation,omitempty"`
	OverallTone *string `json:"overall_tone,omitempty"`
	Samples *string `json:"samples,omitempty"`
	TargetAudience *string `json:"target_audience,omitempty"`
	Title string `json:"title"`
	VocabularyType *string `json:"vocabulary_type,omitempty"`
}

// ApplyDefaults builds a fresh StyleguideParams from the defaults map via a
// JSON round-trip; the receiver's own values are not merged in. On error the
// receiver is returned alongside the error.
func (params *StyleguideParams) ApplyDefaults(defaults map[string]interface{}) (*StyleguideParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(StyleguideParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// TagParams is the request payload for tag endpoints.
type TagParams struct {
	Name string `json:"name"`
}

// ApplyDefaults builds a fresh TagParams from the defaults map via a JSON
// round-trip; the receiver's own values are not merged in. On error the
// receiver is returned alongside the error.
func (params *TagParams) ApplyDefaults(defaults map[string]interface{}) (*TagParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TagParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// TranslationParams is the request payload for translation endpoints.
type TranslationParams struct {
	Content string `json:"content"`
	Excluded *bool `json:"excluded,omitempty"`
	KeyID string `json:"key_id"`
	LocaleID string `json:"locale_id"`
	PluralSuffix *string `json:"plural_suffix,omitempty"`
	Unverified *bool `json:"unverified,omitempty"`
}

// ApplyDefaults builds a fresh TranslationParams from the defaults map via a
// JSON round-trip; the receiver's own values are not merged in. On error the
// receiver is returned alongside the error.
func (params *TranslationParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(TranslationParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// LocaleFileImportParams is the request payload for locale file uploads.
type LocaleFileImportParams struct {
	ConvertEmoji *bool `json:"convert_emoji,omitempty"`
	File string `json:"file"`
	FileFormat *string `json:"file_format,omitempty"`
	LocaleID *string `json:"locale_id,omitempty"`
	SkipUnverification *bool `json:"skip_unverification,omitempty"`
	SkipUploadTags *bool `json:"skip_upload_tags,omitempty"`
	Tags *string `json:"tags,omitempty"`
	UpdateTranslations *bool `json:"update_translations,omitempty"`
}

// ApplyDefaults builds a fresh LocaleFileImportParams from the defaults map
// via a JSON round-trip; the receiver's own values are not merged in. On
// error the receiver is returned alongside the error.
func (params *LocaleFileImportParams) ApplyDefaults(defaults map[string]interface{}) (*LocaleFileImportParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(LocaleFileImportParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// Create a new authorization.
func (client *Client) AuthorizationCreate(params *AuthorizationParams) (*AuthorizationWithToken, error) {
retVal := new(AuthorizationWithToken)
err := func() error {
url := fmt.Sprintf("/v2/authorizations")
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// AuthorizationDelete deletes an existing authorization. API calls using
// that token will stop working.
func (client *Client) AuthorizationDelete(id string) error {
	endpoint := fmt.Sprintf("/v2/authorizations/%s", id)
	rc, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// AuthorizationShow fetches details on a single authorization.
func (client *Client) AuthorizationShow(id string) (*Authorization, error) {
	auth := new(Authorization)
	endpoint := fmt.Sprintf("/v2/authorizations/%s", id)
	rc, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return auth, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&auth)
	return auth, err
}
// Update an existing authorization.
func (client *Client) AuthorizationUpdate(id string, params *AuthorizationParams) (*Authorization, error) {
retVal := new(Authorization)
err := func() error {
url := fmt.Sprintf("/v2/authorizations/%s", id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// AuthorizationsList lists all your authorizations, paginated.
func (client *Client) AuthorizationsList(page, perPage int) ([]*Authorization, error) {
	retVal := []*Authorization{}
	err := func() error {
		// No format verbs: use a plain literal instead of a no-arg Sprintf (vet S1039).
		url := "/v2/authorizations"
		rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
		if err != nil {
			return err
		}
		defer rc.Close()
		// In debug mode, mirror the response body to stderr while decoding.
		var reader io.Reader
		if Debug {
			reader = io.TeeReader(rc, os.Stderr)
		} else {
			reader = rc
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}()
	return retVal, err
}
// Create a new comment for a key.
func (client *Client) CommentCreate(project_id, key_id string, params *CommentParams) (*Comment, error) {
retVal := new(Comment)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments", project_id, key_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// CommentDelete deletes an existing comment.
func (client *Client) CommentDelete(project_id, key_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s", project_id, key_id, id)
	rc, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// CommentMarkCheck checks whether a comment was marked as read: the API
// responds 204 if read and 404 (surfaced as an error) if unread.
func (client *Client) CommentMarkCheck(project_id, key_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s/read", project_id, key_id, id)
	rc, err := client.sendRequest("GET", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// CommentMarkRead marks a comment as read.
func (client *Client) CommentMarkRead(project_id, key_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s/read", project_id, key_id, id)
	rc, err := client.sendRequest("PATCH", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// CommentMarkUnread marks a comment as unread.
func (client *Client) CommentMarkUnread(project_id, key_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s/read", project_id, key_id, id)
	rc, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// CommentShow fetches details on a single comment.
func (client *Client) CommentShow(project_id, key_id, id string) (*Comment, error) {
	comment := new(Comment)
	endpoint := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s", project_id, key_id, id)
	rc, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return comment, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&comment)
	return comment, err
}
// Update an existing comment.
func (client *Client) CommentUpdate(project_id, key_id, id string, params *CommentParams) (*Comment, error) {
retVal := new(Comment)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s", project_id, key_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// CommentsList lists all comments for a key, paginated.
func (client *Client) CommentsList(project_id, key_id string, page, perPage int) ([]*Comment, error) {
	comments := []*Comment{}
	endpoint := fmt.Sprintf("/v2/projects/%s/keys/%s/comments", project_id, key_id)
	rc, err := client.sendRequestPaginated("GET", endpoint, "", nil, 200, page, perPage)
	if err != nil {
		return comments, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&comments)
	return comments, err
}
// Create a new blacklisted key.
func (client *Client) ExcludeRuleCreate(project_id string, params *ExcludeRuleParams) (*BlacklistedKey, error) {
retVal := new(BlacklistedKey)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// ExcludeRuleDelete deletes an existing blacklisted key.
func (client *Client) ExcludeRuleDelete(project_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/blacklisted_keys/%s", project_id, id)
	rc, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// ExcludeRuleShow fetches details on a single blacklisted key for a project.
func (client *Client) ExcludeRuleShow(project_id, id string) (*BlacklistedKey, error) {
	key := new(BlacklistedKey)
	endpoint := fmt.Sprintf("/v2/projects/%s/blacklisted_keys/%s", project_id, id)
	rc, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return key, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&key)
	return key, err
}
// Update an existing blacklisted key.
func (client *Client) ExcludeRuleUpdate(project_id, id string, params *ExcludeRuleParams) (*BlacklistedKey, error) {
retVal := new(BlacklistedKey)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// ExcludeRulesIndex lists all blacklisted keys for the given project, paginated.
func (client *Client) ExcludeRulesIndex(project_id string, page, perPage int) ([]*BlacklistedKey, error) {
	keys := []*BlacklistedKey{}
	endpoint := fmt.Sprintf("/v2/projects/%s/blacklisted_keys", project_id)
	rc, err := client.sendRequestPaginated("GET", endpoint, "", nil, 200, page, perPage)
	if err != nil {
		return keys, err
	}
	defer rc.Close()
	// In debug mode, mirror the response body to stderr while decoding.
	var body io.Reader = rc
	if Debug {
		body = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(body).Decode(&keys)
	return keys, err
}
// FormatsList returns all localization file formats supported in PhraseApp,
// paginated.
func (client *Client) FormatsList(page, perPage int) ([]*Format, error) {
	retVal := []*Format{}
	err := func() error {
		// No format verbs: use a plain literal instead of a no-arg Sprintf (vet S1039).
		url := "/v2/formats"
		rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
		if err != nil {
			return err
		}
		defer rc.Close()
		// In debug mode, mirror the response body to stderr while decoding.
		var reader io.Reader
		if Debug {
			reader = io.TeeReader(rc, os.Stderr)
		} else {
			reader = rc
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}()
	return retVal, err
}
// Create a new key.
func (client *Client) KeyCreate(project_id string, params *TranslationKeyParams) (*TranslationKeyDetails, error) {
retVal := new(TranslationKeyDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys", project_id)
paramsBuf := bytes.NewBuffer(nil)
writer := multipart.NewWriter(paramsBuf)
ctype := writer.FormDataContentType()
if params.DataType != nil {
err := writer.WriteField("data_type", *params.DataType)
if err != nil {
return err
}
}
if params.Description != nil {
err := writer.WriteField("description", *params.Description)
if err != nil {
return err
}
}
if params.LocalizedFormatKey != nil {
err := writer.WriteField("localized_format_key", *params.LocalizedFormatKey)
if err != nil {
return err
}
}
if params.LocalizedFormatString != nil {
err := writer.WriteField("localized_format_string", *params.LocalizedFormatString)
if err != nil {
return err
}
}
if params.MaxCharactersAllowed != nil {
err := writer.WriteField("max_characters_allowed", strconv.FormatInt(*params.MaxCharactersAllowed, 10))
if err != nil {
return err
}
}
if params.Name != "" {
err := writer.WriteField("name", params.Name)
if err != nil {
return err
}
}
if params.NamePlural != nil {
err := writer.WriteField("name_plural", *params.NamePlural)
if err != nil {
return err
}
}
if params.OriginalFile != nil {
err := writer.WriteField("original_file", *params.OriginalFile)
if err != nil {
return err
}
}
if params.Plural != nil {
err := writer.WriteField("plural", strconv.FormatBool(*params.Plural))
if err != nil {
return err
}
}
if params.RemoveScreenshot != nil {
err := writer.WriteField("remove_screenshot", strconv.FormatBool(*params.RemoveScreenshot))
if err != nil {
return err
}
}
if params.Screenshot != nil {
part, err := writer.CreateFormFile("screenshot", filepath.Base(*params.Screenshot))
if err != nil {
return err
}
file, err := os.Open(*params.Screenshot)
if err != nil {
return err
}
_, err = io.Copy(part, file)
if err != nil {
return err
}
err = file.Close()
if err != nil {
return err
}
}
if params.Tags != nil {
err := writer.WriteField("tags", *params.Tags)
if err != nil {
return err
}
}
if params.Unformatted != nil {
err := writer.WriteField("unformatted", strconv.FormatBool(*params.Unformatted))
if err != nil {
return err
}
}
if params.XmlSpacePreserve != nil {
err := writer.WriteField("xml_space_preserve", strconv.FormatBool(*params.XmlSpacePreserve))
if err != nil {
return err
}
}
err := writer.WriteField("utf8", "✓")
writer.Close()
rc, err := client.sendRequest("POST", url, ctype, paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Delete an existing key.
func (client *Client) KeyDelete(project_id, id string) error {
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s", project_id, id)
rc, err := client.sendRequest("DELETE", url, "", nil, 204)
if err != nil {
return err
}
defer rc.Close()
return nil
}()
return err
}
// Get details on a single key for a given project.
func (client *Client) KeyShow(project_id, id string) (*TranslationKeyDetails, error) {
retVal := new(TranslationKeyDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s", project_id, id)
rc, err := client.sendRequest("GET", url, "", nil, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Update an existing key.
func (client *Client) KeyUpdate(project_id, id string, params *TranslationKeyParams) (*TranslationKeyDetails, error) {
retVal := new(TranslationKeyDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
writer := multipart.NewWriter(paramsBuf)
ctype := writer.FormDataContentType()
if params.DataType != nil {
err := writer.WriteField("data_type", *params.DataType)
if err != nil {
return err
}
}
if params.Description != nil {
err := writer.WriteField("description", *params.Description)
if err != nil {
return err
}
}
if params.LocalizedFormatKey != nil {
err := writer.WriteField("localized_format_key", *params.LocalizedFormatKey)
if err != nil {
return err
}
}
if params.LocalizedFormatString != nil {
err := writer.WriteField("localized_format_string", *params.LocalizedFormatString)
if err != nil {
return err
}
}
if params.MaxCharactersAllowed != nil {
err := writer.WriteField("max_characters_allowed", strconv.FormatInt(*params.MaxCharactersAllowed, 10))
if err != nil {
return err
}
}
if params.Name != "" {
err := writer.WriteField("name", params.Name)
if err != nil {
return err
}
}
if params.NamePlural != nil {
err := writer.WriteField("name_plural", *params.NamePlural)
if err != nil {
return err
}
}
if params.OriginalFile != nil {
err := writer.WriteField("original_file", *params.OriginalFile)
if err != nil {
return err
}
}
if params.Plural != nil {
err := writer.WriteField("plural", strconv.FormatBool(*params.Plural))
if err != nil {
return err
}
}
if params.RemoveScreenshot != nil {
err := writer.WriteField("remove_screenshot", strconv.FormatBool(*params.RemoveScreenshot))
if err != nil {
return err
}
}
if params.Screenshot != nil {
part, err := writer.CreateFormFile("screenshot", filepath.Base(*params.Screenshot))
if err != nil {
return err
}
file, err := os.Open(*params.Screenshot)
if err != nil {
return err
}
_, err = io.Copy(part, file)
if err != nil {
return err
}
err = file.Close()
if err != nil {
return err
}
}
if params.Tags != nil {
err := writer.WriteField("tags", *params.Tags)
if err != nil {
return err
}
}
if params.Unformatted != nil {
err := writer.WriteField("unformatted", strconv.FormatBool(*params.Unformatted))
if err != nil {
return err
}
}
if params.XmlSpacePreserve != nil {
err := writer.WriteField("xml_space_preserve", strconv.FormatBool(*params.XmlSpacePreserve))
if err != nil {
return err
}
}
err := writer.WriteField("utf8", "✓")
writer.Close()
rc, err := client.sendRequest("PATCH", url, ctype, paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// KeysDeleteParams narrows which keys a bulk delete affects.
type KeysDeleteParams struct {
	LocaleID *string `json:"locale_id,omitempty"`
	Q *string `json:"q,omitempty"`
}

// ApplyDefaults builds a fresh KeysDeleteParams from the defaults map via a
// JSON round-trip; the receiver's own values are not merged in. On error the
// receiver is returned alongside the error.
func (params *KeysDeleteParams) ApplyDefaults(defaults map[string]interface{}) (*KeysDeleteParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(KeysDeleteParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// Delete all keys matching query. Same constraints as list.
func (client *Client) KeysDelete(project_id string, params *KeysDeleteParams) (*AffectedResources, error) {
retVal := new(AffectedResources)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("DELETE", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// KeysListParams filters and orders a key listing.
type KeysListParams struct {
	LocaleID *string `json:"locale_id,omitempty"`
	Order *string `json:"order,omitempty"`
	Q *string `json:"q,omitempty"`
	Sort *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a fresh KeysListParams from the defaults map via a
// JSON round-trip; the receiver's own values are not merged in. On error the
// receiver is returned alongside the error.
func (params *KeysListParams) ApplyDefaults(defaults map[string]interface{}) (*KeysListParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(KeysListParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// List all keys for the given project. Alternatively you can POST requests to /search.
func (client *Client) KeysList(project_id string, page, perPage int, params *KeysListParams) ([]*TranslationKey, error) {
retVal := []*TranslationKey{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// KeysSearchParams filters and orders a key search.
type KeysSearchParams struct {
	LocaleID *string `json:"locale_id,omitempty"`
	Order *string `json:"order,omitempty"`
	Q *string `json:"q,omitempty"`
	Sort *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a fresh KeysSearchParams from the defaults map via a
// JSON round-trip; the receiver's own values are not merged in. On error the
// receiver is returned alongside the error.
func (params *KeysSearchParams) ApplyDefaults(defaults map[string]interface{}) (*KeysSearchParams, error) {
	raw, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	withDefaults := new(KeysSearchParams)
	if err := json.Unmarshal(raw, withDefaults); err != nil {
		return params, err
	}
	return withDefaults, nil
}
// Search keys for the given project matching query.
func (client *Client) KeysSearch(project_id string, page, perPage int, params *KeysSearchParams) ([]*TranslationKey, error) {
retVal := []*TranslationKey{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/search", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("POST", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// KeysTagParams select which keys KeysTag operates on and which tags to add.
// Tags is required; the other fields narrow the affected keys.
type KeysTagParams struct {
	LocaleID *string `json:"locale_id,omitempty"`
	Q        *string `json:"q,omitempty"` // search query
	Tags     string  `json:"tags"`        // comma-separated tag list (required)
}

// ApplyDefaults builds a new params value from the defaults map via a JSON
// round-trip; on failure the receiver is returned with the error.
// NOTE(review): the receiver's field values are NOT merged into the result —
// confirm this precedence is intended.
func (params *KeysTagParams) ApplyDefaults(defaults map[string]interface{}) (*KeysTagParams, error) {
	str, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	defaultParams := new(KeysTagParams)
	err = json.Unmarshal(str, defaultParams)
	if err != nil {
		return params, err
	}
	return defaultParams, nil
}
// Tags all keys matching query. Same constraints as list.
func (client *Client) KeysTag(project_id string, params *KeysTagParams) (*AffectedResources, error) {
retVal := new(AffectedResources)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/tag", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// KeysUntagParams select which keys KeysUntag operates on and which tags to
// remove. Tags is required; the other fields narrow the affected keys.
type KeysUntagParams struct {
	LocaleID *string `json:"locale_id,omitempty"`
	Q        *string `json:"q,omitempty"` // search query
	Tags     string  `json:"tags"`        // comma-separated tag list (required)
}

// ApplyDefaults builds a new params value from the defaults map via a JSON
// round-trip; on failure the receiver is returned with the error.
// NOTE(review): the receiver's field values are NOT merged into the result —
// confirm this precedence is intended.
func (params *KeysUntagParams) ApplyDefaults(defaults map[string]interface{}) (*KeysUntagParams, error) {
	str, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	defaultParams := new(KeysUntagParams)
	err = json.Unmarshal(str, defaultParams)
	if err != nil {
		return params, err
	}
	return defaultParams, nil
}
// Removes specified tags from keys matching query.
func (client *Client) KeysUntag(project_id string, params *KeysUntagParams) (*AffectedResources, error) {
retVal := new(AffectedResources)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/tag", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("DELETE", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Create a new locale.
func (client *Client) LocaleCreate(project_id string, params *LocaleParams) (*LocaleDetails, error) {
retVal := new(LocaleDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// LocaleDelete removes an existing locale from the project (expects HTTP 204).
func (client *Client) LocaleDelete(project_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/locales/%s", project_id, id)
	body, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	defer body.Close()
	return nil
}
// LocaleDownloadParams configure the file export performed by LocaleDownload.
// FileFormat is required; the remaining fields tune the export.
type LocaleDownloadParams struct {
	ConvertEmoji             bool                    `json:"convert_emoji,omitempty"`
	FileFormat               string                  `json:"file_format"` // required, e.g. a format identifier
	FormatOptions            *map[string]interface{} `json:"format_options,omitempty"`
	IncludeEmptyTranslations bool                    `json:"include_empty_translations,omitempty"`
	KeepNotranslateTags      bool                    `json:"keep_notranslate_tags,omitempty"`
	Tag                      *string                 `json:"tag,omitempty"` // limit export to one tag
}

// ApplyDefaults builds a new params value from the defaults map via a JSON
// round-trip; on failure the receiver is returned with the error.
// NOTE(review): the receiver's field values are NOT merged into the result —
// confirm this precedence is intended.
func (params *LocaleDownloadParams) ApplyDefaults(defaults map[string]interface{}) (*LocaleDownloadParams, error) {
	str, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	defaultParams := new(LocaleDownloadParams)
	err = json.Unmarshal(str, defaultParams)
	if err != nil {
		return params, err
	}
	return defaultParams, nil
}
// Download a locale in a specific file format.
func (client *Client) LocaleDownload(project_id, id string, params *LocaleDownloadParams) ([]byte, error) {
retVal := []byte{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s/download", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("GET", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
retVal, err = ioutil.ReadAll(reader)
return err
}()
return retVal, err
}
// LocaleShow fetches details on a single locale of the given project.
func (client *Client) LocaleShow(project_id, id string) (*LocaleDetails, error) {
	retVal := new(LocaleDetails)
	endpoint := fmt.Sprintf("/v2/projects/%s/locales/%s", project_id, id)
	body, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer body.Close()
	var src io.Reader = body
	if Debug {
		// Copy the response to stderr while decoding.
		src = io.TeeReader(body, os.Stderr)
	}
	return retVal, json.NewDecoder(src).Decode(&retVal)
}
// Update an existing locale.
func (client *Client) LocaleUpdate(project_id, id string, params *LocaleParams) (*LocaleDetails, error) {
retVal := new(LocaleDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// LocalesList lists all locales of the given project (paginated).
func (client *Client) LocalesList(project_id string, page, perPage int) ([]*Locale, error) {
	retVal := []*Locale{}
	endpoint := fmt.Sprintf("/v2/projects/%s/locales", project_id)
	body, err := client.sendRequestPaginated("GET", endpoint, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer body.Close()
	var src io.Reader = body
	if Debug {
		src = io.TeeReader(body, os.Stderr)
	}
	return retVal, json.NewDecoder(src).Decode(&retVal)
}
// OrderConfirm confirms an existing order and sends it to the provider for
// translation. Same constraints as for create.
func (client *Client) OrderConfirm(project_id, id string) (*TranslationOrder, error) {
	retVal := new(TranslationOrder)
	endpoint := fmt.Sprintf("/v2/projects/%s/orders/%s/confirm", project_id, id)
	body, err := client.sendRequest("PATCH", endpoint, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer body.Close()
	var src io.Reader = body
	if Debug {
		src = io.TeeReader(body, os.Stderr)
	}
	return retVal, json.NewDecoder(src).Decode(&retVal)
}
// Create a new order. Access token scope must include <code>orders.create</code>.
func (client *Client) OrderCreate(project_id string, params *TranslationOrderParams) (*TranslationOrder, error) {
retVal := new(TranslationOrder)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/orders", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// OrderDelete cancels an existing order; the order must not yet be confirmed.
func (client *Client) OrderDelete(project_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/orders/%s", project_id, id)
	body, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	defer body.Close()
	return nil
}
// OrderShow fetches details on a single order.
func (client *Client) OrderShow(project_id, id string) (*TranslationOrder, error) {
	retVal := new(TranslationOrder)
	endpoint := fmt.Sprintf("/v2/projects/%s/orders/%s", project_id, id)
	body, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer body.Close()
	var src io.Reader = body
	if Debug {
		src = io.TeeReader(body, os.Stderr)
	}
	return retVal, json.NewDecoder(src).Decode(&retVal)
}
// OrdersList lists all orders of the given project (paginated).
func (client *Client) OrdersList(project_id string, page, perPage int) ([]*TranslationOrder, error) {
	retVal := []*TranslationOrder{}
	endpoint := fmt.Sprintf("/v2/projects/%s/orders", project_id)
	body, err := client.sendRequestPaginated("GET", endpoint, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer body.Close()
	var src io.Reader = body
	if Debug {
		src = io.TeeReader(body, os.Stderr)
	}
	return retVal, json.NewDecoder(src).Decode(&retVal)
}
// Create a new project.
func (client *Client) ProjectCreate(params *ProjectParams) (*ProjectDetails, error) {
retVal := new(ProjectDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects")
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// ProjectDelete removes an existing project (expects HTTP 204).
func (client *Client) ProjectDelete(id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s", id)
	body, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	defer body.Close()
	return nil
}
// ProjectShow fetches details on a single project.
func (client *Client) ProjectShow(id string) (*ProjectDetails, error) {
	retVal := new(ProjectDetails)
	endpoint := fmt.Sprintf("/v2/projects/%s", id)
	body, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer body.Close()
	var src io.Reader = body
	if Debug {
		src = io.TeeReader(body, os.Stderr)
	}
	return retVal, json.NewDecoder(src).Decode(&retVal)
}
// Update an existing project.
func (client *Client) ProjectUpdate(id string, params *ProjectParams) (*ProjectDetails, error) {
retVal := new(ProjectDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s", id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// ProjectsList lists all projects the current user has access to (paginated).
func (client *Client) ProjectsList(page, perPage int) ([]*Project, error) {
	retVal := []*Project{}
	// FIX: fmt.Sprintf with no verbs replaced by a plain string literal.
	url := "/v2/projects"
	rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	return retVal, json.NewDecoder(reader).Decode(&retVal)
}
// ShowUser shows details for the currently authenticated user.
func (client *Client) ShowUser() (*User, error) {
	retVal := new(User)
	// FIX: fmt.Sprintf with no verbs replaced by a plain string literal.
	url := "/v2/user"
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	return retVal, json.NewDecoder(reader).Decode(&retVal)
}
// Create a new style guide.
func (client *Client) StyleguideCreate(project_id string, params *StyleguideParams) (*StyleguideDetails, error) {
retVal := new(StyleguideDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/styleguides", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// StyleguideDelete removes an existing style guide (expects HTTP 204).
func (client *Client) StyleguideDelete(project_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/styleguides/%s", project_id, id)
	body, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	defer body.Close()
	return nil
}
// StyleguideShow fetches details on a single style guide.
func (client *Client) StyleguideShow(project_id, id string) (*StyleguideDetails, error) {
	retVal := new(StyleguideDetails)
	endpoint := fmt.Sprintf("/v2/projects/%s/styleguides/%s", project_id, id)
	body, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer body.Close()
	var src io.Reader = body
	if Debug {
		src = io.TeeReader(body, os.Stderr)
	}
	return retVal, json.NewDecoder(src).Decode(&retVal)
}
// Update an existing style guide.
func (client *Client) StyleguideUpdate(project_id, id string, params *StyleguideParams) (*StyleguideDetails, error) {
retVal := new(StyleguideDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/styleguides/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// StyleguidesList lists all style guides of the given project (paginated).
func (client *Client) StyleguidesList(project_id string, page, perPage int) ([]*Styleguide, error) {
	retVal := []*Styleguide{}
	endpoint := fmt.Sprintf("/v2/projects/%s/styleguides", project_id)
	body, err := client.sendRequestPaginated("GET", endpoint, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer body.Close()
	var src io.Reader = body
	if Debug {
		src = io.TeeReader(body, os.Stderr)
	}
	return retVal, json.NewDecoder(src).Decode(&retVal)
}
// Create a new tag.
func (client *Client) TagCreate(project_id string, params *TagParams) (*TagWithStats, error) {
retVal := new(TagWithStats)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/tags", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TagDelete removes an existing tag (expects HTTP 204).
func (client *Client) TagDelete(project_id, name string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/tags/%s", project_id, name)
	body, err := client.sendRequest("DELETE", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	defer body.Close()
	return nil
}
// TagShow fetches details and progress information for a single tag of the
// given project.
func (client *Client) TagShow(project_id, name string) (*TagWithStats, error) {
	retVal := new(TagWithStats)
	endpoint := fmt.Sprintf("/v2/projects/%s/tags/%s", project_id, name)
	body, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer body.Close()
	var src io.Reader = body
	if Debug {
		src = io.TeeReader(body, os.Stderr)
	}
	return retVal, json.NewDecoder(src).Decode(&retVal)
}
// TagsList lists all tags of the given project (paginated).
func (client *Client) TagsList(project_id string, page, perPage int) ([]*Tag, error) {
	retVal := []*Tag{}
	endpoint := fmt.Sprintf("/v2/projects/%s/tags", project_id)
	body, err := client.sendRequestPaginated("GET", endpoint, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer body.Close()
	var src io.Reader = body
	if Debug {
		src = io.TeeReader(body, os.Stderr)
	}
	return retVal, json.NewDecoder(src).Decode(&retVal)
}
// Create a translation.
func (client *Client) TranslationCreate(project_id string, params *TranslationParams) (*TranslationDetails, error) {
retVal := new(TranslationDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationMachineTranslate fills the translation via machine translation
// (expects HTTP 204).
func (client *Client) TranslationMachineTranslate(project_id, id string) error {
	endpoint := fmt.Sprintf("/v2/projects/%s/translations/%s/machine_translate", project_id, id)
	body, err := client.sendRequest("PATCH", endpoint, "", nil, 204)
	if err != nil {
		return err
	}
	defer body.Close()
	return nil
}
// TranslationShow fetches details on a single translation.
func (client *Client) TranslationShow(project_id, id string) (*TranslationDetails, error) {
	retVal := new(TranslationDetails)
	endpoint := fmt.Sprintf("/v2/projects/%s/translations/%s", project_id, id)
	body, err := client.sendRequest("GET", endpoint, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer body.Close()
	var src io.Reader = body
	if Debug {
		src = io.TeeReader(body, os.Stderr)
	}
	return retVal, json.NewDecoder(src).Decode(&retVal)
}
// TranslationUpdateParams carry the new state for TranslationUpdate.
// Content is required; nil pointer fields leave the attribute unchanged.
type TranslationUpdateParams struct {
	Content      string  `json:"content"` // new translation text (required)
	Excluded     *bool   `json:"excluded,omitempty"`
	PluralSuffix *string `json:"plural_suffix,omitempty"`
	Unverified   *bool   `json:"unverified,omitempty"`
}

// ApplyDefaults builds a new params value from the defaults map via a JSON
// round-trip; on failure the receiver is returned with the error.
// NOTE(review): the receiver's field values are NOT merged into the result —
// confirm this precedence is intended.
func (params *TranslationUpdateParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationUpdateParams, error) {
	str, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	defaultParams := new(TranslationUpdateParams)
	err = json.Unmarshal(str, defaultParams)
	if err != nil {
		return params, err
	}
	return defaultParams, nil
}
// Update an existing translation.
func (client *Client) TranslationUpdate(project_id, id string, params *TranslationUpdateParams) (*TranslationDetails, error) {
retVal := new(TranslationDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsByKeyParams are the optional filter/ordering options for
// TranslationsByKey. Nil fields are omitted from the JSON request body.
type TranslationsByKeyParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"` // search query
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a new params value from the defaults map via a JSON
// round-trip; on failure the receiver is returned with the error.
// NOTE(review): the receiver's field values are NOT merged into the result —
// confirm this precedence is intended.
func (params *TranslationsByKeyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsByKeyParams, error) {
	str, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	defaultParams := new(TranslationsByKeyParams)
	err = json.Unmarshal(str, defaultParams)
	if err != nil {
		return params, err
	}
	return defaultParams, nil
}
// List translations for a specific key.
func (client *Client) TranslationsByKey(project_id, key_id string, page, perPage int, params *TranslationsByKeyParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/translations", project_id, key_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsByLocaleParams are the optional filter/ordering options for
// TranslationsByLocale. Nil fields are omitted from the JSON request body.
type TranslationsByLocaleParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"` // search query
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a new params value from the defaults map via a JSON
// round-trip; on failure the receiver is returned with the error.
// NOTE(review): the receiver's field values are NOT merged into the result —
// confirm this precedence is intended.
func (params *TranslationsByLocaleParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsByLocaleParams, error) {
	str, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	defaultParams := new(TranslationsByLocaleParams)
	err = json.Unmarshal(str, defaultParams)
	if err != nil {
		return params, err
	}
	return defaultParams, nil
}
// List translations for a specific locale.
func (client *Client) TranslationsByLocale(project_id, locale_id string, page, perPage int, params *TranslationsByLocaleParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s/translations", project_id, locale_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsExcludeParams select which translations TranslationsExclude
// operates on. Nil fields are omitted from the JSON request body.
type TranslationsExcludeParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"` // search query
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a new params value from the defaults map via a JSON
// round-trip; on failure the receiver is returned with the error.
// NOTE(review): the receiver's field values are NOT merged into the result —
// confirm this precedence is intended.
func (params *TranslationsExcludeParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsExcludeParams, error) {
	str, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	defaultParams := new(TranslationsExcludeParams)
	err = json.Unmarshal(str, defaultParams)
	if err != nil {
		return params, err
	}
	return defaultParams, nil
}
// Exclude translations matching query from locale export.
func (client *Client) TranslationsExclude(project_id string, params *TranslationsExcludeParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/exclude", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsIncludeParams select which translations TranslationsInclude
// operates on. Nil fields are omitted from the JSON request body.
type TranslationsIncludeParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"` // search query
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a new params value from the defaults map via a JSON
// round-trip; on failure the receiver is returned with the error.
// NOTE(review): the receiver's field values are NOT merged into the result —
// confirm this precedence is intended.
func (params *TranslationsIncludeParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsIncludeParams, error) {
	str, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	defaultParams := new(TranslationsIncludeParams)
	err = json.Unmarshal(str, defaultParams)
	if err != nil {
		return params, err
	}
	return defaultParams, nil
}
// Include translations matching query in locale export.
func (client *Client) TranslationsInclude(project_id string, params *TranslationsIncludeParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/include", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsListParams are the optional filter/ordering options for
// TranslationsList. Nil fields are omitted from the JSON request body.
type TranslationsListParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"` // search query
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a new params value from the defaults map via a JSON
// round-trip; on failure the receiver is returned with the error.
// NOTE(review): the receiver's field values are NOT merged into the result —
// confirm this precedence is intended.
func (params *TranslationsListParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsListParams, error) {
	str, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	defaultParams := new(TranslationsListParams)
	err = json.Unmarshal(str, defaultParams)
	if err != nil {
		return params, err
	}
	return defaultParams, nil
}
// List translations for the given project. Alternatively, POST request to /search
func (client *Client) TranslationsList(project_id string, page, perPage int, params *TranslationsListParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsSearchParams are the optional filter/ordering options for
// TranslationsSearch. Nil fields are omitted from the JSON request body.
type TranslationsSearchParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"` // search query
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a new params value from the defaults map via a JSON
// round-trip; on failure the receiver is returned with the error.
// NOTE(review): the receiver's field values are NOT merged into the result —
// confirm this precedence is intended.
func (params *TranslationsSearchParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsSearchParams, error) {
	str, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	defaultParams := new(TranslationsSearchParams)
	err = json.Unmarshal(str, defaultParams)
	if err != nil {
		return params, err
	}
	return defaultParams, nil
}
// List translations for the given project if you exceed GET request limitations on translations list.
func (client *Client) TranslationsSearch(project_id string, page, perPage int, params *TranslationsSearchParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/search", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("POST", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsUnverifyParams select which translations TranslationsUnverify
// operates on. Nil fields are omitted from the JSON request body.
type TranslationsUnverifyParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"` // search query
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a new params value from the defaults map via a JSON
// round-trip; on failure the receiver is returned with the error.
// NOTE(review): the receiver's field values are NOT merged into the result —
// confirm this precedence is intended.
func (params *TranslationsUnverifyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsUnverifyParams, error) {
	str, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	defaultParams := new(TranslationsUnverifyParams)
	err = json.Unmarshal(str, defaultParams)
	if err != nil {
		return params, err
	}
	return defaultParams, nil
}
// Mark translations matching query as unverified.
func (client *Client) TranslationsUnverify(project_id string, params *TranslationsUnverifyParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/unverify", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsVerifyParams select which translations TranslationsVerify
// operates on. Nil fields are omitted from the JSON request body.
type TranslationsVerifyParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"` // search query
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a new params value from the defaults map via a JSON
// round-trip; on failure the receiver is returned with the error.
// NOTE(review): the receiver's field values are NOT merged into the result —
// confirm this precedence is intended.
func (params *TranslationsVerifyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsVerifyParams, error) {
	str, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	defaultParams := new(TranslationsVerifyParams)
	err = json.Unmarshal(str, defaultParams)
	if err != nil {
		return params, err
	}
	return defaultParams, nil
}
// Verify translations matching query.
func (client *Client) TranslationsVerify(project_id string, params *TranslationsVerifyParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/verify", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// UploadCreate uploads a new language file as a multipart form, creating the
// necessary resources in the project (expects HTTP 201).
func (client *Client) UploadCreate(project_id string, params *LocaleFileImportParams) (*LocaleFileImportWithSummary, error) {
	retVal := new(LocaleFileImportWithSummary)
	url := fmt.Sprintf("/v2/projects/%s/uploads", project_id)
	paramsBuf := bytes.NewBuffer(nil)
	writer := multipart.NewWriter(paramsBuf)
	ctype := writer.FormDataContentType()
	if params.ConvertEmoji != nil {
		if err := writer.WriteField("convert_emoji", strconv.FormatBool(*params.ConvertEmoji)); err != nil {
			return retVal, err
		}
	}
	if params.File != "" {
		part, err := writer.CreateFormFile("file", filepath.Base(params.File))
		if err != nil {
			return retVal, err
		}
		file, err := os.Open(params.File)
		if err != nil {
			return retVal, err
		}
		_, copyErr := io.Copy(part, file)
		// FIX: always close the file, even when the copy fails (the original
		// leaked the handle on a copy error).
		closeErr := file.Close()
		if copyErr != nil {
			return retVal, copyErr
		}
		if closeErr != nil {
			return retVal, closeErr
		}
	}
	if params.FileFormat != nil {
		if err := writer.WriteField("file_format", *params.FileFormat); err != nil {
			return retVal, err
		}
	}
	if params.LocaleID != nil {
		if err := writer.WriteField("locale_id", *params.LocaleID); err != nil {
			return retVal, err
		}
	}
	if params.SkipUnverification != nil {
		if err := writer.WriteField("skip_unverification", strconv.FormatBool(*params.SkipUnverification)); err != nil {
			return retVal, err
		}
	}
	if params.SkipUploadTags != nil {
		if err := writer.WriteField("skip_upload_tags", strconv.FormatBool(*params.SkipUploadTags)); err != nil {
			return retVal, err
		}
	}
	if params.Tags != nil {
		if err := writer.WriteField("tags", *params.Tags); err != nil {
			return retVal, err
		}
	}
	if params.UpdateTranslations != nil {
		if err := writer.WriteField("update_translations", strconv.FormatBool(*params.UpdateTranslations)); err != nil {
			return retVal, err
		}
	}
	// FIX: the WriteField and Close errors were silently discarded; a failed
	// Close would have produced a truncated multipart body.
	if err := writer.WriteField("utf8", "✓"); err != nil {
		return retVal, err
	}
	if err := writer.Close(); err != nil {
		return retVal, err
	}
	rc, err := client.sendRequest("POST", url, ctype, paramsBuf, 201)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	return retVal, json.NewDecoder(reader).Decode(&retVal)
}
// View details and summary for a single upload.
// UploadShow fetches details and the import summary for a single upload.
func (client *Client) UploadShow(project_id, id string) (*LocaleFileImportWithSummary, error) {
	retVal := new(LocaleFileImportWithSummary)
	url := fmt.Sprintf("/v2/projects/%s/uploads/%s", project_id, id)
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	reader := io.Reader(rc)
	if Debug {
		// Mirror the response body to stderr while decoding.
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// Get details on a single version.
// VersionShow fetches details on a single version of a translation.
func (client *Client) VersionShow(project_id, translation_id, id string) (*TranslationVersionWithUser, error) {
	retVal := new(TranslationVersionWithUser)
	url := fmt.Sprintf("/v2/projects/%s/translations/%s/versions/%s", project_id, translation_id, id)
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	reader := io.Reader(rc)
	if Debug {
		// Mirror the response body to stderr while decoding.
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// List all versions for the given translation.
// VersionsList lists all versions for the given translation, paginated.
func (client *Client) VersionsList(project_id, translation_id string, page, perPage int) ([]*TranslationVersion, error) {
	retVal := []*TranslationVersion{}
	url := fmt.Sprintf("/v2/projects/%s/translations/%s/versions", project_id, translation_id)
	rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	reader := io.Reader(rc)
	if Debug {
		// Mirror the response body to stderr while decoding.
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// GetUserAgent returns the User-Agent string this client identifies
// itself with.
func GetUserAgent() string {
	const ua = "PhraseApp go (test)"
	return ua
}
// updates lib.go
package phraseapp
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"os"
"path/filepath"
"strconv"
"time"
)
// AffectedCount reports how many records a bulk operation touched.
type AffectedCount struct {
RecordsAffected int64 `json:"records_affected"`
}
// AffectedResources mirrors AffectedCount for resource-scoped bulk operations.
type AffectedResources struct {
RecordsAffected int64 `json:"records_affected"`
}
// Authorization describes an API access token; only a hash and the last
// eight characters of the token are carried here, not the token itself.
type Authorization struct {
CreatedAt time.Time `json:"created_at"`
ExpiresAt time.Time `json:"expires_at"`
HashedToken string `json:"hashed_token"`
ID string `json:"id"`
Note string `json:"note"`
Scopes []string `json:"scopes"`
TokenLastEight string `json:"token_last_eight"`
UpdatedAt time.Time `json:"updated_at"`
}
// AuthorizationWithToken embeds Authorization and adds the plain token value.
type AuthorizationWithToken struct {
Authorization
Token string `json:"token"`
}
// BlacklistedKey is a rule for excluding keys by name.
type BlacklistedKey struct {
CreatedAt time.Time `json:"created_at"`
ID string `json:"id"`
Name string `json:"name"`
UpdatedAt time.Time `json:"updated_at"`
}
// Comment is a message attached to a translation key, with its author.
type Comment struct {
CreatedAt time.Time `json:"created_at"`
ID string `json:"id"`
Message string `json:"message"`
UpdatedAt time.Time `json:"updated_at"`
User *UserPreview `json:"user"`
}
// Format describes a localization file format supported by the service.
type Format struct {
ApiName string `json:"api_name"`
DefaultEncoding string `json:"default_encoding"`
DefaultFile string `json:"default_file"`
Description string `json:"description"`
Exportable bool `json:"exportable"`
Extension string `json:"extension"`
Importable bool `json:"importable"`
Name string `json:"name"`
}
// KeyPreview is a compact reference to a translation key.
type KeyPreview struct {
ID string `json:"id"`
Name string `json:"name"`
Plural bool `json:"plural"`
}
// Locale is a language/region a project is translated into.
type Locale struct {
Code string `json:"code"`
CreatedAt time.Time `json:"created_at"`
Default bool `json:"default"`
ID string `json:"id"`
Main bool `json:"main"`
Name string `json:"name"`
PluralForms []string `json:"plural_forms"`
Rtl bool `json:"rtl"`
SourceLocale *LocalePreview `json:"source_locale"`
UpdatedAt time.Time `json:"updated_at"`
}
// LocaleDetails embeds Locale and adds per-locale statistics.
type LocaleDetails struct {
Locale
Statistics *LocaleStatistics `json:"statistics"`
}
// LocaleFileImport is the state of an uploaded language file.
type LocaleFileImport struct {
CreatedAt time.Time `json:"created_at"`
FileFormat string `json:"file_format"`
ID string `json:"id"`
State string `json:"state"`
UpdatedAt time.Time `json:"updated_at"`
}
// LocaleFileImportWithSummary embeds LocaleFileImport and adds the
// counts of resources the import created/updated.
type LocaleFileImportWithSummary struct {
LocaleFileImport
Summary SummaryType `json:"summary"`
}
// LocalePreview is a compact reference to a locale.
type LocalePreview struct {
Code string `json:"code"`
ID string `json:"id"`
Name string `json:"name"`
}
// LocaleStatistics holds translation-progress counters for one locale.
type LocaleStatistics struct {
KeysTotalCount int64 `json:"keys_total_count"`
KeysUntranslatedCount int64 `json:"keys_untranslated_count"`
MissingWordsCount int64 `json:"missing_words_count"`
TranslationsCompletedCount int64 `json:"translations_completed_count"`
TranslationsUnverifiedCount int64 `json:"translations_unverified_count"`
UnverifiedWordsCount int64 `json:"unverified_words_count"`
WordsTotalCount int64 `json:"words_total_count"`
}
// Project is a PhraseApp project.
type Project struct {
CreatedAt time.Time `json:"created_at"`
ID string `json:"id"`
MainFormat string `json:"main_format"`
Name string `json:"name"`
UpdatedAt time.Time `json:"updated_at"`
}
// ProjectDetails embeds Project and adds settings only returned on detail views.
type ProjectDetails struct {
Project
SharesTranslationMemory bool `json:"shares_translation_memory"`
}
// StatisticsListItem pairs a locale with its statistics.
type StatisticsListItem struct {
Locale *LocalePreview `json:"locale"`
Statistics StatisticsType `json:"statistics"`
}
// StatisticsType holds per-locale translation counters.
type StatisticsType struct {
KeysTotalCount int64 `json:"keys_total_count"`
KeysUntranslatedCount int64 `json:"keys_untranslated_count"`
TranslationsCompletedCount int64 `json:"translations_completed_count"`
TranslationsUnverifiedCount int64 `json:"translations_unverified_count"`
}
// Styleguide is a set of translation guidelines attached to a project.
type Styleguide struct {
CreatedAt time.Time `json:"created_at"`
ID string `json:"id"`
Title string `json:"title"`
UpdatedAt time.Time `json:"updated_at"`
}
// StyleguideDetails embeds Styleguide and adds its full content fields.
type StyleguideDetails struct {
Styleguide
Audience string `json:"audience"`
Business string `json:"business"`
CompanyBranding string `json:"company_branding"`
Formatting string `json:"formatting"`
GlossaryTerms string `json:"glossary_terms"`
GrammarConsistency string `json:"grammar_consistency"`
GrammaticalPerson string `json:"grammatical_person"`
LiteralTranslation string `json:"literal_translation"`
OverallTone string `json:"overall_tone"`
PublicUrl string `json:"public_url"`
Samples string `json:"samples"`
TargetAudience string `json:"target_audience"`
VocabularyType string `json:"vocabulary_type"`
}
// StyleguidePreview is a compact reference to a styleguide.
type StyleguidePreview struct {
ID string `json:"id"`
PublicUrl string `json:"public_url"`
}
// SummaryType counts the resources created/updated by a file import.
type SummaryType struct {
LocalesCreated int64 `json:"locales_created"`
TagsCreated int64 `json:"tags_created"`
TranslationKeysCreated int64 `json:"translation_keys_created"`
TranslationsCreated int64 `json:"translations_created"`
TranslationsUpdated int64 `json:"translations_updated"`
}
// Tag labels a group of keys; KeysCount is the number of keys carrying it.
type Tag struct {
CreatedAt time.Time `json:"created_at"`
KeysCount int64 `json:"keys_count"`
Name string `json:"name"`
UpdatedAt time.Time `json:"updated_at"`
}
// TagWithStats embeds Tag and adds per-locale statistics.
type TagWithStats struct {
Tag
Statistics []*StatisticsListItem `json:"statistics"`
}
// Translation is the content of one key in one locale.
type Translation struct {
Content string `json:"content"`
CreatedAt time.Time `json:"created_at"`
Excluded bool `json:"excluded"`
ID string `json:"id"`
Key *KeyPreview `json:"key"`
Locale *LocalePreview `json:"locale"`
Placeholders []string `json:"placeholders"`
PluralSuffix string `json:"plural_suffix"`
Unverified bool `json:"unverified"`
UpdatedAt time.Time `json:"updated_at"`
}
// TranslationDetails embeds Translation and adds author and word count.
type TranslationDetails struct {
Translation
User *UserPreview `json:"user"`
WordCount int64 `json:"word_count"`
}
// TranslationKey is a translatable key within a project.
type TranslationKey struct {
CreatedAt time.Time `json:"created_at"`
DataType string `json:"data_type"`
Description string `json:"description"`
ID string `json:"id"`
Name string `json:"name"`
NameHash string `json:"name_hash"`
Plural bool `json:"plural"`
Tags []string `json:"tags"`
UpdatedAt time.Time `json:"updated_at"`
}
// TranslationKeyDetails embeds TranslationKey and adds fields only
// returned on detail views.
type TranslationKeyDetails struct {
TranslationKey
CommentsCount int64 `json:"comments_count"`
FormatValueType string `json:"format_value_type"`
MaxCharactersAllowed int64 `json:"max_characters_allowed"`
NamePlural string `json:"name_plural"`
OriginalFile string `json:"original_file"`
ScreenshotUrl string `json:"screenshot_url"`
Unformatted bool `json:"unformatted"`
XmlSpacePreserve bool `json:"xml_space_preserve"`
}
// TranslationOrder is a professional translation order placed with an LSP.
type TranslationOrder struct {
AmountInCents int64 `json:"amount_in_cents"`
CreatedAt time.Time `json:"created_at"`
Currency string `json:"currency"`
ID string `json:"id"`
Lsp string `json:"lsp"`
Message string `json:"message"`
Priority bool `json:"priority"`
ProgressPercent int64 `json:"progress_percent"`
Quality bool `json:"quality"`
SourceLocale *LocalePreview `json:"source_locale"`
State string `json:"state"`
Styleguide *StyleguidePreview `json:"styleguide"`
Tag string `json:"tag"`
TargetLocales []*LocalePreview `json:"target_locales"`
TranslationType string `json:"translation_type"`
UnverifyTranslationsUponDelivery bool `json:"unverify_translations_upon_delivery"`
UpdatedAt time.Time `json:"updated_at"`
}
// TranslationVersion is one historical revision of a translation.
type TranslationVersion struct {
ChangedAt time.Time `json:"changed_at"`
Content string `json:"content"`
CreatedAt time.Time `json:"created_at"`
ID string `json:"id"`
Key *KeyPreview `json:"key"`
Locale *LocalePreview `json:"locale"`
PluralSuffix string `json:"plural_suffix"`
UpdatedAt time.Time `json:"updated_at"`
}
// TranslationVersionWithUser embeds TranslationVersion and adds its author.
type TranslationVersionWithUser struct {
TranslationVersion
User *UserPreview `json:"user"`
}
// User is a full account record.
type User struct {
CreatedAt time.Time `json:"created_at"`
Email string `json:"email"`
ID string `json:"id"`
Name string `json:"name"`
Position string `json:"position"`
UpdatedAt time.Time `json:"updated_at"`
Username string `json:"username"`
}
// UserPreview is a compact reference to a user.
type UserPreview struct {
ID string `json:"id"`
Name string `json:"name"`
Username string `json:"username"`
}
// AuthorizationParams are the writable fields of an authorization.
type AuthorizationParams struct {
ExpiresAt *time.Time `json:"expires_at,omitempty"`
Note string `json:"note"`
Scopes []string `json:"scopes,omitempty"`
}
// ApplyDefaults round-trips defaults through JSON into a fresh
// AuthorizationParams and returns it.
// NOTE(review): the receiver's own field values are never merged into the
// result — on success the returned struct contains only the decoded
// defaults, and params is returned (unchanged) only on error. Confirm
// this is the intended "apply" semantics; all ApplyDefaults methods in
// this file share the pattern.
func (params *AuthorizationParams) ApplyDefaults(defaults map[string]interface{}) (*AuthorizationParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(AuthorizationParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// BlacklistedKeyParams are the writable fields of a blacklisted-key rule.
type BlacklistedKeyParams struct {
Name string `json:"name"`
}
// ApplyDefaults decodes defaults into a fresh BlacklistedKeyParams (see
// the note on AuthorizationParams.ApplyDefaults above).
func (params *BlacklistedKeyParams) ApplyDefaults(defaults map[string]interface{}) (*BlacklistedKeyParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(BlacklistedKeyParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// CommentParams are the writable fields of a comment.
type CommentParams struct {
Message string `json:"message"`
}
// ApplyDefaults decodes defaults into a fresh CommentParams (see the
// note on AuthorizationParams.ApplyDefaults above).
func (params *CommentParams) ApplyDefaults(defaults map[string]interface{}) (*CommentParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(CommentParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// TranslationKeyParams are the writable fields of a translation key;
// pointer fields distinguish "unset" from the zero value.
type TranslationKeyParams struct {
DataType *string `json:"data_type,omitempty"`
Description *string `json:"description,omitempty"`
LocalizedFormatKey *string `json:"localized_format_key,omitempty"`
LocalizedFormatString *string `json:"localized_format_string,omitempty"`
MaxCharactersAllowed *int64 `json:"max_characters_allowed,omitempty"`
Name string `json:"name"`
NamePlural *string `json:"name_plural,omitempty"`
OriginalFile *string `json:"original_file,omitempty"`
Plural *bool `json:"plural,omitempty"`
RemoveScreenshot *bool `json:"remove_screenshot,omitempty"`
Screenshot *string `json:"screenshot,omitempty"`
Tags *string `json:"tags,omitempty"`
Unformatted *bool `json:"unformatted,omitempty"`
XmlSpacePreserve *bool `json:"xml_space_preserve,omitempty"`
}
// ApplyDefaults decodes defaults into a fresh TranslationKeyParams (see
// the note on AuthorizationParams.ApplyDefaults above).
func (params *TranslationKeyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationKeyParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(TranslationKeyParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// LocaleParams are the writable fields of a locale.
type LocaleParams struct {
Code string `json:"code"`
Default *bool `json:"default,omitempty"`
Main *bool `json:"main,omitempty"`
Name string `json:"name"`
Rtl *bool `json:"rtl,omitempty"`
SourceLocaleID *string `json:"source_locale_id,omitempty"`
}
// ApplyDefaults decodes defaults into a fresh LocaleParams (see the
// note on AuthorizationParams.ApplyDefaults above).
func (params *LocaleParams) ApplyDefaults(defaults map[string]interface{}) (*LocaleParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(LocaleParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// TranslationOrderParams are the writable fields of a translation order.
type TranslationOrderParams struct {
Category string `json:"category"`
IncludeUntranslatedKeys *bool `json:"include_untranslated_keys,omitempty"`
IncludeUnverifiedTranslations *bool `json:"include_unverified_translations,omitempty"`
Lsp string `json:"lsp"`
Message *string `json:"message,omitempty"`
Priority *bool `json:"priority,omitempty"`
Quality *bool `json:"quality,omitempty"`
SourceLocaleID string `json:"source_locale_id"`
StyleguideID *string `json:"styleguide_id,omitempty"`
Tag *string `json:"tag,omitempty"`
TargetLocaleIDs []string `json:"target_locale_ids"`
TranslationType string `json:"translation_type"`
UnverifyTranslationsUponDelivery *bool `json:"unverify_translations_upon_delivery,omitempty"`
}
// ApplyDefaults round-trips defaults through JSON into a fresh params
// struct and returns it. NOTE(review): the receiver's own field values
// are never merged into the result — on success the returned struct
// contains only the decoded defaults; params is returned (unchanged)
// only on error. Confirm this is the intended "apply" semantics.
func (params *TranslationOrderParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationOrderParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(TranslationOrderParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// ProjectParams are the writable fields of a project.
type ProjectParams struct {
MainFormat *string `json:"main_format,omitempty"`
Name string `json:"name"`
SharesTranslationMemory *bool `json:"shares_translation_memory,omitempty"`
}
// ApplyDefaults decodes defaults into a fresh ProjectParams (see the
// note on TranslationOrderParams.ApplyDefaults above).
func (params *ProjectParams) ApplyDefaults(defaults map[string]interface{}) (*ProjectParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(ProjectParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// StyleguideParams are the writable fields of a styleguide.
type StyleguideParams struct {
Audience *string `json:"audience,omitempty"`
Business *string `json:"business,omitempty"`
CompanyBranding *string `json:"company_branding,omitempty"`
Formatting *string `json:"formatting,omitempty"`
GlossaryTerms *string `json:"glossary_terms,omitempty"`
GrammarConsistency *string `json:"grammar_consistency,omitempty"`
GrammaticalPerson *string `json:"grammatical_person,omitempty"`
LiteralTranslation *string `json:"literal_translation,omitempty"`
OverallTone *string `json:"overall_tone,omitempty"`
Samples *string `json:"samples,omitempty"`
TargetAudience *string `json:"target_audience,omitempty"`
Title string `json:"title"`
VocabularyType *string `json:"vocabulary_type,omitempty"`
}
// ApplyDefaults decodes defaults into a fresh StyleguideParams (see the
// note on TranslationOrderParams.ApplyDefaults above).
func (params *StyleguideParams) ApplyDefaults(defaults map[string]interface{}) (*StyleguideParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(StyleguideParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// TagParams are the writable fields of a tag.
type TagParams struct {
Name string `json:"name"`
}
// ApplyDefaults decodes defaults into a fresh TagParams (see the note
// on TranslationOrderParams.ApplyDefaults above).
func (params *TagParams) ApplyDefaults(defaults map[string]interface{}) (*TagParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(TagParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// TranslationParams are the writable fields of a translation.
type TranslationParams struct {
Content string `json:"content"`
Excluded *bool `json:"excluded,omitempty"`
KeyID string `json:"key_id"`
LocaleID string `json:"locale_id"`
PluralSuffix *string `json:"plural_suffix,omitempty"`
Unverified *bool `json:"unverified,omitempty"`
}
// ApplyDefaults decodes defaults into a fresh TranslationParams (see
// the note on TranslationOrderParams.ApplyDefaults above).
func (params *TranslationParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(TranslationParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// LocaleFileImportParams are the writable fields of a file upload;
// File is a local path streamed into the multipart request.
type LocaleFileImportParams struct {
ConvertEmoji *bool `json:"convert_emoji,omitempty"`
File string `json:"file"`
FileFormat *string `json:"file_format,omitempty"`
LocaleID *string `json:"locale_id,omitempty"`
SkipUnverification *bool `json:"skip_unverification,omitempty"`
SkipUploadTags *bool `json:"skip_upload_tags,omitempty"`
Tags *string `json:"tags,omitempty"`
UpdateTranslations *bool `json:"update_translations,omitempty"`
}
// ApplyDefaults decodes defaults into a fresh LocaleFileImportParams
// (see the note on TranslationOrderParams.ApplyDefaults above).
func (params *LocaleFileImportParams) ApplyDefaults(defaults map[string]interface{}) (*LocaleFileImportParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(LocaleFileImportParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Create a new authorization.
func (client *Client) AuthorizationCreate(params *AuthorizationParams) (*AuthorizationWithToken, error) {
retVal := new(AuthorizationWithToken)
err := func() error {
url := fmt.Sprintf("/v2/authorizations")
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Delete an existing authorization. API calls using that token will stop working.
// AuthorizationDelete removes an existing authorization; API calls made
// with that token will stop working.
func (client *Client) AuthorizationDelete(id string) error {
	url := fmt.Sprintf("/v2/authorizations/%s", id)
	rc, err := client.sendRequest("DELETE", url, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// Get details on a single authorization.
// AuthorizationShow fetches details on a single authorization by id.
func (client *Client) AuthorizationShow(id string) (*Authorization, error) {
	retVal := new(Authorization)
	url := fmt.Sprintf("/v2/authorizations/%s", id)
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	reader := io.Reader(rc)
	if Debug {
		// Mirror the response body to stderr while decoding.
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// Update an existing authorization.
func (client *Client) AuthorizationUpdate(id string, params *AuthorizationParams) (*Authorization, error) {
retVal := new(Authorization)
err := func() error {
url := fmt.Sprintf("/v2/authorizations/%s", id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// List all your authorizations.
// AuthorizationsList lists all authorizations of the current user, paginated.
func (client *Client) AuthorizationsList(page, perPage int) ([]*Authorization, error) {
	retVal := []*Authorization{}
	err := func() error {
		// fmt.Sprintf with no verbs is flagged by go vet; use the literal.
		url := "/v2/authorizations"
		rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
		if err != nil {
			return err
		}
		defer rc.Close()
		var reader io.Reader
		if Debug {
			reader = io.TeeReader(rc, os.Stderr)
		} else {
			reader = rc
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}()
	return retVal, err
}
// Create a new rule for blacklisting keys.
func (client *Client) BlacklistedKeyCreate(project_id string, params *BlacklistedKeyParams) (*BlacklistedKey, error) {
retVal := new(BlacklistedKey)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Delete an existing rule for blacklisting keys.
// BlacklistedKeyDelete removes an existing blacklisting rule.
func (client *Client) BlacklistedKeyDelete(project_id, id string) error {
	url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys/%s", project_id, id)
	rc, err := client.sendRequest("DELETE", url, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// Get details on a single rule for blacklisting keys for a given project.
// BlacklistedKeyShow fetches a single blacklisting rule of a project.
func (client *Client) BlacklistedKeyShow(project_id, id string) (*BlacklistedKey, error) {
	retVal := new(BlacklistedKey)
	url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys/%s", project_id, id)
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	reader := io.Reader(rc)
	if Debug {
		// Mirror the response body to stderr while decoding.
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// Update an existing rule for blacklisting keys.
func (client *Client) BlacklistedKeyUpdate(project_id, id string, params *BlacklistedKeyParams) (*BlacklistedKey, error) {
retVal := new(BlacklistedKey)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// List all rules for blacklisting keys for the given project.
// BlacklistedKeysIndex lists all blacklisting rules of a project, paginated.
func (client *Client) BlacklistedKeysIndex(project_id string, page, perPage int) ([]*BlacklistedKey, error) {
	retVal := []*BlacklistedKey{}
	url := fmt.Sprintf("/v2/projects/%s/blacklisted_keys", project_id)
	rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	reader := io.Reader(rc)
	if Debug {
		// Mirror the response body to stderr while decoding.
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// Create a new comment for a key.
func (client *Client) CommentCreate(project_id, key_id string, params *CommentParams) (*Comment, error) {
retVal := new(Comment)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments", project_id, key_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Delete an existing comment.
// CommentDelete removes an existing comment.
func (client *Client) CommentDelete(project_id, key_id, id string) error {
	url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s", project_id, key_id, id)
	rc, err := client.sendRequest("DELETE", url, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// Check if comment was marked as read. Returns 204 if read, 404 if unread.
// CommentMarkCheck checks whether a comment is marked as read: the API
// answers 204 when read and 404 when unread (surfaced as the error).
func (client *Client) CommentMarkCheck(project_id, key_id, id string) error {
	url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s/read", project_id, key_id, id)
	rc, err := client.sendRequest("GET", url, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// Mark a comment as read.
// CommentMarkRead marks a comment as read.
func (client *Client) CommentMarkRead(project_id, key_id, id string) error {
	url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s/read", project_id, key_id, id)
	rc, err := client.sendRequest("PATCH", url, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// Mark a comment as unread.
// CommentMarkUnread marks a comment as unread again.
func (client *Client) CommentMarkUnread(project_id, key_id, id string) error {
	url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s/read", project_id, key_id, id)
	rc, err := client.sendRequest("DELETE", url, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// Get details on a single comment.
// CommentShow fetches details on a single comment.
func (client *Client) CommentShow(project_id, key_id, id string) (*Comment, error) {
	retVal := new(Comment)
	url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s", project_id, key_id, id)
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	reader := io.Reader(rc)
	if Debug {
		// Mirror the response body to stderr while decoding.
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// Update an existing comment.
func (client *Client) CommentUpdate(project_id, key_id, id string, params *CommentParams) (*Comment, error) {
retVal := new(Comment)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments/%s", project_id, key_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// List all comments for a key.
// CommentsList lists all comments of a key, paginated.
func (client *Client) CommentsList(project_id, key_id string, page, perPage int) ([]*Comment, error) {
	retVal := []*Comment{}
	url := fmt.Sprintf("/v2/projects/%s/keys/%s/comments", project_id, key_id)
	rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	reader := io.Reader(rc)
	if Debug {
		// Mirror the response body to stderr while decoding.
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// Get a handy list of all localization file formats supported in PhraseApp.
func (client *Client) FormatsList(page, perPage int) ([]*Format, error) {
retVal := []*Format{}
err := func() error {
url := fmt.Sprintf("/v2/formats")
rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Create a new key.
// KeyCreate creates a new translation key in the project.
//
// The request body is multipart/form-data: optional params are written
// only when set, and params.Screenshot (when set) is streamed from disk
// into the form.
func (client *Client) KeyCreate(project_id string, params *TranslationKeyParams) (*TranslationKeyDetails, error) {
	retVal := new(TranslationKeyDetails)
	err := func() error {
		url := fmt.Sprintf("/v2/projects/%s/keys", project_id)
		paramsBuf := bytes.NewBuffer(nil)
		writer := multipart.NewWriter(paramsBuf)
		ctype := writer.FormDataContentType()
		if params.DataType != nil {
			err := writer.WriteField("data_type", *params.DataType)
			if err != nil {
				return err
			}
		}
		if params.Description != nil {
			err := writer.WriteField("description", *params.Description)
			if err != nil {
				return err
			}
		}
		if params.LocalizedFormatKey != nil {
			err := writer.WriteField("localized_format_key", *params.LocalizedFormatKey)
			if err != nil {
				return err
			}
		}
		if params.LocalizedFormatString != nil {
			err := writer.WriteField("localized_format_string", *params.LocalizedFormatString)
			if err != nil {
				return err
			}
		}
		if params.MaxCharactersAllowed != nil {
			err := writer.WriteField("max_characters_allowed", strconv.FormatInt(*params.MaxCharactersAllowed, 10))
			if err != nil {
				return err
			}
		}
		if params.Name != "" {
			err := writer.WriteField("name", params.Name)
			if err != nil {
				return err
			}
		}
		if params.NamePlural != nil {
			err := writer.WriteField("name_plural", *params.NamePlural)
			if err != nil {
				return err
			}
		}
		if params.OriginalFile != nil {
			err := writer.WriteField("original_file", *params.OriginalFile)
			if err != nil {
				return err
			}
		}
		if params.Plural != nil {
			err := writer.WriteField("plural", strconv.FormatBool(*params.Plural))
			if err != nil {
				return err
			}
		}
		if params.RemoveScreenshot != nil {
			err := writer.WriteField("remove_screenshot", strconv.FormatBool(*params.RemoveScreenshot))
			if err != nil {
				return err
			}
		}
		if params.Screenshot != nil {
			part, err := writer.CreateFormFile("screenshot", filepath.Base(*params.Screenshot))
			if err != nil {
				return err
			}
			file, err := os.Open(*params.Screenshot)
			if err != nil {
				return err
			}
			_, err = io.Copy(part, file)
			// Close unconditionally so the handle is not leaked when the
			// copy fails; surface the close error only if the copy succeeded.
			if cerr := file.Close(); err == nil {
				err = cerr
			}
			if err != nil {
				return err
			}
		}
		if params.Tags != nil {
			err := writer.WriteField("tags", *params.Tags)
			if err != nil {
				return err
			}
		}
		if params.Unformatted != nil {
			err := writer.WriteField("unformatted", strconv.FormatBool(*params.Unformatted))
			if err != nil {
				return err
			}
		}
		if params.XmlSpacePreserve != nil {
			err := writer.WriteField("xml_space_preserve", strconv.FormatBool(*params.XmlSpacePreserve))
			if err != nil {
				return err
			}
		}
		// These errors were previously discarded; a failure here would send
		// a truncated multipart body with a valid-looking Content-Type.
		if err := writer.WriteField("utf8", "✓"); err != nil {
			return err
		}
		if err := writer.Close(); err != nil {
			return err
		}
		rc, err := client.sendRequest("POST", url, ctype, paramsBuf, 201)
		if err != nil {
			return err
		}
		defer rc.Close()
		var reader io.Reader
		if Debug {
			reader = io.TeeReader(rc, os.Stderr)
		} else {
			reader = rc
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}()
	return retVal, err
}
// Delete an existing key.
// KeyDelete removes an existing key.
func (client *Client) KeyDelete(project_id, id string) error {
	url := fmt.Sprintf("/v2/projects/%s/keys/%s", project_id, id)
	rc, err := client.sendRequest("DELETE", url, "", nil, 204)
	if err != nil {
		return err
	}
	rc.Close()
	return nil
}
// Get details on a single key for a given project.
// KeyShow fetches details on a single key of a project.
func (client *Client) KeyShow(project_id, id string) (*TranslationKeyDetails, error) {
	retVal := new(TranslationKeyDetails)
	url := fmt.Sprintf("/v2/projects/%s/keys/%s", project_id, id)
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	reader := io.Reader(rc)
	if Debug {
		// Mirror the response body to stderr while decoding.
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// Update an existing key.
// KeyUpdate updates an existing translation key.
//
// The request body is multipart/form-data: optional params are written
// only when set, and params.Screenshot (when set) is streamed from disk
// into the form.
func (client *Client) KeyUpdate(project_id, id string, params *TranslationKeyParams) (*TranslationKeyDetails, error) {
	retVal := new(TranslationKeyDetails)
	err := func() error {
		url := fmt.Sprintf("/v2/projects/%s/keys/%s", project_id, id)
		paramsBuf := bytes.NewBuffer(nil)
		writer := multipart.NewWriter(paramsBuf)
		ctype := writer.FormDataContentType()
		if params.DataType != nil {
			err := writer.WriteField("data_type", *params.DataType)
			if err != nil {
				return err
			}
		}
		if params.Description != nil {
			err := writer.WriteField("description", *params.Description)
			if err != nil {
				return err
			}
		}
		if params.LocalizedFormatKey != nil {
			err := writer.WriteField("localized_format_key", *params.LocalizedFormatKey)
			if err != nil {
				return err
			}
		}
		if params.LocalizedFormatString != nil {
			err := writer.WriteField("localized_format_string", *params.LocalizedFormatString)
			if err != nil {
				return err
			}
		}
		if params.MaxCharactersAllowed != nil {
			err := writer.WriteField("max_characters_allowed", strconv.FormatInt(*params.MaxCharactersAllowed, 10))
			if err != nil {
				return err
			}
		}
		if params.Name != "" {
			err := writer.WriteField("name", params.Name)
			if err != nil {
				return err
			}
		}
		if params.NamePlural != nil {
			err := writer.WriteField("name_plural", *params.NamePlural)
			if err != nil {
				return err
			}
		}
		if params.OriginalFile != nil {
			err := writer.WriteField("original_file", *params.OriginalFile)
			if err != nil {
				return err
			}
		}
		if params.Plural != nil {
			err := writer.WriteField("plural", strconv.FormatBool(*params.Plural))
			if err != nil {
				return err
			}
		}
		if params.RemoveScreenshot != nil {
			err := writer.WriteField("remove_screenshot", strconv.FormatBool(*params.RemoveScreenshot))
			if err != nil {
				return err
			}
		}
		if params.Screenshot != nil {
			part, err := writer.CreateFormFile("screenshot", filepath.Base(*params.Screenshot))
			if err != nil {
				return err
			}
			file, err := os.Open(*params.Screenshot)
			if err != nil {
				return err
			}
			_, err = io.Copy(part, file)
			// Close unconditionally so the handle is not leaked when the
			// copy fails; surface the close error only if the copy succeeded.
			if cerr := file.Close(); err == nil {
				err = cerr
			}
			if err != nil {
				return err
			}
		}
		if params.Tags != nil {
			err := writer.WriteField("tags", *params.Tags)
			if err != nil {
				return err
			}
		}
		if params.Unformatted != nil {
			err := writer.WriteField("unformatted", strconv.FormatBool(*params.Unformatted))
			if err != nil {
				return err
			}
		}
		if params.XmlSpacePreserve != nil {
			err := writer.WriteField("xml_space_preserve", strconv.FormatBool(*params.XmlSpacePreserve))
			if err != nil {
				return err
			}
		}
		// These errors were previously discarded; a failure here would send
		// a truncated multipart body with a valid-looking Content-Type.
		if err := writer.WriteField("utf8", "✓"); err != nil {
			return err
		}
		if err := writer.Close(); err != nil {
			return err
		}
		rc, err := client.sendRequest("PATCH", url, ctype, paramsBuf, 200)
		if err != nil {
			return err
		}
		defer rc.Close()
		var reader io.Reader
		if Debug {
			reader = io.TeeReader(rc, os.Stderr)
		} else {
			reader = rc
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}()
	return retVal, err
}
type KeysDeleteParams struct {
LocaleID *string `json:"locale_id,omitempty"`
Q *string `json:"q,omitempty"`
}
func (params *KeysDeleteParams) ApplyDefaults(defaults map[string]interface{}) (*KeysDeleteParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(KeysDeleteParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Delete all keys matching query. Same constraints as list.
func (client *Client) KeysDelete(project_id string, params *KeysDeleteParams) (*AffectedResources, error) {
retVal := new(AffectedResources)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("DELETE", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type KeysListParams struct {
LocaleID *string `json:"locale_id,omitempty"`
Order *string `json:"order,omitempty"`
Q *string `json:"q,omitempty"`
Sort *string `json:"sort,omitempty"`
}
func (params *KeysListParams) ApplyDefaults(defaults map[string]interface{}) (*KeysListParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(KeysListParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// List all keys for the given project. Alternatively you can POST requests to /search.
func (client *Client) KeysList(project_id string, page, perPage int, params *KeysListParams) ([]*TranslationKey, error) {
retVal := []*TranslationKey{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type KeysSearchParams struct {
LocaleID *string `json:"locale_id,omitempty"`
Order *string `json:"order,omitempty"`
Q *string `json:"q,omitempty"`
Sort *string `json:"sort,omitempty"`
}
func (params *KeysSearchParams) ApplyDefaults(defaults map[string]interface{}) (*KeysSearchParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(KeysSearchParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Search keys for the given project matching query.
func (client *Client) KeysSearch(project_id string, page, perPage int, params *KeysSearchParams) ([]*TranslationKey, error) {
retVal := []*TranslationKey{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/search", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("POST", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type KeysTagParams struct {
LocaleID *string `json:"locale_id,omitempty"`
Q *string `json:"q,omitempty"`
Tags string `json:"tags"`
}
func (params *KeysTagParams) ApplyDefaults(defaults map[string]interface{}) (*KeysTagParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(KeysTagParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Tags all keys matching query. Same constraints as list.
func (client *Client) KeysTag(project_id string, params *KeysTagParams) (*AffectedResources, error) {
retVal := new(AffectedResources)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/tag", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type KeysUntagParams struct {
LocaleID *string `json:"locale_id,omitempty"`
Q *string `json:"q,omitempty"`
Tags string `json:"tags"`
}
func (params *KeysUntagParams) ApplyDefaults(defaults map[string]interface{}) (*KeysUntagParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(KeysUntagParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Removes specified tags from keys matching query.
func (client *Client) KeysUntag(project_id string, params *KeysUntagParams) (*AffectedResources, error) {
retVal := new(AffectedResources)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/tag", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("DELETE", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// Create a new locale.
func (client *Client) LocaleCreate(project_id string, params *LocaleParams) (*LocaleDetails, error) {
retVal := new(LocaleDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// LocaleDelete deletes an existing locale; the API is expected to answer
// 204 No Content.
func (client *Client) LocaleDelete(project_id, id string) error {
	url := fmt.Sprintf("/v2/projects/%s/locales/%s", project_id, id)
	rc, err := client.sendRequest("DELETE", url, "", nil, 204)
	if err != nil {
		return err
	}
	defer rc.Close()
	return nil
}
type LocaleDownloadParams struct {
ConvertEmoji bool `json:"convert_emoji,omitempty"`
Encoding *string `json:"encoding,omitempty"`
FileFormat string `json:"file_format"`
FormatOptions *map[string]interface{} `json:"format_options,omitempty"`
IncludeEmptyTranslations bool `json:"include_empty_translations,omitempty"`
KeepNotranslateTags bool `json:"keep_notranslate_tags,omitempty"`
SkipUnverifiedTranslations bool `json:"skip_unverified_translations,omitempty"`
Tag *string `json:"tag,omitempty"`
}
func (params *LocaleDownloadParams) ApplyDefaults(defaults map[string]interface{}) (*LocaleDownloadParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(LocaleDownloadParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Download a locale in a specific file format.
func (client *Client) LocaleDownload(project_id, id string, params *LocaleDownloadParams) ([]byte, error) {
retVal := []byte{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s/download", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("GET", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
retVal, err = ioutil.ReadAll(reader)
return err
}()
return retVal, err
}
// LocaleShow gets details on a single locale for a given project.
func (client *Client) LocaleShow(project_id, id string) (*LocaleDetails, error) {
	retVal := new(LocaleDetails)
	url := fmt.Sprintf("/v2/projects/%s/locales/%s", project_id, id)
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		// mirror the response body to stderr while decoding
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(retVal)
	return retVal, err
}
// Update an existing locale.
func (client *Client) LocaleUpdate(project_id, id string, params *LocaleParams) (*LocaleDetails, error) {
retVal := new(LocaleDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// LocalesList lists all locales for the given project, one page at a time.
func (client *Client) LocalesList(project_id string, page, perPage int) ([]*Locale, error) {
	retVal := []*Locale{}
	url := fmt.Sprintf("/v2/projects/%s/locales", project_id)
	rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// OrderConfirm confirms an existing order and sends it to the provider for
// translation. Same constraints as for create.
func (client *Client) OrderConfirm(project_id, id string) (*TranslationOrder, error) {
	retVal := new(TranslationOrder)
	url := fmt.Sprintf("/v2/projects/%s/orders/%s/confirm", project_id, id)
	rc, err := client.sendRequest("PATCH", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(retVal)
	return retVal, err
}
// Create a new order. Access token scope must include <code>orders.create</code>.
func (client *Client) OrderCreate(project_id string, params *TranslationOrderParams) (*TranslationOrder, error) {
retVal := new(TranslationOrder)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/orders", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// OrderDelete cancels an existing order. Must not yet be confirmed.
func (client *Client) OrderDelete(project_id, id string) error {
	url := fmt.Sprintf("/v2/projects/%s/orders/%s", project_id, id)
	rc, err := client.sendRequest("DELETE", url, "", nil, 204)
	if err != nil {
		return err
	}
	defer rc.Close()
	return nil
}
// OrderShow gets details on a single order.
func (client *Client) OrderShow(project_id, id string) (*TranslationOrder, error) {
	retVal := new(TranslationOrder)
	url := fmt.Sprintf("/v2/projects/%s/orders/%s", project_id, id)
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(retVal)
	return retVal, err
}
// OrdersList lists all orders for the given project, one page at a time.
func (client *Client) OrdersList(project_id string, page, perPage int) ([]*TranslationOrder, error) {
	retVal := []*TranslationOrder{}
	url := fmt.Sprintf("/v2/projects/%s/orders", project_id)
	rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// Create a new project.
func (client *Client) ProjectCreate(params *ProjectParams) (*ProjectDetails, error) {
retVal := new(ProjectDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects")
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// ProjectDelete deletes an existing project; the API is expected to answer
// 204 No Content.
func (client *Client) ProjectDelete(id string) error {
	url := fmt.Sprintf("/v2/projects/%s", id)
	rc, err := client.sendRequest("DELETE", url, "", nil, 204)
	if err != nil {
		return err
	}
	defer rc.Close()
	return nil
}
// ProjectShow gets details on a single project.
func (client *Client) ProjectShow(id string) (*ProjectDetails, error) {
	retVal := new(ProjectDetails)
	url := fmt.Sprintf("/v2/projects/%s", id)
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(retVal)
	return retVal, err
}
// Update an existing project.
func (client *Client) ProjectUpdate(id string, params *ProjectParams) (*ProjectDetails, error) {
retVal := new(ProjectDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s", id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// ProjectsList lists all projects the current user has access to, one page
// at a time.
func (client *Client) ProjectsList(page, perPage int) ([]*Project, error) {
	retVal := []*Project{}
	// constant path — fmt.Sprintf on a constant string is unnecessary
	// (staticcheck S1039)
	url := "/v2/projects"
	rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// ShowUser shows details for the current user.
func (client *Client) ShowUser() (*User, error) {
	retVal := new(User)
	// constant path — fmt.Sprintf on a constant string is unnecessary
	// (staticcheck S1039)
	url := "/v2/user"
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(retVal)
	return retVal, err
}
// Create a new style guide.
func (client *Client) StyleguideCreate(project_id string, params *StyleguideParams) (*StyleguideDetails, error) {
retVal := new(StyleguideDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/styleguides", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// StyleguideDelete deletes an existing style guide; the API is expected to
// answer 204 No Content.
func (client *Client) StyleguideDelete(project_id, id string) error {
	url := fmt.Sprintf("/v2/projects/%s/styleguides/%s", project_id, id)
	rc, err := client.sendRequest("DELETE", url, "", nil, 204)
	if err != nil {
		return err
	}
	defer rc.Close()
	return nil
}
// StyleguideShow gets details on a single style guide.
func (client *Client) StyleguideShow(project_id, id string) (*StyleguideDetails, error) {
	retVal := new(StyleguideDetails)
	url := fmt.Sprintf("/v2/projects/%s/styleguides/%s", project_id, id)
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(retVal)
	return retVal, err
}
// Update an existing style guide.
func (client *Client) StyleguideUpdate(project_id, id string, params *StyleguideParams) (*StyleguideDetails, error) {
retVal := new(StyleguideDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/styleguides/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// StyleguidesList lists all styleguides for the given project, one page at
// a time.
func (client *Client) StyleguidesList(project_id string, page, perPage int) ([]*Styleguide, error) {
	retVal := []*Styleguide{}
	url := fmt.Sprintf("/v2/projects/%s/styleguides", project_id)
	rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// Create a new tag.
func (client *Client) TagCreate(project_id string, params *TagParams) (*TagWithStats, error) {
retVal := new(TagWithStats)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/tags", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TagDelete deletes an existing tag; the API is expected to answer
// 204 No Content.
func (client *Client) TagDelete(project_id, name string) error {
	url := fmt.Sprintf("/v2/projects/%s/tags/%s", project_id, name)
	rc, err := client.sendRequest("DELETE", url, "", nil, 204)
	if err != nil {
		return err
	}
	defer rc.Close()
	return nil
}
// TagShow gets details and progress information on a single tag for a given
// project.
func (client *Client) TagShow(project_id, name string) (*TagWithStats, error) {
	retVal := new(TagWithStats)
	url := fmt.Sprintf("/v2/projects/%s/tags/%s", project_id, name)
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(retVal)
	return retVal, err
}
// TagsList lists all tags for the given project, one page at a time.
func (client *Client) TagsList(project_id string, page, perPage int) ([]*Tag, error) {
	retVal := []*Tag{}
	url := fmt.Sprintf("/v2/projects/%s/tags", project_id)
	rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(&retVal)
	return retVal, err
}
// Create a translation.
func (client *Client) TranslationCreate(project_id string, params *TranslationParams) (*TranslationDetails, error) {
retVal := new(TranslationDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("POST", url, "application/json", paramsBuf, 201)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationMachineTranslate updates a translation with machine
// translation; the API is expected to answer 204 No Content.
func (client *Client) TranslationMachineTranslate(project_id, id string) error {
	url := fmt.Sprintf("/v2/projects/%s/translations/%s/machine_translate", project_id, id)
	rc, err := client.sendRequest("PATCH", url, "", nil, 204)
	if err != nil {
		return err
	}
	defer rc.Close()
	return nil
}
// TranslationShow gets details on a single translation.
func (client *Client) TranslationShow(project_id, id string) (*TranslationDetails, error) {
	retVal := new(TranslationDetails)
	url := fmt.Sprintf("/v2/projects/%s/translations/%s", project_id, id)
	rc, err := client.sendRequest("GET", url, "", nil, 200)
	if err != nil {
		return retVal, err
	}
	defer rc.Close()
	var reader io.Reader = rc
	if Debug {
		reader = io.TeeReader(rc, os.Stderr)
	}
	err = json.NewDecoder(reader).Decode(retVal)
	return retVal, err
}
type TranslationUpdateParams struct {
Content string `json:"content"`
Excluded *bool `json:"excluded,omitempty"`
PluralSuffix *string `json:"plural_suffix,omitempty"`
Unverified *bool `json:"unverified,omitempty"`
}
func (params *TranslationUpdateParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationUpdateParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(TranslationUpdateParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Update an existing translation.
func (client *Client) TranslationUpdate(project_id, id string, params *TranslationUpdateParams) (*TranslationDetails, error) {
retVal := new(TranslationDetails)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/%s", project_id, id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type TranslationsByKeyParams struct {
Order *string `json:"order,omitempty"`
Q *string `json:"q,omitempty"`
Sort *string `json:"sort,omitempty"`
}
func (params *TranslationsByKeyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsByKeyParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(TranslationsByKeyParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// List translations for a specific key.
func (client *Client) TranslationsByKey(project_id, key_id string, page, perPage int, params *TranslationsByKeyParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/keys/%s/translations", project_id, key_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type TranslationsByLocaleParams struct {
Order *string `json:"order,omitempty"`
Q *string `json:"q,omitempty"`
Sort *string `json:"sort,omitempty"`
}
func (params *TranslationsByLocaleParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsByLocaleParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(TranslationsByLocaleParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// List translations for a specific locale.
func (client *Client) TranslationsByLocale(project_id, locale_id string, page, perPage int, params *TranslationsByLocaleParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/locales/%s/translations", project_id, locale_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type TranslationsExcludeParams struct {
Order *string `json:"order,omitempty"`
Q *string `json:"q,omitempty"`
Sort *string `json:"sort,omitempty"`
}
func (params *TranslationsExcludeParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsExcludeParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(TranslationsExcludeParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Exclude translations matching query from locale export.
func (client *Client) TranslationsExclude(project_id string, params *TranslationsExcludeParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/exclude", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type TranslationsIncludeParams struct {
Order *string `json:"order,omitempty"`
Q *string `json:"q,omitempty"`
Sort *string `json:"sort,omitempty"`
}
func (params *TranslationsIncludeParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsIncludeParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(TranslationsIncludeParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Include translations matching query in locale export.
func (client *Client) TranslationsInclude(project_id string, params *TranslationsIncludeParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/include", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type TranslationsListParams struct {
Order *string `json:"order,omitempty"`
Q *string `json:"q,omitempty"`
Sort *string `json:"sort,omitempty"`
}
func (params *TranslationsListParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsListParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(TranslationsListParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// List translations for the given project. Alternatively, POST request to /search
func (client *Client) TranslationsList(project_id string, page, perPage int, params *TranslationsListParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("GET", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type TranslationsSearchParams struct {
Order *string `json:"order,omitempty"`
Q *string `json:"q,omitempty"`
Sort *string `json:"sort,omitempty"`
}
func (params *TranslationsSearchParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsSearchParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(TranslationsSearchParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// List translations for the given project if you exceed GET request limitations on translations list.
func (client *Client) TranslationsSearch(project_id string, page, perPage int, params *TranslationsSearchParams) ([]*Translation, error) {
retVal := []*Translation{}
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/search", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequestPaginated("POST", url, "application/json", paramsBuf, 200, page, perPage)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
type TranslationsUnverifyParams struct {
Order *string `json:"order,omitempty"`
Q *string `json:"q,omitempty"`
Sort *string `json:"sort,omitempty"`
}
func (params *TranslationsUnverifyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsUnverifyParams, error) {
str, err := json.Marshal(defaults)
if err != nil {
return params, err
}
defaultParams := new(TranslationsUnverifyParams)
err = json.Unmarshal(str, defaultParams)
if err != nil {
return params, err
}
return defaultParams, nil
}
// Mark translations matching query as unverified.
func (client *Client) TranslationsUnverify(project_id string, params *TranslationsUnverifyParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/unverify", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// TranslationsVerifyParams holds the filter options accepted by
// Client.TranslationsVerify.
type TranslationsVerifyParams struct {
	Order *string `json:"order,omitempty"`
	Q     *string `json:"q,omitempty"`
	Sort  *string `json:"sort,omitempty"`
}

// ApplyDefaults builds a fresh parameter set from the given defaults map by
// round-tripping it through JSON. On any JSON error the receiver is returned
// unchanged alongside the error.
// NOTE(review): values already set on the receiver are not merged into the
// result — confirm this is the intended behavior.
func (params *TranslationsVerifyParams) ApplyDefaults(defaults map[string]interface{}) (*TranslationsVerifyParams, error) {
	encoded, err := json.Marshal(defaults)
	if err != nil {
		return params, err
	}
	defaultParams := new(TranslationsVerifyParams)
	if err := json.Unmarshal(encoded, defaultParams); err != nil {
		return params, err
	}
	return defaultParams, nil
}
// Verify translations matching query.
func (client *Client) TranslationsVerify(project_id string, params *TranslationsVerifyParams) (*AffectedCount, error) {
retVal := new(AffectedCount)
err := func() error {
url := fmt.Sprintf("/v2/projects/%s/translations/verify", project_id)
paramsBuf := bytes.NewBuffer(nil)
err := json.NewEncoder(paramsBuf).Encode(¶ms)
if err != nil {
return err
}
rc, err := client.sendRequest("PATCH", url, "application/json", paramsBuf, 200)
if err != nil {
return err
}
defer rc.Close()
var reader io.Reader
if Debug {
reader = io.TeeReader(rc, os.Stderr)
} else {
reader = rc
}
return json.NewDecoder(reader).Decode(&retVal)
}()
return retVal, err
}
// UploadCreate uploads a new language file, creating the necessary resources
// in the project. All options and the file itself are sent as a single
// multipart/form-data POST request.
//
// Fixes over the original: the errors from WriteField("utf8", ...) and
// writer.Close() were silently dropped, and the opened file leaked when
// io.Copy failed.
func (client *Client) UploadCreate(project_id string, params *LocaleFileImportParams) (*LocaleFileImportWithSummary, error) {
	retVal := new(LocaleFileImportWithSummary)
	err := func() error {
		url := fmt.Sprintf("/v2/projects/%s/uploads", project_id)
		paramsBuf := bytes.NewBuffer(nil)
		writer := multipart.NewWriter(paramsBuf)
		// Capture the content type (which embeds the boundary) up front.
		ctype := writer.FormDataContentType()
		if params.ConvertEmoji != nil {
			if err := writer.WriteField("convert_emoji", strconv.FormatBool(*params.ConvertEmoji)); err != nil {
				return err
			}
		}
		if params.File != "" {
			if err := addUploadFormFile(writer, params.File); err != nil {
				return err
			}
		}
		if params.FileFormat != nil {
			if err := writer.WriteField("file_format", *params.FileFormat); err != nil {
				return err
			}
		}
		if params.LocaleID != nil {
			if err := writer.WriteField("locale_id", *params.LocaleID); err != nil {
				return err
			}
		}
		if params.SkipUnverification != nil {
			if err := writer.WriteField("skip_unverification", strconv.FormatBool(*params.SkipUnverification)); err != nil {
				return err
			}
		}
		if params.SkipUploadTags != nil {
			if err := writer.WriteField("skip_upload_tags", strconv.FormatBool(*params.SkipUploadTags)); err != nil {
				return err
			}
		}
		if params.Tags != nil {
			if err := writer.WriteField("tags", *params.Tags); err != nil {
				return err
			}
		}
		if params.UpdateTranslations != nil {
			if err := writer.WriteField("update_translations", strconv.FormatBool(*params.UpdateTranslations)); err != nil {
				return err
			}
		}
		// The "utf8" marker field mirrors Rails form conventions.
		if err := writer.WriteField("utf8", "✓"); err != nil {
			return err
		}
		// Close finalizes the multipart body; its error matters.
		if err := writer.Close(); err != nil {
			return err
		}
		rc, err := client.sendRequest("POST", url, ctype, paramsBuf, 201)
		if err != nil {
			return err
		}
		defer rc.Close()
		var reader io.Reader
		if Debug {
			// Mirror the raw response to stderr while decoding.
			reader = io.TeeReader(rc, os.Stderr)
		} else {
			reader = rc
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}()
	return retVal, err
}

// addUploadFormFile streams the file at path into the multipart form field
// "file", guaranteeing the file handle is closed on every code path.
func addUploadFormFile(writer *multipart.Writer, path string) (err error) {
	part, err := writer.CreateFormFile("file", filepath.Base(path))
	if err != nil {
		return err
	}
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer func() {
		// Surface the Close error unless an earlier error takes precedence.
		if cerr := file.Close(); err == nil {
			err = cerr
		}
	}()
	_, err = io.Copy(part, file)
	return err
}
// UploadShow retrieves details and the import summary for a single upload.
func (client *Client) UploadShow(project_id, id string) (*LocaleFileImportWithSummary, error) {
	retVal := new(LocaleFileImportWithSummary)
	fetch := func() error {
		url := fmt.Sprintf("/v2/projects/%s/uploads/%s", project_id, id)
		rc, err := client.sendRequest("GET", url, "", nil, 200)
		if err != nil {
			return err
		}
		defer rc.Close()
		reader := io.Reader(rc)
		if Debug {
			// Copy the raw response to stderr while decoding.
			reader = io.TeeReader(rc, os.Stderr)
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}
	return retVal, fetch()
}
// VersionShow retrieves the details of a single translation version.
func (client *Client) VersionShow(project_id, translation_id, id string) (*TranslationVersionWithUser, error) {
	retVal := new(TranslationVersionWithUser)
	fetch := func() error {
		url := fmt.Sprintf("/v2/projects/%s/translations/%s/versions/%s", project_id, translation_id, id)
		rc, err := client.sendRequest("GET", url, "", nil, 200)
		if err != nil {
			return err
		}
		defer rc.Close()
		reader := io.Reader(rc)
		if Debug {
			// Copy the raw response to stderr while decoding.
			reader = io.TeeReader(rc, os.Stderr)
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}
	return retVal, fetch()
}
// VersionsList lists all versions for the given translation, honoring
// pagination.
func (client *Client) VersionsList(project_id, translation_id string, page, perPage int) ([]*TranslationVersion, error) {
	retVal := []*TranslationVersion{}
	fetch := func() error {
		url := fmt.Sprintf("/v2/projects/%s/translations/%s/versions", project_id, translation_id)
		rc, err := client.sendRequestPaginated("GET", url, "", nil, 200, page, perPage)
		if err != nil {
			return err
		}
		defer rc.Close()
		reader := io.Reader(rc)
		if Debug {
			// Copy the raw response to stderr while decoding.
			reader = io.TeeReader(rc, os.Stderr)
		}
		return json.NewDecoder(reader).Decode(&retVal)
	}
	return retVal, fetch()
}
// GetUserAgent reports the User-Agent string used by this client build.
func GetUserAgent() string {
	const agent = "PhraseApp go (test)"
	return agent
}
|
package physical
import (
"encoding/base64"
"errors"
"fmt"
"log"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/armon/go-metrics"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/pkg/transport"
"golang.org/x/net/context"
)
const (
	// EtcdNodeFilePrefix marks file entries. Ideally, this prefix would match
	// the "_" used in the file backend, but that prefix has special meaning
	// in etcd. Specifically, it excludes those entries from directory
	// listings.
	EtcdNodeFilePrefix = "."
	// EtcdNodeLockPrefix marks lock entries. The lock prefix can (and
	// probably should) cause an entry to be excluded from directory listings,
	// so "_" works here.
	EtcdNodeLockPrefix = "_"
	// EtcdMachineDelimiter separates machine addresses; it is the same as
	// the `-C` flag of etcdctl.
	EtcdMachineDelimiter = ","
	// EtcdLockTTL matches the default lock TTL that the Consul API uses,
	// 15 seconds.
	EtcdLockTTL = 15 * time.Second
	// EtcdLockRenewInterval is the amount of time to wait between semaphore
	// key renewals.
	EtcdLockRenewInterval = 5 * time.Second
	// EtcdWatchRetryInterval is the amount of time to wait if a watch fails
	// before trying again.
	EtcdWatchRetryInterval = time.Second
	// EtcdWatchRetryMax is the number of times to re-try a failed watch
	// before signaling that leadership is lost.
	EtcdWatchRetryMax = 5
)
// Sentinel errors returned by the etcd backend and its lock implementation.
var (
	EtcdSyncConfigError          = errors.New("client setup failed: unable to parse etcd sync field in config")
	EtcdSyncClusterError         = errors.New("client setup failed: unable to sync etcd cluster")
	EtcdAddressError             = errors.New("client setup failed: address must be valid URL (ex. 'scheme://host:port')")
	EtcdSemaphoreKeysEmptyError  = errors.New("lock queue is empty")
	EtcdLockHeldError            = errors.New("lock already held")
	EtcdLockNotHeldError         = errors.New("lock not held")
	EtcdSemaphoreKeyRemovedError = errors.New("semaphore key removed before lock aquisition")
)
// errorIsMissingKey reports whether err is an etcd client error whose code
// indicates that the requested key does not exist.
func errorIsMissingKey(err error) bool {
	if etcdErr, ok := err.(client.Error); ok {
		return etcdErr.Code == client.ErrorCodeKeyNotFound
	}
	return false
}
// EtcdBackend is a physical backend that stores data at a specific
// prefix within Etcd. It is used for most production situations as
// it allows Vault to run on multiple machines in a highly-available manner.
type EtcdBackend struct {
	// path is the root etcd path under which all entries live.
	path string
	// kAPI is the etcd v2 keys API client used for all operations.
	kAPI client.KeysAPI
	// permitPool bounds the number of concurrent etcd operations.
	permitPool *PermitPool
	// logger is retained for diagnostics; not used by the methods visible
	// in this file.
	logger *log.Logger
}
// newEtcdBackend constructs an etcd backend using a given machine address.
func newEtcdBackend(conf map[string]string, logger *log.Logger) (Backend, error) {
	// Get the etcd path from the configuration.
	path, ok := conf["path"]
	if !ok {
		path = "/vault"
	}
	// Ensure path is prefixed.
	if !strings.HasPrefix(path, "/") {
		path = "/" + path
	}
	// Set a default machines list and check for an overriding address value.
	// The default must be the loopback address 127.0.0.1 (the original
	// mistakenly used 128.0.0.1).
	machines := "http://127.0.0.1:2379"
	if address, ok := conf["address"]; ok {
		machines = address
	}
	machinesParsed := strings.Split(machines, EtcdMachineDelimiter)
	// Verify that the machines are valid URLs.
	for _, machine := range machinesParsed {
		u, urlErr := url.Parse(machine)
		if urlErr != nil || u.Scheme == "" {
			return nil, EtcdAddressError
		}
	}
	// Build the transport, enabling TLS when certificate material is
	// configured.
	var cTransport client.CancelableTransport
	cert, hasCert := conf["tls_cert_file"]
	key, hasKey := conf["tls_key_file"]
	ca, hasCa := conf["tls_ca_file"]
	if (hasCert && hasKey) || hasCa {
		var transportErr error
		tls := transport.TLSInfo{
			CAFile:   ca,
			CertFile: cert,
			KeyFile:  key,
		}
		cTransport, transportErr = transport.NewTransport(tls, 30*time.Second)
		if transportErr != nil {
			return nil, transportErr
		}
	} else {
		cTransport = client.DefaultTransport
	}
	cfg := client.Config{
		Endpoints: machinesParsed,
		Transport: cTransport,
	}
	// Set credentials; environment variables take precedence over the
	// configuration map.
	username := os.Getenv("ETCD_USERNAME")
	if username == "" {
		username = conf["username"]
	}
	password := os.Getenv("ETCD_PASSWORD")
	if password == "" {
		password = conf["password"]
	}
	if username != "" && password != "" {
		cfg.Username = username
		cfg.Password = password
	}
	c, err := client.New(cfg)
	if err != nil {
		return nil, err
	}
	// Should we sync the cluster state? There are three available options
	// for our client library: don't sync (required for some proxies), sync
	// once, or sync periodically with AutoSync. We currently support the
	// first two.
	sync, ok := conf["sync"]
	if !ok {
		sync = "yes"
	}
	switch sync {
	case "yes", "true", "y", "1":
		ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
		syncErr := c.Sync(ctx)
		cancel()
		if syncErr != nil {
			return nil, fmt.Errorf("%s: %s", EtcdSyncClusterError, syncErr)
		}
	case "no", "false", "n", "0":
	default:
		return nil, fmt.Errorf("value of 'sync' could not be understood")
	}
	kAPI := client.NewKeysAPI(c)
	// Setup the backend.
	return &EtcdBackend{
		path:       path,
		kAPI:       kAPI,
		permitPool: NewPermitPool(DefaultParallelOperations),
		logger:     logger,
	}, nil
}
// Put inserts or updates an entry, storing its value base64-encoded.
func (c *EtcdBackend) Put(entry *Entry) error {
	defer metrics.MeasureSince([]string{"etcd", "put"}, time.Now())
	// Values are stored base64-encoded so arbitrary bytes survive etcd.
	encoded := base64.StdEncoding.EncodeToString(entry.Value)
	c.permitPool.Acquire()
	defer c.permitPool.Release()
	_, err := c.kAPI.Set(context.Background(), c.nodePath(entry.Key), encoded, nil)
	return err
}
// Get fetches an entry by key, decoding the stored base64 value. A missing
// key yields (nil, nil) rather than an error.
func (c *EtcdBackend) Get(key string) (*Entry, error) {
	defer metrics.MeasureSince([]string{"etcd", "get"}, time.Now())
	c.permitPool.Acquire()
	defer c.permitPool.Release()
	response, err := c.kAPI.Get(context.Background(), c.nodePath(key), &client.GetOptions{
		Recursive: false,
		Sort:      false,
	})
	if err != nil {
		if errorIsMissingKey(err) {
			return nil, nil
		}
		return nil, err
	}
	// Decode the stored value from base-64.
	decoded, err := base64.StdEncoding.DecodeString(response.Node.Value)
	if err != nil {
		return nil, err
	}
	return &Entry{Key: key, Value: decoded}, nil
}
// Delete permanently removes an entry. Deleting a key that does not exist is
// treated as success.
func (c *EtcdBackend) Delete(key string) error {
	defer metrics.MeasureSince([]string{"etcd", "delete"}, time.Now())
	c.permitPool.Acquire()
	defer c.permitPool.Release()
	// Remove the key, non-recursively.
	_, err := c.kAPI.Delete(context.Background(), c.nodePath(key), &client.DeleteOptions{Recursive: false})
	if err == nil || errorIsMissingKey(err) {
		return nil
	}
	return err
}
// List lists all the keys under a given prefix, up to the next prefix.
// Directories are suffixed with "/"; file entries have the node file prefix
// stripped. A missing directory yields an empty list.
func (c *EtcdBackend) List(prefix string) ([]string, error) {
	defer metrics.MeasureSince([]string{"etcd", "list"}, time.Now())
	// Set a directory path from the given prefix.
	path := c.nodePathDir(prefix)
	c.permitPool.Acquire()
	defer c.permitPool.Release()
	// Get the directory, non-recursively, from etcd. If the directory is
	// missing, we just return an empty list of contents.
	response, err := c.kAPI.Get(context.Background(), path, &client.GetOptions{
		Recursive: false,
		Sort:      true,
	})
	if err != nil {
		if errorIsMissingKey(err) {
			return []string{}, nil
		}
		return nil, err
	}
	out := make([]string, 0, len(response.Node.Nodes))
	for _, node := range response.Node.Nodes {
		// etcd keys include the full path, so trim the prefix directory path.
		name := strings.TrimPrefix(node.Key, path)
		if node.Dir {
			// Directories get a trailing slash.
			out = append(out, name+"/")
		} else {
			// Files get the single-character node file prefix removed.
			out = append(out, name[1:])
		}
	}
	return out, nil
}
// nodePath returns the etcd path at which the value for key is stored.
func (b *EtcdBackend) nodePath(key string) string {
	dir, base := filepath.Dir(key), filepath.Base(key)
	return filepath.Join(b.path, dir, EtcdNodeFilePrefix+base)
}
// nodePathDir returns an etcd directory path (trailing slash included) based
// on the given key.
func (b *EtcdBackend) nodePathDir(key string) string {
	joined := filepath.Join(b.path, key)
	return joined + "/"
}
// nodePathLock returns an etcd directory path used specifically for semaphore
// indices based on the given key.
func (b *EtcdBackend) nodePathLock(key string) string {
	dir, base := filepath.Dir(key), filepath.Base(key)
	return filepath.Join(b.path, dir, EtcdNodeLockPrefix+base+"/")
}
// LockWith returns a Lock implementation providing mutual exclusion based on
// the given key.
func (c *EtcdBackend) LockWith(key, value string) (Lock, error) {
	lock := &EtcdLock{
		kAPI:            c.kAPI,
		value:           value,
		semaphoreDirKey: c.nodePathLock(key),
	}
	return lock, nil
}
// EtcdLock implements a lock using an etcd backend.
type EtcdLock struct {
	// kAPI is the etcd v2 keys API used for all lock operations.
	kAPI client.KeysAPI
	// value is the payload stored under the semaphore key; semaphoreDirKey
	// is the lock queue directory; semaphoreKey is our entry once enqueued.
	value, semaphoreDirKey, semaphoreKey string
	// lock serializes Lock/Unlock calls on this instance.
	lock sync.Mutex
}
// addSemaphoreKey acquires a new ordered semaphore key.
func (c *EtcdLock) addSemaphoreKey() (string, uint64, error) {
	// CreateInOrder is an atomic operation that enqueues a request onto a
	// semaphore; the resulting entry is referred to as a "semaphore key".
	// https://coreos.com/etcd/docs/2.0.8/api.html#atomically-creating-in-order-keys
	response, err := c.kAPI.CreateInOrder(context.Background(), c.semaphoreDirKey, c.value, &client.CreateInOrderOptions{
		TTL: EtcdLockTTL,
	})
	if err != nil {
		return "", 0, err
	}
	return response.Node.Key, response.Index, nil
}
// renewSemaphoreKey refreshes the TTL on an existing semaphore key.
func (c *EtcdLock) renewSemaphoreKey() (string, uint64, error) {
	// PrevExist ensures we only refresh a key that is still present.
	renewOpts := &client.SetOptions{
		TTL:       EtcdLockTTL,
		PrevExist: client.PrevExist,
	}
	response, err := c.kAPI.Set(context.Background(), c.semaphoreKey, c.value, renewOpts)
	if err != nil {
		return "", 0, err
	}
	return response.Node.Key, response.Index, nil
}
// getSemaphoreKey determines which semaphore key holder has acquired the
// lock, returning its key, its value, and the etcd index of the response.
func (c *EtcdLock) getSemaphoreKey() (string, string, uint64, error) {
	// List the waiters in creation order to see who is first in the queue.
	response, err := c.kAPI.Get(context.Background(), c.semaphoreDirKey, &client.GetOptions{
		Recursive: false,
		Sort:      true,
	})
	if err != nil {
		return "", "", 0, err
	}
	// An empty queue means nobody currently holds the lock.
	if response.Node.Nodes.Len() == 0 {
		return "", "", response.Index, nil
	}
	first := response.Node.Nodes[0]
	return first.Key, first.Value, response.Index, nil
}
// isHeld reports whether this instance currently holds the lock.
func (c *EtcdLock) isHeld() (bool, error) {
	// Without a semaphore key we cannot possibly hold the lock.
	if c.semaphoreKey == "" {
		return false, nil
	}
	// Compare our key against that of the current holder.
	currentSemaphoreKey, _, _, err := c.getSemaphoreKey()
	if err != nil {
		return false, err
	}
	return currentSemaphoreKey == c.semaphoreKey, nil
}
// assertHeld returns EtcdLockNotHeldError when this instance does not hold
// the lock, or nil when it does.
func (c *EtcdLock) assertHeld() error {
	held, err := c.isHeld()
	switch {
	case err != nil:
		return err
	case !held:
		return EtcdLockNotHeldError
	default:
		return nil
	}
}
// assertNotHeld returns EtcdLockHeldError when this instance already holds
// the lock, or nil when it does not.
func (c *EtcdLock) assertNotHeld() error {
	held, err := c.isHeld()
	switch {
	case err != nil:
		return err
	case held:
		return EtcdLockHeldError
	default:
		return nil
	}
}
// periodicallyRenewSemaphoreKey renews our semaphore key on an interval so
// that it does not expire, until stopCh is closed.
func (c *EtcdLock) periodicallyRenewSemaphoreKey(stopCh chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		case <-time.After(EtcdLockRenewInterval):
			// Best-effort renewal; a failed attempt is retried on the next tick.
			c.renewSemaphoreKey()
		}
	}
}
// watchForKeyRemoval continuously watches a single non-directory key starting
// from the provided etcd index and closes the provided channel when it's
// deleted, expires, or appears to be missing. It is also closed if the watch
// fails EtcdWatchRetryMax times in a row.
func (c *EtcdLock) watchForKeyRemoval(key string, etcdIndex uint64, closeCh chan struct{}) {
	retries := EtcdWatchRetryMax
	for {
		// Start a non-recursive watch of the given key.
		w := c.kAPI.Watcher(key, &client.WatcherOptions{AfterIndex: etcdIndex, Recursive: false})
		response, err := w.Next(context.TODO())
		if err != nil {
			// If the key is just missing, we can exit the loop.
			if errorIsMissingKey(err) {
				break
			}
			// If the error is something else, there's nothing we can do but retry
			// the watch. Check that we still have retries left.
			retries -= 1
			if retries == 0 {
				break
			}
			// Sleep for a period of time to avoid slamming etcd.
			time.Sleep(EtcdWatchRetryInterval)
			continue
		}
		// Check if the key we are concerned with has been removed. If it has, we
		// can exit the loop.
		if response.Node.Key == key &&
			(response.Action == "delete" || response.Action == "expire") {
			break
		}
		// Update the etcd index so the next watch resumes after this event.
		etcdIndex = response.Index + 1
	}
	// Regardless of what happened, we need to close the close channel so
	// waiters learn the lock is lost.
	close(closeCh)
}
// Lock attempts to acquire the lock by waiting for a new semaphore key in etcd
// to become the first in the queue and will block until it is successful or
// it receives a signal on the provided channel. The returned channel will be
// closed when the lock is lost, either by an explicit call to Unlock or by
// the associated semaphore key in etcd otherwise being deleted or expiring.
//
// If the lock is currently held by this instance of EtcdLock, Lock will
// return an EtcdLockHeldError error.
func (c *EtcdLock) Lock(stopCh <-chan struct{}) (doneCh <-chan struct{}, retErr error) {
	// Get the local lock before interacting with etcd.
	c.lock.Lock()
	defer c.lock.Unlock()
	// Check if the lock is already held.
	if err := c.assertNotHeld(); err != nil {
		return nil, err
	}
	// Add a new semaphore key that we will track.
	semaphoreKey, _, err := c.addSemaphoreKey()
	if err != nil {
		return nil, err
	}
	c.semaphoreKey = semaphoreKey
	// Get the current semaphore key (head of the queue).
	currentSemaphoreKey, _, currentEtcdIndex, err := c.getSemaphoreKey()
	if err != nil {
		return nil, err
	}
	// Create an etcd-compatible boolean stop channel from the provided
	// interface stop channel: closing stopCh cancels the watch context.
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-stopCh
		cancel()
	}()
	defer cancel()
	// Create a channel to signal when we lose the semaphore key.
	done := make(chan struct{})
	defer func() {
		// On error, close done so the renewal goroutine started below exits.
		if retErr != nil {
			close(done)
		}
	}()
	go c.periodicallyRenewSemaphoreKey(done)
	// Loop until the current semaphore key matches ours.
	for semaphoreKey != currentSemaphoreKey {
		var err error
		// Start a watch of the entire lock directory.
		w := c.kAPI.Watcher(c.semaphoreDirKey, &client.WatcherOptions{AfterIndex: currentEtcdIndex, Recursive: true})
		response, err := w.Next(ctx)
		if err != nil {
			// If the error is not an etcd error, we can assume it's a notification
			// of the stop channel having closed. In this scenario, we also want to
			// remove our semaphore key as we are no longer waiting to acquire the
			// lock.
			if _, ok := err.(*client.Error); !ok {
				delOpts := &client.DeleteOptions{
					Recursive: false,
				}
				_, err = c.kAPI.Delete(context.Background(), c.semaphoreKey, delOpts)
			}
			return nil, err
		}
		// Make sure the index we are waiting for has not been removed. If it has,
		// this is an error and nothing else needs to be done.
		if response.Node.Key == semaphoreKey &&
			(response.Action == "delete" || response.Action == "expire") {
			return nil, EtcdSemaphoreKeyRemovedError
		}
		// Get the current semaphore key and etcd index.
		currentSemaphoreKey, _, currentEtcdIndex, err = c.getSemaphoreKey()
		if err != nil {
			return nil, err
		}
	}
	// We hold the lock: watch our key so `done` closes if it ever disappears.
	go c.watchForKeyRemoval(c.semaphoreKey, currentEtcdIndex, done)
	return done, nil
}
// Unlock releases the lock by deleting the associated semaphore key in etcd.
//
// If the lock is not currently held by this instance of EtcdLock, Unlock
// returns an EtcdLockNotHeldError error.
func (c *EtcdLock) Unlock() error {
	// Serialize against other lock operations on this instance.
	c.lock.Lock()
	defer c.lock.Unlock()
	// Check that the lock is held.
	if err := c.assertHeld(); err != nil {
		return err
	}
	// Drop our semaphore key, releasing the lock.
	delOpts := &client.DeleteOptions{Recursive: false}
	_, err := c.kAPI.Delete(context.Background(), c.semaphoreKey, delOpts)
	return err
}
// Value checks whether the lock is held by any instance of EtcdLock,
// including this one, and returns the current holder's value.
func (c *EtcdLock) Value() (bool, string, error) {
	semaphoreKey, semaphoreValue, _, err := c.getSemaphoreKey()
	switch {
	case err != nil:
		return false, "", err
	case semaphoreKey == "":
		return false, "", nil
	default:
		return true, semaphoreValue, nil
	}
}
Fix default etcd address
The default machines address should be the loopback address `127.0.0.1`, not `128.0.0.1`.
package physical
import (
"encoding/base64"
"errors"
"fmt"
"log"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/armon/go-metrics"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/pkg/transport"
"golang.org/x/net/context"
)
const (
	// EtcdNodeFilePrefix marks file entries. Ideally, this prefix would match
	// the "_" used in the file backend, but that prefix has special meaning
	// in etcd. Specifically, it excludes those entries from directory
	// listings.
	EtcdNodeFilePrefix = "."
	// EtcdNodeLockPrefix marks lock entries. The lock prefix can (and
	// probably should) cause an entry to be excluded from directory listings,
	// so "_" works here.
	EtcdNodeLockPrefix = "_"
	// EtcdMachineDelimiter separates machine addresses; it is the same as
	// the `-C` flag of etcdctl.
	EtcdMachineDelimiter = ","
	// EtcdLockTTL matches the default lock TTL that the Consul API uses,
	// 15 seconds.
	EtcdLockTTL = 15 * time.Second
	// EtcdLockRenewInterval is the amount of time to wait between semaphore
	// key renewals.
	EtcdLockRenewInterval = 5 * time.Second
	// EtcdWatchRetryInterval is the amount of time to wait if a watch fails
	// before trying again.
	EtcdWatchRetryInterval = time.Second
	// EtcdWatchRetryMax is the number of times to re-try a failed watch
	// before signaling that leadership is lost.
	EtcdWatchRetryMax = 5
)
// Sentinel errors returned by the etcd backend and its lock implementation.
var (
	EtcdSyncConfigError          = errors.New("client setup failed: unable to parse etcd sync field in config")
	EtcdSyncClusterError         = errors.New("client setup failed: unable to sync etcd cluster")
	EtcdAddressError             = errors.New("client setup failed: address must be valid URL (ex. 'scheme://host:port')")
	EtcdSemaphoreKeysEmptyError  = errors.New("lock queue is empty")
	EtcdLockHeldError            = errors.New("lock already held")
	EtcdLockNotHeldError         = errors.New("lock not held")
	EtcdSemaphoreKeyRemovedError = errors.New("semaphore key removed before lock aquisition")
)
// errorIsMissingKey reports whether err is an etcd client error whose code
// indicates that the requested key does not exist.
func errorIsMissingKey(err error) bool {
	if etcdErr, ok := err.(client.Error); ok {
		return etcdErr.Code == client.ErrorCodeKeyNotFound
	}
	return false
}
// EtcdBackend is a physical backend that stores data at a specific
// prefix within Etcd. It is used for most production situations as
// it allows Vault to run on multiple machines in a highly-available manner.
type EtcdBackend struct {
	// path is the root etcd path under which all entries live.
	path string
	// kAPI is the etcd v2 keys API client used for all operations.
	kAPI client.KeysAPI
	// permitPool bounds the number of concurrent etcd operations.
	permitPool *PermitPool
	// logger is retained for diagnostics; not used by the methods visible
	// in this file.
	logger *log.Logger
}
// newEtcdBackend constructs an etcd backend using a given machine address.
func newEtcdBackend(conf map[string]string, logger *log.Logger) (Backend, error) {
	// Get the etcd path from the configuration.
	path, ok := conf["path"]
	if !ok {
		path = "/vault"
	}
	// Ensure path is prefixed.
	if !strings.HasPrefix(path, "/") {
		path = "/" + path
	}
	// Set a default machines list and check for an overriding address value.
	machines := "http://127.0.0.1:2379"
	if address, ok := conf["address"]; ok {
		machines = address
	}
	machinesParsed := strings.Split(machines, EtcdMachineDelimiter)
	// Verify that the machines are valid URLs.
	for _, machine := range machinesParsed {
		u, urlErr := url.Parse(machine)
		if urlErr != nil || u.Scheme == "" {
			return nil, EtcdAddressError
		}
	}
	// Build the transport, enabling TLS when certificate material is
	// configured.
	var cTransport client.CancelableTransport
	cert, hasCert := conf["tls_cert_file"]
	key, hasKey := conf["tls_key_file"]
	ca, hasCa := conf["tls_ca_file"]
	if (hasCert && hasKey) || hasCa {
		var transportErr error
		tls := transport.TLSInfo{
			CAFile:   ca,
			CertFile: cert,
			KeyFile:  key,
		}
		cTransport, transportErr = transport.NewTransport(tls, 30*time.Second)
		if transportErr != nil {
			return nil, transportErr
		}
	} else {
		cTransport = client.DefaultTransport
	}
	cfg := client.Config{
		Endpoints: machinesParsed,
		Transport: cTransport,
	}
	// Set credentials; environment variables take precedence over the
	// configuration map. (Plain map indexing replaces the needless
	// `value, _ = conf[...]` form.)
	username := os.Getenv("ETCD_USERNAME")
	if username == "" {
		username = conf["username"]
	}
	password := os.Getenv("ETCD_PASSWORD")
	if password == "" {
		password = conf["password"]
	}
	if username != "" && password != "" {
		cfg.Username = username
		cfg.Password = password
	}
	c, err := client.New(cfg)
	if err != nil {
		return nil, err
	}
	// Should we sync the cluster state? There are three available options
	// for our client library: don't sync (required for some proxies), sync
	// once, or sync periodically with AutoSync. We currently support the
	// first two.
	sync, ok := conf["sync"]
	if !ok {
		sync = "yes"
	}
	switch sync {
	case "yes", "true", "y", "1":
		ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
		syncErr := c.Sync(ctx)
		cancel()
		if syncErr != nil {
			return nil, fmt.Errorf("%s: %s", EtcdSyncClusterError, syncErr)
		}
	case "no", "false", "n", "0":
	default:
		return nil, fmt.Errorf("value of 'sync' could not be understood")
	}
	kAPI := client.NewKeysAPI(c)
	// Setup the backend.
	return &EtcdBackend{
		path:       path,
		kAPI:       kAPI,
		permitPool: NewPermitPool(DefaultParallelOperations),
		logger:     logger,
	}, nil
}
// Put inserts or updates an entry, storing its value base64-encoded.
func (c *EtcdBackend) Put(entry *Entry) error {
	defer metrics.MeasureSince([]string{"etcd", "put"}, time.Now())
	// Values are stored base64-encoded so arbitrary bytes survive etcd.
	encoded := base64.StdEncoding.EncodeToString(entry.Value)
	c.permitPool.Acquire()
	defer c.permitPool.Release()
	_, err := c.kAPI.Set(context.Background(), c.nodePath(entry.Key), encoded, nil)
	return err
}
// Get fetches an entry by key, decoding the stored base64 value. A missing
// key yields (nil, nil) rather than an error.
func (c *EtcdBackend) Get(key string) (*Entry, error) {
	defer metrics.MeasureSince([]string{"etcd", "get"}, time.Now())
	c.permitPool.Acquire()
	defer c.permitPool.Release()
	response, err := c.kAPI.Get(context.Background(), c.nodePath(key), &client.GetOptions{
		Recursive: false,
		Sort:      false,
	})
	if err != nil {
		if errorIsMissingKey(err) {
			return nil, nil
		}
		return nil, err
	}
	// Decode the stored value from base-64.
	decoded, err := base64.StdEncoding.DecodeString(response.Node.Value)
	if err != nil {
		return nil, err
	}
	return &Entry{Key: key, Value: decoded}, nil
}
// Delete permanently removes an entry. Deleting a key that does not exist is
// treated as success.
func (c *EtcdBackend) Delete(key string) error {
	defer metrics.MeasureSince([]string{"etcd", "delete"}, time.Now())
	c.permitPool.Acquire()
	defer c.permitPool.Release()
	// Remove the key, non-recursively.
	_, err := c.kAPI.Delete(context.Background(), c.nodePath(key), &client.DeleteOptions{Recursive: false})
	if err == nil || errorIsMissingKey(err) {
		return nil
	}
	return err
}
// List lists all the keys under a given prefix, up to the next prefix.
// Directories are suffixed with "/"; file entries have the node file prefix
// stripped. A missing directory yields an empty list.
func (c *EtcdBackend) List(prefix string) ([]string, error) {
	defer metrics.MeasureSince([]string{"etcd", "list"}, time.Now())
	// Set a directory path from the given prefix.
	path := c.nodePathDir(prefix)
	c.permitPool.Acquire()
	defer c.permitPool.Release()
	// Get the directory, non-recursively, from etcd. If the directory is
	// missing, we just return an empty list of contents.
	response, err := c.kAPI.Get(context.Background(), path, &client.GetOptions{
		Recursive: false,
		Sort:      true,
	})
	if err != nil {
		if errorIsMissingKey(err) {
			return []string{}, nil
		}
		return nil, err
	}
	out := make([]string, 0, len(response.Node.Nodes))
	for _, node := range response.Node.Nodes {
		// etcd keys include the full path, so trim the prefix directory path.
		name := strings.TrimPrefix(node.Key, path)
		if node.Dir {
			// Directories get a trailing slash.
			out = append(out, name+"/")
		} else {
			// Files get the single-character node file prefix removed.
			out = append(out, name[1:])
		}
	}
	return out, nil
}
// nodePath returns the etcd path at which the value for key is stored.
func (b *EtcdBackend) nodePath(key string) string {
	dir, base := filepath.Dir(key), filepath.Base(key)
	return filepath.Join(b.path, dir, EtcdNodeFilePrefix+base)
}
// nodePathDir returns an etcd directory path (trailing slash included) based
// on the given key.
func (b *EtcdBackend) nodePathDir(key string) string {
	joined := filepath.Join(b.path, key)
	return joined + "/"
}
// nodePathLock returns an etcd directory path used specifically for semaphore
// indices based on the given key.
func (b *EtcdBackend) nodePathLock(key string) string {
	dir, base := filepath.Dir(key), filepath.Base(key)
	return filepath.Join(b.path, dir, EtcdNodeLockPrefix+base+"/")
}
// LockWith returns a Lock implementation providing mutual exclusion based on
// the given key.
func (c *EtcdBackend) LockWith(key, value string) (Lock, error) {
	lock := &EtcdLock{
		kAPI:            c.kAPI,
		value:           value,
		semaphoreDirKey: c.nodePathLock(key),
	}
	return lock, nil
}
// EtcdLock implements a lock using an etcd backend.
type EtcdLock struct {
	// kAPI is the etcd v2 keys API used for all lock operations.
	kAPI client.KeysAPI
	// value is the payload stored under the semaphore key; semaphoreDirKey
	// is the lock queue directory; semaphoreKey is our entry once enqueued.
	value, semaphoreDirKey, semaphoreKey string
	// lock serializes Lock/Unlock calls on this instance.
	lock sync.Mutex
}
// addSemaphoreKey acquires a new ordered semaphore key.
func (c *EtcdLock) addSemaphoreKey() (string, uint64, error) {
	// CreateInOrder is an atomic operation that enqueues a request onto a
	// semaphore; the resulting entry is referred to as a "semaphore key".
	// https://coreos.com/etcd/docs/2.0.8/api.html#atomically-creating-in-order-keys
	response, err := c.kAPI.CreateInOrder(context.Background(), c.semaphoreDirKey, c.value, &client.CreateInOrderOptions{
		TTL: EtcdLockTTL,
	})
	if err != nil {
		return "", 0, err
	}
	return response.Node.Key, response.Index, nil
}
// renewSemaphoreKey refreshes the TTL on an existing semaphore key.
func (c *EtcdLock) renewSemaphoreKey() (string, uint64, error) {
	// PrevExist ensures we only refresh a key that is still present.
	renewOpts := &client.SetOptions{
		TTL:       EtcdLockTTL,
		PrevExist: client.PrevExist,
	}
	response, err := c.kAPI.Set(context.Background(), c.semaphoreKey, c.value, renewOpts)
	if err != nil {
		return "", 0, err
	}
	return response.Node.Key, response.Index, nil
}
// getSemaphoreKey determines which semaphore key holder has acquired the
// lock, returning its key, its value, and the etcd index of the response.
func (c *EtcdLock) getSemaphoreKey() (string, string, uint64, error) {
	// List the waiters in creation order to see who is first in the queue.
	response, err := c.kAPI.Get(context.Background(), c.semaphoreDirKey, &client.GetOptions{
		Recursive: false,
		Sort:      true,
	})
	if err != nil {
		return "", "", 0, err
	}
	// An empty queue means nobody currently holds the lock.
	if response.Node.Nodes.Len() == 0 {
		return "", "", response.Index, nil
	}
	first := response.Node.Nodes[0]
	return first.Key, first.Value, response.Index, nil
}
// isHeld reports whether this instance currently holds the lock.
func (c *EtcdLock) isHeld() (bool, error) {
	// Without a semaphore key we cannot possibly hold the lock.
	if c.semaphoreKey == "" {
		return false, nil
	}
	// Compare our key against that of the current holder.
	currentSemaphoreKey, _, _, err := c.getSemaphoreKey()
	if err != nil {
		return false, err
	}
	return currentSemaphoreKey == c.semaphoreKey, nil
}
// assertHeld returns EtcdLockNotHeldError when this instance does not hold
// the lock, or nil when it does.
func (c *EtcdLock) assertHeld() error {
	held, err := c.isHeld()
	switch {
	case err != nil:
		return err
	case !held:
		return EtcdLockNotHeldError
	default:
		return nil
	}
}
// assertNotHeld returns EtcdLockHeldError when this instance already holds
// the lock, or nil when it does not.
func (c *EtcdLock) assertNotHeld() error {
	held, err := c.isHeld()
	switch {
	case err != nil:
		return err
	case held:
		return EtcdLockHeldError
	default:
		return nil
	}
}
// periodicallyRenewSemaphoreKey renews our semaphore key on an interval so
// that it does not expire, until stopCh is closed.
func (c *EtcdLock) periodicallyRenewSemaphoreKey(stopCh chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		case <-time.After(EtcdLockRenewInterval):
			// Best-effort renewal; a failed attempt is retried on the next tick.
			c.renewSemaphoreKey()
		}
	}
}
// watchForKeyRemoval continuously watches a single non-directory key starting
// from the provided etcd index and closes the provided channel when the key
// is deleted, expires, or appears to be missing. Watch errors are retried up
// to EtcdWatchRetryMax times with EtcdWatchRetryInterval between attempts.
func (c *EtcdLock) watchForKeyRemoval(key string, etcdIndex uint64, closeCh chan struct{}) {
	retries := EtcdWatchRetryMax
	for {
		// Start a non-recursive watch of the given key.
		w := c.kAPI.Watcher(key, &client.WatcherOptions{AfterIndex: etcdIndex, Recursive: false})
		response, err := w.Next(context.TODO())
		if err != nil {
			// If the key is just missing, we can exit the loop.
			if errorIsMissingKey(err) {
				break
			}
			// Any other error can only be retried. Use <= 0 so that a zero
			// or negative EtcdWatchRetryMax cannot slip past the check and
			// loop forever (the previous `== 0` test never fired once the
			// counter went negative).
			retries--
			if retries <= 0 {
				break
			}
			// Sleep for a period of time to avoid slamming etcd.
			time.Sleep(EtcdWatchRetryInterval)
			continue
		}
		// Check if the key we are concerned with has been removed. If it
		// has, we can exit the loop.
		if response.Node.Key == key &&
			(response.Action == "delete" || response.Action == "expire") {
			break
		}
		// Advance past the event we just observed so the next watch resumes
		// after it.
		etcdIndex = response.Index + 1
	}
	// Regardless of what happened, we need to close the close channel.
	close(closeCh)
}
// Lock attempts to acquire the lock by waiting for a new semaphore key in etcd
// to become the first in the queue and will block until it is successful or
// it receives a signal on the provided channel. The returned channel will be
// closed when the lock is lost, either by an explicit call to Unlock or by
// the associated semaphore key in etcd otherwise being deleted or expiring.
//
// If the lock is currently held by this instance of EtcdLock, Lock will
// return an EtcdLockHeldError error.
func (c *EtcdLock) Lock(stopCh <-chan struct{}) (doneCh <-chan struct{}, retErr error) {
	// Get the local lock before interacting with etcd.
	c.lock.Lock()
	defer c.lock.Unlock()
	// Check if the lock is already held.
	if err := c.assertNotHeld(); err != nil {
		return nil, err
	}
	// Add a new semaphore key that we will track.
	semaphoreKey, _, err := c.addSemaphoreKey()
	if err != nil {
		return nil, err
	}
	c.semaphoreKey = semaphoreKey
	// Get the current semaphore key.
	currentSemaphoreKey, _, currentEtcdIndex, err := c.getSemaphoreKey()
	if err != nil {
		return nil, err
	}
	// Convert the caller's stop channel into a context cancellation so the
	// etcd watch below can be interrupted.
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-stopCh
		cancel()
	}()
	defer cancel()
	// Create a channel to signal when we lose the semaphore key. On error
	// returns it is closed here so the renewal goroutine started below does
	// not leak.
	done := make(chan struct{})
	defer func() {
		if retErr != nil {
			close(done)
		}
	}()
	go c.periodicallyRenewSemaphoreKey(done)
	// Loop until the current semaphore key matches ours, i.e. until we reach
	// the front of the queue.
	for semaphoreKey != currentSemaphoreKey {
		var err error
		// Start a watch of the entire lock directory.
		w := c.kAPI.Watcher(c.semaphoreDirKey, &client.WatcherOptions{AfterIndex: currentEtcdIndex, Recursive: true})
		response, err := w.Next(ctx)
		if err != nil {
			// If the error is not an etcd error, we can assume it's a notification
			// of the stop channel having closed. In this scenario, we also want to
			// remove our semaphore key as we are no longer waiting to acquire the
			// lock.
			if _, ok := err.(*client.Error); !ok {
				delOpts := &client.DeleteOptions{
					Recursive: false,
				}
				_, err = c.kAPI.Delete(context.Background(), c.semaphoreKey, delOpts)
			}
			return nil, err
		}
		// Make sure the key we are waiting on has not been removed. If it
		// has, this is an error and nothing else needs to be done.
		if response.Node.Key == semaphoreKey &&
			(response.Action == "delete" || response.Action == "expire") {
			return nil, EtcdSemaphoreKeyRemovedError
		}
		// Get the current semaphore key and etcd index.
		currentSemaphoreKey, _, currentEtcdIndex, err = c.getSemaphoreKey()
		if err != nil {
			return nil, err
		}
	}
	// We now hold the lock; watch our own key and close done if it goes away.
	go c.watchForKeyRemoval(c.semaphoreKey, currentEtcdIndex, done)
	return done, nil
}
// Unlock releases the lock by deleting the associated semaphore key in etcd.
//
// If the lock is not currently held by this instance of EtcdLock, Unlock will
// return an EtcdLockNotHeldError error.
func (c *EtcdLock) Unlock() error {
	// Get the local lock before interacting with etcd.
	c.lock.Lock()
	defer c.lock.Unlock()
	// The lock must be held before it can be released.
	if err := c.assertHeld(); err != nil {
		return err
	}
	// Remove our semaphore key; a nil error means the release succeeded.
	_, err := c.kAPI.Delete(context.Background(), c.semaphoreKey, &client.DeleteOptions{
		Recursive: false,
	})
	return err
}
// Value checks whether or not the lock is held by any instance of EtcdLock,
// including this one, and returns the current holder's value. The boolean
// result reports whether a holder exists at all.
func (c *EtcdLock) Value() (bool, string, error) {
	holderKey, holderValue, _, err := c.getSemaphoreKey()
	switch {
	case err != nil:
		return false, "", err
	case holderKey == "":
		return false, "", nil
	default:
		return true, holderValue, nil
	}
}
|
Fixed missing endpoint.
|
package dropbox
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"os"
"time"
"github.com/ayoisaiah/stellar-photos-server/config"
"github.com/ayoisaiah/stellar-photos-server/unsplash"
"github.com/ayoisaiah/stellar-photos-server/utils"
)
// key is the JSON payload used to deliver the Dropbox application key to
// the client.
type key struct {
	DropboxKey string `json:"dropbox_key"`
}
// SendDropboxKey sends the application key to the client on request to avoid
// exposing it in the extension code.
func SendDropboxKey(w http.ResponseWriter, r *http.Request) error {
	dropboxKey := config.Conf.Dropbox.Key
	d := key{
		DropboxKey: dropboxKey,
	}
	// The marshaled payload is deliberately not named "bytes": the previous
	// name shadowed the imported bytes package inside this function.
	payload, err := json.Marshal(d)
	if err != nil {
		return err
	}
	return utils.JsonResponse(w, payload)
}
// SaveToDropbox saves the requested photo to the current user's Dropbox
// account by asking Dropbox to fetch the photo server-side via the
// /files/save_url endpoint. Expected query parameters: token (Dropbox OAuth
// bearer token), id (photo id), and url (direct photo URL).
func SaveToDropbox(w http.ResponseWriter, r *http.Request) error {
	values, err := utils.GetURLQueryParams(r.URL.String())
	if err != nil {
		return err
	}
	token := values.Get("token")
	id := values.Get("id")
	url := values.Get("url")
	// Register the download with Unsplash before saving the file.
	err = unsplash.TrackPhotoDownload(id)
	if err != nil {
		return err
	}
	v := fmt.Sprintf("Bearer %s", token)
	// Dropbox will store the file as /photo-<id>.jpg.
	requestBody, err := json.Marshal(map[string]string{
		"path": fmt.Sprintf("/photo-%s.jpg", id),
		"url":  url,
	})
	if err != nil {
		return err
	}
	endpoint := "https://api.dropboxapi.com/2/files/save_url"
	request, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(requestBody))
	if err != nil {
		return err
	}
	request.Header.Set("Content-Type", "application/json")
	request.Header.Set("Authorization", v)
	// Bound the external call so a hung Dropbox request cannot stall the
	// handler indefinitely.
	client := &http.Client{Timeout: 10 * time.Second}
	response, err := client.Do(request)
	if err != nil {
		if os.IsTimeout(err) {
			return utils.NewHTTPError(err, http.StatusRequestTimeout, "Request to external API timed out")
		}
		return err
	}
	defer response.Body.Close()
	_, err = utils.CheckForErrors(response)
	if err != nil {
		return err
	}
	w.WriteHeader(http.StatusOK)
	return nil
}
Ensure save url job is completed before responding
package dropbox
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"os"
"time"
"github.com/ayoisaiah/stellar-photos-server/config"
"github.com/ayoisaiah/stellar-photos-server/unsplash"
"github.com/ayoisaiah/stellar-photos-server/utils"
)
// key is the JSON payload used to deliver the Dropbox application key to
// the client.
type key struct {
	DropboxKey string `json:"dropbox_key"`
}
// SaveURLResponse mirrors the body returned by Dropbox's /files/save_url
// endpoints: Tag is the ".tag" discriminator (values seen in this package:
// "complete", "in_progress", "async_job_id") and AsyncJobID identifies the
// background job when the save runs asynchronously.
type SaveURLResponse struct {
	Tag        string `json:".tag"`
	AsyncJobID string `json:"async_job_id"`
}
// SendDropboxKey sends the application key to the client on request to avoid
// exposing it in the extension code.
func SendDropboxKey(w http.ResponseWriter, r *http.Request) error {
	dropboxKey := config.Conf.Dropbox.Key
	d := key{
		DropboxKey: dropboxKey,
	}
	// The marshaled payload is deliberately not named "bytes": the previous
	// name shadowed the imported bytes package inside this function.
	payload, err := json.Marshal(d)
	if err != nil {
		return err
	}
	return utils.JsonResponse(w, payload)
}
// checkJobStatus polls Dropbox's /files/save_url/check_job_status endpoint
// until the asynchronous save-url job identified by jobId reports
// "complete" (nil), fails (non-nil error containing the raw response), or a
// transport/decoding error occurs. It waits one second between polls while
// the job reports "in_progress".
//
// The previous implementation recursed once per poll; Go performs no
// tail-call elimination, so a long-running job grew the stack without bound.
// This iterative loop has identical behavior with constant stack usage.
func checkJobStatus(jobId string, token string) error {
	auth := fmt.Sprintf("Bearer %s", token)
	requestBody, err := json.Marshal(map[string]string{
		"async_job_id": jobId,
	})
	if err != nil {
		return err
	}
	endpoint := "https://api.dropboxapi.com/2/files/save_url/check_job_status"
	for {
		// Build a fresh request each iteration: the body reader is consumed
		// by each send and cannot be reused.
		request, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(requestBody))
		if err != nil {
			return err
		}
		request.Header.Set("Content-Type", "application/json")
		request.Header.Set("Authorization", auth)
		response, err := utils.Client.Do(request)
		if err != nil {
			if os.IsTimeout(err) {
				return utils.NewHTTPError(err, http.StatusRequestTimeout, "Request to external API timed out")
			}
			return err
		}
		b, err := utils.CheckForErrors(response)
		// Close promptly inside the loop (a defer here would pile up until
		// the function returned).
		response.Body.Close()
		if err != nil {
			return err
		}
		resp := &SaveURLResponse{}
		if err = json.Unmarshal(b, resp); err != nil {
			return err
		}
		switch resp.Tag {
		case "complete":
			return nil
		case "in_progress":
			time.Sleep(1 * time.Second)
			continue
		}
		return fmt.Errorf("Job failed. Response from Dropbox: %s", string(b))
	}
}
// SaveToDropbox saves the requested photo to the current user's Dropbox
// account via the /files/save_url endpoint, waiting for asynchronous save
// jobs to finish before responding. Expected query parameters: token
// (Dropbox OAuth bearer token), id (photo id), and url (direct photo URL).
func SaveToDropbox(w http.ResponseWriter, r *http.Request) error {
	values, err := utils.GetURLQueryParams(r.URL.String())
	if err != nil {
		return err
	}
	token := values.Get("token")
	id := values.Get("id")
	url := values.Get("url")
	// Register the download with Unsplash before saving the file.
	err = unsplash.TrackPhotoDownload(id)
	if err != nil {
		return err
	}
	v := fmt.Sprintf("Bearer %s", token)
	// Dropbox will store the file as /photo-<id>.jpg.
	requestBody, err := json.Marshal(map[string]string{
		"path": fmt.Sprintf("/photo-%s.jpg", id),
		"url":  url,
	})
	if err != nil {
		return err
	}
	endpoint := "https://api.dropboxapi.com/2/files/save_url"
	request, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(requestBody))
	if err != nil {
		return err
	}
	request.Header.Set("Content-Type", "application/json")
	request.Header.Set("Authorization", v)
	response, err := utils.Client.Do(request)
	if err != nil {
		if os.IsTimeout(err) {
			return utils.NewHTTPError(err, http.StatusRequestTimeout, "Request to external API timed out")
		}
		return err
	}
	defer response.Body.Close()
	b, err := utils.CheckForErrors(response)
	if err != nil {
		return err
	}
	resp := &SaveURLResponse{}
	err = json.Unmarshal(b, resp)
	if err != nil {
		return err
	}
	// Dropbox may complete the save synchronously ("complete") or hand back
	// a job id ("async_job_id"); in the latter case poll until the job is
	// done so the client only hears success once the file truly exists.
	if resp.Tag == "async_job_id" {
		err = checkJobStatus(resp.AsyncJobID, token)
		if err != nil {
			return err
		}
		w.WriteHeader(http.StatusOK)
		return nil
	} else if resp.Tag == "complete" {
		w.WriteHeader(http.StatusOK)
		return nil
	}
	return fmt.Errorf("Save URL error. Response from Dropbox: %s", string(b))
}
|
package typesys
import (
"fmt"
"strings"
"bitbucket.org/yyuu/bs/core"
)
type TypeTable struct {
charSize int
shortSize int
intSize int
longSize int
ptrSize int
table map[string]core.IType
}
func NewTypeTable(charSize, shortSize, intSize, longSize, ptrSize int) *TypeTable {
loc := core.NewLocation("[builtin:typesys]", 0, 0)
tt := TypeTable { charSize, shortSize, intSize, longSize, ptrSize, make(map[string]core.IType) }
tt.PutType(NewVoidTypeRef(loc), NewVoidType())
tt.PutType(NewCharTypeRef(loc), NewCharType(charSize))
tt.PutType(NewShortTypeRef(loc), NewShortType(shortSize))
tt.PutType(NewIntTypeRef(loc), NewIntType(intSize))
tt.PutType(NewLongTypeRef(loc), NewLongType(longSize))
tt.PutType(NewUnsignedCharTypeRef(loc), NewUnsignedCharType(charSize))
tt.PutType(NewUnsignedShortTypeRef(loc), NewUnsignedShortType(shortSize))
tt.PutType(NewUnsignedIntTypeRef(loc), NewUnsignedIntType(intSize))
tt.PutType(NewUnsignedLongTypeRef(loc), NewUnsignedLongType(longSize))
return &tt
}
func NewTypeTableILP32() *TypeTable {
return NewTypeTable(1, 2, 4, 4, 4)
}
func NewTypeTableILP64() *TypeTable {
return NewTypeTable(1, 2, 8, 8, 8)
}
func NewTypeTableLP64() *TypeTable {
return NewTypeTable(1, 2, 4, 8, 8)
}
func NewTypeTableLLP64() *TypeTable {
return NewTypeTable(1, 2, 4, 4, 8)
}
func NewTypeTableFor(platform string) *TypeTable {
switch platform {
case "x86-linux": return NewTypeTableILP32()
default: panic(fmt.Errorf("unknown platform: %s", platform))
}
}
func (self *TypeTable) PutType(ref core.ITypeRef, t core.IType) {
self.table[ref.String()] = t
}
func (self TypeTable) GetType(ref core.ITypeRef) core.IType {
t := self.table[ref.String()]
if t == nil {
switch typed := ref.(type) {
case *UserTypeRef: {
panic(fmt.Errorf("undefined type: %s", typed.GetName()))
}
case *PointerTypeRef: {
t = NewPointerType(self.ptrSize, self.GetType(typed.GetBaseType()))
self.PutType(typed, t)
}
case *ArrayTypeRef: {
t = NewArrayType(self.GetType(typed.GetBaseType()), typed.GetLength(), self.ptrSize)
self.PutType(typed, t)
}
case *FunctionTypeRef: {
params := typed.GetParams()
paramRefs := params.GetParamDescs()
paramTypes := make([]core.IType, len(paramRefs))
for i := range paramRefs {
paramTypes[i] = self.GetParamType(paramRefs[i])
}
t = NewFunctionType(
self.GetType(typed.GetReturnType()),
NewParamTypes(typed.GetLocation(), paramTypes, params.IsVararg()),
)
self.PutType(typed, t)
}
default: {
panic(fmt.Errorf("unregistered type: %s", ref.String()))
}
}
}
return t
}
func (self TypeTable) GetCharSize() int {
return self.charSize
}
func (self TypeTable) GetShortSize() int {
return self.shortSize
}
func (self TypeTable) GetIntSize() int {
return self.intSize
}
func (self TypeTable) GetLongSize() int {
return self.longSize
}
func (self TypeTable) GetPointerSize() int {
return self.ptrSize
}
func (self TypeTable) IsTypeTable() bool {
return true
}
// String returns a parenthesized, newline-separated listing of every type
// name registered in the table. Ordering follows Go's randomized map
// iteration and is therefore not deterministic.
func (self TypeTable) String() string {
	// Bug fix: the previous make([]string, len(...)) + append combination
	// produced len(table) empty strings at the front of the joined output.
	// Allocate with zero length and the right capacity instead.
	xs := make([]string, 0, len(self.table))
	for key := range self.table {
		xs = append(xs, key)
	}
	return fmt.Sprintf("(%s)", strings.Join(xs, "\n"))
}
func (self TypeTable) IsDefined(ref core.ITypeRef) bool {
_, ok := self.table[ref.String()]
return ok
}
// array is really a pointer on parameters.
func (self TypeTable) GetParamType(ref core.ITypeRef) core.IType {
t := self.GetType(ref)
if t == nil {
panic(fmt.Errorf("unknown parameter type: %s", ref))
}
if t.IsArray() {
return NewPointerType(self.ptrSize, t.(*ArrayType).GetBaseType())
} else {
return t
}
}
func (self TypeTable) NumTypes() int {
return len(self.table)
}
func (self TypeTable) GetTypes() []core.IType {
ts := []core.IType { }
for _, t := range self.table {
ts = append(ts, t)
}
return ts
}
// SemanticCheck validates every registered type: composite types must not
// contain void or duplicated members, array types must not have a void base
// type, and all types are passed through the (currently unimplemented)
// recursive-definition check. Violations are reported via errorHandler.
func (self *TypeTable) SemanticCheck(errorHandler *core.ErrorHandler) {
	ts := self.GetTypes()
	for i := range ts {
		t := ts[i]
		if t.IsCompositeType() {
			// Defensive assertion: IsCompositeType should guarantee the
			// concrete type implements ICompositeType.
			ct, ok := t.(core.ICompositeType)
			if ! ok {
				errorHandler.Panicln("not a composite type")
			}
			self.checkCompositeVoidMembers(ct, errorHandler)
			self.checkDuplicatedMembers(ct, errorHandler)
		} else {
			if t.IsArray() {
				at, ok := t.(*ArrayType)
				if ! ok {
					errorHandler.Panicln("not an array type")
				}
				self.checkArrayVoidMembers(at, errorHandler)
			}
		}
		self.checkRecursiveDefinition(t, errorHandler)
	}
}
func (self TypeTable) checkCompositeVoidMembers(t core.ICompositeType, errorHandler *core.ErrorHandler) {
members := t.GetMembers()
for i := range members {
slot := members[i]
if slot.GetType().IsVoid() {
errorHandler.Errorln("struct/union cannot contain void")
}
}
}
func (self TypeTable) checkArrayVoidMembers(t *ArrayType, errorHandler *core.ErrorHandler) {
if t.GetBaseType().IsVoid() {
errorHandler.Errorln("array cannot contain void")
}
}
func (self TypeTable) checkDuplicatedMembers(t core.ICompositeType, errorHandler *core.ErrorHandler) {
seen := make(map[string]core.ISlot)
members := t.GetMembers()
for i := range members {
slot := members[i]
name := slot.GetName()
_, found := seen[name]
if found {
errorHandler.Errorf("%s has duplicated member: %s", t.GetName(), name)
}
seen[name] = slot
}
}
func (self TypeTable) checkRecursiveDefinition(t core.IType, errorHandler *core.ErrorHandler) {
errorHandler.Warnln("TypeTable#checkRecursiveDefinition is not implemented yet")
}
Remove verbose type checks
package typesys
import (
"fmt"
"strings"
"bitbucket.org/yyuu/bs/core"
)
type TypeTable struct {
charSize int
shortSize int
intSize int
longSize int
ptrSize int
table map[string]core.IType
}
func NewTypeTable(charSize, shortSize, intSize, longSize, ptrSize int) *TypeTable {
loc := core.NewLocation("[builtin:typesys]", 0, 0)
tt := TypeTable { charSize, shortSize, intSize, longSize, ptrSize, make(map[string]core.IType) }
tt.PutType(NewVoidTypeRef(loc), NewVoidType())
tt.PutType(NewCharTypeRef(loc), NewCharType(charSize))
tt.PutType(NewShortTypeRef(loc), NewShortType(shortSize))
tt.PutType(NewIntTypeRef(loc), NewIntType(intSize))
tt.PutType(NewLongTypeRef(loc), NewLongType(longSize))
tt.PutType(NewUnsignedCharTypeRef(loc), NewUnsignedCharType(charSize))
tt.PutType(NewUnsignedShortTypeRef(loc), NewUnsignedShortType(shortSize))
tt.PutType(NewUnsignedIntTypeRef(loc), NewUnsignedIntType(intSize))
tt.PutType(NewUnsignedLongTypeRef(loc), NewUnsignedLongType(longSize))
return &tt
}
func NewTypeTableILP32() *TypeTable {
return NewTypeTable(1, 2, 4, 4, 4)
}
func NewTypeTableILP64() *TypeTable {
return NewTypeTable(1, 2, 8, 8, 8)
}
func NewTypeTableLP64() *TypeTable {
return NewTypeTable(1, 2, 4, 8, 8)
}
func NewTypeTableLLP64() *TypeTable {
return NewTypeTable(1, 2, 4, 4, 8)
}
func NewTypeTableFor(platform string) *TypeTable {
switch platform {
case "x86-linux": return NewTypeTableILP32()
default: panic(fmt.Errorf("unknown platform: %s", platform))
}
}
func (self *TypeTable) PutType(ref core.ITypeRef, t core.IType) {
self.table[ref.String()] = t
}
func (self TypeTable) GetType(ref core.ITypeRef) core.IType {
t := self.table[ref.String()]
if t == nil {
switch typed := ref.(type) {
case *UserTypeRef: {
panic(fmt.Errorf("undefined type: %s", typed.GetName()))
}
case *PointerTypeRef: {
t = NewPointerType(self.ptrSize, self.GetType(typed.GetBaseType()))
self.PutType(typed, t)
}
case *ArrayTypeRef: {
t = NewArrayType(self.GetType(typed.GetBaseType()), typed.GetLength(), self.ptrSize)
self.PutType(typed, t)
}
case *FunctionTypeRef: {
params := typed.GetParams()
paramRefs := params.GetParamDescs()
paramTypes := make([]core.IType, len(paramRefs))
for i := range paramRefs {
paramTypes[i] = self.GetParamType(paramRefs[i])
}
t = NewFunctionType(
self.GetType(typed.GetReturnType()),
NewParamTypes(typed.GetLocation(), paramTypes, params.IsVararg()),
)
self.PutType(typed, t)
}
default: {
panic(fmt.Errorf("unregistered type: %s", ref.String()))
}
}
}
return t
}
func (self TypeTable) GetCharSize() int {
return self.charSize
}
func (self TypeTable) GetShortSize() int {
return self.shortSize
}
func (self TypeTable) GetIntSize() int {
return self.intSize
}
func (self TypeTable) GetLongSize() int {
return self.longSize
}
func (self TypeTable) GetPointerSize() int {
return self.ptrSize
}
func (self TypeTable) IsTypeTable() bool {
return true
}
// String returns a parenthesized, newline-separated listing of every type
// name registered in the table. Ordering follows Go's randomized map
// iteration and is therefore not deterministic.
func (self TypeTable) String() string {
	// Bug fix: the previous make([]string, len(...)) + append combination
	// produced len(table) empty strings at the front of the joined output.
	// Allocate with zero length and the right capacity instead.
	xs := make([]string, 0, len(self.table))
	for key := range self.table {
		xs = append(xs, key)
	}
	return fmt.Sprintf("(%s)", strings.Join(xs, "\n"))
}
func (self TypeTable) IsDefined(ref core.ITypeRef) bool {
_, ok := self.table[ref.String()]
return ok
}
// array is really a pointer on parameters.
func (self TypeTable) GetParamType(ref core.ITypeRef) core.IType {
t := self.GetType(ref)
if t == nil {
panic(fmt.Errorf("unknown parameter type: %s", ref))
}
if t.IsArray() {
return NewPointerType(self.ptrSize, t.(*ArrayType).GetBaseType())
} else {
return t
}
}
func (self TypeTable) NumTypes() int {
return len(self.table)
}
func (self TypeTable) GetTypes() []core.IType {
ts := []core.IType { }
for _, t := range self.table {
ts = append(ts, t)
}
return ts
}
// SemanticCheck validates every registered type: composite types must not
// contain void or duplicated members, array types must not have a void base
// type, and all types are passed through the (currently unimplemented)
// recursive-definition check. The bare type assertions here rely on
// IsCompositeType/IsArray agreeing with the concrete types; a mismatch
// would panic.
func (self *TypeTable) SemanticCheck(errorHandler *core.ErrorHandler) {
	ts := self.GetTypes()
	for i := range ts {
		t := ts[i]
		if t.IsCompositeType() {
			self.checkCompositeVoidMembers(t.(core.ICompositeType), errorHandler)
			self.checkDuplicatedMembers(t.(core.ICompositeType), errorHandler)
		} else {
			if t.IsArray() {
				self.checkArrayVoidMembers(t.(*ArrayType), errorHandler)
			}
		}
		self.checkRecursiveDefinition(t, errorHandler)
	}
}
func (self TypeTable) checkCompositeVoidMembers(t core.ICompositeType, errorHandler *core.ErrorHandler) {
members := t.GetMembers()
for i := range members {
slot := members[i]
if slot.GetType().IsVoid() {
errorHandler.Errorln("struct/union cannot contain void")
}
}
}
func (self TypeTable) checkArrayVoidMembers(t *ArrayType, errorHandler *core.ErrorHandler) {
if t.GetBaseType().IsVoid() {
errorHandler.Errorln("array cannot contain void")
}
}
func (self TypeTable) checkDuplicatedMembers(t core.ICompositeType, errorHandler *core.ErrorHandler) {
seen := make(map[string]core.ISlot)
members := t.GetMembers()
for i := range members {
slot := members[i]
name := slot.GetName()
_, found := seen[name]
if found {
errorHandler.Errorf("%s has duplicated member: %s", t.GetName(), name)
}
seen[name] = slot
}
}
func (self TypeTable) checkRecursiveDefinition(t core.IType, errorHandler *core.ErrorHandler) {
errorHandler.Warnln("TypeTable#checkRecursiveDefinition is not implemented yet")
}
|
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memfs_test
import (
"io"
"io/ioutil"
"log"
"os"
"os/user"
"path"
"strconv"
"strings"
"syscall"
"testing"
"time"
"github.com/jacobsa/fuse"
"github.com/jacobsa/fuse/samples/memfs"
"github.com/jacobsa/gcsfuse/timeutil"
. "github.com/jacobsa/oglematchers"
. "github.com/jacobsa/ogletest"
"golang.org/x/net/context"
)
func TestMemFS(t *testing.T) { RunTests(t) }
////////////////////////////////////////////////////////////////////////
// Helpers
////////////////////////////////////////////////////////////////////////
// currentUid returns the numeric user ID of the current process's user as a
// uint32, panicking if the lookup or parse fails.
func currentUid() uint32 {
	u, err := user.Current()
	if err != nil {
		panic(err)
	}
	parsed, err := strconv.ParseUint(u.Uid, 10, 32)
	if err != nil {
		panic(err)
	}
	return uint32(parsed)
}
// currentGid returns the numeric group ID of the current process's user as a
// uint32, panicking if the lookup or parse fails.
func currentGid() uint32 {
	u, err := user.Current()
	if err != nil {
		panic(err)
	}
	parsed, err := strconv.ParseUint(u.Gid, 10, 32)
	if err != nil {
		panic(err)
	}
	return uint32(parsed)
}
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(ts.Sec, ts.Nsec)
}
////////////////////////////////////////////////////////////////////////
// Boilerplate
////////////////////////////////////////////////////////////////////////
// MemFSTest is the test suite for the in-memory file system sample. Each
// test runs against a freshly mounted memfs instance (see SetUp) backed by a
// simulated clock so timestamp behavior can be asserted deterministically.
type MemFSTest struct {
	clock timeutil.SimulatedClock
	mfs   *fuse.MountedFileSystem
	// Files to close when tearing down. Nil entries are skipped.
	toClose []io.Closer
}
var _ SetUpInterface = &MemFSTest{}
var _ TearDownInterface = &MemFSTest{}
func init() { RegisterTestSuite(&MemFSTest{}) }
// SetUp initializes the simulated clock, creates a temporary mount point,
// mounts a fresh in-memory file system there, and waits until the mount is
// ready to serve requests. Any failure panics, aborting the test.
func (t *MemFSTest) SetUp(ti *TestInfo) {
	var err error
	// Set up a fixed, non-zero time.
	t.clock.SetTime(time.Now())
	// Set up a temporary directory for mounting.
	mountPoint, err := ioutil.TempDir("", "memfs_test")
	if err != nil {
		panic("ioutil.TempDir: " + err.Error())
	}
	// Mount a file system backed by the simulated clock so tests can control
	// the timestamps it reports.
	fs := memfs.NewMemFS(&t.clock)
	if t.mfs, err = fuse.Mount(mountPoint, fs); err != nil {
		panic("Mount: " + err.Error())
	}
	if err = t.mfs.WaitForReady(context.Background()); err != nil {
		panic("MountedFileSystem.WaitForReady: " + err.Error())
	}
}
// TearDown closes any files registered in toClose, unmounts the file system
// (retrying with growing delays while the mount reports "resource busy"),
// and waits for the mount to be fully torn down. Any other failure panics.
func (t *MemFSTest) TearDown() {
	// Close any files we opened.
	for _, c := range t.toClose {
		if c == nil {
			continue
		}
		err := c.Close()
		if err != nil {
			panic(err)
		}
	}
	// Unmount the file system. Try again on "resource busy" errors, backing
	// off by a factor of 1.3 each attempt.
	delay := 10 * time.Millisecond
	for {
		err := t.mfs.Unmount()
		if err == nil {
			break
		}
		if strings.Contains(err.Error(), "resource busy") {
			log.Println("Resource busy error while unmounting; trying again")
			time.Sleep(delay)
			delay = time.Duration(1.3 * float64(delay))
			continue
		}
		panic("MountedFileSystem.Unmount: " + err.Error())
	}
	// Block until the kernel connection is fully closed.
	if err := t.mfs.Join(context.Background()); err != nil {
		panic("MountedFileSystem.Join: " + err.Error())
	}
}
////////////////////////////////////////////////////////////////////////
// Test functions
////////////////////////////////////////////////////////////////////////
func (t *MemFSTest) ContentsOfEmptyFileSystem() {
entries, err := ioutil.ReadDir(t.mfs.Dir())
AssertEq(nil, err)
ExpectThat(entries, ElementsAre())
}
func (t *MemFSTest) Mkdir_OneLevel() {
var err error
var fi os.FileInfo
var stat *syscall.Stat_t
var entries []os.FileInfo
dirName := path.Join(t.mfs.Dir(), "dir")
// Simulate time advancing.
t.clock.AdvanceTime(time.Second)
// Create a directory within the root.
createTime := t.clock.Now()
err = os.Mkdir(dirName, 0754)
AssertEq(nil, err)
// Simulate time advancing.
t.clock.AdvanceTime(time.Second)
// Stat the directory.
fi, err = os.Stat(dirName)
stat = fi.Sys().(*syscall.Stat_t)
AssertEq(nil, err)
ExpectEq("dir", fi.Name())
ExpectEq(0, fi.Size())
ExpectEq(os.ModeDir|0754, fi.Mode())
ExpectEq(0, fi.ModTime().Sub(createTime))
ExpectTrue(fi.IsDir())
ExpectNe(0, stat.Ino)
ExpectEq(1, stat.Nlink)
ExpectEq(currentUid(), stat.Uid)
ExpectEq(currentGid(), stat.Gid)
ExpectEq(0, stat.Size)
ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(createTime))
ExpectEq(0, timespecToTime(stat.Birthtimespec).Sub(createTime))
// Check the root's mtime.
fi, err = os.Stat(t.mfs.Dir())
AssertEq(nil, err)
ExpectEq(0, fi.ModTime().Sub(createTime))
// Read the directory.
entries, err = ioutil.ReadDir(dirName)
AssertEq(nil, err)
ExpectThat(entries, ElementsAre())
// Read the root.
entries, err = ioutil.ReadDir(t.mfs.Dir())
AssertEq(nil, err)
AssertEq(1, len(entries))
fi = entries[0]
ExpectEq("dir", fi.Name())
ExpectEq(os.ModeDir|0754, fi.Mode())
}
func (t *MemFSTest) Mkdir_TwoLevels() {
var err error
var fi os.FileInfo
var stat *syscall.Stat_t
var entries []os.FileInfo
// Create a directory within the root.
err = os.Mkdir(path.Join(t.mfs.Dir(), "parent"), 0700)
AssertEq(nil, err)
// Simulate time advancing.
t.clock.AdvanceTime(time.Second)
// Create a child of that directory.
createTime := t.clock.Now()
err = os.Mkdir(path.Join(t.mfs.Dir(), "parent/dir"), 0754)
AssertEq(nil, err)
// Simulate time advancing.
t.clock.AdvanceTime(time.Second)
// Stat the directory.
fi, err = os.Stat(path.Join(t.mfs.Dir(), "parent/dir"))
stat = fi.Sys().(*syscall.Stat_t)
AssertEq(nil, err)
ExpectEq("dir", fi.Name())
ExpectEq(0, fi.Size())
ExpectEq(os.ModeDir|0754, fi.Mode())
ExpectEq(0, fi.ModTime().Sub(createTime))
ExpectTrue(fi.IsDir())
ExpectNe(0, stat.Ino)
ExpectEq(1, stat.Nlink)
ExpectEq(currentUid(), stat.Uid)
ExpectEq(currentGid(), stat.Gid)
ExpectEq(0, stat.Size)
ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(createTime))
ExpectEq(0, timespecToTime(stat.Birthtimespec).Sub(createTime))
// Check the parent's mtime.
fi, err = os.Stat(path.Join(t.mfs.Dir(), "parent"))
AssertEq(nil, err)
ExpectEq(0, fi.ModTime().Sub(createTime))
// Read the directory.
entries, err = ioutil.ReadDir(path.Join(t.mfs.Dir(), "parent/dir"))
AssertEq(nil, err)
ExpectThat(entries, ElementsAre())
// Read the parent.
entries, err = ioutil.ReadDir(path.Join(t.mfs.Dir(), "parent"))
AssertEq(nil, err)
AssertEq(1, len(entries))
fi = entries[0]
ExpectEq("dir", fi.Name())
ExpectEq(os.ModeDir|0754, fi.Mode())
}
// Mkdir_AlreadyExists verifies that creating a directory that already
// exists fails with an error mentioning "exists".
func (t *MemFSTest) Mkdir_AlreadyExists() {
	var err error
	dirName := path.Join(t.mfs.Dir(), "dir")
	// Create the directory once.
	err = os.Mkdir(dirName, 0754)
	AssertEq(nil, err)
	// Attempt to create it again; this must fail.
	err = os.Mkdir(dirName, 0754)
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("exists")))
}
func (t *MemFSTest) Mkdir_IntermediateIsFile() {
var err error
// Create a file.
fileName := path.Join(t.mfs.Dir(), "foo")
err = ioutil.WriteFile(fileName, []byte{}, 0700)
AssertEq(nil, err)
// Attempt to create a directory within the file.
dirName := path.Join(fileName, "dir")
err = os.Mkdir(dirName, 0754)
AssertNe(nil, err)
ExpectThat(err, Error(HasSubstr("not a directory")))
}
func (t *MemFSTest) Mkdir_IntermediateIsNonExistent() {
var err error
// Attempt to create a sub-directory of a non-existent sub-directory.
dirName := path.Join(t.mfs.Dir(), "foo/dir")
err = os.Mkdir(dirName, 0754)
AssertNe(nil, err)
ExpectThat(err, Error(HasSubstr("no such file or directory")))
}
func (t *MemFSTest) Mkdir_PermissionDenied() {
var err error
// Create a directory within the root without write permissions.
err = os.Mkdir(path.Join(t.mfs.Dir(), "parent"), 0500)
AssertEq(nil, err)
// Attempt to create a child of that directory.
err = os.Mkdir(path.Join(t.mfs.Dir(), "parent/dir"), 0754)
AssertNe(nil, err)
ExpectThat(err, Error(HasSubstr("permission denied")))
}
func (t *MemFSTest) CreateNewFile_InRoot() {
var err error
var fi os.FileInfo
var stat *syscall.Stat_t
// Write a file.
fileName := path.Join(t.mfs.Dir(), "foo")
const contents = "Hello\x00world"
createTime := t.clock.Now()
err = ioutil.WriteFile(fileName, []byte(contents), 0400)
AssertEq(nil, err)
// Simulate time advancing.
t.clock.AdvanceTime(time.Second)
// Stat it.
fi, err = os.Stat(fileName)
stat = fi.Sys().(*syscall.Stat_t)
AssertEq(nil, err)
ExpectEq("foo", fi.Name())
ExpectEq(len(contents), fi.Size())
ExpectEq(0400, fi.Mode())
ExpectEq(0, fi.ModTime().Sub(createTime))
ExpectFalse(fi.IsDir())
ExpectNe(0, stat.Ino)
ExpectEq(1, stat.Nlink)
ExpectEq(currentUid(), stat.Uid)
ExpectEq(currentGid(), stat.Gid)
ExpectEq(len(contents), stat.Size)
ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(createTime))
ExpectEq(0, timespecToTime(stat.Birthtimespec).Sub(createTime))
// Read it back.
slice, err := ioutil.ReadFile(fileName)
AssertEq(nil, err)
ExpectEq(contents, string(slice))
}
func (t *MemFSTest) CreateNewFile_InSubDir() {
var err error
var fi os.FileInfo
var stat *syscall.Stat_t
// Create a sub-dir.
dirName := path.Join(t.mfs.Dir(), "dir")
err = os.Mkdir(dirName, 0700)
AssertEq(nil, err)
// Write a file.
fileName := path.Join(dirName, "foo")
const contents = "Hello\x00world"
createTime := t.clock.Now()
err = ioutil.WriteFile(fileName, []byte(contents), 0400)
AssertEq(nil, err)
// Simulate time advancing.
t.clock.AdvanceTime(time.Second)
// Stat it.
fi, err = os.Stat(fileName)
stat = fi.Sys().(*syscall.Stat_t)
AssertEq(nil, err)
ExpectEq("foo", fi.Name())
ExpectEq(len(contents), fi.Size())
ExpectEq(0400, fi.Mode())
ExpectEq(0, fi.ModTime().Sub(createTime))
ExpectFalse(fi.IsDir())
ExpectNe(0, stat.Ino)
ExpectEq(1, stat.Nlink)
ExpectEq(currentUid(), stat.Uid)
ExpectEq(currentGid(), stat.Gid)
ExpectEq(len(contents), stat.Size)
ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(createTime))
ExpectEq(0, timespecToTime(stat.Birthtimespec).Sub(createTime))
// Read it back.
slice, err := ioutil.ReadFile(fileName)
AssertEq(nil, err)
ExpectEq(contents, string(slice))
}
func (t *MemFSTest) ModifyExistingFile_InRoot() {
var err error
var n int
var fi os.FileInfo
var stat *syscall.Stat_t
// Write a file.
fileName := path.Join(t.mfs.Dir(), "foo")
createTime := t.clock.Now()
err = ioutil.WriteFile(fileName, []byte("Jello, world!"), 0600)
AssertEq(nil, err)
// Simulate time advancing.
t.clock.AdvanceTime(time.Second)
// Open the file and modify it.
f, err := os.OpenFile(fileName, os.O_WRONLY, 0400)
t.toClose = append(t.toClose, f)
AssertEq(nil, err)
modifyTime := t.clock.Now()
n, err = f.WriteAt([]byte("H"), 0)
AssertEq(nil, err)
AssertEq(1, n)
// Simulate time advancing.
t.clock.AdvanceTime(time.Second)
// Stat the file.
fi, err = os.Stat(fileName)
stat = fi.Sys().(*syscall.Stat_t)
AssertEq(nil, err)
ExpectEq("foo", fi.Name())
ExpectEq(len("Hello, world!"), fi.Size())
ExpectEq(0600, fi.Mode())
ExpectEq(0, fi.ModTime().Sub(modifyTime))
ExpectFalse(fi.IsDir())
ExpectNe(0, stat.Ino)
ExpectEq(1, stat.Nlink)
ExpectEq(currentUid(), stat.Uid)
ExpectEq(currentGid(), stat.Gid)
ExpectEq(len("Hello, world!"), stat.Size)
ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(modifyTime))
ExpectEq(0, timespecToTime(stat.Birthtimespec).Sub(createTime))
// Read the file back.
slice, err := ioutil.ReadFile(fileName)
AssertEq(nil, err)
ExpectEq("Hello, world!", string(slice))
}
// ModifyExistingFile_InSubDir is not yet implemented.
func (t *MemFSTest) ModifyExistingFile_InSubDir() {
	AssertTrue(false, "TODO")
}

// UnlinkFile_Exists is not yet implemented.
func (t *MemFSTest) UnlinkFile_Exists() {
	AssertTrue(false, "TODO")
}

// UnlinkFile_NotAFile is not yet implemented.
func (t *MemFSTest) UnlinkFile_NotAFile() {
	AssertTrue(false, "TODO")
}

// UnlinkFile_NonExistent is not yet implemented.
func (t *MemFSTest) UnlinkFile_NonExistent() {
	AssertTrue(false, "TODO")
}

// UnlinkFile_StillOpen is not yet implemented.
func (t *MemFSTest) UnlinkFile_StillOpen() {
	AssertTrue(false, "TODO")
}
// Rmdir_NonEmpty checks that removing a directory that still has children
// fails with a "not empty" error.
func (t *MemFSTest) Rmdir_NonEmpty() {
	var err error

	// Create two levels of directories.
	err = os.MkdirAll(path.Join(t.mfs.Dir(), "foo/bar"), 0754)
	AssertEq(nil, err)

	// Attempt to remove the parent.
	err = os.Remove(path.Join(t.mfs.Dir(), "foo"))
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("not empty")))
}

// Rmdir_Empty checks that empty directories can be removed bottom-up, that
// removal updates the parent's mtime, and that listings no longer show the
// removed entries.
func (t *MemFSTest) Rmdir_Empty() {
	var err error
	var entries []os.FileInfo

	// Create two levels of directories.
	err = os.MkdirAll(path.Join(t.mfs.Dir(), "foo/bar"), 0754)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Remove the leaf.
	rmTime := t.clock.Now()
	err = os.Remove(path.Join(t.mfs.Dir(), "foo/bar"))
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// There should be nothing left in the parent.
	entries, err = ioutil.ReadDir(path.Join(t.mfs.Dir(), "foo"))
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())

	// Check the parent's mtime.
	fi, err := os.Stat(path.Join(t.mfs.Dir(), "foo"))
	AssertEq(nil, err)
	ExpectEq(0, fi.ModTime().Sub(rmTime))

	// Remove the parent.
	err = os.Remove(path.Join(t.mfs.Dir(), "foo"))
	AssertEq(nil, err)

	// Now the root directory should be empty, too.
	entries, err = ioutil.ReadDir(t.mfs.Dir())
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())
}

// Rmdir_NonExistent checks that removing a missing path fails with ENOENT.
func (t *MemFSTest) Rmdir_NonExistent() {
	err := os.Remove(path.Join(t.mfs.Dir(), "blah"))
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("no such file or directory")))
}
// Rmdir_OpenedForReading checks that a directory removed while an open handle
// exists can still be statted and listed through that handle, and that the
// stale handle does not see entries of a newly created directory with the
// same name.
func (t *MemFSTest) Rmdir_OpenedForReading() {
	var err error

	// Create a directory.
	createTime := t.clock.Now()
	err = os.Mkdir(path.Join(t.mfs.Dir(), "dir"), 0700)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Open the directory for reading.
	f, err := os.Open(path.Join(t.mfs.Dir(), "dir"))
	defer func() {
		if f != nil {
			ExpectEq(nil, f.Close())
		}
	}()

	AssertEq(nil, err)

	// Remove the directory.
	err = os.Remove(path.Join(t.mfs.Dir(), "dir"))
	AssertEq(nil, err)

	// Create a new directory, with the same name even, and add some contents
	// within it.
	err = os.MkdirAll(path.Join(t.mfs.Dir(), "dir/foo"), 0700)
	AssertEq(nil, err)

	err = os.MkdirAll(path.Join(t.mfs.Dir(), "dir/bar"), 0700)
	AssertEq(nil, err)

	err = os.MkdirAll(path.Join(t.mfs.Dir(), "dir/baz"), 0700)
	AssertEq(nil, err)

	// We should still be able to stat the open file handle. It should show up
	// as unlinked.
	fi, err := f.Stat()
	AssertEq(nil, err) // Previously unchecked; fi may be nil on error.

	ExpectEq("dir", fi.Name())
	ExpectEq(0, fi.ModTime().Sub(createTime))

	// TODO(jacobsa): Re-enable this assertion if the following issue is fixed:
	// https://github.com/bazillion/fuse/issues/66
	// ExpectEq(0, fi.Sys().(*syscall.Stat_t).Nlink)

	// Attempt to read from the directory. This should succeed even though it
	// has been unlinked, and we shouldn't see any junk from the new directory.
	entries, err := f.Readdir(0)
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())
}
// CaseSensitive verifies that name lookup is case-sensitive for both files
// and directories.
func (t *MemFSTest) CaseSensitive() {
	var err error

	// Create a file.
	err = ioutil.WriteFile(path.Join(t.mfs.Dir(), "file"), []byte{}, 0400)
	AssertEq(nil, err)

	// Create a directory.
	err = os.Mkdir(path.Join(t.mfs.Dir(), "dir"), 0400)
	AssertEq(nil, err)

	// Attempt to stat with the wrong case.
	names := []string{
		"FILE",
		"File",
		"filE",
		"DIR",
		"Dir",
		"dIr",
	}

	for _, name := range names {
		_, err = os.Stat(path.Join(t.mfs.Dir(), name))
		AssertNe(nil, err, "Name: %s", name)
		AssertThat(err, Error(HasSubstr("no such file or directory")))
	}
}

// FileReadsAndWrites is not yet implemented.
func (t *MemFSTest) FileReadsAndWrites() {
	AssertTrue(false, "TODO")
}
// WriteOverlapsEndOfFile writes a range straddling EOF and checks the file
// grows to cover it, with the pre-existing hole zero-filled.
func (t *MemFSTest) WriteOverlapsEndOfFile() {
	var err error
	var n int

	// Create a file.
	f, err := os.Create(path.Join(t.mfs.Dir(), "foo"))
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Make it 4 bytes long.
	err = f.Truncate(4)
	AssertEq(nil, err)

	// Write the range [2, 6).
	n, err = f.WriteAt([]byte("taco"), 2)
	AssertEq(nil, err)
	AssertEq(4, n)

	// Read the full contents of the file.
	contents, err := ioutil.ReadAll(f)
	AssertEq(nil, err)
	ExpectEq("\x00\x00taco", string(contents))
}

// WriteStartsAtEndOfFile writes a range beginning exactly at EOF and checks
// the file is extended.
func (t *MemFSTest) WriteStartsAtEndOfFile() {
	var err error
	var n int

	// Create a file.
	f, err := os.Create(path.Join(t.mfs.Dir(), "foo"))
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Make it 2 bytes long.
	err = f.Truncate(2)
	AssertEq(nil, err)

	// Write the range [2, 6).
	n, err = f.WriteAt([]byte("taco"), 2)
	AssertEq(nil, err)
	AssertEq(4, n)

	// Read the full contents of the file.
	contents, err := ioutil.ReadAll(f)
	AssertEq(nil, err)
	ExpectEq("\x00\x00taco", string(contents))
}

// WriteStartsPastEndOfFile writes a range beyond EOF of an empty file and
// checks the resulting hole is zero-filled.
func (t *MemFSTest) WriteStartsPastEndOfFile() {
	var err error
	var n int

	// Create a file.
	f, err := os.Create(path.Join(t.mfs.Dir(), "foo"))
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Write the range [2, 6).
	n, err = f.WriteAt([]byte("taco"), 2)
	AssertEq(nil, err)
	AssertEq(4, n)

	// Read the full contents of the file.
	contents, err := ioutil.ReadAll(f)
	AssertEq(nil, err)
	ExpectEq("\x00\x00taco", string(contents))
}
// WriteAtDoesntChangeOffset_NotAppendMode verifies that a pwrite-style
// WriteAt call leaves the file's seek offset untouched.
func (t *MemFSTest) WriteAtDoesntChangeOffset_NotAppendMode() {
	var err error
	var n int

	// Create a file.
	f, err := os.Create(path.Join(t.mfs.Dir(), "foo"))
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Make it 16 bytes long.
	err = f.Truncate(16)
	AssertEq(nil, err)

	// Seek to offset 4.
	_, err = f.Seek(4, 0)
	AssertEq(nil, err)

	// Write the range [2, 6). NOTE(review): the comment previously said
	// [10, 14), but the code writes at offset 2 — confirm which was intended.
	n, err = f.WriteAt([]byte("taco"), 2)
	AssertEq(nil, err)
	AssertEq(4, n)

	// We should still be at offset 4.
	offset, err := getFileOffset(f)
	AssertEq(nil, err)
	ExpectEq(4, offset)
}

// WriteAtDoesntChangeOffset_AppendMode verifies that WriteAt leaves the seek
// offset untouched even when the file was opened with O_APPEND.
func (t *MemFSTest) WriteAtDoesntChangeOffset_AppendMode() {
	var err error
	var n int

	// Create a file in append mode.
	f, err := os.OpenFile(
		path.Join(t.mfs.Dir(), "foo"),
		os.O_RDWR|os.O_APPEND|os.O_CREATE,
		0600)

	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Make it 16 bytes long.
	err = f.Truncate(16)
	AssertEq(nil, err)

	// Seek to offset 4.
	_, err = f.Seek(4, 0)
	AssertEq(nil, err)

	// Write the range [2, 6). NOTE(review): the comment previously said
	// [10, 14), but the code writes at offset 2 — confirm which was intended.
	n, err = f.WriteAt([]byte("taco"), 2)
	AssertEq(nil, err)
	AssertEq(4, n)

	// We should still be at offset 4.
	offset, err := getFileOffset(f)
	AssertEq(nil, err)
	ExpectEq(4, offset)
}
// AppendMode verifies O_APPEND semantics: Write ignores the current seek
// offset and appends at EOF (updating the offset), while WriteAt still writes
// at its explicit offset without moving the seek position.
func (t *MemFSTest) AppendMode() {
	var err error
	var n int
	var off int64
	buf := make([]byte, 1024)

	// Create a file with some contents.
	fileName := path.Join(t.mfs.Dir(), "foo")
	err = ioutil.WriteFile(fileName, []byte("Jello, "), 0600)
	AssertEq(nil, err)

	// Open the file in append mode.
	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_APPEND, 0600)
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Seek to somewhere silly and then write.
	off, err = f.Seek(2, 0)
	AssertEq(nil, err)
	AssertEq(2, off)

	n, err = f.Write([]byte("world!"))
	AssertEq(nil, err)
	AssertEq(6, n)

	// The offset should have been updated to point at the end of the file.
	off, err = getFileOffset(f)
	AssertEq(nil, err)
	ExpectEq(13, off)

	// A random write should still work, without updating the offset.
	n, err = f.WriteAt([]byte("H"), 0)
	AssertEq(nil, err)
	AssertEq(1, n)

	off, err = getFileOffset(f)
	AssertEq(nil, err)
	ExpectEq(13, off)

	// Read back the contents of the file, which should be correct even though
	// we seeked to a silly place before writing the world part.
	n, err = f.ReadAt(buf, 0)
	AssertEq(io.EOF, err)
	ExpectEq("Hello, world!", string(buf[:n]))
}

// ReadsPastEndOfFile verifies that reads overlapping, starting at, or beyond
// EOF return io.EOF with the appropriate short count.
func (t *MemFSTest) ReadsPastEndOfFile() {
	var err error
	var n int
	buf := make([]byte, 1024)

	// Create a file.
	f, err := os.Create(path.Join(t.mfs.Dir(), "foo"))
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Give it some contents.
	n, err = f.Write([]byte("taco"))
	AssertEq(nil, err)
	AssertEq(4, n)

	// Read a range overlapping EOF.
	n, err = f.ReadAt(buf[:4], 2)
	AssertEq(io.EOF, err)
	ExpectEq(2, n)
	ExpectEq("co", string(buf[:n]))

	// Read a range starting at EOF.
	n, err = f.ReadAt(buf[:4], 4)
	AssertEq(io.EOF, err)
	ExpectEq(0, n)
	ExpectEq("", string(buf[:n]))

	// Read a range starting past EOF.
	n, err = f.ReadAt(buf[:4], 100)
	AssertEq(io.EOF, err)
	ExpectEq(0, n)
	ExpectEq("", string(buf[:n]))
}
// Truncate_Smaller shrinks a file and verifies size and remaining contents.
func (t *MemFSTest) Truncate_Smaller() {
	var err error
	fileName := path.Join(t.mfs.Dir(), "foo")

	// Create a file.
	err = ioutil.WriteFile(fileName, []byte("taco"), 0600)
	AssertEq(nil, err)

	// Open it for modification.
	f, err := os.OpenFile(fileName, os.O_RDWR, 0)
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Truncate it.
	err = f.Truncate(2)
	AssertEq(nil, err)

	// Stat it.
	fi, err := f.Stat()
	AssertEq(nil, err)
	ExpectEq(2, fi.Size())

	// Read the contents.
	contents, err := ioutil.ReadFile(fileName)
	AssertEq(nil, err)
	ExpectEq("ta", string(contents))
}

// Truncate_SameSize truncates to the current size and verifies nothing
// changes.
func (t *MemFSTest) Truncate_SameSize() {
	var err error
	fileName := path.Join(t.mfs.Dir(), "foo")

	// Create a file.
	err = ioutil.WriteFile(fileName, []byte("taco"), 0600)
	AssertEq(nil, err)

	// Open it for modification.
	f, err := os.OpenFile(fileName, os.O_RDWR, 0)
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Truncate it.
	err = f.Truncate(4)
	AssertEq(nil, err)

	// Stat it.
	fi, err := f.Stat()
	AssertEq(nil, err)
	ExpectEq(4, fi.Size())

	// Read the contents.
	contents, err := ioutil.ReadFile(fileName)
	AssertEq(nil, err)
	ExpectEq("taco", string(contents))
}

// Truncate_Larger grows a file via truncate and verifies the extension is
// zero-filled.
func (t *MemFSTest) Truncate_Larger() {
	var err error
	fileName := path.Join(t.mfs.Dir(), "foo")

	// Create a file.
	err = ioutil.WriteFile(fileName, []byte("taco"), 0600)
	AssertEq(nil, err)

	// Open it for modification.
	f, err := os.OpenFile(fileName, os.O_RDWR, 0)
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Truncate it.
	err = f.Truncate(6)
	AssertEq(nil, err)

	// Stat it.
	fi, err := f.Stat()
	AssertEq(nil, err)
	ExpectEq(6, fi.Size())

	// Read the contents.
	contents, err := ioutil.ReadFile(fileName)
	AssertEq(nil, err)
	ExpectEq("taco\x00\x00", string(contents))
}

// Chmod changes a file's permission bits and verifies the new mode is
// reported by stat.
func (t *MemFSTest) Chmod() {
	var err error
	fileName := path.Join(t.mfs.Dir(), "foo")

	// Create a file.
	err = ioutil.WriteFile(fileName, []byte(""), 0600)
	AssertEq(nil, err)

	// Chmod it.
	err = os.Chmod(fileName, 0754)
	AssertEq(nil, err)

	// Stat it.
	fi, err := os.Stat(fileName)
	AssertEq(nil, err)
	ExpectEq(os.FileMode(0754), fi.Mode())
}
// Chtimes sets a file's mtime via os.Chtimes and verifies stat reports it.
// Implemented to match the completed version of this test elsewhere in the
// suite, replacing the AssertTrue(false, "TODO") stub.
func (t *MemFSTest) Chtimes() {
	var err error
	fileName := path.Join(t.mfs.Dir(), "foo")

	// Create a file.
	err = ioutil.WriteFile(fileName, []byte(""), 0600)
	AssertEq(nil, err)

	// Chtimes it.
	expectedMtime := time.Now().Add(123 * time.Millisecond)
	err = os.Chtimes(fileName, time.Time{}, expectedMtime)
	AssertEq(nil, err)

	// Stat it.
	fi, err := os.Stat(fileName)
	AssertEq(nil, err)
	ExpectEq(0, fi.ModTime().Sub(expectedMtime))
}
MemFSTest.Chtimes
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memfs_test
import (
"io"
"io/ioutil"
"log"
"os"
"os/user"
"path"
"strconv"
"strings"
"syscall"
"testing"
"time"
"github.com/jacobsa/fuse"
"github.com/jacobsa/fuse/samples/memfs"
"github.com/jacobsa/gcsfuse/timeutil"
. "github.com/jacobsa/oglematchers"
. "github.com/jacobsa/ogletest"
"golang.org/x/net/context"
)
// TestMemFS hands control to ogletest, which runs the registered suite.
func TestMemFS(t *testing.T) { RunTests(t) }
////////////////////////////////////////////////////////////////////////
// Helpers
////////////////////////////////////////////////////////////////////////
func currentUid() uint32 {
user, err := user.Current()
if err != nil {
panic(err)
}
uid, err := strconv.ParseUint(user.Uid, 10, 32)
if err != nil {
panic(err)
}
return uint32(uid)
}
func currentGid() uint32 {
user, err := user.Current()
if err != nil {
panic(err)
}
gid, err := strconv.ParseUint(user.Gid, 10, 32)
if err != nil {
panic(err)
}
return uint32(gid)
}
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(ts.Sec, ts.Nsec)
}
////////////////////////////////////////////////////////////////////////
// Boilerplate
////////////////////////////////////////////////////////////////////////
// MemFSTest mounts an in-memory file system driven by a simulated clock and
// exercises it through ordinary OS calls.
type MemFSTest struct {
	// clock is the simulated clock handed to the file system, letting tests
	// advance time deterministically.
	clock timeutil.SimulatedClock

	// mfs is the mounted file system under test.
	mfs *fuse.MountedFileSystem

	// Files to close when tearing down. Nil entries are skipped.
	toClose []io.Closer
}

// Compile-time checks that the suite implements the test lifecycle hooks.
var _ SetUpInterface = &MemFSTest{}
var _ TearDownInterface = &MemFSTest{}

func init() { RegisterTestSuite(&MemFSTest{}) }
// SetUp mounts a fresh in-memory file system on a new temporary directory
// before each test, panicking (rather than failing the test) on any setup
// error.
func (t *MemFSTest) SetUp(ti *TestInfo) {
	var err error

	// Set up a fixed, non-zero time.
	t.clock.SetTime(time.Now())

	// Set up a temporary directory for mounting.
	mountPoint, err := ioutil.TempDir("", "memfs_test")
	if err != nil {
		panic("ioutil.TempDir: " + err.Error())
	}

	// Mount a file system.
	fs := memfs.NewMemFS(&t.clock)
	if t.mfs, err = fuse.Mount(mountPoint, fs); err != nil {
		panic("Mount: " + err.Error())
	}

	// Block until the mount is usable by the OS.
	if err = t.mfs.WaitForReady(context.Background()); err != nil {
		panic("MountedFileSystem.WaitForReady: " + err.Error())
	}
}
// TearDown closes any files the test left open, then unmounts the file
// system, retrying with exponential backoff while the mount is still busy,
// and finally waits for the unmount to complete.
func (t *MemFSTest) TearDown() {
	// Close any files we opened.
	for _, c := range t.toClose {
		if c == nil {
			continue
		}

		err := c.Close()
		if err != nil {
			panic(err)
		}
	}

	// Unmount the file system. Try again on "resource busy" errors.
	delay := 10 * time.Millisecond
	for {
		err := t.mfs.Unmount()
		if err == nil {
			break
		}

		if strings.Contains(err.Error(), "resource busy") {
			log.Println("Resource busy error while unmounting; trying again")
			time.Sleep(delay)
			// Grow the delay ~30% per attempt so a busy mount doesn't spin.
			delay = time.Duration(1.3 * float64(delay))
			continue
		}

		panic("MountedFileSystem.Unmount: " + err.Error())
	}

	if err := t.mfs.Join(context.Background()); err != nil {
		panic("MountedFileSystem.Join: " + err.Error())
	}
}
////////////////////////////////////////////////////////////////////////
// Test functions
////////////////////////////////////////////////////////////////////////
// ContentsOfEmptyFileSystem checks that a fresh mount contains no entries.
func (t *MemFSTest) ContentsOfEmptyFileSystem() {
	entries, err := ioutil.ReadDir(t.mfs.Dir())
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())
}

// Mkdir_OneLevel creates a directory under the root and verifies its stat
// attributes, the root's updated mtime, and both directory listings.
func (t *MemFSTest) Mkdir_OneLevel() {
	var err error
	var fi os.FileInfo
	var stat *syscall.Stat_t
	var entries []os.FileInfo

	dirName := path.Join(t.mfs.Dir(), "dir")

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Create a directory within the root.
	createTime := t.clock.Now()
	err = os.Mkdir(dirName, 0754)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Stat the directory. Check err before touching fi: on failure fi is nil
	// and the Sys() call below would panic, masking the real error.
	fi, err = os.Stat(dirName)
	AssertEq(nil, err)
	stat = fi.Sys().(*syscall.Stat_t)

	ExpectEq("dir", fi.Name())
	ExpectEq(0, fi.Size())
	ExpectEq(os.ModeDir|0754, fi.Mode())
	ExpectEq(0, fi.ModTime().Sub(createTime))
	ExpectTrue(fi.IsDir())

	ExpectNe(0, stat.Ino)
	ExpectEq(1, stat.Nlink)
	ExpectEq(currentUid(), stat.Uid)
	ExpectEq(currentGid(), stat.Gid)
	ExpectEq(0, stat.Size)
	ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Birthtimespec).Sub(createTime))

	// Check the root's mtime.
	fi, err = os.Stat(t.mfs.Dir())
	AssertEq(nil, err)
	ExpectEq(0, fi.ModTime().Sub(createTime))

	// Read the directory.
	entries, err = ioutil.ReadDir(dirName)
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())

	// Read the root.
	entries, err = ioutil.ReadDir(t.mfs.Dir())
	AssertEq(nil, err)
	AssertEq(1, len(entries))

	fi = entries[0]
	ExpectEq("dir", fi.Name())
	ExpectEq(os.ModeDir|0754, fi.Mode())
}

// Mkdir_TwoLevels creates a nested directory and verifies its stat
// attributes, the parent's updated mtime, and both listings.
func (t *MemFSTest) Mkdir_TwoLevels() {
	var err error
	var fi os.FileInfo
	var stat *syscall.Stat_t
	var entries []os.FileInfo

	// Create a directory within the root.
	err = os.Mkdir(path.Join(t.mfs.Dir(), "parent"), 0700)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Create a child of that directory.
	createTime := t.clock.Now()
	err = os.Mkdir(path.Join(t.mfs.Dir(), "parent/dir"), 0754)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Stat the directory. Check err before touching fi: on failure fi is nil
	// and the Sys() call below would panic, masking the real error.
	fi, err = os.Stat(path.Join(t.mfs.Dir(), "parent/dir"))
	AssertEq(nil, err)
	stat = fi.Sys().(*syscall.Stat_t)

	ExpectEq("dir", fi.Name())
	ExpectEq(0, fi.Size())
	ExpectEq(os.ModeDir|0754, fi.Mode())
	ExpectEq(0, fi.ModTime().Sub(createTime))
	ExpectTrue(fi.IsDir())

	ExpectNe(0, stat.Ino)
	ExpectEq(1, stat.Nlink)
	ExpectEq(currentUid(), stat.Uid)
	ExpectEq(currentGid(), stat.Gid)
	ExpectEq(0, stat.Size)
	ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Birthtimespec).Sub(createTime))

	// Check the parent's mtime.
	fi, err = os.Stat(path.Join(t.mfs.Dir(), "parent"))
	AssertEq(nil, err)
	ExpectEq(0, fi.ModTime().Sub(createTime))

	// Read the directory.
	entries, err = ioutil.ReadDir(path.Join(t.mfs.Dir(), "parent/dir"))
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())

	// Read the parent.
	entries, err = ioutil.ReadDir(path.Join(t.mfs.Dir(), "parent"))
	AssertEq(nil, err)
	AssertEq(1, len(entries))

	fi = entries[0]
	ExpectEq("dir", fi.Name())
	ExpectEq(os.ModeDir|0754, fi.Mode())
}

// Mkdir_AlreadyExists checks that creating an existing directory fails.
func (t *MemFSTest) Mkdir_AlreadyExists() {
	var err error
	dirName := path.Join(t.mfs.Dir(), "dir")

	// Create the directory once.
	err = os.Mkdir(dirName, 0754)
	AssertEq(nil, err)

	// Attempt to create it again.
	err = os.Mkdir(dirName, 0754)
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("exists")))
}

// Mkdir_IntermediateIsFile checks that mkdir fails when a path component is
// a regular file.
func (t *MemFSTest) Mkdir_IntermediateIsFile() {
	var err error

	// Create a file.
	fileName := path.Join(t.mfs.Dir(), "foo")
	err = ioutil.WriteFile(fileName, []byte{}, 0700)
	AssertEq(nil, err)

	// Attempt to create a directory within the file.
	dirName := path.Join(fileName, "dir")
	err = os.Mkdir(dirName, 0754)
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("not a directory")))
}

// Mkdir_IntermediateIsNonExistent checks that mkdir fails with ENOENT when a
// path component is missing.
func (t *MemFSTest) Mkdir_IntermediateIsNonExistent() {
	var err error

	// Attempt to create a sub-directory of a non-existent sub-directory.
	dirName := path.Join(t.mfs.Dir(), "foo/dir")
	err = os.Mkdir(dirName, 0754)
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("no such file or directory")))
}

// Mkdir_PermissionDenied checks that mkdir inside a non-writable directory
// fails with EACCES.
func (t *MemFSTest) Mkdir_PermissionDenied() {
	var err error

	// Create a directory within the root without write permissions.
	err = os.Mkdir(path.Join(t.mfs.Dir(), "parent"), 0500)
	AssertEq(nil, err)

	// Attempt to create a child of that directory.
	err = os.Mkdir(path.Join(t.mfs.Dir(), "parent/dir"), 0754)
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("permission denied")))
}
// CreateNewFile_InRoot creates a file directly under the mount root and
// verifies its stat attributes (size, mode, times, ownership) and contents.
func (t *MemFSTest) CreateNewFile_InRoot() {
	var err error
	var fi os.FileInfo
	var stat *syscall.Stat_t

	// Write a file.
	fileName := path.Join(t.mfs.Dir(), "foo")
	const contents = "Hello\x00world"

	createTime := t.clock.Now()
	err = ioutil.WriteFile(fileName, []byte(contents), 0400)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Stat it. Check err before touching fi: on failure fi is nil and the
	// Sys() call below would panic, masking the real error.
	fi, err = os.Stat(fileName)
	AssertEq(nil, err)
	stat = fi.Sys().(*syscall.Stat_t)

	ExpectEq("foo", fi.Name())
	ExpectEq(len(contents), fi.Size())
	ExpectEq(0400, fi.Mode())
	ExpectEq(0, fi.ModTime().Sub(createTime))
	ExpectFalse(fi.IsDir())

	ExpectNe(0, stat.Ino)
	ExpectEq(1, stat.Nlink)
	ExpectEq(currentUid(), stat.Uid)
	ExpectEq(currentGid(), stat.Gid)
	ExpectEq(len(contents), stat.Size)
	ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Birthtimespec).Sub(createTime))

	// Read it back.
	slice, err := ioutil.ReadFile(fileName)
	AssertEq(nil, err)
	ExpectEq(contents, string(slice))
}

// CreateNewFile_InSubDir creates a file inside a sub-directory and verifies
// its stat attributes and contents.
func (t *MemFSTest) CreateNewFile_InSubDir() {
	var err error
	var fi os.FileInfo
	var stat *syscall.Stat_t

	// Create a sub-dir.
	dirName := path.Join(t.mfs.Dir(), "dir")
	err = os.Mkdir(dirName, 0700)
	AssertEq(nil, err)

	// Write a file.
	fileName := path.Join(dirName, "foo")
	const contents = "Hello\x00world"

	createTime := t.clock.Now()
	err = ioutil.WriteFile(fileName, []byte(contents), 0400)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Stat it. Check err before touching fi: on failure fi is nil and the
	// Sys() call below would panic, masking the real error.
	fi, err = os.Stat(fileName)
	AssertEq(nil, err)
	stat = fi.Sys().(*syscall.Stat_t)

	ExpectEq("foo", fi.Name())
	ExpectEq(len(contents), fi.Size())
	ExpectEq(0400, fi.Mode())
	ExpectEq(0, fi.ModTime().Sub(createTime))
	ExpectFalse(fi.IsDir())

	ExpectNe(0, stat.Ino)
	ExpectEq(1, stat.Nlink)
	ExpectEq(currentUid(), stat.Uid)
	ExpectEq(currentGid(), stat.Gid)
	ExpectEq(len(contents), stat.Size)
	ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Birthtimespec).Sub(createTime))

	// Read it back.
	slice, err := ioutil.ReadFile(fileName)
	AssertEq(nil, err)
	ExpectEq(contents, string(slice))
}

// ModifyExistingFile_InRoot overwrites a byte of an existing file in the root
// and verifies the new contents, the updated mtime, and that the birth time
// still reflects the original creation.
func (t *MemFSTest) ModifyExistingFile_InRoot() {
	var err error
	var n int
	var fi os.FileInfo
	var stat *syscall.Stat_t

	// Write a file.
	fileName := path.Join(t.mfs.Dir(), "foo")

	createTime := t.clock.Now()
	err = ioutil.WriteFile(fileName, []byte("Jello, world!"), 0600)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Open the file and modify it.
	f, err := os.OpenFile(fileName, os.O_WRONLY, 0400)
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	modifyTime := t.clock.Now()
	n, err = f.WriteAt([]byte("H"), 0)
	AssertEq(nil, err)
	AssertEq(1, n)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Stat the file. Check err before touching fi: on failure fi is nil and
	// the Sys() call below would panic, masking the real error.
	fi, err = os.Stat(fileName)
	AssertEq(nil, err)
	stat = fi.Sys().(*syscall.Stat_t)

	ExpectEq("foo", fi.Name())
	ExpectEq(len("Hello, world!"), fi.Size())
	ExpectEq(0600, fi.Mode())
	ExpectEq(0, fi.ModTime().Sub(modifyTime))
	ExpectFalse(fi.IsDir())

	ExpectNe(0, stat.Ino)
	ExpectEq(1, stat.Nlink)
	ExpectEq(currentUid(), stat.Uid)
	ExpectEq(currentGid(), stat.Gid)
	ExpectEq(len("Hello, world!"), stat.Size)
	ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(modifyTime))
	ExpectEq(0, timespecToTime(stat.Birthtimespec).Sub(createTime))

	// Read the file back.
	slice, err := ioutil.ReadFile(fileName)
	AssertEq(nil, err)
	ExpectEq("Hello, world!", string(slice))
}
// ModifyExistingFile_InSubDir is not yet implemented.
func (t *MemFSTest) ModifyExistingFile_InSubDir() {
	AssertTrue(false, "TODO")
}

// UnlinkFile_Exists is not yet implemented.
func (t *MemFSTest) UnlinkFile_Exists() {
	AssertTrue(false, "TODO")
}

// UnlinkFile_NotAFile is not yet implemented.
func (t *MemFSTest) UnlinkFile_NotAFile() {
	AssertTrue(false, "TODO")
}

// UnlinkFile_NonExistent is not yet implemented.
func (t *MemFSTest) UnlinkFile_NonExistent() {
	AssertTrue(false, "TODO")
}

// UnlinkFile_StillOpen is not yet implemented.
func (t *MemFSTest) UnlinkFile_StillOpen() {
	AssertTrue(false, "TODO")
}

// Rmdir_NonEmpty checks that removing a directory that still has children
// fails with a "not empty" error.
func (t *MemFSTest) Rmdir_NonEmpty() {
	var err error

	// Create two levels of directories.
	err = os.MkdirAll(path.Join(t.mfs.Dir(), "foo/bar"), 0754)
	AssertEq(nil, err)

	// Attempt to remove the parent.
	err = os.Remove(path.Join(t.mfs.Dir(), "foo"))
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("not empty")))
}

// Rmdir_Empty checks that empty directories can be removed bottom-up, that
// removal updates the parent's mtime, and that listings no longer show the
// removed entries.
func (t *MemFSTest) Rmdir_Empty() {
	var err error
	var entries []os.FileInfo

	// Create two levels of directories.
	err = os.MkdirAll(path.Join(t.mfs.Dir(), "foo/bar"), 0754)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Remove the leaf.
	rmTime := t.clock.Now()
	err = os.Remove(path.Join(t.mfs.Dir(), "foo/bar"))
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// There should be nothing left in the parent.
	entries, err = ioutil.ReadDir(path.Join(t.mfs.Dir(), "foo"))
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())

	// Check the parent's mtime.
	fi, err := os.Stat(path.Join(t.mfs.Dir(), "foo"))
	AssertEq(nil, err)
	ExpectEq(0, fi.ModTime().Sub(rmTime))

	// Remove the parent.
	err = os.Remove(path.Join(t.mfs.Dir(), "foo"))
	AssertEq(nil, err)

	// Now the root directory should be empty, too.
	entries, err = ioutil.ReadDir(t.mfs.Dir())
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())
}

// Rmdir_NonExistent checks that removing a missing path fails with ENOENT.
func (t *MemFSTest) Rmdir_NonExistent() {
	err := os.Remove(path.Join(t.mfs.Dir(), "blah"))
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("no such file or directory")))
}
// Rmdir_OpenedForReading checks that a directory removed while an open handle
// exists can still be statted and listed through that handle, and that the
// stale handle does not see entries of a newly created directory with the
// same name.
func (t *MemFSTest) Rmdir_OpenedForReading() {
	var err error

	// Create a directory.
	createTime := t.clock.Now()
	err = os.Mkdir(path.Join(t.mfs.Dir(), "dir"), 0700)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Open the directory for reading.
	f, err := os.Open(path.Join(t.mfs.Dir(), "dir"))
	defer func() {
		if f != nil {
			ExpectEq(nil, f.Close())
		}
	}()

	AssertEq(nil, err)

	// Remove the directory.
	err = os.Remove(path.Join(t.mfs.Dir(), "dir"))
	AssertEq(nil, err)

	// Create a new directory, with the same name even, and add some contents
	// within it.
	err = os.MkdirAll(path.Join(t.mfs.Dir(), "dir/foo"), 0700)
	AssertEq(nil, err)

	err = os.MkdirAll(path.Join(t.mfs.Dir(), "dir/bar"), 0700)
	AssertEq(nil, err)

	err = os.MkdirAll(path.Join(t.mfs.Dir(), "dir/baz"), 0700)
	AssertEq(nil, err)

	// We should still be able to stat the open file handle. It should show up
	// as unlinked.
	fi, err := f.Stat()
	AssertEq(nil, err) // Previously unchecked; fi may be nil on error.

	ExpectEq("dir", fi.Name())
	ExpectEq(0, fi.ModTime().Sub(createTime))

	// TODO(jacobsa): Re-enable this assertion if the following issue is fixed:
	// https://github.com/bazillion/fuse/issues/66
	// ExpectEq(0, fi.Sys().(*syscall.Stat_t).Nlink)

	// Attempt to read from the directory. This should succeed even though it
	// has been unlinked, and we shouldn't see any junk from the new directory.
	entries, err := f.Readdir(0)
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())
}
// CaseSensitive verifies that name lookup is case-sensitive for both files
// and directories.
func (t *MemFSTest) CaseSensitive() {
	var err error

	// Create a file.
	err = ioutil.WriteFile(path.Join(t.mfs.Dir(), "file"), []byte{}, 0400)
	AssertEq(nil, err)

	// Create a directory.
	err = os.Mkdir(path.Join(t.mfs.Dir(), "dir"), 0400)
	AssertEq(nil, err)

	// Attempt to stat with the wrong case.
	names := []string{
		"FILE",
		"File",
		"filE",
		"DIR",
		"Dir",
		"dIr",
	}

	for _, name := range names {
		_, err = os.Stat(path.Join(t.mfs.Dir(), name))
		AssertNe(nil, err, "Name: %s", name)
		AssertThat(err, Error(HasSubstr("no such file or directory")))
	}
}

// FileReadsAndWrites is not yet implemented.
func (t *MemFSTest) FileReadsAndWrites() {
	AssertTrue(false, "TODO")
}

// WriteOverlapsEndOfFile writes a range straddling EOF and checks the file
// grows to cover it, with the pre-existing hole zero-filled.
func (t *MemFSTest) WriteOverlapsEndOfFile() {
	var err error
	var n int

	// Create a file.
	f, err := os.Create(path.Join(t.mfs.Dir(), "foo"))
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Make it 4 bytes long.
	err = f.Truncate(4)
	AssertEq(nil, err)

	// Write the range [2, 6).
	n, err = f.WriteAt([]byte("taco"), 2)
	AssertEq(nil, err)
	AssertEq(4, n)

	// Read the full contents of the file.
	contents, err := ioutil.ReadAll(f)
	AssertEq(nil, err)
	ExpectEq("\x00\x00taco", string(contents))
}

// WriteStartsAtEndOfFile writes a range beginning exactly at EOF and checks
// the file is extended.
func (t *MemFSTest) WriteStartsAtEndOfFile() {
	var err error
	var n int

	// Create a file.
	f, err := os.Create(path.Join(t.mfs.Dir(), "foo"))
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Make it 2 bytes long.
	err = f.Truncate(2)
	AssertEq(nil, err)

	// Write the range [2, 6).
	n, err = f.WriteAt([]byte("taco"), 2)
	AssertEq(nil, err)
	AssertEq(4, n)

	// Read the full contents of the file.
	contents, err := ioutil.ReadAll(f)
	AssertEq(nil, err)
	ExpectEq("\x00\x00taco", string(contents))
}

// WriteStartsPastEndOfFile writes a range beyond EOF of an empty file and
// checks the resulting hole is zero-filled.
func (t *MemFSTest) WriteStartsPastEndOfFile() {
	var err error
	var n int

	// Create a file.
	f, err := os.Create(path.Join(t.mfs.Dir(), "foo"))
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Write the range [2, 6).
	n, err = f.WriteAt([]byte("taco"), 2)
	AssertEq(nil, err)
	AssertEq(4, n)

	// Read the full contents of the file.
	contents, err := ioutil.ReadAll(f)
	AssertEq(nil, err)
	ExpectEq("\x00\x00taco", string(contents))
}
// WriteAtDoesntChangeOffset_NotAppendMode verifies that a pwrite-style
// WriteAt call leaves the file's seek offset untouched.
func (t *MemFSTest) WriteAtDoesntChangeOffset_NotAppendMode() {
	var err error
	var n int

	// Create a file.
	f, err := os.Create(path.Join(t.mfs.Dir(), "foo"))
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Make it 16 bytes long.
	err = f.Truncate(16)
	AssertEq(nil, err)

	// Seek to offset 4.
	_, err = f.Seek(4, 0)
	AssertEq(nil, err)

	// Write the range [2, 6). NOTE(review): the comment previously said
	// [10, 14), but the code writes at offset 2 — confirm which was intended.
	n, err = f.WriteAt([]byte("taco"), 2)
	AssertEq(nil, err)
	AssertEq(4, n)

	// We should still be at offset 4.
	offset, err := getFileOffset(f)
	AssertEq(nil, err)
	ExpectEq(4, offset)
}

// WriteAtDoesntChangeOffset_AppendMode verifies that WriteAt leaves the seek
// offset untouched even when the file was opened with O_APPEND.
func (t *MemFSTest) WriteAtDoesntChangeOffset_AppendMode() {
	var err error
	var n int

	// Create a file in append mode.
	f, err := os.OpenFile(
		path.Join(t.mfs.Dir(), "foo"),
		os.O_RDWR|os.O_APPEND|os.O_CREATE,
		0600)

	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Make it 16 bytes long.
	err = f.Truncate(16)
	AssertEq(nil, err)

	// Seek to offset 4.
	_, err = f.Seek(4, 0)
	AssertEq(nil, err)

	// Write the range [2, 6). NOTE(review): the comment previously said
	// [10, 14), but the code writes at offset 2 — confirm which was intended.
	n, err = f.WriteAt([]byte("taco"), 2)
	AssertEq(nil, err)
	AssertEq(4, n)

	// We should still be at offset 4.
	offset, err := getFileOffset(f)
	AssertEq(nil, err)
	ExpectEq(4, offset)
}
// AppendMode verifies O_APPEND semantics: Write ignores the current seek
// offset and appends at EOF (updating the offset), while WriteAt still writes
// at its explicit offset without moving the seek position.
func (t *MemFSTest) AppendMode() {
	var err error
	var n int
	var off int64
	buf := make([]byte, 1024)

	// Create a file with some contents.
	fileName := path.Join(t.mfs.Dir(), "foo")
	err = ioutil.WriteFile(fileName, []byte("Jello, "), 0600)
	AssertEq(nil, err)

	// Open the file in append mode.
	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_APPEND, 0600)
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Seek to somewhere silly and then write.
	off, err = f.Seek(2, 0)
	AssertEq(nil, err)
	AssertEq(2, off)

	n, err = f.Write([]byte("world!"))
	AssertEq(nil, err)
	AssertEq(6, n)

	// The offset should have been updated to point at the end of the file.
	off, err = getFileOffset(f)
	AssertEq(nil, err)
	ExpectEq(13, off)

	// A random write should still work, without updating the offset.
	n, err = f.WriteAt([]byte("H"), 0)
	AssertEq(nil, err)
	AssertEq(1, n)

	off, err = getFileOffset(f)
	AssertEq(nil, err)
	ExpectEq(13, off)

	// Read back the contents of the file, which should be correct even though
	// we seeked to a silly place before writing the world part.
	n, err = f.ReadAt(buf, 0)
	AssertEq(io.EOF, err)
	ExpectEq("Hello, world!", string(buf[:n]))
}

// ReadsPastEndOfFile verifies that reads overlapping, starting at, or beyond
// EOF return io.EOF with the appropriate short count.
func (t *MemFSTest) ReadsPastEndOfFile() {
	var err error
	var n int
	buf := make([]byte, 1024)

	// Create a file.
	f, err := os.Create(path.Join(t.mfs.Dir(), "foo"))
	t.toClose = append(t.toClose, f)
	AssertEq(nil, err)

	// Give it some contents.
	n, err = f.Write([]byte("taco"))
	AssertEq(nil, err)
	AssertEq(4, n)

	// Read a range overlapping EOF.
	n, err = f.ReadAt(buf[:4], 2)
	AssertEq(io.EOF, err)
	ExpectEq(2, n)
	ExpectEq("co", string(buf[:n]))

	// Read a range starting at EOF.
	n, err = f.ReadAt(buf[:4], 4)
	AssertEq(io.EOF, err)
	ExpectEq(0, n)
	ExpectEq("", string(buf[:n]))

	// Read a range starting past EOF.
	n, err = f.ReadAt(buf[:4], 100)
	AssertEq(io.EOF, err)
	ExpectEq(0, n)
	ExpectEq("", string(buf[:n]))
}
}
func (t *MemFSTest) Truncate_Smaller() {
var err error
fileName := path.Join(t.mfs.Dir(), "foo")
// Create a file.
err = ioutil.WriteFile(fileName, []byte("taco"), 0600)
AssertEq(nil, err)
// Open it for modification.
f, err := os.OpenFile(fileName, os.O_RDWR, 0)
t.toClose = append(t.toClose, f)
AssertEq(nil, err)
// Truncate it.
err = f.Truncate(2)
AssertEq(nil, err)
// Stat it.
fi, err := f.Stat()
AssertEq(nil, err)
ExpectEq(2, fi.Size())
// Read the contents.
contents, err := ioutil.ReadFile(fileName)
AssertEq(nil, err)
ExpectEq("ta", string(contents))
}
func (t *MemFSTest) Truncate_SameSize() {
var err error
fileName := path.Join(t.mfs.Dir(), "foo")
// Create a file.
err = ioutil.WriteFile(fileName, []byte("taco"), 0600)
AssertEq(nil, err)
// Open it for modification.
f, err := os.OpenFile(fileName, os.O_RDWR, 0)
t.toClose = append(t.toClose, f)
AssertEq(nil, err)
// Truncate it.
err = f.Truncate(4)
AssertEq(nil, err)
// Stat it.
fi, err := f.Stat()
AssertEq(nil, err)
ExpectEq(4, fi.Size())
// Read the contents.
contents, err := ioutil.ReadFile(fileName)
AssertEq(nil, err)
ExpectEq("taco", string(contents))
}
func (t *MemFSTest) Truncate_Larger() {
var err error
fileName := path.Join(t.mfs.Dir(), "foo")
// Create a file.
err = ioutil.WriteFile(fileName, []byte("taco"), 0600)
AssertEq(nil, err)
// Open it for modification.
f, err := os.OpenFile(fileName, os.O_RDWR, 0)
t.toClose = append(t.toClose, f)
AssertEq(nil, err)
// Truncate it.
err = f.Truncate(6)
AssertEq(nil, err)
// Stat it.
fi, err := f.Stat()
AssertEq(nil, err)
ExpectEq(6, fi.Size())
// Read the contents.
contents, err := ioutil.ReadFile(fileName)
AssertEq(nil, err)
ExpectEq("taco\x00\x00", string(contents))
}
func (t *MemFSTest) Chmod() {
var err error
fileName := path.Join(t.mfs.Dir(), "foo")
// Create a file.
err = ioutil.WriteFile(fileName, []byte(""), 0600)
AssertEq(nil, err)
// Chmod it.
err = os.Chmod(fileName, 0754)
AssertEq(nil, err)
// Stat it.
fi, err := os.Stat(fileName)
AssertEq(nil, err)
ExpectEq(os.FileMode(0754), fi.Mode())
}
func (t *MemFSTest) Chtimes() {
var err error
fileName := path.Join(t.mfs.Dir(), "foo")
// Create a file.
err = ioutil.WriteFile(fileName, []byte(""), 0600)
AssertEq(nil, err)
// Chtimes it.
expectedMtime := time.Now().Add(123 * time.Millisecond)
err = os.Chtimes(fileName, time.Time{}, expectedMtime)
AssertEq(nil, err)
// Stat it.
fi, err := os.Stat(fileName)
AssertEq(nil, err)
ExpectEq(0, fi.ModTime().Sub(expectedMtime))
}
|
// Package nodejs implements the "nodejs" runtime.
package nodejs
import (
"strings"
"github.com/apex/apex/function"
)
const (
// Runtime for inference.
Runtime = "nodejs6.10"
)
// init registers the plugin under the generic "nodejs" name. Registering
// under the pinned Runtime version string ("nodejs6.10") was a bug: the
// plugin would not be looked up for functions declaring the bare "nodejs"
// runtime, which is exactly the case Open is written to normalize.
func init() {
	function.RegisterPlugin("nodejs", &Plugin{})
}
// Plugin implementation.
type Plugin struct{}
// Open adds nodejs defaults: it pins the bare "nodejs" runtime name to the
// concrete Runtime version and fills in the default handler. Functions
// targeting other runtimes are returned unchanged.
func (p *Plugin) Open(fn *function.Function) error {
	// Not a nodejs function: nothing to do.
	if !strings.HasPrefix(fn.Runtime, "nodejs") {
		return nil
	}
	// Bare "nodejs" means "use the default version": pin it.
	if fn.Runtime == "nodejs" {
		fn.Runtime = Runtime
	}
	// Default handler entry point.
	if fn.Handler == "" {
		fn.Handler = "index.handle"
	}
	return nil
}
fix node runtime
// Package nodejs implements the "nodejs" runtime.
package nodejs
import (
"strings"
"github.com/apex/apex/function"
)
const (
// Runtime for inference.
Runtime = "nodejs6.10"
)
func init() {
function.RegisterPlugin("nodejs", &Plugin{})
}
// Plugin implementation.
type Plugin struct{}
// Open adds nodejs defaults. A function whose runtime does not start with
// "nodejs" is left untouched; otherwise a bare "nodejs" runtime is pinned
// to the concrete Runtime version and an empty handler falls back to
// "index.handle".
func (p *Plugin) Open(fn *function.Function) error {
	if strings.HasPrefix(fn.Runtime, "nodejs") {
		if fn.Runtime == "nodejs" {
			fn.Runtime = Runtime
		}
		if fn.Handler == "" {
			fn.Handler = "index.handle"
		}
	}
	return nil
}
|
package client
import (
"bytes"
"encoding/json"
"fmt"
ninchat "github.com/ninchat/ninchat-go"
)
// asError converts a recovered panic value into an error: nil stays nil,
// an error value passes through unchanged, and anything else is stringified
// via fmt.
func asError(x interface{}) error {
	switch v := x.(type) {
	case nil:
		return nil
	case error:
		return v
	default:
		return fmt.Errorf("%v", v)
	}
}
type JSON struct {
x json.RawMessage
}
func NewJSON(s string) *JSON { return &JSON{json.RawMessage(s)} }
type Strings struct {
a []string
}
func NewStrings() *Strings { return new(Strings) }
func (ss *Strings) Append(val string) { ss.a = append(ss.a, val) }
func (ss *Strings) Get(i int) string { return ss.a[i] }
func (ss *Strings) Length() int { return len(ss.a) }
func (ss *Strings) String() string { return fmt.Sprint(ss.a) }
type Props struct {
m map[string]interface{}
}
func NewProps() *Props { return &Props{make(map[string]interface{})} }
func (ps *Props) String() string { return fmt.Sprint(ps.m) }
func (ps *Props) SetBool(key string, val bool) { ps.m[key] = val }
func (ps *Props) SetInt(key string, val int) { ps.m[key] = val }
func (ps *Props) SetFloat(key string, val float64) { ps.m[key] = val }
func (ps *Props) SetString(key string, val string) { ps.m[key] = val }
func (ps *Props) SetStringArray(key string, ref *Strings) { ps.m[key] = ref.a }
func (ps *Props) SetObject(key string, ref *Props) { ps.m[key] = ref.m }
func (ps *Props) SetJSON(key string, ref *JSON) { ps.m[key] = ref.x }
func (ps *Props) GetBool(key string) (val bool, err error) {
if x, found := ps.m[key]; found {
if b, ok := x.(bool); ok {
val = b
} else {
err = fmt.Errorf("Prop type: %q is not a bool", key)
}
}
return
}
func (ps *Props) GetInt(key string) (val int, err error) {
if x, found := ps.m[key]; found {
if f, ok := x.(float64); ok {
val = int(f)
} else {
err = fmt.Errorf("Prop type: %q is not a number", key)
}
}
return
}
func (ps *Props) GetFloat(key string) (val float64, err error) {
if x, found := ps.m[key]; found {
if f, ok := x.(float64); ok {
val = f
} else {
err = fmt.Errorf("Prop type: %q is not a number", key)
}
}
return
}
func (ps *Props) GetString(key string) (val string, err error) {
if x, found := ps.m[key]; found {
if s, ok := x.(string); ok {
val = s
} else {
err = fmt.Errorf("Prop type: %q is not a string", key)
}
}
return
}
func (ps *Props) GetStringArray(key string) (ref *Strings, err error) {
if x, found := ps.m[key]; found {
if xs, ok := x.([]interface{}); ok {
ref = &Strings{make([]string, len(xs))}
for i, x := range xs {
if s, ok := x.(string); ok {
ref.a[i] = s
} else {
err = fmt.Errorf("Prop type: %q is not a string array", key)
return
}
}
} else {
err = fmt.Errorf("Prop type: %q is not an array", key)
}
}
return
}
func (ps *Props) GetObject(key string) (ref *Props, err error) {
if x, found := ps.m[key]; found {
if m, ok := x.(map[string]interface{}); ok {
ref = &Props{m}
} else {
err = fmt.Errorf("Prop type: %q is not an object", key)
}
}
return
}
type PropVisitor interface {
VisitBool(string, bool) error
VisitNumber(string, float64) error
VisitString(string, string) error
VisitStringArray(string, *Strings) error
VisitObject(string, *Props) error
}
func (ps *Props) Accept(callback PropVisitor) (err error) {
var (
array *Strings
object *Props
)
for k, x := range ps.m {
switch v := x.(type) {
case bool:
err = callback.VisitBool(k, v)
case float64:
err = callback.VisitNumber(k, v)
case string:
err = callback.VisitString(k, v)
case []interface{}:
array, err = ps.GetStringArray(k)
if err == nil {
err = callback.VisitStringArray(k, array)
}
case map[string]interface{}:
object, err = ps.GetObject(k)
if err == nil {
err = callback.VisitObject(k, object)
}
}
if err != nil {
break
}
}
return
}
type Payload struct {
a []ninchat.Frame
}
func NewPayload() *Payload { return new(Payload) }
func (p *Payload) Append(blob []byte) { p.a = append(p.a, blob) }
func (p *Payload) Get(i int) []byte { return p.a[i] }
func (p *Payload) Length() int { return len(p.a) }
func (p *Payload) String() string { return fmt.Sprint(p.a) }
type SessionEventHandler interface {
OnSessionEvent(params *Props)
}
type EventHandler interface {
OnEvent(params *Props, payload *Payload, lastReply bool)
}
type CloseHandler interface {
OnClose()
}
type ConnStateHandler interface {
OnConnState(state string)
}
type ConnActiveHandler interface {
OnConnActive()
}
type LogHandler interface {
OnLog(msg string)
}
type Session struct {
s ninchat.Session
}
func NewSession() *Session {
return new(Session)
}
func (s *Session) SetOnSessionEvent(callback SessionEventHandler) {
s.s.OnSessionEvent = func(e *ninchat.Event) {
callback.OnSessionEvent(&Props{e.Params})
}
}
func (s *Session) SetOnEvent(callback EventHandler) {
s.s.OnEvent = func(e *ninchat.Event) {
callback.OnEvent(&Props{e.Params}, &Payload{e.Payload}, e.LastReply)
}
}
func (s *Session) SetOnClose(callback CloseHandler) {
s.s.OnClose = callback.OnClose
}
func (s *Session) SetOnConnState(callback ConnStateHandler) {
s.s.OnConnState = callback.OnConnState
}
func (s *Session) SetOnConnActive(callback ConnActiveHandler) {
s.s.OnConnActive = callback.OnConnActive
}
func (s *Session) SetOnLog(callback LogHandler) {
s.s.OnLog = func(fragments ...interface{}) {
var msg bytes.Buffer
for i, x := range fragments {
fmt.Fprint(&msg, x)
if i < len(fragments)-1 {
msg.WriteString(" ")
}
}
callback.OnLog(msg.String())
}
}
func (s *Session) SetAddress(address string) {
s.s.Address = address
}
func (s *Session) SetParams(params *Props) (err error) {
defer func() {
err = asError(recover())
}()
s.s.SetParams(params.m)
return
}
func (s *Session) Open() (err error) {
defer func() {
err = asError(recover())
}()
s.s.Open()
return
}
func (s *Session) Close() {
s.s.Close()
}
// Send dispatches an action built from params (and, if non-nil, payload) on
// the session. On success actionId carries the action's "action_id" value
// when one is present as an int64. Any panic raised while building or
// sending the action is converted to an error via asError.
func (s *Session) Send(params *Props, payload *Payload) (actionId int64, err error) {
	defer func() {
		if x := recover(); x != nil {
			err = asError(x)
		}
	}()
	action := &ninchat.Action{
		Params: params.m,
	}
	if payload != nil {
		action.Payload = payload.a
	}
	// NOTE(review): a leftover debug call, s.s.OnLog(fmt.Sprintf("send:
	// payload: %#v", action.Payload)), was removed here. OnLog is only
	// assigned via SetOnLog in this file, so when no log handler was set
	// the call panicked on a nil func; the deferred recover then turned
	// every such Send into an error. It also dumped payload bytes into
	// the log.
	err = s.s.Send(action)
	if err == nil {
		if x, found := action.Params["action_id"]; found && x != nil {
			actionId = x.(int64)
		}
	}
	return
}
type Event ninchat.Event
func (e *Event) GetParams() *Props { return &Props{e.Params} }
func (e *Event) GetPayload() *Payload { return &Payload{e.Payload} }
func (e *Event) String() string { return fmt.Sprint(*e) }
type Events struct {
a []*ninchat.Event
}
func (es *Events) Get(i int) *Event { return (*Event)(es.a[i]) }
func (es *Events) Length() int { return len(es.a) }
func (es *Events) String() string { return fmt.Sprint(es.a) }
type Caller struct {
c ninchat.Caller
}
func NewCaller() *Caller {
return new(Caller)
}
func (c *Caller) SetAddress(address string) {
c.c.Address = address
}
func (c *Caller) Call(params *Props, payload *Payload) (events *Events, err error) {
defer func() {
if x := recover(); x != nil {
err = asError(x)
}
}()
action := &ninchat.Action{
Params: params.m,
}
if payload != nil {
action.Payload = payload.a
}
es, err := c.c.Call(action)
if err == nil {
events = &Events{es}
}
return
}
mobile: payload: append copies
package client
import (
"bytes"
"encoding/json"
"fmt"
ninchat "github.com/ninchat/ninchat-go"
)
// asError converts a recovered panic value into an error: nil stays nil,
// an error value passes through unchanged, and anything else is stringified
// via fmt.
func asError(x interface{}) error {
	switch v := x.(type) {
	case nil:
		return nil
	case error:
		return v
	default:
		return fmt.Errorf("%v", v)
	}
}
type JSON struct {
x json.RawMessage
}
func NewJSON(s string) *JSON { return &JSON{json.RawMessage(s)} }
type Strings struct {
a []string
}
func NewStrings() *Strings { return new(Strings) }
func (ss *Strings) Append(val string) { ss.a = append(ss.a, val) }
func (ss *Strings) Get(i int) string { return ss.a[i] }
func (ss *Strings) Length() int { return len(ss.a) }
func (ss *Strings) String() string { return fmt.Sprint(ss.a) }
type Props struct {
m map[string]interface{}
}
func NewProps() *Props { return &Props{make(map[string]interface{})} }
func (ps *Props) String() string { return fmt.Sprint(ps.m) }
func (ps *Props) SetBool(key string, val bool) { ps.m[key] = val }
func (ps *Props) SetInt(key string, val int) { ps.m[key] = val }
func (ps *Props) SetFloat(key string, val float64) { ps.m[key] = val }
func (ps *Props) SetString(key string, val string) { ps.m[key] = val }
func (ps *Props) SetStringArray(key string, ref *Strings) { ps.m[key] = ref.a }
func (ps *Props) SetObject(key string, ref *Props) { ps.m[key] = ref.m }
func (ps *Props) SetJSON(key string, ref *JSON) { ps.m[key] = ref.x }
func (ps *Props) GetBool(key string) (val bool, err error) {
if x, found := ps.m[key]; found {
if b, ok := x.(bool); ok {
val = b
} else {
err = fmt.Errorf("Prop type: %q is not a bool", key)
}
}
return
}
func (ps *Props) GetInt(key string) (val int, err error) {
if x, found := ps.m[key]; found {
if f, ok := x.(float64); ok {
val = int(f)
} else {
err = fmt.Errorf("Prop type: %q is not a number", key)
}
}
return
}
func (ps *Props) GetFloat(key string) (val float64, err error) {
if x, found := ps.m[key]; found {
if f, ok := x.(float64); ok {
val = f
} else {
err = fmt.Errorf("Prop type: %q is not a number", key)
}
}
return
}
func (ps *Props) GetString(key string) (val string, err error) {
if x, found := ps.m[key]; found {
if s, ok := x.(string); ok {
val = s
} else {
err = fmt.Errorf("Prop type: %q is not a string", key)
}
}
return
}
func (ps *Props) GetStringArray(key string) (ref *Strings, err error) {
if x, found := ps.m[key]; found {
if xs, ok := x.([]interface{}); ok {
ref = &Strings{make([]string, len(xs))}
for i, x := range xs {
if s, ok := x.(string); ok {
ref.a[i] = s
} else {
err = fmt.Errorf("Prop type: %q is not a string array", key)
return
}
}
} else {
err = fmt.Errorf("Prop type: %q is not an array", key)
}
}
return
}
func (ps *Props) GetObject(key string) (ref *Props, err error) {
if x, found := ps.m[key]; found {
if m, ok := x.(map[string]interface{}); ok {
ref = &Props{m}
} else {
err = fmt.Errorf("Prop type: %q is not an object", key)
}
}
return
}
type PropVisitor interface {
VisitBool(string, bool) error
VisitNumber(string, float64) error
VisitString(string, string) error
VisitStringArray(string, *Strings) error
VisitObject(string, *Props) error
}
func (ps *Props) Accept(callback PropVisitor) (err error) {
var (
array *Strings
object *Props
)
for k, x := range ps.m {
switch v := x.(type) {
case bool:
err = callback.VisitBool(k, v)
case float64:
err = callback.VisitNumber(k, v)
case string:
err = callback.VisitString(k, v)
case []interface{}:
array, err = ps.GetStringArray(k)
if err == nil {
err = callback.VisitStringArray(k, array)
}
case map[string]interface{}:
object, err = ps.GetObject(k)
if err == nil {
err = callback.VisitObject(k, object)
}
}
if err != nil {
break
}
}
return
}
type Payload struct {
a []ninchat.Frame
}
func NewPayload() *Payload { return new(Payload) }
func (p *Payload) Append(blob []byte) {
p.a = append(p.a, append([]byte{}, blob...))
}
func (p *Payload) Get(i int) []byte { return p.a[i] }
func (p *Payload) Length() int { return len(p.a) }
func (p *Payload) String() string { return fmt.Sprint(p.a) }
type SessionEventHandler interface {
OnSessionEvent(params *Props)
}
type EventHandler interface {
OnEvent(params *Props, payload *Payload, lastReply bool)
}
type CloseHandler interface {
OnClose()
}
type ConnStateHandler interface {
OnConnState(state string)
}
type ConnActiveHandler interface {
OnConnActive()
}
type LogHandler interface {
OnLog(msg string)
}
type Session struct {
s ninchat.Session
}
func NewSession() *Session {
return new(Session)
}
func (s *Session) SetOnSessionEvent(callback SessionEventHandler) {
s.s.OnSessionEvent = func(e *ninchat.Event) {
callback.OnSessionEvent(&Props{e.Params})
}
}
func (s *Session) SetOnEvent(callback EventHandler) {
s.s.OnEvent = func(e *ninchat.Event) {
callback.OnEvent(&Props{e.Params}, &Payload{e.Payload}, e.LastReply)
}
}
func (s *Session) SetOnClose(callback CloseHandler) {
s.s.OnClose = callback.OnClose
}
func (s *Session) SetOnConnState(callback ConnStateHandler) {
s.s.OnConnState = callback.OnConnState
}
func (s *Session) SetOnConnActive(callback ConnActiveHandler) {
s.s.OnConnActive = callback.OnConnActive
}
func (s *Session) SetOnLog(callback LogHandler) {
s.s.OnLog = func(fragments ...interface{}) {
var msg bytes.Buffer
for i, x := range fragments {
fmt.Fprint(&msg, x)
if i < len(fragments)-1 {
msg.WriteString(" ")
}
}
callback.OnLog(msg.String())
}
}
func (s *Session) SetAddress(address string) {
s.s.Address = address
}
func (s *Session) SetParams(params *Props) (err error) {
defer func() {
err = asError(recover())
}()
s.s.SetParams(params.m)
return
}
func (s *Session) Open() (err error) {
defer func() {
err = asError(recover())
}()
s.s.Open()
return
}
func (s *Session) Close() {
s.s.Close()
}
// Send dispatches an action built from params (and, if non-nil, payload) on
// the session. On success actionId carries the action's "action_id" value
// when one is present as an int64. Any panic raised while building or
// sending the action is converted to an error via asError.
func (s *Session) Send(params *Props, payload *Payload) (actionId int64, err error) {
	defer func() {
		if x := recover(); x != nil {
			err = asError(x)
		}
	}()
	action := &ninchat.Action{
		Params: params.m,
	}
	if payload != nil {
		action.Payload = payload.a
	}
	// NOTE(review): a leftover debug call, s.s.OnLog(fmt.Sprintf("send:
	// payload: %#v", action.Payload)), was removed here. OnLog is only
	// assigned via SetOnLog in this file, so when no log handler was set
	// the call panicked on a nil func; the deferred recover then turned
	// every such Send into an error. It also dumped payload bytes into
	// the log.
	err = s.s.Send(action)
	if err == nil {
		if x, found := action.Params["action_id"]; found && x != nil {
			actionId = x.(int64)
		}
	}
	return
}
type Event ninchat.Event
func (e *Event) GetParams() *Props { return &Props{e.Params} }
func (e *Event) GetPayload() *Payload { return &Payload{e.Payload} }
func (e *Event) String() string { return fmt.Sprint(*e) }
type Events struct {
a []*ninchat.Event
}
func (es *Events) Get(i int) *Event { return (*Event)(es.a[i]) }
func (es *Events) Length() int { return len(es.a) }
func (es *Events) String() string { return fmt.Sprint(es.a) }
type Caller struct {
c ninchat.Caller
}
func NewCaller() *Caller {
return new(Caller)
}
func (c *Caller) SetAddress(address string) {
c.c.Address = address
}
func (c *Caller) Call(params *Props, payload *Payload) (events *Events, err error) {
defer func() {
if x := recover(); x != nil {
err = asError(x)
}
}()
action := &ninchat.Action{
Params: params.m,
}
if payload != nil {
action.Payload = payload.a
}
es, err := c.c.Call(action)
if err == nil {
events = &Events{es}
}
return
}
|
package system
import (
"fmt"
"github.com/influxdb/telegraf/plugins"
)
type MemStats struct {
ps PS
}
func (_ *MemStats) Description() string {
return "Read metrics about memory usage"
}
func (_ *MemStats) SampleConfig() string { return "" }
// Gather collects virtual-memory metrics from the underlying PS provider
// and feeds each field to the accumulator without tags. Returns a wrapped
// error if the provider fails.
func (s *MemStats) Gather(acc plugins.Accumulator) error {
	vm, err := s.ps.VMStat()
	if err != nil {
		return fmt.Errorf("error getting virtual memory info: %s", err)
	}
	// Typed nil map: metrics are emitted untagged.
	vmtags := map[string]string(nil)
	acc.Add("total", vm.Total, vmtags)
	acc.Add("available", vm.Available, vmtags)
	acc.Add("used", vm.Used, vmtags)
	acc.Add("used_perc", vm.UsedPercent, vmtags)
	acc.Add("free", vm.Free, vmtags)
	acc.Add("active", vm.Active, vmtags)
	acc.Add("inactive", vm.Inactive, vmtags)
	acc.Add("buffers", vm.Buffers, vmtags)
	acc.Add("cached", vm.Cached, vmtags)
	acc.Add("wired", vm.Wired, vmtags)
	acc.Add("shared", vm.Shared, vmtags)
	return nil
}
type SwapStats struct {
ps PS
}
func (_ *SwapStats) Description() string {
return "Read metrics about swap memory usage"
}
func (_ *SwapStats) SampleConfig() string { return "" }
func (s *SwapStats) Gather(acc plugins.Accumulator) error {
swap, err := s.ps.SwapStat()
if err != nil {
return fmt.Errorf("error getting swap memory info: %s", err)
}
swaptags := map[string]string(nil)
acc.Add("total", swap.Total, swaptags)
acc.Add("used", swap.Used, swaptags)
acc.Add("free", swap.Free, swaptags)
acc.Add("used_perc", swap.UsedPercent, swaptags)
acc.Add("in", swap.Sin, swaptags)
acc.Add("out", swap.Sout, swaptags)
return nil
}
func init() {
plugins.Add("mem", func() plugins.Plugin {
return &MemStats{ps: &systemPS{}}
})
plugins.Add("swap", func() plugins.Plugin {
return &SwapStats{ps: &systemPS{}}
})
}
Refactor memory stats, remove some, add 'actual_' stats
package system
import (
"fmt"
"github.com/influxdb/telegraf/plugins"
)
type MemStats struct {
ps PS
}
func (_ *MemStats) Description() string {
return "Read metrics about memory usage"
}
func (_ *MemStats) SampleConfig() string { return "" }
// Gather collects virtual-memory metrics from the underlying PS provider
// and feeds them to the accumulator without tags. "actual_*" metrics are
// derived from Available (free plus reclaimable caches/buffers).
//
// The percentage metrics are computed in floating point: the previous
// expression 100*vm.Used/vm.Total performed integer division (assuming the
// usual integer stat fields — TODO confirm against the PS provider's types),
// truncating every value to a whole percent, and divided by zero when the
// provider reported Total == 0. Percentages are skipped in that degenerate
// case.
func (s *MemStats) Gather(acc plugins.Accumulator) error {
	vm, err := s.ps.VMStat()
	if err != nil {
		return fmt.Errorf("error getting virtual memory info: %s", err)
	}
	// Typed nil map: metrics are emitted untagged.
	vmtags := map[string]string(nil)
	acc.Add("total", vm.Total, vmtags)
	acc.Add("actual_free", vm.Available, vmtags)
	acc.Add("actual_used", vm.Total-vm.Available, vmtags)
	acc.Add("used", vm.Used, vmtags)
	acc.Add("free", vm.Free, vmtags)
	if vm.Total > 0 {
		acc.Add("used_percent", 100*float64(vm.Used)/float64(vm.Total), vmtags)
		acc.Add("actual_used_percent", 100*float64(vm.Total-vm.Available)/float64(vm.Total), vmtags)
	}
	return nil
}
type SwapStats struct {
ps PS
}
func (_ *SwapStats) Description() string {
return "Read metrics about swap memory usage"
}
func (_ *SwapStats) SampleConfig() string { return "" }
func (s *SwapStats) Gather(acc plugins.Accumulator) error {
swap, err := s.ps.SwapStat()
if err != nil {
return fmt.Errorf("error getting swap memory info: %s", err)
}
swaptags := map[string]string(nil)
acc.Add("total", swap.Total, swaptags)
acc.Add("used", swap.Used, swaptags)
acc.Add("free", swap.Free, swaptags)
acc.Add("used_percent", swap.UsedPercent, swaptags)
acc.Add("in", swap.Sin, swaptags)
acc.Add("out", swap.Sout, swaptags)
return nil
}
func init() {
plugins.Add("mem", func() plugins.Plugin {
return &MemStats{ps: &systemPS{}}
})
plugins.Add("swap", func() plugins.Plugin {
return &SwapStats{ps: &systemPS{}}
})
}
|
package object
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
fdiff "gopkg.in/src-d/go-git.v4/plumbing/format/diff"
"gopkg.in/src-d/go-git.v4/utils/diff"
dmp "github.com/sergi/go-diff/diffmatchpatch"
)
var (
ErrCanceled = errors.New("operation canceled")
)
func getPatch(message string, changes ...*Change) (*Patch, error) {
ctx := context.Background()
return getPatchContext(ctx, message, changes...)
}
func getPatchContext(ctx context.Context, message string, changes ...*Change) (*Patch, error) {
var filePatches []fdiff.FilePatch
for _, c := range changes {
select {
case <-ctx.Done():
return nil, ErrCanceled
default:
}
fp, err := filePatchWithContext(ctx, c)
if err != nil {
return nil, err
}
filePatches = append(filePatches, fp)
}
return &Patch{message, filePatches}, nil
}
func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, error) {
from, to, err := c.Files()
if err != nil {
return nil, err
}
fromContent, fIsBinary, err := fileContent(from)
if err != nil {
return nil, err
}
toContent, tIsBinary, err := fileContent(to)
if err != nil {
return nil, err
}
if fIsBinary || tIsBinary {
return &textFilePatch{from: c.From, to: c.To}, nil
}
diffs := diff.Do(fromContent, toContent)
var chunks []fdiff.Chunk
for _, d := range diffs {
select {
case <-ctx.Done():
return nil, ErrCanceled
default:
}
var op fdiff.Operation
switch d.Type {
case dmp.DiffEqual:
op = fdiff.Equal
case dmp.DiffDelete:
op = fdiff.Delete
case dmp.DiffInsert:
op = fdiff.Add
}
chunks = append(chunks, &textChunk{d.Text, op})
}
return &textFilePatch{
chunks: chunks,
from: c.From,
to: c.To,
}, nil
}
func filePatch(c *Change) (fdiff.FilePatch, error) {
return filePatchWithContext(context.Background(), c)
}
func fileContent(f *File) (content string, isBinary bool, err error) {
if f == nil {
return
}
isBinary, err = f.IsBinary()
if err != nil || isBinary {
return
}
content, err = f.Contents()
return
}
// textPatch is an implementation of fdiff.Patch interface
type Patch struct {
message string
filePatches []fdiff.FilePatch
}
func (t *Patch) FilePatches() []fdiff.FilePatch {
return t.filePatches
}
func (t *Patch) Message() string {
return t.message
}
func (p *Patch) Encode(w io.Writer) error {
ue := fdiff.NewUnifiedEncoder(w, fdiff.DefaultContextLines)
return ue.Encode(p)
}
func (p *Patch) Stats() FileStats {
return getFileStatsFromFilePatches(p.FilePatches())
}
func (p *Patch) String() string {
buf := bytes.NewBuffer(nil)
err := p.Encode(buf)
if err != nil {
return fmt.Sprintf("malformed patch: %s", err.Error())
}
return buf.String()
}
// changeEntryWrapper is an implementation of fdiff.File interface
type changeEntryWrapper struct {
ce ChangeEntry
}
func (f *changeEntryWrapper) Hash() plumbing.Hash {
if !f.ce.TreeEntry.Mode.IsFile() {
return plumbing.ZeroHash
}
return f.ce.TreeEntry.Hash
}
func (f *changeEntryWrapper) Mode() filemode.FileMode {
return f.ce.TreeEntry.Mode
}
func (f *changeEntryWrapper) Path() string {
if !f.ce.TreeEntry.Mode.IsFile() {
return ""
}
return f.ce.Name
}
func (f *changeEntryWrapper) Empty() bool {
return !f.ce.TreeEntry.Mode.IsFile()
}
// textFilePatch is an implementation of fdiff.FilePatch interface
type textFilePatch struct {
chunks []fdiff.Chunk
from, to ChangeEntry
}
func (tf *textFilePatch) Files() (from fdiff.File, to fdiff.File) {
f := &changeEntryWrapper{tf.from}
t := &changeEntryWrapper{tf.to}
if !f.Empty() {
from = f
}
if !t.Empty() {
to = t
}
return
}
func (t *textFilePatch) IsBinary() bool {
return len(t.chunks) == 0
}
func (t *textFilePatch) Chunks() []fdiff.Chunk {
return t.chunks
}
// textChunk is an implementation of fdiff.Chunk interface
type textChunk struct {
content string
op fdiff.Operation
}
func (t *textChunk) Content() string {
return t.content
}
func (t *textChunk) Type() fdiff.Operation {
return t.op
}
// FileStat stores the status of changes in content of a file.
type FileStat struct {
Name string
Addition int
Deletion int
}
func (fs FileStat) String() string {
return printStat([]FileStat{fs})
}
// FileStats is a collection of FileStat.
type FileStats []FileStat
func (fileStats FileStats) String() string {
return printStat(fileStats)
}
func printStat(fileStats []FileStat) string {
padLength := float64(len(" "))
newlineLength := float64(len("\n"))
separatorLength := float64(len("|"))
// Soft line length limit. The text length calculation below excludes
// length of the change number. Adding that would take it closer to 80,
// but probably not more than 80, until it's a huge number.
lineLength := 72.0
// Get the longest filename and longest total change.
var longestLength float64
var longestTotalChange float64
for _, fs := range fileStats {
if int(longestLength) < len(fs.Name) {
longestLength = float64(len(fs.Name))
}
totalChange := fs.Addition + fs.Deletion
if int(longestTotalChange) < totalChange {
longestTotalChange = float64(totalChange)
}
}
// Parts of the output:
// <pad><filename><pad>|<pad><changeNumber><pad><+++/---><newline>
// example: " main.go | 10 +++++++--- "
// <pad><filename><pad>
leftTextLength := padLength + longestLength + padLength
// <pad><number><pad><+++++/-----><newline>
// Excluding number length here.
rightTextLength := padLength + padLength + newlineLength
totalTextArea := leftTextLength + separatorLength + rightTextLength
heightOfHistogram := lineLength - totalTextArea
// Scale the histogram.
var scaleFactor float64
if longestTotalChange > heightOfHistogram {
// Scale down to heightOfHistogram.
scaleFactor = float64(longestTotalChange / heightOfHistogram)
} else {
scaleFactor = 1.0
}
finalOutput := ""
for _, fs := range fileStats {
addn := float64(fs.Addition)
deln := float64(fs.Deletion)
adds := strings.Repeat("+", int(math.Floor(addn/scaleFactor)))
dels := strings.Repeat("-", int(math.Floor(deln/scaleFactor)))
finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels)
}
return finalOutput
}
// getFileStatsFromFilePatches aggregates per-file addition/deletion line
// counts from a set of file patches. Patches with no chunks (binary files,
// submodule reference updates) are skipped entirely.
//
// NOTE(review): lines are counted via strings.Count(content, "\n"), so a
// final chunk that does not end in a newline undercounts by one line —
// confirm whether that is acceptable for callers of Stats().
func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
	var fileStats FileStats
	for _, fp := range filePatches {
		// ignore empty patches (binary files, submodule refs updates)
		if len(fp.Chunks()) == 0 {
			continue
		}
		cs := FileStat{}
		from, to := fp.Files()
		if from == nil {
			// New File is created.
			cs.Name = to.Path()
		} else if to == nil {
			// File is deleted.
			cs.Name = from.Path()
		} else if from.Path() != to.Path() {
			// File is renamed. Not supported.
			// cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
		} else {
			cs.Name = from.Path()
		}
		// Tally added/deleted line counts; Equal chunks are ignored.
		for _, chunk := range fp.Chunks() {
			switch chunk.Type() {
			case fdiff.Add:
				cs.Addition += strings.Count(chunk.Content(), "\n")
			case fdiff.Delete:
				cs.Deletion += strings.Count(chunk.Content(), "\n")
			}
		}
		fileStats = append(fileStats, cs)
	}
	return fileStats
}
plumbing: object, Count stats properly when no new line added at the end. Fixes #1074
Signed-off-by: Oleksii Shnyra <4cafbbb36498ef57bc59b8a350893edee74db4f8@global>
package object
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
fdiff "gopkg.in/src-d/go-git.v4/plumbing/format/diff"
"gopkg.in/src-d/go-git.v4/utils/diff"
dmp "github.com/sergi/go-diff/diffmatchpatch"
)
var (
ErrCanceled = errors.New("operation canceled")
)
func getPatch(message string, changes ...*Change) (*Patch, error) {
ctx := context.Background()
return getPatchContext(ctx, message, changes...)
}
func getPatchContext(ctx context.Context, message string, changes ...*Change) (*Patch, error) {
var filePatches []fdiff.FilePatch
for _, c := range changes {
select {
case <-ctx.Done():
return nil, ErrCanceled
default:
}
fp, err := filePatchWithContext(ctx, c)
if err != nil {
return nil, err
}
filePatches = append(filePatches, fp)
}
return &Patch{message, filePatches}, nil
}
func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, error) {
from, to, err := c.Files()
if err != nil {
return nil, err
}
fromContent, fIsBinary, err := fileContent(from)
if err != nil {
return nil, err
}
toContent, tIsBinary, err := fileContent(to)
if err != nil {
return nil, err
}
if fIsBinary || tIsBinary {
return &textFilePatch{from: c.From, to: c.To}, nil
}
diffs := diff.Do(fromContent, toContent)
var chunks []fdiff.Chunk
for _, d := range diffs {
select {
case <-ctx.Done():
return nil, ErrCanceled
default:
}
var op fdiff.Operation
switch d.Type {
case dmp.DiffEqual:
op = fdiff.Equal
case dmp.DiffDelete:
op = fdiff.Delete
case dmp.DiffInsert:
op = fdiff.Add
}
chunks = append(chunks, &textChunk{d.Text, op})
}
return &textFilePatch{
chunks: chunks,
from: c.From,
to: c.To,
}, nil
}
func filePatch(c *Change) (fdiff.FilePatch, error) {
return filePatchWithContext(context.Background(), c)
}
func fileContent(f *File) (content string, isBinary bool, err error) {
if f == nil {
return
}
isBinary, err = f.IsBinary()
if err != nil || isBinary {
return
}
content, err = f.Contents()
return
}
// textPatch is an implementation of fdiff.Patch interface
type Patch struct {
message string
filePatches []fdiff.FilePatch
}
func (t *Patch) FilePatches() []fdiff.FilePatch {
return t.filePatches
}
func (t *Patch) Message() string {
return t.message
}
func (p *Patch) Encode(w io.Writer) error {
ue := fdiff.NewUnifiedEncoder(w, fdiff.DefaultContextLines)
return ue.Encode(p)
}
func (p *Patch) Stats() FileStats {
return getFileStatsFromFilePatches(p.FilePatches())
}
func (p *Patch) String() string {
buf := bytes.NewBuffer(nil)
err := p.Encode(buf)
if err != nil {
return fmt.Sprintf("malformed patch: %s", err.Error())
}
return buf.String()
}
// changeEntryWrapper is an implementation of fdiff.File interface
type changeEntryWrapper struct {
ce ChangeEntry
}
func (f *changeEntryWrapper) Hash() plumbing.Hash {
if !f.ce.TreeEntry.Mode.IsFile() {
return plumbing.ZeroHash
}
return f.ce.TreeEntry.Hash
}
func (f *changeEntryWrapper) Mode() filemode.FileMode {
return f.ce.TreeEntry.Mode
}
func (f *changeEntryWrapper) Path() string {
if !f.ce.TreeEntry.Mode.IsFile() {
return ""
}
return f.ce.Name
}
func (f *changeEntryWrapper) Empty() bool {
return !f.ce.TreeEntry.Mode.IsFile()
}
// textFilePatch is an implementation of fdiff.FilePatch interface
type textFilePatch struct {
chunks []fdiff.Chunk
from, to ChangeEntry
}
func (tf *textFilePatch) Files() (from fdiff.File, to fdiff.File) {
f := &changeEntryWrapper{tf.from}
t := &changeEntryWrapper{tf.to}
if !f.Empty() {
from = f
}
if !t.Empty() {
to = t
}
return
}
func (t *textFilePatch) IsBinary() bool {
return len(t.chunks) == 0
}
func (t *textFilePatch) Chunks() []fdiff.Chunk {
return t.chunks
}
// textChunk is an implementation of fdiff.Chunk interface
type textChunk struct {
content string
op fdiff.Operation
}
func (t *textChunk) Content() string {
return t.content
}
func (t *textChunk) Type() fdiff.Operation {
return t.op
}
// FileStat stores the status of changes in content of a file.
type FileStat struct {
Name string
Addition int
Deletion int
}
func (fs FileStat) String() string {
return printStat([]FileStat{fs})
}
// FileStats is a collection of FileStat.
type FileStats []FileStat
func (fileStats FileStats) String() string {
return printStat(fileStats)
}
// printStat renders a git-style "diffstat" listing: one line per file with
// its name, total changed-line count, and a +/- histogram scaled to fit a
// soft 72-column limit.
func printStat(fileStats []FileStat) string {
	padLength := float64(len(" "))
	newlineLength := float64(len("\n"))
	separatorLength := float64(len("|"))
	// Soft line length limit. The text length calculation below excludes
	// length of the change number. Adding that would take it closer to 80,
	// but probably not more than 80, until it's a huge number.
	lineLength := 72.0
	// Get the longest filename and longest total change.
	var longestLength float64
	var longestTotalChange float64
	for _, fs := range fileStats {
		if int(longestLength) < len(fs.Name) {
			longestLength = float64(len(fs.Name))
		}
		totalChange := fs.Addition + fs.Deletion
		if int(longestTotalChange) < totalChange {
			longestTotalChange = float64(totalChange)
		}
	}
	// Parts of the output:
	// <pad><filename><pad>|<pad><changeNumber><pad><+++/---><newline>
	// example: " main.go | 10 +++++++--- "
	// <pad><filename><pad>
	leftTextLength := padLength + longestLength + padLength
	// <pad><number><pad><+++++/-----><newline>
	// Excluding number length here.
	rightTextLength := padLength + padLength + newlineLength
	totalTextArea := leftTextLength + separatorLength + rightTextLength
	// Columns left over for the +/- histogram.
	// NOTE(review): with a sufficiently long filename this goes negative,
	// making scaleFactor negative below and strings.Repeat panic on a
	// negative count — confirm whether callers can hit this.
	heightOfHistogram := lineLength - totalTextArea
	// Scale the histogram.
	var scaleFactor float64
	if longestTotalChange > heightOfHistogram {
		// Scale down to heightOfHistogram.
		// (The float64 conversion is redundant: both operands are
		// already float64.)
		scaleFactor = float64(longestTotalChange / heightOfHistogram)
	} else {
		scaleFactor = 1.0
	}
	finalOutput := ""
	for _, fs := range fileStats {
		addn := float64(fs.Addition)
		deln := float64(fs.Deletion)
		adds := strings.Repeat("+", int(math.Floor(addn/scaleFactor)))
		dels := strings.Repeat("-", int(math.Floor(deln/scaleFactor)))
		finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels)
	}
	return finalOutput
}
// getFileStatsFromFilePatches aggregates per-file addition/deletion line
// counts from a set of file patches. Patches with no chunks (binary files,
// submodule reference updates) are skipped entirely. A chunk whose content
// does not end in a newline still counts as one (partial) line.
func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
	var fileStats FileStats
	for _, fp := range filePatches {
		// ignore empty patches (binary files, submodule refs updates)
		if len(fp.Chunks()) == 0 {
			continue
		}
		cs := FileStat{}
		from, to := fp.Files()
		if from == nil {
			// New File is created.
			cs.Name = to.Path()
		} else if to == nil {
			// File is deleted.
			cs.Name = from.Path()
		} else if from.Path() != to.Path() {
			// File is renamed. Not supported.
			// cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
		} else {
			cs.Name = from.Path()
		}
		for _, chunk := range fp.Chunks() {
			s := chunk.Content()
			// BUG FIX: the previous code indexed s[len(s)-1]
			// unconditionally, panicking with an index-out-of-range
			// on an empty chunk. Empty content contributes no lines,
			// so it is skipped.
			if s == "" {
				continue
			}
			switch chunk.Type() {
			case fdiff.Add:
				cs.Addition += strings.Count(s, "\n")
				if s[len(s)-1] != '\n' {
					cs.Addition++
				}
			case fdiff.Delete:
				cs.Deletion += strings.Count(s, "\n")
				if s[len(s)-1] != '\n' {
					cs.Deletion++
				}
			}
		}
		fileStats = append(fileStats, cs)
	}
	return fileStats
}
|
package model
import (
"github.com/stretchr/testify/assert"
"testing"
)
func fixtureEmptySlave() *Slave {
return &Slave{
Hostname: "host1",
Port: 1,
MongodPortRangeBegin: 2,
MongodPortRangeEnd: 3,
PersistentStorage: true,
Mongods: []*Mongod{},
ConfiguredState: SlaveStateActive,
}
}
func fixtureEmptyMongod() *Mongod {
return &Mongod{
Port: 8080,
ReplSetName: "repl1",
}
}
func fixtureEmptyMongodState() MongodState {
return MongodState{}
}
func fixtureEmptyRiskGroup() *RiskGroup {
return &RiskGroup{
Name: "rg1",
Slaves: []*Slave{},
}
}
func fixtureEmptyReplicaSet() *ReplicaSet {
return &ReplicaSet{
Name: "repl1",
PersistentMemberCount: 1,
VolatileMemberCount: 2,
ConfigureAsShardingConfigServer: false,
}
}
func fixtureEmptyProblem() *Problem {
return &Problem{
Description: "Test",
}
}
////////////////////////////////////////////////////////////////////////////////
func TestCanInitializeDB(t *testing.T) {
db, _, err := InitializeTestDB()
defer db.CloseAndDrop()
assert.NoError(t, err)
}
/*
This elaborate test demonstrates how resolving an association works in gorm.
Check the assertions to learn about the behavior of gorm.
*/
func TestRelationshipMongodParentSlave(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
s := fixtureEmptySlave()
tx.Create(s)
m := fixtureEmptyMongod()
m.ParentSlave = s
tx.Create(m)
assert.Equal(t, m.ParentSlaveID, s.ID)
assert.Equal(t, s.Mongods, []*Mongod{})
var sdb Slave
// Check what happens when just SELECTing the slave
err := tx.First(&sdb).Error
assert.NoError(t, err)
assert.Nil(t, sdb.Mongods)
// Now resolve the slave->mongod 1:n association
err = tx.Model(&sdb).Related(&sdb.Mongods, "Mongods").Error
assert.NoError(t, err)
assert.Equal(t, len(sdb.Mongods), 1)
assert.Equal(t, sdb.Mongods[0].ReplSetName, m.ReplSetName)
assert.Zero(t, sdb.Mongods[0].ParentSlave)
assert.Equal(t, sdb.Mongods[0].ParentSlaveID, s.ID)
// Now resolve the mongod->(parent)slave relation
parentSlave := &Slave{}
err = tx.Model(&sdb.Mongods[0]).Related(parentSlave, "ParentSlave").Error
assert.NoError(t, err)
assert.NotZero(t, parentSlave)
assert.Equal(t, s.ID, parentSlave.ID)
}
// Test RiskGroup Slave relationship
func TestRiskGroupSlaveRelationship(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
s := fixtureEmptySlave()
r := fixtureEmptyRiskGroup()
r.Slaves = []*Slave{s}
err := tx.Create(&r).Error
assert.NoError(t, err)
var rdb RiskGroup
err = tx.First(&rdb).Error
assert.NoError(t, err)
assert.Zero(t, rdb.Slaves)
err = tx.Model(&rdb).Related(&rdb.Slaves, "Slaves").Error
assert.NoError(t, err)
assert.NotZero(t, rdb.Slaves)
assert.Equal(t, len(rdb.Slaves), 1)
assert.Equal(t, rdb.Slaves[0].ID, s.ID)
}
// Test ReplicaSet - Mongod Relationship
func TestReplicaSetMongodRelationship(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
r := fixtureEmptyReplicaSet()
m := fixtureEmptyMongod()
r.Mongods = []*Mongod{m}
err := tx.Create(&r).Error
assert.NoError(t, err)
var rdb ReplicaSet
err = tx.First(&rdb).Error
assert.NoError(t, err)
assert.Zero(t, rdb.Mongods)
err = tx.Model(&rdb).Related(&rdb.Mongods, "Mongods").Error
assert.NoError(t, err)
assert.NotZero(t, rdb.Mongods)
assert.Equal(t, len(rdb.Mongods), 1)
assert.Equal(t, rdb.Mongods[0].ID, m.ID)
}
// Test Mongod - MongodState relationship
func TestMongodMongodStateRelationship(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
m := fixtureEmptyMongod()
o := MongodState{
IsShardingConfigServer: false,
ExecutionState: MongodExecutionStateNotRunning,
ReplicaSetMembers: []ReplicaSetMember{},
}
d := MongodState{
IsShardingConfigServer: false,
ExecutionState: MongodExecutionStateRunning,
ReplicaSetMembers: []ReplicaSetMember{},
}
assert.NoError(t, tx.Create(m).Error)
o.ParentMongodID = m.ID
d.ParentMongodID = m.ID
assert.NoError(t, tx.Create(&o).Error)
assert.NoError(t, tx.Create(&d).Error)
assert.NoError(t, tx.Model(&m).Update("DesiredStateID", d.ID).Error)
assert.NoError(t, tx.Model(&m).Update("ObservedStateID", o.ID).Error)
var mdb Mongod
// Observed
assert.NoError(t, tx.First(&mdb).Error)
assert.Zero(t, mdb.ObservedState)
assert.NoError(t, tx.Model(&mdb).Related(&mdb.ObservedState, "ObservedState").Error)
assert.NotZero(t, mdb.ObservedState)
assert.Equal(t, mdb.ObservedState.ExecutionState, MongodExecutionStateNotRunning)
assert.NoError(t, tx.Model(&mdb).Related(&mdb.DesiredState, "DesiredState").Error)
assert.NotZero(t, mdb.DesiredState)
assert.Equal(t, mdb.DesiredState.ExecutionState, MongodExecutionStateRunning)
}
// Test MongodState - ReplicaSetMember relationship
// Verifies that the 1:n association MongodState -> ReplicaSetMembers is not
// populated by a plain First() and must be resolved explicitly via Related().
func TestMongodStateReplicaSetMembersRelationship(t *testing.T) {
	db, _, _ := InitializeTestDB()
	defer db.CloseAndDrop()
	tx := db.Begin()
	defer tx.Rollback()
	m := ReplicaSetMember{Hostname: "h1"}
	s := MongodState{ReplicaSetMembers: []ReplicaSetMember{m}}
	assert.NoError(t, tx.Create(&m).Error)
	s.ParentMongodID = m.ID
	assert.NoError(t, tx.Create(&s).Error)
	// NOTE(review): updating "DesiredStateID" on a ReplicaSetMember looks like
	// a copy-paste from the Mongod tests above — confirm ReplicaSetMember
	// actually has such a column.
	assert.NoError(t, tx.Model(&m).Update("DesiredStateID", s.ID).Error)
	var sdb MongodState
	assert.NoError(t, tx.First(&sdb).Error)
	// A plain SELECT does not resolve the association.
	assert.Zero(t, sdb.ReplicaSetMembers)
	assert.NoError(t, tx.Model(&sdb).Related(&sdb.ReplicaSetMembers, "ReplicaSetMembers").Error)
	assert.NotZero(t, sdb.ReplicaSetMembers)
	assert.Equal(t, len(sdb.ReplicaSetMembers), 1)
	assert.Equal(t, sdb.ReplicaSetMembers[0].Hostname, m.Hostname)
}
func TestDeleteBehavior(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
m := fixtureEmptyMongod()
m.ID = 1000
// Create it
tx.Create(&m)
var mdb Mongod
// Read it once
d := tx.First(&mdb)
assert.NoError(t, d.Error)
assert.Equal(t, mdb.ID, m.ID)
// Destroy it once, by ID
d = tx.Delete(&Mongod{ID: 1000})
assert.NoError(t, d.Error)
assert.EqualValues(t, 1, d.RowsAffected)
// Destroy it a second time.
// No Error will occur, have to check RowsAffected if we deleted something
d = tx.Delete(&Mongod{ID: 1000})
assert.NoError(t, d.Error)
assert.EqualValues(t, 0, d.RowsAffected)
}
func TestGormFirstBehavior(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
var m Mongod
assert.Error(t, tx.First(&m).Error)
}
func TestGormFindBehavior(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
var ms []Mongod
d := tx.Find(&ms)
assert.NoError(t, d.Error)
assert.EqualValues(t, 0, d.RowsAffected) // RowsAffected does NOT indicate "nothing found"!!!!
assert.Equal(t, 0, len(ms)) // Use this instead
}
func TestCascade(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx0 := db.Begin()
assert.NoError(t, tx0.Exec("CREATE TABLE foo(id int primary key);").Error)
assert.NoError(t, tx0.Exec("CREATE TABLE bar3(foreignKey int null references foo(id) on delete cascade deferrable initially deferred);").Error)
assert.NoError(t, tx0.Commit().Error)
tx1 := db.Begin()
assert.NoError(t, tx1.Exec("INSERT INTO foo VALUES(1); INSERT INTO bar3 VALUES(1);").Error)
assert.NoError(t, tx1.Commit().Error)
tx3 := db.Begin()
var count int
tx3.Raw("SELECT count(*) FROM bar3;").Row().Scan(&count)
assert.EqualValues(t, 1, count)
tx3.Commit()
tx2 := db.Begin()
tx2.Exec("DELETE FROM foo WHERE id = 1;")
assert.NoError(t, tx2.Commit().Error)
tx3 = db.Begin()
tx3.Raw("SELECT count(*) FROM bar3;").Row().Scan(&count)
assert.EqualValues(t, 0, count)
tx3.Commit()
}
func TestCascadeSlaves(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
r := fixtureEmptyRiskGroup()
assert.NoError(t, tx.Create(r).Error)
rs := fixtureEmptyReplicaSet()
assert.NoError(t, tx.Create(rs).Error)
s := fixtureEmptySlave()
s.RiskGroupID = NullIntValue(r.ID)
assert.NoError(t, tx.Create(s).Error)
ds := fixtureEmptyMongodState()
assert.NoError(t, tx.Create(&ds).Error)
m := fixtureEmptyMongod()
m.ParentSlaveID = s.ID
m.ReplicaSetID = NullIntValue(rs.ID)
m.DesiredStateID = ds.ID
assert.NoError(t, tx.Create(m).Error)
assert.NoError(t, tx.Model(&ds).Update("ParentMongodID", m.ID).Error)
p := fixtureEmptyProblem()
p.SlaveID = NullIntValue(s.ID)
assert.NoError(t, tx.Create(p).Error)
assert.NoError(t, tx.Commit().Error)
tx1 := db.Begin()
tx1.Delete(&Mongod{}, m.ID)
assert.NoError(t, tx1.Commit().Error)
tx2 := db.Begin()
tx2.Delete(&Slave{}, s.ID)
assert.NoError(t, tx2.Commit().Error)
tx3 := db.Begin()
assert.True(t, tx3.First(&Problem{}, p.ID).RecordNotFound())
tx3.Rollback()
}
// Test case demonstrating how to do overwrites
func TestObservationErrorOverwriteBehavior(t *testing.T) {
// Assume a situation where Slave already has an ObservationError
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
o1 := MSPError{
Identifier: "id1",
}
assert.NoError(t, tx.Create(&o1).Error)
s := Slave{
Hostname: "s1",
Port: 1,
MongodPortRangeBegin: 1,
MongodPortRangeEnd: 2,
ObservationErrorID: NullIntValue(o1.ID),
}
assert.NoError(t, tx.Create(&s).Error)
var countBeforeUpdate int64
assert.NoError(t, tx.Model(&MSPError{}).Count(&countBeforeUpdate).Error)
assert.EqualValues(t, 1, countBeforeUpdate)
// Now we attempt to observe Slave again and want to update the value pointed to by slave
// We could create, update, delete
// Or we could use the following hack to just UPDATE all values of the existing observation,
// saving us an annoying DELETE
o2 := MSPError{
Identifier: "id2",
}
o2.ID = o1.ID
tx.Save(&o2)
// That was it
var countAfterUpdate int64
assert.NoError(t, tx.Model(&MSPError{}).Count(&countAfterUpdate).Error)
assert.EqualValues(t, 1, countAfterUpdate, "updates should remove the row previously referenced in the overwritten column")
tx.Commit()
}
FIX: model: adjust test to model changes
package model
import (
"github.com/stretchr/testify/assert"
"testing"
)
func fixtureEmptySlave() *Slave {
return &Slave{
Hostname: "host1",
Port: 1,
MongodPortRangeBegin: 2,
MongodPortRangeEnd: 3,
PersistentStorage: true,
Mongods: []*Mongod{},
ConfiguredState: SlaveStateActive,
}
}
func fixtureEmptyMongod() *Mongod {
return &Mongod{
Port: 8080,
ReplSetName: "repl1",
}
}
func fixtureEmptyMongodState() MongodState {
return MongodState{
ShardingRole: ShardingRoleNone,
}
}
func fixtureEmptyRiskGroup() *RiskGroup {
return &RiskGroup{
Name: "rg1",
Slaves: []*Slave{},
}
}
func fixtureEmptyReplicaSet() *ReplicaSet {
return &ReplicaSet{
Name: "repl1",
PersistentMemberCount: 1,
VolatileMemberCount: 2,
ShardingRole: ShardingRoleNone,
}
}
func fixtureEmptyProblem() *Problem {
return &Problem{
Description: "Test",
}
}
////////////////////////////////////////////////////////////////////////////////
func TestCanInitializeDB(t *testing.T) {
db, _, err := InitializeTestDB()
defer db.CloseAndDrop()
assert.NoError(t, err)
}
/*
This elaborate test demonstrates how resolving an association works in gorm.
Check the assertions to learn about the behavior of gorm.
*/
func TestRelationshipMongodParentSlave(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
s := fixtureEmptySlave()
tx.Create(s)
m := fixtureEmptyMongod()
m.ParentSlave = s
tx.Create(m)
assert.Equal(t, m.ParentSlaveID, s.ID)
assert.Equal(t, s.Mongods, []*Mongod{})
var sdb Slave
// Check what happens when just SELECTing the slave
err := tx.First(&sdb).Error
assert.NoError(t, err)
assert.Nil(t, sdb.Mongods)
// Now resolve the slave->mongod 1:n association
err = tx.Model(&sdb).Related(&sdb.Mongods, "Mongods").Error
assert.NoError(t, err)
assert.Equal(t, len(sdb.Mongods), 1)
assert.Equal(t, sdb.Mongods[0].ReplSetName, m.ReplSetName)
assert.Zero(t, sdb.Mongods[0].ParentSlave)
assert.Equal(t, sdb.Mongods[0].ParentSlaveID, s.ID)
// Now resolve the mongod->(parent)slave relation
parentSlave := &Slave{}
err = tx.Model(&sdb.Mongods[0]).Related(parentSlave, "ParentSlave").Error
assert.NoError(t, err)
assert.NotZero(t, parentSlave)
assert.Equal(t, s.ID, parentSlave.ID)
}
// Test RiskGroup Slave relationship
func TestRiskGroupSlaveRelationship(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
s := fixtureEmptySlave()
r := fixtureEmptyRiskGroup()
r.Slaves = []*Slave{s}
err := tx.Create(&r).Error
assert.NoError(t, err)
var rdb RiskGroup
err = tx.First(&rdb).Error
assert.NoError(t, err)
assert.Zero(t, rdb.Slaves)
err = tx.Model(&rdb).Related(&rdb.Slaves, "Slaves").Error
assert.NoError(t, err)
assert.NotZero(t, rdb.Slaves)
assert.Equal(t, len(rdb.Slaves), 1)
assert.Equal(t, rdb.Slaves[0].ID, s.ID)
}
// Test ReplicaSet - Mongod Relationship
func TestReplicaSetMongodRelationship(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
r := fixtureEmptyReplicaSet()
m := fixtureEmptyMongod()
r.Mongods = []*Mongod{m}
err := tx.Create(&r).Error
assert.NoError(t, err)
var rdb ReplicaSet
err = tx.First(&rdb).Error
assert.NoError(t, err)
assert.Zero(t, rdb.Mongods)
err = tx.Model(&rdb).Related(&rdb.Mongods, "Mongods").Error
assert.NoError(t, err)
assert.NotZero(t, rdb.Mongods)
assert.Equal(t, len(rdb.Mongods), 1)
assert.Equal(t, rdb.Mongods[0].ID, m.ID)
}
// Test Mongod - MongodState relationship
func TestMongodMongodStateRelationship(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
m := fixtureEmptyMongod()
o := MongodState{
ShardingRole: ShardingRoleNone,
ExecutionState: MongodExecutionStateNotRunning,
}
d := MongodState{
ShardingRole: ShardingRoleNone,
ExecutionState: MongodExecutionStateRunning,
}
assert.NoError(t, tx.Create(m).Error)
o.ParentMongodID = m.ID
d.ParentMongodID = m.ID
assert.NoError(t, tx.Create(&o).Error)
assert.NoError(t, tx.Create(&d).Error)
assert.NoError(t, tx.Model(&m).Update("DesiredStateID", d.ID).Error)
assert.NoError(t, tx.Model(&m).Update("ObservedStateID", o.ID).Error)
var mdb Mongod
// Observed
assert.NoError(t, tx.First(&mdb).Error)
assert.Zero(t, mdb.ObservedState)
assert.NoError(t, tx.Model(&mdb).Related(&mdb.ObservedState, "ObservedState").Error)
assert.NotZero(t, mdb.ObservedState)
assert.Equal(t, mdb.ObservedState.ExecutionState, MongodExecutionStateNotRunning)
assert.NoError(t, tx.Model(&mdb).Related(&mdb.DesiredState, "DesiredState").Error)
assert.NotZero(t, mdb.DesiredState)
assert.Equal(t, mdb.DesiredState.ExecutionState, MongodExecutionStateRunning)
}
func TestDeleteBehavior(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
m := fixtureEmptyMongod()
m.ID = 1000
// Create it
tx.Create(&m)
var mdb Mongod
// Read it once
d := tx.First(&mdb)
assert.NoError(t, d.Error)
assert.Equal(t, mdb.ID, m.ID)
// Destroy it once, by ID
d = tx.Delete(&Mongod{ID: 1000})
assert.NoError(t, d.Error)
assert.EqualValues(t, 1, d.RowsAffected)
// Destroy it a second time.
// No Error will occur, have to check RowsAffected if we deleted something
d = tx.Delete(&Mongod{ID: 1000})
assert.NoError(t, d.Error)
assert.EqualValues(t, 0, d.RowsAffected)
}
func TestGormFirstBehavior(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
var m Mongod
assert.Error(t, tx.First(&m).Error)
}
func TestGormFindBehavior(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
defer tx.Rollback()
var ms []Mongod
d := tx.Find(&ms)
assert.NoError(t, d.Error)
assert.EqualValues(t, 0, d.RowsAffected) // RowsAffected does NOT indicate "nothing found"!!!!
assert.Equal(t, 0, len(ms)) // Use this instead
}
// TestCascade verifies that "ON DELETE CASCADE ... DEFERRABLE INITIALLY
// DEFERRED" foreign keys work on the test database: deleting the referenced
// row in foo removes the referencing row in bar3 once the deleting
// transaction commits.
func TestCascade(t *testing.T) {
	db, _, _ := InitializeTestDB()
	defer db.CloseAndDrop()

	// Schema: bar3.foreignKey cascades on deletion of foo.id.
	tx0 := db.Begin()
	assert.NoError(t, tx0.Exec("CREATE TABLE foo(id int primary key);").Error)
	assert.NoError(t, tx0.Exec("CREATE TABLE bar3(foreignKey int null references foo(id) on delete cascade deferrable initially deferred);").Error)
	assert.NoError(t, tx0.Commit().Error)

	tx1 := db.Begin()
	assert.NoError(t, tx1.Exec("INSERT INTO foo VALUES(1); INSERT INTO bar3 VALUES(1);").Error)
	assert.NoError(t, tx1.Commit().Error)

	// The referencing row exists before the delete.
	// (Scan/Commit errors were previously ignored; assert them so a broken
	// query fails the test instead of silently scanning a stale count.)
	tx3 := db.Begin()
	var count int
	assert.NoError(t, tx3.Raw("SELECT count(*) FROM bar3;").Row().Scan(&count))
	assert.EqualValues(t, 1, count)
	assert.NoError(t, tx3.Commit().Error)

	tx2 := db.Begin()
	tx2.Exec("DELETE FROM foo WHERE id = 1;")
	assert.NoError(t, tx2.Commit().Error)

	// The cascade removed the referencing row.
	tx3 = db.Begin()
	assert.NoError(t, tx3.Raw("SELECT count(*) FROM bar3;").Row().Scan(&count))
	assert.EqualValues(t, 0, count)
	assert.NoError(t, tx3.Commit().Error)
}
func TestCascadeSlaves(t *testing.T) {
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
r := fixtureEmptyRiskGroup()
assert.NoError(t, tx.Create(r).Error)
rs := fixtureEmptyReplicaSet()
assert.NoError(t, tx.Create(rs).Error)
s := fixtureEmptySlave()
s.RiskGroupID = NullIntValue(r.ID)
assert.NoError(t, tx.Create(s).Error)
ds := fixtureEmptyMongodState()
assert.NoError(t, tx.Create(&ds).Error)
m := fixtureEmptyMongod()
m.ParentSlaveID = s.ID
m.ReplicaSetID = NullIntValue(rs.ID)
m.DesiredStateID = ds.ID
assert.NoError(t, tx.Create(m).Error)
assert.NoError(t, tx.Model(&ds).Update("ParentMongodID", m.ID).Error)
p := fixtureEmptyProblem()
p.SlaveID = NullIntValue(s.ID)
assert.NoError(t, tx.Create(p).Error)
assert.NoError(t, tx.Commit().Error)
tx1 := db.Begin()
tx1.Delete(&Mongod{}, m.ID)
assert.NoError(t, tx1.Commit().Error)
tx2 := db.Begin()
tx2.Delete(&Slave{}, s.ID)
assert.NoError(t, tx2.Commit().Error)
tx3 := db.Begin()
assert.True(t, tx3.First(&Problem{}, p.ID).RecordNotFound())
tx3.Rollback()
}
// Test case demonstrating how to do overwrites
func TestObservationErrorOverwriteBehavior(t *testing.T) {
// Assume a situation where Slave already has an ObservationError
db, _, _ := InitializeTestDB()
defer db.CloseAndDrop()
tx := db.Begin()
o1 := MSPError{
Identifier: "id1",
}
assert.NoError(t, tx.Create(&o1).Error)
s := Slave{
Hostname: "s1",
Port: 1,
MongodPortRangeBegin: 1,
MongodPortRangeEnd: 2,
ObservationErrorID: NullIntValue(o1.ID),
}
assert.NoError(t, tx.Create(&s).Error)
var countBeforeUpdate int64
assert.NoError(t, tx.Model(&MSPError{}).Count(&countBeforeUpdate).Error)
assert.EqualValues(t, 1, countBeforeUpdate)
// Now we attempt to observe Slave again and want to update the value pointed to by slave
// We could create, update, delete
// Or we could use the following hack to just UPDATE all values of the existing observation,
// saving us an annoying DELETE
o2 := MSPError{
Identifier: "id2",
}
o2.ID = o1.ID
tx.Save(&o2)
// That was it
var countAfterUpdate int64
assert.NoError(t, tx.Model(&MSPError{}).Count(&countAfterUpdate).Error)
assert.EqualValues(t, 1, countAfterUpdate, "updates should remove the row previously referenced in the overwritten column")
tx.Commit()
}
|
package model
import (
"log"
"net/rpc"
"time"
)
import (
"github.com/kc1212/virtual-grid/common"
"github.com/kc1212/virtual-grid/discosrv"
)
// GridSdr describes the properties of one grid scheduler
type GridSdr struct {
common.Node
gsNodes *common.SyncedSet // other grid schedulers, not including myself
rmNodes *common.SyncedSet // the resource managers
leader string // the lead grid scheduler
incomingJobAddChan chan Job // when user adds a job, it comes here
incomingJobRmChan chan int
incomingJobs []Job
scheduledJobAddChan chan Job // channel for new scheduled jobs
scheduledJobRmChan chan int64 // channel for removing jobs that are completed
scheduledJobReqChan chan chan Job // channel inside a channel to sync with new GS when it's online
scheduledJobs map[int64]Job // when GS schedules a job, it gets stored here via the two channels
tasks chan common.Task // these tasks require critical section (CS)
inElection *common.SyncedVal
mutexRespChan chan int
mutexReqChan chan common.Task
mutexState *common.SyncedVal
clock *common.SyncedVal
reqClock int64
discosrvAddr string
}
// RPCArgs is the arguments for RPC calls between grid schedulers and/or resource maanagers
type RPCArgs struct {
ID int
Addr string
Type common.MsgType
Clock int64
}
// InitGridSdr creates a grid scheduler.
func InitGridSdr(id int, addr string, dsAddr string) GridSdr {
	// NOTE: gsNodes, rmNodes and leader get their real values in `Run`.
	// Named fields make this literal robust against field reordering.
	return GridSdr{
		Node:                common.Node{id, addr, common.GSNode},
		gsNodes:             &common.SyncedSet{S: make(map[string]common.IntClient)},
		rmNodes:             &common.SyncedSet{S: make(map[string]common.IntClient)},
		leader:              "",
		incomingJobAddChan:  make(chan Job, 1000000),
		incomingJobRmChan:   make(chan int),
		incomingJobs:        make([]Job, 0),
		scheduledJobAddChan: make(chan Job, 1000000),
		scheduledJobRmChan:  make(chan int64),
		scheduledJobReqChan: make(chan chan Job),
		scheduledJobs:       make(map[int64]Job),
		tasks:               make(chan common.Task, 100),
		inElection:          &common.SyncedVal{V: false},
		mutexRespChan:       make(chan int, 100),
		mutexReqChan:        make(chan common.Task, 100),
		mutexState:          &common.SyncedVal{V: common.StateReleased},
		clock:               &common.SyncedVal{V: int64(0)},
		reqClock:            0,
		discosrvAddr:        dsAddr,
	}
}
// Run is the main function for GridSdr, it starts all its services, do not run it more than once.
// It registers with the discovery service (panicking if it is offline),
// seeds the GS/RM node sets from the discovery reply, starts the background
// goroutines and finally blocks in the scheduling loop.
func (gs *GridSdr) Run() {
	reply, e := discosrv.ImAliveProbe(gs.Addr, gs.Type, gs.discosrvAddr)
	if e != nil {
		// Without the discovery service we cannot find peers; abort.
		log.Panicf("Discosrv on %v not online\n", gs.discosrvAddr)
	}
	gs.notifyAndPopulateGSs(reply.GSs)
	gs.notifyAndPopulateRMs(reply.RMs)
	go discosrv.ImAlivePoll(gs.Addr, gs.Type, gs.discosrvAddr)
	go common.RunRPC(gs, gs.Addr)
	go gs.pollLeader()
	go gs.runTasks()
	go gs.updateScheduledJobs()
	// Blocks forever; this goroutine becomes the scheduling loop.
	gs.scheduleJobs()
}
// imLeader reports whether this node currently considers itself the leader.
func (gs *GridSdr) imLeader() bool {
	return gs.Addr == gs.leader
}
// updateScheduledJobs owns the scheduledJobs map: every read and mutation
// goes through this goroutine's select loop, serialising access without a
// mutex.
func (gs *GridSdr) updateScheduledJobs() {
	for {
		timeout := time.After(time.Second)
		select {
		case <-timeout:
			// every `timeout` check the RMs and see whether they're up
			// then re-schedule the jobs if the responsible RM is down
			// only do this for leader
			if !gs.imLeader() || len(gs.scheduledJobs) == 0 {
				break
			}
			rms := gs.getAliveRMs()
			for _, v := range gs.scheduledJobs {
				if _, ok := rms[v.ResMan]; !ok {
					// Responsible RM is down: push the job back into the
					// incoming queue and forget it here. (Deleting from a
					// map while ranging over it is well-defined in Go.)
					gs.incomingJobAddChan <- v
					delete(gs.scheduledJobs, v.ID)
				}
			}
		case job := <-gs.scheduledJobAddChan:
			gs.scheduledJobs[job.ID] = job
		case id := <-gs.scheduledJobRmChan:
			delete(gs.scheduledJobs, id)
		case c := <-gs.scheduledJobReqChan:
			// A newly-online GS asked for a snapshot: stream all scheduled
			// jobs into its channel and close it to signal the end.
			for _, v := range gs.scheduledJobs {
				c <- v
			}
			close(c)
		}
	}
}
// getAliveRMs returns the subset of known resource managers that currently
// accept an RPC connection. Liveness is probed by dialing and immediately
// closing the connection; RMs that fail to dial are simply omitted.
func (gs *GridSdr) getAliveRMs() map[string]common.IntClient {
	res := make(map[string]common.IntClient)
	for k, v := range gs.rmNodes.GetAll() {
		remote, e := rpc.DialHTTP("tcp", k)
		if e == nil {
			res[k] = v
			remote.Close()
		}
	}
	return res
}
// scheduleJobs is the main scheduling loop. It owns the incomingJobs slice:
// every mutation of that slice happens inside this select loop.
func (gs *GridSdr) scheduleJobs() {
	for {
		// schedule jobs if there are any, for every 100ms
		timeout := time.After(100 * time.Millisecond)
		select {
		case <-timeout:
			// try again later if I'm not leader
			if !gs.imLeader() {
				time.Sleep(time.Second)
				break
			}
			// schedule jobs if there are any
			if len(gs.incomingJobs) == 0 {
				break
			}
			// try again later if no free RMs
			addr, cap := gs.getNextFreeRM()
			if cap == -1 || addr == "" {
				break
			}
			// Dispatch at most `cap` of the oldest queued jobs to this RM.
			minCap := common.MinInt(cap, len(gs.incomingJobs))
			jobs := gs.incomingJobs[0:minCap]
			for i := range jobs {
				jobs[i].ResMan = addr
			}
			// NOTE(review): runJobsTask blocks this goroutine until the task
			// has run, and the task sends on incomingJobRmChan — an unbuffered
			// channel whose only receiver is this very select loop. That
			// looks like a potential deadlock; confirm against the follow-up
			// commit ("using a channel inside its own select is bad").
			gs.runJobsTask(jobs, addr) // this function blocks util the task finishes executing
		case job := <-gs.incomingJobAddChan:
			// take all the jobs in the channel and put them in the slice
			rest := takeJobs(1000000, gs.incomingJobAddChan)
			gs.incomingJobs = append(gs.incomingJobs, job)
			gs.incomingJobs = append(gs.incomingJobs, rest...)
			log.Println("newJob!!!", gs.incomingJobs)
		case n := <-gs.incomingJobRmChan:
			// Drop the n oldest jobs — they have been dispatched.
			log.Println(n, gs.incomingJobs)
			gs.incomingJobs = gs.incomingJobs[n:]
		}
	}
}
// runJobsTask pushes to tasks channel, it blocks until the task is completed
// TODO: rmAddr is contained in jobs already, we can possibly reduce redundancy
// The closure is executed inside the critical section by the runTasks
// goroutine; c signals completion back to the caller.
func (gs *GridSdr) runJobsTask(jobs []Job, rmAddr string) {
	c := make(chan int)
	gs.tasks <- func() (interface{}, error) {
		// send the job to RM
		reply, e := rpcAddJobsToRM(rmAddr, &jobs)
		// add jobs to the submitted list for all GSs
		// jobsToChan(jobs, gs.scheduledJobAddChan) // for myself
		for _, job := range jobs {
			gs.scheduledJobAddChan <- job
		}
		rpcJobsGo(common.SliceFromMap(gs.gsNodes.GetAll()), &jobs, rpcSyncScheduledJobs)
		// remove jobs from the incomingJobs list
		// NOTE(review): incomingJobRmChan is unbuffered and received only in
		// scheduleJobs, which may itself be blocked on <-c below when it
		// submitted this task — verify this cannot deadlock.
		gs.incomingJobRmChan <- len(jobs) // for myself
		rpcIntGo(common.SliceFromMap(gs.gsNodes.GetAll()), len(jobs), rpcDropJobs)
		c <- 0
		return reply, e
	}
	<-c
}
// rpcArgsForGS builds an RPCArgs for this scheduler with the given message
// type, stamped with the current Lamport clock.
func (gs *GridSdr) rpcArgsForGS(msgType common.MsgType) RPCArgs {
	return RPCArgs{
		ID:    gs.ID,
		Addr:  gs.Addr,
		Type:  msgType,
		Clock: gs.clock.Geti64(),
	}
}
// NOTE: there are various ways to improve this function, i.e. get the RM with highest number of free workers
// getNextFreeRM returns the address and capacity of some resource manager
// that reports free capacity, or ("", -1) when none is available. Map
// iteration order is random, so the choice among free RMs is arbitrary.
func (gs *GridSdr) getNextFreeRM() (string, int) {
	caps := gs.getRMCapacities()
	// `capacity` instead of `cap`: the original shadowed the builtin cap().
	for addr, capacity := range caps {
		if capacity > 0 {
			return addr, int(capacity)
		}
	}
	return "", -1
}
// getRMCapacities queries every known resource manager for its free capacity
// and returns a map of RM address to capacity. RMs that fail to answer are
// omitted from the result.
func (gs *GridSdr) getRMCapacities() map[string]int64 {
	capacities := make(map[string]int64)
	args := gs.rpcArgsForGS(common.GetCapacityMsg)
	for k := range gs.rmNodes.GetAll() {
		x, e := rpcSendMsgToRM(k, &args)
		if e == nil {
			capacities[k] = int64(x)
		}
	}
	return capacities
}
// notifyAndPopulateGSs announces this node to every grid scheduler in nodes
// and records the ones that answered (keyed by address, valued by their ID).
func (gs *GridSdr) notifyAndPopulateGSs(nodes []string) {
	args := gs.rpcArgsForGS(common.GSUpMsg)
	for _, addr := range nodes {
		if id, e := rpcSendMsgToGS(addr, &args); e == nil {
			gs.gsNodes.SetInt(addr, int64(id))
		}
	}
}
// notifyAndPopulateRMs announces this node to every resource manager in
// nodes and records the ones that answered (keyed by address, valued by ID).
func (gs *GridSdr) notifyAndPopulateRMs(nodes []string) {
	args := gs.rpcArgsForGS(common.RMUpMsg)
	for _, addr := range nodes {
		if id, e := rpcSendMsgToRM(addr, &args); e == nil {
			gs.rmNodes.SetInt(addr, int64(id))
		}
	}
}
// obtainCritSection implements most of the Ricart-Agrawala algorithm, it sends the critical section request and then wait for responses until some timeout.
// Initially we set the mutexState to StateWanted, if the critical section is obtained we set it to StateHeld.
// NOTE: this function isn't designed to be thread safe, it is run periodically in `runTasks`.
func (gs *GridSdr) obtainCritSection() {
	// Protocol invariant: we must be outside the CS and have no stale
	// responses queued before requesting.
	if gs.mutexState.Get().(common.MutexState) != common.StateReleased {
		log.Panicf("Should not be in CS, state: %v\n", gs)
	}
	if len(gs.mutexRespChan) != 0 {
		log.Panic("Nodes following the protocol shouldn't send more messages")
	}
	gs.mutexState.Set(common.StateWanted)
	gs.clock.Tick()
	args := gs.rpcArgsForGS(common.MutexReq)
	addrs := common.SliceFromMap(gs.gsNodes.GetAll())
	// successes = number of peers that received our request, i.e. the number
	// of responses we should wait for.
	successes := rpcGo(addrs, &args, rpcSendMsgToGS)
	// Remember the clock value of our request; respCritSection replies with
	// it so ordering is judged against the request, not the current clock.
	gs.reqClock = gs.clock.Geti64()
	// wait until others has written to mutexRespChan or time out (5s)
	cnt := 0
	timeout := time.After(5 * time.Second)
loop:
	for {
		if cnt >= successes {
			break
		}
		select {
		case <-gs.mutexRespChan:
			cnt++
		case <-timeout:
			// Give up waiting for laggards and enter the CS anyway.
			break loop
		}
	}
	// now empty gs.mutexRespChan because we received all the messages
	// NOTE: probably not necessary
	common.EmptyIntChan(gs.mutexRespChan)
	// here we're in critical section
	gs.mutexState.Set(common.StateHeld)
	log.Println("In CS!", gs.ID)
}
// releaseCritSection sets the mutexState to StateReleased and then runs all the queued requests.
// Queued requests are the deferred replies accumulated by respCritSection
// while we held (or wanted) the critical section.
func (gs *GridSdr) releaseCritSection() {
	gs.mutexState.Set(common.StateReleased)
	for {
		select {
		case req := <-gs.mutexReqChan:
			_, e := req()
			if e != nil {
				log.Panic("request failed with", e)
			}
		default:
			// Queue drained: we are fully out of the CS.
			log.Println("Out CS!", gs.ID)
			return
		}
	}
}
// elect implements the Bully algorithm.
// It messages the higher-ID peers; if none answers, this node declares
// itself leader and broadcasts a CoordinateMsg.
func (gs *GridSdr) elect() {
	defer func() {
		gs.inElection.Set(false)
	}()
	gs.inElection.Set(true)
	gs.clock.Tick()
	oks := 0
	args := gs.rpcArgsForGS(common.ElectionMsg)
	for k, v := range gs.gsNodes.GetAll() {
		// NOTE(review): this skips peers with strictly lower IDs; peers with
		// an ID equal to ours would still be messaged — presumably IDs are
		// unique, verify.
		if v.ID < int64(gs.ID) {
			continue // do nothing to lower ids
		}
		_, e := rpcSendMsgToGS(k, &args)
		if e == nil {
			oks++
		}
	}
	// if no responses, then set the node itself as leader, and tell the others
	if oks == 0 {
		gs.clock.Tick()
		gs.leader = gs.Addr
		log.Printf("I'm the leader (%v).\n", gs.leader)
		args := gs.rpcArgsForGS(common.CoordinateMsg)
		addrs := common.SliceFromMap(gs.gsNodes.GetAll())
		rpcGo(addrs, &args, rpcSendMsgToGS) // NOTE: ok to fail the send, because nodes might be done
	}
	// artificially make the election last longer so that multiple messages
	// requests won't initialise multiple election runs
	time.Sleep(time.Second)
}
// RecvMsg is called remotely, it updates the Lamport clock first and then performs tasks depending on the message type.
// For GSUpMsg/RMUpMsg the reply carries this node's ID so the caller can
// record it; for all other message types the reply is simply 1.
func (gs *GridSdr) RecvMsg(args *RPCArgs, reply *int) error {
	log.Printf("Msg received %v\n", *args)
	*reply = 1
	gs.clock.Set(common.Max64(gs.clock.Geti64(), args.Clock) + 1) // update Lamport clock
	// Dispatch on the message tag; a switch is the idiomatic form of the
	// original if/else-if chain.
	switch args.Type {
	case common.CoordinateMsg:
		gs.leader = args.Addr
		log.Printf("Leader set to %v\n", gs.leader)
	case common.ElectionMsg:
		// don't start a new election if one is already running
		if !gs.inElection.Get().(bool) {
			go gs.elect()
		}
	case common.MutexReq:
		// Answer (or queue the answer) asynchronously so the RPC returns.
		go gs.respCritSection(*args)
	case common.MutexResp:
		gs.mutexRespChan <- 0
	case common.GSUpMsg:
		*reply = gs.ID
		gs.gsNodes.SetInt(args.Addr, int64(args.ID))
	case common.RMUpMsg:
		*reply = gs.ID
		gs.rmNodes.SetInt(args.Addr, int64(args.ID))
	default:
		log.Panic("Invalid message!", args)
	}
	return nil
}
// RecvJobs appends new jobs into the jobs queue.
// NOTE: this function should not be called directly by the client, it requires CS.
// Jobs are handed to the scheduling loop through incomingJobAddChan; the
// reply is always 0.
func (gs *GridSdr) RecvJobs(jobs *[]Job, reply *int) error {
	log.Printf("%v new incoming jobs.\n", len(*jobs))
	for _, job := range *jobs {
		gs.incomingJobAddChan <- job
	}
	// jobsToChan(*jobs, gs.incomingJobAddChan)
	*reply = 0
	return nil
}
// NOTE: this function should not be called directly by the client, it requires CS.
// RecvScheduledJobs records jobs another GS has already scheduled, handing
// them to the updateScheduledJobs goroutine via scheduledJobAddChan.
func (gs *GridSdr) RecvScheduledJobs(jobs *[]Job, reply *int) error {
	log.Printf("Adding %v scheduled jobs.\n", len(*jobs))
	for _, job := range *jobs {
		gs.scheduledJobAddChan <- job
	}
	// jobsToChan(*jobs, gs.scheduledJobAddChan)
	*reply = 0
	return nil
}
// DropJobs asks the scheduling loop to drop the n oldest incoming jobs
// (they were dispatched by another GS). Called via RPC.
func (gs *GridSdr) DropJobs(n *int, reply *int) error {
	log.Printf("Dropping %v jobs\n", *n)
	gs.incomingJobRmChan <- *n
	// dropJobs(*n, gs.incomingJobAddChan)
	*reply = 0
	return nil
}
// SyncCompletedJobs is called by the RM when job(s) are completed.
// We acquire a critical section and propogate the change to everybody.
// The removal runs as a queued task (executed inside the CS by runTasks);
// this call blocks until that task has run.
func (gs *GridSdr) SyncCompletedJobs(jobs *[]int64, reply *int) error {
	c := make(chan int)
	gs.tasks <- func() (interface{}, error) {
		// remove it from myself too
		// TODO this is repeated code, more elegant if RPC call can be done on myself too
		r := -1
		gs.RemoveCompletedJobs(jobs, &r)
		// remove it from everybody else
		rpcInt64sGo(common.SliceFromMap(gs.gsNodes.GetAll()), jobs, rpcRemoveCompletedJobs)
		c <- 0
		return r, nil
	}
	// Block until the queued task finished executing.
	<-c
	*reply = 0
	return nil
}
// RemoveCompletedJobs is called by another GS to drop completed job(s) from
// the scheduledJobs map (via scheduledJobRmChan). The reply is always 0.
func (gs *GridSdr) RemoveCompletedJobs(jobs *[]int64, reply *int) error {
	log.Printf("I'm removing %v jobs.\n", len(*jobs))
	for _, jobID := range *jobs {
		gs.scheduledJobRmChan <- jobID
	}
	*reply = 0
	return nil
}
// AddJobsTask is called by the client to add job(s) to the tasks queue, it returns when the job is synchronised.
// The closure runs inside the critical section (see runTasks): it enqueues
// the jobs locally and replicates them to all other grid schedulers.
func (gs *GridSdr) AddJobsTask(jobs *[]Job, reply *int) error {
	c := make(chan int)
	gs.tasks <- func() (interface{}, error) {
		// add jobs to myself
		// TODO more elegant if RPC call on myself
		r := -1
		e := gs.RecvJobs(jobs, &r)
		// add jobs to the others
		rpcJobsGo(common.SliceFromMap(gs.gsNodes.GetAll()), jobs, rpcSyncJobs)
		c <- 0
		return r, e
	}
	// Block until the queued task finished executing.
	<-c
	*reply = 0
	return nil
}
// runTasks queries the tasks queue and if there are outstanding tasks it will request for critical and run the tasks.
// A failing task panics the process; the CS is released once the queue is
// drained or the 1ms budget elapses.
func (gs *GridSdr) runTasks() {
	for {
		// check whether there are tasks that needs running every 100ms
		time.Sleep(100 * time.Millisecond)
		if len(gs.tasks) > 0 {
			// acquire CS, run the tasks, run for 1ms at most, then release CS
			gs.obtainCritSection()
			timeout := time.After(time.Millisecond)
		inner_loop:
			for {
				select {
				case task := <-gs.tasks:
					_, e := task()
					if e != nil {
						log.Panic("task failed with", e)
					}
				case <-timeout:
					break inner_loop
				default:
					// No task ready right now: leave the CS immediately
					// rather than holding it for the full timeout.
					break inner_loop
				}
			}
			gs.releaseCritSection()
		}
	}
}
// argsIsLater checks whether args has a later Lamport clock, tie break using node ID.
// Returns true when the remote request is ordered strictly after ours:
// larger clock, or equal clocks with our node ID being the smaller one.
func (gs *GridSdr) argsIsLater(args RPCArgs) bool {
	return gs.clock.Geti64() < args.Clock || (gs.clock.Geti64() == args.Clock && gs.ID < args.ID)
}
// respCritSection puts the critical section response into the response queue when it can't respond straight away.
func (gs *GridSdr) respCritSection(args RPCArgs) {
	// resp replies to the requesting node; it either runs immediately or is
	// queued on mutexReqChan and run when the CS is released
	resp := func() (interface{}, error) {
		// NOTE: use gs.reqClock instead of the normal clock
		rpcSendMsgToGS(args.Addr, &RPCArgs{gs.ID, gs.Addr, common.MutexResp, gs.reqClock})
		return 0, nil
	}
	st := gs.mutexState.Get().(common.MutexState)
	// Ricart-Agrawala: defer the reply while holding the CS, or while wanting
	// it with an earlier request (ties broken by node ID); otherwise reply now
	if st == common.StateHeld || (st == common.StateWanted && gs.argsIsLater(args)) {
		gs.mutexReqChan <- resp
	} else {
		resp()
	}
}
// pollLeader polls the leader node and initiates the election algorithm if the leader goes offline.
func (gs *GridSdr) pollLeader() {
	for {
		time.Sleep(time.Second)
		// don't do anything if election is running or I'm leader
		if gs.inElection.Get().(bool) || gs.imLeader() {
			continue
		}
		// a successful dial counts as "leader alive"; close the connection immediately
		remote, e := rpc.DialHTTP("tcp", gs.leader)
		if e != nil {
			log.Printf("Leader %v not online (DialHTTP), initialising election.\n", gs.leader)
			gs.elect()
		} else {
			remote.Close()
		}
	}
}
Using a channel inside its own select statement is bad; it can deadlock the loop that owns the select.
package model
import (
"log"
"net/rpc"
"time"
)
import (
"github.com/kc1212/virtual-grid/common"
"github.com/kc1212/virtual-grid/discosrv"
)
// GridSdr describes the properties of one grid scheduler
type GridSdr struct {
	common.Node
	gsNodes             *common.SyncedSet // other grid schedulers, not including myself
	rmNodes             *common.SyncedSet // the resource managers
	leader              string            // the lead grid scheduler
	incomingJobAddChan  chan Job          // when user adds a job, it comes here
	incomingJobRmChan   chan int          // number of jobs to drop from the head of incomingJobs
	incomingJobs        []Job             // accepted but not yet scheduled jobs; maintained by scheduleJobs
	scheduledJobAddChan chan Job          // channel for new scheduled jobs
	scheduledJobRmChan  chan int64        // channel for removing jobs that are completed
	scheduledJobReqChan chan chan Job     // channel inside a channel to sync with new GS when it's online
	scheduledJobs       map[int64]Job     // when GS schedules a job, it gets stored here via the two channels
	tasks               chan common.Task  // these tasks require critical section (CS)
	inElection          *common.SyncedVal // bool: true while a Bully election is running
	mutexRespChan       chan int          // Ricart-Agrawala responses from the other GSs
	mutexReqChan        chan common.Task  // deferred CS replies, drained on release
	mutexState          *common.SyncedVal // common.MutexState: released/wanted/held
	clock               *common.SyncedVal // Lamport clock (int64)
	reqClock            int64             // clock value at the time of our CS request
	discosrvAddr        string            // address of the discovery server
}
// RPCArgs is the arguments for RPC calls between grid schedulers and/or resource managers
type RPCArgs struct {
	ID    int            // sender's node ID
	Addr  string         // sender's address
	Type  common.MsgType // message kind, e.g. MutexReq or ElectionMsg
	Clock int64          // sender's Lamport clock
}
// InitGridSdr creates a grid scheduler.
func InitGridSdr(id int, addr string, dsAddr string) GridSdr {
	// NOTE: gsNodes, rmNodes and leader are (re-)populated in `Run`
	gsNodes := &common.SyncedSet{S: make(map[string]common.IntClient)}
	rmNodes := &common.SyncedSet{S: make(map[string]common.IntClient)}
	var leader string
	// Use a keyed struct literal: the previous positional literal would
	// silently mis-assign values if GridSdr's fields were added or reordered.
	return GridSdr{
		Node:                common.Node{ID: id, Addr: addr, Type: common.GSNode},
		gsNodes:             gsNodes,
		rmNodes:             rmNodes,
		leader:              leader,
		incomingJobAddChan:  make(chan Job, 1000000),
		incomingJobRmChan:   make(chan int),
		incomingJobs:        make([]Job, 0),
		scheduledJobAddChan: make(chan Job, 1000000),
		scheduledJobRmChan:  make(chan int64),
		scheduledJobReqChan: make(chan chan Job),
		scheduledJobs:       make(map[int64]Job),
		tasks:               make(chan common.Task, 100),
		inElection:          &common.SyncedVal{V: false},
		mutexRespChan:       make(chan int, 100),
		mutexReqChan:        make(chan common.Task, 100),
		mutexState:          &common.SyncedVal{V: common.StateReleased},
		clock:               &common.SyncedVal{V: int64(0)},
		reqClock:            0,
		discosrvAddr:        dsAddr,
	}
}
// Run is the main function for GridSdr, it starts all its services, do not run it more than once.
func (gs *GridSdr) Run() {
	// register with the discovery server and learn about existing GSs and RMs
	reply, e := discosrv.ImAliveProbe(gs.Addr, gs.Type, gs.discosrvAddr)
	if e != nil {
		log.Panicf("Discosrv on %v not online\n", gs.discosrvAddr)
	}
	gs.notifyAndPopulateGSs(reply.GSs)
	gs.notifyAndPopulateRMs(reply.RMs)
	// background services: heartbeat, RPC server, leader polling, the CS task
	// runner and the scheduled-jobs bookkeeper
	go discosrv.ImAlivePoll(gs.Addr, gs.Type, gs.discosrvAddr)
	go common.RunRPC(gs, gs.Addr)
	go gs.pollLeader()
	go gs.runTasks()
	go gs.updateScheduledJobs()
	// the scheduling loop runs on the caller's goroutine and never returns
	gs.scheduleJobs()
}
// imLeader reports whether this node currently believes itself to be the leader.
func (gs *GridSdr) imLeader() bool {
	return gs.Addr == gs.leader
}
// updateScheduledJobs maintains gs.scheduledJobs: additions, removals and
// snapshot requests arrive over the three channels handled here, and (leader
// only) jobs whose responsible RM went down are re-queued for scheduling.
func (gs *GridSdr) updateScheduledJobs() {
	for {
		timeout := time.After(100 * time.Millisecond)
		select {
		case <-timeout:
			// every `timeout` check the RMs and see whether they're up
			// then re-schedule the jobs if the responsible RM is down
			// only do this for leader
			if !gs.imLeader() || len(gs.scheduledJobs) == 0 {
				break
			}
			rms := gs.getAliveRMs()
			for _, v := range gs.scheduledJobs {
				if _, ok := rms[v.ResMan]; !ok {
					gs.incomingJobAddChan <- v
					// deleting during range is well-defined in Go
					delete(gs.scheduledJobs, v.ID)
				}
			}
		case job := <-gs.scheduledJobAddChan:
			gs.scheduledJobs[job.ID] = job
		case id := <-gs.scheduledJobRmChan:
			delete(gs.scheduledJobs, id)
		case c := <-gs.scheduledJobReqChan:
			// stream a snapshot of the scheduled jobs to the requester,
			// closing c to signal the end of the stream
			for _, v := range gs.scheduledJobs {
				c <- v
			}
			close(c)
		}
	}
}
// getAliveRMs returns the subset of known resource managers that currently
// accept an RPC dial over HTTP.
func (gs *GridSdr) getAliveRMs() map[string]common.IntClient {
	alive := make(map[string]common.IntClient)
	for addr, client := range gs.rmNodes.GetAll() {
		conn, err := rpc.DialHTTP("tcp", addr)
		if err != nil {
			continue
		}
		conn.Close()
		alive[addr] = client
	}
	return alive
}
// scheduleJobs is the main scheduling loop: every 100ms the leader assigns
// queued incoming jobs to a free resource manager, and the add/remove
// channels keep the incomingJobs slice up to date.
// Fixes: removed leftover debug logging ("here!!!1111" etc.) and renamed the
// local `cap`, which shadowed the builtin.
func (gs *GridSdr) scheduleJobs() {
	for {
		// schedule jobs if there are any, for every 100ms
		timeout := time.After(100 * time.Millisecond)
		select {
		case <-timeout:
			// try again later if I'm not leader
			if !gs.imLeader() {
				break
			}
			// schedule jobs if there are any
			if len(gs.incomingJobs) == 0 {
				break
			}
			// try again later if no free RMs
			addr, capacity := gs.getNextFreeRM()
			if capacity == -1 || addr == "" {
				break
			}
			// hand at most `capacity` jobs to the chosen RM
			minCap := common.MinInt(capacity, len(gs.incomingJobs))
			jobs := gs.incomingJobs[0:minCap]
			for i := range jobs {
				jobs[i].ResMan = addr
			}
			gs.runJobsTask(jobs, addr) // this function blocks until the task finishes executing
		case job := <-gs.incomingJobAddChan:
			// take all the jobs in the channel and put them in the slice
			rest := takeJobs(1000000, gs.incomingJobAddChan)
			gs.incomingJobs = append(gs.incomingJobs, job)
			gs.incomingJobs = append(gs.incomingJobs, rest...)
		case n := <-gs.incomingJobRmChan:
			// drop the n jobs at the head of the queue
			gs.incomingJobs = gs.incomingJobs[n:]
		}
	}
}
// runJobsTask pushes to tasks channel, it blocks until the task is completed
// TODO: rmAddr is contained in jobs already, we can possibly reduce redundancy
func (gs *GridSdr) runJobsTask(jobs []Job, rmAddr string) {
	c := make(chan int)
	gs.tasks <- func() (interface{}, error) {
		// send the job to RM
		reply, e := rpcAddJobsToRM(rmAddr, &jobs)
		// add jobs to the submitted list for all GSs
		// jobsToChan(jobs, gs.scheduledJobAddChan) // for myself
		for _, job := range jobs {
			gs.scheduledJobAddChan <- job
		}
		rpcJobsGo(common.SliceFromMap(gs.gsNodes.GetAll()), &jobs, rpcSyncScheduledJobs)
		// remove jobs from the incomingJobs list
		// gs.incomingJobRmChan <- len(jobs) // for myself
		// Mutating incomingJobs directly appears safe only because the caller
		// (scheduleJobs) is blocked on <-c while this task runs; sending on
		// incomingJobRmChan from here would deadlock against scheduleJobs' select.
		gs.incomingJobs = gs.incomingJobs[len(jobs):] // NOTE: don't use channel in its own select statement!
		rpcIntGo(common.SliceFromMap(gs.gsNodes.GetAll()), len(jobs), rpcDropJobs)
		c <- 0
		return reply, e
	}
	<-c
}
// rpcArgsForGS builds an RPCArgs stamped with this GS's identity and its
// current Lamport clock.
func (gs *GridSdr) rpcArgsForGS(msgType common.MsgType) RPCArgs {
	return RPCArgs{
		ID:    gs.ID,
		Addr:  gs.Addr,
		Type:  msgType,
		Clock: gs.clock.Geti64(),
	}
}
// NOTE: there are various ways to improve this function, i.e. get the RM with the highest number of free workers
// getNextFreeRM returns the address and capacity of some RM with spare
// capacity, or ("", -1) when none is free.
func (gs *GridSdr) getNextFreeRM() (string, int) {
	for addr, capacity := range gs.getRMCapacities() {
		if capacity > 0 {
			return addr, int(capacity)
		}
	}
	return "", -1
}
// getRMCapacities asks every known RM for its capacity; RMs that fail to
// answer are simply left out of the result.
func (gs *GridSdr) getRMCapacities() map[string]int64 {
	capacities := make(map[string]int64)
	args := gs.rpcArgsForGS(common.GetCapacityMsg)
	for addr := range gs.rmNodes.GetAll() {
		if x, e := rpcSendMsgToRM(addr, &args); e == nil {
			capacities[addr] = int64(x)
		}
	}
	return capacities
}
// notifyAndPopulateGSs announces this node to the given grid schedulers and
// records every scheduler that acknowledged.
func (gs *GridSdr) notifyAndPopulateGSs(nodes []string) {
	args := gs.rpcArgsForGS(common.GSUpMsg)
	for _, node := range nodes {
		if id, e := rpcSendMsgToGS(node, &args); e == nil {
			gs.gsNodes.SetInt(node, int64(id))
		}
	}
}
// notifyAndPopulateRMs announces this node to the given resource managers and
// records every manager that acknowledged.
func (gs *GridSdr) notifyAndPopulateRMs(nodes []string) {
	args := gs.rpcArgsForGS(common.RMUpMsg)
	for _, node := range nodes {
		if id, e := rpcSendMsgToRM(node, &args); e == nil {
			gs.rmNodes.SetInt(node, int64(id))
		}
	}
}
// obtainCritSection implements most of the Ricart-Agrawala algorithm, it sends the critical section request and then wait for responses until some timeout.
// Initially we set the mutexState to StateWanted, if the critical section is obtained we set it to StateHeld.
// NOTE: this function isn't designed to be thread safe, it is run periodically in `runTasks`.
func (gs *GridSdr) obtainCritSection() {
	// sanity checks: we must start from the released state with no stray replies
	if gs.mutexState.Get().(common.MutexState) != common.StateReleased {
		log.Panicf("Should not be in CS, state: %v\n", gs)
	}
	if len(gs.mutexRespChan) != 0 {
		log.Panic("Nodes following the protocol shouldn't send more messages")
	}
	gs.mutexState.Set(common.StateWanted)
	gs.clock.Tick()
	args := gs.rpcArgsForGS(common.MutexReq)
	addrs := common.SliceFromMap(gs.gsNodes.GetAll())
	// successes counts how many GSs actually received our request, i.e. how
	// many replies we should wait for
	successes := rpcGo(addrs, &args, rpcSendMsgToGS)
	gs.reqClock = gs.clock.Geti64()
	// wait until others has written to mutexRespChan or time out (5s)
	cnt := 0
	timeout := time.After(5 * time.Second)
loop:
	for {
		if cnt >= successes {
			break
		}
		select {
		case <-gs.mutexRespChan:
			cnt++
		case <-timeout:
			break loop
		}
	}
	// now empty gs.mutexRespChan because we received all the messages
	// NOTE: probably not necessary
	common.EmptyIntChan(gs.mutexRespChan)
	// here we're in critical section
	gs.mutexState.Set(common.StateHeld)
	log.Println("In CS!", gs.ID)
}
// releaseCritSection sets the mutexState to StateReleased and then runs all the queued requests.
func (gs *GridSdr) releaseCritSection() {
	gs.mutexState.Set(common.StateReleased)
	// drain every deferred reply queued by respCritSection; the default case
	// exits once the queue is empty
	for {
		select {
		case req := <-gs.mutexReqChan:
			_, e := req()
			if e != nil {
				log.Panic("request failed with", e)
			}
		default:
			log.Println("Out CS!", gs.ID)
			return
		}
	}
}
// elect implements the Bully algorithm.
func (gs *GridSdr) elect() {
	defer func() {
		gs.inElection.Set(false)
	}()
	gs.inElection.Set(true)
	gs.clock.Tick()
	// oks counts the nodes with a non-lower ID that answered our challenge
	oks := 0
	args := gs.rpcArgsForGS(common.ElectionMsg)
	for k, v := range gs.gsNodes.GetAll() {
		if v.ID < int64(gs.ID) {
			continue // do nothing to lower ids
		}
		_, e := rpcSendMsgToGS(k, &args)
		if e == nil {
			oks++
		}
	}
	// if no responses, then set the node itself as leader, and tell the others
	if oks == 0 {
		gs.clock.Tick()
		gs.leader = gs.Addr
		log.Printf("I'm the leader (%v).\n", gs.leader)
		args := gs.rpcArgsForGS(common.CoordinateMsg)
		addrs := common.SliceFromMap(gs.gsNodes.GetAll())
		rpcGo(addrs, &args, rpcSendMsgToGS) // NOTE: ok to fail the send, because nodes might be done
	}
	// artificially make the election last longer so that multiple messages
	// requests won't initialise multiple election runs
	time.Sleep(time.Second)
}
// RecvMsg is called remotely, it updates the Lamport clock first and then performs tasks depending on the message type.
func (gs *GridSdr) RecvMsg(args *RPCArgs, reply *int) error {
	log.Printf("Msg received %v\n", *args)
	*reply = 1
	gs.clock.Set(common.Max64(gs.clock.Geti64(), args.Clock) + 1) // update Lamport clock
	switch args.Type {
	case common.CoordinateMsg:
		gs.leader = args.Addr
		log.Printf("Leader set to %v\n", gs.leader)
	case common.ElectionMsg:
		// don't start a new election if one is already running
		if !gs.inElection.Get().(bool) {
			go gs.elect()
		}
	case common.MutexReq:
		go gs.respCritSection(*args)
	case common.MutexResp:
		gs.mutexRespChan <- 0
	case common.GSUpMsg:
		*reply = gs.ID
		gs.gsNodes.SetInt(args.Addr, int64(args.ID))
	case common.RMUpMsg:
		*reply = gs.ID
		gs.rmNodes.SetInt(args.Addr, int64(args.ID))
	default:
		log.Panic("Invalid message!", args)
	}
	return nil
}
// RecvJobs appends new jobs into the jobs queue.
// NOTE: this function should not be called directly by the client, it requires CS.
func (gs *GridSdr) RecvJobs(jobs *[]Job, reply *int) error {
	incoming := *jobs
	log.Printf("%v new incoming jobs.\n", len(incoming))
	for _, j := range incoming {
		gs.incomingJobAddChan <- j
	}
	*reply = 0
	return nil
}
// RecvScheduledJobs records jobs that another GS has scheduled.
// NOTE: this function should not be called directly by the client, it requires CS.
func (gs *GridSdr) RecvScheduledJobs(jobs *[]Job, reply *int) error {
	scheduled := *jobs
	log.Printf("Adding %v scheduled jobs.\n", len(scheduled))
	for _, j := range scheduled {
		gs.scheduledJobAddChan <- j
	}
	*reply = 0
	return nil
}
// DropJobs asks the scheduling loop to drop the first *n incoming jobs.
func (gs *GridSdr) DropJobs(n *int, reply *int) error {
	count := *n
	log.Printf("Dropping %v jobs\n", count)
	gs.incomingJobRmChan <- count
	*reply = 0
	return nil
}
// SyncCompletedJobs is called by the RM when job(s) are completed.
// We acquire a critical section and propagate the change to everybody.
func (gs *GridSdr) SyncCompletedJobs(jobs *[]int64, reply *int) error {
	c := make(chan int)
	gs.tasks <- func() (interface{}, error) {
		// remove it from myself too
		// TODO this is repeated code, more elegant if RPC call can be done on myself too
		// Route the removal through scheduledJobRmChan instead of deleting from
		// gs.scheduledJobs directly: that map is maintained by the
		// updateScheduledJobs goroutine, and an unsynchronised delete from this
		// task goroutine is a data race (Go maps are not safe for concurrent use).
		for _, job := range *jobs {
			gs.scheduledJobRmChan <- job
		}
		// remove it from everybody else
		rpcInt64sGo(common.SliceFromMap(gs.gsNodes.GetAll()), jobs, rpcRemoveCompletedJobs)
		c <- 0
		return 0, nil
	}
	<-c
	*reply = 0
	return nil
}
// RemoveCompletedJobs is called by another GS to remove job(s) from the scheduledJobAddChan
func (gs *GridSdr) RemoveCompletedJobs(jobs *[]int64, reply *int) error {
	ids := *jobs
	log.Printf("I'm removing %v jobs.\n", len(ids))
	for _, id := range ids {
		gs.scheduledJobRmChan <- id
	}
	*reply = 0
	return nil
}
// AddJobsTask is called by the client to add job(s) to the tasks queue, it returns when the job is synchronised.
func (gs *GridSdr) AddJobsTask(jobs *[]Job, reply *int) error {
	// c blocks this RPC until the queued task has run inside the critical section
	c := make(chan int)
	gs.tasks <- func() (interface{}, error) {
		// add jobs to myself
		// TODO more elegant if RPC call on myself
		r := -1
		e := gs.RecvJobs(jobs, &r)
		// add jobs to the others
		rpcJobsGo(common.SliceFromMap(gs.gsNodes.GetAll()), jobs, rpcSyncJobs)
		c <- 0
		return r, e
	}
	<-c
	*reply = 0
	return nil
}
// runTasks queries the tasks queue and if there are outstanding tasks it will request for critical and run the tasks.
func (gs *GridSdr) runTasks() {
	for {
		// check whether there are tasks that needs running every 100ms
		time.Sleep(100 * time.Millisecond)
		if len(gs.tasks) > 0 {
			// acquire CS, run the tasks, run for 1ms at most, then release CS
			gs.obtainCritSection()
			timeout := time.After(time.Millisecond)
		inner_loop:
			for {
				select {
				case task := <-gs.tasks:
					// tasks run while holding the distributed mutex
					_, e := task()
					if e != nil {
						log.Panic("task failed with", e)
					}
				case <-timeout:
					break inner_loop
				// NOTE(review): the default case makes this select non-blocking,
				// so the loop exits as soon as the queue is momentarily empty and
				// the 1ms timeout branch is almost never the one that fires —
				// confirm "drain whatever is ready" is the intended semantics
				default:
					break inner_loop
				}
			}
			gs.releaseCritSection()
		}
	}
}
// argsIsLater checks whether args has a later Lamport clock, tie break using node ID.
func (gs *GridSdr) argsIsLater(args RPCArgs) bool {
	mine := gs.clock.Geti64()
	if mine != args.Clock {
		return mine < args.Clock
	}
	return gs.ID < args.ID
}
// respCritSection puts the critical section response into the response queue when it can't respond straight away.
func (gs *GridSdr) respCritSection(args RPCArgs) {
	// resp replies to the requesting node; it either runs immediately or is
	// queued on mutexReqChan and run when the CS is released
	resp := func() (interface{}, error) {
		// NOTE: use gs.reqClock instead of the normal clock
		rpcSendMsgToGS(args.Addr, &RPCArgs{gs.ID, gs.Addr, common.MutexResp, gs.reqClock})
		return 0, nil
	}
	st := gs.mutexState.Get().(common.MutexState)
	// Ricart-Agrawala: defer the reply while holding the CS, or while wanting
	// it with an earlier request (ties broken by node ID); otherwise reply now
	if st == common.StateHeld || (st == common.StateWanted && gs.argsIsLater(args)) {
		gs.mutexReqChan <- resp
	} else {
		resp()
	}
}
// pollLeader polls the leader node and initiates the election algorithm if the leader goes offline.
func (gs *GridSdr) pollLeader() {
	for {
		time.Sleep(time.Second)
		// don't do anything if election is running or I'm leader
		if gs.inElection.Get().(bool) || gs.imLeader() {
			continue
		}
		// a successful dial counts as "leader alive"; close the connection immediately
		remote, e := rpc.DialHTTP("tcp", gs.leader)
		if e != nil {
			log.Printf("Leader %v not online (DialHTTP), initialising election.\n", gs.leader)
			gs.elect()
		} else {
			remote.Close()
		}
	}
}
|
package action
import (
"encoding/json"
"errors"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"unicode"
"github.com/zyedidia/json5"
"github.com/zyedidia/micro/v2/internal/config"
"github.com/zyedidia/micro/v2/internal/screen"
"github.com/zyedidia/tcell"
)
// Binder maps a pane kind ("info", "buffer", "terminal") to the function
// that registers a key binding for that pane type.
var Binder = map[string]func(e Event, action string){
	"info":     InfoMapEvent,
	"buffer":   BufMapEvent,
	"terminal": TermMapEvent,
}
// createBindingsIfNotExist writes an empty JSON object to fname if the file
// does not exist yet.
func createBindingsIfNotExist(fname string) {
	if _, e := os.Stat(fname); os.IsNotExist(e) {
		// NOTE(review): the WriteFile error is silently ignored; callers
		// re-stat the file afterwards, so a failure just means the defaults apply
		ioutil.WriteFile(fname, []byte("{}"), 0644)
	}
}
// InitBindings initializes the bindings map by reading from bindings.json
func InitBindings() {
	config.Bindings = DefaultBindings("buffer")
	var parsed map[string]interface{}
	filename := filepath.Join(config.ConfigDir, "bindings.json")
	createBindingsIfNotExist(filename)
	if _, e := os.Stat(filename); e == nil {
		input, err := ioutil.ReadFile(filename)
		if err != nil {
			screen.TermMessage("Error reading bindings.json file: " + err.Error())
			return
		}
		err = json5.Unmarshal(input, &parsed)
		if err != nil {
			screen.TermMessage("Error reading bindings.json:", err.Error())
		}
	}
	// register the defaults for every pane kind first, so the user entries
	// below can take effect afterwards
	for p, bind := range Binder {
		defaults := DefaultBindings(p)
		for k, v := range defaults {
			BindKey(k, v, bind)
		}
	}
	// user entries: a plain string is a buffer binding; a map groups
	// bindings by pane kind
	for k, v := range parsed {
		switch val := v.(type) {
		case string:
			BindKey(k, val, Binder["buffer"])
		case map[string]interface{}:
			bind := Binder[k]
			for e, a := range val {
				s, ok := a.(string)
				if !ok {
					screen.TermMessage("Error reading bindings.json: non-string and non-map entry", k)
				} else {
					BindKey(e, s, bind)
				}
			}
		default:
			screen.TermMessage("Error reading bindings.json: non-string and non-map entry", k)
		}
	}
}
// BindKey registers action string v for the event named k using the given
// bind function (one of the Binder entries).
func BindKey(k, v string, bind func(e Event, a string)) {
	event, err := findEvent(k)
	if err != nil {
		screen.TermMessage(err)
		// bail out: event is nil here, and binding a nil event would only
		// push the failure further downstream
		return
	}
	bind(event, v)
}
// r matches one "<...>" group, used to split key sequences like "<Ctrl-x><Ctrl-s>".
var r = regexp.MustCompile("<(.+?)>")
// findEvents parses k as a sequence of "<event>" groups into a
// KeySequenceEvent. ok is false when k is not in sequence form at all;
// err is non-nil when one of the groups names an unknown event.
func findEvents(k string) (b KeySequenceEvent, ok bool, err error) {
	var events []Event = nil
	for len(k) > 0 {
		groups := r.FindStringSubmatchIndex(k)
		if len(groups) > 3 {
			if events == nil {
				events = make([]Event, 0, 3)
			}
			// groups[2]:groups[3] is the text inside the angle brackets
			e, ok := findSingleEvent(k[groups[2]:groups[3]])
			if !ok {
				return KeySequenceEvent{}, false, errors.New("Invalid event " + k[groups[2]:groups[3]])
			}
			events = append(events, e)
			// continue after the closing '>'
			k = k[groups[3]+1:]
		} else {
			return KeySequenceEvent{}, false, nil
		}
	}
	return KeySequenceEvent{events}, true, nil
}
// findSingleEvent will find binding Key 'b' using string 'k'
func findSingleEvent(k string) (b Event, ok bool) {
	modifiers := tcell.ModNone
	// First, we'll strip off all the modifiers in the name and add them to the
	// ModMask
modSearch:
	for {
		switch {
		case strings.HasPrefix(k, "-"):
			// We optionally support dashes between modifiers
			k = k[1:]
		case strings.HasPrefix(k, "Ctrl") && k != "CtrlH":
			// CtrlH technically does not have a 'Ctrl' modifier because it is really backspace
			k = k[4:]
			modifiers |= tcell.ModCtrl
		case strings.HasPrefix(k, "Alt"):
			k = k[3:]
			modifiers |= tcell.ModAlt
		case strings.HasPrefix(k, "Shift"):
			k = k[5:]
			modifiers |= tcell.ModShift
		case strings.HasPrefix(k, "\x1b"):
			// raw escape sequence: register it with the screen and bind it verbatim
			screen.Screen.RegisterRawSeq(k)
			return RawEvent{
				esc: k,
			}, true
		default:
			break modSearch
		}
	}
	if len(k) == 0 {
		return KeyEvent{}, false
	}
	// Control is handled in a special way, since the terminal sends explicitly
	// marked escape sequences for control keys
	// We should check for Control keys first
	if modifiers&tcell.ModCtrl != 0 {
		// see if the key is in bindingKeys with the Ctrl prefix.
		k = string(unicode.ToUpper(rune(k[0]))) + k[1:]
		if code, ok := keyEvents["Ctrl"+k]; ok {
			var r tcell.Key
			// Special case for escape, for some reason tcell doesn't send it with the esc character
			if code < 256 && code != 27 {
				r = code
			}
			// It is, we're done.
			return KeyEvent{
				code: code,
				mod:  modifiers,
				r:    rune(r),
			}, true
		}
	}
	// See if we can find the key in bindingKeys
	if code, ok := keyEvents[k]; ok {
		var r tcell.Key
		// Special case for escape, for some reason tcell doesn't send it with the esc character
		if code < 256 && code != 27 {
			r = code
		}
		return KeyEvent{
			code: code,
			mod:  modifiers,
			r:    rune(r),
		}, true
	}
	// See if we can find the key in bindingMouse
	if code, ok := mouseEvents[k]; ok {
		return MouseEvent{
			btn: code,
			mod: modifiers,
		}, true
	}
	// If we were given one character, then we've got a rune.
	if len(k) == 1 {
		return KeyEvent{
			code: tcell.KeyRune,
			mod:  modifiers,
			r:    rune(k[0]),
		}, true
	}
	// We don't know what happened.
	return KeyEvent{}, false
}
// findEvent resolves k to an Event, trying sequence form first and falling
// back to a single event; it errors when k is not bindable either way.
func findEvent(k string) (Event, error) {
	seq, isSeq, err := findEvents(k)
	if err != nil {
		return nil, err
	}
	if isSeq {
		return seq, nil
	}
	single, ok := findSingleEvent(k)
	if !ok {
		return nil, errors.New(k + " is not a bindable event")
	}
	return single, nil
}
// TryBindKey tries to bind a key by writing to config.ConfigDir/bindings.json
// Returns true if the keybinding already existed and a possible error
func TryBindKey(k, v string, overwrite bool) (bool, error) {
	var e error
	var parsed map[string]string
	filename := filepath.Join(config.ConfigDir, "bindings.json")
	createBindingsIfNotExist(filename)
	if _, e = os.Stat(filename); e == nil {
		input, err := ioutil.ReadFile(filename)
		if err != nil {
			return false, errors.New("Error reading bindings.json file: " + err.Error())
		}
		err = json5.Unmarshal(input, &parsed)
		if err != nil {
			return false, errors.New("Error reading bindings.json: " + err.Error())
		}
		key, err := findEvent(k)
		if err != nil {
			return false, err
		}
		// look for an existing entry that resolves to the same event (its
		// spelling in the file may differ from k)
		found := false
		for ev := range parsed {
			if e, err := findEvent(ev); err == nil {
				if e == key {
					if overwrite {
						parsed[ev] = v
					}
					found = true
					break
				}
			}
		}
		if found && !overwrite {
			return true, nil
		} else if !found {
			parsed[k] = v
		}
		BindKey(k, v, Binder["buffer"])
		txt, _ := json.MarshalIndent(parsed, "", " ")
		// NOTE(review): this returns true even when the binding did not
		// previously exist, which contradicts the doc comment — confirm
		// whether callers rely on the current behaviour before changing it
		return true, ioutil.WriteFile(filename, append(txt, '\n'), 0644)
	}
	return false, e
}
// UnbindKey removes the binding for a key from the bindings.json file
func UnbindKey(k string) error {
	var e error
	var parsed map[string]string
	filename := filepath.Join(config.ConfigDir, "bindings.json")
	createBindingsIfNotExist(filename)
	if _, e = os.Stat(filename); e == nil {
		input, err := ioutil.ReadFile(filename)
		if err != nil {
			return errors.New("Error reading bindings.json file: " + err.Error())
		}
		err = json5.Unmarshal(input, &parsed)
		if err != nil {
			return errors.New("Error reading bindings.json: " + err.Error())
		}
		key, err := findEvent(k)
		if err != nil {
			return err
		}
		// drop whichever entry resolves to the same event as k (its
		// spelling in the file may differ)
		for ev := range parsed {
			if e, err := findEvent(ev); err == nil {
				if e == key {
					delete(parsed, ev)
					break
				}
			}
		}
		// restore the default binding if there is one, otherwise unmap entirely
		defaults := DefaultBindings("buffer")
		if a, ok := defaults[k]; ok {
			BindKey(k, a, Binder["buffer"])
		} else if _, ok := config.Bindings[k]; ok {
			BufUnmap(key)
			delete(config.Bindings, k)
		}
		txt, _ := json.MarshalIndent(parsed, "", " ")
		return ioutil.WriteFile(filename, append(txt, '\n'), 0644)
	}
	return e
}
// mouseEvents maps binding names to tcell mouse button/wheel masks.
var mouseEvents = map[string]tcell.ButtonMask{
	"MouseLeft":       tcell.Button1,
	"MouseMiddle":     tcell.Button2,
	"MouseRight":      tcell.Button3,
	"MouseWheelUp":    tcell.WheelUp,
	"MouseWheelDown":  tcell.WheelDown,
	"MouseWheelLeft":  tcell.WheelLeft,
	"MouseWheelRight": tcell.WheelRight,
}
// keyEvents maps binding names to tcell key codes; these are the names users
// may write in bindings.json.
var keyEvents = map[string]tcell.Key{
	"Up":             tcell.KeyUp,
	"Down":           tcell.KeyDown,
	"Right":          tcell.KeyRight,
	"Left":           tcell.KeyLeft,
	"UpLeft":         tcell.KeyUpLeft,
	"UpRight":        tcell.KeyUpRight,
	"DownLeft":       tcell.KeyDownLeft,
	"DownRight":      tcell.KeyDownRight,
	"Center":         tcell.KeyCenter,
	"PageUp":         tcell.KeyPgUp,
	"PageDown":       tcell.KeyPgDn,
	"Home":           tcell.KeyHome,
	"End":            tcell.KeyEnd,
	"Insert":         tcell.KeyInsert,
	"Delete":         tcell.KeyDelete,
	"Help":           tcell.KeyHelp,
	"Exit":           tcell.KeyExit,
	"Clear":          tcell.KeyClear,
	"Cancel":         tcell.KeyCancel,
	"Print":          tcell.KeyPrint,
	"Pause":          tcell.KeyPause,
	"Backtab":        tcell.KeyBacktab,
	"F1":             tcell.KeyF1,
	"F2":             tcell.KeyF2,
	"F3":             tcell.KeyF3,
	"F4":             tcell.KeyF4,
	"F5":             tcell.KeyF5,
	"F6":             tcell.KeyF6,
	"F7":             tcell.KeyF7,
	"F8":             tcell.KeyF8,
	"F9":             tcell.KeyF9,
	"F10":            tcell.KeyF10,
	"F11":            tcell.KeyF11,
	"F12":            tcell.KeyF12,
	"F13":            tcell.KeyF13,
	"F14":            tcell.KeyF14,
	"F15":            tcell.KeyF15,
	"F16":            tcell.KeyF16,
	"F17":            tcell.KeyF17,
	"F18":            tcell.KeyF18,
	"F19":            tcell.KeyF19,
	"F20":            tcell.KeyF20,
	"F21":            tcell.KeyF21,
	"F22":            tcell.KeyF22,
	"F23":            tcell.KeyF23,
	"F24":            tcell.KeyF24,
	"F25":            tcell.KeyF25,
	"F26":            tcell.KeyF26,
	"F27":            tcell.KeyF27,
	"F28":            tcell.KeyF28,
	"F29":            tcell.KeyF29,
	"F30":            tcell.KeyF30,
	"F31":            tcell.KeyF31,
	"F32":            tcell.KeyF32,
	"F33":            tcell.KeyF33,
	"F34":            tcell.KeyF34,
	"F35":            tcell.KeyF35,
	"F36":            tcell.KeyF36,
	"F37":            tcell.KeyF37,
	"F38":            tcell.KeyF38,
	"F39":            tcell.KeyF39,
	"F40":            tcell.KeyF40,
	"F41":            tcell.KeyF41,
	"F42":            tcell.KeyF42,
	"F43":            tcell.KeyF43,
	"F44":            tcell.KeyF44,
	"F45":            tcell.KeyF45,
	"F46":            tcell.KeyF46,
	"F47":            tcell.KeyF47,
	"F48":            tcell.KeyF48,
	"F49":            tcell.KeyF49,
	"F50":            tcell.KeyF50,
	"F51":            tcell.KeyF51,
	"F52":            tcell.KeyF52,
	"F53":            tcell.KeyF53,
	"F54":            tcell.KeyF54,
	"F55":            tcell.KeyF55,
	"F56":            tcell.KeyF56,
	"F57":            tcell.KeyF57,
	"F58":            tcell.KeyF58,
	"F59":            tcell.KeyF59,
	"F60":            tcell.KeyF60,
	"F61":            tcell.KeyF61,
	"F62":            tcell.KeyF62,
	"F63":            tcell.KeyF63,
	"F64":            tcell.KeyF64,
	"CtrlSpace":      tcell.KeyCtrlSpace,
	"CtrlA":          tcell.KeyCtrlA,
	"CtrlB":          tcell.KeyCtrlB,
	"CtrlC":          tcell.KeyCtrlC,
	"CtrlD":          tcell.KeyCtrlD,
	"CtrlE":          tcell.KeyCtrlE,
	"CtrlF":          tcell.KeyCtrlF,
	"CtrlG":          tcell.KeyCtrlG,
	"CtrlH":          tcell.KeyCtrlH,
	"CtrlI":          tcell.KeyCtrlI,
	"CtrlJ":          tcell.KeyCtrlJ,
	"CtrlK":          tcell.KeyCtrlK,
	"CtrlL":          tcell.KeyCtrlL,
	"CtrlM":          tcell.KeyCtrlM,
	"CtrlN":          tcell.KeyCtrlN,
	"CtrlO":          tcell.KeyCtrlO,
	"CtrlP":          tcell.KeyCtrlP,
	"CtrlQ":          tcell.KeyCtrlQ,
	"CtrlR":          tcell.KeyCtrlR,
	"CtrlS":          tcell.KeyCtrlS,
	"CtrlT":          tcell.KeyCtrlT,
	"CtrlU":          tcell.KeyCtrlU,
	"CtrlV":          tcell.KeyCtrlV,
	"CtrlW":          tcell.KeyCtrlW,
	"CtrlX":          tcell.KeyCtrlX,
	"CtrlY":          tcell.KeyCtrlY,
	"CtrlZ":          tcell.KeyCtrlZ,
	"CtrlLeftSq":     tcell.KeyCtrlLeftSq,
	"CtrlBackslash":  tcell.KeyCtrlBackslash,
	"CtrlRightSq":    tcell.KeyCtrlRightSq,
	"CtrlCarat":      tcell.KeyCtrlCarat,
	"CtrlUnderscore": tcell.KeyCtrlUnderscore,
	"Tab":            tcell.KeyTab,
	"Esc":            tcell.KeyEsc,
	"Escape":         tcell.KeyEscape,
	"Enter":          tcell.KeyEnter,
	"Backspace":      tcell.KeyBackspace2,
	"OldBackspace":   tcell.KeyBackspace,
	// I renamed these keys to PageUp and PageDown but I don't want to break someone's keybindings
	"PgUp":   tcell.KeyPgUp,
	"PgDown": tcell.KeyPgDn,
}
Don't overwrite user bindings
This fix still needs more work.
Ref #1821
package action
import (
"encoding/json"
"errors"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"unicode"
"github.com/zyedidia/json5"
"github.com/zyedidia/micro/v2/internal/config"
"github.com/zyedidia/micro/v2/internal/screen"
"github.com/zyedidia/tcell"
)
// Binder maps a pane kind ("info", "buffer", "terminal") to the function
// that registers a key binding for that pane type.
var Binder = map[string]func(e Event, action string){
	"info":     InfoMapEvent,
	"buffer":   BufMapEvent,
	"terminal": TermMapEvent,
}
// createBindingsIfNotExist writes an empty JSON object to fname if the file
// does not exist yet.
func createBindingsIfNotExist(fname string) {
	if _, e := os.Stat(fname); os.IsNotExist(e) {
		// NOTE(review): the WriteFile error is silently ignored; callers
		// re-stat the file afterwards, so a failure just means the defaults apply
		ioutil.WriteFile(fname, []byte("{}"), 0644)
	}
}
// InitBindings initializes the bindings map by reading from bindings.json
func InitBindings() {
	config.Bindings = DefaultBindings("buffer")
	var parsed map[string]interface{}
	filename := filepath.Join(config.ConfigDir, "bindings.json")
	createBindingsIfNotExist(filename)
	if _, e := os.Stat(filename); e == nil {
		input, err := ioutil.ReadFile(filename)
		if err != nil {
			screen.TermMessage("Error reading bindings.json file: " + err.Error())
			return
		}
		err = json5.Unmarshal(input, &parsed)
		if err != nil {
			screen.TermMessage("Error reading bindings.json:", err.Error())
		}
	}
	// user entries: a plain string is a buffer binding; a map groups
	// bindings by pane kind
	for k, v := range parsed {
		switch val := v.(type) {
		case string:
			BindKey(k, val, Binder["buffer"])
		case map[string]interface{}:
			bind := Binder[k]
			for e, a := range val {
				s, ok := a.(string)
				if !ok {
					screen.TermMessage("Error reading bindings.json: non-string and non-map entry", k)
				} else {
					BindKey(e, s, bind)
				}
			}
		default:
			screen.TermMessage("Error reading bindings.json: non-string and non-map entry", k)
		}
	}
	// NOTE(review): the defaults are registered AFTER the user's bindings
	// here; unless the per-pane map functions skip already-bound events, a
	// default will re-bind over a user's custom binding — confirm the
	// intended ordering
	for p, bind := range Binder {
		defaults := DefaultBindings(p)
		for k, v := range defaults {
			BindKey(k, v, bind)
		}
	}
}
// BindKey registers action string v for the event named k using the given
// bind function (one of the Binder entries).
func BindKey(k, v string, bind func(e Event, a string)) {
	event, err := findEvent(k)
	if err != nil {
		screen.TermMessage(err)
		// bail out: event is nil here, and binding a nil event would only
		// push the failure further downstream
		return
	}
	bind(event, v)
}
// r matches one "<...>" group, used to split key sequences like "<Ctrl-x><Ctrl-s>".
var r = regexp.MustCompile("<(.+?)>")
// findEvents parses k as a sequence of "<event>" groups into a
// KeySequenceEvent. ok is false when k is not in sequence form at all;
// err is non-nil when one of the groups names an unknown event.
func findEvents(k string) (b KeySequenceEvent, ok bool, err error) {
	var events []Event = nil
	for len(k) > 0 {
		groups := r.FindStringSubmatchIndex(k)
		if len(groups) > 3 {
			if events == nil {
				events = make([]Event, 0, 3)
			}
			// groups[2]:groups[3] is the text inside the angle brackets
			e, ok := findSingleEvent(k[groups[2]:groups[3]])
			if !ok {
				return KeySequenceEvent{}, false, errors.New("Invalid event " + k[groups[2]:groups[3]])
			}
			events = append(events, e)
			// continue after the closing '>'
			k = k[groups[3]+1:]
		} else {
			return KeySequenceEvent{}, false, nil
		}
	}
	return KeySequenceEvent{events}, true, nil
}
// findSingleEvent will find binding Key 'b' using string 'k'
func findSingleEvent(k string) (b Event, ok bool) {
	modifiers := tcell.ModNone
	// First, we'll strip off all the modifiers in the name and add them to the
	// ModMask
modSearch:
	for {
		switch {
		case strings.HasPrefix(k, "-"):
			// We optionally support dashes between modifiers
			k = k[1:]
		case strings.HasPrefix(k, "Ctrl") && k != "CtrlH":
			// CtrlH technically does not have a 'Ctrl' modifier because it is really backspace
			k = k[4:]
			modifiers |= tcell.ModCtrl
		case strings.HasPrefix(k, "Alt"):
			k = k[3:]
			modifiers |= tcell.ModAlt
		case strings.HasPrefix(k, "Shift"):
			k = k[5:]
			modifiers |= tcell.ModShift
		case strings.HasPrefix(k, "\x1b"):
			// raw escape sequence: register it with the screen and bind it verbatim
			screen.Screen.RegisterRawSeq(k)
			return RawEvent{
				esc: k,
			}, true
		default:
			break modSearch
		}
	}
	if len(k) == 0 {
		return KeyEvent{}, false
	}
	// Control is handled in a special way, since the terminal sends explicitly
	// marked escape sequences for control keys
	// We should check for Control keys first
	if modifiers&tcell.ModCtrl != 0 {
		// see if the key is in bindingKeys with the Ctrl prefix.
		k = string(unicode.ToUpper(rune(k[0]))) + k[1:]
		if code, ok := keyEvents["Ctrl"+k]; ok {
			var r tcell.Key
			// Special case for escape, for some reason tcell doesn't send it with the esc character
			if code < 256 && code != 27 {
				r = code
			}
			// It is, we're done.
			return KeyEvent{
				code: code,
				mod:  modifiers,
				r:    rune(r),
			}, true
		}
	}
	// See if we can find the key in bindingKeys
	if code, ok := keyEvents[k]; ok {
		var r tcell.Key
		// Special case for escape, for some reason tcell doesn't send it with the esc character
		if code < 256 && code != 27 {
			r = code
		}
		return KeyEvent{
			code: code,
			mod:  modifiers,
			r:    rune(r),
		}, true
	}
	// See if we can find the key in bindingMouse
	if code, ok := mouseEvents[k]; ok {
		return MouseEvent{
			btn: code,
			mod: modifiers,
		}, true
	}
	// If we were given one character, then we've got a rune.
	if len(k) == 1 {
		return KeyEvent{
			code: tcell.KeyRune,
			mod:  modifiers,
			r:    rune(k[0]),
		}, true
	}
	// We don't know what happened.
	return KeyEvent{}, false
}
// findEvent resolves k to an Event, trying sequence form first and falling
// back to a single event; it errors when k is not bindable either way.
func findEvent(k string) (Event, error) {
	seq, isSeq, err := findEvents(k)
	if err != nil {
		return nil, err
	}
	if isSeq {
		return seq, nil
	}
	single, ok := findSingleEvent(k)
	if !ok {
		return nil, errors.New(k + " is not a bindable event")
	}
	return single, nil
}
// TryBindKey tries to bind a key by writing to config.ConfigDir/bindings.json
// Returns true if the keybinding already existed and a possible error
func TryBindKey(k, v string, overwrite bool) (bool, error) {
	var e error
	var parsed map[string]string
	filename := filepath.Join(config.ConfigDir, "bindings.json")
	createBindingsIfNotExist(filename)
	if _, e = os.Stat(filename); e == nil {
		input, err := ioutil.ReadFile(filename)
		if err != nil {
			return false, errors.New("Error reading bindings.json file: " + err.Error())
		}
		err = json5.Unmarshal(input, &parsed)
		if err != nil {
			return false, errors.New("Error reading bindings.json: " + err.Error())
		}
		key, err := findEvent(k)
		if err != nil {
			return false, err
		}
		// look for an existing entry that resolves to the same event (its
		// spelling in the file may differ from k)
		found := false
		for ev := range parsed {
			if e, err := findEvent(ev); err == nil {
				if e == key {
					if overwrite {
						parsed[ev] = v
					}
					found = true
					break
				}
			}
		}
		if found && !overwrite {
			return true, nil
		} else if !found {
			parsed[k] = v
		}
		BindKey(k, v, Binder["buffer"])
		txt, _ := json.MarshalIndent(parsed, "", " ")
		// NOTE(review): this returns true even when the binding did not
		// previously exist, which contradicts the doc comment — confirm
		// whether callers rely on the current behaviour before changing it
		return true, ioutil.WriteFile(filename, append(txt, '\n'), 0644)
	}
	return false, e
}
// UnbindKey removes the binding for a key from the bindings.json file
func UnbindKey(k string) error {
	var e error
	var parsed map[string]string
	filename := filepath.Join(config.ConfigDir, "bindings.json")
	createBindingsIfNotExist(filename)
	if _, e = os.Stat(filename); e == nil {
		input, err := ioutil.ReadFile(filename)
		if err != nil {
			return errors.New("Error reading bindings.json file: " + err.Error())
		}
		err = json5.Unmarshal(input, &parsed)
		if err != nil {
			return errors.New("Error reading bindings.json: " + err.Error())
		}
		key, err := findEvent(k)
		if err != nil {
			return err
		}
		// drop whichever entry resolves to the same event as k (its
		// spelling in the file may differ)
		for ev := range parsed {
			if e, err := findEvent(ev); err == nil {
				if e == key {
					delete(parsed, ev)
					break
				}
			}
		}
		// restore the default binding if there is one, otherwise unmap entirely
		defaults := DefaultBindings("buffer")
		if a, ok := defaults[k]; ok {
			BindKey(k, a, Binder["buffer"])
		} else if _, ok := config.Bindings[k]; ok {
			BufUnmap(key)
			delete(config.Bindings, k)
		}
		txt, _ := json.MarshalIndent(parsed, "", " ")
		return ioutil.WriteFile(filename, append(txt, '\n'), 0644)
	}
	return e
}
// mouseEvents maps the bindable mouse-event names (as written in
// bindings.json) to their tcell button/wheel masks.
var mouseEvents = map[string]tcell.ButtonMask{
	"MouseLeft": tcell.Button1,
	"MouseMiddle": tcell.Button2,
	"MouseRight": tcell.Button3,
	"MouseWheelUp": tcell.WheelUp,
	"MouseWheelDown": tcell.WheelDown,
	"MouseWheelLeft": tcell.WheelLeft,
	"MouseWheelRight": tcell.WheelRight,
}

// keyEvents maps the bindable key names (as written in bindings.json) to
// their tcell key codes.
var keyEvents = map[string]tcell.Key{
	"Up": tcell.KeyUp,
	"Down": tcell.KeyDown,
	"Right": tcell.KeyRight,
	"Left": tcell.KeyLeft,
	"UpLeft": tcell.KeyUpLeft,
	"UpRight": tcell.KeyUpRight,
	"DownLeft": tcell.KeyDownLeft,
	"DownRight": tcell.KeyDownRight,
	"Center": tcell.KeyCenter,
	"PageUp": tcell.KeyPgUp,
	"PageDown": tcell.KeyPgDn,
	"Home": tcell.KeyHome,
	"End": tcell.KeyEnd,
	"Insert": tcell.KeyInsert,
	"Delete": tcell.KeyDelete,
	"Help": tcell.KeyHelp,
	"Exit": tcell.KeyExit,
	"Clear": tcell.KeyClear,
	"Cancel": tcell.KeyCancel,
	"Print": tcell.KeyPrint,
	"Pause": tcell.KeyPause,
	"Backtab": tcell.KeyBacktab,
	"F1": tcell.KeyF1,
	"F2": tcell.KeyF2,
	"F3": tcell.KeyF3,
	"F4": tcell.KeyF4,
	"F5": tcell.KeyF5,
	"F6": tcell.KeyF6,
	"F7": tcell.KeyF7,
	"F8": tcell.KeyF8,
	"F9": tcell.KeyF9,
	"F10": tcell.KeyF10,
	"F11": tcell.KeyF11,
	"F12": tcell.KeyF12,
	"F13": tcell.KeyF13,
	"F14": tcell.KeyF14,
	"F15": tcell.KeyF15,
	"F16": tcell.KeyF16,
	"F17": tcell.KeyF17,
	"F18": tcell.KeyF18,
	"F19": tcell.KeyF19,
	"F20": tcell.KeyF20,
	"F21": tcell.KeyF21,
	"F22": tcell.KeyF22,
	"F23": tcell.KeyF23,
	"F24": tcell.KeyF24,
	"F25": tcell.KeyF25,
	"F26": tcell.KeyF26,
	"F27": tcell.KeyF27,
	"F28": tcell.KeyF28,
	"F29": tcell.KeyF29,
	"F30": tcell.KeyF30,
	"F31": tcell.KeyF31,
	"F32": tcell.KeyF32,
	"F33": tcell.KeyF33,
	"F34": tcell.KeyF34,
	"F35": tcell.KeyF35,
	"F36": tcell.KeyF36,
	"F37": tcell.KeyF37,
	"F38": tcell.KeyF38,
	"F39": tcell.KeyF39,
	"F40": tcell.KeyF40,
	"F41": tcell.KeyF41,
	"F42": tcell.KeyF42,
	"F43": tcell.KeyF43,
	"F44": tcell.KeyF44,
	"F45": tcell.KeyF45,
	"F46": tcell.KeyF46,
	"F47": tcell.KeyF47,
	"F48": tcell.KeyF48,
	"F49": tcell.KeyF49,
	"F50": tcell.KeyF50,
	"F51": tcell.KeyF51,
	"F52": tcell.KeyF52,
	"F53": tcell.KeyF53,
	"F54": tcell.KeyF54,
	"F55": tcell.KeyF55,
	"F56": tcell.KeyF56,
	"F57": tcell.KeyF57,
	"F58": tcell.KeyF58,
	"F59": tcell.KeyF59,
	"F60": tcell.KeyF60,
	"F61": tcell.KeyF61,
	"F62": tcell.KeyF62,
	"F63": tcell.KeyF63,
	"F64": tcell.KeyF64,
	"CtrlSpace": tcell.KeyCtrlSpace,
	"CtrlA": tcell.KeyCtrlA,
	"CtrlB": tcell.KeyCtrlB,
	"CtrlC": tcell.KeyCtrlC,
	"CtrlD": tcell.KeyCtrlD,
	"CtrlE": tcell.KeyCtrlE,
	"CtrlF": tcell.KeyCtrlF,
	"CtrlG": tcell.KeyCtrlG,
	"CtrlH": tcell.KeyCtrlH,
	"CtrlI": tcell.KeyCtrlI,
	"CtrlJ": tcell.KeyCtrlJ,
	"CtrlK": tcell.KeyCtrlK,
	"CtrlL": tcell.KeyCtrlL,
	"CtrlM": tcell.KeyCtrlM,
	"CtrlN": tcell.KeyCtrlN,
	"CtrlO": tcell.KeyCtrlO,
	"CtrlP": tcell.KeyCtrlP,
	"CtrlQ": tcell.KeyCtrlQ,
	"CtrlR": tcell.KeyCtrlR,
	"CtrlS": tcell.KeyCtrlS,
	"CtrlT": tcell.KeyCtrlT,
	"CtrlU": tcell.KeyCtrlU,
	"CtrlV": tcell.KeyCtrlV,
	"CtrlW": tcell.KeyCtrlW,
	"CtrlX": tcell.KeyCtrlX,
	"CtrlY": tcell.KeyCtrlY,
	"CtrlZ": tcell.KeyCtrlZ,
	"CtrlLeftSq": tcell.KeyCtrlLeftSq,
	"CtrlBackslash": tcell.KeyCtrlBackslash,
	"CtrlRightSq": tcell.KeyCtrlRightSq,
	"CtrlCarat": tcell.KeyCtrlCarat,
	"CtrlUnderscore": tcell.KeyCtrlUnderscore,
	"Tab": tcell.KeyTab,
	"Esc": tcell.KeyEsc,
	"Escape": tcell.KeyEscape,
	"Enter": tcell.KeyEnter,
	"Backspace": tcell.KeyBackspace2,
	"OldBackspace": tcell.KeyBackspace,
	// I renamed these keys to PageUp and PageDown but I don't want to break someone's keybindings
	"PgUp": tcell.KeyPgUp,
	"PgDown": tcell.KeyPgDn,
}
|
// Package fusefrontend interfaces directly with the go-fuse library.
package fusefrontend
// FUSE operations on paths
import (
"os"
"path/filepath"
"sync"
"syscall"
"time"
"golang.org/x/sys/unix"
"github.com/hanwen/go-fuse/fuse"
"github.com/hanwen/go-fuse/fuse/nodefs"
"github.com/hanwen/go-fuse/fuse/pathfs"
"github.com/rfjakob/gocryptfs/internal/configfile"
"github.com/rfjakob/gocryptfs/internal/contentenc"
"github.com/rfjakob/gocryptfs/internal/nametransform"
"github.com/rfjakob/gocryptfs/internal/serialize_reads"
"github.com/rfjakob/gocryptfs/internal/syscallcompat"
"github.com/rfjakob/gocryptfs/internal/tlog"
)
// FS implements the go-fuse virtual filesystem interface.
type FS struct {
	pathfs.FileSystem // loopbackFileSystem, see go-fuse/fuse/pathfs/loopback.go
	args Args // Stores configuration arguments
	// dirIVLock: Lock()ed if any "gocryptfs.diriv" file is modified
	// Readers must RLock() it to prevent them from seeing intermediate
	// states
	dirIVLock sync.RWMutex
	// Filename encryption helper
	nameTransform *nametransform.NameTransform
	// Content encryption helper
	contentEnc *contentenc.ContentEnc
	// This lock is used by openWriteOnlyFile() to block concurrent opens while
	// it relaxes the permissions on a file.
	openWriteOnlyLock sync.RWMutex
	// MitigatedCorruptions is used to report data corruption that is internally
	// mitigated by ignoring the corrupt item. For example, when OpenDir() finds
	// a corrupt filename, we still return the other valid filenames.
	// The corruption is logged to syslog to inform the user, and in addition,
	// the corrupt filename is logged to this channel via
	// reportMitigatedCorruption().
	// "gocryptfs -fsck" reads from the channel to also catch these transparently-
	// mitigated corruptions.
	MitigatedCorruptions chan string
	// Track accesses to the filesystem so that we can know when to autounmount.
	// An access is considered to have happened on every call to encryptPath,
	// which is called as part of every filesystem operation.
	// (This flag uses a uint32 so that it can be reset with CompareAndSwapUint32.)
	AccessedSinceLastCheck uint32
	// dirCache: per-filesystem directory cache (see dirCacheStruct elsewhere
	// in this package for what exactly is cached).
	dirCache dirCacheStruct
}

// Compile-time check that *FS satisfies the pathfs.FileSystem interface.
var _ pathfs.FileSystem = &FS{} // Verify that interface is implemented.
// NewFS returns a new encrypted FUSE overlay filesystem that stores its
// ciphertext in args.Cipherdir, encrypting content with c and names with n.
func NewFS(args Args, c *contentenc.ContentEnc, n *nametransform.NameTransform) *FS {
	if args.SerializeReads {
		serialize_reads.InitSerializer()
	}
	if len(args.Exclude) != 0 {
		tlog.Warn.Printf("Forward mode does not support -exclude")
	}
	fs := &FS{
		FileSystem:    pathfs.NewLoopbackFileSystem(args.Cipherdir),
		args:          args,
		nameTransform: n,
		contentEnc:    c,
	}
	return fs
}
// GetAttr implements pathfs.Filesystem.
//
// GetAttr is symlink-safe through use of openBackingDir() and Fstatat().
func (fs *FS) GetAttr(relPath string, context *fuse.Context) (*fuse.Attr, fuse.Status) {
	tlog.Debug.Printf("FS.GetAttr(%q)", relPath)
	if fs.isFiltered(relPath) {
		return nil, fuse.EPERM
	}
	dirfd, cName, err := fs.openBackingDir(relPath)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	// Stat the ciphertext name relative to the backing dir fd;
	// AT_SYMLINK_NOFOLLOW prevents following a planted symlink.
	var st unix.Stat_t
	err = syscallcompat.Fstatat(dirfd, cName, &st, unix.AT_SYMLINK_NOFOLLOW)
	syscall.Close(dirfd)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	a := &fuse.Attr{}
	st2 := syscallcompat.Unix2syscall(st)
	a.FromStat(&st2)
	if a.IsRegular() {
		// Report the plaintext size, not the (larger) ciphertext size.
		a.Size = fs.contentEnc.CipherSizeToPlainSize(a.Size)
	} else if a.IsSymlink() {
		// Symlink targets are stored encrypted; the decrypted target
		// determines the reported size. Readlink errors are ignored here,
		// leaving a zero size.
		target, _ := fs.Readlink(relPath, context)
		a.Size = uint64(len(target))
	}
	if fs.args.ForceOwner != nil {
		a.Owner = *fs.args.ForceOwner
	}
	return a, fuse.OK
}
// mangleOpenFlags is used by Create() and Open() to convert the open flags the
// user wants to the flags we internally use to open the backing file.
// The returned flags always contain O_NOFOLLOW.
func (fs *FS) mangleOpenFlags(flags uint32) (newFlags int) {
	newFlags = int(flags)
	if newFlags&os.O_WRONLY != 0 {
		// Convert WRONLY to RDWR: read access is always needed for
		// read-modify-write cycles.
		newFlags = (newFlags &^ os.O_WRONLY) | os.O_RDWR
	}
	// Append mode is incompatible with RMW (we need to seek back), drop it.
	newFlags &^= os.O_APPEND
	// O_DIRECT accesses must be aligned in both offset and length. Our crypto
	// header throws the alignment off even for aligned userspace accesses
	// (xfstests generic/013 on ext4 used to produce EINVAL storms), so force
	// buffered IO by stripping the flag.
	newFlags &^= syscallcompat.O_DIRECT
	// O_NOFOLLOW guards against symlink races.
	newFlags |= syscall.O_NOFOLLOW
	return newFlags
}
// Open - FUSE call. Open already-existing file.
//
// Symlink-safe through Openat().
func (fs *FS) Open(path string, flags uint32, context *fuse.Context) (fuseFile nodefs.File, status fuse.Status) {
	if fs.isFiltered(path) {
		return nil, fuse.EPERM
	}
	newFlags := fs.mangleOpenFlags(flags)
	// Taking this lock makes sure we don't race openWriteOnlyFile()
	fs.openWriteOnlyLock.RLock()
	defer fs.openWriteOnlyLock.RUnlock()
	// Symlink-safe open
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	fd, err := syscallcompat.Openat(dirfd, cName, newFlags, 0)
	// Handle a few specific errors
	if err != nil {
		if err == syscall.EMFILE {
			// Out of file descriptors - tell the user what their limit is.
			var lim syscall.Rlimit
			syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim)
			tlog.Warn.Printf("Open %q: too many open files. Current \"ulimit -n\": %d", cName, lim.Cur)
		}
		if err == syscall.EACCES && (int(flags)&os.O_WRONLY > 0) {
			// 0200-style write-only permissions: we still need read access
			// for RMW, so take the chmod detour.
			return fs.openWriteOnlyFile(dirfd, cName, newFlags)
		}
		return nil, fuse.ToStatus(err)
	}
	f := os.NewFile(uintptr(fd), cName)
	return NewFile(f, fs)
}
// openBackingFile opens the ciphertext file that backs relative plaintext
// path "relPath". Always adds O_NOFOLLOW to the flags.
func (fs *FS) openBackingFile(relPath string, flags int) (fd int, err error) {
	dirfd, cName, err := fs.openBackingDir(relPath)
	if err != nil {
		return -1, err
	}
	defer syscall.Close(dirfd)
	fd, err = syscallcompat.Openat(dirfd, cName, flags|syscall.O_NOFOLLOW, 0)
	return fd, err
}
// Due to RMW, we always need read permissions on the backing file. This is a
// problem if the file permissions do not allow reading (i.e. 0200 permissions).
// This function works around that problem by chmod'ing the file, obtaining a fd,
// and chmod'ing it back.
//
// Called from Open() with openWriteOnlyLock held in read mode; this function
// temporarily upgrades it to write mode to block concurrent opens.
func (fs *FS) openWriteOnlyFile(dirfd int, cName string, newFlags int) (*File, fuse.Status) {
	// Open write-only first so we can Fstat/Fchmod without read permission.
	woFd, err := syscallcompat.Openat(dirfd, cName, syscall.O_WRONLY|syscall.O_NOFOLLOW, 0)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	defer syscall.Close(woFd)
	var st syscall.Stat_t
	err = syscall.Fstat(woFd, &st)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	// The cast to uint32 fixes a build failure on Darwin, where st.Mode is uint16.
	perms := uint32(st.Mode & 0777)
	// Verify that we don't have read permissions
	if perms&0400 != 0 {
		tlog.Warn.Printf("openWriteOnlyFile: unexpected permissions %#o, returning EPERM", perms)
		return nil, fuse.ToStatus(syscall.EPERM)
	}
	// Upgrade the lock to block other Open()s and downgrade again on return
	fs.openWriteOnlyLock.RUnlock()
	fs.openWriteOnlyLock.Lock()
	defer func() {
		fs.openWriteOnlyLock.Unlock()
		fs.openWriteOnlyLock.RLock()
	}()
	// Relax permissions and revert on return.
	// BUG FIX: the Fchmod result was previously discarded, so the error check
	// below tested the stale (nil) error from Fstat and a failed chmod went
	// undetected. Assign the error before checking it.
	err = syscall.Fchmod(woFd, perms|0400)
	if err != nil {
		tlog.Warn.Printf("openWriteOnlyFile: changing permissions failed: %v", err)
		return nil, fuse.ToStatus(err)
	}
	defer func() {
		err2 := syscall.Fchmod(woFd, perms)
		if err2 != nil {
			tlog.Warn.Printf("openWriteOnlyFile: reverting permissions failed: %v", err2)
		}
	}()
	rwFd, err := syscallcompat.Openat(dirfd, cName, newFlags, 0)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	f := os.NewFile(uintptr(rwFd), cName)
	return NewFile(f, fs)
}
// Create - FUSE call. Creates a new file.
//
// Symlink-safe through the use of Openat().
func (fs *FS) Create(path string, flags uint32, mode uint32, context *fuse.Context) (nodefs.File, fuse.Status) {
	if fs.isFiltered(path) {
		return nil, fuse.EPERM
	}
	newFlags := fs.mangleOpenFlags(flags)
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	fd := -1
	// Handle long file name
	if !fs.args.PlaintextNames && nametransform.IsLongContent(cName) {
		// Create ".name"
		err = fs.nameTransform.WriteLongNameAt(dirfd, cName, path)
		if err != nil {
			return nil, fuse.ToStatus(err)
		}
		// Create content
		fd, err = syscallcompat.Openat(dirfd, cName, newFlags|os.O_CREATE|os.O_EXCL, mode)
		if err != nil {
			// Roll back the ".name" file
			nametransform.DeleteLongNameAt(dirfd, cName)
		}
	} else {
		// Create content, normal (short) file name
		fd, err = syscallcompat.Openat(dirfd, cName, newFlags|syscall.O_CREAT|syscall.O_EXCL, mode)
	}
	if err != nil {
		// Warn about running out of file descriptors, like Open() already
		// does. xfstests generic/488 triggers EMFILE via Create.
		if err == syscall.EMFILE {
			var lim syscall.Rlimit
			syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim)
			tlog.Warn.Printf("Create %q: too many open files. Current \"ulimit -n\": %d", cName, lim.Cur)
		}
		return nil, fuse.ToStatus(err)
	}
	// Set owner
	if fs.args.PreserveOwner {
		err = syscall.Fchown(fd, int(context.Owner.Uid), int(context.Owner.Gid))
		if err != nil {
			tlog.Warn.Printf("Create: Fchown() failed: %v", err)
		}
	}
	f := os.NewFile(uintptr(fd), cName)
	return NewFile(f, fs)
}
// Chmod - FUSE call. Change permissions on "path".
//
// Symlink-safe through use of Fchmodat().
func (fs *FS) Chmod(path string, mode uint32, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(path) {
		return fuse.EPERM
	}
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	// os.Chmod goes through the "syscallMode" translation function that messes
	// up the suid and sgid bits. So use a syscall directly.
	return fuse.ToStatus(syscallcompat.Fchmodat(dirfd, cName, mode, unix.AT_SYMLINK_NOFOLLOW))
}
// Chown - FUSE call. Change the owner of "path".
//
// Symlink-safe through use of Fchownat().
func (fs *FS) Chown(path string, uid uint32, gid uint32, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(path) {
		return fuse.EPERM
	}
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	code = fuse.ToStatus(syscallcompat.Fchownat(dirfd, cName, int(uid), int(gid), unix.AT_SYMLINK_NOFOLLOW))
	if !code.Ok() {
		return code
	}
	if !fs.args.PlaintextNames {
		// When filename encryption is active, every directory contains
		// a "gocryptfs.diriv" file. This file should also change the owner.
		// Instead of checking if "cName" is a directory, we just blindly
		// execute the chown on "cName/gocryptfs.diriv" and ignore errors.
		dirIVPath := filepath.Join(cName, nametransform.DirIVFilename)
		// Error deliberately ignored, see comment above.
		syscallcompat.Fchownat(dirfd, dirIVPath, int(uid), int(gid), unix.AT_SYMLINK_NOFOLLOW)
	}
	return fuse.OK
}
// Mknod - FUSE call. Create a device file.
//
// Symlink-safe through use of Mknodat().
func (fs *FS) Mknod(path string, mode uint32, dev uint32, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(path) {
		return fuse.EPERM
	}
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	// Create ".name" file to store long file name (except in PlaintextNames mode)
	if !fs.args.PlaintextNames && nametransform.IsLongContent(cName) {
		err = fs.nameTransform.WriteLongNameAt(dirfd, cName, path)
		if err != nil {
			return fuse.ToStatus(err)
		}
		// Create "gocryptfs.longfile." device node
		err = syscallcompat.Mknodat(dirfd, cName, mode, int(dev))
		if err != nil {
			// Mknodat failed: roll back the ".name" file.
			nametransform.DeleteLongNameAt(dirfd, cName)
		}
	} else {
		// Create regular device node
		err = syscallcompat.Mknodat(dirfd, cName, mode, int(dev))
	}
	// Common error check for both branches above.
	if err != nil {
		return fuse.ToStatus(err)
	}
	// Set owner
	if fs.args.PreserveOwner {
		err = syscallcompat.Fchownat(dirfd, cName, int(context.Owner.Uid),
			int(context.Owner.Gid), unix.AT_SYMLINK_NOFOLLOW)
		if err != nil {
			// Ownership is best-effort: log but do not fail the Mknod.
			tlog.Warn.Printf("Mknod: Fchownat failed: %v", err)
		}
	}
	return fuse.OK
}
// Truncate - FUSE call. Truncates a file.
//
// Support truncate(2) by opening the file and calling ftruncate(2)
// While the glibc "truncate" wrapper seems to always use ftruncate, fsstress from
// xfstests uses this a lot by calling "truncate64" directly.
//
// Symlink-safe by letting file.Truncate() do all the work.
func (fs *FS) Truncate(path string, offset uint64, context *fuse.Context) (code fuse.Status) {
	file, code := fs.Open(path, uint32(os.O_RDWR), context)
	if code != fuse.OK {
		return code
	}
	defer file.Release()
	return file.Truncate(offset)
}
// Utimens - FUSE call. Set the timestamps on file "path".
//
// Symlink-safe through UtimesNanoAt.
func (fs *FS) Utimens(path string, a *time.Time, m *time.Time, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(path) {
		return fuse.EPERM
	}
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	// ts[0] = access time, ts[1] = modification time.
	ts := []unix.Timespec{
		unix.Timespec(fuse.UtimeToTimespec(a)),
		unix.Timespec(fuse.UtimeToTimespec(m)),
	}
	err = unix.UtimesNanoAt(dirfd, cName, ts, unix.AT_SYMLINK_NOFOLLOW)
	return fuse.ToStatus(err)
}
// StatFs - FUSE call. Returns information about the filesystem.
//
// Symlink-safe because the passed path is ignored.
func (fs *FS) StatFs(path string) *fuse.StatfsOut {
	// Delegate to the loopback filesystem, which stats the cipherdir root.
	return fs.FileSystem.StatFs("")
}
// decryptSymlinkTarget: "cData64" is base64-decoded and decrypted
// like file contents (GCM).
// The empty string decrypts to the empty string.
//
// This function does not do any I/O and is hence symlink-safe.
func (fs *FS) decryptSymlinkTarget(cData64 string) (string, error) {
	if cData64 == "" {
		return "", nil
	}
	cData, err := fs.nameTransform.B64.DecodeString(cData64)
	if err != nil {
		return "", err
	}
	// cData is already a []byte - the previous []byte(cData) conversion
	// was a redundant copy and has been dropped.
	data, err := fs.contentEnc.DecryptBlock(cData, 0, nil)
	if err != nil {
		return "", err
	}
	return string(data), nil
}
// Readlink - FUSE call. Returns the decrypted target of the symlink at
// "relPath".
//
// Symlink-safe through openBackingDir() + Readlinkat().
func (fs *FS) Readlink(relPath string, context *fuse.Context) (out string, status fuse.Status) {
	dirfd, cName, err := fs.openBackingDir(relPath)
	if err != nil {
		return "", fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	cTarget, err := syscallcompat.Readlinkat(dirfd, cName)
	if err != nil {
		return "", fuse.ToStatus(err)
	}
	if fs.args.PlaintextNames {
		return cTarget, fuse.OK
	}
	// Symlinks are encrypted like file contents (GCM) and base64-encoded
	target, err := fs.decryptSymlinkTarget(cTarget)
	if err != nil {
		tlog.Warn.Printf("Readlink %q: decrypting target failed: %v", cName, err)
		return "", fuse.EIO
	}
	// "target" is already a string; the former string(target) conversion was
	// a no-op and has been dropped.
	return target, fuse.OK
}
// Unlink - FUSE call. Delete a file.
//
// Symlink-safe through use of Unlinkat().
func (fs *FS) Unlink(path string, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(path) {
		return fuse.EPERM
	}
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	// Delete content
	err = syscallcompat.Unlinkat(dirfd, cName, 0)
	if err != nil {
		return fuse.ToStatus(err)
	}
	// Delete ".name" file
	if !fs.args.PlaintextNames && nametransform.IsLongContent(cName) {
		err = nametransform.DeleteLongNameAt(dirfd, cName)
		if err != nil {
			tlog.Warn.Printf("Unlink: could not delete .name file: %v", err)
		}
	}
	// Note: a failure deleting the ".name" file is both logged above and
	// returned to the caller here (err is reused).
	return fuse.ToStatus(err)
}
// encryptSymlinkTarget: "data" is encrypted like file contents (GCM)
// and base64-encoded.
// The empty string encrypts to the empty string.
//
// Symlink-safe because it does not do any I/O.
func (fs *FS) encryptSymlinkTarget(data string) (cData64 string) {
	if data == "" {
		return ""
	}
	ciphertext := fs.contentEnc.EncryptBlock([]byte(data), 0, nil)
	return fs.nameTransform.B64.EncodeToString(ciphertext)
}
// Symlink - FUSE call. Create a symlink.
//
// Symlink-safe through use of Symlinkat.
func (fs *FS) Symlink(target string, linkName string, context *fuse.Context) (code fuse.Status) {
	tlog.Debug.Printf("Symlink(\"%s\", \"%s\")", target, linkName)
	if fs.isFiltered(linkName) {
		return fuse.EPERM
	}
	dirfd, cName, err := fs.openBackingDir(linkName)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	cTarget := target
	if !fs.args.PlaintextNames {
		// Symlinks are encrypted like file contents (GCM) and base64-encoded
		cTarget = fs.encryptSymlinkTarget(target)
	}
	// Create ".name" file to store long file name (except in PlaintextNames mode)
	if !fs.args.PlaintextNames && nametransform.IsLongContent(cName) {
		err = fs.nameTransform.WriteLongNameAt(dirfd, cName, linkName)
		if err != nil {
			return fuse.ToStatus(err)
		}
		// Create "gocryptfs.longfile." symlink
		err = syscallcompat.Symlinkat(cTarget, dirfd, cName)
		if err != nil {
			// Symlinkat failed: roll back the ".name" file.
			nametransform.DeleteLongNameAt(dirfd, cName)
		}
	} else {
		// Create symlink
		err = syscallcompat.Symlinkat(cTarget, dirfd, cName)
	}
	// Common error check for both branches above.
	if err != nil {
		return fuse.ToStatus(err)
	}
	// Set owner
	if fs.args.PreserveOwner {
		err = syscallcompat.Fchownat(dirfd, cName, int(context.Owner.Uid),
			int(context.Owner.Gid), unix.AT_SYMLINK_NOFOLLOW)
		if err != nil {
			// Ownership is best-effort: log but do not fail the Symlink.
			tlog.Warn.Printf("Symlink: Fchownat failed: %v", err)
		}
	}
	return fuse.OK
}
// Rename - FUSE call.
//
// Symlink-safe through Renameat().
func (fs *FS) Rename(oldPath string, newPath string, context *fuse.Context) (code fuse.Status) {
	// A rename invalidates cached directory entries; drop the whole cache.
	defer fs.dirCache.Clear()
	if fs.isFiltered(newPath) {
		return fuse.EPERM
	}
	oldDirfd, oldCName, err := fs.openBackingDir(oldPath)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(oldDirfd)
	newDirfd, newCName, err := fs.openBackingDir(newPath)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(newDirfd)
	// Easy case.
	if fs.args.PlaintextNames {
		return fuse.ToStatus(syscallcompat.Renameat(oldDirfd, oldCName, newDirfd, newCName))
	}
	// Long destination file name: create .name file
	nameFileAlreadyThere := false
	if nametransform.IsLongContent(newCName) {
		err = fs.nameTransform.WriteLongNameAt(newDirfd, newCName, newPath)
		// Failure to write the .name file is expected when the target path already
		// exists. Since hashes are pretty unique, there is no need to modify the
		// .name file in this case, and we ignore the error.
		if err == syscall.EEXIST {
			nameFileAlreadyThere = true
		} else if err != nil {
			return fuse.ToStatus(err)
		}
	}
	// Actual rename
	tlog.Debug.Printf("Renameat %d/%s -> %d/%s\n", oldDirfd, oldCName, newDirfd, newCName)
	err = syscallcompat.Renameat(oldDirfd, oldCName, newDirfd, newCName)
	if err == syscall.ENOTEMPTY || err == syscall.EEXIST {
		// If an empty directory is overwritten we will always get an error as
		// the "empty" directory will still contain gocryptfs.diriv.
		// Interestingly, ext4 returns ENOTEMPTY while xfs returns EEXIST.
		// We handle that by trying to fs.Rmdir() the target directory and trying
		// again.
		tlog.Debug.Printf("Rename: Handling ENOTEMPTY")
		if fs.Rmdir(newPath, context) == fuse.OK {
			err = syscallcompat.Renameat(oldDirfd, oldCName, newDirfd, newCName)
		}
	}
	if err != nil {
		if nametransform.IsLongContent(newCName) && nameFileAlreadyThere == false {
			// Roll back .name creation unless the .name file was already there
			nametransform.DeleteLongNameAt(newDirfd, newCName)
		}
		return fuse.ToStatus(err)
	}
	// Rename succeeded: the old long-name helper file is now orphaned, delete it.
	if nametransform.IsLongContent(oldCName) {
		nametransform.DeleteLongNameAt(oldDirfd, oldCName)
	}
	return fuse.OK
}
// Link - FUSE call. Creates a hard link at "newPath" pointing to file
// "oldPath".
//
// Symlink-safe through use of Linkat().
func (fs *FS) Link(oldPath string, newPath string, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(newPath) {
		return fuse.EPERM
	}
	oldDirFd, cOldName, err := fs.openBackingDir(oldPath)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(oldDirFd)
	newDirFd, cNewName, err := fs.openBackingDir(newPath)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(newDirFd)
	// Handle long file name (except in PlaintextNames mode)
	if !fs.args.PlaintextNames && nametransform.IsLongContent(cNewName) {
		err = fs.nameTransform.WriteLongNameAt(newDirFd, cNewName, newPath)
		if err != nil {
			return fuse.ToStatus(err)
		}
		// Create "gocryptfs.longfile." link
		err = syscallcompat.Linkat(oldDirFd, cOldName, newDirFd, cNewName, 0)
		if err != nil {
			// Linkat failed: roll back the ".name" file.
			nametransform.DeleteLongNameAt(newDirFd, cNewName)
		}
	} else {
		// Create regular link
		err = syscallcompat.Linkat(oldDirFd, cOldName, newDirFd, cNewName, 0)
	}
	return fuse.ToStatus(err)
}
// Access - FUSE call. Check if a file can be accessed in the specified mode(s)
// (read, write, execute).
//
// Symlink-safe through use of faccessat.
func (fs *FS) Access(relPath string, mode uint32, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(relPath) {
		return fuse.EPERM
	}
	dirfd, cName, err := fs.openBackingDir(relPath)
	if err != nil {
		return fuse.ToStatus(err)
	}
	// The check is performed on the ciphertext name relative to the backing
	// dir fd; the dirfd is closed immediately afterwards.
	err = unix.Faccessat(dirfd, cName, mode, unix.AT_SYMLINK_NOFOLLOW)
	syscall.Close(dirfd)
	return fuse.ToStatus(err)
}
// reportMitigatedCorruption is used to report a corruption that was transparently
// mitigated and did not return an error to the user. Pass the name of the corrupt
// item (filename for OpenDir(), xattr name for ListXAttr() etc).
// See the MitigatedCorruptions channel for more info.
func (fs *FS) reportMitigatedCorruption(item string) {
	if fs.MitigatedCorruptions == nil {
		// Nobody is listening (normal mount, not "gocryptfs -fsck").
		return
	}
	// Don't block forever if the fsck reader goes away.
	timeout := time.After(1 * time.Second)
	select {
	case fs.MitigatedCorruptions <- item:
	case <-timeout:
		tlog.Warn.Printf("BUG: reportCorruptItem: timeout")
	}
}
// isFiltered - check if plaintext "path" should be forbidden
//
// Prevents name clashes with internal files when file names are not encrypted
func (fs *FS) isFiltered(path string) bool {
	if !fs.args.PlaintextNames {
		// With encrypted names there is nothing to clash with.
		return false
	}
	if path != configfile.ConfDefaultName {
		// Note: gocryptfs.diriv is NOT forbidden because diriv and plaintextnames
		// are exclusive
		return false
	}
	// gocryptfs.conf in the root directory is forbidden
	tlog.Info.Printf("The name /%s is reserved when -plaintextnames is used\n",
		configfile.ConfDefaultName)
	return true
}
fusefrontend: print warning when Create() runs out of file descriptors
We already have this warning in Open(), but xfstests generic/488
causes "too many open files" via Create(). Add the same message so
the user sees what is going on.
// Package fusefrontend interfaces directly with the go-fuse library.
package fusefrontend
// FUSE operations on paths
import (
"os"
"path/filepath"
"sync"
"syscall"
"time"
"golang.org/x/sys/unix"
"github.com/hanwen/go-fuse/fuse"
"github.com/hanwen/go-fuse/fuse/nodefs"
"github.com/hanwen/go-fuse/fuse/pathfs"
"github.com/rfjakob/gocryptfs/internal/configfile"
"github.com/rfjakob/gocryptfs/internal/contentenc"
"github.com/rfjakob/gocryptfs/internal/nametransform"
"github.com/rfjakob/gocryptfs/internal/serialize_reads"
"github.com/rfjakob/gocryptfs/internal/syscallcompat"
"github.com/rfjakob/gocryptfs/internal/tlog"
)
// FS implements the go-fuse virtual filesystem interface.
type FS struct {
	pathfs.FileSystem // loopbackFileSystem, see go-fuse/fuse/pathfs/loopback.go
	args Args // Stores configuration arguments
	// dirIVLock: Lock()ed if any "gocryptfs.diriv" file is modified
	// Readers must RLock() it to prevent them from seeing intermediate
	// states
	dirIVLock sync.RWMutex
	// Filename encryption helper
	nameTransform *nametransform.NameTransform
	// Content encryption helper
	contentEnc *contentenc.ContentEnc
	// This lock is used by openWriteOnlyFile() to block concurrent opens while
	// it relaxes the permissions on a file.
	openWriteOnlyLock sync.RWMutex
	// MitigatedCorruptions is used to report data corruption that is internally
	// mitigated by ignoring the corrupt item. For example, when OpenDir() finds
	// a corrupt filename, we still return the other valid filenames.
	// The corruption is logged to syslog to inform the user, and in addition,
	// the corrupt filename is logged to this channel via
	// reportMitigatedCorruption().
	// "gocryptfs -fsck" reads from the channel to also catch these transparently-
	// mitigated corruptions.
	MitigatedCorruptions chan string
	// Track accesses to the filesystem so that we can know when to autounmount.
	// An access is considered to have happened on every call to encryptPath,
	// which is called as part of every filesystem operation.
	// (This flag uses a uint32 so that it can be reset with CompareAndSwapUint32.)
	AccessedSinceLastCheck uint32
	// dirCache: per-filesystem directory cache (see dirCacheStruct elsewhere
	// in this package for what exactly is cached).
	dirCache dirCacheStruct
}

// Compile-time check that *FS satisfies the pathfs.FileSystem interface.
var _ pathfs.FileSystem = &FS{} // Verify that interface is implemented.
// NewFS returns a new encrypted FUSE overlay filesystem that stores its
// ciphertext in args.Cipherdir, encrypting content with c and names with n.
func NewFS(args Args, c *contentenc.ContentEnc, n *nametransform.NameTransform) *FS {
	if args.SerializeReads {
		serialize_reads.InitSerializer()
	}
	if len(args.Exclude) != 0 {
		tlog.Warn.Printf("Forward mode does not support -exclude")
	}
	fs := &FS{
		FileSystem:    pathfs.NewLoopbackFileSystem(args.Cipherdir),
		args:          args,
		nameTransform: n,
		contentEnc:    c,
	}
	return fs
}
// GetAttr implements pathfs.Filesystem.
//
// GetAttr is symlink-safe through use of openBackingDir() and Fstatat().
func (fs *FS) GetAttr(relPath string, context *fuse.Context) (*fuse.Attr, fuse.Status) {
	tlog.Debug.Printf("FS.GetAttr(%q)", relPath)
	if fs.isFiltered(relPath) {
		return nil, fuse.EPERM
	}
	dirfd, cName, err := fs.openBackingDir(relPath)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	// Stat the ciphertext name relative to the backing dir fd;
	// AT_SYMLINK_NOFOLLOW prevents following a planted symlink.
	var st unix.Stat_t
	err = syscallcompat.Fstatat(dirfd, cName, &st, unix.AT_SYMLINK_NOFOLLOW)
	syscall.Close(dirfd)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	a := &fuse.Attr{}
	st2 := syscallcompat.Unix2syscall(st)
	a.FromStat(&st2)
	if a.IsRegular() {
		// Report the plaintext size, not the (larger) ciphertext size.
		a.Size = fs.contentEnc.CipherSizeToPlainSize(a.Size)
	} else if a.IsSymlink() {
		// Symlink targets are stored encrypted; the decrypted target
		// determines the reported size. Readlink errors are ignored here,
		// leaving a zero size.
		target, _ := fs.Readlink(relPath, context)
		a.Size = uint64(len(target))
	}
	if fs.args.ForceOwner != nil {
		a.Owner = *fs.args.ForceOwner
	}
	return a, fuse.OK
}
// mangleOpenFlags is used by Create() and Open() to convert the open flags the
// user wants to the flags we internally use to open the backing file.
// The returned flags always contain O_NOFOLLOW.
func (fs *FS) mangleOpenFlags(flags uint32) (newFlags int) {
	newFlags = int(flags)
	if newFlags&os.O_WRONLY != 0 {
		// Convert WRONLY to RDWR: read access is always needed for
		// read-modify-write cycles.
		newFlags = (newFlags &^ os.O_WRONLY) | os.O_RDWR
	}
	// Append mode is incompatible with RMW (we need to seek back), drop it.
	newFlags &^= os.O_APPEND
	// O_DIRECT accesses must be aligned in both offset and length. Our crypto
	// header throws the alignment off even for aligned userspace accesses
	// (xfstests generic/013 on ext4 used to produce EINVAL storms), so force
	// buffered IO by stripping the flag.
	newFlags &^= syscallcompat.O_DIRECT
	// O_NOFOLLOW guards against symlink races.
	newFlags |= syscall.O_NOFOLLOW
	return newFlags
}
// Open - FUSE call. Open already-existing file.
//
// Symlink-safe through Openat().
func (fs *FS) Open(path string, flags uint32, context *fuse.Context) (fuseFile nodefs.File, status fuse.Status) {
	if fs.isFiltered(path) {
		return nil, fuse.EPERM
	}
	newFlags := fs.mangleOpenFlags(flags)
	// Taking this lock makes sure we don't race openWriteOnlyFile()
	fs.openWriteOnlyLock.RLock()
	defer fs.openWriteOnlyLock.RUnlock()
	// Symlink-safe open
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	fd, err := syscallcompat.Openat(dirfd, cName, newFlags, 0)
	// Handle a few specific errors
	if err != nil {
		if err == syscall.EMFILE {
			// Out of file descriptors - tell the user what their limit is.
			var lim syscall.Rlimit
			syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim)
			tlog.Warn.Printf("Open %q: too many open files. Current \"ulimit -n\": %d", cName, lim.Cur)
		}
		if err == syscall.EACCES && (int(flags)&os.O_WRONLY > 0) {
			// 0200-style write-only permissions: we still need read access
			// for RMW, so take the chmod detour.
			return fs.openWriteOnlyFile(dirfd, cName, newFlags)
		}
		return nil, fuse.ToStatus(err)
	}
	f := os.NewFile(uintptr(fd), cName)
	return NewFile(f, fs)
}
// openBackingFile opens the ciphertext file that backs relative plaintext
// path "relPath". Always adds O_NOFOLLOW to the flags.
func (fs *FS) openBackingFile(relPath string, flags int) (fd int, err error) {
	dirfd, cName, err := fs.openBackingDir(relPath)
	if err != nil {
		return -1, err
	}
	defer syscall.Close(dirfd)
	fd, err = syscallcompat.Openat(dirfd, cName, flags|syscall.O_NOFOLLOW, 0)
	return fd, err
}
// Due to RMW, we always need read permissions on the backing file. This is a
// problem if the file permissions do not allow reading (i.e. 0200 permissions).
// This function works around that problem by chmod'ing the file, obtaining a fd,
// and chmod'ing it back.
//
// Called from Open() with fs.openWriteOnlyLock held for reading; the lock is
// temporarily upgraded to write mode so no concurrent Open() can observe the
// relaxed permissions.
func (fs *FS) openWriteOnlyFile(dirfd int, cName string, newFlags int) (*File, fuse.Status) {
	woFd, err := syscallcompat.Openat(dirfd, cName, syscall.O_WRONLY|syscall.O_NOFOLLOW, 0)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	defer syscall.Close(woFd)
	var st syscall.Stat_t
	err = syscall.Fstat(woFd, &st)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	// The cast to uint32 fixes a build failure on Darwin, where st.Mode is uint16.
	perms := uint32(st.Mode & 0777)
	// Verify that we don't have read permissions
	if perms&0400 != 0 {
		tlog.Warn.Printf("openWriteOnlyFile: unexpected permissions %#o, returning EPERM", perms)
		return nil, fuse.ToStatus(syscall.EPERM)
	}
	// Upgrade the lock to block other Open()s and downgrade again on return
	fs.openWriteOnlyLock.RUnlock()
	fs.openWriteOnlyLock.Lock()
	defer func() {
		fs.openWriteOnlyLock.Unlock()
		fs.openWriteOnlyLock.RLock()
	}()
	// Relax permissions and revert on return.
	// Bug fix: the Fchmod return value used to be discarded, so the error
	// check below tested the stale (nil) err from Fstat and chmod failures
	// were silently ignored.
	err = syscall.Fchmod(woFd, perms|0400)
	if err != nil {
		tlog.Warn.Printf("openWriteOnlyFile: changing permissions failed: %v", err)
		return nil, fuse.ToStatus(err)
	}
	defer func() {
		err2 := syscall.Fchmod(woFd, perms)
		if err2 != nil {
			tlog.Warn.Printf("openWriteOnlyFile: reverting permissions failed: %v", err2)
		}
	}()
	rwFd, err := syscallcompat.Openat(dirfd, cName, newFlags, 0)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	f := os.NewFile(uintptr(rwFd), cName)
	return NewFile(f, fs)
}
// Create - FUSE call. Creates a new file.
//
// For long file names, the ".name" companion file is written first so the
// content file never exists without its name mapping; on content-create
// failure the ".name" file is rolled back.
//
// Symlink-safe through the use of Openat().
func (fs *FS) Create(path string, flags uint32, mode uint32, context *fuse.Context) (nodefs.File, fuse.Status) {
	if fs.isFiltered(path) {
		return nil, fuse.EPERM
	}
	newFlags := fs.mangleOpenFlags(flags)
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	fd := -1
	// Handle long file name
	if !fs.args.PlaintextNames && nametransform.IsLongContent(cName) {
		// Create ".name"
		err = fs.nameTransform.WriteLongNameAt(dirfd, cName, path)
		if err != nil {
			return nil, fuse.ToStatus(err)
		}
		// Create content. Consistency fix: use the same syscall.O_CREAT|
		// syscall.O_EXCL constants as the short-name branch below (this
		// branch used to mix in os.O_CREATE|os.O_EXCL; the values are
		// identical, os.O_CREATE is defined as syscall.O_CREAT).
		fd, err = syscallcompat.Openat(dirfd, cName, newFlags|syscall.O_CREAT|syscall.O_EXCL, mode)
		if err != nil {
			// Roll back the ".name" file
			nametransform.DeleteLongNameAt(dirfd, cName)
			return nil, fuse.ToStatus(err)
		}
	} else {
		// Create content, normal (short) file name
		fd, err = syscallcompat.Openat(dirfd, cName, newFlags|syscall.O_CREAT|syscall.O_EXCL, mode)
		if err != nil {
			// xfstests generic/488 triggers this
			if err == syscall.EMFILE {
				var lim syscall.Rlimit
				syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim)
				tlog.Warn.Printf("Create %q: too many open files. Current \"ulimit -n\": %d", cName, lim.Cur)
			}
			return nil, fuse.ToStatus(err)
		}
	}
	// Set owner
	if fs.args.PreserveOwner {
		err = syscall.Fchown(fd, int(context.Owner.Uid), int(context.Owner.Gid))
		if err != nil {
			// Best-effort: log but do not fail the create.
			tlog.Warn.Printf("Create: Fchown() failed: %v", err)
		}
	}
	f := os.NewFile(uintptr(fd), cName)
	return NewFile(f, fs)
}
// Chmod - FUSE call. Change permissions on "path".
//
// Symlink-safe through use of Fchmodat().
func (fs *FS) Chmod(path string, mode uint32, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(path) {
		return fuse.EPERM
	}
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	// os.Chmod goes through the "syscallMode" translation function that messes
	// up the suid and sgid bits. So use a syscall directly.
	return fuse.ToStatus(syscallcompat.Fchmodat(dirfd, cName, mode, unix.AT_SYMLINK_NOFOLLOW))
}
// Chown - FUSE call. Change the owner of "path".
//
// Symlink-safe through use of Fchownat().
func (fs *FS) Chown(path string, uid uint32, gid uint32, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(path) {
		return fuse.EPERM
	}
	// Symlink-safe open of the parent directory.
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	code = fuse.ToStatus(syscallcompat.Fchownat(dirfd, cName, int(uid), int(gid), unix.AT_SYMLINK_NOFOLLOW))
	if !code.Ok() {
		return code
	}
	if !fs.args.PlaintextNames {
		// When filename encryption is active, every directory contains
		// a "gocryptfs.diriv" file. This file should also change the owner.
		// Instead of checking if "cName" is a directory, we just blindly
		// execute the chown on "cName/gocryptfs.diriv" and ignore errors.
		dirIVPath := filepath.Join(cName, nametransform.DirIVFilename)
		// Error deliberately ignored - cName may not be a directory at all.
		syscallcompat.Fchownat(dirfd, dirIVPath, int(uid), int(gid), unix.AT_SYMLINK_NOFOLLOW)
	}
	return fuse.OK
}
// Mknod - FUSE call. Create a device file.
//
// Symlink-safe through use of Mknodat().
func (fs *FS) Mknod(path string, mode uint32, dev uint32, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(path) {
		return fuse.EPERM
	}
	// Symlink-safe open of the parent directory.
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	// Create ".name" file to store long file name (except in PlaintextNames mode)
	if !fs.args.PlaintextNames && nametransform.IsLongContent(cName) {
		err = fs.nameTransform.WriteLongNameAt(dirfd, cName, path)
		if err != nil {
			return fuse.ToStatus(err)
		}
		// Create "gocryptfs.longfile." device node
		err = syscallcompat.Mknodat(dirfd, cName, mode, int(dev))
		if err != nil {
			// Roll back the ".name" file so it does not exist without content.
			nametransform.DeleteLongNameAt(dirfd, cName)
		}
	} else {
		// Create regular device node
		err = syscallcompat.Mknodat(dirfd, cName, mode, int(dev))
	}
	if err != nil {
		return fuse.ToStatus(err)
	}
	// Set owner
	if fs.args.PreserveOwner {
		err = syscallcompat.Fchownat(dirfd, cName, int(context.Owner.Uid),
			int(context.Owner.Gid), unix.AT_SYMLINK_NOFOLLOW)
		if err != nil {
			// Best-effort: log but do not fail the mknod.
			tlog.Warn.Printf("Mknod: Fchownat failed: %v", err)
		}
	}
	return fuse.OK
}
// Truncate - FUSE call. Truncates a file.
//
// Support truncate(2) by opening the file and calling ftruncate(2)
// While the glibc "truncate" wrapper seems to always use ftruncate, fsstress from
// xfstests uses this a lot by calling "truncate64" directly.
//
// Symlink-safe by letting file.Truncate() do all the work.
func (fs *FS) Truncate(path string, offset uint64, context *fuse.Context) (code fuse.Status) {
	file, status := fs.Open(path, uint32(os.O_RDWR), context)
	if status != fuse.OK {
		return status
	}
	// Release after the truncate result has been computed.
	defer file.Release()
	return file.Truncate(offset)
}
// Utimens - FUSE call. Set the timestamps on file "path".
//
// Symlink-safe through UtimesNanoAt.
func (fs *FS) Utimens(path string, a *time.Time, m *time.Time, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(path) {
		return fuse.EPERM
	}
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	// Element 0 is atime, element 1 is mtime.
	ts := [2]unix.Timespec{
		unix.Timespec(fuse.UtimeToTimespec(a)),
		unix.Timespec(fuse.UtimeToTimespec(m)),
	}
	return fuse.ToStatus(unix.UtimesNanoAt(dirfd, cName, ts[:], unix.AT_SYMLINK_NOFOLLOW))
}
// StatFs - FUSE call. Returns information about the filesystem.
//
// Symlink-safe because the passed path is ignored; the query is delegated
// to the embedded FileSystem with an empty path.
func (fs *FS) StatFs(path string) *fuse.StatfsOut {
	return fs.FileSystem.StatFs("")
}
// decryptSymlinkTarget: "cData64" is base64-decoded and decrypted
// like file contents (GCM).
// The empty string decrypts to the empty string.
//
// This function does not do any I/O and is hence symlink-safe.
func (fs *FS) decryptSymlinkTarget(cData64 string) (string, error) {
	if cData64 == "" {
		return "", nil
	}
	cData, err := fs.nameTransform.B64.DecodeString(cData64)
	if err != nil {
		return "", err
	}
	// cData is already a []byte - the previous []byte(cData) conversion
	// was a redundant copy.
	data, err := fs.contentEnc.DecryptBlock(cData, 0, nil)
	if err != nil {
		return "", err
	}
	return string(data), nil
}
// Readlink - FUSE call. Read and decrypt the target of the symlink at
// relative plaintext path "relPath".
//
// Symlink-safe through openBackingDir() + Readlinkat().
func (fs *FS) Readlink(relPath string, context *fuse.Context) (out string, status fuse.Status) {
	dirfd, cName, err := fs.openBackingDir(relPath)
	if err != nil {
		return "", fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	cTarget, err := syscallcompat.Readlinkat(dirfd, cName)
	if err != nil {
		return "", fuse.ToStatus(err)
	}
	// In PlaintextNames mode, symlink targets are stored as-is.
	if fs.args.PlaintextNames {
		return cTarget, fuse.OK
	}
	// Symlinks are encrypted like file contents (GCM) and base64-encoded
	target, err := fs.decryptSymlinkTarget(cTarget)
	if err != nil {
		tlog.Warn.Printf("Readlink %q: decrypting target failed: %v", cName, err)
		return "", fuse.EIO
	}
	// "target" is already a string - the previous string(target) conversion
	// was redundant.
	return target, fuse.OK
}
// Unlink - FUSE call. Delete a file.
//
// Symlink-safe through use of Unlinkat().
func (fs *FS) Unlink(path string, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(path) {
		return fuse.EPERM
	}
	// Symlink-safe open of the parent directory.
	dirfd, cName, err := fs.openBackingDir(path)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	// Delete content
	err = syscallcompat.Unlinkat(dirfd, cName, 0)
	if err != nil {
		return fuse.ToStatus(err)
	}
	// Delete ".name" file
	if !fs.args.PlaintextNames && nametransform.IsLongContent(cName) {
		err = nametransform.DeleteLongNameAt(dirfd, cName)
		if err != nil {
			tlog.Warn.Printf("Unlink: could not delete .name file: %v", err)
		}
	}
	// Note: if the ".name" deletion above failed, that error is returned
	// here even though the content file is already gone.
	return fuse.ToStatus(err)
}
// encryptSymlinkTarget: "data" is encrypted like file contents (GCM)
// and base64-encoded.
// The empty string encrypts to the empty string.
//
// Symlink-safe because it does not do any I/O.
func (fs *FS) encryptSymlinkTarget(data string) (cData64 string) {
	if data == "" {
		return ""
	}
	ciphertext := fs.contentEnc.EncryptBlock([]byte(data), 0, nil)
	return fs.nameTransform.B64.EncodeToString(ciphertext)
}
// Symlink - FUSE call. Create a symlink.
//
// Symlink-safe through use of Symlinkat.
func (fs *FS) Symlink(target string, linkName string, context *fuse.Context) (code fuse.Status) {
	tlog.Debug.Printf("Symlink(\"%s\", \"%s\")", target, linkName)
	if fs.isFiltered(linkName) {
		return fuse.EPERM
	}
	// Symlink-safe open of the parent directory.
	dirfd, cName, err := fs.openBackingDir(linkName)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(dirfd)
	cTarget := target
	if !fs.args.PlaintextNames {
		// Symlinks are encrypted like file contents (GCM) and base64-encoded
		cTarget = fs.encryptSymlinkTarget(target)
	}
	// Create ".name" file to store long file name (except in PlaintextNames mode)
	if !fs.args.PlaintextNames && nametransform.IsLongContent(cName) {
		err = fs.nameTransform.WriteLongNameAt(dirfd, cName, linkName)
		if err != nil {
			return fuse.ToStatus(err)
		}
		// Create "gocryptfs.longfile." symlink
		err = syscallcompat.Symlinkat(cTarget, dirfd, cName)
		if err != nil {
			// Roll back the ".name" file so it does not exist without content.
			nametransform.DeleteLongNameAt(dirfd, cName)
		}
	} else {
		// Create symlink
		err = syscallcompat.Symlinkat(cTarget, dirfd, cName)
	}
	if err != nil {
		return fuse.ToStatus(err)
	}
	// Set owner
	if fs.args.PreserveOwner {
		err = syscallcompat.Fchownat(dirfd, cName, int(context.Owner.Uid),
			int(context.Owner.Gid), unix.AT_SYMLINK_NOFOLLOW)
		if err != nil {
			// Best-effort: log but do not fail the symlink creation.
			tlog.Warn.Printf("Symlink: Fchownat failed: %v", err)
		}
	}
	return fuse.OK
}
// Rename - FUSE call. Move/rename the file or directory at "oldPath"
// to "newPath".
//
// Handles the long-name ".name" companion files on both ends, and the
// special case where the target is an "empty" directory that still
// contains a gocryptfs.diriv file.
//
// Symlink-safe through Renameat().
func (fs *FS) Rename(oldPath string, newPath string, context *fuse.Context) (code fuse.Status) {
	// The rename may move directories around, invalidating cached dir data.
	defer fs.dirCache.Clear()
	if fs.isFiltered(newPath) {
		return fuse.EPERM
	}
	oldDirfd, oldCName, err := fs.openBackingDir(oldPath)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(oldDirfd)
	newDirfd, newCName, err := fs.openBackingDir(newPath)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(newDirfd)
	// Easy case.
	if fs.args.PlaintextNames {
		return fuse.ToStatus(syscallcompat.Renameat(oldDirfd, oldCName, newDirfd, newCName))
	}
	// Long destination file name: create .name file
	nameFileAlreadyThere := false
	if nametransform.IsLongContent(newCName) {
		err = fs.nameTransform.WriteLongNameAt(newDirfd, newCName, newPath)
		// Failure to write the .name file is expected when the target path already
		// exists. Since hashes are pretty unique, there is no need to modify the
		// .name file in this case, and we ignore the error.
		if err == syscall.EEXIST {
			nameFileAlreadyThere = true
		} else if err != nil {
			return fuse.ToStatus(err)
		}
	}
	// Actual rename
	tlog.Debug.Printf("Renameat %d/%s -> %d/%s\n", oldDirfd, oldCName, newDirfd, newCName)
	err = syscallcompat.Renameat(oldDirfd, oldCName, newDirfd, newCName)
	if err == syscall.ENOTEMPTY || err == syscall.EEXIST {
		// If an empty directory is overwritten we will always get an error as
		// the "empty" directory will still contain gocryptfs.diriv.
		// Interestingly, ext4 returns ENOTEMPTY while xfs returns EEXIST.
		// We handle that by trying to fs.Rmdir() the target directory and trying
		// again.
		tlog.Debug.Printf("Rename: Handling ENOTEMPTY")
		if fs.Rmdir(newPath, context) == fuse.OK {
			err = syscallcompat.Renameat(oldDirfd, oldCName, newDirfd, newCName)
		}
	}
	if err != nil {
		if nametransform.IsLongContent(newCName) && nameFileAlreadyThere == false {
			// Roll back .name creation unless the .name file was already there
			nametransform.DeleteLongNameAt(newDirfd, newCName)
		}
		return fuse.ToStatus(err)
	}
	// The rename succeeded - clean up the source's ".name" file, which no
	// longer has a content file next to it.
	if nametransform.IsLongContent(oldCName) {
		nametransform.DeleteLongNameAt(oldDirfd, oldCName)
	}
	return fuse.OK
}
// Link - FUSE call. Creates a hard link at "newPath" pointing to file
// "oldPath".
//
// Symlink-safe through use of Linkat().
func (fs *FS) Link(oldPath string, newPath string, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(newPath) {
		return fuse.EPERM
	}
	// Symlink-safe open of both parent directories.
	oldDirFd, cOldName, err := fs.openBackingDir(oldPath)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(oldDirFd)
	newDirFd, cNewName, err := fs.openBackingDir(newPath)
	if err != nil {
		return fuse.ToStatus(err)
	}
	defer syscall.Close(newDirFd)
	// Handle long file name (except in PlaintextNames mode)
	if !fs.args.PlaintextNames && nametransform.IsLongContent(cNewName) {
		err = fs.nameTransform.WriteLongNameAt(newDirFd, cNewName, newPath)
		if err != nil {
			return fuse.ToStatus(err)
		}
		// Create "gocryptfs.longfile." link
		err = syscallcompat.Linkat(oldDirFd, cOldName, newDirFd, cNewName, 0)
		if err != nil {
			// Roll back the ".name" file so it does not exist without content.
			nametransform.DeleteLongNameAt(newDirFd, cNewName)
		}
	} else {
		// Create regular link
		err = syscallcompat.Linkat(oldDirFd, cOldName, newDirFd, cNewName, 0)
	}
	return fuse.ToStatus(err)
}
// Access - FUSE call. Check if a file can be accessed in the specified mode(s)
// (read, write, execute).
//
// Symlink-safe through use of faccessat.
func (fs *FS) Access(relPath string, mode uint32, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(relPath) {
		return fuse.EPERM
	}
	dirfd, cName, err := fs.openBackingDir(relPath)
	if err != nil {
		return fuse.ToStatus(err)
	}
	// Consistency fix: close via defer like every other handler in this
	// file, so the fd cannot leak if more code is added later.
	defer syscall.Close(dirfd)
	err = unix.Faccessat(dirfd, cName, mode, unix.AT_SYMLINK_NOFOLLOW)
	return fuse.ToStatus(err)
}
// reportMitigatedCorruption is used to report a corruption that was transparently
// mitigated and did not return an error to the user. Pass the name of the corrupt
// item (filename for OpenDir(), xattr name for ListXAttr() etc).
// See the MitigatedCorruptions channel for more info.
//
// If nobody receives from the channel within one second, the report is
// dropped with a warning so the filesystem does not hang.
func (fs *FS) reportMitigatedCorruption(item string) {
	if fs.MitigatedCorruptions == nil {
		return
	}
	select {
	case fs.MitigatedCorruptions <- item:
	case <-time.After(1 * time.Second):
		// Fix: the message used to say "reportCorruptItem", which is not
		// the name of this function.
		tlog.Warn.Printf("BUG: reportMitigatedCorruption: timeout")
		//debug.PrintStack()
		return
	}
}
// isFiltered - check if plaintext "path" should be forbidden
//
// Prevents name clashes with internal files when file names are not encrypted
func (fs *FS) isFiltered(path string) bool {
	if !fs.args.PlaintextNames {
		return false
	}
	// gocryptfs.conf in the root directory is forbidden.
	// Note: gocryptfs.diriv is NOT forbidden because diriv and plaintextnames
	// are exclusive
	if path != configfile.ConfDefaultName {
		return false
	}
	tlog.Info.Printf("The name /%s is reserved when -plaintextnames is used\n",
		configfile.ConfDefaultName)
	return true
}
|
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gophers is a list of names, emails, and Github usernames of people
// from the Go git repos and issue trackers.
package gophers
import (
"strings"
"golang.org/x/build/gerrit"
)
// Person represents a person from the Go git repos and issue trackers.
// Instances are created and populated via addPerson/mergeIDs and looked
// up through the idToPerson index.
type Person struct {
	Name    string   // "Foo Bar"
	Github  string   // "FooBar" (orig case, no '@')
	Gerrit  string   // "foo@bar.com" (lowercase); first email seen for the person
	Emails  []string // all lower
	Googler bool     // whether person is (or was) a Googler; determined via heuristics
	Bot     bool     // whether it's a known bot (GopherBot, Gerrit Bot)
}
// strSliceContains reports whether s is an element of ss.
func strSliceContains(ss []string, s string) bool {
	for i := range ss {
		if ss[i] == s {
			return true
		}
	}
	return false
}
// mergeIDs records the given identifiers on p and registers each in the
// idToPerson index. Identifiers are classified by shape:
//   - "@github": GitHub username (leading '@' stripped for the field),
//   - contains "@": email (also matches Gerrit "<id>@<instance>" strings);
//     the first one seen becomes the Gerrit email, and golang.org/google.com
//     addresses mark the person as a Googler,
//   - "*goog" / "*bot": flags,
//   - anything else: the person's name.
func (p *Person) mergeIDs(ids ...string) {
	for _, id := range ids {
		if strings.HasPrefix(id, "@") {
			p.Github = id[1:]
			idToPerson[strings.ToLower(id)] = p
			continue
		}
		if strings.Contains(id, "@") {
			email := strings.ToLower(id)
			if !strSliceContains(p.Emails, email) {
				p.Emails = append(p.Emails, email)
			}
			idToPerson[email] = p
			if strings.HasSuffix(email, "@golang.org") || strings.HasSuffix(email, "@google.com") {
				p.Googler = true
			}
			// The first email seen is considered the person's Gerrit email.
			if len(p.Emails) == 1 {
				p.Gerrit = email
			}
			continue
		}
		if id == "*goog" {
			p.Googler = true
			continue
		}
		if id == "*bot" {
			p.Bot = true
			continue
		}
		p.Name = id
		idToPerson[strings.ToLower(id)] = p
	}
}
// idToPerson indexes every known identifier to its Person.
// Keys are "@lowercasegithub", "lowercase name", "lowercase@email.com".
var idToPerson = map[string]*Person{}
// GetPerson looks up a person by id and returns one if found,
// or nil otherwise.
//
// The id is case insensitive, and may be one of:
//
// • full name (for example, "Dmitri Shuralyov")
//
// • GitHub username (for example, "@dmitshur"), leading '@' is mandatory
//
// • Gerrit <account ID>@<instance ID> (for example, "6005@62eb7196-b449-3ce5-99f1-c037f21e1705")
//
// • email (for example, "dmitshur@golang.org")
//
// Only exact matches are supported.
//
func GetPerson(id string) *Person {
	p, ok := idToPerson[strings.ToLower(id)]
	if !ok {
		return nil
	}
	return p
}
// GetGerritPerson looks up a person from the Gerrit account ai.
// It uses the name and email in the Gerrit account for the lookup,
// trying the name first. Returns nil if neither matches.
func GetGerritPerson(ai gerrit.AccountInfo) *Person {
	for _, id := range []string{ai.Name, ai.Email} {
		if p := GetPerson(id); p != nil {
			return p
		}
	}
	return nil
}
// addPerson registers the given identifiers. If any of them already maps
// to a known Person, the ids are merged into that Person; otherwise a new
// Person is created. Returns the (possibly new) Person.
func addPerson(ids ...string) *Person {
	var p *Person
	for _, id := range ids {
		if existing := GetPerson(id); existing != nil {
			p = existing
			break
		}
	}
	if p == nil {
		p = new(Person)
	}
	p.mergeIDs(ids...)
	return p
}
func init() {
// Not people, but hereby granted personhood:
addPerson("Gopherbot", "gobot@golang.org", "@gopherbot", "5976@62eb7196-b449-3ce5-99f1-c037f21e1705", "*bot")
addPerson("Gerrit Bot", "letsusegerrit@gmail.com", "12446@62eb7196-b449-3ce5-99f1-c037f21e1705", "*bot")
addPerson("212472270", "ggp493@gmail.com", "@ggriffiths")
addPerson("9.nashi", "9.nashi@gmail.com", "@80nashi")
addPerson("AJ Yoo", "ajarusan@arista.com")
addPerson("Aamir Khan", "syst3m.w0rm@gmail.com", "7715@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aamir Khan", "syst3m.w0rm@gmail.com", "@syst3mw0rm")
addPerson("Aaron Cannon", "cannona@fireantproductions.com", "@cannona")
addPerson("Aaron Clawson", "Aaron.Clawson@gmail.com")
addPerson("Aaron France", "aaron.l.france@gmail.com", "@AeroNotix")
addPerson("Aaron Jacobs", "jacobsa@google.com", "6475@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aaron Jacobs", "jacobsa@google.com", "@jacobsa")
addPerson("Aaron Kemp", "kemp.aaron@gmail.com", "@writeonlymemory")
addPerson("Aaron Kemp", "kemp@google.com")
addPerson("Aaron Torres", "tcboox@gmail.com", "6165@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aaron Torres", "tcboox@gmail.com", "@agtorre")
addPerson("Aaron Zinman", "aaron@azinman.com", "@azinman")
addPerson("Aarti Parikh", "aarti.parikh@gmail.com", "@aarti")
addPerson("Aashish Karki", "0133asis@gmail.com")
addPerson("Abe Haskins", "abeisgreat@abeisgreat.com")
addPerson("Abhijit Pai", "abhijitpai05@gmail.com")
addPerson("Abhinav Gupta", "abhinav.g90@gmail.com", "@abhinav")
addPerson("Adam Azarchs", "adam.azarchs@10xgenomics.com", "@adam-azarchs")
addPerson("Adam Bender", "abender@google.com", "@bitlux")
addPerson("Adam Eijdenberg", "adam@continusec.com")
addPerson("Adam Harvey", "aharvey@php.net")
addPerson("Adam Jones", "adam@modsrus.com")
addPerson("Adam Kisala", "adam.kisala@gmail.com", "@adamkisala")
addPerson("Adam Langley", "agl@golang.org", "5425@62eb7196-b449-3ce5-99f1-c037f21e1705", "@agl", "agl@google.com", "7285@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Adam Medzinski", "adam.medzinski@gmail.com", "@medzin")
addPerson("Adam Ostor", "adam.ostor@gmail.com")
addPerson("Adam Ryman", "adamryman@gmail.com")
addPerson("Adam Shannon", "adamkshannon@gmail.com", "26193@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Adam Shannon", "adamkshannon@gmail.com", "@adamdecaf")
addPerson("Adam Sindelar", "adamsh@google.com", "27224@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Adam Thomason", "athomason@gmail.com")
addPerson("Adam Wolfe Gordon", "awg@xvx.ca")
addPerson("Adam Woodbeck", "adam@woodbeck.net", "@awoodbeck")
addPerson("Adam Yi", "i@adamyi.com")
addPerson("Adin Scannell", "ascannell@google.com")
addPerson("Aditya Mukerjee", "dev@chimeracoder.net", "@ChimeraCoder")
addPerson("Aditya Rastogi", "adirastogi@google.com")
addPerson("Adrian Hesketh", "adrianhesketh@hushmail.com", "24533@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Adrian Hesketh", "adrianhesketh@hushmail.com", "@a-h")
addPerson("Adrian O'Grady", "elpollouk@gmail.com", "@elpollouk")
addPerson("Aeneas Rekkas (arekkas)", "aeneas@ory.am")
addPerson("Afanasev Stanislav", "phpprogger@gmail.com")
addPerson("Agis Anastasopoulos", "agis.anast@gmail.com", "@agis")
addPerson("Agniva De Sarker", "agniva.quicksilver@gmail.com", "24096@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Agniva De Sarker", "agniva.quicksilver@gmail.com", "@agnivade")
addPerson("Agniva De Sarker", "agnivade@yahoo.co.in", "@agnivade")
addPerson("Ahmed W.", "oneofone@gmail.com", "5255@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ahmet Alp Balkan", "ahmetb@google.com", "@ahmetb")
addPerson("Ahmet Soormally", "ahmet@mangomm.co.uk", "@asoorm")
addPerson("Ahmy Yulrizka", "yulrizka@gmail.com", "@yulrizka")
addPerson("Aiden Scandella", "ai@uber.com", "@sectioneight")
addPerson("Aiden Scandella", "sc@ndella.com")
addPerson("Ainar Garipov", "gugl.zadolbal@gmail.com", "@ainar-g")
addPerson("Aishraj", "aishraj@users.noreply.github.com", "@aishraj")
addPerson("Akhil Indurti", "aindurti@gmail.com", "contact@akhilindurti.com", "@smasher164", "17921@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Akihiko Odaki", "akihiko.odaki.4i@stu.hosei.ac.jp")
addPerson("Akihiro Suda", "suda.kyoto@gmail.com", "13030@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Akihiro Suda", "suda.kyoto@gmail.com", "@AkihiroSuda")
addPerson("Alan Bradley", "alan@gangleri.net")
addPerson("Alan Braithwaite", "alan@ipaddr.org", "@abraithwaite")
addPerson("Alan Donovan", "adonovan@google.com", "5195@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alan Donovan", "adonovan@google.com", "@alandonovan") // work profile
addPerson("Alan Donovan", "alan@alandonovan.net", "@adonovan") // personal profile
addPerson("Alan Gardner", "alanctgardner@gmail.com")
addPerson("Alan Shreve", "alan@inconshreveable.com", "@inconshreveable")
addPerson("Albert Nigmatzianov", "albertnigma@gmail.com", "15270@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Albert Smith", "albert@horde.today")
addPerson("Albert Strasheim", "fullung@gmail.com", "@alberts")
addPerson("Albert Yu", "yukinying@gmail.com", "@yukinying")
addPerson("Alberto Bertogli", "albertito@blitiri.com.ar", "10985@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alberto Bertogli", "albertito@blitiri.com.ar", "@albertito")
addPerson("Alberto Donizetti", "alb.donizetti@gmail.com", "5385@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alberto Donizetti", "alb.donizetti@gmail.com", "@ALTree")
addPerson("Alberto García Hierro", "alberto@garciahierro.com", "@fiam")
addPerson("Aleksandar Dezelin", "dezelin@gmail.com", "@dezelin")
addPerson("Aleksandr Demakin", "alexander.demakin@gmail.com", "8245@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aleksandr Demakin", "alexander.demakin@gmail.com", "@avdva")
addPerson("Aleksandr Razumov", "ar@cydev.ru")
addPerson("Aleksandr Razumov", "ar@cydev.ru", "@ernado")
addPerson("Alekseev Artem", "a.artem060@gmail.com", "@fexolm")
addPerson("Alessandro Arzilli", "alessandro.arzilli@gmail.com", "5821@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alessandro Arzilli", "alessandro.arzilli@gmail.com", "@aarzilli")
addPerson("Alessandro Baffa", "alessandro.baffa@gmail.com", "@alebaffa")
addPerson("Alex A Skinner", "alex@lx.lc")
addPerson("Alex Brainman", "alex.brainman@gmail.com", "5070@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alex Brainman", "alex.brainman@gmail.com", "@alexbrainman")
addPerson("Alex Bramley", "a.bramley@gmail.com", "@fluffle")
addPerson("Alex Browne", "stephenalexbrowne@gmail.com", "@albrow")
addPerson("Alex Carol", "alex.carol.c@gmail.com", "@alexcarol")
addPerson("Alex Crawford", "alex@acrawford.com")
addPerson("Alex Flint", "alex.flint@gmail.com")
addPerson("Alex Jin", "toalexjin@gmail.com", "@toalexjin")
addPerson("Alex Kohler", "alexjohnkohler@gmail.com", "@alexkohler")
addPerson("Alex Myasoedov", "msoedov@gmail.com", "@msoedov")
addPerson("Alex Plugaru", "alex@plugaru.org", "@xarg")
addPerson("Alex Schroeder", "alex@gnu.org", "@kensanata")
addPerson("Alex Sergeyev", "abc@alexsergeyev.com", "@asergeyev")
addPerson("Alex Seubert", "alexseubert@gmail.com")
addPerson("Alex Skinner", "alex@lx.lc", "6090@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alex Stoddard", "alex.stoddard@comcast.net")
addPerson("Alex Tokarev", "aleksator@gmail.com", "@aleksator")
addPerson("Alex Vaghin", "ddos@google.com", "alex@cloudware.io", "@x1ddos")
addPerson("Alex Vaghin", "crhyme@google.com", "6347@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alex Vaghin", "alex@cloudware.io", "8870@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alex Yu", "yu.alex96@gmail.com")
addPerson("AlexRudd", "rudd.alex1@gmail.com")
addPerson("Alexander A. Klimov", "alexander.klimov@netways.de")
addPerson("Alexander Ben Nasrallah", "me@abn.sh")
addPerson("Alexander Döring", "email@alexd.ch", "15115@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexander Döring", "email@alexd.ch", "@alexd765")
addPerson("Alexander F Rødseth", "alexander.rodseth@appeartv.com", "@xyproto")
addPerson("Alexander F Rødseth", "rodseth@gmail.com")
addPerson("Alexander Guz", "kalimatas@gmail.com", "@kalimatas")
addPerson("Alexander Kauer", "alexander@affine.space", "@kaueraal")
addPerson("Alexander Kucherenko", "alxkchr@gmail.com")
addPerson("Alexander Kuleshov", "kuleshovmail@gmail.com")
addPerson("Alexander Larsson", "alexander.larsson@gmail.com", "@alexlarsson")
addPerson("Alexander Menzhinsky", "amenzhinsky@gmail.com", "16045@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexander Menzhinsky", "amenzhinsky@gmail.com", "@amenzhinsky")
addPerson("Alexander Milyutin", "alexander.milyutin@lazada.com")
addPerson("Alexander Morozov", "lk4d4math@gmail.com", "8340@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexander Morozov", "lk4d4math@gmail.com", "@LK4D4")
addPerson("Alexander Neumann", "alexander@bumpern.de", "@fd0")
addPerson("Alexander Orlov", "alexander.orlov@loxal.net", "@loxal")
addPerson("Alexander Polcyn", "apolcyn@google.com", "16623@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexander Polcyn", "apolcyn@google.com", "@apolcyn")
addPerson("Alexander Reece", "awreece@gmail.com", "@awreece")
addPerson("Alexander Shopov", "ash@kambanaria.org", "@alshopov")
addPerson("Alexander Zhavnerchik", "alex.vizor@gmail.com", "@alxzh")
addPerson("Alexander Zolotov", "goldifit@gmail.com", "@zolotov")
addPerson("Alexandre Cesaro", "alexandre.cesaro@gmail.com", "5647@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexandre Cesaro", "alexandre.cesaro@gmail.com", "@alexcesaro")
addPerson("Alexandre Fiori", "fiorix@gmail.com", "@fiorix")
addPerson("Alexandre Maari", "draeron@gmail.com", "@draeron")
addPerson("Alexandre Normand", "alexandre.normand@gmail.com", "@alexandre-normand")
addPerson("Alexandre Parenteau", "aubonbeurre@gmail.com")
addPerson("Alexandre Viau", "viau.alexandre@gmail.com", "27580@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexandru Moșoi", "alexandru@mosoi.ro")
addPerson("Alexandru Moșoi", "alexandru@mosoi.ro", "6173@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexandru Moșoi", "brtzsnr@gmail.com", "5930@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexandru Moșoi", "brtzsnr@gmail.com", "@brtzsnr")
addPerson("Alexandru Moșoi", "mosoi@google.com")
addPerson("Alexei Sholik", "alcosholik@gmail.com", "@alco")
addPerson("Alexey Alexandrov", "aalexand@google.com", "@aalexand")
addPerson("Alexey Borzenkov", "snaury@gmail.com", "@snaury")
addPerson("Alexey Naidonov", "alexey.naidyonov@gmail.com")
addPerson("Alexey Naidonov", "alexey.naidyonov@gmail.com", "@growler")
addPerson("Alexey Neganov", "neganovalexey@gmail.com", "@neganovalexey")
addPerson("Alexey Nezhdanov", "snakeru@gmail.com")
addPerson("Alexey Nezhdanov", "snakeru@gmail.com", "9000@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexey Palazhchenko", "alexey.palazhchenko@gmail.com", "13090@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexey Palazhchenko", "alexey.palazhchenko@gmail.com", "@AlekSi")
addPerson("Alexey Vilenskiy", "vilenskialeksei@gmail.com")
addPerson("Alexis Hildebrandt", "surryhill@gmail.com")
addPerson("Alexis Horgix Chotard", "alexis.horgix.chotard@gmail.com")
addPerson("Alexis Hunt", "lexer@google.com")
addPerson("Alexis Imperial-Legrand", "ail@google.com", "@ailg")
addPerson("Ali Rizvi-Santiago", "arizvisa@gmail.com", "@arizvisa")
addPerson("Aliaksandr Valialkin", "valyala@gmail.com", "9525@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aliaksandr Valialkin", "valyala@gmail.com", "@valyala")
addPerson("Alif Rachmawadi", "subosito@gmail.com", "@subosito")
addPerson("Alistair Barrell", "alistair.t.barrell@gmail.com")
addPerson("Allan Simon", "allan.simon@supinfo.com", "@allan-simon")
addPerson("Alok Menghrajani", "alok.menghrajani@gmail.com", "@alokmenghrajani")
addPerson("Aman Gupta", "aman@tmm1.net", "20002@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aman Gupta", "aman@tmm1.net", "@tmm1")
addPerson("Amanuel Bogale", "abogale2@gmail.com")
addPerson("Amir Mohammad Saied", "amir@gluegadget.com", "@amir")
addPerson("Amit Ghadge", "amitg.b14@gmail.com")
addPerson("Ammar Bandukwala", "ammar@ammar.io")
addPerson("Amr A.Mohammed", "merodiro@gmail.com")
addPerson("Amrut Joshi", "amrut.joshi@gmail.com", "@rumple")
addPerson("Amy Schlesener", "amyschlesener@gmail.com")
addPerson("Anand K. Mistry", "anand@mistry.ninja")
addPerson("Anders Pearson", "anders@columbia.edu", "@thraxil")
addPerson("Andrea Nodari", "andrea.nodari91@gmail.com")
addPerson("Andrea Nodari", "andrea.nodari91@gmail.com", "@nodo")
addPerson("Andrea Spadaccini", "spadaccio@google.com", "@lupino3")
addPerson("Andreas Auernhamer", "andreas_golang@mail.de")
addPerson("Andreas Auernhammer", "aead@mail.de", "14805@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andreas Auernhammer", "aead@mail.de", "@aead")
addPerson("Andreas Auernhammer", "enceve@mail.de")
addPerson("Andreas Jellinghaus", "andreas@ionisiert.de", "@tolonuga")
addPerson("Andreas Litt", "andreas.litt@gmail.com")
addPerson("Andrei Gherzan", "andrei@resin.io")
addPerson("Andrei Korzhevskii", "a.korzhevskiy@gmail.com", "@nordligulv")
addPerson("Andrei Tudor Călin", "mail@acln.ro", "27279@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andrei Tudor Călin", "mail@acln.ro", "@acln0")
addPerson("Andres Erbsen", "andres.erbsen@gmail.com")
addPerson("Andrew Austin", "andrewaclt@gmail.com", "@andrewaustin")
addPerson("Andrew Benton", "andrewmbenton@gmail.com", "@andrewmbenton")
addPerson("Andrew Bonventre", "andybons@golang.org", "andybons@gmail.com", "@andybons", "365204+andybons@users.noreply.github.com", "22285@62eb7196-b449-3ce5-99f1-c037f21e1705", "andybons@google.com", "10660@62eb7196-b449-3ce5-99f1-c037f21e1705", "hello@obvy.co")
addPerson("Andrew Brampton", "bramp@google.com")
addPerson("Andrew Braunstein", "awbraunstein@gmail.com", "@awbraunstein")
addPerson("Andrew Ekstedt", "andrew.ekstedt@gmail.com", "6255@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andrew Ekstedt", "andrew.ekstedt@gmail.com", "@magical")
addPerson("Andrew Etter", "andrew.etter@gmail.com", "@andrewetter")
addPerson("Andrew Gerrand", "adg@golang.org", "5010@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andrew Gerrand", "adg@golang.org", "@adg")
addPerson("Andrew Gerrand", "nf@wh3rd.net", "@nf")
addPerson("Andrew Harding", "andrew@spacemonkey.com", "@azdagron")
addPerson("Andrew M Bursavich", "abursavich@gmail.com", "@abursavich")
addPerson("Andrew Patzer", "andrew.patzer@gmail.com")
addPerson("Andrew Pilloud", "andrewpilloud@igneoussystems.com", "@apilloud")
addPerson("Andrew Pogrebnoy", "absourd.noise@gmail.com", "@dAdAbird")
addPerson("Andrew Poydence", "apoydence@pivotal.io", "@poy")
addPerson("Andrew Pritchard", "awpritchard@gmail.com", "@awpr")
addPerson("Andrew Radev", "andrey.radev@gmail.com", "@AndrewRadev")
addPerson("Andrew Skiba", "skibaa@gmail.com", "@skibaa")
addPerson("Andrew Szeto", "andrew@jabagawee.com", "@jabagawee")
addPerson("Andrew Wilkins", "axwalk@gmail.com", "8640@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andrew Wilkins", "axwalk@gmail.com", "@axw")
addPerson("Andrew Williams", "williams.andrew@gmail.com", "@williamsandrew")
addPerson("Andrey Petrov", "andrey.petrov@shazow.net", "@shazow")
addPerson("Andrii Soldatenko", "andrii.soldatenko@gmail.com", "@andriisoldatenko")
addPerson("Andrii Soluk", "isoluchok@gmail.com", "24501@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andrii Zakharov", "andrii@messagebird.com")
addPerson("Andris Valums", "avalums.spam@linelane.com")
addPerson("Andriy Lytvynov", "lytvynov.a.v@gmail.com", "@awly")
addPerson("Andrzej Żeżel", "andrii.zhezhel@gmail.com", "@zhezhel")
addPerson("André Carvalho", "asantostc@gmail.com", "@andrestc")
addPerson("Andy Balholm", "andy@balholm.com", "6535@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andy Balholm", "andy@balholm.com", "@andybalholm")
addPerson("Andy Balholm", "andybalholm@gmail.com")
addPerson("Andy Bursavich", "bursavich@google.com")
addPerson("Andy Davis", "andy@bigandian.com", "@bigandian")
addPerson("Andy Finkenstadt", "afinkenstadt@zynga.com", "@afinkenstadt")
addPerson("Andy Lindeman", "andy@lindeman.io")
addPerson("Andy Maloney", "asmaloney@gmail.com", "@asmaloney")
addPerson("Andy Walker", "walkeraj@gmail.com")
addPerson("Anfernee Yongkun Gui", "anfernee.gui@gmail.com", "@anfernee")
addPerson("Angelo Bulfone", "mbulfone@gmail.com", "@boomshroom")
addPerson("Angelo Compagnucci", "angelo.compagnucci@gmail.com")
addPerson("Anh Hai Trinh", "anh.hai.trinh@gmail.com", "@aht")
addPerson("Anit Gandhi", "anitgandhi@gmail.com", "@anitgandhi")
addPerson("Ankit Goyal", "ankit3goyal@gmail.com", "@goyalankit")
addPerson("Anmol Sethi", "anmol@aubble.com", "@nhooyr")
addPerson("Anmol Sethi", "hi@nhooyr.io")
addPerson("Anmol Sethi", "me+git@anmol.io", "@nhooyr")
addPerson("Anmol Sethi", "me@anmol.io", "9620@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Anschel Schaffer-Cohen", "anschelsc@gmail.com", "@anschelsc")
addPerson("Anthony Alves", "cvballa3g0@gmail.com")
addPerson("Anthony Canino", "anthony.canino1@gmail.com", "@anthonycanino1")
addPerson("Anthony Eufemio", "anthony.eufemio@gmail.com", "@tymat")
addPerson("Anthony Martin", "ality@pbrane.org", "5635@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Anthony Martin", "ality@pbrane.org", "@ality")
addPerson("Anthony Pesch", "inolen@gmail.com")
addPerson("Anthony Romano", "anthony.romano@coreos.com")
addPerson("Anthony Sottile", "asottile@umich.edu", "@asottile")
addPerson("Anthony Starks", "ajstarks@gmail.com", "@ajstarks")
addPerson("Antoine Martin", "antoine97.martin@gmail.com", "@alarsyo")
addPerson("Anton Gyllenberg", "anton@iki.fi", "@antong")
addPerson("Antonin Amand", "antonin.amand@gmail.com", "@gwik")
addPerson("Antonio Antelo", "aantelov87@gmail.com")
addPerson("Antonio Bibiano", "antbbn@gmail.com", "@antbbn")
addPerson("Antonio Murdaca", "runcom@redhat.com", "@runcom")
addPerson("Aram Hăvărneanu", "aram@mgk.ro", "5036@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aram Hăvărneanu", "aram@mgk.ro", "@4ad")
addPerson("Arash Bina", "arash@arash.io")
addPerson("Arash Bina", "arash@arash.io", "@arashbina")
addPerson("Areski Belaid", "areski@gmail.com", "5825@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ariel Mashraki", "ariel@mashraki.co.il", "@a8m")
addPerson("Arlo Breault", "arlolra@gmail.com", "@arlolra")
addPerson("Arnaud Ysmal", "stacktic@netbsd.org", "@stacktic")
addPerson("Arne Hormann", "arnehormann@gmail.com", "@arnehormann")
addPerson("Arnout Engelen", "arnout@bzzt.net")
addPerson("Aron Nopanen", "aron.nopanen@gmail.com", "@aroneous")
addPerson("Artem V. Navrotskiy", "bozaro@gmail.com")
addPerson("Artemiy Ryabinkov", "getmylag@gmail.com")
addPerson("Arthur Khashaev", "arthur@khashaev.ru", "@Invizory")
addPerson("Arthur Mello", "arthur.mello85@gmail.com")
addPerson("Artyom Pervukhin", "artyom.pervukhin@gmail.com", "9870@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Artyom Pervukhin", "artyom.pervukhin@gmail.com", "@artyom")
addPerson("Arvindh Rajesh Tamilmani", "art@a-30.net", "@arvindht")
addPerson("Asad Mehmood", "asad78611@googlemail.com")
addPerson("Ashish Gandhi", "ag@ashishgandhi.org", "@ashishgandhi")
addPerson("Asim Shankar", "asimshankar@gmail.com", "@asimshankar")
addPerson("Atin M", "amalaviy@akamai.com", "@amalaviy")
addPerson("Ato Araki", "ato.araki@gmail.com", "@atotto")
addPerson("Attila Tajti", "attila.tajti@gmail.com")
addPerson("Audrey Lim", "audreylh@gmail.com", "13190@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Audrey Lim", "audreylh@gmail.com", "@audreylim")
addPerson("Audrius Butkevicius", "audrius.butkevicius@gmail.com", "25277@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Audrius Butkevicius", "audrius.butkevicius@gmail.com", "@AudriusButkevicius")
addPerson("Augusto Roman", "aroman@gmail.com", "@augustoroman")
addPerson("Aulus Egnatius Varialus", "varialus@gmail.com", "@varialus")
addPerson("Aurélien Rainone", "aurelien.rainone@gmail.com")
addPerson("Aurélien Rainone", "aurelien.rainone@gmail.com", "@arl")
addPerson("Austin Clements", "austin@google.com", "5167@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Austin Clements", "austin@google.com", "@aclements")
addPerson("Austin J. Alexander", "austinjalexander@gmail.com")
addPerson("Author Name", "aaronstein12@gmail.com", "@aastein")
addPerson("Author Name", "brett.j.merrill94@gmail.com", "@bmerrill42")
addPerson("Author Name", "mikemitchellwebdev@gmail.com")
addPerson("Author: grantseltzer", "grantseltzer@gmail.com")
addPerson("Avelino", "t@avelino.xxx", "8805@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Avelino", "t@avelino.xxx", "@avelino")
addPerson("Awn Umar", "awn@cryptolosophy.org", "21940@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Awn", "awn@cryptolosophy.io")
addPerson("Axel Wagner", "axel.wagner.hh@googlemail.com", "@Merovius")
addPerson("Ayan George", "ayan@ayan.net")
addPerson("Ayke van Laethem", "aykevanlaethem@gmail.com")
addPerson("Aymerick", "aymerick@jehanne.org", "@aymerick")
addPerson("B.G.Adrian", "aditza8@gmail.com")
addPerson("Baiju Muthukadan", "baiju.m.mail@gmail.com", "@baijum")
addPerson("Bakin Aleksey", "kultihell@gmail.com")
addPerson("Balaram Makam", "bmakam.qdt@qualcommdatacenter.com", "25702@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Balaram Makam", "bmakam.qdt@qualcommdatacenter.com", "@bmakam-qdt")
addPerson("Balazs Lecz", "leczb@google.com", "@leczb")
addPerson("Baokun Lee", "nototon@gmail.com", "9646@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Baokun Lee", "nototon@gmail.com", "@oiooj")
addPerson("Bartek Plotka", "bwplotka@gmail.com")
addPerson("Bartosz Modelski", "modelski.bartosz@gmail.com")
addPerson("Bastian Ike", "bastian.ike@gmail.com")
addPerson("Baylee Feore", "baylee.feore@gmail.com")
addPerson("Ben Burkert", "ben@benburkert.com", "5673@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ben Burkert", "ben@benburkert.com", "@benburkert")
addPerson("Ben Fried", "ben.fried@gmail.com", "@benfried")
addPerson("Ben Haines", "bhainesva@gmail.com")
addPerson("Ben Hoyt", "benhoyt@gmail.com", "@benhoyt")
addPerson("Ben Laurie", "ben@links.org", "21925@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ben Lubar", "ben.lubar@gmail.com", "@BenLubar")
addPerson("Ben Lynn", "benlynn@gmail.com", "@blynn")
addPerson("Ben Olive", "sionide21@gmail.com", "@sionide21")
addPerson("Ben Schwartz", "bemasc@google.com", "20251@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ben Schwartz", "bemasc@google.com", "@bemasc")
addPerson("Ben Shi", "powerman1st@163.com", "16935@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ben Shi", "powerman1st@163.com", "@benshi001")
addPerson("Ben Toews", "mastahyeti@gmail.com")
addPerson("Benjamin Black", "b@b3k.us", "@b")
addPerson("Benjamin Cable", "cable.benjamin@gmail.com", "@ladydascalie")
addPerson("Benjamin Hsieh", "tanookiben@users.noreply.github.com")
addPerson("Benjamin Prosnitz", "bprosnitz@google.com", "6965@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Benjamin Prosnitz", "bprosnitz@google.com", "@bprosnitz")
addPerson("Benjamin Wester", "bwester@squareup.com", "@bwester")
addPerson("Benny Siegert", "bsiegert@gmail.com", "5184@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Benny Siegert", "bsiegert@gmail.com", "@bsiegert")
addPerson("Benny Siegert", "bsiegert@google.com", "@bsiegert")
addPerson("Benoit Sigoure", "tsunanet@gmail.com", "9643@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Benoit Sigoure", "tsunanet@gmail.com", "@tsuna")
addPerson("Bernat Moix", "bmoix@bmoix.io")
addPerson("Bernd Fix", "brf@hoi-polloi.org")
addPerson("BigMikes", "giulio.micheloni@gmail.com", "@BigMikes")
addPerson("Bill Neubauer", "wcn@google.com")
addPerson("Bill O'Farrell", "billo@ca.ibm.com", "@wgo")
addPerson("Bill O'Farrell", "billotosyr@gmail.com", "11191@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Bill Prin", "waprin@google.com")
addPerson("Bill Thiede", "couchmoney@gmail.com", "6175@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Bill Thiede", "couchmoney@gmail.com", "@wathiede", "*goog")
addPerson("Bill Zissimopoulos", "billziss@navimatics.com", "@billziss-gh")
addPerson("Billie H. Cleek", "bhcleek@gmail.com", "@bhcleek")
addPerson("Billy Lynch", "wlynch@google.com", "@wlynch")
addPerson("Blain Smith", "blain.smith@gmail.com", "22696@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Blain Smith", "rebelgeek@blainsmith.com", "@blainsmith")
addPerson("Blake Gentry", "blakesgentry@gmail.com", "5683@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Blake Gentry", "blakesgentry@gmail.com", "@bgentry")
addPerson("Blake Mesdag", "blakemesdag@gmail.com")
addPerson("Blake Mizerany", "blake.mizerany@gmail.com", "10551@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Blake Mizerany", "blake.mizerany@gmail.com", "@bmizerany")
addPerson("Blixt", "me@blixt.nyc", "@blixt")
addPerson("Bob B.", "rbriski@gmail.com", "26997@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Bob Potter", "bobby.potter@gmail.com")
addPerson("Bobby DeSimone", "bobbydesimone@gmail.com", "@desimone")
addPerson("Bobby Powers", "bobbypowers@gmail.com", "@bpowers")
addPerson("Bodo Junglas", "bodo.junglas@leanovate.de")
addPerson("Boris Nagaev", "nagaev@google.com")
addPerson("Boris Schrijver", "bschrijver@schubergphilis.com")
addPerson("Borja Clemente", "borja.clemente@gmail.com", "@clebs")
addPerson("Brad Burch", "brad.burch@gmail.com", "@brad-burch")
addPerson("Brad Fitzpatrick", "bradfitz@golang.org", "brad@danga.com", "@bradfitz", "5065@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Brad Jones", "rbjones@google.com")
addPerson("Brad Morgan", "brad@morgabra.com")
addPerson("Brad Whitaker", "brad.whitaker@gmail.com")
addPerson("Braden Bassingthwaite", "bbassingthwaite@vendasta.com")
addPerson("Bradley Kemp", "bradleyjkemp96@gmail.com")
addPerson("Bradley Schoch", "bschoch@gmail.com")
addPerson("Brady Catherman", "brady@gmail.com")
addPerson("Brady Sullivan", "brady@bsull.com", "@d1str0")
addPerson("Brandon Bennett", "bbennett@fb.com", "@brbe")
addPerson("Brandon Bennett", "bbennett@fb.com", "@nemith")
addPerson("Brandon Dyck", "brandon@dyck.us")
addPerson("Brandon Gilmore", "varz@google.com", "@bgilmore")
addPerson("Brandon Gonzalez", "bg@lightstep.com")
addPerson("Brandon Lum", "lumjjb@gmail.com")
addPerson("Brendan Ashworth", "brendan.ashworth@me.com")
addPerson("Brendan Daniel Tracey", "tracey.brendan@gmail.com", "@btracey")
addPerson("Brendan Tracey", "tracey.brendan@gmail.com", "7155@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Brett Cannon", "bcannon@gmail.com", "@brettcannon")
addPerson("Brett Jones", "bjones027@gmail.com")
addPerson("Brian Dellisanti", "briandellisanti@gmail.com", "@briandellisanti")
addPerson("Brian Downs", "brian.downs@gmail.com", "@briandowns")
addPerson("Brian Flanigan", "brian_flanigan@cable.comcast.com")
addPerson("Brian G. Merrell", "bgmerrell@gmail.com", "@bgmerrell")
addPerson("Brian Gitonga Marete", "bgm@google.com", "@marete")
addPerson("Brian Gitonga Marete", "marete@toshnix.com", "@marete")
addPerson("Brian Kennedy", "btkennedy@gmail.com", "@briantkennedy")
addPerson("Brian Kessler", "brian.m.kessler@gmail.com", "20650@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Brian Kessler", "brian.m.kessler@gmail.com", "@bmkessler")
addPerson("Brian Ketelsen", "bketelsen@gmail.com", "@bketelsen")
addPerson("Brian Slesinskya", "skybrian@google.com", "@skybrian")
addPerson("Brian Smith", "ohohvi@gmail.com", "@sirwart")
addPerson("Brian Starke", "brian.starke@gmail.com", "@brianstarke")
addPerson("Brian Starkey", "stark3y@gmail.com")
addPerson("Bruno Clermont", "bruno.clermont@gmail.com")
addPerson("Bryan Alexander", "kozical@msn.com", "@Kozical")
addPerson("Bryan C. Mills", "bcmills@google.com", "6365@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Bryan C. Mills", "bcmills@google.com", "@bcmills")
addPerson("Bryan Chan", "bryan.chan@ca.ibm.com", "@bryanpkc")
addPerson("Bryan Chan", "bryanpkc@gmail.com", "6576@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Bryan Ford", "brynosaurus@gmail.com", "5500@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Bryan Ford", "brynosaurus@gmail.com", "@bford")
addPerson("Bryan Heden", "b.heden@gmail.com", "@hedenface")
addPerson("Bryan Mills", "bcmills@google.com", "@bcmills")
addPerson("Bryan Turley", "bryanturley@gmail.com")
addPerson("Bulat Gaifullin", "gaifullinbf@gmail.com", "@bgaifullin")
addPerson("Burak Guven", "bguven@gmail.com", "@burakguven")
addPerson("Caine Tighe", "arctanofyourface@gmail.com", "@nilnilnil")
addPerson("Caio Marcelo de Oliveira Filho", "caio.oliveira@intel.com", "@cmarcelo")
addPerson("Caio Oliveira", "caio.oliveira@intel.com", "12640@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Caleb Doxsey", "caleb@doxsey.net")
addPerson("Caleb Martinez", "accounts@calebmartinez.com", "@conspicuousClockwork")
addPerson("Caleb Spare", "cespare@gmail.com", "5615@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Caleb Spare", "cespare@gmail.com", "@cespare")
addPerson("Calvin Behling", "calvin.behling@gmail.com")
addPerson("Calvin Leung Huang", "cleung2010@gmail.com")
addPerson("Cameron Howey", "chowey@ualberta.net")
addPerson("Carl Chatfield", "carlchatfield@gmail.com", "@0xfaded")
addPerson("Carl Henrik Lunde", "chlunde@ifi.uio.no")
addPerson("Carl Henrik Lunde", "chlunde@ifi.uio.no", "@chlunde")
addPerson("Carl Jackson", "carl@stripe.com", "@carl-stripe")
addPerson("Carl Johnson", "me@carlmjohnson.net", "12425@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Carl Johnson", "me@carlmjohnson.net", "@carlmjohnson")
addPerson("Carl Mastrangelo", "notcarl@google.com", "@carl-mastrangelo", "carl.mastrangelo@gmail.com", "carlmastrangelo@gmail.com", "12225@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Carlisia Campos", "carlisia@grokkingtech.io", "@carlisia")
addPerson("Carlo Alberto Ferraris", "cafxx@strayorange.com", "11500@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Carlo Alberto Ferraris", "cafxx@strayorange.com", "@CAFxX")
addPerson("Carlos C", "uldericofilho@gmail.com", "@ucirello")
addPerson("Carlos Castillo", "cookieo9@gmail.com", "5141@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Carlos Castillo", "cookieo9@gmail.com", "@cookieo9")
addPerson("Carlos Eduardo Seo", "cseo@linux.vnet.ibm.com", "13015@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Carlos Eduardo Seo", "cseo@linux.vnet.ibm.com", "@ceseo")
addPerson("Carolyn Van Slyck", "me@carolynvanslyck.com", "@carolynvs")
addPerson("Carrie Bynon", "cbynon@gmail.com", "@cbynon")
addPerson("Casey Callendrello", "squeed@gmail.com")
addPerson("Casey Marshall", "casey.marshall@gmail.com", "@cmars")
addPerson("Casey Smith", "smithc@homesandland.com")
addPerson("Cassandra Salisbury", "cls@golang.org")
addPerson("Cassandra Salisbury", "salisburycl@gmail.com")
addPerson("Catalin Nicutar", "cnicutar@google.com")
addPerson("Catalin Nicutar", "cnicutar@google.com", "12526@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Catalin Patulea", "catalinp@google.com", "@cpatulea")
addPerson("Cedric Staub", "cs@squareup.com", "@csstaub")
addPerson("Cezar Espinola", "cezarsa@gmail.com", "9010@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Cezar Sa Espinola", "cezarsa@gmail.com", "@cezarsa")
addPerson("Chad Kunde", "Kunde21@gmail.com")
addPerson("Chad Rosier", "mrosier.qdt@qualcommdatacenter.com", "25690@62eb7196-b449-3ce5-99f1-c037f21e1705", "@mrosier-qdt")
addPerson("ChaiShushan", "chaishushan@gmail.com", "@chai2010")
addPerson("Chance Zibolski", "chance.zibolski@coreos.com")
addPerson("Changsoo Kim", "broodkcs@gmail.com")
addPerson("Channing Kimble-Brown", "channing@golang.org", "@cnoellekb")
addPerson("Charle Demers", "charle.demers@gmail.com")
addPerson("Charles Fenwick Elliott", "Charles@FenwickElliott.io")
addPerson("Charles Kenney", "charlesc.kenney@gmail.com", "@Charliekenney23")
addPerson("Charles Weill", "weill@google.com", "@cweill")
addPerson("Charlie Dorian", "cldorian@gmail.com", "5435@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Charlie Dorian", "cldorian@gmail.com", "@cldorian")
addPerson("Cheng-Lung Sung", "clsung@gmail.com", "@clsung")
addPerson("Cherry Zhang", "cherryyz@google.com", "13315@62eb7196-b449-3ce5-99f1-c037f21e1705", "@cherrymui", "lunaria21@gmail.com", "9670@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Chew Choon Keat", "choonkeat@gmail.com", "@choonkeat")
addPerson("Chintan Sheth", "shethchintan7@gmail.com")
addPerson("Cholerae Hu", "choleraehyq@gmail.com", "15760@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Cholerae Hu", "choleraehyq@gmail.com", "@choleraehyq")
addPerson("Chotepud Teo", "alexrousg@users.noreply.github.com", "@AlexRouSg")
addPerson("Chris Ball", "chris@printf.net", "@cjb")
addPerson("Chris Biscardi", "chris@christopherbiscardi.com", "@ChristopherBiscardi")
addPerson("Chris Broadfoot", "cbro@golang.org", "7935@62eb7196-b449-3ce5-99f1-c037f21e1705", "cbro@google.com", "@broady", "7440@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Chris Dollin", "ehog.hedge@gmail.com", "@ehedgehog")
addPerson("Chris Donnelly", "cmd@ceedon.io")
addPerson("Chris Duarte", "csduarte@gmail.com")
addPerson("Chris Farmiloe", "chrisfarms@gmail.com", "@chrisfarms")
addPerson("Chris H (KruftMaster)", "chrusty@gmail.com")
addPerson("Chris Hines", "chris.cs.guy@gmail.com", "7850@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Chris Hines", "chris.cs.guy@gmail.com", "@ChrisHines")
addPerson("Chris J Arges", "christopherarges@gmail.com")
addPerson("Chris Jones", "chris@cjones.org", "@cjyar")
addPerson("Chris K", "c@chrisko.ch")
addPerson("Chris Kastorff", "encryptio@gmail.com", "@encryptio")
addPerson("Chris Lennert", "calennert@gmail.com", "@calennert")
addPerson("Chris Lewis", "cflewis@golang.org")
addPerson("Chris Lewis", "cflewis@google.com")
addPerson("Chris Liles", "caveryliles@gmail.com", "26297@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Chris Manghane", "cmang@golang.org", "5130@62eb7196-b449-3ce5-99f1-c037f21e1705", "@paranoiacblack")
addPerson("Chris Marchesi", "chrism@vancluevertech.com", "@vancluever")
addPerson("Chris McGee", "newton688@gmail.com", "15452@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Chris McGee", "sirnewton_01@yahoo.ca", "@sirnewton01")
addPerson("Chris Raynor", "raynor@google.com")
addPerson("Chris Roche", "rodaine@gmail.com", "@rodaine")
addPerson("Chris Stockton", "chrisstocktonaz@gmail.com")
addPerson("Chris Zou", "chriszou@ca.ibm.com", "@ChrisXZou")
addPerson("ChrisALiles", "caveryliles@gmail.com", "@ChrisALiles")
addPerson("Christian Alexander", "christian@linux.com", "@ChristianAlexander")
addPerson("Christian Couder", "chriscool@tuxfamily.org", "@chriscool")
addPerson("Christian Couder", "christian.couder@gmail.com", "11200@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Christian Haas", "christian.haas@sevensuns.at")
addPerson("Christian Himpel", "chressie@googlemail.com", "@chressie")
addPerson("Christian Mauduit", "ufoot@ufoot.org")
addPerson("Christian Pellegrin", "chri@evolware.org")
addPerson("Christian Simon", "simon@swine.de")
addPerson("Christoph Hack", "christoph@tux21b.org", "@tux21b")
addPerson("Christophe Kamphaus", "christophe.kamphaus@gmail.com")
addPerson("Christophe Taton", "taton@google.com")
addPerson("Christopher Boumenot", "chrboum@microsoft.com")
addPerson("Christopher Cahoon", "chris.cahoon@gmail.com", "@ccahoon")
addPerson("Christopher Guiney", "chris@guiney.net", "@chrisguiney")
addPerson("Christopher Koch", "chrisko@google.com", "@hugelgupf")
addPerson("Christopher Nelson", "nadiasvertex@gmail.com", "11675@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Christopher Nelson", "nadiasvertex@gmail.com", "@nadiasvertex")
addPerson("Christopher Nielsen", "m4dh4tt3r@gmail.com", "@m4dh4tt3r")
addPerson("Christopher Redden", "christopher.redden@gmail.com", "@topherredden")
addPerson("Christopher Wedgwood", "cw@f00f.org", "@cwedgwood")
addPerson("Christos Zoulas", "christos@zoulas.com", "@zoulasc")
addPerson("Christos Zoulas", "zoulasc@gmail.com")
addPerson("Christy Perez", "christy@linux.vnet.ibm.com", "@clnperez")
addPerson("Cindy Pallares", "cindy@gitlab.com")
addPerson("Cixtor", "cixtords@gmail.com", "@cixtor")
addPerson("Claire Wang", "cw773@cornell.edu")
addPerson("Clement Courbet", "courbet@google.com")
addPerson("Clement Skau", "clementskau@gmail.com", "@cskau")
addPerson("Clément Chigot", "clement.chigot@atos.net", "@Helflym")
addPerson("Clément Denis", "clement@altirnao.com")
addPerson("Coda Hale", "coda.hale@gmail.com")
addPerson("Colby Ranger", "cranger@google.com", "@crangeratgoogle")
addPerson("Colin Cross", "ccross@android.com", "@colincross")
addPerson("Colin Edwards", "colin@recursivepenguin.com", "@DDRBoxman")
addPerson("Colin Kennedy", "moshen.colin@gmail.com", "@moshen")
addPerson("Colin", "clr@google.com")
addPerson("Connor McGuinness", "connor.mcguinness@izettle.com")
addPerson("Conrad Irwin", "conrad.irwin@gmail.com", "@ConradIrwin")
addPerson("Conrad Meyer", "cemeyer@cs.washington.edu", "@cemeyer")
addPerson("Conrad Taylor", "conradwt@gmail.com")
addPerson("Conrado Gouvea", "conradoplg@gmail.com", "@conradoplg")
addPerson("Constantijn Schepens", "constantijnschepens@gmail.com")
addPerson("Constantin Konstantinidis", "constantinkonstantinidis@gmail.com", "26957@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Constantin Konstantinidis", "constantinkonstantinidis@gmail.com", "@iWdGo")
addPerson("Corey Thomasson", "cthom.lists@gmail.com", "@cthom06")
addPerson("Cory LaNou", "cory@lanou.com")
addPerson("Cosmos Nicolaou", "cnicolaou@grailbio.com")
addPerson("Costin Chirvasuta", "ctin@google.com", "@ct1n")
addPerson("Craig Citro", "craigcitro@google.com", "@craigcitro")
addPerson("Craig Peterson", "cpeterson@stackoverflow.com")
addPerson("Cristian Staretu", "unclejacksons@gmail.com", "@unclejack")
addPerson("Cuihtlauac ALVARADO", "cuihtlauac.alvarado@orange.com", "@cuihtlauac")
addPerson("Cuong Manh Le", "cuong.manhle.vn@gmail.com", "14665@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Cyrill Schumacher", "cyrill@schumacher.fm", "@SchumacherFM")
addPerson("Daker Fernandes Pinheiro", "daker.fernandes.pinheiro@intel.com", "@dakerfp")
addPerson("Dalton Scott", "dscott.jobs@gmail.com")
addPerson("Damian Gryski", "damian@gryski.com", "@dgryski")
addPerson("Damian Gryski", "dgryski@gmail.com", "7050@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Damian Gryski", "dgryski@gmail.com", "@dgryski")
addPerson("Damien Lespiau", "damien.lespiau@gmail.com", "13855@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Damien Lespiau", "damien.lespiau@intel.com", "damien.lespiau@gmail.com", "@dlespiau")
addPerson("Damien Mathieu", "42@dmathieu.com", "@dmathieu")
addPerson("Damien Neil", "dneil@google.com", "5305@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Damien Neil", "dneil@google.com", "@neild")
addPerson("Damien Tournoud", "damien@platform.sh")
addPerson("Dan Adkins", "dadkins@gmail.com")
addPerson("Dan Ballard", "dan@mindstab.net")
addPerson("Dan Barry", "dan@bakineggs.com")
addPerson("Dan Bentley", "dtbentley@gmail.com")
addPerson("Dan Caddigan", "goldcaddy77@gmail.com", "@goldcaddy77")
addPerson("Dan Callahan", "dan.callahan@gmail.com", "@callahad")
addPerson("Dan Ertman", "dtertman@gmail.com")
addPerson("Dan Goldsmith", "dan@d2g.org.uk")
addPerson("Dan Harrington", "harringtond@google.com")
addPerson("Dan Jacques", "dnj@google.com")
addPerson("Dan Johnson", "computerdruid@google.com", "@ComputerDruid")
addPerson("Dan Kortschak", "dan.kortschak@adelaide.edu.au", "6480@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dan Kortschak", "dan@kortschak.io", "@kortschak")
addPerson("Dan Luedtke", "mail@danrl.com")
addPerson("Dan Moore", "mooreds@gmail.com")
addPerson("Dan Peterson", "dpiddy@gmail.com", "5665@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dan Peterson", "dpiddy@gmail.com", "@danp")
addPerson("Dan Richards", "dan.m.richards@gmail.com")
addPerson("Dan Richelson", "drichelson@gmail.com")
addPerson("Dan Sinclair", "dan.sinclair@gmail.com", "@dj2")
addPerson("Dana Hoffman", "danahoffman@google.com")
addPerson("Daniel Cormier", "daniel.cormier@gmail.com")
addPerson("Daniel Fleischman", "danielfleischman@gmail.com", "@danielf")
addPerson("Daniel Heckrath", "d.heckrath@maple-apps.com")
addPerson("Daniel Hultqvist", "daniel@typedef.se")
addPerson("Daniel Ingram", "ingramds@appstate.edu", "@daniel-s-ingram")
addPerson("Daniel Johansson", "dajo2002@gmail.com", "9663@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Daniel Johansson", "dajo2002@gmail.com", "@dajoo75")
addPerson("Daniel Kerwin", "d.kerwin@gini.net", "@dkerwin")
addPerson("Daniel Krech", "eikeon@eikeon.com", "@eikeon")
addPerson("Daniel Mahu", "dmahu@google.com")
addPerson("Daniel Martí", "mvdan@mvdan.cc", "13550@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Daniel Martí", "mvdan@mvdan.cc", "@mvdan")
addPerson("Daniel Morsing", "daniel.morsing@gmail.com", "5310@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Daniel Morsing", "daniel.morsing@gmail.com", "@DanielMorsing")
addPerson("Daniel Nephin", "dnephin@gmail.com", "@dnephin")
addPerson("Daniel Ortiz Pereira da Silva", "daniel.particular@gmail.com", "@dopsilva")
addPerson("Daniel Skinner", "daniel@dasa.cc", "10675@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Daniel Skinner", "daniel@dasa.cc", "@dskinner")
addPerson("Daniel Speichert", "daniel@speichert.pl", "@DSpeichert")
addPerson("Daniel Theophanes", "kardianos@gmail.com", "5080@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Daniel Theophanes", "kardianos@gmail.com", "@kardianos")
addPerson("Daniel Toebe", "dtoebe@gmail.com")
addPerson("Daniel Upton", "daniel@floppy.co", "@boxofrad")
addPerson("Daniel Wagner-Hall", "dawagner@gmail.com")
addPerson("Daniel", "danielfs.ti@gmail.com")
addPerson("Daniel, Dao Quang Minh", "dqminh89@gmail.com")
addPerson("Daniela Petruzalek", "daniela.petruzalek@gmail.com", "@danicat")
addPerson("Daniël de Kok", "me@danieldk.eu", "@danieldk")
addPerson("Danny Hadley", "dadleyy@gmail.com")
addPerson("Danny Rosseau", "daniel.rosseau@gmail.com")
addPerson("Danny Wyllie", "wylliedanny@gmail.com")
addPerson("Danny Yoo", "dannyyoo@google.com")
addPerson("Dante Shareiff", "prophesional@gmail.com")
addPerson("Darien Raymond", "admin@v2ray.com")
addPerson("Darien Raymond", "admin@v2ray.com", "@DarienRaymond")
addPerson("Darren Elwood", "darren@textnode.com", "@textnode")
addPerson("Darron Froese", "dfroese@salesforce.com")
addPerson("Darshan Parajuli", "parajulidarshan@gmail.com", "@darshanparajuli")
addPerson("Datong Sun", "dndx@idndx.com", "@dndx")
addPerson("Dave Borowitz", "dborowitz@google.com", "@dborowitz")
addPerson("Dave Cheney", "dave@cheney.net", "5150@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dave Cheney", "dave@cheney.net", "@davecheney")
addPerson("Dave Day", "djd@golang.org", "5170@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dave Day", "djd@golang.org", "@okdave")
addPerson("Dave MacFarlane", "driusan@gmail.com")
addPerson("Dave Russell", "forfuncsake@gmail.com", "@forfuncsake")
addPerson("Dave Setzke", "daveset73@gmail.com")
addPerson("Dave Wyatt", "dlwyatt115@gmail.com")
addPerson("David Anderson", "danderson@google.com", "13070@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Anderson", "danderson@google.com", "@danderson")
addPerson("David Barnett", "dbarnett@google.com", "@dbarnett")
addPerson("David Bartley", "bartle@stripe.com")
addPerson("David Benjamin", "davidben@google.com", "7805@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Benjamin", "davidben@google.com", "@davidben")
addPerson("David Benque", "dbenque@gmail.com")
addPerson("David Brophy", "dave@brophy.uk", "@dave")
addPerson("David Bürgin", "676c7473@gmail.com", "@glts")
addPerson("David Calavera", "david.calavera@gmail.com", "@calavera")
addPerson("David Carlier", "devnexen@gmail.com", "@devnexen")
addPerson("David Chase", "drchase@google.com", "7061@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Chase", "drchase@google.com", "@dr2chase")
addPerson("David Crawshaw", "crawshaw@golang.org", "5030@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Crawshaw", "crawshaw@golang.org", "@crawshaw")
addPerson("David Deng", "daviddengcn@gmail.com")
addPerson("David Finkel", "david.finkel@gmail.com")
addPerson("David Forsythe", "dforsythe@gmail.com", "@dforsyth")
addPerson("David G. Andersen", "dave.andersen@gmail.com", "@dave-andersen")
addPerson("David Glasser", "glasser@meteor.com", "9556@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Glasser", "glasser@meteor.com", "@glasser")
addPerson("David Good", "dgood@programminggoody.com")
addPerson("David Heuschmann", "heuschmann.d@gmail.com", "@dddent")
addPerson("David Howden", "dhowden@gmail.com")
addPerson("David Hubbard", "dsp@google.com")
addPerson("David Jakob Fritz", "david.jakob.fritz@gmail.com", "@djfritz")
addPerson("David Kitchen", "david@buro9.com")
addPerson("David Lazar", "lazard@golang.org", "16260@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Lazar", "lazard@golang.org", "@davidlazar")
addPerson("David Leon Gil", "coruus@gmail.com", "5830@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Leon Gil", "coruus@gmail.com", "@coruus")
addPerson("David Ndungu", "dnjuguna@gmail.com")
addPerson("David NewHamlet", "david@newhamlet.com", "@wheelcomplex")
addPerson("David Newhamlet", "david@newhamlet.com", "13738@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Presotto", "presotto@gmail.com", "@presotto")
addPerson("David R. Jenni", "david.r.jenni@gmail.com", "@davidrjenni")
addPerson("David R. Jenni", "davidrjenni@protonmail.com", "6180@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Sansome", "me@davidsansome.com")
addPerson("David Stainton", "dstainton415@gmail.com", "@david415")
addPerson("David Symonds", "dsymonds@golang.org", "5045@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Symonds", "dsymonds@golang.org", "@dsymonds")
addPerson("David Thomas", "davidthomas426@gmail.com", "@davidthomas426")
addPerson("David Timm", "dtimm@pivotal.io", "@dtimm")
addPerson("David Titarenco", "david.titarenco@gmail.com", "@dvx")
addPerson("David Tolpin", "david.tolpin@gmail.com", "@dtolpin")
addPerson("David Url", "david@urld.io", "26506@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Url", "david@urld.io", "@urld")
addPerson("David Volquartz Lebech", "david@lebech.info")
addPerson("David Wimmer", "davidlwimmer@gmail.com", "@dwimmer")
addPerson("David du Colombier", "0intro@gmail.com")
addPerson("David du Colombier", "0intro@gmail.com", "5060@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David du Colombier", "0intro@gmail.com", "@0intro")
addPerson("Davies Liu", "davies.liu@gmail.com", "@davies")
addPerson("Davor Kapsa", "davor.kapsa@gmail.com", "@dvrkps")
addPerson("Ddo", "joeddo89@gmail.com")
addPerson("Dean Prichard", "dean.prichard@gmail.com", "@zard49")
addPerson("Deepak Jois", "deepak.jois@gmail.com", "@deepakjois")
addPerson("Deepali Raina", "deepali.raina@gmail.com")
addPerson("Denis Bernard", "db047h@gmail.com", "@db47h")
addPerson("Denis Nagorny", "denis.nagorny@intel.com", "10734@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Denis Nagorny", "denis.nagorny@intel.com", "@dvnagorny")
addPerson("Dennis Kuhnert", "mail.kuhnert@gmail.com", "26874@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dennis Kuhnert", "mail.kuhnert@gmail.com", "@kyroy")
addPerson("Denys Honsiorovskyi", "honsiorovskyi@gmail.com", "@honsiorovskyi")
addPerson("Denys Smirnov", "denis.smirnov.91@gmail.com", "@dennwc")
addPerson("Derek Bruening", "bruening@google.com")
addPerson("Derek Buitenhuis", "derek.buitenhuis@gmail.com", "@dwbuiten")
addPerson("Derek Che", "drc@yahoo-inc.com")
addPerson("Derek Che", "drc@yahoo-inc.com", "5750@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Derek McGowan", "derek@mcgstyle.net")
addPerson("Derek Parker", "parkerderek86@gmail.com", "@derekparker")
addPerson("Derek Perkins", "derek@derekperkins.com")
addPerson("Derek Phan", "derekphan94@gmail.com", "@dphan72")
addPerson("Derek Shockey", "derek.shockey@gmail.com", "@derelk")
addPerson("Dev Ojha", "dojha12@gmail.com", "27059@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Deval Shah", "devalshah88@gmail.com")
addPerson("Devon H. O'Dell", "devon.odell@gmail.com", "25956@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Devon H. O'Dell", "devon.odell@gmail.com", "@dhobsd")
addPerson("Dhaivat Pandit", "dhaivatpandit@gmail.com", "15030@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dhaivat Pandit", "dhaivatpandit@gmail.com", "@ceocoder")
addPerson("Dhananjay Nakrani", "dhananjayn@google.com")
addPerson("Dhananjay Nakrani", "dhananjayn@google.com", "15558@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dhananjay Nakrani", "dhananjaynakrani@gmail.com", "@dhananjay92")
addPerson("Dhiru Kholia", "dhiru.kholia@gmail.com", "@kholia")
addPerson("Dhruvdutt Jadhav", "dhruvdutt.jadhav@gmail.com", "@dhruvdutt")
addPerson("Di Xiao", "dixiao@google.com")
addPerson("Di Xiao", "xiaodi.larry@gmail.com")
addPerson("Didier Spezia", "didier.06@gmail.com", "7795@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Didier Spezia", "didier.06@gmail.com", "@dspezia")
addPerson("Diego Saint Esteben", "diego@saintesteben.me")
addPerson("Diego Siqueira", "diego9889@gmail.com", "@DiSiqueira")
addPerson("Dieter Plaetinck", "dieter@raintank.io")
addPerson("Dieter Plaetinck", "dieter@raintank.io", "@Dieterbe")
addPerson("Dimitri Tcaciuc", "dtcaciuc@gmail.com", "@dtcaciuc")
addPerson("Dimitrios Arethas", "darethas@gmail.com")
addPerson("Dina Garmash", "dgrmsh@gmail.com", "@dgrmsh")
addPerson("Dinesh Kumar", "dinesh.kumar@go-jek.com")
addPerson("Diogo Pinela", "diogoid7400@gmail.com")
addPerson("Diogo Pinela", "diogoid7400@gmail.com", "16943@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Diogo Pinela", "diogoid7400@gmail.com", "@dpinela")
addPerson("Dirk Gadsden", "dirk@esherido.com", "@dirk")
addPerson("Diwaker Gupta", "diwakergupta@gmail.com", "@diwakergupta")
addPerson("Dmitri Popov", "operator@cv.dp-net.com", "@pin")
addPerson("Dmitri Shuralyov", "dmitshur@golang.org", "dmitri@shuralyov.com", "shurcool@gmail.com", "@dmitshur", "6005@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dmitriy Dudkin", "dudkin.dmitriy@gmail.com", "@tmwh")
addPerson("Dmitriy", "dchenk@users.noreply.github.com")
addPerson("Dmitry Chestnykh", "dchest@gmail.com", "@dchest")
addPerson("Dmitry Doroginin", "doroginin@gmail.com", "@doroginin")
addPerson("Dmitry Mottl", "dmitry.mottl@gmail.com", "@Mottl")
addPerson("Dmitry Neverov", "dmitry.neverov@gmail.com", "@nd")
addPerson("Dmitry Pokidov", "dooman87@gmail.com")
addPerson("Dmitry Savintsev", "dsavints@gmail.com", "6190@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dmitry Savintsev", "dsavints@gmail.com", "@dmitris")
addPerson("Dmitry Vyukov", "dvyukov@google.com", "5400@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dmitry Vyukov", "dvyukov@google.com", "@dvyukov")
addPerson("DocMerlin", "landivar@gmail.com")
addPerson("Dominic Barnes", "dominic@dbarnes.info")
addPerson("Dominic Green", "dominicgreen1@gmail.com")
addPerson("Dominik Honnef", "dominik@honnef.co", "5020@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dominik Honnef", "dominik@honnef.co", "@dominikh")
addPerson("Dominik Vogt", "vogt@linux.vnet.ibm.com", "6065@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dominik Vogt", "vogt@linux.vnet.ibm.com", "@vogtd")
addPerson("Don Byington", "don@dbyington.com", "@dbyington")
addPerson("Donald Huang", "don.hcd@gmail.com")
addPerson("Dong-hee Na", "donghee.na92@gmail.com", "17352@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dong-hee Na", "donghee.na92@gmail.com", "@corona10")
addPerson("Donovan Hide", "donovanhide@gmail.com", "@donovanhide")
addPerson("Doug Evans", "dje@google.com")
addPerson("Doug Fawley", "dfawley@google.com")
addPerson("Dragoslav Mitrinovic", "fdm224@motorola.com")
addPerson("Drew Flower", "drewvanstone@gmail.com", "@drewvanstone")
addPerson("Drew Hintz", "adhintz@google.com", "@adhintz")
addPerson("Duco van Amstel", "duco@improbable.io")
addPerson("Duncan Holm", "mail@frou.org", "@frou")
addPerson("Dusan Kasan", "me@dusankasan.com")
addPerson("Dustin Carlino", "dcarlino@google.com")
addPerson("Dustin Shields-Cloues", "dcloues@gmail.com", "@dcloues")
addPerson("Dylan Carney", "dcarney@gmail.com")
addPerson("Dylan Waits", "dylan@waits.io", "@waits")
addPerson("EKR", "ekr@rtfm.com", "@ekr")
addPerson("Edan B", "3d4nb3@gmail.com", "@edanbe")
addPerson("Eddie Ringle", "eddie@ringle.io")
addPerson("Eden Li", "eden.li@gmail.com", "@eden")
addPerson("Edson Medina", "edsonmedina@gmail.com")
addPerson("EduRam", "eduardo.ramalho@gmail.com", "@EduRam")
addPerson("Eduard Urbach", "e.urbach@gmail.com")
addPerson("Eduard Urbach", "e.urbach@gmail.com", "@blitzprog")
addPerson("Edward Muller", "edwardam@interlix.com", "9641@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Edward Muller", "edwardam@interlix.com", "@freeformz")
addPerson("Egon Elbre", "egonelbre@gmail.com", "6785@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Egon Elbre", "egonelbre@gmail.com", "@egonelbre")
addPerson("Ehden Sinai", "ehdens@gmail.com")
addPerson("Ehren Kret", "ehren.kret@gmail.com", "@eakret")
addPerson("Eitan Adler", "lists@eitanadler.com", "@grimreaper")
addPerson("Eivind Uggedal", "eivind@uggedal.com", "@uggedal")
addPerson("Elbert Fliek", "efliek@gmail.com", "@Nr90")
addPerson("Eldar Rakhimberdin", "ibeono@gmail.com")
addPerson("Elen Eisendle", "elen@eisendle.ee")
addPerson("Elena Grahovac", "elena@grahovac.me")
addPerson("Elias Naur", "mail@eliasnaur.com", "@eliasnaur")
addPerson("Elias Naur", "mail@eliasnaur.com", "elias.naur@gmail.com", "@eliasnaur", "7435@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Elliot Morrison-Reed", "elliotmr@gmail.com", "@elliotmr")
addPerson("Emanuele Iannone", "emanuele@fondani.it")
addPerson("Emerson Lin", "linyintor@gmail.com", "21970@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Emil Hessman", "emil@hessman.se", "5555@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Emil Hessman", "emil@hessman.se", "c.emil.hessman@gmail.com", "@ceh")
addPerson("Emmanuel Odeke", "emm.odeke@gmail.com", "5137@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Emmanuel Odeke", "emm.odeke@gmail.com", "@odeke-em")
addPerson("Emmanuel Odeke", "emmanuel@orijtech.com", "27585@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Emmanuel Odeke", "odeke@ualberta.ca")
addPerson("Emmanuel Odeke", "odeke@ualberta.ca", "5735@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Eno Compton", "enocom@google.com")
addPerson("Enrico Candino", "enrico.candino@gmail.com")
addPerson("Eoghan Sherry", "ejsherry@gmail.com", "@ejsherry")
addPerson("Eric Adams", "ercadams@gmail.com")
addPerson("Eric Brown", "browne@vmware.com")
addPerson("Eric Chiang", "eric.chiang.m@gmail.com", "@ericchiang")
addPerson("Eric Clark", "zerohp@gmail.com", "@eclark")
addPerson("Eric Daniels", "eric@erdaniels.com", "25196@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Eric Daniels", "eric@erdaniels.com", "@edaniels")
addPerson("Eric Dube", "eric.alex.dube@gmail.com")
addPerson("Eric Engestrom", "eric@engestrom.ch", "@1ace")
addPerson("Eric Garrido", "ekg@google.com", "@minusnine")
addPerson("Eric Hopper", "hopper@omnifarious.org")
addPerson("Eric Koleda", "ekoleda+devrel@google.com")
addPerson("Eric Lagergren", "ericscottlagergren@gmail.com", "@ericlagergren")
addPerson("Eric Lagergren", "eric@ericlagergren.com", "@ericlagergren")
addPerson("Eric Lagergren", "ericscottlagergren@gmail.com", "7276@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Eric Milliken", "emilliken@gmail.com", "@emilliken")
addPerson("Eric Pauley", "eric@pauley.me", "@ericpauley")
addPerson("Eric Ponce", "tricokun@gmail.com", "@trico")
addPerson("Eric Roshan-Eisner", "eric.d.eisner@gmail.com", "@eisner")
addPerson("Eric Rykwalder", "e.rykwalder@gmail.com", "@erykwalder")
addPerson("Eric Schow", "eric.schow@gmail.com")
addPerson("Erik Aigner", "aigner.erik@gmail.com", "@eaigner")
addPerson("Erik Dubbelboer", "erik@dubbelboer.com", "8976@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Erik Dubbelboer", "erik@dubbelboer.com", "@erikdubbelboer")
addPerson("Erik St. Martin", "alakriti@gmail.com", "@erikstmartin")
addPerson("Erik Staab", "estaab@google.com", "@erikus")
addPerson("Erik Westrup", "erik.westrup@gmail.com", "@erikw")
addPerson("Erin Call", "hello@erincall.com")
addPerson("Erin Masatsugu", "erin.masatsugu@gmail.com", "@emasatsugu")
addPerson("Ernest Chiang", "ernest_chiang@htc.com")
addPerson("Erwin Oegema", "blablaechthema@hotmail.com", "@diamondo25")
addPerson("Esko Luontola", "esko.luontola@gmail.com", "@orfjackal")
addPerson("Etai Lev Ran", "etail@il.ibm.com")
addPerson("Ethan Burns", "eaburns@google.com")
addPerson("Ethan Miller", "eamiller@us.ibm.com", "@millere")
addPerson("Euan Kemp", "euank@euank.com", "@euank")
addPerson("Eugene Kalinin", "e.v.kalinin@gmail.com", "12380@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Eugene Kalinin", "e.v.kalinin@gmail.com", "@ekalinin")
addPerson("Evan Broder", "evan@stripe.com", "@evan-stripe")
addPerson("Evan Brown", "evanbrown@google.com")
addPerson("Evan Brown", "evanbrown@google.com", "9260@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Evan Brown", "evanbrown@google.com", "@evandbrown")
addPerson("Evan Farrar", "evanfarrar@gmail.com")
addPerson("Evan Hicks", "evan.hicks2@gmail.com", "@FearlessDestiny")
addPerson("Evan Jones", "ej@evanjones.ca", "@evanj")
addPerson("Evan Klitzke", "evan@eklitzke.org")
addPerson("Evan Klitzke", "evan@eklitzke.org", "@eklitzke")
addPerson("Evan Kroske", "evankroske@google.com", "@evankroske")
addPerson("Evan Martin", "evan.martin@gmail.com", "@evmar")
addPerson("Evan Phoenix", "evan@phx.io", "6330@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Evan Phoenix", "evan@phx.io", "@evanphx")
addPerson("Evan Shaw", "chickencha@gmail.com")
addPerson("Evan Shaw", "edsrzf@gmail.com", "@edsrzf")
addPerson("Evgeniy Polyakov", "zbr@ioremap.net")
addPerson("Evgeniy Polyakov", "zbr@ioremap.net", "17055@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Evgeniy Polyakov", "zbr@ioremap.net", "@bioothod")
addPerson("Ewan Chou", "coocood@gmail.com", "@coocood")
addPerson("Eyal Posener", "posener@gmail.com")
// Fixed mis-encoded name: stray '>' in "Fab>rizio" — the contributor is
// Fabrizio (Misto) Milo (GitHub @Mistobaan), per the Go project contributor list.
addPerson("Fabrizio (Misto) Milo", "mistobaan@gmail.com", "@Mistobaan")
addPerson("Fabian Wickborn", "fabian@wickborn.net", "@fawick")
addPerson("Fabian", "fabian@youremail.eu")
addPerson("Fabien Silberstein", "silberfab@gmail.com")
addPerson("Fabio Alessandro Locati", "me@fale.io")
addPerson("Faiyaz Ahmed", "ahmedf@vmware.com", "@fdawg4l")
addPerson("Fan Hongjian", "fan.howard@gmail.com", "@fango")
addPerson("Fan Jiang", "fan.torchz@gmail.com")
addPerson("Fangming Fang", "Fangming.Fang@arm.com", "19276@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Fangming.Fang", "fangming.fang@arm.com", "@zorrorffm")
addPerson("Fatih Arslan", "fatih@arslan.io", "@fatih")
addPerson("Fatih Arslan", "ftharsln@gmail.com", "@fatih")
addPerson("Fazal Majid", "majid@apsalar.com")
addPerson("Fazlul Shahriar", "fshahriar@gmail.com", "@fhs")
addPerson("Federico Simoncelli", "fsimonce@redhat.com", "@simon3z")
addPerson("Fedor Indutny", "fedor@indutny.com", "@indutny")
addPerson("Felix Kollmann", "felix.kollmann@twinpoint.de")
addPerson("Felix Kollmann", "fk@konsorten.de")
addPerson("Felix Kollmann", "mail@fkollmann.de", "26861@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Feng Liyuan", "darktemplar.f@gmail.com")
addPerson("Filip Gruszczyński", "gruszczy@gmail.com", "17532@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Filip Gruszczyński", "gruszczy@gmail.com", "@gruszczy")
addPerson("Filip Haglund", "drathier@users.noreply.github.com")
addPerson("Filip Ochnik", "filip.ochnik@gmail.com")
addPerson("Filip Stanis", "fstanis@google.com")
addPerson("Filippo Valsorda", "filippo@golang.org", "11715@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Filippo Valsorda", "hi@filippo.io", "@FiloSottile", "filippo@cloudflare.com")
addPerson("Filippo Valsorda", "6195@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Firmansyah Adiputra", "frm.adiputra@gmail.com", "@frm-adiputra")
addPerson("Florian Forster", "octo@google.com", "@octo")
addPerson("Florian Uekermann", "florian@uekermann.me", "13410@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Florian Uekermann", "florian@uekermann.me", "@FlorianUekermann")
addPerson("Florian Uekermann", "florian@uekermann.me", "@MaVo159")
addPerson("Florian Weimer", "fw@deneb.enyo.de", "@fweimer")
addPerson("Florian", "sinnlosername@users.noreply.github.com")
addPerson("Florian", "sinnlosername@users.noreply.github.com", "@sinnlosername")
addPerson("Florin Patan", "florinpatan@gmail.com", "6473@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Florin Patan", "florinpatan@gmail.com", "@dlsniper")
addPerson("Ford Hurley", "ford.hurley@gmail.com", "@fordhurley")
addPerson("FourSeventy", "msiggy@gmail.com")
addPerson("Francesc Campoy Flores", "campoy@golang.org", "5955@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Francesc Campoy Flores", "campoy@google.com", "7455@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Francesc Campoy", "campoy@golang.org", "campoy@google.com", "@campoy")
addPerson("Francesc Campoy", "francesc@campoy.cat")
addPerson("Francisco Claude", "fclaude@recoded.cl", "@fclaude")
addPerson("Francisco Rojas", "francisco.rojas.gallegos@gmail.com", "@frojasg")
addPerson("Francisco Souza", "franciscossouza@gmail.com", "@fsouza")
addPerson("Francisco Souza", "fsouza@users.noreply.github.com")
addPerson("Frank Rehwinkel", "frankrehwinkel@gmail.com")
addPerson("Frank Schroeder", "frank.schroeder@gmail.com")
addPerson("Frank Schroeder", "frank.schroeder@gmail.com", "@magiconair")
addPerson("Frank Schröder", "frank.schroeder@gmail.com", "11300@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Frank Somers", "fsomers@arista.com", "@somersf")
addPerson("Franz Bettag", "franz@bett.ag")
addPerson("Fred Carle", "fred.carle@thorium90.io")
addPerson("Frederick Kelly Mayle III", "frederickmayle@gmail.com", "@fkm3")
addPerson("Frederik Ring", "frederik.ring@gmail.com")
addPerson("Fredrik Enestad", "fredrik.enestad@soundtrackyourbrand.com", "@fredr")
addPerson("Fredrik Forsmo", "fredrik.forsmo@gmail.com", "@frozzare")
addPerson("Fredrik Wallgren", "fredrik.wallgren@gmail.com")
addPerson("Frits van Bommel", "fvbommel@gmail.com", "13460@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Frits van Bommel", "fvbommel@gmail.com", "@fvbommel")
addPerson("Frédéric Guillot", "frederic.guillot@gmail.com", "@fguillot")
addPerson("Fumitoshi Ukai", "ukai@google.com", "@ukai")
addPerson("G. Hussain Chinoy", "ghchinoy@gmail.com", "@ghchinoy")
addPerson("Gaal Yahas", "gaal@google.com")
addPerson("Gabe Dalay", "gabedalay@gmail.com")
addPerson("Gabriel Aszalos", "gabriel.aszalos@gmail.com", "5465@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Gabriel Aszalos", "gabriel.aszalos@gmail.com", "@gbbr")
addPerson("Gabriel Nicolas Avellaneda", "avellaneda.gabriel@gmail.com", "@GabrielNicolasAvellaneda")
addPerson("Gabriel Rosenhouse", "rosenhouse@gmail.com")
addPerson("Gabriel Russell", "gabriel.russell@gmail.com", "@wiccatech")
addPerson("Gabríel Arthúr Pétursson", "gabriel@system.is", "@polarina")
addPerson("Gareth Paul Jones", "gpj@foursquare.com", "@garethpaul")
addPerson("Garret Kelly", "gdk@google.com")
addPerson("Gary Burd", "gary@beagledreams.com", "@garyburd")
addPerson("Gary Elliott", "garyelliott@google.com")
addPerson("Gaurish Sharma", "contact@gaurishsharma.com", "@gaurish")
addPerson("Gautam Dey", "gautam.dey77@gmail.com")
addPerson("Gautham Thambidorai", "gautham.dorai@gmail.com", "@gauthamt")
addPerson("Gauthier Jolly", "gauthier.jolly@gmail.com")
addPerson("Geert-Johan Riemer", "gjr19912@gmail.com")
addPerson("Genevieve Luyt", "genevieve.luyt@gmail.com", "@genevieveluyt")
addPerson("Geoff Berry", "gberry.qdt@qualcommdatacenter.com", "25768@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Geoff Berry", "gberry.qdt@qualcommdatacenter.com", "@gberry-qdt")
addPerson("Georg Reinke", "guelfey@gmail.com", "@guelfey")
addPerson("George Gkirtsou", "ggirtsou@gmail.com", "@ggirtsou")
addPerson("George Shammas", "george@shamm.as", "@georgyo")
addPerson("George Tankersley", "george.tankersley@gmail.com")
addPerson("Gepser Hoil", "geharold@gmail.com")
addPerson("Gerasimos (Makis) Maropoulos", "kataras2006@hotmail.com", "@kataras")
addPerson("Gerasimos Dimitriadis", "gedimitr@gmail.com", "@gedimitr")
addPerson("Gergely Brautigam", "skarlso777@gmail.com", "@Skarlso")
addPerson("Getulio Sánchez", "valentin2507@gmail.com")
addPerson("Ggicci", "ggicci.t@gmail.com", "@ggicci")
addPerson("Gianguido Sora`", "g.sora4@gmail.com")
addPerson("Giannis Kontogianni", "giannis2792@gmail.com")
addPerson("GiantsLoveDeathMetal", "sebastien@cytora.com", "@foxyblue")
addPerson("Gil Raphaelli", "g@raphaelli.com")
addPerson("Giovanni Bajo", "rasky@develer.com", "5340@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Giovanni Bajo", "rasky@develer.com", "@rasky")
addPerson("Giulio Iotti", "dullgiulio@gmail.com", "@dullgiulio")
addPerson("Giuseppe Valente", "gvalente@arista.com")
addPerson("Gleb Smirnoff", "glebius@netflix.com")
addPerson("Gleb Stepanov", "glebstepanov1992@gmail.com", "14596@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Gleb Stepanov", "glebstepanov1992@gmail.com", "@stgleb")
addPerson("Glenn Brown", "glennb@google.com")
addPerson("Glenn Griffin", "glenng@google.com")
addPerson("Glenn Lewis", "gmlewis@google.com", "@gmlewis")
addPerson("Glib Smaga", "code@gsmaga.com")
addPerson("Go Team", "no-reply@golang.org")
addPerson("Goo", "liuwanle2010@gmail.com", "@l-we")
addPerson("Gordon Klaus", "gordon.klaus@gmail.com", "5780@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Gordon Klaus", "gordon.klaus@gmail.com", "@gordonklaus")
addPerson("Graham Miller", "graham.miller@gmail.com", "@laslowh")
addPerson("Greg Poirier", "greg.istehbest@gmail.com", "@grepory")
addPerson("Greg Ward", "greg@gerg.ca", "@gward")
addPerson("Gregory Colella", "gcolella@google.com")
addPerson("Gregory Haskins", "gregory.haskins@gmail.com")
addPerson("Gregory Man", "man.gregory@gmail.com")
addPerson("Gregory Man", "man.gregory@gmail.com", "@gregory-m")
addPerson("Greyh4t", "greyh4t1337@gmail.com")
addPerson("Grim", "megaskyhawk@gmail.com")
addPerson("Grégoire Delattre", "gregoire.delattre@gmail.com", "@gregdel")
addPerson("Guilherme Garnier", "guilherme.garnier@gmail.com", "@ggarnier")
addPerson("Guilherme Goncalves", "guilhermeaugustosg@gmail.com", "@guilhermeasg")
addPerson("Guilherme Rezende", "guilhermebr@gmail.com", "22856@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Guilherme Rezende", "guilhermebr@gmail.com", "@guilhermebr")
addPerson("Guilherme Santos", "guilherme.santos@foodora.com")
addPerson("GuilhermeCaruso", "gui.martinscaruso@gmail.com", "@GuilhermeCaruso")
addPerson("Guillaume J. Charmes", "guillaume@charmes.net", "@creack")
addPerson("Guillaume J. Charmes", "gcharmes@magicleap.com")
addPerson("Guillaume Koenig", "guillaume.edward.koenig@gmail.com")
addPerson("Guillaume Leroi", "leroi.g@gmail.com")
addPerson("Guillermo López-Anglada", "guillermo.lopez@outlook.com", "@guillermooo")
addPerson("Guobiao Mei", "meiguobiao@gmail.com", "@guobiao")
addPerson("Guoliang Wang", "iamwgliang@gmail.com", "@wgliang")
addPerson("Gurpartap Singh", "hi@gurpartap.com")
addPerson("Gustav Paul", "gustav.paul@gmail.com", "@gpaul")
addPerson("Gustav Westling", "gustav@westling.xyz", "@zegl")
addPerson("Gustav Westling", "zegl@westling.xyz", "@zegl")
addPerson("Gustavo Niemeyer", "gustavo@niemeyer.net", "n13m3y3r@gmail.com", "@niemeyer")
addPerson("Gustavo Picón", "tabo@tabo.pe")
addPerson("Gyu-Ho Lee", "gyuhox@gmail.com", "@gyuho")
addPerson("H. İbrahim Güngör", "igungor@gmail.com", "@igungor")
addPerson("HAMANO Tsukasa", "hamano@osstech.co.jp", "@hamano")
addPerson("HENRY-PC\\Henry", "henry.adisumarto@gmail.com")
addPerson("Hajime Hoshi", "hajimehoshi@gmail.com", "7938@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hajime Hoshi", "hajimehoshi@gmail.com", "@hajimehoshi")
addPerson("Hamit Burak Emre", "hamitburakemre@gmail.com")
addPerson("Han-Wen Nienhuys", "hanwen@google.com", "5893@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Han-Wen Nienhuys", "hanwen@google.com", "@hanwen")
addPerson("Han-Wen Nienhuys", "hanwenn@gmail.com", "6115@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hana Kim", "hyangah@gmail.com", "@hyangah")
addPerson("Hang Qian", "hangqian90@gmail.com")
addPerson("Hanjun Kim", "hallazzang@gmail.com", "@hallazzang")
addPerson("Hannes Landeholm", "hnsl@google.com")
addPerson("Haosdent Huang", "haosdent@gmail.com")
addPerson("Harald Nordgren", "haraldnordgren@gmail.com", "26145@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("HaraldNordgren", "haraldnordgren@gmail.com", "@HaraldNordgren")
addPerson("Hari haran", "hariharan.uno@gmail.com", "@hariharan-uno")
addPerson("Hariharan Srinath", "srinathh@gmail.com", "@srinathh")
addPerson("Harry Moreno", "morenoh149@gmail.com", "@morenoh149")
addPerson("Harshavardhana", "hrshvardhana@gmail.com", "@harshavardhana")
addPerson("Harshavardhana", "harsha@minio.io")
addPerson("Harshavardhana", "hrshvardhana@gmail.com", "11900@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hauke Löffler", "hloeffler@users.noreply.github.com", "@hloeffler")
addPerson("He Liu", "liulonnie@gmail.com")
addPerson("Hector Chu", "hectorchu@gmail.com", "@hectorchu")
addPerson("Hector Jusforgues", "hector.jusforgues@gmail.com")
addPerson("Hector Martin Cantero", "hector@marcansoft.com", "@marcan")
addPerson("Hector Rivas Gandara", "keymon@gmail.com")
addPerson("Henning Schmiedehausen", "henning@schmiedehausen.org", "@hgschmie")
addPerson("Henrik Hodne", "henrik@hodne.io", "@henrikhodne")
addPerson("Henrique Vicente", "henriquevicente@gmail.com")
addPerson("Henry Chang", "mr.changyuheng@gmail.com")
addPerson("Henry Clifford", "h.a.clifford@gmail.com", "@hcliff")
addPerson("Henry D. Case", "kris@amongbytes.com")
addPerson("Henry", "google@mindeco.de")
addPerson("Herbert Georg Fischer", "herbert.fischer@gmail.com", "@hgfischer")
addPerson("Herbie Ong", "herbie@google.com", "17100@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Herbie Ong", "herbie@google.com", "@cybrcodr")
addPerson("Heschi Kreinick", "heschi@google.com", "17090@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Heschi Kreinick", "heschi@google.com", "@heschik")
addPerson("Hidetatsu Yaginuma", "ygnmhdtt@gmail.com", "@yagi5")
addPerson("Hilko Bengen", "bengen@hilluzination.de")
addPerson("Hiroaki Nakamura", "hnakamur@gmail.com")
addPerson("Hiroaki Nakamura", "hnakamur@gmail.com", "17745@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hironao OTSUBO", "motemen@gmail.com", "@motemen")
addPerson("Hiroshi Ioka", "hirochachacha@gmail.com", "11631@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hiroshi Ioka", "hirochachacha@gmail.com", "@hirochachacha")
addPerson("Hitoshi Mitake", "mitake.hitoshi@gmail.com", "@mitake")
addPerson("Homan Chou", "homanchou@gmail.com")
addPerson("Hong Ruiqi", "hongruiqi@gmail.com", "@hongruiqi")
addPerson("Hsin Tsao", "tsao@google.com")
addPerson("Hsin Tsao", "tsao@google.com", "@lazyhackeratwork")
addPerson("HuKeping", "hukeping@huawei.com", "@HuKeping")
addPerson("Huadcu Sulivan", "huadcu@gmail.com")
addPerson("Hugo Rut", "hugorut@gmail.com")
addPerson("Hugues Bruant", "hugues.bruant@gmail.com", "17586@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hugues Bruant", "hugues.bruant@gmail.com", "@huguesb")
addPerson("Hyang-Ah Hana Kim", "hyangah@gmail.com", "5190@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hyang-Ah Hana Kim", "hyangah@gmail.com", "hakim@google.com", "@hyangah")
addPerson("Håvard Haugen", "havard.haugen@gmail.com", "5505@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Håvard Haugen", "havard.haugen@gmail.com", "@osocurioso")
addPerson("INADA Naoki", "songofacandy@gmail.com", "@methane")
addPerson("Ian Cottrell", "iancottrell@google.com", "9711@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ian Cottrell", "iancottrell@google.com", "@ianthehat")
addPerson("Ian Davis", "nospam@iandavis.com")
addPerson("Ian Davis", "nospam@iandavis.com", "@iand")
addPerson("Ian Ennis", "michaelian.ennis@gmail.com")
addPerson("Ian Gudger", "igudger@google.com", "12625@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ian Gudger", "igudger@google.com", "ian@loosescre.ws", "@iangudger")
addPerson("Ian Haken", "ihaken@netflix.com")
addPerson("Ian Johnson", "person.uwsome@gmail.com")
addPerson("Ian Kent", "iankent85@gmail.com", "@ian-kent")
addPerson("Ian Lance Taylor", "iant@golang.org", "@ianlancetaylor", "5206@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ibrahim AshShohail", "ibra.sho@gmail.com", "@ibrasho")
addPerson("Ibrahim AshShohail", "me@ibrasho.com")
addPerson("Iccha Sethi", "icchasethi@gmail.com", "@isethi")
addPerson("Idora Shinatose", "idora.shinatose@gmail.com", "@idora")
addPerson("Igor Bernstein", "igorbernstein@google.com")
addPerson("Igor Dolzhikov", "bluesriverz@gmail.com", "@takama")
addPerson("Igor Vashyst", "ivashyst@gmail.com", "@ivashyst")
addPerson("Igor Zhilianin", "igor.zhilianin@gmail.com", "@igorzhilianin")
addPerson("Ilan Pillemer", "ilan.pillemer@gmail.com")
addPerson("Ilia Filippov", "ilia.filippov@intel.com")
addPerson("Ilya Tocar", "ilya.tocar@intel.com", "26817@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ilya Tocar", "ilya.tocar@intel.com", "8585@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ilya Tocar", "ilya.tocar@intel.com", "@TocarIP")
addPerson("Inanc Gumus", "m@inanc.io", "25354@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Inanc Gumus", "m@inanc.io", "@inancgumus")
addPerson("Ingo Gottwald", "in.gottwald@gmail.com")
addPerson("Ingo Krabbe", "ikrabbe.ask@gmail.com", "@ikrabbe")
addPerson("Ingo Oeser", "nightlyone@googlemail.com", "5021@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ingo Oeser", "nightlyone@googlemail.com", "@nightlyone")
addPerson("Ioannis Georgoulas", "geototti21@hotmail.com", "@geototti21")
addPerson("Ishani Garg", "ishani.garg@gmail.com")
addPerson("Iskander Sharipov", "iskander.sharipov@intel.com", "24037@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Iskander Sharipov", "iskander.sharipov@intel.com", "@Quasilyte")
addPerson("Iskander Sharipov", "quasilyte@gmail.com", "25422@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Issac Trotts", "issac.trotts@gmail.com")
addPerson("Issac Trotts", "issactrotts@google.com", "@ijt")
addPerson("Ivan Babrou", "ivan@cloudflare.com", "@bobrik")
addPerson("Ivan Bertona", "ivan.bertona@gmail.com", "@ibrt")
addPerson("Ivan Jovanovic", "ivan@loopthrough.ch")
addPerson("Ivan Krasin", "krasin@golang.org", "@krasin2")
addPerson("Ivan Kruglov", "ivan.kruglov@yahoo.com")
addPerson("Ivan Kutuzov", "arbrix@gmail.com")
addPerson("Ivan Kutuzov", "arbrix@gmail.com", "@arbrix")
addPerson("Ivan Markin", "sw@nogoegst.net")
addPerson("Ivan Markin", "twim@riseup.net")
addPerson("Ivan Moscoso", "moscoso@gmail.com", "@ivan3bx")
addPerson("Ivan Sharavuev", "shpiwan@gmail.com", "@Shiwin")
addPerson("Ivan Ukhov", "ivan.ukhov@gmail.com", "@IvanUkhov")
addPerson("Ivy Evans", "ivy@ivyevans.net", "@ivy")
addPerson("J. Mroz", "nalik.nal@gmail.com")
addPerson("Jay Conrod", "jayconrod@google.com", "@jayconrod")
addPerson("JBD (DO NOT USE)", "jbd@golang.org", "10107@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("JBD", "jbd@google.com", "5040@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("JP Sugarbroad", "jpsugar@google.com", "@taralx")
addPerson("JT Olds", "hello@jtolds.com", "@jtolds")
addPerson("Jaana Burcu Dogan", "jbd@google.com", "jbd@golang.org", "@rakyll")
addPerson("Jack Christensen", "jack@jackchristensen.com")
addPerson("Jack Lindamood", "jlindamo@justin.tv", "@cep21")
addPerson("Jack Parkinson", "jdparkinson93@gmail.com")
addPerson("Jack", "jackxbritton@gmail.com", "@jackxbritton")
addPerson("Jackson Owens", "jackson_owens@alumni.brown.edu")
addPerson("Jacob H. Haven", "jacob@cloudflare.com")
addPerson("Jacob H. Haven", "jacob@jhaven.me", "@jacobhaven")
addPerson("Jacob Haven", "jacob@cloudflare.com", "5346@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jacob Hoffman-Andrews", "github@hoffman-andrews.com", "10927@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jacob Hoffman-Andrews", "github@hoffman-andrews.com", "@jsha")
addPerson("Jacob Kobernik", "jkobernik@gmail.com")
addPerson("Jacob Marble", "jacobmarble@google.com")
addPerson("Jacob Walker", "jacobwalker0814@gmail.com")
addPerson("Jade Auer", "jda@tapodi.net")
addPerson("Jae Kwon", "jae@tendermint.com", "@jaekwon")
addPerson("Jaime Geiger", "jaime@grimm-co.com")
addPerson("Jake B", "doogie1012@gmail.com")
addPerson("Jake B", "doogie1012@gmail.com", "@silbinarywolf")
addPerson("Jake Burkhead", "jake.b@socialcodeinc.com")
addPerson("Jakob Borg", "jakob@nym.se", "@calmh")
addPerson("Jakob Weisblat", "jakobw@mit.edu", "@jakob223")
addPerson("Jakub Katarzynski", "kkatarzynski@gmail.com")
addPerson("Jakub Ryszard Czarnowicz", "j.czarnowicz@gmail.com", "@Naranim")
addPerson("Jakub Čajka", "jcajka@redhat.com", "11002@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jakub Čajka", "jcajka@redhat.com", "@jcajka")
addPerson("James Abley", "james.abley@gmail.com")
addPerson("James Bardin", "j.bardin@gmail.com", "@jbardin")
addPerson("James Chacon", "jchacon@google.com")
addPerson("James Clarke", "jrtc27@jrtc27.com", "15676@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("James Clarke", "jrtc27@jrtc27.com", "@jrtc27")
addPerson("James Cowgill", "James.Cowgill@imgtec.com")
addPerson("James Cowgill", "james.cowgill@mips.com", "17679@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("James Cowgill", "james.cowgill@mips.com", "@jcowgill")
addPerson("James Craig Burley", "james-github@burleyarch.com", "@jcburley")
addPerson("James F. Carter", "jfc.org.uk@gmail.com")
addPerson("James Fysh", "james.fysh@gmail.com", "@JamesFysh")
addPerson("James Gray", "james@james4k.com", "@james4k")
addPerson("James Greenhill", "fuziontech@gmail.com")
addPerson("James Hall", "james.hall@shopify.com")
addPerson("James Hartig", "fastest963@gmail.com", "17920@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("James Hartig", "fastest963@gmail.com", "@fastest963")
addPerson("James Lawrence", "jljatone@gmail.com", "@james-lawrence")
addPerson("James Munnelly", "james@munnelly.eu")
addPerson("James Myers", "jfmyers9@gmail.com", "@jfmyers9")
addPerson("James Neve", "jamesoneve@gmail.com", "@jamesneve")
addPerson("James Robinson", "jamesr@google.com", "@jamesr")
addPerson("James Schofield", "james@shoeboxapp.com", "@jamesshoebox")
addPerson("James Smith", "jrs1995@icloud.com", "@jimmysmith95")
addPerson("James Sweet", "james.sweet88@googlemail.com", "@Omegaice")
addPerson("James Toy", "nil@opensesame.st", "@jamestoy")
addPerson("James Treanor", "jtreanor3@gmail.com")
addPerson("James Tucker", "raggi@google.com", "@raggi")
addPerson("James Whitehead", "jnwhiteh@gmail.com", "@jnwhiteh")
addPerson("Jamie Barnett", "jamiebarnett1992@gmail.com")
addPerson("Jamie Beverly", "jamie.r.beverly@gmail.com", "@jbeverly")
addPerson("Jamie Hall", "jamiehall@google.com")
addPerson("Jamie Kerr", "jkerr113@googlemail.com")
addPerson("Jamie Liu", "jamieliu@google.com", "@nixprime")
addPerson("Jamie Stackhouse", "contin673@gmail.com", "@itsjamie")
addPerson("Jamie Wilkinson", "jaq@spacepants.org", "@jaqx0r")
addPerson("Jamil Djadala", "djadala@gmail.com", "@djadala")
addPerson("Jan Berktold", "jan@berktold.co")
addPerson("Jan Berktold", "jan@berktold.co", "@JanBerktold")
addPerson("Jan H. Hosang", "jan.hosang@gmail.com", "@hosang")
addPerson("Jan Kratochvil", "jan.kratochvil@redhat.com", "@jankratochvil")
addPerson("Jan Lehnardt", "jan@apache.org", "@janl")
addPerson("Jan Mercl", "0xjnml@gmail.com", "5295@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jan Mercl", "0xjnml@gmail.com", "@cznic")
addPerson("Jan Mercl", "befelemepeseveze@gmail.com", "@bflm")
addPerson("Jan Pilzer", "jan.pilzer@gmx.de")
addPerson("Jan Ziak", "0xe2.0x9a.0x9b@gmail.com", "@atomsymbol")
addPerson("Janne Snabb", "snabb@epipe.com", "@snabb")
addPerson("Jason A. Donenfeld", "jason@zx2c4.com", "@zx2c4")
addPerson("Jason Barnett", "jason.w.barnett@gmail.com", "@jasonwbarnett")
addPerson("Jason Buberel", "jbuberel@google.com", "8445@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jason Buberel", "jbuberel@google.com", "jason@buberel.org", "@jbuberel")
addPerson("Jason Chu", "jasonchujc@gmail.com", "@1lann")
addPerson("Jason Cwik", "jason@cwik.org")
addPerson("Jason Del Ponte", "delpontej@gmail.com", "@jasdel")
addPerson("Jason Donenfeld", "jason.donenfeld@gmail.com", "20556@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jason E. Aten", "j.e.aten@gmail.com")
addPerson("Jason Hall", "imjasonh@gmail.com")
addPerson("Jason Hall", "jasonhall@google.com")
addPerson("Jason Hewes", "jasonhewes5@gmail.com")
addPerson("Jason Keene", "jasonkeene@gmail.com")
addPerson("Jason Keene", "jasonkeene@gmail.com", "@jasonkeene")
addPerson("Jason LeBrun", "jblebrun@gmail.com", "@jblebrun")
addPerson("Jason McVetta", "jason.mcvetta@gmail.com", "@jmcvetta")
addPerson("Jason Murray", "jason@chaosaffe.io")
addPerson("Jason Smale", "jsmale@zendesk.com")
addPerson("Jason Travis", "infomaniac7@gmail.com", "@corburn")
addPerson("Jason Wangsadinata", "jwangsadinata@gmail.com", "@jwangsadinata")
addPerson("Jason Wilder", "mail@jasonwilder.com")
addPerson("Javier Kohen", "jkohen@google.com", "@jkohen")
addPerson("Javier Segura", "javism@gmail.com", "@jsegura")
addPerson("Jay Conrod", "jayconrod@google.com", "17092@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jay Conrod", "jayconrod@google.com", "@jayconrod")
addPerson("Jay Satiro", "raysatiro@yahoo.com")
addPerson("Jay Stramel", "js@ionactual.com")
addPerson("Jay Weisskopf", "jay@jayschwa.net", "@jayschwa")
addPerson("Jayabaskar Rajagopal", "jayabaskar.rajagopal@gmail.com")
addPerson("Jean de Klerk", "deklerk@google.com", "26615@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jean de Klerk", "deklerk@google.com", "@jadekler")
addPerson("Jean de Klerk", "jadekler@gmail.com")
addPerson("Jean-André Santoni", "jean.andre.santoni@gmail.com")
addPerson("Jean-Francois Cantin", "jfcantin@gmail.com", "@jfcantin")
addPerson("Jean-Marc Eurin", "jmeurin@google.com", "@jmeurin")
addPerson("Jean-Nicolas Moal", "jn.moal@gmail.com", "@jnmoal")
addPerson("Jed Denlea", "jed@fastly.com", "5550@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jed Denlea", "jed@fastly.com", "@jeddenlea")
addPerson("Jeet Parekh", "jeetparekh96@gmail.com", "24716@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jeet Parekh", "jeetparekh96@gmail.com", "@jeet-parekh")
addPerson("Jeevanandam M", "jeeva@myjeeva.com")
addPerson("Jeff (Zhefu) Jiang", "jeffjiang@google.com")
addPerson("Jeff Buchbinder", "jeff@ourexchange.net")
addPerson("Jeff Craig", "jeffcraig@google.com", "@foxxtrot")
addPerson("Jeff Dupont", "jeff.dupont@gmail.com", "@jeffdupont")
addPerson("Jeff Grafton", "jgrafton@google.com")
addPerson("Jeff Hodges", "jeff@somethingsimilar.com", "@jmhodges")
addPerson("Jeff Johnson", "jrjohnson@google.com", "16958@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jeff Johnson", "jrjohnson@google.com", "@johnsonj")
addPerson("Jeff R. Allen", "jra@nella.org", "5646@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jeff R. Allen", "jra@nella.org", "@jeffallen")
addPerson("Jeff Sickel", "jas@corpus-callosum.com", "@vat9")
addPerson("Jeff Wendling", "jeff@spacemonkey.com", "@zeebo")
addPerson("Jeff Williams", "jefesaurus@google.com")
addPerson("Jeff", "jeffreyh192@gmail.com", "@jeffizhungry")
addPerson("Jeffrey Yong", "jeffreyyong10@gmail.com")
addPerson("Jelte Fennema", "github-tech@jeltef.nl", "@JelteF")
addPerson("Jens Frederich", "jfrederich@gmail.com", "@frederich")
addPerson("Jeremiah Harmsen", "jeremiah@google.com", "@jharmsen")
addPerson("Jeremy Baumont", "jeremy.baumont@gmail.com")
addPerson("Jeremy Jackins", "jeremyjackins@gmail.com", "5300@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jeremy Jackins", "jeremyjackins@gmail.com", "@jnjackins")
addPerson("Jeremy Jay", "jeremy@pbnjay.com")
addPerson("Jeremy Jay", "jeremy@pbnjay.com", "@pbnjay")
addPerson("Jeremy Loy", "jeremy.b.loy@icloud.com")
addPerson("Jeremy Schlatter", "jeremy.schlatter@gmail.com", "@jeremyschlatter")
addPerson("Jeremy", "jcanady@gmail.com")
addPerson("Jeroen Bobbeldijk", "jerbob92@gmail.com", "@jerbob92")
addPerson("Jerrin Shaji George", "jerrinsg@gmail.com", "@jerrinsg")
addPerson("Jess Frazelle", "acidburn@google.com", "@jessfraz")
addPerson("Jess Frazelle", "me@jessfraz.com", "@jessfraz")
addPerson("Jesse Szwedko", "jesse.szwedko@gmail.com", "@jszwedko")
addPerson("Jessie Frazelle", "me@jessfraz.com", "6071@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jesús Espino", "jespinog@gmail.com")
addPerson("Jian Zhen", "zhenjl@gmail.com")
addPerson("Jianing Yu", "jnyu@google.com")
addPerson("Jianqiao Li", "jianqiaoli@google.com")
addPerson("Jianqiao Li", "jianqiaoli@jianqiaoli.svl.corp.google.com")
addPerson("Jihyun Yu", "yjh0502@gmail.com", "@yjh0502")
addPerson("Jille Timmermans", "quis@google.com")
addPerson("Jim Cote", "jfcote87@gmail.com", "5320@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jim Cote", "jfcote87@gmail.com", "@jfcote87")
addPerson("Jim Kingdon", "jim@bolt.me", "@jkingdon")
addPerson("Jim McGrath", "jimmc2@gmail.com", "@mcgoo")
addPerson("Jim Minter", "jminter@redhat.com")
addPerson("Jim Myers", "jfmyers9@gmail.com", "16855@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jimmy Zelinskie", "jimmyzelinskie@gmail.com", "@jzelinskie")
addPerson("Jin-wook Jeong", "jeweljar@hanmail.net", "@jeweljar")
addPerson("Jingcheng Zhang", "diogin@gmail.com", "@diogin")
addPerson("Jingguo Yao", "yaojingguo@gmail.com", "@yaojingguo")
addPerson("Jiong Du", "londevil@gmail.com", "@lodevil")
addPerson("Jirka Daněk", "dnk@mail.muni.cz", "@jirkadanek")
addPerson("Jiulong Wang", "jiulongw@gmail.com")
addPerson("Jizhong Jiang", "jiangjizhong@gmail.com")
addPerson("Joakim Sernbrant", "serbaut@gmail.com", "@serbaut")
addPerson("Joe Cortopassi", "joe@joecortopassi.com", "@JoeCortopassi")
addPerson("Joe Farrell", "joe2farrell@gmail.com", "@joe2far")
addPerson("Joe Harrison", "joehazzers@gmail.com", "@sigwinch28")
addPerson("Joe Henke", "joed.henke@gmail.com", "@jdhenke")
addPerson("Joe Kyo", "xunianzu@gmail.com", "21935@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joe Kyo", "xunianzu@gmail.com", "@joekyo")
addPerson("Joe Poirier", "jdpoirier@gmail.com", "@jpoirier")
addPerson("Joe Richey", "joerichey@google.com", "17411@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joe Shaw", "joe@joeshaw.org")
addPerson("Joe Shaw", "joe@joeshaw.org", "5185@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joe Shaw", "joe@joeshaw.org", "@joeshaw")
addPerson("Joe Sylve", "joe.sylve@gmail.com", "11851@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joe Sylve", "joe.sylve@gmail.com", "@jtsylve")
addPerson("Joe Tsai", "joetsai@google.com", "joetsai@digital-static.net", "thebrokentoaster@gmail.com", "@dsnet")
addPerson("Joe Tsai", "joetsai@digital-static.net", "8495@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joe Tsai", "joetsai@google.com", "12850@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joe Tsai", "thebrokentoaster@gmail.com", "9735@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joel Sing", "joel@sing.id.au", "13640@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joel Sing", "joel@sing.id.au", "jsing@google.com", "@4a6f656c")
addPerson("Joel Sing", "jsing@google.com", "5770@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Johan Brandhorst", "johan.brandhorst@gmail.com", "16585@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Johan Brandhorst", "johan.brandhorst@gmail.com", "@johanbrandhorst")
addPerson("Johan Brandhorst", "johan@cognitivelogic.com")
addPerson("Johan Brandhorst", "johan@infosum.com")
addPerson("Johan Euphrosine", "proppy@google.com", "5480@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Johan Euphrosine", "proppy@google.com", "@proppy")
addPerson("Johan Sageryd", "j@1616.se", "@jsageryd")
addPerson("Johan Schuijt-Li", "johan@300.nl")
addPerson("Johanna Mantilla Duque", "johanna1431@gmail.com")
addPerson("Johannes Ebke", "johannes@ebke.org")
addPerson("John Asmuth", "jasmuth@gmail.com", "jasmuth@google.com", "@skelterjohn")
addPerson("John Beisley", "huin@google.com", "@huin-google")
addPerson("John Dethridge", "jcd@golang.org", "5515@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("John Dethridge", "jcd@golang.org", "@jcd2")
addPerson("John Eikenberry", "jae@zhar.net")
addPerson("John Gibb", "johngibb@gmail.com", "@johngibb")
addPerson("John Howard Palevich", "jack.palevich@gmail.com", "@jackpal")
addPerson("John Jeffery", "jjeffery@sp.com.au", "@jjeffery")
addPerson("John Leidegren", "john.leidegren@gmail.com")
addPerson("John Paul Adrian Glaubitz", "glaubitz@physik.fu-berlin.de")
addPerson("John Potocny", "johnp@vividcortex.com", "@potocnyj")
addPerson("John R. Lenton", "jlenton@gmail.com", "@chipaca")
addPerson("John Schnake", "schnake.john@gmail.com", "@johnSchnake")
addPerson("John ShaggyTwoDope Jenkins", "twodopeshaggy@gmail.com", "@shaggytwodope")
addPerson("John Shahid", "jvshahid@gmail.com", "@jvshahid")
addPerson("John Starks", "jostarks@microsoft.com")
addPerson("John Tuley", "john@tuley.org", "@jmtuley")
addPerson("JohnCGriffin", "griffinish@gmail.com")
addPerson("Johnny Boursiquot", "jboursiquot@gmail.com")
addPerson("Johnny Luo", "johnnyluo1980@gmail.com", "19155@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Johnny Luo", "johnnyluo1980@gmail.com", "@johnnyluo")
addPerson("Jon Chen", "jchen@justin.tv", "@bsdlp")
addPerson("Jon Jenkins", "invultussolis@gmail.com")
addPerson("Jon Jenkins", "jon@mj12.su")
addPerson("Jonathan Amsterdam", "jba@google.com", "14570@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jonathan Amsterdam", "jba@google.com", "@jba")
addPerson("Jonathan Anderson", "jonathan.anderson@mun.ca")
addPerson("Jonathan Boulle", "jonathanboulle@gmail.com", "@jonboulle")
addPerson("Jonathan Chen", "dijonkitchen@users.noreply.github.com", "@dijonkitchen")
addPerson("Jonathan Doklovic", "doklovic@atlassian.com")
addPerson("Jonathan ES Lin", "ernsheong@gmail.com")
addPerson("Jonathan Feinberg", "feinberg@google.com", "@google-feinberg")
addPerson("Jonathan Hseu", "jhseu@google.com", "@jhseu")
addPerson("Jonathan Lloyd", "j.lloyd.email@gmail.com")
addPerson("Jonathan Mark", "jhmark@xenops.com", "@jhmark")
addPerson("Jonathan Mayer", "jonmayer@google.com")
addPerson("Jonathan Nieder", "jrn@google.com", "@jrn")
addPerson("Jonathan Pentecost", "pentecostjonathan@gmail.com")
addPerson("Jonathan Pittman", "jmpittman@google.com", "@jonathanpittman")
addPerson("Jonathan Rudenberg", "jonathan@titanous.com", "5431@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jonathan Rudenberg", "jonathan@titanous.com", "@titanous")
addPerson("Jonathan Turner", "jt@jtnet.co.uk")
addPerson("Jonathan Wills", "runningwild@gmail.com", "@runningwild")
addPerson("Jongmin Kim", "atomaths@gmail.com", "@atomaths")
addPerson("Jongmin Kim", "jmkim@pukyong.ac.kr", "@jmkim")
addPerson("Jono Gould", "jono.gould@gmail.com")
addPerson("Joonas Kuorilehto", "joneskoo@derbian.fi", "14770@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joonas Kuorilehto", "joneskoo@derbian.fi", "@joneskoo")
addPerson("Joop Kiefte", "joop@kiefte.net", "@LaPingvino")
addPerson("Jordan Lewis", "jordanthelewis@gmail.com", "@jordanlewis")
addPerson("Jordan Liggitt", "jliggitt@redhat.com")
addPerson("Jordan Rhee", "jordanrh@microsoft.com", "28473@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jordan Rhee", "jordanrh@microsoft.com", "@jordanrh1")
addPerson("Jos Visser", "josv@google.com", "@gosbisser")
addPerson("Jose Luis Vázquez González", "josvazg@gmail.com", "@josvazg")
addPerson("Joseph Herlant", "herlantj@gmail.com")
addPerson("Joseph Holsten", "joseph@josephholsten.com", "@josephholsten")
addPerson("Joseph Poirier", "jdpoirier@gmail.com")
addPerson("Joseph Richey", "joerichey@google.com", "@josephlr")
addPerson("Joseph Spurrier", "code@josephspurrier.com")
addPerson("Josh Bleecher Snyder", "josharian@gmail.com", "5143@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Josh Bleecher Snyder", "josharian@gmail.com", "@josharian")
addPerson("Josh Chorlton", "jchorlton@gmail.com", "@jchorl")
addPerson("Josh Deprez", "josh.deprez@gmail.com", "@DrJosh9000")
addPerson("Josh Goebel", "dreamer3@gmail.com", "@yyyc514")
addPerson("Josh Hoak", "jhoak@google.com", "@Kashomon")
addPerson("Josh Lubawy", "jlubawy@gmail.com")
addPerson("Josh Roppo", "joshroppo@gmail.com", "@Ropes")
addPerson("Josh Varga", "josh.varga@gmail.com")
addPerson("Joshua Blakeley", "jtblakeley@gmail.com", "27898@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joshua Boelter", "joshua.boelter@intel.com")
addPerson("Joshua Boelter", "joshua.boelter@intel.com", "@duckized")
addPerson("Joshua Humphries", "jhumphries131@gmail.com")
addPerson("Joshua Rubin", "joshua@rubixconsulting.com", "@joshuarubin")
addPerson("Joshua T Corbin", "joshua@uber.com")
addPerson("Josselin Costanzi", "josselin@costanzi.fr")
addPerson("Josselin Costanzi", "josselin@costanzi.fr", "16720@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Josselin Costanzi", "josselin@costanzi.fr", "@josselin-c")
addPerson("Jostein Stuhaug", "js@solidsystem.no")
addPerson("José Carlos Nieto", "jose.carlos@menteslibres.net")
addPerson("João Lucas Melo Brasio", "jaumlucas@gmail.com")
addPerson("Joël Stemmer", "jstemmer@google.com", "@jstemmer")
addPerson("Joël Stemmer", "stemmertech@gmail.com", "@jstemmer")
addPerson("Juan Carlos", "juanjcsr@gmail.com", "@juanjcsr")
addPerson("JuciÊ Dias Andrade", "ojucie@gmail.com")
addPerson("Jude Pereira", "judebpereira@gmail.com")
addPerson("Jukka-Pekka Kekkonen", "karatepekka@gmail.com", "@madari")
addPerson("Julia Hansbrough", "flowerhack@google.com", "@flowerhack")
addPerson("Julian Kornberger", "jk+github@digineo.de", "@corny")
addPerson("Julian Pastarmov", "pastarmovj@google.com")
addPerson("Julian Phillips", "julian@quantumfyre.co.uk", "@qur")
addPerson("Julie Qiu", "julieyeqiu@gmail.com", "@julieqiu", "julieqiu@google.com", "julie@golang.org")
addPerson("Julien Kauffmann", "julien.kauffmann@freelan.org")
addPerson("Julien Salleyron", "julien.salleyron@gmail.com", "@Juliens")
addPerson("Julien Schmidt", "google@julienschmidt.com", "@julienschmidt")
addPerson("Julio Montes", "julio.montes@intel.com", "@devimc")
addPerson("Junda Liu", "junda@celer.network")
addPerson("Jungho Ahn", "jhahn@google.com", "@jhahn21")
addPerson("Junya Hayashi", "ledmonster@gmail.com")
addPerson("Juraj Sukop", "sukop@users.noreply.github.com", "@sukop")
addPerson("Jure Ham", "jure.ham@zemanta.com", "@hamaxx")
addPerson("Jurgen De Commer", "jurgen.decommer@gmail.com")
addPerson("Justin Gracenin", "jgracenin@gmail.com")
addPerson("Justin Li", "git@justinli.net", "@pushrax")
addPerson("Justin Nuß", "nuss.justin@gmail.com", "5475@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Justin Nuß", "nuss.justin@gmail.com", "@nussjustin")
addPerson("Justyn Temme", "justyntemme@gmail.com", "@justyntemme")
addPerson("KB Sriram", "kbsriram@google.com", "@kbsriram")
addPerson("Kaarthik Rao Bekal Radhakrishna", "karthik.0703@gmail.com")
addPerson("Kale Blankenship", "kale@lemnisys.com", "15430@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kale Blankenship", "kale@lemnisys.com", "@vcabbage")
addPerson("Kaleb Elwert", "kelwert@atlassian.com", "@belak")
addPerson("Kamal Aboul-Hosn", "aboulhosn@google.com")
addPerson("Kamal Aboul-Hosn", "kamal.aboulhosn@gmail.com")
addPerson("Kamil Kisiel", "kamil@kamilkisiel.net", "@kisielk")
addPerson("Kamil Rytarowski", "krytarowski@users.noreply.github.com", "@krytarowski")
addPerson("Kang Hu", "hukangustc@gmail.com", "@mkhu")
addPerson("Kanitkorn S", "k.sujautra@gmail.com")
addPerson("Karan Misra", "kidoman@gmail.com")
addPerson("Karel Pazdera", "pazderak@gmail.com", "@pazderak")
addPerson("Karsten Köhler", "karsten.koehler95@gmail.com", "@SchiffFlieger")
addPerson("Karthik Karanth", "karanth.karthik@gmail.com")
addPerson("Kashav Madan", "kshvmdn@gmail.com", "@kshvmdn")
addPerson("Kasper Nilsson", "kaspern@google.com")
addPerson("Kate Manson", "kate.manson@izettle.com", "@kamanson")
addPerson("Katie Hockman", "katie@golang.org", "28759@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Katie Hockman", "katie@golang.org", "@katiehockman")
addPerson("Kato Kazuyoshi", "kato.kazuyoshi@gmail.com", "@kzys")
addPerson("Katrina Owen", "katrina.owen@gmail.com", "10395@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Katrina Owen", "katrina.owen@gmail.com", "@kytrinyx")
addPerson("Katsuya Miyachi", "kattu0426@gmail.com")
addPerson("Kaviraj", "kavirajkanagaraj@gmail.com", "@kavirajk")
addPerson("Kazuhiro Kubota", "k2.wanko@gmail.com")
addPerson("Kazuhiro Sera", "seratch@gmail.com", "@seratch")
addPerson("Keegan Carruthers-Smith", "keegan.csmith@gmail.com", "@keegancsmith")
addPerson("Kei Son", "hey.calmdown@gmail.com", "@heycalmdown")
addPerson("Keiji Yoshida", "keijiyoshida.mail@gmail.com", "@keijiyoshida")
addPerson("Keith Ball", "inflatablewoman@gmail.com")
addPerson("Keith Randall", "khr@golang.org", "5200@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Keith Randall", "khr@golang.org", "khr@google.com", "keithr@alum.mit.edu", "@randall77")
addPerson("Keith Rarick", "kr@xph.us", "@kr")
addPerson("Kelsey Hightower", "kelsey.hightower@gmail.com", "5491@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kelsey Hightower", "kelsey.hightower@gmail.com", "@kelseyhightower")
addPerson("Ken Friedenbach", "kenliz@cruzio.com", "@Ken1JF")
addPerson("Ken Rockot", "ken@oz.gs", "@krockot")
addPerson("Ken Sedgwick", "ken@bonsai.com", "@ksedgwic")
addPerson("Ken Thompson", "ken@golang.org", "@ken")
addPerson("Kenji Kaneda", "kenji.kaneda@gmail.com", "@kkaneda")
addPerson("Kenji Yano", "kenji.yano@gmail.com", "@yanolab")
addPerson("Kenneth Shaw", "kenshaw@gmail.com", "@kenshaw")
addPerson("Kenny Grant", "kennygrant@gmail.com", "10235@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kenny Grant", "kennygrant@gmail.com", "@kennygrant")
addPerson("Ketan Parmar", "ketanbparmar@gmail.com")
addPerson("Ketan Parmar", "ketanbparmar@gmail.com", "@kpbird")
addPerson("Kevin Ballard", "kevin@sb.org", "@kballard")
addPerson("Kevin Burke", "kev@inburke.com", "13437@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kevin Burke", "kev@inburke.com", "@kevinburke")
addPerson("Kevin Kirsche", "kev.kirsche@gmail.com", "@kkirsche")
addPerson("Kevin Klues", "klueska@gmail.com", "@klueska")
addPerson("Kevin Lozandier", "lozandier@gmail.com")
addPerson("Kevin Malachowski", "chowski@google.com")
addPerson("Kevin Ruffin", "kruffin@gmail.com")
addPerson("Kevin Vu", "kevin.m.vu@gmail.com", "@kvu787")
addPerson("Kevin Wang", "kevin@kevinwang.com")
addPerson("Kevin Zita", "bleedgreenandgold@gmail.com", "@kzisme")
addPerson("Khramov Anton", "anton@endocode.com")
addPerson("Kieran Colford", "kieran@kcolford.com")
addPerson("Kim Shrier", "kshrier@racktopsystems.com", "@kim-racktop")
addPerson("Kim YongBin", "kybinz@gmail.com", "5154@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kir Kolyshkin", "kolyshkin@gmail.com", "@kolyshkin")
addPerson("Kirill Korotaev", "kirillx@gmail.com")
addPerson("Kirill Smelkov", "kirr@nexedi.com", "16286@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kirill Smelkov", "kirr@nexedi.com", "@navytux")
addPerson("Kirklin McDonald", "kirklin.mcdonald@gmail.com", "@KirkMcDonald")
addPerson("Klaus Post", "klauspost@gmail.com", "6545@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Klaus Post", "klauspost@gmail.com", "@klauspost")
addPerson("Koala Yeung", "koalay@gmail.com")
addPerson("Kodie", "kodiegoodwin@gmail.com")
addPerson("Koen Rouwhorst", "info@koenrouwhorst.nl")
addPerson("Koichi Shiraishi", "zchee.io@gmail.com", "10420@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Koichi Shiraishi", "zchee.io@gmail.com", "@zchee")
addPerson("Koki Ide", "niconegoto@yahoo.co.jp", "@niconegoto")
addPerson("Konstantin Shaposhnikov", "k.shaposhnikov@gmail.com", "8065@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Konstantin Shaposhnikov", "k.shaposhnikov@gmail.com", "@kostya-sh")
addPerson("Kris Kwiatkowski", "kris@cloudflare.com", "27471@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kris Nova", "kris@nivenly.com", "@kris-nova")
addPerson("Kris", "krousey@google.com")
addPerson("Krish Munot", "krishmunot@gmail.com")
addPerson("Kristopher Watts", "traetox@gmail.com", "@traetox")
addPerson("Kropekk", "kamilkropiewnicki@gmail.com")
addPerson("Kun", "likunarmstrong@gmail.com", "@cnbuff410")
addPerson("Kunpei Sakai", "namusyaka@gmail.com", "23250@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kunpei Sakai", "namusyaka@gmail.com", "@namusyaka")
addPerson("Kyle Consalus", "consalus@gmail.com", "@kcons")
addPerson("Kyle Isom", "kyle@gokyle.net", "@kisom")
addPerson("Kyle Jones", "kyle@kyledj.com")
addPerson("Kyle Lemons", "kyle@kylelemons.net", "@kylelemons")
addPerson("Kyle Shannon", "kyle@pobox.com", "@ksshannon")
addPerson("Kyle Spiers", "eiais@google.com")
addPerson("Kyle Wood", "kyle@kylewood.cc", "@DemonWav")
addPerson("Kyohei Kadota", "lufia@lufia.org")
addPerson("Kyrylo Silin", "silin@kyrylo.org", "@kyrylo")
addPerson("L Campbell", "unpantsu@gmail.com", "@lye")
addPerson("L. Alberto Giménez", "lagimenez@gmail.com")
addPerson("LE Manh Cuong", "cuong.manhle.vn@gmail.com", "@Gnouc")
addPerson("Lai Jiangshan", "eag0628@gmail.com", "@laijs")
addPerson("Lakshay Garg", "lakshay.garg.1996@gmail.com", "21860@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Lakshay Garg", "lakshay.garg.1996@gmail.com", "@lakshayg")
addPerson("Landon Jones", "lbj@landonjones.com")
addPerson("Lann Martin", "lannm@google.com")
addPerson("Lanre Adelowo", "yo@lanre.wtf")
addPerson("Lantao Liu", "lantaol@google.com")
addPerson("Larry Clapp", "larry@theclapp.org", "@theclapp")
addPerson("Larry Hosken", "lahosken@gmail.com", "@lahosken")
addPerson("Lars Jeppesen", "jeppesen.lars@gmail.com")
addPerson("Lars Lehtonen", "lars.lehtonen@gmail.com")
addPerson("Lars Wiegman", "lars@namsral.com", "@namsral")
addPerson("Larz Conwell", "larzconwell@gmail.com", "@larzconwell")
addPerson("Laurent Voisin", "lpvoisin@gmail.com")
addPerson("Laurie Clark-Michalek", "laurie@qubit.com", "@lclarkmichalek")
addPerson("Laurynas", "LaurynasUsas@gmail.com")
addPerson("Lee Hinman", "hinman@gmail.com", "@hinman")
addPerson("Lee Packham", "lpackham@gmail.com", "@leepa")
addPerson("Lehner Florian", "dev@der-flo.net")
addPerson("Lehner Florian", "dev@der-flo.net", "@florianl")
addPerson("Leigh McCulloch", "leigh@mcchouse.com")
addPerson("Leigh McCulloch", "leighmcc@gmail.com", "21426@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Leigh McCulloch", "leighmcc@gmail.com", "@leighmcculloch")
addPerson("Leo Antunes", "leo@costela.net")
addPerson("Leo Rudberg", "ljr@google.com")
addPerson("Leon Klingele", "git@leonklingele.de", "16005@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Leon Klingele", "git@leonklingele.de", "@leonklingele")
addPerson("Leonel Quinteros", "leonel.quinteros@gmail.com")
addPerson("Letian Yi", "letian0805@gmail.com")
addPerson("Lev Shamardin", "shamardin@gmail.com", "@abbot")
addPerson("Lewin Bormann", "lbo@spheniscida.de")
addPerson("Lewin Bormann", "lewin.bormann@gmail.com", "@dermesser")
addPerson("Liam Missin", "liam.missin@gmail.com")
addPerson("Lifu Huang", "lifu.hlf@gmail.com")
addPerson("Lin Haowen", "linhaowen99@gmail.com")
addPerson("Lineu Felipe", "lineufelipe@gmail.com")
addPerson("Lion Yang", "lion@aosc.xyz", "@LionNatsu")
addPerson("Liz Rice", "liz@lizrice.com")
addPerson("Lloyd Dewolf", "foolswisdom@gmail.com", "@lloydde")
addPerson("Logan", "businesspapers@gmail.com")
addPerson("Lorenz Bauer", "lmb@cloudflare.com", "14200@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Lorenz Bauer", "lmb@cloudflare.com", "@lmb")
addPerson("Lorenzo Masini", "rugginoso@develer.com", "17340@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Lorenzo Masini", "rugginoso@develer.com", "@rugginoso")
addPerson("Lorenzo Stoakes", "lstoakes@gmail.com", "@lorenzo-stoakes")
addPerson("LotusFenn", "fenn.lotus@gmail.com")
addPerson("LotusFenn", "fenn.lotus@gmail.com", "@LotusFenn")
addPerson("Luan Santos", "cfcluan@gmail.com", "@luan")
addPerson("Lubomir I. Ivanov (VMware)", "neolit123@gmail.com", "@neolit123")
addPerson("Lubomir I. Ivanov", "neolit123@gmail.com", "26534@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Luca Bruno", "luca.bruno@coreos.com", "@lucab")
addPerson("Luca Greco", "luca.greco@alcacoop.it", "@rpl")
addPerson("Lucas Bremgartner", "lucas.bremgartner@gmail.com", "16630@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Lucas Bremgartner", "lucas.bremgartner@gmail.com", "@breml")
addPerson("Lucas Clemente", "lclemente@google.com", "@lucas-clemente")
addPerson("Lucas Garron", "lgarron@chromium.org", "*goog")
addPerson("Lucas Halim", "luketheflyingcoder@gmail.com")
addPerson("Lucien Stuker", "lucien.stuker@gmail.com", "@LStuker")
addPerson("Lucio De Re", "lucio.dere@gmail.com", "@lootch")
addPerson("Ludi Rehak", "ludi317@gmail.com", "@ludi317")
addPerson("Luigi Riefolo", "luigi.riefolo@gmail.com", "@luigi-riefolo")
addPerson("Luit van Drongelen", "luit@luit.it", "@Luit")
addPerson("Luit van Drongelen", "luitvd@gmail.com", "@Luit")
addPerson("Luka Zakrajšek", "tr00.g33k@gmail.com")
addPerson("Luka", "luka@blow.sh")
addPerson("Lukasz Dobrzanski", "lukasz.m.dobrzanski@gmail.com")
addPerson("Lukasz Milewski", "lmmilewski@gmail.com", "lmilewski@google.com", "@LMMilewski")
addPerson("Luke Curley", "qpingu@gmail.com", "@kixelated")
addPerson("Luke Granger-Brown", "git@lukegb.com")
addPerson("Luna Duclos", "luna.duclos@palmstonegames.com")
addPerson("Lyle Franklin", "lylejfranklin@gmail.com", "@ljfranklin")
addPerson("Lynn Boger", "laboger@linux.vnet.ibm.com", "6320@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Lynn Boger", "laboger@linux.vnet.ibm.com", "@laboger")
addPerson("Lyoness", "carmen.andoh@gmail.com")
addPerson("Maarten Bezemer", "maarten.bezemer@gmail.com")
addPerson("Maciej Dębski", "maciejd@google.com", "26521@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Maciej Galkowski", "maciejgalkowski@gmail.com")
addPerson("Maggie Nolan", "nolanmar@google.com")
addPerson("Magnus Hiie", "magnus.hiie@gmail.com")
addPerson("Mahmoud Bassiouny", "mbassiouny@google.com")
addPerson("Mak Kolybabi", "mak@kolybabi.com", "@mogigoma")
addPerson("Maksym Trykur", "maksym.trykur@gmail.com", "@mak73kur")
addPerson("Mal Curtis", "mal@mal.co.nz", "@snikch")
addPerson("Malcolm Rebughini", "malcolm.rebughini@gmail.com")
addPerson("Malhar Vora", "mlvora.2010@gmail.com")
addPerson("Manfred Touron", "m@42.am", "@moul")
addPerson("Maniacal", "mike.glenney@gmail.com")
addPerson("Manigandan Dharmalingam", "manigandan.jeff@gmail.com")
addPerson("Manish Goregaokar", "manishsmail@gmail.com", "@Manishearth")
addPerson("Mansour Rahimi", "rahimi.mnr@gmail.com", "25524@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mansour Rahimi", "rahimi.mnr@gmail.com", "@m4ns0ur")
addPerson("Manu Garg", "manugarg@gmail.com")
addPerson("Manu S Ajith", "neo@codingarena.in", "@manusajith")
addPerson("Manuel FernandezaaZ", "sourvivor@gmail.com")
addPerson("Marc Coury", "gloriphobia@gmail.com")
addPerson("Marc Lopez", "marc5.12@outlook.com")
addPerson("Marc Vandenbosch", "marc.vandenbosch@gmail.com")
addPerson("Marc-Antoine Ruel", "maruel@chromium.org", "7845@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Marc-Antoine Ruel", "maruel@chromium.org", "@maruel", "*goog")
addPerson("Marcel Edmund Franke", "marcel.edmund.franke@gmail.com", "@donutloop")
addPerson("Marcel Lanz", "marcel.lanz@n-1.ch")
addPerson("Marcel van Lohuizen", "mpvl@golang.org", "5182@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Marcel van Lohuizen", "mpvl@golang.org", "@mpvl")
addPerson("Marcelo Cantos", "marcelo.cantos@gmail.com")
addPerson("Marcio Feroni", "consultoria.feroni@gmail.com")
addPerson("Marco Hennings", "marco.hennings@freiheit.com", "@mhennings")
addPerson("Marco Peereboom", "marco@peereboom.us")
addPerson("Marcos Minond", "minond.marcos@gmail.com")
addPerson("Marcus Comstedt", "marcus@mc.pp.se")
addPerson("Marcus Willock", "crazcalm@gmail.com", "@crazcalm")
addPerson("Marek Polacek", "polacek@redhat.com", "@mpolacek")
addPerson("Marga Manterola", "marga@google.com", "@margamanterola")
addPerson("Marin", "marin.basic02@gmail.com", "@MarinX")
addPerson("Mario Arranz", "marioarranzr@gmail.com", "@marioarranzr")
addPerson("Marius Kittler", "mariuskittler@gmx.de")
addPerson("Marius Nuennerich", "mnu@google.com")
addPerson("Mark Adams", "mark@markadams.me", "@mark-adams")
addPerson("Mark Bucciarelli", "mkbucc@gmail.com", "@mbucc")
addPerson("Mark Harrison", "marhar@google.com")
addPerson("Mark Lee", "code0x9@gmail.com")
addPerson("Mark Pulford", "mark@kyne.com.au", "15920@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mark Pulford", "mark@kyne.com.au", "@mpx")
addPerson("Mark Rushakoff", "mark.rushakoff@gmail.com", "@mark-rushakoff")
addPerson("Mark Rushakoff", "mark@influxdata.com")
addPerson("Mark Ryan", "mark.d.ryan@intel.com", "@markdryan")
addPerson("Mark Severson", "miquella@gmail.com", "11540@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mark Severson", "miquella@gmail.com", "@miquella")
addPerson("Mark Theunissen", "mark.theunissen@gmail.com", "@marktheunissen")
addPerson("Mark Wolfe", "mark@wolfe.id.au", "@wolfeidau")
addPerson("Mark Zavislak", "zavislak@google.com", "@zavislak")
addPerson("Marko Juhani Silokunnas", "marko.silokunnas@gmail.com", "@marant")
addPerson("Marko Kevac", "marko@kevac.org")
addPerson("Marko Kevac", "marko@kevac.org", "@mkevac")
addPerson("Marko Mudrinic", "mudrinic.mare@gmail.com", "@xmudrii")
addPerson("Marko Mudrinić", "mudrinic.mare@gmail.com", "17318@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Marko Tiikkaja", "marko@joh.to", "5446@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Marko Tiikkaja", "marko@joh.to", "@johto")
addPerson("Markus Sonderegger", "marraison@gmail.com", "@mars9")
addPerson("Markus Zimmermann", "markus.zimmermann@nethead.at")
addPerson("Markus Zimmermann", "zimmski@gmail.com", "@zimmski")
addPerson("Markus", "m.walther97@gmail.com", "@markus-wa")
addPerson("Marten Seemann", "martenseemann@gmail.com")
addPerson("Marten Seemann", "martenseemann@gmail.com", "@marten-seemann")
addPerson("Martijn Janssen", "martijn9612+github@gmail.com")
addPerson("Martin Bertschler", "mbertschler@gmail.com", "@mbertschler")
addPerson("Martin Drlik", "martadrlik@gmail.com")
addPerson("Martin Garton", "garton@gmail.com", "13346@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Martin Garton", "garton@gmail.com", "@MartinGarton")
addPerson("Martin Garton", "garton@gmail.com", "@mjgarton")
addPerson("Martin Habbecke", "marhab@google.com")
addPerson("Martin Hamrle", "martin.hamrle@gmail.com", "@mhamrle")
addPerson("Martin Hoefling", "martin.hoefling@gmx.de")
addPerson("Martin Kreichgauer", "martinkr@google.com", "16331@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Martin Kreichgauer", "martinkr@google.com", "@kreichgauer")
addPerson("Martin Kunc", "martinkunc@users.noreply.github.com")
addPerson("Martin Kunc", "mk@Martins-MacBook-Pro.local")
addPerson("Martin Lee", "martin@martinlee.org")
addPerson("Martin Lindhe", "martin.j.lindhe@gmail.com", "@martinlindhe")
addPerson("Martin Möhrmann", "moehrmann@google.com", "martisch@uos.de", "@martisch")
addPerson("Martin Möhrmann", "martisch@uos.de", "5846@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Martin Möhrmann", "moehrmann@google.com", "16006@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Martin Olsen", "github.com@martinolsen.net", "@martinolsen")
addPerson("Martin Olsson", "martin@minimum.se", "@mo")
addPerson("Martin Probst", "martin@probst.io")
addPerson("Martin Redmond", "mrtodo@gmail.com")
addPerson("Martin Sucha", "anty.sk+git@gmail.com", "@martin-sucha")
addPerson("Martin Tournoij", "martin@arp242.net")
addPerson("Martins Sipenko", "martins.sipenko@gmail.com", "@martinssipenko")
addPerson("Martynas Budriūnas", "mabu@google.com", "@mabu")
addPerson("Marvin Stenger", "marvin.stenger94@gmail.com")
addPerson("Marvin Stenger", "marvin.stenger94@gmail.com", "9850@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Masa Sekimura", "sekimura@gmail.com")
addPerson("Masahiro Furudate", "masahiro.furudate@gmail.com")
addPerson("Massimiliano Ghilardi", "massimiliano.ghilardi@gmail.com")
addPerson("Mat Byczkowski", "mbyczkowski@gmail.com", "@mbyczkowski")
addPerson("Mat Evans", "matzhouse@gmail.com")
addPerson("Mat Ryer", "thatmatryer@gmail.com")
addPerson("Matej Baćo", "matejbaco@gmail.com", "@matejb")
addPerson("Mateus Amin", "mateus.amin@gmail.com")
addPerson("Mateusz Czapliński", "czapkofan@gmail.com", "10525@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mateusz Czapliński", "czapkofan@gmail.com", "@akavel")
addPerson("Mathias Beke", "git@denbeke.be", "7490@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mathias Hall-Andersen", "mathias@hall-andersen.dk")
addPerson("Mathias Leppich", "mleppich@muhqu.de", "@muhqu")
addPerson("MathiasB", "git@denbeke.be", "@DenBeke")
addPerson("Mathieu Lonjaret", "mathieu.lonjaret@gmail.com", "8466@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mats Lidell", "mats.lidell@cag.se", "@matsl")
addPerson("Matt Aimonetti", "mattaimonetti@gmail.com", "13882@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matt Aimonetti", "mattaimonetti@gmail.com", "@mattetti")
addPerson("Matt Blair", "me@matthewblair.net", "@mblair")
addPerson("Matt Bostock", "matt@mattbostock.com", "@mattbostock")
addPerson("Matt Dee", "mdee@hioscar.com")
addPerson("Matt Drollette", "matt@drollette.com", "@MDrollette")
addPerson("Matt DuVall", "matt@stripe.com")
addPerson("Matt Harden", "matt.harden@gmail.com")
addPerson("Matt Harden", "matt.harden@gmail.com", "8785@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matt Harden", "matt.harden@gmail.com", "@nerdatmath")
addPerson("Matt Jibson", "matt.jibson@gmail.com", "@mjibson")
addPerson("Matt Joiner", "anacrolix@gmail.com", "@anacrolix")
addPerson("Matt Jones", "matt@mhjones.org")
addPerson("Matt Juran", "thepciet@gmail.com", "@pciet")
addPerson("Matt Keenan", "github@mattkeenan.net", "@mattkeenan")
addPerson("Matt Layher", "mdlayher@gmail.com", "7860@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matt Layher", "mdlayher@gmail.com", "@mdlayher")
addPerson("Matt Proud", "matt.proud@gmail.com", "6400@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matt Reiferson", "mreiferson@gmail.com", "@mreiferson")
addPerson("Matt Robenolt", "matt@ydekproductions.com", "@mattrobenolt")
addPerson("Matt T. Proud", "matt.proud@gmail.com", "@matttproud")
addPerson("Matteo Croce", "matteo.croce@canonical.com")
addPerson("Matthew Allen Moltzau", "Matthew_Moltzau@comcast.com")
addPerson("Matthew Brennan", "matty.brennan@gmail.com", "@mattyb")
addPerson("Matthew Broberg", "gogetmb@gmail.com", "@mbbroberg")
addPerson("Matthew Broberg", "matthewbbroberg@gmail.com")
addPerson("Matthew Byrne", "mjw.byrne@gmail.com")
addPerson("Matthew Cottingham", "mattcottingham@gmail.com", "@mattrco")
addPerson("Matthew Dempsky", "mdempsky@google.com", "@mdempsky")
addPerson("Matthew Dempsky", "matthew@dempsky.org", "8715@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matthew Dempsky", "mdempsky@google.com", "5440@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matthew Denton", "mdenton@skyportsystems.com", "@mdentonSkyport")
addPerson("Matthew Endsley", "mendsley@gmail.com")
addPerson("Matthew Herrmann", "mherr@google.com")
addPerson("Matthew Holt", "matthew.holt+git@gmail.com", "@mholt")
addPerson("Matthew Holt", "matthew.holt@gmail.com", "7611@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matthew Hooker", "mwhooker@gmail.com")
addPerson("Matthew Horsnell", "matthew.horsnell@gmail.com", "@matt2909")
addPerson("Matthew LJ Smith", "matthew.lj.smith@gmail.com")
addPerson("Matthew Rudy Jacobs", "matthewrudyjacobs@gmail.com")
addPerson("Matthew Waters", "mwwaters@gmail.com", "@mwwaters")
addPerson("Matthew Whisenhunt", "matt.whisenhunt@gmail.com")
addPerson("Matthieu Hauglustaine", "matt.hauglustaine@gmail.com", "@MattHauglustaine")
addPerson("Matthieu Sarter", "matthieu.sarter.external@atos.net", "16325@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matthieu Sarter", "matthieu.sarter.external@atos.net", "@MatthieuSarter")
addPerson("Matthijs Kooijman", "matthijs@stdin.nl", "@matthijskooijman")
addPerson("Max Moroz", "maxmoroz@gmail.com")
addPerson("Max Renaud", "maxrd@google.com")
addPerson("Max Riveiro", "kavu13@gmail.com", "@kavu")
addPerson("Max Schmitt", "max@schmitt.mx")
addPerson("Max Ushakov", "ushmax@gmail.com", "@ushakov")
addPerson("Maxime de Roucy", "maxime.deroucy@gmail.com", "@tchernomax")
addPerson("Maxwell Krohn", "themax@gmail.com", "@maxtaco")
addPerson("Mayank Kumar", "krmayankk@gmail.com", "@krmayankk")
addPerson("Mayank Sharma", "maksharma231@gmail.com")
addPerson("Mayank Sharma", "mayank.sharma@tokopedia.com")
addPerson("Mayank Sharma", "mayank@qlogic.io")
addPerson("Mayra Cabrera", "mcabrera1087@gmail.com")
addPerson("Mehul Choube", "mchoube@gmail.com")
addPerson("Meir Fischer", "meirfischer@gmail.com", "@meirf")
addPerson("Meng Zhuo", "mengzhuo1203@gmail.com", "@mengzhuo")
addPerson("Menghan Li", "menghanl@google.com")
addPerson("Mhd Sulhan", "m.shulhan@gmail.com", "@shuLhan")
addPerson("MiLk", "hello@emilienkenler.com", "@MiLk")
addPerson("Michael Andersen", "michael@steelcode.com")
addPerson("Michael Anthony Knyszek", "mknyszek@google.com", "@mknyszek")
addPerson("Michael Brandenburg", "mbrandenburg@bolste.com")
addPerson("Michael Chaten", "mchaten@gmail.com", "@chaten")
addPerson("Michael D Henderson", "mdhender@mdhender.com")
addPerson("Michael Darakananda", "pongad@gmail.com")
addPerson("Michael Darakananda", "pongad@google.com")
addPerson("Michael Darakananda", "pongad@google.com", "@pongad")
addPerson("Michael Darakananda", "pongad@pongad-linuxworkstation1.sea.corp.google.com")
addPerson("Michael Dorner", "mail@michaeldorner.de")
addPerson("Michael Edwards", "medwards@walledcity.ca")
addPerson("Michael Elkins", "michael.elkins@gmail.com", "@sigpipe")
addPerson("Michael Ellis", "micellis@justin.tv", "@mellis")
addPerson("Michael Folkson", "michael@riskbazaar.org")
addPerson("Michael Fraenkel", "michael.fraenkel@gmail.com", "5889@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Fraenkel", "michael.fraenkel@gmail.com", "@fraenkel")
addPerson("Michael Gehring", "mg@ebfe.org", "6715@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Gehring", "mg@ebfe.org", "@ebfe")
addPerson("Michael Henderson", "mdhender@users.noreply.github.com", "@mdhender")
addPerson("Michael Hendricks", "michael@ndrix.org", "@mndrix")
addPerson("Michael Hoisie", "hoisie@gmail.com", "@hoisie")
addPerson("Michael Hudson-Doyle", "michael.hudson@canonical.com", "5153@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Hudson-Doyle", "michael.hudson@canonical.com", "@mwhudson")
addPerson("Michael Kasch", "michael.kasch@gmail.com", "@MyChaOS87")
addPerson("Michael Kuryshev", "me@mk9.name")
addPerson("Michael Käufl", "golang@c.michael-kaeufl.de", "@michael-k")
addPerson("Michael Lewis", "mikelikespie@gmail.com", "@mikelikespie")
addPerson("Michael MacInnis", "michael.p.macinnis@gmail.com", "6355@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael MacInnis", "michael.p.macinnis@gmail.com", "@michaelmacinnis")
addPerson("Michael Marineau", "michael.marineau@coreos.com", "mike@marineau.org", "@marineam")
addPerson("Michael Matloob", "matloob@golang.org", "10033@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Matloob", "matloob@golang.org", "matloob@google.com", "@matloob")
addPerson("Michael Matloob", "michaelmatloob@gmail.com", "5270@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael McConville", "momcconville@gmail.com")
addPerson("Michael McGreevy", "mcgreevy@golang.org", "@mcgreevy")
addPerson("Michael McLoughlin", "mmcloughlin@gmail.com", "@mmcloughlin")
addPerson("Michael Munday", "mike.munday@ibm.com", "11990@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Munday", "mike.munday@ibm.com", "munday@ca.ibm.com", "@mundaym")
addPerson("Michael Pearson", "mipearson@gmail.com", "@mipearson")
addPerson("Michael Pratt", "mpratt@google.com", "12120@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Pratt", "mpratt@google.com", "@prattmic")
addPerson("Michael Schaller", "michael@5challer.de", "@michael-schaller")
addPerson("Michael Schurter", "michael.schurter@gmail.com", "@schmichael")
addPerson("Michael Shields", "mshields@google.com", "@shields")
addPerson("Michael Smith", "mikejsmitty@gmail.com")
addPerson("Michael Spiegel", "michael.m.spiegel@gmail.com")
addPerson("Michael Stapelberg", "stapelberg@google.com", "8470@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Stapelberg", "stapelberg@google.com", "@stapelberg")
addPerson("Michael Steinert", "mike.steinert@gmail.com", "@msteinert")
addPerson("Michael Sterle-Contala", "mike.sterle@gmail.com")
addPerson("Michael Teichgräber", "mteichgraeber@gmx.de", "@knieriem")
addPerson("Michael Vetter", "g.bluehut@gmail.com", "@jubalh")
addPerson("Michal Bohuslávek", "mbohuslavek@gmail.com", "9715@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michal Bohuslávek", "mbohuslavek@gmail.com", "@mibk")
addPerson("Michal Cierniak", "cierniak@google.com", "@cierniak")
addPerson("Michal Franc", "lam.michal.franc@gmail.com")
addPerson("Michal Pristas", "michal.pristas@gmail.com")
addPerson("Michalis Kargakis", "michaliskargakis@gmail.com")
addPerson("Michel Lespinasse", "walken@google.com", "12938@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michel Lespinasse", "walken@google.com", "@walken-google")
addPerson("Miek Gieben", "miek@miek.nl", "@miekg", "*goog")
addPerson("Miguel Molina", "hi@mvader.me", "@erizocosmico")
addPerson("Miguel Perez", "miguel250@gmail.com")
addPerson("Mihai Borobocea", "MihaiBorobocea@gmail.com")
addPerson("Mihai Todor", "todormihai@gmail.com", "@mihaitodor")
addPerson("Mike Appleby", "mike@app.leby.org", "14930@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mike Appleby", "mike@app.leby.org", "@appleby")
addPerson("Mike Danese", "mikedanese@google.com", "@mikedanese")
addPerson("Mike Dour", "mdour@google.com")
addPerson("Mike Graf", "mikegraf000@gmail.com")
addPerson("Mike Houston", "mike@kothar.net", "@kothar")
addPerson("Mike Kabischev", "kabischev@gmail.com")
addPerson("Mike Lloyd", "kevin.michael.lloyd@gmail.com")
addPerson("Mike Lloyd", "kevin.michael.lloyd@gmail.com", "10091@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mike Rosset", "mike.rosset@gmail.com", "@mrosset")
addPerson("Mike Samuel", "mikesamuel@gmail.com", "17511@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mike Samuel", "mikesamuel@gmail.com", "@mikesamuel")
addPerson("Mike Solomon", "msolo@gmail.com", "@msolo")
addPerson("Mike Strosaker", "strosake@us.ibm.com")
addPerson("Mike Tsao", "mike@sowbug.com")
addPerson("Mike Wiacek", "mjwiacek@google.com", "@mikewiacek")
addPerson("Mike Wiacek", "mjwiacek@mjwiacek-macbookpro.roam.corp.google.com")
addPerson("Mikhail Gusarov", "dottedmag@dottedmag.net", "@dottedmag")
addPerson("Miki Habryn", "dichro@rcpt.to")
addPerson("Miki Tebeka", "miki.tebeka@gmail.com", "@tebeka")
addPerson("Mikio Hara", "mikioh.public.networking@gmail.com", "mikioh.mikioh@gmail.com", "@mikioh", "29736@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mikkel Krautz", "mikkel@krautz.dk", "@mkrautz")
addPerson("Mikołaj Baranowski", "m.baranowski@travelaudience.com")
addPerson("Milan Knezevic", "milan.knezevic@mips.com", "@milanknezevic")
addPerson("Minaev Mike", "minaev.mike@gmail.com", "23800@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Minaev Mike", "minaev.mike@gmail.com", "@minaevmike")
addPerson("Minux Ma", "minux@golang.org", "5055@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Miquel Sabaté Solà", "mikisabate@gmail.com", "@mssola")
addPerson("Miroslav Genov", "mgenov@gmail.com", "@mgenov")
addPerson("Misty De Meo", "mistydemeo@gmail.com", "@mistydemeo")
addPerson("Mitchell-Riley", "tug72074@temple.edu")
addPerson("Mithun Sasidharan", "mithunsasidharan89@gmail.com")
addPerson("Miyakawa Taku", "miyakawa.taku@gmail.com")
addPerson("Mofizur Rahman", "moficodes@gmail.com")
addPerson("Mohan Pawar", "mohanpawary1990@gmail.com")
addPerson("Mohit Agarwal", "mohit@sdf.org", "10715@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mohit Agarwal", "mohit@sdf.org", "@0xmohit")
addPerson("Mohit kumar Bajoria", "mohitbajo36@gmail.com", "@mbj36")
addPerson("Momchil Velikov", "momchil.velikov@gmail.com", "@momchil-velikov")
addPerson("Monis Khan", "mkhan@redhat.com", "@enj")
addPerson("Monty Taylor", "mordred@inaugust.com", "@emonty")
addPerson("Moriyoshi Koizumi", "mozo@mozo.jp", "@moriyoshi")
addPerson("Morten Siebuhr", "sbhr@sbhr.dk", "10928@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Morten Siebuhr", "sbhr@sbhr.dk", "@msiebuhr")
addPerson("Mostyn Bramley-Moore", "mostyn@antipode.se")
addPerson("Mostyn Bramley-Moore", "mostyn@antipode.se", "18980@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mostyn Bramley-Moore", "mostyn@antipode.se", "@mostynb")
addPerson("Mrunal Patel", "mrunalp@gmail.com", "@mrunalp")
addPerson("Muhammad Falak R Wani", "falakreyaz@gmail.com", "23560@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Muhammad Falak R Wani", "falakreyaz@gmail.com", "@mfrw")
addPerson("Muhammed Uluyol", "uluyol0@gmail.com", "@uluyol")
addPerson("Mura Li", "mura_li@castech.com.tw", "10925@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mura Li", "mura_li@castech.com.tw", "@typeless")
addPerson("Máximo Cuadros Ortiz", "mcuadros@gmail.com", "@mcuadros")
addPerson("NODA, Kai", "nodakai@gmail.com")
addPerson("Nan Deng", "monnand@gmail.com", "@monnand")
addPerson("Naoki INADA", "songofacandy@gmail.com", "5895@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Naoki Kanatani", "k12naoki@gmail.com", "@kanata2")
addPerson("Nate Wilkinson", "nathanwilk7@gmail.com", "@nathanwilk7")
addPerson("Nathan Cantelmo", "n.cantelmo@gmail.com", "@ncantelmo")
addPerson("Nathan Caza", "mastercactapus@gmail.com", "@mastercactapus")
addPerson("Nathan Davies", "nathanjamesdavies@gmail.com")
addPerson("Nathan John Youngman", "nj@nathany.com", "@nathany")
addPerson("Nathan Otterness", "otternes@cs.unc.edu")
addPerson("Nathan P Finch", "nate.finch@gmail.com", "@natefinch")
addPerson("Nathan VanBenschoten", "nvanbenschoten@gmail.com", "@nvanbenschoten")
addPerson("Nathan Youngman", "git@nathany.com", "@nathany")
addPerson("Nathan Youngman", "hello@nathany.com", "5235@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nathan(yinian) Hu", "nathanhu@google.com", "@nathandfox")
addPerson("Nathaniel Caza", "mastercactapus@gmail.com", "17183@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Naveen Kumar Sangi", "naveenkumarsangi@protonmail.com")
addPerson("Neelesh Chandola", "neelesh.c98@gmail.com")
addPerson("Neil Basu", "nbasu02@gmail.com")
addPerson("Neil Lyons", "nwjlyons@googlemail.com", "@nwjlyons")
addPerson("Neil Owen", "neil.anthony.owen@gmail.com")
addPerson("Nelz", "nelz9999@gmail.com")
addPerson("Nemin Sun", "sunnemin@gmail.com")
addPerson("Neven Sajko", "nsajko@gmail.com", "15048@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Neven Sajko", "nsajko@gmail.com", "@nsajko")
addPerson("Nevins Bartolomeo", "nevins.bartolomeo@gmail.com", "@nevins-b")
addPerson("Niall Sheridan", "nsheridan@gmail.com", "13755@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Niall Sheridan", "nsheridan@gmail.com", "@nsheridan")
addPerson("Nic Day", "nic.day@me.com", "@nicday")
addPerson("Nicholas Anderson", "nick@miletwo.net")
addPerson("Nicholas Katsaros", "nick@nickkatsaros.com", "@nkatsaros")
addPerson("Nicholas Maniscalco", "nicholas@maniscalco.com")
addPerson("Nicholas Maniscalco", "nicholas@maniscalco.com", "@nicholasmaniscalco")
addPerson("Nicholas Ng", "nickng@nickng.io")
addPerson("Nicholas Presta", "nick@nickpresta.ca", "@nickpresta")
addPerson("Nicholas Rawlings", "nicholasorenrawlings@gmail.com")
addPerson("Nicholas Waples", "nwaples@gmail.com", "@nwaples")
addPerson("Nick Cooper", "nmvc@google.com")
addPerson("Nick Cooper", "nmvc@google.com", "5776@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nick Craig-Wood", "nick@craig-wood.com", "@ncw")
addPerson("Nick Craig-Wood", "nickcw@gmail.com", "5175@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nick Harper", "nharper@google.com")
addPerson("Nick Kubala", "nkubala@google.com", "@nkubala")
addPerson("Nick McCrory", "nickmhc14@gmail.com")
addPerson("Nick Miyake", "nmiyake@gmail.com")
addPerson("Nick Patavalis", "nick.patavalis@gmail.com", "9880@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nick Patavalis", "nick.patavalis@gmail.com", "@npat-efault")
addPerson("Nick Petroni", "npetroni@cs.umd.edu")
addPerson("Nick Sullivan", "nicholas.sullivan@gmail.com", "@grittygrease")
addPerson("Nickolay Turpitko", "nikolay@turpitko.com", "7015@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nicolas BRULEZ", "n.brulez@gmail.com", "@N-Bz")
addPerson("Nicolas S. Dade", "nic.dade@gmail.com", "@nsd20463")
addPerson("Niek Sanders", "niek.sanders@gmail.com", "19925@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Niek Sanders", "niek.sanders@gmail.com", "@nieksand")
addPerson("Nigel Kerr", "nigel.kerr@gmail.com", "@nigelkerr")
addPerson("Nigel Tao", "nigeltao@golang.org", "5899@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nigel Tao", "nigeltao@golang.org", "@nigeltao")
addPerson("Nik Nyby", "nnyby@columbia.edu", "@nikolas")
addPerson("Nikhil Benesch", "nikhil.benesch@gmail.com", "25418@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nikhil Benesch", "nikhil.benesch@gmail.com", "@benesch")
addPerson("Nikhita Raghunath", "nikitaraghunath@gmail.com")
addPerson("Niklas Lindblad", "niklas@lindblad.info")
addPerson("Niklas Schnelle", "niklas.schnelle@gmail.com", "@niklas88")
addPerson("Niko Dziemba", "niko@dziemba.com", "@dziemba")
addPerson("Nikolay Ponomarev", "itsnikolay@gmail.com")
addPerson("Nikolay Turpitko", "nikolay@turpitko.com", "@nikolay-turpitko")
addPerson("Nilesh Jagnik", "nileshj@google.com")
addPerson("Nils Larsgård", "nilsmagnus@gmail.com", "@nilsmagnus")
addPerson("Nir Soffer", "nirsof@gmail.com")
addPerson("Nishanth Shanmugham", "nishanth.gerrard@gmail.com")
addPerson("Nitin Patil", "patil16nit@gmail.com")
addPerson("Noah Campbell", "noahcampbell@gmail.com")
addPerson("Noble Johnson", "noblepoly@gmail.com")
addPerson("Nodir Turakulov", "nodir@google.com", "7877@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nodir Turakulov", "nodir@google.com", "@nodirt")
addPerson("Noel Georgi", "git@frezbo.com")
addPerson("Norberto Lopes", "nlopes.ml@gmail.com", "@nlopes")
addPerson("Nyah Check", "check.nyah@gmail.com", "22747@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("ObesePenguin", "srburnham@gmail.com")
addPerson("Odin Ugedal", "odin@ugedal.com", "@odinuge")
addPerson("Oleg Bulatov", "dmage@yandex-team.ru", "@dmage")
addPerson("Oleg Bulatov", "oleg@bulatov.me", "@dmage")
addPerson("Oleg Vakheta", "helginet@gmail.com", "@helginet")
addPerson("Oleku Konko", "oleku.konko@gmail.com", "@olekukonko")
addPerson("OlgaVlPetrova", "ovppetrova@gmail.com", "@OlgaVlPetrova")
addPerson("Oling Cat", "olingcat@gmail.com", "5136@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Oling Cat", "olingcat@gmail.com", "@OlingCat")
addPerson("Oliver Hookins", "ohookins@gmail.com", "@ohookins")
addPerson("Oliver Skånberg-Tippen", "oliverskanbergtippen@gmail.com")
addPerson("Oliver Stenbom", "ostenbom@pivotal.io", "@ostenbom")
addPerson("Oliver Tonnhofer", "olt@bogosoft.com", "@olt")
addPerson("Olivier Duperray", "duperray.olivier@gmail.com", "@dupoxy")
addPerson("Olivier Mengué", "olivier.mengue@gmail.com")
addPerson("Olivier Poitrey", "rs@netflix.com", "rs@dailymotion.com", "rs@rhapsodyk.net", "10610@62eb7196-b449-3ce5-99f1-c037f21e1705", "@rs")
addPerson("Olivier Saingre", "osaingre@gmail.com", "@osaingre")
addPerson("Olivier", "desylva@gmail.com")
addPerson("Omar Jarjur", "ojarjur@google.com")
addPerson("OneOfOne", "oneofone@gmail.com", "@OneOfOne")
addPerson("Oryan Moshe", "iamoryanmoshe@gmail.com", "28422@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Oryan Moshe", "iamoryanmoshe@gmail.com", "@oryanmoshe")
addPerson("Osamu TONOMORI", "osamingo@gmail.com")
addPerson("Osamu TONOMORI", "osamingo@gmail.com", "@osamingo")
addPerson("Oscar Forner Martinez", "oscar.forner.martinez@gmail.com")
addPerson("Otto Giron", "ottog2486@gmail.com")
addPerson("Owen Marshall", "om@om.ht")
addPerson("Pablo Lalloni", "plalloni@gmail.com", "@lalloni")
addPerson("Pablo Santiago Blum de Aguiar", "scorphus@gmail.com", "@scorphus")
addPerson("Paddy Foran", "paddy@secondbit.org", "@paddyforan")
addPerson("Paddy Steed", "jarktasaa@gmail.com")
addPerson("Padraig Kitterick", "padraigkitterick@gmail.com", "@padraigkitterick")
addPerson("Panagiotis Xynos", "panagiotis.xinos@gmail.com")
addPerson("Paolo Giarrusso", "p.giarrusso@gmail.com", "@Blaisorblade")
addPerson("Paolo Martini", "mrtnpaolo@gmail.com", "@ear")
addPerson("Parker Moore", "parkrmoore@gmail.com", "6501@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Parker Moore", "parkrmoore@gmail.com", "@parkr")
addPerson("Pascal Corpet", "lascap@google.com")
addPerson("Pascal Muetschard", "pmuetschard@google.com")
addPerson("Pascal S. de Kloe", "pascal@quies.net", "@pascaldekloe")
addPerson("Pascal de Kloe", "pascal@quies.net", "8310@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Pat Moroney", "pat@pat.email", "@pmoroney")
addPerson("Patrick Bennett", "patrick@thebennetts.com")
addPerson("Patrick Edgett", "pedgett@gmail.com")
addPerson("Patrick Gavlin", "pgavlin@gmail.com", "@pgavlin")
addPerson("Patrick Higgins", "patrick.allen.higgins@gmail.com", "@patrick-higgins")
addPerson("Patrick Lee", "pattyshack101@gmail.com", "@pattyshack")
addPerson("Patrick Mezard", "patrick@mezard.eu", "@pmezard")
addPerson("Patrick Mylund Nielsen", "patrick@patrickmn.com", "@patrickmn")
addPerson("Patrick Mézard", "patrick@mezard.eu", "7915@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Patrick Pelletier", "pp.pelletier@gmail.com", "@skinp")
addPerson("Patrick Riley", "pfr@google.com", "@pfrstg")
addPerson("Patrick Smith", "pat42smith@gmail.com", "@pat42smith")
addPerson("Patrick Uiterwijk", "patrick@puiterwijk.org")
addPerson("Patrik Nyblom", "pnyb@google.com")
addPerson("Paul A Querna", "paul.querna@gmail.com", "@pquerna")
addPerson("Paul Borman", "borman@google.com", "@pborman")
addPerson("Paul Boyd", "boyd.paul2@gmail.com", "@pboyd")
addPerson("Paul Chang", "paulchang@google.com", "@pchx")
addPerson("Paul Gier", "pgier@redhat.com")
addPerson("Paul Hankin", "paulhankin@google.com")
addPerson("Paul Jolly", "paul@myitcv.org.uk", "@myitcv")
addPerson("Paul Jolly", "paul@myitcv.io", "@myitcv")
addPerson("Paul Jolly", "paul@myitcv.org.uk", "16375@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Paul Lalonde", "paul.a.lalonde@gmail.com", "@paul-lalonde")
addPerson("Paul M Furley", "paul@paulfurley.com")
addPerson("Paul Marks", "pmarks@google.com", "6050@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Paul Marks", "pmarks@google.com", "@pmarks-net")
addPerson("Paul Meyer", "paul.meyer@microsoft.com", "@paulmey")
addPerson("Paul Nasrat", "pnasrat@google.com")
addPerson("Paul PISCUC", "paul.piscuc@gmail.com", "@ppiscuc")
addPerson("Paul Querna", "pquerna@apache.org", "@pquerna", "paul@querna.org", "14273@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Paul Rosania", "paul.rosania@gmail.com", "@paulrosania")
addPerson("Paul Sbarra", "sbarra.paul@gmail.com", "@tones111")
addPerson("Paul Smith", "paulsmith@pobox.com", "@paulsmith")
addPerson("Paul Tyng", "paul@paultyng.net")
addPerson("Paul Tyng", "ptyng@underarmour.com")
addPerson("Paul Wankadia", "junyer@google.com", "@junyer")
addPerson("Paul van Brouwershaven", "paul@vanbrouwershaven.com", "5920@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Paul van Brouwershaven", "paul@vanbrouwershaven.com", "@vanbroup")
addPerson("Paulo Casaretto", "pcasaretto@gmail.com", "@pcasaretto")
addPerson("Paulo Flabiano Smorigo", "pfsmorigo@linux.vnet.ibm.com")
addPerson("Paulo Flabiano Smorigo", "pfsmorigo@linux.vnet.ibm.com", "@pfsmorigo")
addPerson("Pavlo Sumkin", "psumkin@mirantis.com")
addPerson("Pavlo Sumkin", "ymkins@gmail.com")
addPerson("Pawel Knap", "pawelknap88@gmail.com", "@ppknap")
addPerson("Pawel Pisarzewski", "morris@morris.io")
addPerson("Peng Gao", "peng.gao.dut@gmail.com")
addPerson("Percy Wegmann", "ox.to.a.cart@gmail.com", "@oxtoacart")
addPerson("Perry Abbott", "perry.j.abbott@gmail.com", "@pabbott0")
addPerson("Petar Maymounkov", "petarm@gmail.com", "@petar")
addPerson("Peter Armitage", "peter.armitage@gmail.com", "@pja")
addPerson("Peter Armitage", "pja@google.com")
addPerson("Peter Bourgon", "peter@bourgon.org", "@peterbourgon")
addPerson("Peter Collingbourne", "pcc@google.com", "5535@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Collingbourne", "pcc@google.com", "@pcc")
addPerson("Peter Ebden", "peter.ebden@gmail.com")
addPerson("Peter Froehlich", "peter.hans.froehlich@gmail.com", "@phf")
addPerson("Peter Gonda", "pgonda@google.com")
addPerson("Peter Gonda", "pgonda@google.com", "@pgonda")
addPerson("Peter Gonda", "ptrgonda@gmail.com")
addPerson("Peter Götz", "peter.gtz@gmail.com")
addPerson("Peter Hoyes", "pahoyes@gmail.com")
addPerson("Peter Kieltyka", "peter.kieltyka@pressly.com")
addPerson("Peter Kleiweg", "pkleiweg@xs4all.nl", "@pebbe")
addPerson("Peter Mattis", "petermattis@gmail.com")
addPerson("Peter McKenzie", "petermck@google.com", "@peter-mckenzie")
addPerson("Peter Moody", "peter.moody@gmail.com")
addPerson("Peter Moody", "pmoody@uber.com", "8905@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Moody", "pmoody@uber.com", "@pmoody-")
addPerson("Peter Morjan", "pmorjan@gmail.com", "@pmorjan")
addPerson("Peter Mundy", "go.peter.90@gmail.com", "@peterGo")
addPerson("Peter Nguyen", "peter@mictis.com", "@pengux")
addPerson("Peter S", "speter.go1@gmail.com", "5351@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Sanford", "psanford@sanford.io")
addPerson("Peter Sutherland", "peter@pedrosland.co.uk")
addPerson("Peter Teichman", "peter@teichman.org")
addPerson("Peter Teichman", "pteichman@fastly.com", "@pteichman")
addPerson("Peter Tseng", "ptseng@squareup.com", "@petertseng")
addPerson("Peter Waldschmidt", "peter@waldschmidt.com", "6340@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Waldschmidt", "peter@waldschmidt.com", "@peterwald")
addPerson("Peter Waller", "p@pwaller.net", "5822@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Waller", "p@pwaller.net", "@pwaller")
addPerson("Peter Wathall", "peter.wathall@gmail.com")
addPerson("Peter Weinberger", "pjw@golang.org", "pjw@google.com", "@pjweinb", "@pjweinbgo")
addPerson("Peter Weinberger", "pjw@google.com", "5260@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Williams", "pwil3058@gmail.com", "@pwil3058")
addPerson("Peter Wu", "peter@lekensteyn.nl", "24681@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Wu", "pwu@cloudflare.com", "@Lekensteyn")
addPerson("Peterson, David", "davidtpeterson@gmail.com")
addPerson("Phil Pearl", "philip.j.r.pearl@gmail.com", "@philpearl")
addPerson("Philip Brown", "phil@bolthole.com")
addPerson("Philip Børgesen", "philip.borgesen@gmail.com", "@PhilipBorgesen")
addPerson("Philip Børgesen", "philipborgesen@users.noreply.github.com", "@PhilipBorgesen")
addPerson("Philip Hofer", "phofer@umich.edu")
addPerson("Philip Hofer", "phofer@umich.edu", "9055@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Philip K. Warren", "pkwarren@gmail.com", "@pkwarren")
addPerson("Philip Pearl", "philip.j.r.pearl@gmail.com", "27852@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Philipp Kern", "pkern@google.com")
addPerson("Philippe Lafoucrière", "philippe.lafoucriere@gmail.com")
addPerson("Pierre Durand", "pierredurand@gmail.com", "@pierrre")
addPerson("Pierre Prinetti", "pierreprinetti@gmail.com", "@pierreprinetti")
addPerson("Pierre Roullon", "pierre.roullon@gmail.com", "@proullon")
addPerson("Pieter Droogendijk", "pieter@binky.org.uk", "@PieterD")
addPerson("Pieterjan Lambein", "pieterjan@otainsight.com")
addPerson("Pietro Gagliardi", "pietro10@mac.com", "9190@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Pietro Gagliardi", "pietro10@mac.com", "@andlabs")
addPerson("Piotr Kowalczuk", "p.kowalczuk.priv@gmail.com")
addPerson("Piyush Mishra", "piyush@codeitout.com", "@ofpiyush")
addPerson("Plekhanov Maxim", "kishtatix@gmail.com")
addPerson("Plekhanov Maxim", "kishtatix@gmail.com", "@kishtatik")
addPerson("Pontus Leitzler", "leitzler@gmail.com")
addPerson("Pontus Leitzler", "leitzler@users.noreply.github.com", "@leitzler")
addPerson("Pradeep Singh", "rautelap@gmail.com")
addPerson("Prasanna Swaminathan", "prasanna@mediamath.com", "@pswaminathan")
addPerson("Prasanna V. Loganathar", "pvl@prasannavl.com")
addPerson("Prashant Varanasi", "prashant@prashantv.com", "@prashantv")
addPerson("Prashanth Pai", "ppai@redhat.com")
addPerson("Praveen Bathala", "praveen.bathala@gmail.com")
addPerson("Pravendra Singh", "hackpravj@gmail.com", "@pravj")
addPerson("Preetam Jinka", "pj@preet.am", "@Preetam")
addPerson("ProhtMeyhet", "sebastian@prohtmeyhet.de")
addPerson("Péter Surányi", "speter.go1@gmail.com", "@speter")
addPerson("Péter Szilágyi", "peterke@gmail.com", "5786@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Péter Szilágyi", "peterke@gmail.com", "@karalabe")
addPerson("Qais Patankar", "qaisjp@gmail.com", "@qaisjp")
addPerson("Qi Zhao", "zhaoq@google.com")
addPerson("Qi Zhao", "zhaoq@google.com", "9480@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Qiu", "ilsh1022@gmail.com", "21240@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Qiuxuan Zhu", "ilsh1022@gmail.com", "@Kinghack")
addPerson("QtRoS", "mrqtros@gmail.com", "@QtRoS")
addPerson("Quan Yong Zhai", "qyzhai@gmail.com", "@qyzhai")
addPerson("Quentin Perez", "qperez@ocs.online.net", "@QuentinPerez")
addPerson("Quentin Perez", "quentin@zen.ly")
addPerson("Quentin Renard", "contact@asticode.com", "12775@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Quentin Renard", "contact@asticode.com", "@asticode")
addPerson("Quentin Smith", "quentin@golang.org")
addPerson("Quentin Smith", "quentin@golang.org", "13020@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Quentin Smith", "quentin@golang.org", "@quentinmit")
addPerson("Quinn Slack", "sqs@sourcegraph.com", "@sqs")
addPerson("Quoc-Viet Nguyen", "afelion@gmail.com", "@nqv")
addPerson("R Primus", "rprimus@gmail.com")
addPerson("Radek Sohlich", "sohlich@gmail.com", "@sohlich")
addPerson("Radu Berinde", "radu@cockroachlabs.com", "12530@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Radu Berinde", "radu@cockroachlabs.com", "@RaduBerinde")
addPerson("Rafal Jeczalik", "rjeczalik@gmail.com", "@rjeczalik")
addPerson("Raghavendra Nagaraj", "jamdagni86@gmail.com")
addPerson("Raghavendra Nagaraj", "jamdagni86@gmail.com", "@jamdagni86")
addPerson("Rahul Chaudhry", "rahulchaudhry@chromium.org", "@rahulchaudhry", "*goog")
addPerson("Rahul Chaudhry", "rahulchaudhry@google.com", "5211@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Raj Mahey", "raj.axisos@gmail.com")
addPerson("Rajat Goel", "rajat.goel2010@gmail.com", "@rajatgoel")
addPerson("Rajath Agasthya", "rajathagasthya@gmail.com", "24258@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rajath Agasthya", "rajathagasthya@gmail.com", "@rajathagasthya")
addPerson("Ralph Corderoy", "ralph@inputplus.co.uk")
addPerson("Ralph Corderoy", "ralph.corderoy@gmail.com", "7020@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ralph Corderoy", "ralph@inputplus.co.uk", "10961@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ralph Corderoy", "ralph@inputplus.co.uk", "@RalphCorderoy")
addPerson("Ralph Ligtenberg", "ralph.ligtenberg@gmail.com")
addPerson("Ramazan AYYILDIZ", "rayyildiz@gmail.com", "@rayyildiz")
addPerson("Ramesh Dharan", "dharan@google.com")
addPerson("Ramon Nogueira", "rmn@google.com")
addPerson("Randy Reddig", "ydnar@shaderlab.com")
addPerson("Raph Levien", "raph@google.com", "@raphlinus")
addPerson("Raphael Geronimi", "raphael.geronimi@gmail.com", "@rgeronimi")
addPerson("Raul Silvera", "rsilvera@google.com", "@rauls5382")
addPerson("Raul Silvera", "rauls5382@gmail.com")
addPerson("Raul Silvera", "rsilvera@google.com", "10031@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("RaviTeja", "ravi.tezu@gmail.com")
addPerson("Ray Tung", "rtung@thoughtworks.com", "@raytung")
addPerson("Raymond Kazlauskas", "raima220@gmail.com", "@Rhymond")
addPerson("Rebecca Stambler", "rstambler@golang.org", "16140@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rebecca Stambler", "rstambler@golang.org", "@stamblerre")
addPerson("Reese Wilson", "reese@shinymayhem.com")
addPerson("Reilly Watson", "reillywatson@gmail.com", "@reillywatson")
addPerson("Reinaldo de Souza Jr", "juniorz@gmail.com", "@juniorz")
addPerson("Remi Gillig", "remigillig@gmail.com", "@speps")
addPerson("Rens Rikkerink", "ikkerens@users.noreply.github.com", "@ikkerens")
addPerson("Rhett Garber", "rhettg@gmail.com")
addPerson("Rhys Hiltner", "rhys@justin.tv", "9210@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rhys Hiltner", "rhys@justin.tv", "@rhysh")
addPerson("Ricardo Padilha", "ricardospadilha@gmail.com", "@ricardopadilha")
addPerson("Ricardo Rey", "rrey@google.com")
addPerson("Ricardo Smania", "ricsmania@gmail.com")
addPerson("Ricardo Vegas", "ricardovegas@gmail.com")
addPerson("Riccardo Paccagnella", "ricpacca@gmail.com")
addPerson("Richard Barnes", "rlb@ipv.sx")
addPerson("Richard Crowley", "r@rcrowley.org", "@rcrowley")
addPerson("Richard Dingwall", "rdingwall@gmail.com", "@rdingwall")
addPerson("Richard Eric Gavaletz", "gavaletz@gmail.com", "@gavaletz")
addPerson("Richard Gibson", "richard.gibson@gmail.com", "13081@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Richard Gibson", "richard.gibson@gmail.com", "@gibson042")
addPerson("Richard Miller", "millerresearch@gmail.com", "12217@62eb7196-b449-3ce5-99f1-c037f21e1705", "miller.research@gmail.com")
addPerson("Richard Musiol", "mail@richard-musiol.de", "@neelance", "neelance@gmail.com", "13620@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rick Arnold", "rickarnoldjr@gmail.com", "@rickar")
addPerson("Rick Hudson", "rlh@golang.org", "5186@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rick Hudson", "rlh@golang.org", "@RLH")
addPerson("Rick Sayre", "whorfin@gmail.com")
addPerson("Rijnard van Tonder", "rvantonder@gmail.com", "@rvantonder")
addPerson("Rik van der Heijden", "rikvdh@users.noreply.github.com")
addPerson("Riku Voipio", "riku.voipio@linaro.org", "12765@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Riku Voipio", "riku.voipio@linaro.org", "@suihkulokki")
addPerson("Risto Jaakko Saarelma", "rsaarelm@gmail.com", "@rsaarelm")
addPerson("Rob Bradford", "robert.bradford@intel.com")
addPerson("Rob Earhart", "earhart@google.com")
addPerson("Rob Phoenix", "rob@robphoenix.com", "17946@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rob Phoenix", "rob@robphoenix.com", "@robphoenix")
addPerson("Rob Pike", "r@golang.org", "5015@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rob Pike", "r@golang.org", "@robpike")
addPerson("Rob Strong", "robert.a.strong@gmail.com")
addPerson("Robert Anthony Bellamy", "rabellamy@gmail.com")
addPerson("Robert Bittle", "guywithnose@gmail.com")
addPerson("Robert Figueiredo", "robfig@gmail.com", "@robfig")
addPerson("Robert Gogolok", "gogolok@gmail.com")
addPerson("Robert Griesemer", "gri@golang.org", "5210@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Robert Griesemer", "gri@golang.org", "gri@gri-macbookair.roam.corp.google.com", "@griesemer")
addPerson("Robert Hencke", "robert.hencke@gmail.com", "@rhencke")
addPerson("Robert Iannucci", "iannucci@google.com")
addPerson("Robert Kuska", "rkuska@gmail.com")
addPerson("Robert Obryk", "robryk@gmail.com", "@robryk")
addPerson("Robert Sesek", "rsesek@google.com", "@rsesek")
addPerson("Robert Stepanek", "robert.stepanek@gmail.com", "6062@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Robert Stepanek", "robert.stepanek@gmail.com", "@rsto")
addPerson("Robert Weber", "robertweber95@gmail.com")
addPerson("Robert Xu", "robxu9@gmail.com")
addPerson("Robert-André Mauchin", "zebob.m@gmail.com")
addPerson("Roberto Clapis", "robclap8@gmail.com", "@empijei")
addPerson("Roberto Lublinerman Reitzes", "rluble@google.com", "@rluble")
addPerson("Roberto Selbach", "roberto@selbach.ca")
addPerson("Roberto Selbach", "roberto@selbach.ca", "@robteix")
addPerson("Roberto", "empijei@users.noreply.github.com", "@empijei")
addPerson("Robin Eklind", "r.eklind.87@gmail.com")
addPerson("Rodolfo Carvalho", "rhcarvalho@gmail.com", "@rhcarvalho")
addPerson("Rodolfo Rodriguez", "rodolfobgibson@gmail.com", "@techmexdev")
addPerson("Rodrigo Moraes de Oliveira", "rodrigo.moraes@gmail.com", "@moraes")
addPerson("Rodrigo Rafael Monti Kochenburger", "divoxx@gmail.com", "@divoxx")
addPerson("Roger Guldbrandsen", "roger@kinbiko.com")
addPerson("Roger Pau Monné", "royger@gmail.com", "@royger")
addPerson("Roger Peppe", "rogpeppe@gmail.com", "@rogpeppe")
addPerson("Roger Simms", "roger.simms@gmail.com")
addPerson("Rohit Agarwal", "agarwalrohit@google.com")
addPerson("Roland Illig", "roland.illig@gmx.de")
addPerson("Roland Illig", "roland.illig@gmx.de", "@rillig")
addPerson("Roland Shoemaker", "rolandshoemaker@gmail.com", "12545@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Roland Shoemaker", "rolandshoemaker@gmail.com", "@rolandshoemaker")
addPerson("Rollie Ma", "rollie.ma@gmail.com")
addPerson("Roman Budnikov", "romanyx90@yandex.ru", "23999@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ron Hashimoto", "mail@h2so5.net", "@h2so5")
addPerson("Ronald G. Minnich", "rminnich@gmail.com", "@rminnich")
addPerson("Ronan Guilloux", "ronan.guilloux@gmail.com")
addPerson("Ross Chater", "rdchater@gmail.com", "@rdcx")
addPerson("Ross Light", "light@google.com", "8285@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ross Light", "light@google.com", "@zombiezen")
addPerson("Rowan Marshall", "rowanajmarshall@gmail.com", "@RowanAJMarshall")
addPerson("Rowan Worth", "sqweek@gmail.com", "@sqweek")
addPerson("Ruben Vermeersch", "ruben@rocketeer.be")
addPerson("Rudi Kramer", "rudi.kramer@gmail.com")
addPerson("Rui Ueyama", "ruiu@google.com", "@rui314")
addPerson("Ruslan Nigmatullin", "elessar@dropbox.com")
addPerson("Russ Cox", "rsc@golang.org", "5056@62eb7196-b449-3ce5-99f1-c037f21e1705", "@rsc")
addPerson("Russell Haering", "russellhaering@gmail.com", "@russellhaering")
addPerson("Ryan Boehning", "ryan.boehning@apcera.com", "@y0ssar1an")
addPerson("Ryan Brown", "ribrdb@google.com", "6136@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ryan Brown", "ribrdb@google.com", "@ribrdb")
addPerson("Ryan Canty", "jrcanty@gmail.com")
addPerson("Ryan Dahl", "ry@tinyclouds.org", "@ry")
addPerson("Ryan Hitchman", "hitchmanr@gmail.com", "@rmmh")
addPerson("Ryan Lower", "rpjlower@gmail.com", "@ryanlower")
addPerson("Ryan Seys", "ryan@ryanseys.com", "@ryanseys")
addPerson("Ryan Slade", "ryanslade@gmail.com", "@ryanslade")
addPerson("Ryan Thomas", "rnt@google.com")
addPerson("Ryan Zhang", "ryan.zhang@docker.com")
addPerson("Ryoichi KATO", "ryo1kato@gmail.com", "@ryo1kato")
addPerson("Ryuji Iwata", "qt.luigi@gmail.com", "@qt-luigi")
addPerson("Ryuma Yoshida", "ryuma.y1117@gmail.com", "@ryysud")
addPerson("Ryuzo Yamamoto", "ryuzo.yamamoto@gmail.com", "@dragon3")
addPerson("Rémy Oudompheng", "oudomphe@phare.normalesup.org", "@remyoudompheng")
addPerson("S.Çağlar Onur", "caglar@10ur.org", "@caglar10ur")
addPerson("Sabin Mihai Rapan", "sabin.rapan@gmail.com", "@sabin-rapan")
addPerson("Sagiv Ofek", "sagiv4@gmail.com")
addPerson("Sai Cheemalapati", "saicheems@google.com")
addPerson("Salman Aljammaz", "s@0x65.net", "5220@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Salman Aljammaz", "s@0x65.net", "@saljam")
addPerson("Sam Broughton", "sambroughton@hotmail.co.uk")
addPerson("Sam Hug", "samuel.b.hug@gmail.com", "@samuelhug")
addPerson("Sam Mulube", "sam@thingful.net")
addPerson("Sam Sendelbach", "sbsends@gmail.com")
addPerson("Sam Thorogood", "thorogood@google.com", "@samthor")
addPerson("Sam Whited", "sam@samwhited.com", "11106@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Sam Whited", "sam@samwhited.com", "@SamWhited")
addPerson("Saman Barghi", "saman.b@gmail.com")
addPerson("Sameer Ajmani", "sameer@golang.org", "5265@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Sameer Ajmani", "sameer@golang.org", "@Sajmani")
addPerson("Sami Commerot", "samic@google.com")
addPerson("Sami Pönkänen", "sami.ponkanen@gmail.com")
addPerson("Samuel Cochran", "sj26@sj26.com")
addPerson("Samuel Kelemen", "sckelemen@users.noreply.github.com", "@SCKelemen")
addPerson("Samuel Tan", "samueltan@google.com", "16020@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Samuel Tan", "samueltan@google.com", "@stjj89")
addPerson("Samuele Pedroni", "pedronis@lucediurna.net", "@pedronis")
addPerson("Sandip Bhattacharya", "sandipb@sandipb.net")
addPerson("Sandy McPherson", "sandyzwin6@gmail.com")
addPerson("Sandy", "openset.wang@gmail.com", "@openset")
addPerson("Sanjay Menakuru", "balasanjay@gmail.com", "@balasanjay")
addPerson("Santhosh Kumar Tekuri", "santhosh.tekuri@gmail.com", "@santhosh-tekuri")
addPerson("Santosh Ananthakrishnan", "santosh@dropbox.com")
addPerson("Sarah Adams", "shadams@google.com", "16850@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Sarah Adams", "shadams@google.com", "shadams@shadams0.mtv.corp.google.com", "@adams-sarah")
addPerson("Sarah Chacko", "SJC1982.1992@gmail.com")
addPerson("Sascha Brawer", "sascha@brawer.ch", "@brawer")
addPerson("Sasha Lionheart", "lionhearts@google.com")
addPerson("Satyajit Ranjeev", "s@ranjeev.in")
addPerson("Sawood Alam", "ibnesayeed@gmail.com")
addPerson("Scott Bell", "scott@sctsm.com", "13380@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Scott Bell", "scott@sctsm.com", "@sctb")
addPerson("Scott Crunkleton", "crunk1@gmail.com")
addPerson("Scott Ferguson", "scottwferg@gmail.com", "@scottferg")
addPerson("Scott Lawrence", "bytbox@gmail.com", "@bytbox")
addPerson("Scott Mansfield", "smansfield@netflix.com", "@ScottMansfield")
addPerson("Scott Pakin", "scott+gpg@pakin.org")
addPerson("Sean Chittenden", "seanc@joyent.com", "@sean-")
addPerson("Sean Christopherson", "sean.j.christopherson@intel.com", "@sean-jc")
addPerson("Sean Dolphin", "Sean.Dolphin@kpcompass.com")
addPerson("Sean Harger", "sharger@google.com")
addPerson("Sean Rees", "sean@erifax.org", "@seanrees")
addPerson("SeanBurford", "sburford@google.com", "@sburford")
addPerson("Sebastiaan van Stijn", "github@gone.nl", "@thaJeztah")
addPerson("Sebastian Schmidt", "mrschmidt@google.com")
addPerson("Sebastian Schuberth", "sschuberth@gmail.com")
addPerson("Sebastian Willing", "sewi.de@gmail.com")
addPerson("Sebastien Binet", "seb.binet@gmail.com", "@sbinet")
addPerson("Sebastien Binet", "binet@cern.ch")
addPerson("Sebastien Binet", "seb.binet@gmail.com", "5810@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Seebs", "seebs@sourcegraph.com", "@seebs")
addPerson("Seiji Takahashi", "timaki.st@gmail.com", "15570@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Seiji Takahashi", "timaki.st@gmail.com", "@timakin")
addPerson("Sergey 'SnakE' Gromov", "snake.scaly@gmail.com", "@snake-scaly")
addPerson("Sergey Frolov", "sfrolov@google.com")
addPerson("Sergey Lemeshkin", "sergeilem@gmail.com")
addPerson("Sergey Lukjanov", "me@slukjanov.name")
addPerson("Sergey Mishin", "sergeymishine@gmail.com", "@dartkron")
addPerson("Sergey Mudrik", "sergey.mudrik@gmail.com")
addPerson("Sergey Rogulenko", "rogulenko@laserlike.com")
addPerson("Sergey Semin", "gray12511@gmail.com", "@Gray5")
addPerson("Sergiusz Bazanski", "bazanski@gmail.com")
addPerson("Serhat Şevki Dinçer", "jfcgauss@gmail.com")
addPerson("Serhii Aheienko", "serhii.aheienko@gmail.com")
addPerson("Serhii Bratus", "sergiibratus@gmail.com")
addPerson("Seshachalam Malisetti", "abbiya@gmail.com")
addPerson("Seth Greenstein", "sgreens@google.com")
addPerson("Seth Hoenig", "seth.a.hoenig@gmail.com", "@shoenig")
addPerson("Seth Hollyman", "shollyman@google.com")
addPerson("Seth Shelnutt", "Shelnutt2@gmail.com")
addPerson("Seth Vargo", "sethvargo@gmail.com", "@sethvargo")
addPerson("Shahar Kohanim", "skohanim@gmail.com")
addPerson("Shahar Kohanim", "skohanim@gmail.com", "12700@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Shamil Garatuev", "garatuev@gmail.com", "@FluorescentTouch")
addPerson("Shane Hansen", "shanemhansen@gmail.com", "@shanemhansen")
addPerson("Shannon Wynter", "freman@users.noreply.github.com")
addPerson("Shaun Dunning", "shaun.dunning@uservoice.com")
addPerson("Shawn Pearce", "sop@google.com")
addPerson("Shawn Smith", "shawn.p.smith@gmail.com", "7245@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Shawn Walker-Salas", "shawn.walker@oracle.com", "7291@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Shawn Walker-Salas", "shawn.walker@oracle.com", "@binarycrusader")
addPerson("Shenghou Ma", "minux@golang.org", "minux.ma@gmail.com", "@minux")
addPerson("Shengyu Zhang", "shengyu.zhang@chaitin.com", "@SilverRainZ")
addPerson("Shi Han Ng", "shihanng@gmail.com")
addPerson("Shinichi Nishimura", "nshmura.s@gmail.com")
addPerson("Shinji Tanaka", "shinji.tanaka@gmail.com", "@stanaka")
addPerson("Shintaro Kaneko", "kaneshin0120@gmail.com", "@kaneshin")
addPerson("Shivakumar GN", "shivakumar.gn@gmail.com", "@shivakumargn")
addPerson("Shivansh Rai", "shivansh@freebsd.org", "@shivansh")
addPerson("Shubheksha Jalan", "jshubheksha@gmail.com")
addPerson("Shun Fan", "sfan@google.com")
addPerson("Shushan Chai", "chaishushan@gmail.com", "5095@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Shuvo Debnath", "shuvo.debnath@gmail.com")
addPerson("Silvan Jegen", "s.jegen@gmail.com", "@Shugyousha")
addPerson("Simon Inman", "simoninman@google.com")
addPerson("Simon Jefford", "simon.jefford@gmail.com", "@simonjefford")
addPerson("Simon Johansson", "simon@simonjohansson.com")
addPerson("Simon Ordish", "simon.ordish@masagi.co.uk")
addPerson("Simon Rawet", "simon@rawet.se", "@KilledKenny")
addPerson("Simon Thulbourn", "simon+github@thulbourn.com", "@sthulb")
addPerson("Simon Whitehead", "chemnova@gmail.com", "@simon-whitehead")
addPerson("Simone Carletti", "weppos@gmail.com")
addPerson("Sina Siadat", "siadat@gmail.com", "14140@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Sina Siadat", "siadat@gmail.com", "@siadat")
addPerson("Sokolov Yura", "funny.falcon@gmail.com", "@funny-falcon")
addPerson("Song Gao", "song@gao.io", "@songgao")
addPerson("Spencer Nelson", "s@spenczar.com", "10000@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Spencer Nelson", "s@spenczar.com", "@spenczar")
addPerson("Spencer Tung", "spencertung@google.com", "20245@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Spring Mc", "heresy.mc@gmail.com", "@mcspring")
addPerson("Srdjan Petrovic", "spetrovic@google.com", "6605@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Srdjan Petrovic", "spetrovic@google.com", "@spetrovic77")
addPerson("Sridhar Venkatakrishnan", "sridhar@laddoo.net", "9665@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Sridhar Venkatakrishnan", "sridhar@laddoo.net", "@sridharv")
addPerson("StalkR", "stalkr@stalkr.net")
addPerson("Stan Chan", "stanchan@gmail.com")
addPerson("Stan Schwertly", "stan@schwertly.com", "@Stantheman")
addPerson("Stanislav Afanasev", "php.progger@gmail.com", "@superstas")
addPerson("Stanislav Paskalev", "kshorg@gmail.com")
addPerson("Stanislav Petrov", "s.e.petrov@gmail.com")
addPerson("Steeve Morin", "steeve.morin@gmail.com", "@steeve")
addPerson("Stefan Schmidt", "stschmidt@google.com")
addPerson("Stepan Shabalin", "neverliberty@gmail.com", "@Neverik")
addPerson("Stephan Renatus", "srenatus@chef.io", "@srenatus")
addPerson("Stephen Gutekanst", "stephen.gutekanst@gmail.com")
addPerson("Stephen L", "36011612+steuhs@users.noreply.github.com", "@steuhs")
addPerson("Stephen Lewis", "stephen@sock.org.uk")
addPerson("Stephen McQuay (smcquay)", "stephen@mcquay.me", "@smcquay")
addPerson("Stephen McQuay", "stephen@mcquay.me", "13960@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Stephen Searles", "stephens2424@gmail.com", "stephen.searles@gmail.com", "@stephens2424")
addPerson("Stephen Solka", "stephen0q@gmail.com")
addPerson("Stephen Sugden", "glurgle@gmail.com")
addPerson("Stephen Weinberg", "stephen@q5comm.com", "stephenmw@google.com", "@stephenmw")
addPerson("Stephen Weinberg", "stephenmw@google.com", "13156@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Steve Francia", "spf@golang.org", "14840@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Steve Francia", "spf@golang.org", "@spf13")
addPerson("Steve Gilbert", "stevegilbert23@gmail.com")
addPerson("Steve McCoy", "mccoyst@gmail.com", "@mccoyst")
addPerson("Steve Phillips", "steve@tryingtobeawesome.com", "@elimisteve")
addPerson("Steve Reed", "sreed@zulily.com")
addPerson("Steve Streeting", "steve@stevestreeting.com", "@sinbad")
addPerson("Steve Wills", "steve@mouf.net")
addPerson("Steven Berlanga", "zabawaba99@gmail.com")
addPerson("Steven Buss", "sbuss@google.com")
addPerson("Steven Elliot Harris", "seharris@gmail.com", "@seh")
addPerson("Steven Erenst", "stevenerenst@gmail.com")
addPerson("Steven Hartland", "steven.hartland@multiplay.co.uk", "10210@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Steven Hartland", "steven.hartland@multiplay.co.uk", "@stevenh")
addPerson("Steven Kabbes", "stevenkabbes@gmail.com")
addPerson("Steven Ruckdashel", "steve.ruckdashel@gmail.com")
addPerson("Steven Selph", "sselph@google.com")
addPerson("Steven Wilkin", "stevenwilkin@gmail.com", "@stevenwilkin")
addPerson("Stéphane Travostino", "stephane.travostino@gmail.com", "@1player")
addPerson("Sue Spence", "virtuallysue@gmail.com")
addPerson("Sugu Sougoumarane", "ssougou@gmail.com", "@sougou")
addPerson("Suharsh Sivakumar", "suharshs@google.com", "@suharshs")
addPerson("Suriyaa Sundararuban", "isc.suriyaa@gmail.com")
addPerson("Suriyaa Sundararuban", "suriyaasundararuban@gmail.com", "27899@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Suriyaa Sundararuban", "suriyaasundararuban@gmail.com", "@SuriyaaKudoIsc")
addPerson("Surma Surma", "surma@google.com")
addPerson("Sutton Yamanashi", "syamanashi@gmail.com")
addPerson("Suyash", "dextrous93@gmail.com", "15015@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Suyash", "dextrous93@gmail.com", "@suyash")
addPerson("Suzy Mueller", "suzmue@golang.org", "21300@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Suzy Mueller", "suzmue@golang.org", "@suzmue")
addPerson("Sven Almgren", "sven@tras.se", "@blindmatrix")
addPerson("Sven Blumenstein", "svbl@google.com")
addPerson("Sven Dowideit", "svendowideit@home.org.au")
addPerson("Sylvain Zimmer", "sylvain@sylvainzimmer.com", "@sylvinus")
addPerson("Syohei YOSHIDA", "syohex@gmail.com", "@syohex")
addPerson("Sébastien Paolacci", "sebastien.paolacci@gmail.com", "@spaolacci")
addPerson("Sébastien Portebois", "sportebois@gmail.com")
addPerson("TSUYUSATO Kitsune", "make.just.on@gmail.com")
addPerson("Tad Fisher", "tadfisher@gmail.com")
addPerson("Tad Glines", "tad.glines@gmail.com", "@tadglines")
addPerson("Taesu Pyo", "pyotaesu@gmail.com", "@bigflood")
addPerson("Tair Sabirgaliev", "tair.sabirgaliev@gmail.com")
addPerson("Taj Khattra", "taj.khattra@gmail.com", "@tkhattra")
addPerson("Takashi Matsuo", "tmatsuo@google.com")
addPerson("Takayoshi Nishida", "takayoshi.nishida@gmail.com", "@takp")
addPerson("Takuto Ikuta", "tikuta@google.com", "@atetubou")
addPerson("Takuya Sato", "takuya0219@gmail.com")
addPerson("Takuya Ueda", "uedatakuya@gmail.com", "@tenntenn")
addPerson("Tal Shprecher", "tshprecher@gmail.com", "11915@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tal Shprecher", "tshprecher@gmail.com", "@tshprecher")
addPerson("Tamir Duberstein", "tamird@gmail.com", "7955@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tamir Duberstein", "tamird@gmail.com", "@tamird")
addPerson("Tamás Gulácsi", "tgulacsi78@gmail.com")
addPerson("Tao Wang", "twang2218@gmail.com")
addPerson("Tardis Xu", "xiaoxubeii@gmail.com")
addPerson("Tarmigan Casebolt", "tarmigan@gmail.com")
addPerson("Tarmigan Casebolt", "tarmigan@gmail.com", "9697@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Taro Aoki", "aizu.s1230022@gmail.com", "@ktr0731")
addPerson("Tarrant", "tarrant@keyneston.com", "@tarrant")
addPerson("Taru Karttunen", "taruti@taruti.net", "@taruti")
addPerson("Tatsuhiro Tsujikawa", "tatsuhiro.t@gmail.com", "@tatsuhiro-t")
addPerson("Taufiq Rahman", "taufiqrx8@gmail.com", "@Inconnu08")
addPerson("Ted Hahn", "teh@uber.com")
addPerson("Ted Kornish", "golang@tedkornish.com", "@tedkornish")
addPerson("Tejasvi Nareddy", "tejunareddy@gmail.com")
addPerson("Terin Stock", "terinjokes@gmail.com", "25203@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Terin Stock", "terinjokes@gmail.com", "@terinjokes")
addPerson("Terrel Shumway", "gopher@shumway.us")
addPerson("Terry Wong", "terry.wong2@yahoo.com")
addPerson("Tess Rinearson", "tess.rinearson@gmail.com")
addPerson("Tetsuo Kiso", "tetsuokiso9@gmail.com", "@tetsuok")
addPerson("Than McIntosh", "thanm@google.com", "14020@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Than McIntosh", "thanm@google.com", "@thanm")
addPerson("Thanabodee Charoenpiriyakij", "wingyminus@gmail.com", "19095@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Thanabodee Charoenpiriyakij", "wingyminus@gmail.com", "@wingyplus")
addPerson("Theo Schlossnagle", "jesus@lethargy.org")
addPerson("Thiago Farina", "tfarina@chromium.org")
addPerson("Thiago Fransosi Farina", "thiago.farina@gmail.com", "@thiagofarina")
addPerson("Thibault Falque", "thibault_falque@ens.univ-artois.fr")
addPerson("Thibaut Colar", "tcolar@colar.net")
addPerson("Thomas Alan Copeland", "talan.copeland@gmail.com", "@talanc")
addPerson("Thomas Bonfort", "thomas.bonfort@gmail.com", "@tbonfort")
addPerson("Thomas Bouldin", "inlined@google.com")
addPerson("Thomas Bruyelle", "thomas.bruyelle@gmail.com", "@tbruyelle")
addPerson("Thomas Bushnell, BSG", "tbushnell@google.com")
addPerson("Thomas Desrosiers", "thomasdesr@gmail.com", "@thomaso-mirodin")
addPerson("Thomas Habets", "habets@google.com", "@ThomasHabets")
addPerson("Thomas Johnson", "NTmatter@gmail.com")
addPerson("Thomas Kappler", "tkappler@gmail.com", "@thomas11")
addPerson("Thomas Meson", "zllak@hycik.org")
addPerson("Thomas Sauvaget", "sauvaget.thomas@gmail.com")
addPerson("Thomas Wanielista", "tomwans@gmail.com", "@tomwans")
addPerson("Thomas de Zeeuw", "thomasdezeeuw@gmail.com", "@Thomasdezeeuw")
addPerson("Thorben Krueger", "thorben.krueger@gmail.com", "@benthor")
addPerson("Thordur Bjornsson", "thorduri@secnorth.net", "@thorduri")
addPerson("Tiago Queiroz", "contato@tiago.eti.br")
addPerson("Tilman Dilo", "tilman.dilo@gmail.com", "@tdilo")
addPerson("Tim 'mithro' Ansell", "tansell@google.com")
addPerson("Tim Burks", "timburks@google.com")
addPerson("Tim Cooijmans", "timcooijmans@gmail.com", "@timcooijmans")
addPerson("Tim Cooper", "tim.cooper@layeh.com", "24935@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tim Cooper", "tim.cooper@layeh.com", "@bontibon")
addPerson("Tim Ebringer", "tim.ebringer@gmail.com")
addPerson("Tim Heckman", "t@heckman.io", "@theckman")
addPerson("Tim Henderson", "tim.tadh@gmail.com", "@timtadh")
addPerson("Tim Hockin", "thockin@google.com", "@thockin")
addPerson("Tim Shen", "timshen@google.com", "@timshen91")
addPerson("Tim St. Clair", "stclair@google.com")
addPerson("Tim Swast", "swast@google.com", "@tswast")
addPerson("Tim Wright", "tenortim@gmail.com", "25424@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tim Wright", "tenortim@gmail.com", "@tenortim")
addPerson("Tim Xu", "xiaoxubeii@gmail.com", "@xiaoxubeii")
addPerson("Tim", "tdhutt@gmail.com", "@Timmmm")
addPerson("Timo Savola", "timo.savola@gmail.com", "@tsavola")
addPerson("Timothy Raymond", "xtjraymondx@gmail.com")
addPerson("Timothy Studd", "tim@timstudd.com", "@timstudd")
addPerson("Tipp Moseley", "tipp@google.com", "@tippjammer")
addPerson("Tobias Assarsson", "tobias.assarsson@gmail.com")
addPerson("Tobias Columbus", "tobias.columbus@gmail.com", "@tc-0")
addPerson("Tobias Klauser", "tobias.klauser@gmail.com", "@tklauser")
addPerson("Tobias Klauser", "tklauser@distanz.ch", "@tklauser")
addPerson("Tobias Klauser", "tobias.klauser@gmail.com", "19560@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tobias Schottdorf", "tobias.schottdorf@gmail.com")
addPerson("Toby Burress", "kurin@google.com")
addPerson("Todd Neal", "todd@tneal.org", "12836@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Todd Neal", "todd@tneal.org", "@tzneal")
addPerson("Todd Neal", "tolchz@gmail.com", "8481@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Todd Rafferty", "webRat@gmail.com")
addPerson("Todd Wang", "toddwang@gmail.com", "@tatatodd")
addPerson("Tom Bergan", "tombergan@google.com", "10820@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tom Bergan", "tombergan@google.com", "@tombergan")
addPerson("Tom Elliott", "tom.w.elliott@gmail.com")
addPerson("Tom Heng", "zhm20070928@gmail.com", "7380@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tom Heng", "zhm20070928@gmail.com", "@tomheng")
addPerson("Tom Holmes", "tom@wandb.com")
addPerson("Tom Lanyon", "tomlanyon@google.com", "@tomlanyon")
addPerson("Tom Levy", "tomlevy93@gmail.com", "@tom93")
addPerson("Tom Limoncelli", "tal@whatexit.org", "@TomOnTime")
addPerson("Tom Linford", "tomlinford@gmail.com", "@tomlinford")
addPerson("Tom Thorogood", "me+google@tomthorogood.co.uk")
addPerson("Tom Thorogood", "me+google@tomthorogood.co.uk", "@tmthrgd")
addPerson("Tom Wilkie", "tom.wilkie@gmail.com", "tom@weave.works", "@tomwilkie")
addPerson("Tomas Basham", "tomasbasham@gmail.com")
addPerson("Tommy Schaefer", "tommy.schaefer@teecom.com", "@tommyschaefer")
addPerson("Tonis Tiigi", "tonistiigi@gmail.com", "@tonistiigi")
addPerson("Tony Reix", "Tony.Reix@bull.net", "16326@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tony Walker", "walkert.uk@gmail.com", "@walkert")
addPerson("Tooru Takahashi", "tooru.takahashi134@gmail.com", "@tooru")
addPerson("Tor Andersson", "tor.andersson@gmail.com", "@ccxvii")
addPerson("Tormod Erevik Lea", "tormodlea@gmail.com", "@tormoder")
addPerson("Toshiki Shima", "haya14busa@gmail.com", "16861@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Totoro W", "tw19881113@gmail.com", "5975@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Travis Beatty", "travisby@gmail.com")
addPerson("Travis Bischel", "travis.bischel@gmail.com", "26898@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Travis Bischel", "travis.bischel@gmail.com", "@twmb")
addPerson("Travis Cline", "travis.cline@gmail.com", "@tmc")
addPerson("Trevor Prater", "trevor.prater@gmail.com")
addPerson("Trey Lawrence", "lawrence.trey@gmail.com", "@TreyLawrence")
addPerson("Tristan Colgate", "tcolgate@gmail.com", "@tcolgate")
addPerson("Tristan Ooohry", "ooohry@gmail.com", "@golantrevize")
addPerson("Tristan Rice", "rice@fn.lc")
addPerson("Troels Thomsen", "troels@thomsen.io", "@tt")
addPerson("Trung Nguyen", "trung.n.k@gmail.com")
addPerson("Tugdual Saunier", "tucksaun@gmail.com", "23797@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tugdual Saunier", "tugdual.saunier@gmail.com")
addPerson("Tuo Shan", "shantuo@google.com", "12855@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tuo Shan", "shantuo@google.com", "@shantuo")
addPerson("Tuo Shan", "sturbo89@gmail.com")
addPerson("Tuo Shan", "sturbo89@gmail.com", "12857@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tw", "tw19881113@gmail.com", "@tw4452852")
addPerson("Tyler Bui-Palsulich", "tbp@google.com")
addPerson("Tyler Bui-Palsulich", "tpalsulich@google.com", "@tbpg")
addPerson("Tyler Bunnell", "tylerbunnell@gmail.com", "@tylerb")
addPerson("Tyler Compton", "xaviosx@gmail.com")
addPerson("Tyler Treat", "ttreat31@gmail.com")
addPerson("Tyler Treat", "tyler.treat@apcera.com")
addPerson("Tyler Yahn", "tyler.yahn@urbanairship.com")
addPerson("Tzu-Jung Lee", "roylee17@currant.com", "@roylee17")
addPerson("Ugorji Nwoke", "ugorji@gmail.com", "@ugorji")
addPerson("Ulderico Cirello", "uldericofilho@gmail.com", "7250@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ulrich Kunitz", "uli.kunitz@gmail.com", "@ulikunitz")
addPerson("Umang Parmar", "umangjparmar@gmail.com", "@darkLord19")
addPerson("Uriel Mangado", "uriel@berlinblue.org", "@uriel")
addPerson("Urvil Patel", "patelurvil38@gmail.com", "@urvil38")
addPerson("Uttam C Pawar", "uttam.c.pawar@intel.com", "@uttampawar")
addPerson("Vadim Grek", "vadimprog@gmail.com", "@brainiac84")
addPerson("Val Polouchkine", "vpolouch@justin.tv")
addPerson("Valentin Vidic", "vvidic@valentin-vidic.from.hr", "@vvidic")
addPerson("Vanesa", "mail@vanesaortiz.com")
addPerson("Vega Garcia Luis Alfonso", "vegacom@gmail.com", "@vegacom")
addPerson("Venil Noronha", "veniln@vmware.com", "@venilnoronha")
addPerson("Veselkov Konstantin", "kostozyb@gmail.com", "@KosToZyB")
addPerson("Viacheslav Poturaev", "vearutop@gmail.com", "@vearutop")
addPerson("Vicki Niu", "vicki.niu@gmail.com")
addPerson("Victor Chudnovsky", "vchudnov@google.com")
addPerson("Victor Vrantchan", "vrancean+github@gmail.com", "@groob")
addPerson("Vignesh Ramachandra", "vickyramachandra@gmail.com")
addPerson("Vikas Kedia", "vikask@google.com")
addPerson("Vikram Jadhav", "vikramcse.10@gmail.com")
addPerson("Vince0000", "522341976@qq.com")
addPerson("Vincent Batts", "vbatts@hashbangbash.com", "@vbatts")
addPerson("Vincent Bernat", "vincent@bernat.ch")
addPerson("Vincent Demeester", "vinc.demeester@gmail.com")
addPerson("Vincent Vanackere", "vincent.vanackere@gmail.com", "@vanackere")
addPerson("Vincenzo Pupillo", "v.pupillo@gmail.com", "24134@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Vinu Rajashekhar", "vinutheraj@gmail.com", "@vinuraja")
addPerson("Vishvananda Ishaya", "vishvananda@gmail.com", "@vishvananda")
addPerson("Vitor De Mario", "vitordemario@gmail.com", "@vdemario")
addPerson("Vitor De Mario", "vitor.demario@mendelics.com.br")
addPerson("Vivek Ayer", "vivek@restlessbandit.com")
addPerson("Vivek Sekhar", "vivek@viveksekhar.ca")
addPerson("Vlad Krasnov", "vlad@cloudflare.com", "7601@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Vlad Krasnov", "vlad@cloudflare.com", "@vkrasnov")
addPerson("Vladimir Kovpak", "cn007b@gmail.com", "@cn007b")
addPerson("Vladimir Kuzmin", "vkuzmin@uber.com", "26409@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Vladimir Kuzmin", "vkuzmin@uber.com", "@vkuzmin-uber")
addPerson("Vladimir Mezentsev", "vladimir.mezentsev@oracle.com")
addPerson("Vladimir Mihailenco", "vladimir.webdev@gmail.com", "@vmihailenco")
addPerson("Vladimir Nikishenko", "vova616@gmail.com", "@vova616")
addPerson("Vladimir Stefanovic", "vladimir.stefanovic@imgtec.com", "@vstefanovic")
addPerson("Vladimir Stefanovic", "vladimir.stefanovic@mips.com", "15150@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Vladimir Varankin", "nek.narqo@gmail.com")
addPerson("Vladimir Varankin", "vladimir@varank.in", "@narqo")
addPerson("Volker Dobler", "dr.volker.dobler@gmail.com", "5050@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Volker Dobler", "dr.volker.dobler@gmail.com", "@vdobler")
addPerson("Volodymyr Paprotski", "vpaprots@ca.ibm.com", "@vpaprots")
addPerson("W. Mark Kubacki", "wmark@hurrikane.de")
addPerson("W. Trevor King", "wking@tremily.us")
addPerson("Wade Simmons", "wade@wades.im", "@wadey")
addPerson("Waldemar Quevedo", "waldemar.quevedo@gmail.com")
addPerson("Walter Poupore", "wpoupore@google.com")
addPerson("Wander Lairson Costa", "wcosta@mozilla.com", "@walac")
addPerson("Warren Fernandes", "warren.f.fernandes@gmail.com")
addPerson("Warren Fernandes", "warren.f.fernandes@gmail.com", "@wfernandes")
addPerson("Warren Harper", "warrenjharper@gmail.com")
addPerson("Wayne Ashley Berry", "wayneashleyberry@gmail.com", "@wayneashleyberry")
addPerson("Wedson Almeida Filho", "wedsonaf@google.com", "12200@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Wedson Almeida Filho", "wedsonaf@google.com", "@wedsonaf")
addPerson("Weerasak Chongnguluam", "singpor@gmail.com")
addPerson("Weerasak Chongnguluam", "singpor@gmail.com", "@iporsut")
addPerson("Wei Fu", "fhfuwei@163.com")
addPerson("Wei Guangjing", "vcc.163@gmail.com", "@wgj-zz")
addPerson("Wei Xiao", "Wei.Xiao@arm.com", "16227@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Wei Xiao", "wei.xiao@arm.com", "@williamweixiao")
addPerson("Weichao Tang", "tevic.tt@gmail.com")
addPerson("Weichao Tang", "tevic.tt@gmail.com", "@Tevic")
addPerson("Wembley G. Leach, Jr", "wembley.gl@gmail.com", "@wemgl")
addPerson("Wes Widner", "kai5263499@gmail.com")
addPerson("Wesley Hill", "hakobyte@gmail.com")
addPerson("WhisperRain", "2516435583@qq.com", "@WhisperRain")
addPerson("Wil Selwood", "wselwood@gmail.com")
addPerson("Wil Selwood", "wselwood@gmail.com", "@wselwood")
addPerson("Wilfried Teiken", "wteiken@google.com")
addPerson("Will Beason", "willbeason@gmail.com", "@willbeason")
addPerson("Will Bond", "will@wbond.net", "9815@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Will Chan", "willchan@google.com")
addPerson("Will Faught", "will.faught@gmail.com", "@willfaught")
addPerson("Will Madison", "wmadisonDev@GMail.com")
addPerson("Will Morrow", "wmorrow.qdt@qualcommdatacenter.com")
addPerson("Will Norris", "willnorris@google.com", "@willnorris")
addPerson("Will Storey", "will@summercat.com", "@horgh")
addPerson("Will", "willow.pine.2011@gmail.com")
addPerson("Willem van der Schyff", "willemvds@gmail.com", "@willemvds")
addPerson("William Chan", "willchan@chromium.org", "@willchan", "*goog")
addPerson("William Chang", "mr.williamchang@gmail.com", "27627@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("William Orr", "will@worrbase.com", "@worr")
addPerson("Wisdom Omuya", "deafgoat@gmail.com", "@deafgoat")
addPerson("Wèi Cōngruì", "crvv.mail@gmail.com", "22895@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Wèi Cōngruì", "crvv.mail@gmail.com", "@crvv")
addPerson("XAX", "xaxiclouddev@gmail.com")
addPerson("Xargin", "cao1988228@163.com")
addPerson("Xi Ruoyao", "xry23333@gmail.com")
addPerson("Xia Bin", "snyh@snyh.org", "12161@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Xia Bin", "snyh@snyh.org", "@snyh")
addPerson("Xing Xing", "mikespook@gmail.com", "@mikespook")
addPerson("Xudong Zhang", "felixmelon@gmail.com")
addPerson("Xudong Zheng", "7pkvm5aw@slicealias.com", "@xudongzheng")
addPerson("Xuyang Kang", "xuyang@google.com")
addPerson("Xuyang Kang", "xuyangkang@gmail.com", "@xuyangkang")
addPerson("Yaacov Akiba Slama", "yaslama@gmail.com")
addPerson("Yamagishi Kazutoshi", "ykzts@desire.sh")
addPerson("Yann Hodique", "yhodique@google.com", "@sigma")
addPerson("Yann Kerhervé", "yann.kerherve@gmail.com", "@yannk")
addPerson("Yaron de Leeuw", "jarondl@google.com")
addPerson("Yasha Bubnov", "girokompass@gmail.com")
addPerson("Yasha Bubnov", "girokompass@gmail.com", "@ybubnov")
addPerson("Yasser Abdolmaleki", "yasser@yasser.ca", "@spring1843")
addPerson("Yasuharu Goto", "matope.ono@gmail.com", "8070@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Yasuharu Goto", "matope.ono@gmail.com", "@matope")
addPerson("Yasuhiro MATSUMOTO", "mattn.jp@gmail.com", "5025@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Yasuhiro Matsumoto", "mattn.jp@gmail.com", "@mattn")
addPerson("Yazen2017", "yazen.shunnar@gmail.com", "@yazsh")
addPerson("Yestin", "ylh@pdx.edu", "@ylih")
addPerson("Yesudeep Mangalapilly", "yesudeep@google.com", "@gorakhargosh")
addPerson("Ying Zou", "xpzouying@gmail.com")
addPerson("Yissakhar Z. Beck", "yissakhar.beck@gmail.com", "@DeedleFake")
addPerson("Yogesh Desai", "er.yogeshdesai@gmail.com")
addPerson("Yongjian Xu", "i3dmaster@gmail.com", "@i3d")
addPerson("Yoon", "learder@gmail.com")
addPerson("Yoshi Yamaguchi", "ymotongpoo@gmail.com")
addPerson("Yoshiya Hinosawa", "stibium121@gmail.com")
addPerson("Yoshiyuki Kanno", "nekotaroh@gmail.com", "@mocchira")
addPerson("Yuki Yugui Sonoda", "yugui@google.com")
addPerson("Yury Smolsky", "yury@smolsky.by", "26536@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Yury Smolsky", "yury@smolsky.by", "@ysmolsky")
addPerson("Yusuke Kagiwada", "block.rxckin.beats@gmail.com", "@Jxck")
addPerson("Yuusei Kuwana", "kuwana@kumama.org", "@kumama")
addPerson("Yuval Pavel Zholkover", "paulzhol@gmail.com", "5781@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Yuval Pavel Zholkover", "paulzhol@gmail.com", "@paulzhol")
addPerson("Yuwei Ba", "xiaobayuwei@gmail.com")
addPerson("Yuya Kusakabe", "yuya.kusakabe@gmail.com")
addPerson("Yves Junqueira", "yves.junqueira@gmail.com", "@nictuku")
addPerson("ZZMarquis", "zhonglingjian3821@163.com", "@ZZMarquis")
addPerson("Zac Bergquist", "zbergquist99@gmail.com", "9250@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Zac Bergquist", "zbergquist99@gmail.com", "@zmb3")
addPerson("Zach Auclair", "zach101@gmail.com")
addPerson("Zach Bintliff", "zbintliff@gmail.com", "@zbintliff")
addPerson("Zach Gershman", "zachgersh@gmail.com", "6360@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Zachary Madigan", "zachary.madigan@apollovideo.com")
addPerson("Zachary Amsden", "zach@thundertoken.com")
addPerson("Zachary Amsden", "zach@thundertoken.com", "@zamsden")
addPerson("Zachary Gershman", "zgershman@pivotal.io")
addPerson("Zachary Madigan", "zacharywmadigan@gmail.com", "25899@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Zachary Romero", "zacromero3@gmail.com")
addPerson("Zacharya", "zacharya19@gmail.com")
addPerson("Zaq? Wiedmann", "zaquestion@gmail.com")
addPerson("Zero King", "l2d4y3@gmail.com")
addPerson("Zev Goldstein", "zev.goldstein@gmail.com", "@zevdg")
addPerson("Zezhou Yu", "ray.zezhou@gmail.com")
addPerson("Zhang Qiang", "dotslash.lu@gmail.com")
addPerson("Zhang Wei", "zhangwei198900@gmail.com")
addPerson("Zheng Dayu", "davidzheng23@gmail.com", "@ceshihao")
addPerson("Zheng Xu", "zheng.xu@arm.com")
addPerson("Zheng Xu", "zheng.xu@arm.com", "@Zheng-Xu")
addPerson("Zheng Yang", "zhengyang4k@gmail.com")
addPerson("Zhengyu He", "hzy@google.com")
addPerson("ZhiFeng Hu", "hufeng1987@gmail.com")
addPerson("Zhongpeng Lin", "zplin@uber.com", "@linzhp")
addPerson("Zhongwei Yao", "zhongwei.yao@arm.com", "@zhongweiy")
addPerson("Zhou Peng", "p@ctriple.cn", "26955@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Zhou Peng", "p@ctriple.cn", "@ctriple")
addPerson("Zhuo Meng", "mengzhuo1203@gmail.com", "7530@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ziad Hatahet", "hatahet@gmail.com", "@hatahet")
addPerson("Zorion Arrizabalaga", "zorionk@gmail.com", "@zorion")
addPerson("a.lukinykh", "a.lukinykh@xsolla.com")
addPerson("abdul.mannan", "abdul.mannan@thirdbridge.com")
addPerson("acoshift", "acoshift@gmail.com")
addPerson("adrienpetel", "peteladrien@gmail.com", "@feliixx")
addPerson("aecdanjun", "aeciodantasjunior@gmail.com", "@aecdanjun")
addPerson("ajackura", "ajackura@localhost")
addPerson("ajnirp", "ajnirp@users.noreply.github.com", "@ajnirp")
addPerson("akushman", "zeusakm@gmail.com")
addPerson("alexpantyukhin", "apantykhin@gmail.com", "@alexpantyukhin")
addPerson("alkesh26", "alkesh26@gmail.com", "@alkesh26")
addPerson("alokc", "alokkr1090@gmail.com")
addPerson("alpha.wong", "alpha.wong@lalamove.com")
addPerson("amandhora", "aman.usa07@gmail.com")
addPerson("amirrezaask", "raskarpour@gmail.com")
addPerson("anatoly techtonik", "techtonik@gmail.com")
addPerson("andrew werner", "andrew@upthere.com")
addPerson("andrey mirtchovski", "mirtchovski@gmail.com", "@mirtchovski")
addPerson("andrius4669", "andrius4669@gmail.com", "@andrius4669")
addPerson("andy", "andyjgarfield@gmail.com")
addPerson("apoorvam", "app.apoorva@gmail.com")
addPerson("areski", "areski@gmail.com", "@areski")
addPerson("as", "as.utf8@gmail.com", "@as")
addPerson("asgaines", "andrew.s.gaines@gmail.com")
addPerson("avi", "hi@avi.im")
addPerson("aviau", "alexandre@alexandreviau.net")
addPerson("avsharapov", "analytics.kzn@gmail.com", "@avsharapov")
addPerson("awaw fumin", "awawfumin@gmail.com", "@fumin")
addPerson("ayanamist", "ayanamist@gmail.com", "@ayanamist")
addPerson("azat", "kaumov.a.r@gmail.com", "@akaumov")
addPerson("azretkenzhaliev", "azret.kenzhaliev@gmail.com")
addPerson("bbrodriges", "bender.rodriges@gmail.com")
addPerson("benjamin-rood", "bisr@icloud.com")
addPerson("berkant ipek", "41230766+0xbkt@users.noreply.github.com", "@0xbkt")
addPerson("bogem", "albertnigma@gmail.com", "@bogem")
addPerson("bontequero", "bontequero@gmail.com", "@bontequero")
addPerson("boreq", "boreq@sourcedrops.com")
addPerson("buddhamagnet", "buddhamagnet@gmail.com")
addPerson("c9s", "yoanlin93@gmail.com", "@c9s")
addPerson("calerogers", "cale.rogers.m@gmail.com")
addPerson("caosz", "cszznbb@gmail.com")
addPerson("catatsuy", "m.ddotx.f@gmail.com", "@catatsuy")
addPerson("cch123", "buaa.cch@gmail.com", "@cch123")
addPerson("chanxuehong", "chanxuehong@gmail.com", "@chanxuehong")
addPerson("christopher-henderson", "chris@chenderson.org", "@christopher-henderson")
addPerson("cia-rana", "kiwamura0314@gmail.com", "@cia-rana")
addPerson("closs", "the.cody.oss@gmail.com", "@codyoss")
addPerson("conorbroderick", "cjayjayb@gmail.com")
addPerson("cyacco", "cyacco@gmail.com")
addPerson("dalyk", "dalyk@google.com")
addPerson("danoscarmike", "danom@google.com")
addPerson("datianshi", "dsz0111@gmail.com", "@datianshi")
addPerson("dchenk", "dcherchenko@gmail.com", "@dchenk")
addPerson("dechen-sherpa", "Dechen.Sherpa@dal.ca")
addPerson("delioda", "delioda@consenteye.com")
addPerson("diana ortega", "dicaormu@gmail.com")
addPerson("diplozoon", "huyuumi.dev@gmail.com", "@JohnTitor")
addPerson("djherbis", "djherbis@gmail.com", "@djherbis")
addPerson("dsivalingam", "dayansivalingam@gmail.com")
addPerson("dupoxy", "dupoxy@users.noreply.github.com", "@dupoxy")
addPerson("elmar", "ktye78@gmail.com")
addPerson("elpinal", "6elpinal@gmail.com", "@elpinal")
addPerson("emersion", "contact@emersion.fr")
addPerson("epkann", "epkann@gmail.com")
addPerson("erdi", "erdi@google.com")
addPerson("eric fang", "eric.fang@arm.com", "24534@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("erifan01", "eric.fang@arm.com", "@erifan")
addPerson("esell", "eujon.sellers@gmail.com")
addPerson("esell", "eujon.sellers@gmail.com", "@esell")
addPerson("fannie zhang", "Fannie.Zhang@arm.com", "21345@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("fanzha02", "fannie.zhang@arm.com", "@zhangfannie")
addPerson("feilengcui008", "feilengcui008@gmail.com", "@feilengcui008")
addPerson("feng pengfei", "mountainfpf@gmail.com")
addPerson("fenwickelliott", "charles@fenwickelliott.io")
addPerson("ferhat elmas", "elmas.ferhat@gmail.com")
addPerson("filewalkwithme", "maiconscosta@gmail.com", "@filewalkwithme")
addPerson("gangachris", "ganga.chris@gmail.com")
addPerson("garrickevans", "garrick@google.com")
addPerson("gbbr", "ga@stripetree.co.uk", "@gbbr")
addPerson("glorieux", "lorieux.g@gmail.com", "@glorieux")
addPerson("gmarik", "gmarik@gmail.com", "@gmarik")
addPerson("go101", "tapir.liu@gmail.com", "@TapirLiu")
addPerson("guitarbum722", "johnkenneth.moore@gmail.com")
addPerson("gulyasm", "mgulyas86@gmail.com", "@gulyasm")
addPerson("guyfedwards", "guyfedwards@gmail.com")
addPerson("hagen1778", "hagen1778@gmail.com", "@hagen1778")
addPerson("halfcrazy", "hackzhuyan@gmail.com")
addPerson("halgrimur", "douga@google.com")
addPerson("hanyang.tay", "htay@wesleyan.edu")
addPerson("haormj", "haormj@gmail.com", "@haormj")
addPerson("harshit777", "harshit.g.0702@gmail.com")
addPerson("haya14busa", "haya14busa@gmail.com", "@haya14busa")
addPerson("haya14busa", "hayabusa1419@gmail.com", "@haya14busa")
addPerson("hearot", "gabriel@hearot.it", "@hearot")
addPerson("helloPiers", "google@hellopiers.pro")
addPerson("hellozee", "hellozee@disroot.org", "@hellozee")
addPerson("hengwu0", "41297446+hengwu0@users.noreply.github.com", "@hengwu0")
addPerson("hertzbach", "rhertzbach@gmail.com")
addPerson("hezhenwei", "3711971@qq.com")
addPerson("hsinhoyeh", "yhh92u@gmail.com")
addPerson("huangyonglin", "1249107551@qq.com")
addPerson("ia", "isaac.ardis@gmail.com")
addPerson("iamqizhao", "toqizhao@gmail.com")
addPerson("ianzapolsky", "ianzapolsky@gmail.com", "@ianzapolsky")
addPerson("irfan sharif", "irfanmahmoudsharif@gmail.com")
addPerson("ivan parra", "ivantrips1@gmail.com")
addPerson("jaredculp", "jculp14@gmail.com", "@jaredculp")
addPerson("jerome-laforge", "jerome.laforge@gmail.Com")
addPerson("jimmy frasche", "soapboxcicero@gmail.com", "13220@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("jimmyfrasche", "soapboxcicero@gmail.com", "@jimmyfrasche")
addPerson("jirawat001", "paoji@icloud.com")
addPerson("joshua stein", "jcs@jcs.org")
addPerson("kanapuliAthavan", "athavankanapuli@gmail.com")
addPerson("kargakis", "mkargaki@redhat.com", "@kargakis")
addPerson("khr", "khr@khr-glaptop.roam.corp.google.com")
addPerson("kim yongbin", "kybinz@gmail.com", "@kybin")
addPerson("kirinrastogi", "kirin.rastogi@shopify.com")
addPerson("kirk", "kirk91.han@gmail.com", "@kirk91")
addPerson("knqyf263", "knqyf263@gmail.com")
addPerson("komuW", "komuw05@gmail.com", "@komuw")
addPerson("komuw", "komuw05@gmail.com")
addPerson("konstantin8105", "konstantin8105@gmail.com", "@Konstantin8105")
addPerson("kortschak", "dan.kortschak@adelaide.edu.au", "@kortschak")
addPerson("kujenga", "ataylor0123@gmail.com")
addPerson("lcd1232", "8745863+lcd1232@users.noreply.github.com")
addPerson("leigh schrandt", "leigh@null.net")
addPerson("linatiantamade", "linqiyo@gmail.com")
addPerson("lotus.wu", "lotus.wu@outlook.com")
addPerson("lsytj0413", "511121939@qq.com")
addPerson("ltnwgl", "ltnwgl@gmail.com")
addPerson("ltnwgl", "ltnwgl@gmail.com", "@gengliangwang")
addPerson("lucor", "lu.corbo@gmail.com")
addPerson("ludweeg", "mursalimovemeel@gmail.com", "@ludweeg")
addPerson("lukechampine", "luke.champine@gmail.com")
addPerson("lukechampine", "luke.champine@gmail.com", "@lukechampine")
addPerson("maiyang", "yangwen.yw@gmail.com")
addPerson("majiang", "ma.jiang@zte.com.cn", "@zte-majiang")
addPerson("mapeiqi", "mapeiqi2017@gmail.com")
addPerson("marwan-at-work", "marwan.sameer@gmail.com", "@marwan-at-work")
addPerson("matematik7", "domen@ipavec.net")
addPerson("mattyw", "gh@mattyw.net", "@mattyw")
addPerson("mdp", "m@mdp.im")
addPerson("meir fischer", "meirfischer@gmail.com", "8955@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("mewmew", "rnd0x00@gmail.com", "@mewmew")
addPerson("mihasya", "m@mihasya.com", "@mihasya")
addPerson("mike andrews", "mra@xoba.com", "@xoba")
addPerson("milad arabi", "milad.arabi@gmail.com")
addPerson("mingrammer", "mingrammer@gmail.com", "@mingrammer")
addPerson("mischief", "mischief@offblast.org", "@mischief")
addPerson("mmaldo329", "michael_maldonado@comcast.com")
addPerson("molivier", "olivier.matthieu@gmail.com", "@molivier")
addPerson("monkeybutter", "pablo.larraondo@anu.edu.au")
addPerson("moznion", "moznion@gmail.com")
addPerson("mpl", "mathieu.lonjaret@gmail.com", "@mpl")
addPerson("mstrong", "mstrong1341@gmail.com", "@xmattstrongx")
addPerson("musgravejw", "musgravejw@gmail.com")
addPerson("nicerobot", "golang@nicerobot.org")
addPerson("nick.grange", "nicolas.grange@retrievercommunications.com")
addPerson("nkhumphreys", "nkhumphreys@gmail.com")
addPerson("nobonobo", "irieda@gmail.com", "@nobonobo")
addPerson("nogoegst", "nogoegst@users.noreply.github.com", "@nogoegst")
addPerson("nwidger", "niels.widger@gmail.com", "@nwidger")
addPerson("oiooj", "nototon@gmail.com")
addPerson("omarvides", "omarvides@gmail.com")
addPerson("pallat", "yod.pallat@gmail.com")
addPerson("pamelin", "amelin.paul@gmail.com")
addPerson("pankona", "yosuke.akatsuka@gmail.com")
addPerson("pavel-paulau", "pavel.paulau@gmail.com", "@pavel-paulau")
addPerson("pbberlin", "peter.buchmann@web.de")
addPerson("peter zhang", "i@ddatsh.com")
addPerson("phayes", "patrick.d.hayes@gmail.com")
addPerson("philhofer", "phofer@umich.edu", "@philhofer")
addPerson("pityonline", "pityonline@gmail.com", "@pityonline")
addPerson("prateekgogia", "prateekgogia42@gmail.com")
addPerson("pvoicu", "pvoicu@paypal.com", "@pvoicu")
addPerson("pytimer", "lixin20101023@gmail.com")
addPerson("qeed", "qeed.quan@gmail.com", "@qeedquan")
addPerson("ragavendra", "ragavendra.bn@gmail.com")
addPerson("rajender", "rajenderreddykompally@gmail.com", "@rajender")
addPerson("rajnikant", "rajnikant12345@gmail.com")
addPerson("rhysd", "lin90162@yahoo.co.jp")
addPerson("robnorman", "rob.norman@infinitycloud.com", "@robnorman")
addPerson("roger peppe", "rogpeppe@gmail.com", "6010@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("romanyx", "romanyx90@yandex.ru", "@romanyx")
addPerson("ron minnich", "rminnich@gmail.com", "12935@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("rubyist", "scott.barron@github.com", "@rubyist")
addPerson("rust", "pruest@gmail.com", "@gazed")
addPerson("rwaweber", "rwaweber@gmail.com")
addPerson("saberuster", "saberuster@gmail.com")
addPerson("sagarkrkv", "sagarkrkv@gmail.com")
addPerson("sam boyer", "tech@samboyer.org", "@sdboyer")
addPerson("sandyskies", "chenmingjie0828@163.com")
addPerson("sasha-s", "sasha@scaledinference.com")
addPerson("sayden", "mariocaster@gmail.com")
addPerson("sbramin", "s@sbramin.com")
addPerson("sdheisenberg", "nicholasleli@gmail.com", "@smugcloud")
addPerson("sergey", "sngasuan@gmail.com", "@Asuan")
addPerson("sergey.arseev", "sergey.arseev@intel.com", "@sergeyarseev")
addPerson("sergey.dobrodey", "sergey.dobrodey@synesis.ru")
addPerson("sevki", "s@sevki.org", "@sevki")
addPerson("shaharko", "skohanim@gmail.com", "@skohanim")
addPerson("shawnps", "shawnpsmith@gmail.com", "@shawnps")
addPerson("shinofara", "shinofara@gmail.com")
addPerson("shogo-ma", "choroma194@gmail.com", "@shogo-ma")
addPerson("shwsun", "jethro.sun7@gmail.com")
addPerson("slene", "vslene@gmail.com")
addPerson("softctrl", "carlostimoshenkorodrigueslopes@gmail.com")
addPerson("soluchok", "isoluchok@gmail.com", "@soluchok")
addPerson("spring1843", "yasser@yasser.ca")
addPerson("stephane benoit", "stefb965@gmail.com")
addPerson("stxmendez", "stxmendez@gmail.com")
addPerson("sukrithanda", "sukrit.handa@utoronto.ca")
addPerson("tal@whatexit.org", "tal@whatexit.org")
addPerson("taylorza", "taylorza@gmail.com")
addPerson("tbunyk", "tbunyk@gmail.com", "@bunyk")
addPerson("teague", "tnc1443@gmail.com", "@teaguecole")
addPerson("telecoda", "robbaines@gmail.com")
addPerson("templexxx", "lucas1x1x@gmail.com", "@templexxx")
addPerson("tengufromsky", "nick27surgut@gmail.com", "@tengufromsky")
addPerson("theairkit", "theairkit@gmail.com")
addPerson("themester", "Garriga975@gmail.com")
addPerson("themester", "dgrripoll@gmail.com")
addPerson("themihai", "mihai@epek.com")
addPerson("thoeni", "thoeni@gmail.com")
addPerson("thoeni", "thoeni@gmail.com", "@thoeni")
addPerson("thor wolpert", "thor@wolpert.ca")
addPerson("tkivisik", "taavi.kivisik@gmail.com", "@tkivisik")
addPerson("tliu", "terry.liu.y@gmail.com")
addPerson("tnt", "alkaloid.btx@gmail.com", "@trtstm")
addPerson("tom", "tommiemeyer290@gmail.com")
addPerson("tro3", "trey.roessig@gmail.com", "@tro3")
addPerson("ttacon", "ttacon@gmail.com", "@ttacon")
addPerson("ttyh061", "ttyh061@gmail.com")
addPerson("tuxpy", "q8886888@qq.com")
addPerson("unknown", "daria.kolistratova@intel.com", "@DarKol13")
addPerson("unknown", "geon0250@gmail.com", "@KimMachineGun")
addPerson("unknown", "nonamezeil@gmail.com", "@zeil")
addPerson("uropek", "uropek@gmail.com", "@uropek")
addPerson("vabr-g", "vabr@google.com")
addPerson("viswesr", "r.visweswara@gmail.com")
addPerson("voutasaurus", "voutasaurus@gmail.com", "@voutasaurus")
addPerson("vvakame", "vvakame+dev@gmail.com")
addPerson("wbond", "will@wbond.net")
addPerson("weeellz", "weeellz12@gmail.com", "@weeellz")
addPerson("wheelcomplex yin", "wheelcomplex@gmail.com")
addPerson("woodsaj", "awoods@raintank.io", "@woodsaj")
addPerson("wozz", "wozz@users.noreply.github.com")
addPerson("wrfly", "mr.wrfly@gmail.com")
addPerson("wu-heng", "41297446+wu-heng@users.noreply.github.com")
addPerson("wuyunzhou", "yunzhouwu@gmail.com", "@wuyunzhou")
addPerson("wzshiming", "wzshiming@foxmail.com")
addPerson("xiezhenye", "xiezhenye@gmail.com")
addPerson("xufei_Alex", "badgangkiller@gmail.com", "18915@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("xufei_Alex", "badgangkiller@gmail.com", "@knightXun")
addPerson("yansal", "yannsalaun1@gmail.com", "@yansal")
addPerson("yanyiwu", "wuyanyi09@gmail.com")
addPerson("yazver", "ya.zver@gmail.com")
addPerson("yo-tak", "yo.tak0812@gmail.com", "@yo-tak")
addPerson("yuuji.yaginuma", "yuuji.yaginuma@gmail.com", "@y-yagi")
addPerson("zachgersh", "zachgersh@gmail.com")
addPerson("zaq1tomo", "zaq1tomo@gmail.com", "@zaq1tomo")
addPerson("zhongtao.chen", "chenzhongtao@126.com", "@chenzhongtao")
addPerson("zhoujun", "dev.zhoujun@gmail.com")
addPerson("Özgür Kesim", "oec-go@kesim.org")
addPerson("Максим Федосеев", "max.faceless.frei@gmail.com", "@codesenberg")
addPerson("Фахриддин Балтаев", "faxriddinjon@gmail.com", "@faxriddin")
addPerson("Юрий Соколов", "funny.falcon@gmail.com", "7215@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("一痕 刘", "liuyihen@gmail.com")
addPerson("张嵩", "zs349596@gmail.com", "@zs1379")
addPerson("沈涛", "shentaoskyking@gmail.com", "@smileusd")
addPerson("祥曦 徐", "lucas1x1x@gmail.com", "28434@62eb7196-b449-3ce5-99f1-c037f21e1705")
}
// GithubOfGomoteUser returns the GitHub username for the provided gomote user.
// If no mapping is known, the gomote username itself is returned.
func GithubOfGomoteUser(gomoteUser string) (githubUser string) {
	gomoteToGithub := map[string]string{
		"austin":    "aclements",
		"cbro":      "broady",
		"cherryyz":  "cherrymui",
		"cmang":     "paranoiacblack",
		"drchase":   "dr2chase",
		"gri":       "griesemer",
		"hakim":     "hyangah",
		"herbie":    "cybrcodr",
		"iant":      "ianlancetaylor",
		"jbd":       "rakyll",
		"joetsai":   "dsnet",
		"jrjohnson": "johnsonj",
		"khr":       "randall77",
		"lazard":    "davidlazar",
		"pjw":       "pjweinbgo",
		"r":         "robpike",
		"rstambler": "stamblerre",
		"sameer":    "Sajmani",
		"shadams":   "adams-sarah",
		"spf":       "spf13",
		"valsorda":  "FiloSottile",
	}
	if gh, ok := gomoteToGithub[gomoteUser]; ok {
		return gh
	}
	return gomoteUser
}
internal/gophers: add Carlos Amedee
Add my @golang email, GitHub account and Gerrit account.
Change-Id: I62b9abe3b312db1f534a327ec51e07c9bf275425
Reviewed-on: https://go-review.googlesource.com/c/build/+/205217
Reviewed-by: Dmitri Shuralyov <d181b7fea0ec87c86ba5a890ab716db52498e3ba@golang.org>
Reviewed-by: Brad Fitzpatrick <ae9783c0b0efc69cd85ab025ddd17aa44cdc4aa5@golang.org>
Run-TryBot: Carlos Amedee <ab5e2bca84933118bbc9d48ffaccce3bac4eeb64@golang.org>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gophers is a list of names, emails, and GitHub usernames of people
// from the Go git repos and issue trackers.
package gophers
import (
"strings"
"golang.org/x/build/gerrit"
)
// Person represents a single contributor identity aggregated from the
// Go git repos and issue trackers. A Person may have several emails,
// but at most one GitHub and one Gerrit identity.
type Person struct {
Name string // full name, original case, e.g. "Foo Bar"
Github string // GitHub username, original case, without the leading '@'
Gerrit string // Gerrit email, lowercase; the first email merged in (see mergeIDs)
Emails []string // all known emails, lowercase
Googler bool // whether person is (or was) a Googler; determined via heuristics
Bot bool // whether it's a known bot (GopherBot, Gerrit Bot)
}
// strSliceContains reports whether s is an element of ss.
func strSliceContains(ss []string, s string) bool {
	for i := range ss {
		if ss[i] == s {
			return true
		}
	}
	return false
}
// mergeIDs folds the given identifiers into p and registers each one in
// the idToPerson index (under its lowercase form). An id starting with
// '@' is a GitHub username; an id containing '@' is an email; the
// markers "*goog" and "*bot" set the Googler and Bot flags; anything
// else is taken as the person's full name.
func (p *Person) mergeIDs(ids ...string) {
	for _, id := range ids {
		if strings.HasPrefix(id, "@") {
			p.Github = id[1:]
			idToPerson[strings.ToLower(id)] = p
			continue
		}
		if strings.Contains(id, "@") {
			email := strings.ToLower(id)
			if !strSliceContains(p.Emails, email) {
				p.Emails = append(p.Emails, email)
			}
			idToPerson[email] = p
			if strings.HasSuffix(email, "@golang.org") || strings.HasSuffix(email, "@google.com") {
				p.Googler = true
			}
			// The first email seen is considered the person's Gerrit email.
			if len(p.Emails) == 1 {
				p.Gerrit = email
			}
			continue
		}
		switch id {
		case "*goog":
			p.Googler = true
		case "*bot":
			p.Bot = true
		default:
			p.Name = id
			idToPerson[strings.ToLower(id)] = p
		}
	}
}
// idToPerson indexes every known Person by each of their identifiers.
// Keys are "@lowercasegithub", "lowercase name", or "lowercase@email.com";
// several keys typically map to the same *Person.
var idToPerson = map[string]*Person{}
// GetPerson looks up a person by id and returns one if found,
// or nil otherwise.
//
// The id is case insensitive, and may be one of:
//
// • full name (for example, "Dmitri Shuralyov")
//
// • GitHub username (for example, "@dmitshur"), leading '@' is mandatory
//
// • Gerrit <account ID>@<instance ID> (for example, "6005@62eb7196-b449-3ce5-99f1-c037f21e1705")
//
// • email (for example, "dmitshur@golang.org")
//
// Only exact matches are supported.
//
func GetPerson(id string) *Person {
	p, ok := idToPerson[strings.ToLower(id)]
	if !ok {
		return nil
	}
	return p
}
// GetGerritPerson looks up a person from the Gerrit account ai,
// trying the account's name first and then its email.
// It returns nil if neither identifier is known.
func GetGerritPerson(ai gerrit.AccountInfo) *Person {
	for _, id := range []string{ai.Name, ai.Email} {
		if p := GetPerson(id); p != nil {
			return p
		}
	}
	return nil
}
// addPerson merges the given identifiers into a single Person record.
// If any id already resolves to a known Person, the ids are merged into
// that record; otherwise a fresh Person is created. The (possibly new)
// Person is returned.
func addPerson(ids ...string) *Person {
	var person *Person
	for _, id := range ids {
		if existing := GetPerson(id); existing != nil {
			person = existing
			break
		}
	}
	if person == nil {
		person = new(Person)
	}
	person.mergeIDs(ids...)
	return person
}
func init() {
// Not people, but hereby granted personhood:
addPerson("Gopherbot", "gobot@golang.org", "@gopherbot", "5976@62eb7196-b449-3ce5-99f1-c037f21e1705", "*bot")
addPerson("Gerrit Bot", "letsusegerrit@gmail.com", "12446@62eb7196-b449-3ce5-99f1-c037f21e1705", "*bot")
addPerson("212472270", "ggp493@gmail.com", "@ggriffiths")
addPerson("9.nashi", "9.nashi@gmail.com", "@80nashi")
addPerson("AJ Yoo", "ajarusan@arista.com")
addPerson("Aamir Khan", "syst3m.w0rm@gmail.com", "7715@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aamir Khan", "syst3m.w0rm@gmail.com", "@syst3mw0rm")
addPerson("Aaron Cannon", "cannona@fireantproductions.com", "@cannona")
addPerson("Aaron Clawson", "Aaron.Clawson@gmail.com")
addPerson("Aaron France", "aaron.l.france@gmail.com", "@AeroNotix")
addPerson("Aaron Jacobs", "jacobsa@google.com", "6475@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aaron Jacobs", "jacobsa@google.com", "@jacobsa")
addPerson("Aaron Kemp", "kemp.aaron@gmail.com", "@writeonlymemory")
addPerson("Aaron Kemp", "kemp@google.com")
addPerson("Aaron Torres", "tcboox@gmail.com", "6165@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aaron Torres", "tcboox@gmail.com", "@agtorre")
addPerson("Aaron Zinman", "aaron@azinman.com", "@azinman")
addPerson("Aarti Parikh", "aarti.parikh@gmail.com", "@aarti")
addPerson("Aashish Karki", "0133asis@gmail.com")
addPerson("Abe Haskins", "abeisgreat@abeisgreat.com")
addPerson("Abhijit Pai", "abhijitpai05@gmail.com")
addPerson("Abhinav Gupta", "abhinav.g90@gmail.com", "@abhinav")
addPerson("Adam Azarchs", "adam.azarchs@10xgenomics.com", "@adam-azarchs")
addPerson("Adam Bender", "abender@google.com", "@bitlux")
addPerson("Adam Eijdenberg", "adam@continusec.com")
addPerson("Adam Harvey", "aharvey@php.net")
addPerson("Adam Jones", "adam@modsrus.com")
addPerson("Adam Kisala", "adam.kisala@gmail.com", "@adamkisala")
addPerson("Adam Langley", "agl@golang.org", "5425@62eb7196-b449-3ce5-99f1-c037f21e1705", "@agl", "agl@google.com", "7285@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Adam Medzinski", "adam.medzinski@gmail.com", "@medzin")
addPerson("Adam Ostor", "adam.ostor@gmail.com")
addPerson("Adam Ryman", "adamryman@gmail.com")
addPerson("Adam Shannon", "adamkshannon@gmail.com", "26193@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Adam Shannon", "adamkshannon@gmail.com", "@adamdecaf")
addPerson("Adam Sindelar", "adamsh@google.com", "27224@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Adam Thomason", "athomason@gmail.com")
addPerson("Adam Wolfe Gordon", "awg@xvx.ca")
addPerson("Adam Woodbeck", "adam@woodbeck.net", "@awoodbeck")
addPerson("Adam Yi", "i@adamyi.com")
addPerson("Adin Scannell", "ascannell@google.com")
addPerson("Aditya Mukerjee", "dev@chimeracoder.net", "@ChimeraCoder")
addPerson("Aditya Rastogi", "adirastogi@google.com")
addPerson("Adrian Hesketh", "adrianhesketh@hushmail.com", "24533@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Adrian Hesketh", "adrianhesketh@hushmail.com", "@a-h")
addPerson("Adrian O'Grady", "elpollouk@gmail.com", "@elpollouk")
addPerson("Aeneas Rekkas (arekkas)", "aeneas@ory.am")
addPerson("Afanasev Stanislav", "phpprogger@gmail.com")
addPerson("Agis Anastasopoulos", "agis.anast@gmail.com", "@agis")
addPerson("Agniva De Sarker", "agniva.quicksilver@gmail.com", "24096@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Agniva De Sarker", "agniva.quicksilver@gmail.com", "@agnivade")
addPerson("Agniva De Sarker", "agnivade@yahoo.co.in", "@agnivade")
addPerson("Ahmed W.", "oneofone@gmail.com", "5255@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ahmet Alp Balkan", "ahmetb@google.com", "@ahmetb")
addPerson("Ahmet Soormally", "ahmet@mangomm.co.uk", "@asoorm")
addPerson("Ahmy Yulrizka", "yulrizka@gmail.com", "@yulrizka")
addPerson("Aiden Scandella", "ai@uber.com", "@sectioneight")
addPerson("Aiden Scandella", "sc@ndella.com")
addPerson("Ainar Garipov", "gugl.zadolbal@gmail.com", "@ainar-g")
addPerson("Aishraj", "aishraj@users.noreply.github.com", "@aishraj")
addPerson("Akhil Indurti", "aindurti@gmail.com", "contact@akhilindurti.com", "@smasher164", "17921@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Akihiko Odaki", "akihiko.odaki.4i@stu.hosei.ac.jp")
addPerson("Akihiro Suda", "suda.kyoto@gmail.com", "13030@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Akihiro Suda", "suda.kyoto@gmail.com", "@AkihiroSuda")
addPerson("Alan Bradley", "alan@gangleri.net")
addPerson("Alan Braithwaite", "alan@ipaddr.org", "@abraithwaite")
addPerson("Alan Donovan", "adonovan@google.com", "5195@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alan Donovan", "adonovan@google.com", "@alandonovan") // work profile
addPerson("Alan Donovan", "alan@alandonovan.net", "@adonovan") // personal profile
addPerson("Alan Gardner", "alanctgardner@gmail.com")
addPerson("Alan Shreve", "alan@inconshreveable.com", "@inconshreveable")
addPerson("Albert Nigmatzianov", "albertnigma@gmail.com", "15270@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Albert Smith", "albert@horde.today")
addPerson("Albert Strasheim", "fullung@gmail.com", "@alberts")
addPerson("Albert Yu", "yukinying@gmail.com", "@yukinying")
addPerson("Alberto Bertogli", "albertito@blitiri.com.ar", "10985@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alberto Bertogli", "albertito@blitiri.com.ar", "@albertito")
addPerson("Alberto Donizetti", "alb.donizetti@gmail.com", "5385@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alberto Donizetti", "alb.donizetti@gmail.com", "@ALTree")
addPerson("Alberto García Hierro", "alberto@garciahierro.com", "@fiam")
addPerson("Aleksandar Dezelin", "dezelin@gmail.com", "@dezelin")
addPerson("Aleksandr Demakin", "alexander.demakin@gmail.com", "8245@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aleksandr Demakin", "alexander.demakin@gmail.com", "@avdva")
addPerson("Aleksandr Razumov", "ar@cydev.ru")
addPerson("Aleksandr Razumov", "ar@cydev.ru", "@ernado")
addPerson("Alekseev Artem", "a.artem060@gmail.com", "@fexolm")
addPerson("Alessandro Arzilli", "alessandro.arzilli@gmail.com", "5821@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alessandro Arzilli", "alessandro.arzilli@gmail.com", "@aarzilli")
addPerson("Alessandro Baffa", "alessandro.baffa@gmail.com", "@alebaffa")
addPerson("Alex A Skinner", "alex@lx.lc")
addPerson("Alex Brainman", "alex.brainman@gmail.com", "5070@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alex Brainman", "alex.brainman@gmail.com", "@alexbrainman")
addPerson("Alex Bramley", "a.bramley@gmail.com", "@fluffle")
addPerson("Alex Browne", "stephenalexbrowne@gmail.com", "@albrow")
addPerson("Alex Carol", "alex.carol.c@gmail.com", "@alexcarol")
addPerson("Alex Crawford", "alex@acrawford.com")
addPerson("Alex Flint", "alex.flint@gmail.com")
addPerson("Alex Jin", "toalexjin@gmail.com", "@toalexjin")
addPerson("Alex Kohler", "alexjohnkohler@gmail.com", "@alexkohler")
addPerson("Alex Myasoedov", "msoedov@gmail.com", "@msoedov")
addPerson("Alex Plugaru", "alex@plugaru.org", "@xarg")
addPerson("Alex Schroeder", "alex@gnu.org", "@kensanata")
addPerson("Alex Sergeyev", "abc@alexsergeyev.com", "@asergeyev")
addPerson("Alex Seubert", "alexseubert@gmail.com")
addPerson("Alex Skinner", "alex@lx.lc", "6090@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alex Stoddard", "alex.stoddard@comcast.net")
addPerson("Alex Tokarev", "aleksator@gmail.com", "@aleksator")
addPerson("Alex Vaghin", "ddos@google.com", "alex@cloudware.io", "@x1ddos")
addPerson("Alex Vaghin", "crhyme@google.com", "6347@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alex Vaghin", "alex@cloudware.io", "8870@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alex Yu", "yu.alex96@gmail.com")
addPerson("AlexRudd", "rudd.alex1@gmail.com")
addPerson("Alexander A. Klimov", "alexander.klimov@netways.de")
addPerson("Alexander Ben Nasrallah", "me@abn.sh")
addPerson("Alexander Döring", "email@alexd.ch", "15115@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexander Döring", "email@alexd.ch", "@alexd765")
addPerson("Alexander F Rødseth", "alexander.rodseth@appeartv.com", "@xyproto")
addPerson("Alexander F Rødseth", "rodseth@gmail.com")
addPerson("Alexander Guz", "kalimatas@gmail.com", "@kalimatas")
addPerson("Alexander Kauer", "alexander@affine.space", "@kaueraal")
addPerson("Alexander Kucherenko", "alxkchr@gmail.com")
addPerson("Alexander Kuleshov", "kuleshovmail@gmail.com")
addPerson("Alexander Larsson", "alexander.larsson@gmail.com", "@alexlarsson")
addPerson("Alexander Menzhinsky", "amenzhinsky@gmail.com", "16045@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexander Menzhinsky", "amenzhinsky@gmail.com", "@amenzhinsky")
addPerson("Alexander Milyutin", "alexander.milyutin@lazada.com")
addPerson("Alexander Morozov", "lk4d4math@gmail.com", "8340@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexander Morozov", "lk4d4math@gmail.com", "@LK4D4")
addPerson("Alexander Neumann", "alexander@bumpern.de", "@fd0")
addPerson("Alexander Orlov", "alexander.orlov@loxal.net", "@loxal")
addPerson("Alexander Polcyn", "apolcyn@google.com", "16623@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexander Polcyn", "apolcyn@google.com", "@apolcyn")
addPerson("Alexander Reece", "awreece@gmail.com", "@awreece")
addPerson("Alexander Shopov", "ash@kambanaria.org", "@alshopov")
addPerson("Alexander Zhavnerchik", "alex.vizor@gmail.com", "@alxzh")
addPerson("Alexander Zolotov", "goldifit@gmail.com", "@zolotov")
addPerson("Alexandre Cesaro", "alexandre.cesaro@gmail.com", "5647@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexandre Cesaro", "alexandre.cesaro@gmail.com", "@alexcesaro")
addPerson("Alexandre Fiori", "fiorix@gmail.com", "@fiorix")
addPerson("Alexandre Maari", "draeron@gmail.com", "@draeron")
addPerson("Alexandre Normand", "alexandre.normand@gmail.com", "@alexandre-normand")
addPerson("Alexandre Parenteau", "aubonbeurre@gmail.com")
addPerson("Alexandre Viau", "viau.alexandre@gmail.com", "27580@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexandru Moșoi", "alexandru@mosoi.ro")
addPerson("Alexandru Moșoi", "alexandru@mosoi.ro", "6173@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexandru Moșoi", "brtzsnr@gmail.com", "5930@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexandru Moșoi", "brtzsnr@gmail.com", "@brtzsnr")
addPerson("Alexandru Moșoi", "mosoi@google.com")
addPerson("Alexei Sholik", "alcosholik@gmail.com", "@alco")
addPerson("Alexey Alexandrov", "aalexand@google.com", "@aalexand")
addPerson("Alexey Borzenkov", "snaury@gmail.com", "@snaury")
addPerson("Alexey Naidonov", "alexey.naidyonov@gmail.com")
addPerson("Alexey Naidonov", "alexey.naidyonov@gmail.com", "@growler")
addPerson("Alexey Neganov", "neganovalexey@gmail.com", "@neganovalexey")
addPerson("Alexey Nezhdanov", "snakeru@gmail.com")
addPerson("Alexey Nezhdanov", "snakeru@gmail.com", "9000@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexey Palazhchenko", "alexey.palazhchenko@gmail.com", "13090@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Alexey Palazhchenko", "alexey.palazhchenko@gmail.com", "@AlekSi")
addPerson("Alexey Vilenskiy", "vilenskialeksei@gmail.com")
addPerson("Alexis Hildebrandt", "surryhill@gmail.com")
addPerson("Alexis Horgix Chotard", "alexis.horgix.chotard@gmail.com")
addPerson("Alexis Hunt", "lexer@google.com")
addPerson("Alexis Imperial-Legrand", "ail@google.com", "@ailg")
addPerson("Ali Rizvi-Santiago", "arizvisa@gmail.com", "@arizvisa")
addPerson("Aliaksandr Valialkin", "valyala@gmail.com", "9525@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aliaksandr Valialkin", "valyala@gmail.com", "@valyala")
addPerson("Alif Rachmawadi", "subosito@gmail.com", "@subosito")
addPerson("Alistair Barrell", "alistair.t.barrell@gmail.com")
addPerson("Allan Simon", "allan.simon@supinfo.com", "@allan-simon")
addPerson("Alok Menghrajani", "alok.menghrajani@gmail.com", "@alokmenghrajani")
addPerson("Aman Gupta", "aman@tmm1.net", "20002@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aman Gupta", "aman@tmm1.net", "@tmm1")
addPerson("Amanuel Bogale", "abogale2@gmail.com")
addPerson("Amir Mohammad Saied", "amir@gluegadget.com", "@amir")
addPerson("Amit Ghadge", "amitg.b14@gmail.com")
addPerson("Ammar Bandukwala", "ammar@ammar.io")
addPerson("Amr A.Mohammed", "merodiro@gmail.com")
addPerson("Amrut Joshi", "amrut.joshi@gmail.com", "@rumple")
addPerson("Amy Schlesener", "amyschlesener@gmail.com")
addPerson("Anand K. Mistry", "anand@mistry.ninja")
addPerson("Anders Pearson", "anders@columbia.edu", "@thraxil")
addPerson("Andrea Nodari", "andrea.nodari91@gmail.com")
addPerson("Andrea Nodari", "andrea.nodari91@gmail.com", "@nodo")
addPerson("Andrea Spadaccini", "spadaccio@google.com", "@lupino3")
addPerson("Andreas Auernhamer", "andreas_golang@mail.de")
addPerson("Andreas Auernhammer", "aead@mail.de", "14805@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andreas Auernhammer", "aead@mail.de", "@aead")
addPerson("Andreas Auernhammer", "enceve@mail.de")
addPerson("Andreas Jellinghaus", "andreas@ionisiert.de", "@tolonuga")
addPerson("Andreas Litt", "andreas.litt@gmail.com")
addPerson("Andrei Gherzan", "andrei@resin.io")
addPerson("Andrei Korzhevskii", "a.korzhevskiy@gmail.com", "@nordligulv")
addPerson("Andrei Tudor Călin", "mail@acln.ro", "27279@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andrei Tudor Călin", "mail@acln.ro", "@acln0")
addPerson("Andres Erbsen", "andres.erbsen@gmail.com")
addPerson("Andrew Austin", "andrewaclt@gmail.com", "@andrewaustin")
addPerson("Andrew Benton", "andrewmbenton@gmail.com", "@andrewmbenton")
addPerson("Andrew Bonventre", "andybons@golang.org", "andybons@gmail.com", "@andybons", "365204+andybons@users.noreply.github.com", "22285@62eb7196-b449-3ce5-99f1-c037f21e1705", "andybons@google.com", "10660@62eb7196-b449-3ce5-99f1-c037f21e1705", "hello@obvy.co")
addPerson("Andrew Brampton", "bramp@google.com")
addPerson("Andrew Braunstein", "awbraunstein@gmail.com", "@awbraunstein")
addPerson("Andrew Ekstedt", "andrew.ekstedt@gmail.com", "6255@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andrew Ekstedt", "andrew.ekstedt@gmail.com", "@magical")
addPerson("Andrew Etter", "andrew.etter@gmail.com", "@andrewetter")
addPerson("Andrew Gerrand", "adg@golang.org", "5010@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andrew Gerrand", "adg@golang.org", "@adg")
addPerson("Andrew Gerrand", "nf@wh3rd.net", "@nf")
addPerson("Andrew Harding", "andrew@spacemonkey.com", "@azdagron")
addPerson("Andrew M Bursavich", "abursavich@gmail.com", "@abursavich")
addPerson("Andrew Patzer", "andrew.patzer@gmail.com")
addPerson("Andrew Pilloud", "andrewpilloud@igneoussystems.com", "@apilloud")
addPerson("Andrew Pogrebnoy", "absourd.noise@gmail.com", "@dAdAbird")
addPerson("Andrew Poydence", "apoydence@pivotal.io", "@poy")
addPerson("Andrew Pritchard", "awpritchard@gmail.com", "@awpr")
addPerson("Andrew Radev", "andrey.radev@gmail.com", "@AndrewRadev")
addPerson("Andrew Skiba", "skibaa@gmail.com", "@skibaa")
addPerson("Andrew Szeto", "andrew@jabagawee.com", "@jabagawee")
addPerson("Andrew Wilkins", "axwalk@gmail.com", "8640@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andrew Wilkins", "axwalk@gmail.com", "@axw")
addPerson("Andrew Williams", "williams.andrew@gmail.com", "@williamsandrew")
addPerson("Andrey Petrov", "andrey.petrov@shazow.net", "@shazow")
addPerson("Andrii Soldatenko", "andrii.soldatenko@gmail.com", "@andriisoldatenko")
addPerson("Andrii Soluk", "isoluchok@gmail.com", "24501@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andrii Zakharov", "andrii@messagebird.com")
addPerson("Andris Valums", "avalums.spam@linelane.com")
addPerson("Andriy Lytvynov", "lytvynov.a.v@gmail.com", "@awly")
addPerson("Andrzej Żeżel", "andrii.zhezhel@gmail.com", "@zhezhel")
addPerson("André Carvalho", "asantostc@gmail.com", "@andrestc")
addPerson("Andy Balholm", "andy@balholm.com", "6535@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Andy Balholm", "andy@balholm.com", "@andybalholm")
addPerson("Andy Balholm", "andybalholm@gmail.com")
addPerson("Andy Bursavich", "bursavich@google.com")
addPerson("Andy Davis", "andy@bigandian.com", "@bigandian")
addPerson("Andy Finkenstadt", "afinkenstadt@zynga.com", "@afinkenstadt")
addPerson("Andy Lindeman", "andy@lindeman.io")
addPerson("Andy Maloney", "asmaloney@gmail.com", "@asmaloney")
addPerson("Andy Walker", "walkeraj@gmail.com")
addPerson("Anfernee Yongkun Gui", "anfernee.gui@gmail.com", "@anfernee")
addPerson("Angelo Bulfone", "mbulfone@gmail.com", "@boomshroom")
addPerson("Angelo Compagnucci", "angelo.compagnucci@gmail.com")
addPerson("Anh Hai Trinh", "anh.hai.trinh@gmail.com", "@aht")
addPerson("Anit Gandhi", "anitgandhi@gmail.com", "@anitgandhi")
addPerson("Ankit Goyal", "ankit3goyal@gmail.com", "@goyalankit")
addPerson("Anmol Sethi", "anmol@aubble.com", "@nhooyr")
addPerson("Anmol Sethi", "hi@nhooyr.io")
addPerson("Anmol Sethi", "me+git@anmol.io", "@nhooyr")
addPerson("Anmol Sethi", "me@anmol.io", "9620@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Anschel Schaffer-Cohen", "anschelsc@gmail.com", "@anschelsc")
addPerson("Anthony Alves", "cvballa3g0@gmail.com")
addPerson("Anthony Canino", "anthony.canino1@gmail.com", "@anthonycanino1")
addPerson("Anthony Eufemio", "anthony.eufemio@gmail.com", "@tymat")
addPerson("Anthony Martin", "ality@pbrane.org", "5635@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Anthony Martin", "ality@pbrane.org", "@ality")
addPerson("Anthony Pesch", "inolen@gmail.com")
addPerson("Anthony Romano", "anthony.romano@coreos.com")
addPerson("Anthony Sottile", "asottile@umich.edu", "@asottile")
addPerson("Anthony Starks", "ajstarks@gmail.com", "@ajstarks")
addPerson("Antoine Martin", "antoine97.martin@gmail.com", "@alarsyo")
addPerson("Anton Gyllenberg", "anton@iki.fi", "@antong")
addPerson("Antonin Amand", "antonin.amand@gmail.com", "@gwik")
addPerson("Antonio Antelo", "aantelov87@gmail.com")
addPerson("Antonio Bibiano", "antbbn@gmail.com", "@antbbn")
addPerson("Antonio Murdaca", "runcom@redhat.com", "@runcom")
addPerson("Aram Hăvărneanu", "aram@mgk.ro", "5036@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Aram Hăvărneanu", "aram@mgk.ro", "@4ad")
addPerson("Arash Bina", "arash@arash.io")
addPerson("Arash Bina", "arash@arash.io", "@arashbina")
addPerson("Areski Belaid", "areski@gmail.com", "5825@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ariel Mashraki", "ariel@mashraki.co.il", "@a8m")
addPerson("Arlo Breault", "arlolra@gmail.com", "@arlolra")
addPerson("Arnaud Ysmal", "stacktic@netbsd.org", "@stacktic")
addPerson("Arne Hormann", "arnehormann@gmail.com", "@arnehormann")
addPerson("Arnout Engelen", "arnout@bzzt.net")
addPerson("Aron Nopanen", "aron.nopanen@gmail.com", "@aroneous")
addPerson("Artem V. Navrotskiy", "bozaro@gmail.com")
addPerson("Artemiy Ryabinkov", "getmylag@gmail.com")
addPerson("Arthur Khashaev", "arthur@khashaev.ru", "@Invizory")
addPerson("Arthur Mello", "arthur.mello85@gmail.com")
addPerson("Artyom Pervukhin", "artyom.pervukhin@gmail.com", "9870@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Artyom Pervukhin", "artyom.pervukhin@gmail.com", "@artyom")
addPerson("Arvindh Rajesh Tamilmani", "art@a-30.net", "@arvindht")
addPerson("Asad Mehmood", "asad78611@googlemail.com")
addPerson("Ashish Gandhi", "ag@ashishgandhi.org", "@ashishgandhi")
addPerson("Asim Shankar", "asimshankar@gmail.com", "@asimshankar")
addPerson("Atin M", "amalaviy@akamai.com", "@amalaviy")
addPerson("Ato Araki", "ato.araki@gmail.com", "@atotto")
addPerson("Attila Tajti", "attila.tajti@gmail.com")
addPerson("Audrey Lim", "audreylh@gmail.com", "13190@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Audrey Lim", "audreylh@gmail.com", "@audreylim")
addPerson("Audrius Butkevicius", "audrius.butkevicius@gmail.com", "25277@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Audrius Butkevicius", "audrius.butkevicius@gmail.com", "@AudriusButkevicius")
addPerson("Augusto Roman", "aroman@gmail.com", "@augustoroman")
addPerson("Aulus Egnatius Varialus", "varialus@gmail.com", "@varialus")
addPerson("Aurélien Rainone", "aurelien.rainone@gmail.com")
addPerson("Aurélien Rainone", "aurelien.rainone@gmail.com", "@arl")
addPerson("Austin Clements", "austin@google.com", "5167@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Austin Clements", "austin@google.com", "@aclements")
addPerson("Austin J. Alexander", "austinjalexander@gmail.com")
addPerson("Author Name", "aaronstein12@gmail.com", "@aastein")
addPerson("Author Name", "brett.j.merrill94@gmail.com", "@bmerrill42")
addPerson("Author Name", "mikemitchellwebdev@gmail.com")
addPerson("Author: grantseltzer", "grantseltzer@gmail.com")
addPerson("Avelino", "t@avelino.xxx", "8805@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Avelino", "t@avelino.xxx", "@avelino")
addPerson("Awn Umar", "awn@cryptolosophy.org", "21940@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Awn", "awn@cryptolosophy.io")
addPerson("Axel Wagner", "axel.wagner.hh@googlemail.com", "@Merovius")
addPerson("Ayan George", "ayan@ayan.net")
addPerson("Ayke van Laethem", "aykevanlaethem@gmail.com")
addPerson("Aymerick", "aymerick@jehanne.org", "@aymerick")
addPerson("B.G.Adrian", "aditza8@gmail.com")
addPerson("Baiju Muthukadan", "baiju.m.mail@gmail.com", "@baijum")
addPerson("Bakin Aleksey", "kultihell@gmail.com")
addPerson("Balaram Makam", "bmakam.qdt@qualcommdatacenter.com", "25702@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Balaram Makam", "bmakam.qdt@qualcommdatacenter.com", "@bmakam-qdt")
addPerson("Balazs Lecz", "leczb@google.com", "@leczb")
addPerson("Baokun Lee", "nototon@gmail.com", "9646@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Baokun Lee", "nototon@gmail.com", "@oiooj")
addPerson("Bartek Plotka", "bwplotka@gmail.com")
addPerson("Bartosz Modelski", "modelski.bartosz@gmail.com")
addPerson("Bastian Ike", "bastian.ike@gmail.com")
addPerson("Baylee Feore", "baylee.feore@gmail.com")
addPerson("Ben Burkert", "ben@benburkert.com", "5673@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ben Burkert", "ben@benburkert.com", "@benburkert")
addPerson("Ben Fried", "ben.fried@gmail.com", "@benfried")
addPerson("Ben Haines", "bhainesva@gmail.com")
addPerson("Ben Hoyt", "benhoyt@gmail.com", "@benhoyt")
addPerson("Ben Laurie", "ben@links.org", "21925@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ben Lubar", "ben.lubar@gmail.com", "@BenLubar")
addPerson("Ben Lynn", "benlynn@gmail.com", "@blynn")
addPerson("Ben Olive", "sionide21@gmail.com", "@sionide21")
addPerson("Ben Schwartz", "bemasc@google.com", "20251@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ben Schwartz", "bemasc@google.com", "@bemasc")
addPerson("Ben Shi", "powerman1st@163.com", "16935@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ben Shi", "powerman1st@163.com", "@benshi001")
addPerson("Ben Toews", "mastahyeti@gmail.com")
addPerson("Benjamin Black", "b@b3k.us", "@b")
addPerson("Benjamin Cable", "cable.benjamin@gmail.com", "@ladydascalie")
addPerson("Benjamin Hsieh", "tanookiben@users.noreply.github.com")
addPerson("Benjamin Prosnitz", "bprosnitz@google.com", "6965@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Benjamin Prosnitz", "bprosnitz@google.com", "@bprosnitz")
addPerson("Benjamin Wester", "bwester@squareup.com", "@bwester")
addPerson("Benny Siegert", "bsiegert@gmail.com", "5184@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Benny Siegert", "bsiegert@gmail.com", "@bsiegert")
addPerson("Benny Siegert", "bsiegert@google.com", "@bsiegert")
addPerson("Benoit Sigoure", "tsunanet@gmail.com", "9643@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Benoit Sigoure", "tsunanet@gmail.com", "@tsuna")
addPerson("Bernat Moix", "bmoix@bmoix.io")
addPerson("Bernd Fix", "brf@hoi-polloi.org")
addPerson("BigMikes", "giulio.micheloni@gmail.com", "@BigMikes")
addPerson("Bill Neubauer", "wcn@google.com")
addPerson("Bill O'Farrell", "billo@ca.ibm.com", "@wgo")
addPerson("Bill O'Farrell", "billotosyr@gmail.com", "11191@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Bill Prin", "waprin@google.com")
addPerson("Bill Thiede", "couchmoney@gmail.com", "6175@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Bill Thiede", "couchmoney@gmail.com", "@wathiede", "*goog")
addPerson("Bill Zissimopoulos", "billziss@navimatics.com", "@billziss-gh")
addPerson("Billie H. Cleek", "bhcleek@gmail.com", "@bhcleek")
addPerson("Billy Lynch", "wlynch@google.com", "@wlynch")
addPerson("Blain Smith", "blain.smith@gmail.com", "22696@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Blain Smith", "rebelgeek@blainsmith.com", "@blainsmith")
addPerson("Blake Gentry", "blakesgentry@gmail.com", "5683@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Blake Gentry", "blakesgentry@gmail.com", "@bgentry")
addPerson("Blake Mesdag", "blakemesdag@gmail.com")
addPerson("Blake Mizerany", "blake.mizerany@gmail.com", "10551@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Blake Mizerany", "blake.mizerany@gmail.com", "@bmizerany")
addPerson("Blixt", "me@blixt.nyc", "@blixt")
addPerson("Bob B.", "rbriski@gmail.com", "26997@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Bob Potter", "bobby.potter@gmail.com")
addPerson("Bobby DeSimone", "bobbydesimone@gmail.com", "@desimone")
addPerson("Bobby Powers", "bobbypowers@gmail.com", "@bpowers")
addPerson("Bodo Junglas", "bodo.junglas@leanovate.de")
addPerson("Boris Nagaev", "nagaev@google.com")
addPerson("Boris Schrijver", "bschrijver@schubergphilis.com")
addPerson("Borja Clemente", "borja.clemente@gmail.com", "@clebs")
addPerson("Brad Burch", "brad.burch@gmail.com", "@brad-burch")
addPerson("Brad Fitzpatrick", "bradfitz@golang.org", "brad@danga.com", "@bradfitz", "5065@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Brad Jones", "rbjones@google.com")
addPerson("Brad Morgan", "brad@morgabra.com")
addPerson("Brad Whitaker", "brad.whitaker@gmail.com")
addPerson("Braden Bassingthwaite", "bbassingthwaite@vendasta.com")
addPerson("Bradley Kemp", "bradleyjkemp96@gmail.com")
addPerson("Bradley Schoch", "bschoch@gmail.com")
addPerson("Brady Catherman", "brady@gmail.com")
addPerson("Brady Sullivan", "brady@bsull.com", "@d1str0")
addPerson("Brandon Bennett", "bbennett@fb.com", "@brbe")
addPerson("Brandon Bennett", "bbennett@fb.com", "@nemith")
addPerson("Brandon Dyck", "brandon@dyck.us")
addPerson("Brandon Gilmore", "varz@google.com", "@bgilmore")
addPerson("Brandon Gonzalez", "bg@lightstep.com")
addPerson("Brandon Lum", "lumjjb@gmail.com")
addPerson("Brendan Ashworth", "brendan.ashworth@me.com")
addPerson("Brendan Daniel Tracey", "tracey.brendan@gmail.com", "@btracey")
addPerson("Brendan Tracey", "tracey.brendan@gmail.com", "7155@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Brett Cannon", "bcannon@gmail.com", "@brettcannon")
addPerson("Brett Jones", "bjones027@gmail.com")
addPerson("Brian Dellisanti", "briandellisanti@gmail.com", "@briandellisanti")
addPerson("Brian Downs", "brian.downs@gmail.com", "@briandowns")
addPerson("Brian Flanigan", "brian_flanigan@cable.comcast.com")
addPerson("Brian G. Merrell", "bgmerrell@gmail.com", "@bgmerrell")
addPerson("Brian Gitonga Marete", "bgm@google.com", "@marete")
addPerson("Brian Gitonga Marete", "marete@toshnix.com", "@marete")
addPerson("Brian Kennedy", "btkennedy@gmail.com", "@briantkennedy")
addPerson("Brian Kessler", "brian.m.kessler@gmail.com", "20650@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Brian Kessler", "brian.m.kessler@gmail.com", "@bmkessler")
addPerson("Brian Ketelsen", "bketelsen@gmail.com", "@bketelsen")
addPerson("Brian Slesinskya", "skybrian@google.com", "@skybrian")
addPerson("Brian Smith", "ohohvi@gmail.com", "@sirwart")
addPerson("Brian Starke", "brian.starke@gmail.com", "@brianstarke")
addPerson("Brian Starkey", "stark3y@gmail.com")
addPerson("Bruno Clermont", "bruno.clermont@gmail.com")
addPerson("Bryan Alexander", "kozical@msn.com", "@Kozical")
addPerson("Bryan C. Mills", "bcmills@google.com", "6365@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Bryan C. Mills", "bcmills@google.com", "@bcmills")
addPerson("Bryan Chan", "bryan.chan@ca.ibm.com", "@bryanpkc")
addPerson("Bryan Chan", "bryanpkc@gmail.com", "6576@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Bryan Ford", "brynosaurus@gmail.com", "5500@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Bryan Ford", "brynosaurus@gmail.com", "@bford")
addPerson("Bryan Heden", "b.heden@gmail.com", "@hedenface")
addPerson("Bryan Mills", "bcmills@google.com", "@bcmills")
addPerson("Bryan Turley", "bryanturley@gmail.com")
addPerson("Bulat Gaifullin", "gaifullinbf@gmail.com", "@bgaifullin")
addPerson("Burak Guven", "bguven@gmail.com", "@burakguven")
addPerson("Caine Tighe", "arctanofyourface@gmail.com", "@nilnilnil")
addPerson("Caio Marcelo de Oliveira Filho", "caio.oliveira@intel.com", "@cmarcelo")
addPerson("Caio Oliveira", "caio.oliveira@intel.com", "12640@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Caleb Doxsey", "caleb@doxsey.net")
addPerson("Caleb Martinez", "accounts@calebmartinez.com", "@conspicuousClockwork")
addPerson("Caleb Spare", "cespare@gmail.com", "5615@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Caleb Spare", "cespare@gmail.com", "@cespare")
addPerson("Calvin Behling", "calvin.behling@gmail.com")
addPerson("Calvin Leung Huang", "cleung2010@gmail.com")
addPerson("Cameron Howey", "chowey@ualberta.net")
addPerson("Carl Chatfield", "carlchatfield@gmail.com", "@0xfaded")
addPerson("Carl Henrik Lunde", "chlunde@ifi.uio.no")
addPerson("Carl Henrik Lunde", "chlunde@ifi.uio.no", "@chlunde")
addPerson("Carl Jackson", "carl@stripe.com", "@carl-stripe")
addPerson("Carl Johnson", "me@carlmjohnson.net", "12425@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Carl Johnson", "me@carlmjohnson.net", "@carlmjohnson")
addPerson("Carl Mastrangelo", "notcarl@google.com", "@carl-mastrangelo", "carl.mastrangelo@gmail.com", "carlmastrangelo@gmail.com", "12225@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Carlisia Campos", "carlisia@grokkingtech.io", "@carlisia")
addPerson("Carlo Alberto Ferraris", "cafxx@strayorange.com", "11500@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Carlo Alberto Ferraris", "cafxx@strayorange.com", "@CAFxX")
addPerson("Carlos Amedee", "carlos@golang.org", "@cagedmantis", "34411@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Carlos C", "uldericofilho@gmail.com", "@ucirello")
addPerson("Carlos Castillo", "cookieo9@gmail.com", "5141@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Carlos Castillo", "cookieo9@gmail.com", "@cookieo9")
addPerson("Carlos Eduardo Seo", "cseo@linux.vnet.ibm.com", "13015@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Carlos Eduardo Seo", "cseo@linux.vnet.ibm.com", "@ceseo")
addPerson("Carolyn Van Slyck", "me@carolynvanslyck.com", "@carolynvs")
addPerson("Carrie Bynon", "cbynon@gmail.com", "@cbynon")
addPerson("Casey Callendrello", "squeed@gmail.com")
addPerson("Casey Marshall", "casey.marshall@gmail.com", "@cmars")
addPerson("Casey Smith", "smithc@homesandland.com")
addPerson("Cassandra Salisbury", "cls@golang.org")
addPerson("Cassandra Salisbury", "salisburycl@gmail.com")
addPerson("Catalin Nicutar", "cnicutar@google.com")
addPerson("Catalin Nicutar", "cnicutar@google.com", "12526@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Catalin Patulea", "catalinp@google.com", "@cpatulea")
addPerson("Cedric Staub", "cs@squareup.com", "@csstaub")
addPerson("Cezar Espinola", "cezarsa@gmail.com", "9010@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Cezar Sa Espinola", "cezarsa@gmail.com", "@cezarsa")
addPerson("Chad Kunde", "Kunde21@gmail.com")
addPerson("Chad Rosier", "mrosier.qdt@qualcommdatacenter.com", "25690@62eb7196-b449-3ce5-99f1-c037f21e1705", "@mrosier-qdt")
addPerson("ChaiShushan", "chaishushan@gmail.com", "@chai2010")
addPerson("Chance Zibolski", "chance.zibolski@coreos.com")
addPerson("Changsoo Kim", "broodkcs@gmail.com")
addPerson("Channing Kimble-Brown", "channing@golang.org", "@cnoellekb")
addPerson("Charle Demers", "charle.demers@gmail.com")
addPerson("Charles Fenwick Elliott", "Charles@FenwickElliott.io")
addPerson("Charles Kenney", "charlesc.kenney@gmail.com", "@Charliekenney23")
addPerson("Charles Weill", "weill@google.com", "@cweill")
addPerson("Charlie Dorian", "cldorian@gmail.com", "5435@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Charlie Dorian", "cldorian@gmail.com", "@cldorian")
addPerson("Cheng-Lung Sung", "clsung@gmail.com", "@clsung")
addPerson("Cherry Zhang", "cherryyz@google.com", "13315@62eb7196-b449-3ce5-99f1-c037f21e1705", "@cherrymui", "lunaria21@gmail.com", "9670@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Chew Choon Keat", "choonkeat@gmail.com", "@choonkeat")
addPerson("Chintan Sheth", "shethchintan7@gmail.com")
addPerson("Cholerae Hu", "choleraehyq@gmail.com", "15760@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Cholerae Hu", "choleraehyq@gmail.com", "@choleraehyq")
addPerson("Chotepud Teo", "alexrousg@users.noreply.github.com", "@AlexRouSg")
addPerson("Chris Ball", "chris@printf.net", "@cjb")
addPerson("Chris Biscardi", "chris@christopherbiscardi.com", "@ChristopherBiscardi")
addPerson("Chris Broadfoot", "cbro@golang.org", "7935@62eb7196-b449-3ce5-99f1-c037f21e1705", "cbro@google.com", "@broady", "7440@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Chris Dollin", "ehog.hedge@gmail.com", "@ehedgehog")
addPerson("Chris Donnelly", "cmd@ceedon.io")
addPerson("Chris Duarte", "csduarte@gmail.com")
addPerson("Chris Farmiloe", "chrisfarms@gmail.com", "@chrisfarms")
addPerson("Chris H (KruftMaster)", "chrusty@gmail.com")
addPerson("Chris Hines", "chris.cs.guy@gmail.com", "7850@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Chris Hines", "chris.cs.guy@gmail.com", "@ChrisHines")
addPerson("Chris J Arges", "christopherarges@gmail.com")
addPerson("Chris Jones", "chris@cjones.org", "@cjyar")
addPerson("Chris K", "c@chrisko.ch")
addPerson("Chris Kastorff", "encryptio@gmail.com", "@encryptio")
addPerson("Chris Lennert", "calennert@gmail.com", "@calennert")
addPerson("Chris Lewis", "cflewis@golang.org")
addPerson("Chris Lewis", "cflewis@google.com")
addPerson("Chris Liles", "caveryliles@gmail.com", "26297@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Chris Manghane", "cmang@golang.org", "5130@62eb7196-b449-3ce5-99f1-c037f21e1705", "@paranoiacblack")
addPerson("Chris Marchesi", "chrism@vancluevertech.com", "@vancluever")
addPerson("Chris McGee", "newton688@gmail.com", "15452@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Chris McGee", "sirnewton_01@yahoo.ca", "@sirnewton01")
addPerson("Chris Raynor", "raynor@google.com")
addPerson("Chris Roche", "rodaine@gmail.com", "@rodaine")
addPerson("Chris Stockton", "chrisstocktonaz@gmail.com")
addPerson("Chris Zou", "chriszou@ca.ibm.com", "@ChrisXZou")
addPerson("ChrisALiles", "caveryliles@gmail.com", "@ChrisALiles")
addPerson("Christian Alexander", "christian@linux.com", "@ChristianAlexander")
addPerson("Christian Couder", "chriscool@tuxfamily.org", "@chriscool")
addPerson("Christian Couder", "christian.couder@gmail.com", "11200@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Christian Haas", "christian.haas@sevensuns.at")
addPerson("Christian Himpel", "chressie@googlemail.com", "@chressie")
addPerson("Christian Mauduit", "ufoot@ufoot.org")
addPerson("Christian Pellegrin", "chri@evolware.org")
addPerson("Christian Simon", "simon@swine.de")
addPerson("Christoph Hack", "christoph@tux21b.org", "@tux21b")
addPerson("Christophe Kamphaus", "christophe.kamphaus@gmail.com")
addPerson("Christophe Taton", "taton@google.com")
addPerson("Christopher Boumenot", "chrboum@microsoft.com")
addPerson("Christopher Cahoon", "chris.cahoon@gmail.com", "@ccahoon")
addPerson("Christopher Guiney", "chris@guiney.net", "@chrisguiney")
addPerson("Christopher Koch", "chrisko@google.com", "@hugelgupf")
addPerson("Christopher Nelson", "nadiasvertex@gmail.com", "11675@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Christopher Nelson", "nadiasvertex@gmail.com", "@nadiasvertex")
addPerson("Christopher Nielsen", "m4dh4tt3r@gmail.com", "@m4dh4tt3r")
addPerson("Christopher Redden", "christopher.redden@gmail.com", "@topherredden")
addPerson("Christopher Wedgwood", "cw@f00f.org", "@cwedgwood")
addPerson("Christos Zoulas", "christos@zoulas.com", "@zoulasc")
addPerson("Christos Zoulas", "zoulasc@gmail.com")
addPerson("Christy Perez", "christy@linux.vnet.ibm.com", "@clnperez")
addPerson("Cindy Pallares", "cindy@gitlab.com")
addPerson("Cixtor", "cixtords@gmail.com", "@cixtor")
addPerson("Claire Wang", "cw773@cornell.edu")
addPerson("Clement Courbet", "courbet@google.com")
addPerson("Clement Skau", "clementskau@gmail.com", "@cskau")
addPerson("Clément Chigot", "clement.chigot@atos.net", "@Helflym")
addPerson("Clément Denis", "clement@altirnao.com")
addPerson("Coda Hale", "coda.hale@gmail.com")
addPerson("Colby Ranger", "cranger@google.com", "@crangeratgoogle")
addPerson("Colin Cross", "ccross@android.com", "@colincross")
addPerson("Colin Edwards", "colin@recursivepenguin.com", "@DDRBoxman")
addPerson("Colin Kennedy", "moshen.colin@gmail.com", "@moshen")
addPerson("Colin", "clr@google.com")
addPerson("Connor McGuinness", "connor.mcguinness@izettle.com")
addPerson("Conrad Irwin", "conrad.irwin@gmail.com", "@ConradIrwin")
addPerson("Conrad Meyer", "cemeyer@cs.washington.edu", "@cemeyer")
addPerson("Conrad Taylor", "conradwt@gmail.com")
addPerson("Conrado Gouvea", "conradoplg@gmail.com", "@conradoplg")
addPerson("Constantijn Schepens", "constantijnschepens@gmail.com")
addPerson("Constantin Konstantinidis", "constantinkonstantinidis@gmail.com", "26957@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Constantin Konstantinidis", "constantinkonstantinidis@gmail.com", "@iWdGo")
addPerson("Corey Thomasson", "cthom.lists@gmail.com", "@cthom06")
addPerson("Cory LaNou", "cory@lanou.com")
addPerson("Cosmos Nicolaou", "cnicolaou@grailbio.com")
addPerson("Costin Chirvasuta", "ctin@google.com", "@ct1n")
addPerson("Craig Citro", "craigcitro@google.com", "@craigcitro")
addPerson("Craig Peterson", "cpeterson@stackoverflow.com")
addPerson("Cristian Staretu", "unclejacksons@gmail.com", "@unclejack")
addPerson("Cuihtlauac ALVARADO", "cuihtlauac.alvarado@orange.com", "@cuihtlauac")
addPerson("Cuong Manh Le", "cuong.manhle.vn@gmail.com", "14665@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Cyrill Schumacher", "cyrill@schumacher.fm", "@SchumacherFM")
addPerson("Daker Fernandes Pinheiro", "daker.fernandes.pinheiro@intel.com", "@dakerfp")
addPerson("Dalton Scott", "dscott.jobs@gmail.com")
addPerson("Damian Gryski", "damian@gryski.com", "@dgryski")
addPerson("Damian Gryski", "dgryski@gmail.com", "7050@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Damian Gryski", "dgryski@gmail.com", "@dgryski")
addPerson("Damien Lespiau", "damien.lespiau@gmail.com", "13855@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Damien Lespiau", "damien.lespiau@intel.com", "damien.lespiau@gmail.com", "@dlespiau")
addPerson("Damien Mathieu", "42@dmathieu.com", "@dmathieu")
addPerson("Damien Neil", "dneil@google.com", "5305@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Damien Neil", "dneil@google.com", "@neild")
addPerson("Damien Tournoud", "damien@platform.sh")
addPerson("Dan Adkins", "dadkins@gmail.com")
addPerson("Dan Ballard", "dan@mindstab.net")
addPerson("Dan Barry", "dan@bakineggs.com")
addPerson("Dan Bentley", "dtbentley@gmail.com")
addPerson("Dan Caddigan", "goldcaddy77@gmail.com", "@goldcaddy77")
addPerson("Dan Callahan", "dan.callahan@gmail.com", "@callahad")
addPerson("Dan Ertman", "dtertman@gmail.com")
addPerson("Dan Goldsmith", "dan@d2g.org.uk")
addPerson("Dan Harrington", "harringtond@google.com")
addPerson("Dan Jacques", "dnj@google.com")
addPerson("Dan Johnson", "computerdruid@google.com", "@ComputerDruid")
addPerson("Dan Kortschak", "dan.kortschak@adelaide.edu.au", "6480@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dan Kortschak", "dan@kortschak.io", "@kortschak")
addPerson("Dan Luedtke", "mail@danrl.com")
addPerson("Dan Moore", "mooreds@gmail.com")
addPerson("Dan Peterson", "dpiddy@gmail.com", "5665@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dan Peterson", "dpiddy@gmail.com", "@danp")
addPerson("Dan Richards", "dan.m.richards@gmail.com")
addPerson("Dan Richelson", "drichelson@gmail.com")
addPerson("Dan Sinclair", "dan.sinclair@gmail.com", "@dj2")
addPerson("Dana Hoffman", "danahoffman@google.com")
addPerson("Daniel Cormier", "daniel.cormier@gmail.com")
addPerson("Daniel Fleischman", "danielfleischman@gmail.com", "@danielf")
addPerson("Daniel Heckrath", "d.heckrath@maple-apps.com")
addPerson("Daniel Hultqvist", "daniel@typedef.se")
addPerson("Daniel Ingram", "ingramds@appstate.edu", "@daniel-s-ingram")
addPerson("Daniel Johansson", "dajo2002@gmail.com", "9663@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Daniel Johansson", "dajo2002@gmail.com", "@dajoo75")
addPerson("Daniel Kerwin", "d.kerwin@gini.net", "@dkerwin")
addPerson("Daniel Krech", "eikeon@eikeon.com", "@eikeon")
addPerson("Daniel Mahu", "dmahu@google.com")
addPerson("Daniel Martí", "mvdan@mvdan.cc", "13550@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Daniel Martí", "mvdan@mvdan.cc", "@mvdan")
addPerson("Daniel Morsing", "daniel.morsing@gmail.com", "5310@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Daniel Morsing", "daniel.morsing@gmail.com", "@DanielMorsing")
addPerson("Daniel Nephin", "dnephin@gmail.com", "@dnephin")
addPerson("Daniel Ortiz Pereira da Silva", "daniel.particular@gmail.com", "@dopsilva")
addPerson("Daniel Skinner", "daniel@dasa.cc", "10675@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Daniel Skinner", "daniel@dasa.cc", "@dskinner")
addPerson("Daniel Speichert", "daniel@speichert.pl", "@DSpeichert")
addPerson("Daniel Theophanes", "kardianos@gmail.com", "5080@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Daniel Theophanes", "kardianos@gmail.com", "@kardianos")
addPerson("Daniel Toebe", "dtoebe@gmail.com")
addPerson("Daniel Upton", "daniel@floppy.co", "@boxofrad")
addPerson("Daniel Wagner-Hall", "dawagner@gmail.com")
addPerson("Daniel", "danielfs.ti@gmail.com")
addPerson("Daniel, Dao Quang Minh", "dqminh89@gmail.com")
addPerson("Daniela Petruzalek", "daniela.petruzalek@gmail.com", "@danicat")
addPerson("Daniël de Kok", "me@danieldk.eu", "@danieldk")
addPerson("Danny Hadley", "dadleyy@gmail.com")
addPerson("Danny Rosseau", "daniel.rosseau@gmail.com")
addPerson("Danny Wyllie", "wylliedanny@gmail.com")
addPerson("Danny Yoo", "dannyyoo@google.com")
addPerson("Dante Shareiff", "prophesional@gmail.com")
addPerson("Darien Raymond", "admin@v2ray.com")
addPerson("Darien Raymond", "admin@v2ray.com", "@DarienRaymond")
addPerson("Darren Elwood", "darren@textnode.com", "@textnode")
addPerson("Darron Froese", "dfroese@salesforce.com")
addPerson("Darshan Parajuli", "parajulidarshan@gmail.com", "@darshanparajuli")
addPerson("Datong Sun", "dndx@idndx.com", "@dndx")
addPerson("Dave Borowitz", "dborowitz@google.com", "@dborowitz")
addPerson("Dave Cheney", "dave@cheney.net", "5150@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dave Cheney", "dave@cheney.net", "@davecheney")
addPerson("Dave Day", "djd@golang.org", "5170@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dave Day", "djd@golang.org", "@okdave")
addPerson("Dave MacFarlane", "driusan@gmail.com")
addPerson("Dave Russell", "forfuncsake@gmail.com", "@forfuncsake")
addPerson("Dave Setzke", "daveset73@gmail.com")
addPerson("Dave Wyatt", "dlwyatt115@gmail.com")
addPerson("David Anderson", "danderson@google.com", "13070@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Anderson", "danderson@google.com", "@danderson")
addPerson("David Barnett", "dbarnett@google.com", "@dbarnett")
addPerson("David Bartley", "bartle@stripe.com")
addPerson("David Benjamin", "davidben@google.com", "7805@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Benjamin", "davidben@google.com", "@davidben")
addPerson("David Benque", "dbenque@gmail.com")
addPerson("David Brophy", "dave@brophy.uk", "@dave")
addPerson("David Bürgin", "676c7473@gmail.com", "@glts")
addPerson("David Calavera", "david.calavera@gmail.com", "@calavera")
addPerson("David Carlier", "devnexen@gmail.com", "@devnexen")
addPerson("David Chase", "drchase@google.com", "7061@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Chase", "drchase@google.com", "@dr2chase")
addPerson("David Crawshaw", "crawshaw@golang.org", "5030@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Crawshaw", "crawshaw@golang.org", "@crawshaw")
addPerson("David Deng", "daviddengcn@gmail.com")
addPerson("David Finkel", "david.finkel@gmail.com")
addPerson("David Forsythe", "dforsythe@gmail.com", "@dforsyth")
addPerson("David G. Andersen", "dave.andersen@gmail.com", "@dave-andersen")
addPerson("David Glasser", "glasser@meteor.com", "9556@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Glasser", "glasser@meteor.com", "@glasser")
addPerson("David Good", "dgood@programminggoody.com")
addPerson("David Heuschmann", "heuschmann.d@gmail.com", "@dddent")
addPerson("David Howden", "dhowden@gmail.com")
addPerson("David Hubbard", "dsp@google.com")
addPerson("David Jakob Fritz", "david.jakob.fritz@gmail.com", "@djfritz")
addPerson("David Kitchen", "david@buro9.com")
addPerson("David Lazar", "lazard@golang.org", "16260@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Lazar", "lazard@golang.org", "@davidlazar")
addPerson("David Leon Gil", "coruus@gmail.com", "5830@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Leon Gil", "coruus@gmail.com", "@coruus")
addPerson("David Ndungu", "dnjuguna@gmail.com")
addPerson("David NewHamlet", "david@newhamlet.com", "@wheelcomplex")
addPerson("David Newhamlet", "david@newhamlet.com", "13738@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Presotto", "presotto@gmail.com", "@presotto")
addPerson("David R. Jenni", "david.r.jenni@gmail.com", "@davidrjenni")
addPerson("David R. Jenni", "davidrjenni@protonmail.com", "6180@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Sansome", "me@davidsansome.com")
addPerson("David Stainton", "dstainton415@gmail.com", "@david415")
addPerson("David Symonds", "dsymonds@golang.org", "5045@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Symonds", "dsymonds@golang.org", "@dsymonds")
addPerson("David Thomas", "davidthomas426@gmail.com", "@davidthomas426")
addPerson("David Timm", "dtimm@pivotal.io", "@dtimm")
addPerson("David Titarenco", "david.titarenco@gmail.com", "@dvx")
addPerson("David Tolpin", "david.tolpin@gmail.com", "@dtolpin")
addPerson("David Url", "david@urld.io", "26506@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David Url", "david@urld.io", "@urld")
addPerson("David Volquartz Lebech", "david@lebech.info")
addPerson("David Wimmer", "davidlwimmer@gmail.com", "@dwimmer")
addPerson("David du Colombier", "0intro@gmail.com")
addPerson("David du Colombier", "0intro@gmail.com", "5060@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("David du Colombier", "0intro@gmail.com", "@0intro")
addPerson("Davies Liu", "davies.liu@gmail.com", "@davies")
addPerson("Davor Kapsa", "davor.kapsa@gmail.com", "@dvrkps")
addPerson("Ddo", "joeddo89@gmail.com")
addPerson("Dean Prichard", "dean.prichard@gmail.com", "@zard49")
addPerson("Deepak Jois", "deepak.jois@gmail.com", "@deepakjois")
addPerson("Deepali Raina", "deepali.raina@gmail.com")
addPerson("Denis Bernard", "db047h@gmail.com", "@db47h")
addPerson("Denis Nagorny", "denis.nagorny@intel.com", "10734@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Denis Nagorny", "denis.nagorny@intel.com", "@dvnagorny")
addPerson("Dennis Kuhnert", "mail.kuhnert@gmail.com", "26874@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dennis Kuhnert", "mail.kuhnert@gmail.com", "@kyroy")
addPerson("Denys Honsiorovskyi", "honsiorovskyi@gmail.com", "@honsiorovskyi")
addPerson("Denys Smirnov", "denis.smirnov.91@gmail.com", "@dennwc")
addPerson("Derek Bruening", "bruening@google.com")
addPerson("Derek Buitenhuis", "derek.buitenhuis@gmail.com", "@dwbuiten")
addPerson("Derek Che", "drc@yahoo-inc.com")
addPerson("Derek Che", "drc@yahoo-inc.com", "5750@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Derek McGowan", "derek@mcgstyle.net")
addPerson("Derek Parker", "parkerderek86@gmail.com", "@derekparker")
addPerson("Derek Perkins", "derek@derekperkins.com")
addPerson("Derek Phan", "derekphan94@gmail.com", "@dphan72")
addPerson("Derek Shockey", "derek.shockey@gmail.com", "@derelk")
addPerson("Dev Ojha", "dojha12@gmail.com", "27059@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Deval Shah", "devalshah88@gmail.com")
addPerson("Devon H. O'Dell", "devon.odell@gmail.com", "25956@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Devon H. O'Dell", "devon.odell@gmail.com", "@dhobsd")
addPerson("Dhaivat Pandit", "dhaivatpandit@gmail.com", "15030@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dhaivat Pandit", "dhaivatpandit@gmail.com", "@ceocoder")
addPerson("Dhananjay Nakrani", "dhananjayn@google.com")
addPerson("Dhananjay Nakrani", "dhananjayn@google.com", "15558@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dhananjay Nakrani", "dhananjaynakrani@gmail.com", "@dhananjay92")
addPerson("Dhiru Kholia", "dhiru.kholia@gmail.com", "@kholia")
addPerson("Dhruvdutt Jadhav", "dhruvdutt.jadhav@gmail.com", "@dhruvdutt")
addPerson("Di Xiao", "dixiao@google.com")
addPerson("Di Xiao", "xiaodi.larry@gmail.com")
addPerson("Didier Spezia", "didier.06@gmail.com", "7795@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Didier Spezia", "didier.06@gmail.com", "@dspezia")
addPerson("Diego Saint Esteben", "diego@saintesteben.me")
addPerson("Diego Siqueira", "diego9889@gmail.com", "@DiSiqueira")
addPerson("Dieter Plaetinck", "dieter@raintank.io")
addPerson("Dieter Plaetinck", "dieter@raintank.io", "@Dieterbe")
addPerson("Dimitri Tcaciuc", "dtcaciuc@gmail.com", "@dtcaciuc")
addPerson("Dimitrios Arethas", "darethas@gmail.com")
addPerson("Dina Garmash", "dgrmsh@gmail.com", "@dgrmsh")
addPerson("Dinesh Kumar", "dinesh.kumar@go-jek.com")
addPerson("Diogo Pinela", "diogoid7400@gmail.com")
addPerson("Diogo Pinela", "diogoid7400@gmail.com", "16943@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Diogo Pinela", "diogoid7400@gmail.com", "@dpinela")
addPerson("Dirk Gadsden", "dirk@esherido.com", "@dirk")
addPerson("Diwaker Gupta", "diwakergupta@gmail.com", "@diwakergupta")
addPerson("Dmitri Popov", "operator@cv.dp-net.com", "@pin")
addPerson("Dmitri Shuralyov", "dmitshur@golang.org", "dmitri@shuralyov.com", "shurcool@gmail.com", "@dmitshur", "6005@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dmitriy Dudkin", "dudkin.dmitriy@gmail.com", "@tmwh")
addPerson("Dmitriy", "dchenk@users.noreply.github.com")
addPerson("Dmitry Chestnykh", "dchest@gmail.com", "@dchest")
addPerson("Dmitry Doroginin", "doroginin@gmail.com", "@doroginin")
addPerson("Dmitry Mottl", "dmitry.mottl@gmail.com", "@Mottl")
addPerson("Dmitry Neverov", "dmitry.neverov@gmail.com", "@nd")
addPerson("Dmitry Pokidov", "dooman87@gmail.com")
addPerson("Dmitry Savintsev", "dsavints@gmail.com", "6190@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dmitry Savintsev", "dsavints@gmail.com", "@dmitris")
addPerson("Dmitry Vyukov", "dvyukov@google.com", "5400@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dmitry Vyukov", "dvyukov@google.com", "@dvyukov")
addPerson("DocMerlin", "landivar@gmail.com")
addPerson("Dominic Barnes", "dominic@dbarnes.info")
addPerson("Dominic Green", "dominicgreen1@gmail.com")
addPerson("Dominik Honnef", "dominik@honnef.co", "5020@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dominik Honnef", "dominik@honnef.co", "@dominikh")
addPerson("Dominik Vogt", "vogt@linux.vnet.ibm.com", "6065@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dominik Vogt", "vogt@linux.vnet.ibm.com", "@vogtd")
addPerson("Don Byington", "don@dbyington.com", "@dbyington")
addPerson("Donald Huang", "don.hcd@gmail.com")
addPerson("Dong-hee Na", "donghee.na92@gmail.com", "17352@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Dong-hee Na", "donghee.na92@gmail.com", "@corona10")
addPerson("Donovan Hide", "donovanhide@gmail.com", "@donovanhide")
addPerson("Doug Evans", "dje@google.com")
addPerson("Doug Fawley", "dfawley@google.com")
addPerson("Dragoslav Mitrinovic", "fdm224@motorola.com")
addPerson("Drew Flower", "drewvanstone@gmail.com", "@drewvanstone")
addPerson("Drew Hintz", "adhintz@google.com", "@adhintz")
addPerson("Duco van Amstel", "duco@improbable.io")
addPerson("Duncan Holm", "mail@frou.org", "@frou")
addPerson("Dusan Kasan", "me@dusankasan.com")
addPerson("Dustin Carlino", "dcarlino@google.com")
addPerson("Dustin Shields-Cloues", "dcloues@gmail.com", "@dcloues")
addPerson("Dylan Carney", "dcarney@gmail.com")
addPerson("Dylan Waits", "dylan@waits.io", "@waits")
addPerson("EKR", "ekr@rtfm.com", "@ekr")
addPerson("Edan B", "3d4nb3@gmail.com", "@edanbe")
addPerson("Eddie Ringle", "eddie@ringle.io")
addPerson("Eden Li", "eden.li@gmail.com", "@eden")
addPerson("Edson Medina", "edsonmedina@gmail.com")
addPerson("EduRam", "eduardo.ramalho@gmail.com", "@EduRam")
addPerson("Eduard Urbach", "e.urbach@gmail.com")
addPerson("Eduard Urbach", "e.urbach@gmail.com", "@blitzprog")
addPerson("Edward Muller", "edwardam@interlix.com", "9641@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Edward Muller", "edwardam@interlix.com", "@freeformz")
addPerson("Egon Elbre", "egonelbre@gmail.com", "6785@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Egon Elbre", "egonelbre@gmail.com", "@egonelbre")
addPerson("Ehden Sinai", "ehdens@gmail.com")
addPerson("Ehren Kret", "ehren.kret@gmail.com", "@eakret")
addPerson("Eitan Adler", "lists@eitanadler.com", "@grimreaper")
addPerson("Eivind Uggedal", "eivind@uggedal.com", "@uggedal")
addPerson("Elbert Fliek", "efliek@gmail.com", "@Nr90")
addPerson("Eldar Rakhimberdin", "ibeono@gmail.com")
addPerson("Elen Eisendle", "elen@eisendle.ee")
addPerson("Elena Grahovac", "elena@grahovac.me")
addPerson("Elias Naur", "mail@eliasnaur.com", "@eliasnaur")
addPerson("Elias Naur", "mail@eliasnaur.com", "elias.naur@gmail.com", "@eliasnaur", "7435@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Elliot Morrison-Reed", "elliotmr@gmail.com", "@elliotmr")
addPerson("Emanuele Iannone", "emanuele@fondani.it")
addPerson("Emerson Lin", "linyintor@gmail.com", "21970@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Emil Hessman", "emil@hessman.se", "5555@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Emil Hessman", "emil@hessman.se", "c.emil.hessman@gmail.com", "@ceh")
addPerson("Emmanuel Odeke", "emm.odeke@gmail.com", "5137@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Emmanuel Odeke", "emm.odeke@gmail.com", "@odeke-em")
addPerson("Emmanuel Odeke", "emmanuel@orijtech.com", "27585@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Emmanuel Odeke", "odeke@ualberta.ca")
addPerson("Emmanuel Odeke", "odeke@ualberta.ca", "5735@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Eno Compton", "enocom@google.com")
addPerson("Enrico Candino", "enrico.candino@gmail.com")
addPerson("Eoghan Sherry", "ejsherry@gmail.com", "@ejsherry")
addPerson("Eric Adams", "ercadams@gmail.com")
addPerson("Eric Brown", "browne@vmware.com")
addPerson("Eric Chiang", "eric.chiang.m@gmail.com", "@ericchiang")
addPerson("Eric Clark", "zerohp@gmail.com", "@eclark")
addPerson("Eric Daniels", "eric@erdaniels.com", "25196@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Eric Daniels", "eric@erdaniels.com", "@edaniels")
addPerson("Eric Dube", "eric.alex.dube@gmail.com")
addPerson("Eric Engestrom", "eric@engestrom.ch", "@1ace")
addPerson("Eric Garrido", "ekg@google.com", "@minusnine")
addPerson("Eric Hopper", "hopper@omnifarious.org")
addPerson("Eric Koleda", "ekoleda+devrel@google.com")
addPerson("Eric Lagergren", "ericscottlagergren@gmail.com", "@ericlagergren")
addPerson("Eric Lagergren", "eric@ericlagergren.com", "@ericlagergren")
addPerson("Eric Lagergren", "ericscottlagergren@gmail.com", "7276@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Eric Milliken", "emilliken@gmail.com", "@emilliken")
addPerson("Eric Pauley", "eric@pauley.me", "@ericpauley")
addPerson("Eric Ponce", "tricokun@gmail.com", "@trico")
addPerson("Eric Roshan-Eisner", "eric.d.eisner@gmail.com", "@eisner")
addPerson("Eric Rykwalder", "e.rykwalder@gmail.com", "@erykwalder")
addPerson("Eric Schow", "eric.schow@gmail.com")
addPerson("Erik Aigner", "aigner.erik@gmail.com", "@eaigner")
addPerson("Erik Dubbelboer", "erik@dubbelboer.com", "8976@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Erik Dubbelboer", "erik@dubbelboer.com", "@erikdubbelboer")
addPerson("Erik St. Martin", "alakriti@gmail.com", "@erikstmartin")
addPerson("Erik Staab", "estaab@google.com", "@erikus")
addPerson("Erik Westrup", "erik.westrup@gmail.com", "@erikw")
addPerson("Erin Call", "hello@erincall.com")
addPerson("Erin Masatsugu", "erin.masatsugu@gmail.com", "@emasatsugu")
addPerson("Ernest Chiang", "ernest_chiang@htc.com")
addPerson("Erwin Oegema", "blablaechthema@hotmail.com", "@diamondo25")
addPerson("Esko Luontola", "esko.luontola@gmail.com", "@orfjackal")
addPerson("Etai Lev Ran", "etail@il.ibm.com")
addPerson("Ethan Burns", "eaburns@google.com")
addPerson("Ethan Miller", "eamiller@us.ibm.com", "@millere")
addPerson("Euan Kemp", "euank@euank.com", "@euank")
addPerson("Eugene Kalinin", "e.v.kalinin@gmail.com", "12380@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Eugene Kalinin", "e.v.kalinin@gmail.com", "@ekalinin")
addPerson("Evan Broder", "evan@stripe.com", "@evan-stripe")
addPerson("Evan Brown", "evanbrown@google.com")
addPerson("Evan Brown", "evanbrown@google.com", "9260@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Evan Brown", "evanbrown@google.com", "@evandbrown")
addPerson("Evan Farrar", "evanfarrar@gmail.com")
addPerson("Evan Hicks", "evan.hicks2@gmail.com", "@FearlessDestiny")
addPerson("Evan Jones", "ej@evanjones.ca", "@evanj")
addPerson("Evan Klitzke", "evan@eklitzke.org")
addPerson("Evan Klitzke", "evan@eklitzke.org", "@eklitzke")
addPerson("Evan Kroske", "evankroske@google.com", "@evankroske")
addPerson("Evan Martin", "evan.martin@gmail.com", "@evmar")
addPerson("Evan Phoenix", "evan@phx.io", "6330@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Evan Phoenix", "evan@phx.io", "@evanphx")
addPerson("Evan Shaw", "chickencha@gmail.com")
addPerson("Evan Shaw", "edsrzf@gmail.com", "@edsrzf")
addPerson("Evgeniy Polyakov", "zbr@ioremap.net")
addPerson("Evgeniy Polyakov", "zbr@ioremap.net", "17055@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Evgeniy Polyakov", "zbr@ioremap.net", "@bioothod")
addPerson("Ewan Chou", "coocood@gmail.com", "@coocood")
addPerson("Eyal Posener", "posener@gmail.com")
addPerson("Fab>rizio (Misto) Milo", "mistobaan@gmail.com", "@Mistobaan")
addPerson("Fabian Wickborn", "fabian@wickborn.net", "@fawick")
addPerson("Fabian", "fabian@youremail.eu")
addPerson("Fabien Silberstein", "silberfab@gmail.com")
addPerson("Fabio Alessandro Locati", "me@fale.io")
addPerson("Faiyaz Ahmed", "ahmedf@vmware.com", "@fdawg4l")
addPerson("Fan Hongjian", "fan.howard@gmail.com", "@fango")
addPerson("Fan Jiang", "fan.torchz@gmail.com")
addPerson("Fangming Fang", "Fangming.Fang@arm.com", "19276@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Fangming.Fang", "fangming.fang@arm.com", "@zorrorffm")
addPerson("Fatih Arslan", "fatih@arslan.io", "@fatih")
addPerson("Fatih Arslan", "ftharsln@gmail.com", "@fatih")
addPerson("Fazal Majid", "majid@apsalar.com")
addPerson("Fazlul Shahriar", "fshahriar@gmail.com", "@fhs")
addPerson("Federico Simoncelli", "fsimonce@redhat.com", "@simon3z")
addPerson("Fedor Indutny", "fedor@indutny.com", "@indutny")
addPerson("Felix Kollmann", "felix.kollmann@twinpoint.de")
addPerson("Felix Kollmann", "fk@konsorten.de")
addPerson("Felix Kollmann", "mail@fkollmann.de", "26861@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Feng Liyuan", "darktemplar.f@gmail.com")
addPerson("Filip Gruszczyński", "gruszczy@gmail.com", "17532@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Filip Gruszczyński", "gruszczy@gmail.com", "@gruszczy")
addPerson("Filip Haglund", "drathier@users.noreply.github.com")
addPerson("Filip Ochnik", "filip.ochnik@gmail.com")
addPerson("Filip Stanis", "fstanis@google.com")
addPerson("Filippo Valsorda", "filippo@golang.org", "11715@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Filippo Valsorda", "hi@filippo.io", "@FiloSottile", "filippo@cloudflare.com")
addPerson("Filippo Valsorda", "6195@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Firmansyah Adiputra", "frm.adiputra@gmail.com", "@frm-adiputra")
addPerson("Florian Forster", "octo@google.com", "@octo")
addPerson("Florian Uekermann", "florian@uekermann.me", "13410@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Florian Uekermann", "florian@uekermann.me", "@FlorianUekermann")
addPerson("Florian Uekermann", "florian@uekermann.me", "@MaVo159")
addPerson("Florian Weimer", "fw@deneb.enyo.de", "@fweimer")
addPerson("Florian", "sinnlosername@users.noreply.github.com")
addPerson("Florian", "sinnlosername@users.noreply.github.com", "@sinnlosername")
addPerson("Florin Patan", "florinpatan@gmail.com", "6473@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Florin Patan", "florinpatan@gmail.com", "@dlsniper")
addPerson("Ford Hurley", "ford.hurley@gmail.com", "@fordhurley")
addPerson("FourSeventy", "msiggy@gmail.com")
addPerson("Francesc Campoy Flores", "campoy@golang.org", "5955@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Francesc Campoy Flores", "campoy@google.com", "7455@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Francesc Campoy", "campoy@golang.org", "campoy@google.com", "@campoy")
addPerson("Francesc Campoy", "francesc@campoy.cat")
addPerson("Francisco Claude", "fclaude@recoded.cl", "@fclaude")
addPerson("Francisco Rojas", "francisco.rojas.gallegos@gmail.com", "@frojasg")
addPerson("Francisco Souza", "franciscossouza@gmail.com", "@fsouza")
addPerson("Francisco Souza", "fsouza@users.noreply.github.com")
addPerson("Frank Rehwinkel", "frankrehwinkel@gmail.com")
addPerson("Frank Schroeder", "frank.schroeder@gmail.com")
addPerson("Frank Schroeder", "frank.schroeder@gmail.com", "@magiconair")
addPerson("Frank Schröder", "frank.schroeder@gmail.com", "11300@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Frank Somers", "fsomers@arista.com", "@somersf")
addPerson("Franz Bettag", "franz@bett.ag")
addPerson("Fred Carle", "fred.carle@thorium90.io")
addPerson("Frederick Kelly Mayle III", "frederickmayle@gmail.com", "@fkm3")
addPerson("Frederik Ring", "frederik.ring@gmail.com")
addPerson("Fredrik Enestad", "fredrik.enestad@soundtrackyourbrand.com", "@fredr")
addPerson("Fredrik Forsmo", "fredrik.forsmo@gmail.com", "@frozzare")
addPerson("Fredrik Wallgren", "fredrik.wallgren@gmail.com")
addPerson("Frits van Bommel", "fvbommel@gmail.com", "13460@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Frits van Bommel", "fvbommel@gmail.com", "@fvbommel")
addPerson("Frédéric Guillot", "frederic.guillot@gmail.com", "@fguillot")
addPerson("Fumitoshi Ukai", "ukai@google.com", "@ukai")
addPerson("G. Hussain Chinoy", "ghchinoy@gmail.com", "@ghchinoy")
addPerson("Gaal Yahas", "gaal@google.com")
addPerson("Gabe Dalay", "gabedalay@gmail.com")
addPerson("Gabriel Aszalos", "gabriel.aszalos@gmail.com", "5465@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Gabriel Aszalos", "gabriel.aszalos@gmail.com", "@gbbr")
addPerson("Gabriel Nicolas Avellaneda", "avellaneda.gabriel@gmail.com", "@GabrielNicolasAvellaneda")
addPerson("Gabriel Rosenhouse", "rosenhouse@gmail.com")
addPerson("Gabriel Russell", "gabriel.russell@gmail.com", "@wiccatech")
addPerson("Gabríel Arthúr Pétursson", "gabriel@system.is", "@polarina")
addPerson("Gareth Paul Jones", "gpj@foursquare.com", "@garethpaul")
addPerson("Garret Kelly", "gdk@google.com")
addPerson("Gary Burd", "gary@beagledreams.com", "@garyburd")
addPerson("Gary Elliott", "garyelliott@google.com")
addPerson("Gaurish Sharma", "contact@gaurishsharma.com", "@gaurish")
addPerson("Gautam Dey", "gautam.dey77@gmail.com")
addPerson("Gautham Thambidorai", "gautham.dorai@gmail.com", "@gauthamt")
addPerson("Gauthier Jolly", "gauthier.jolly@gmail.com")
addPerson("Geert-Johan Riemer", "gjr19912@gmail.com")
addPerson("Genevieve Luyt", "genevieve.luyt@gmail.com", "@genevieveluyt")
addPerson("Geoff Berry", "gberry.qdt@qualcommdatacenter.com", "25768@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Geoff Berry", "gberry.qdt@qualcommdatacenter.com", "@gberry-qdt")
addPerson("Georg Reinke", "guelfey@gmail.com", "@guelfey")
addPerson("George Gkirtsou", "ggirtsou@gmail.com", "@ggirtsou")
addPerson("George Shammas", "george@shamm.as", "@georgyo")
addPerson("George Tankersley", "george.tankersley@gmail.com")
addPerson("Gepser Hoil", "geharold@gmail.com")
addPerson("Gerasimos (Makis) Maropoulos", "kataras2006@hotmail.com", "@kataras")
addPerson("Gerasimos Dimitriadis", "gedimitr@gmail.com", "@gedimitr")
addPerson("Gergely Brautigam", "skarlso777@gmail.com", "@Skarlso")
addPerson("Getulio Sánchez", "valentin2507@gmail.com")
addPerson("Ggicci", "ggicci.t@gmail.com", "@ggicci")
addPerson("Gianguido Sora`", "g.sora4@gmail.com")
addPerson("Giannis Kontogianni", "giannis2792@gmail.com")
addPerson("GiantsLoveDeathMetal", "sebastien@cytora.com", "@foxyblue")
addPerson("Gil Raphaelli", "g@raphaelli.com")
addPerson("Giovanni Bajo", "rasky@develer.com", "5340@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Giovanni Bajo", "rasky@develer.com", "@rasky")
addPerson("Giulio Iotti", "dullgiulio@gmail.com", "@dullgiulio")
addPerson("Giuseppe Valente", "gvalente@arista.com")
addPerson("Gleb Smirnoff", "glebius@netflix.com")
addPerson("Gleb Stepanov", "glebstepanov1992@gmail.com", "14596@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Gleb Stepanov", "glebstepanov1992@gmail.com", "@stgleb")
addPerson("Glenn Brown", "glennb@google.com")
addPerson("Glenn Griffin", "glenng@google.com")
addPerson("Glenn Lewis", "gmlewis@google.com", "@gmlewis")
addPerson("Glib Smaga", "code@gsmaga.com")
addPerson("Go Team", "no-reply@golang.org")
addPerson("Goo", "liuwanle2010@gmail.com", "@l-we")
addPerson("Gordon Klaus", "gordon.klaus@gmail.com", "5780@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Gordon Klaus", "gordon.klaus@gmail.com", "@gordonklaus")
addPerson("Graham Miller", "graham.miller@gmail.com", "@laslowh")
addPerson("Greg Poirier", "greg.istehbest@gmail.com", "@grepory")
addPerson("Greg Ward", "greg@gerg.ca", "@gward")
addPerson("Gregory Colella", "gcolella@google.com")
addPerson("Gregory Haskins", "gregory.haskins@gmail.com")
addPerson("Gregory Man", "man.gregory@gmail.com")
addPerson("Gregory Man", "man.gregory@gmail.com", "@gregory-m")
addPerson("Greyh4t", "greyh4t1337@gmail.com")
addPerson("Grim", "megaskyhawk@gmail.com")
addPerson("Grégoire Delattre", "gregoire.delattre@gmail.com", "@gregdel")
addPerson("Guilherme Garnier", "guilherme.garnier@gmail.com", "@ggarnier")
addPerson("Guilherme Goncalves", "guilhermeaugustosg@gmail.com", "@guilhermeasg")
addPerson("Guilherme Rezende", "guilhermebr@gmail.com", "22856@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Guilherme Rezende", "guilhermebr@gmail.com", "@guilhermebr")
addPerson("Guilherme Santos", "guilherme.santos@foodora.com")
addPerson("GuilhermeCaruso", "gui.martinscaruso@gmail.com", "@GuilhermeCaruso")
addPerson("Guillaume J. Charmes", "guillaume@charmes.net", "@creack")
addPerson("Guillaume J. Charmes", "gcharmes@magicleap.com")
addPerson("Guillaume Koenig", "guillaume.edward.koenig@gmail.com")
addPerson("Guillaume Leroi", "leroi.g@gmail.com")
addPerson("Guillermo López-Anglada", "guillermo.lopez@outlook.com", "@guillermooo")
addPerson("Guobiao Mei", "meiguobiao@gmail.com", "@guobiao")
addPerson("Guoliang Wang", "iamwgliang@gmail.com", "@wgliang")
addPerson("Gurpartap Singh", "hi@gurpartap.com")
addPerson("Gustav Paul", "gustav.paul@gmail.com", "@gpaul")
addPerson("Gustav Westling", "gustav@westling.xyz", "@zegl")
addPerson("Gustav Westling", "zegl@westling.xyz", "@zegl")
addPerson("Gustavo Niemeyer", "gustavo@niemeyer.net", "n13m3y3r@gmail.com", "@niemeyer")
addPerson("Gustavo Picón", "tabo@tabo.pe")
addPerson("Gyu-Ho Lee", "gyuhox@gmail.com", "@gyuho")
addPerson("H. İbrahim Güngör", "igungor@gmail.com", "@igungor")
addPerson("HAMANO Tsukasa", "hamano@osstech.co.jp", "@hamano")
addPerson("HENRY-PC\\Henry", "henry.adisumarto@gmail.com")
addPerson("Hajime Hoshi", "hajimehoshi@gmail.com", "7938@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hajime Hoshi", "hajimehoshi@gmail.com", "@hajimehoshi")
addPerson("Hamit Burak Emre", "hamitburakemre@gmail.com")
addPerson("Han-Wen Nienhuys", "hanwen@google.com", "5893@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Han-Wen Nienhuys", "hanwen@google.com", "@hanwen")
addPerson("Han-Wen Nienhuys", "hanwenn@gmail.com", "6115@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hana Kim", "hyangah@gmail.com", "@hyangah")
addPerson("Hang Qian", "hangqian90@gmail.com")
addPerson("Hanjun Kim", "hallazzang@gmail.com", "@hallazzang")
addPerson("Hannes Landeholm", "hnsl@google.com")
addPerson("Haosdent Huang", "haosdent@gmail.com")
addPerson("Harald Nordgren", "haraldnordgren@gmail.com", "26145@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("HaraldNordgren", "haraldnordgren@gmail.com", "@HaraldNordgren")
addPerson("Hari haran", "hariharan.uno@gmail.com", "@hariharan-uno")
addPerson("Hariharan Srinath", "srinathh@gmail.com", "@srinathh")
addPerson("Harry Moreno", "morenoh149@gmail.com", "@morenoh149")
addPerson("Harshavardhana", "hrshvardhana@gmail.com", "@harshavardhana")
addPerson("Harshavardhana", "harsha@minio.io")
addPerson("Harshavardhana", "hrshvardhana@gmail.com", "11900@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hauke Löffler", "hloeffler@users.noreply.github.com", "@hloeffler")
addPerson("He Liu", "liulonnie@gmail.com")
addPerson("Hector Chu", "hectorchu@gmail.com", "@hectorchu")
addPerson("Hector Jusforgues", "hector.jusforgues@gmail.com")
addPerson("Hector Martin Cantero", "hector@marcansoft.com", "@marcan")
addPerson("Hector Rivas Gandara", "keymon@gmail.com")
addPerson("Henning Schmiedehausen", "henning@schmiedehausen.org", "@hgschmie")
addPerson("Henrik Hodne", "henrik@hodne.io", "@henrikhodne")
addPerson("Henrique Vicente", "henriquevicente@gmail.com")
addPerson("Henry Chang", "mr.changyuheng@gmail.com")
addPerson("Henry Clifford", "h.a.clifford@gmail.com", "@hcliff")
addPerson("Henry D. Case", "kris@amongbytes.com")
addPerson("Henry", "google@mindeco.de")
addPerson("Herbert Georg Fischer", "herbert.fischer@gmail.com", "@hgfischer")
addPerson("Herbie Ong", "herbie@google.com", "17100@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Herbie Ong", "herbie@google.com", "@cybrcodr")
addPerson("Heschi Kreinick", "heschi@google.com", "17090@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Heschi Kreinick", "heschi@google.com", "@heschik")
addPerson("Hidetatsu Yaginuma", "ygnmhdtt@gmail.com", "@yagi5")
addPerson("Hilko Bengen", "bengen@hilluzination.de")
addPerson("Hiroaki Nakamura", "hnakamur@gmail.com")
addPerson("Hiroaki Nakamura", "hnakamur@gmail.com", "17745@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hironao OTSUBO", "motemen@gmail.com", "@motemen")
addPerson("Hiroshi Ioka", "hirochachacha@gmail.com", "11631@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hiroshi Ioka", "hirochachacha@gmail.com", "@hirochachacha")
addPerson("Hitoshi Mitake", "mitake.hitoshi@gmail.com", "@mitake")
addPerson("Homan Chou", "homanchou@gmail.com")
addPerson("Hong Ruiqi", "hongruiqi@gmail.com", "@hongruiqi")
addPerson("Hsin Tsao", "tsao@google.com")
addPerson("Hsin Tsao", "tsao@google.com", "@lazyhackeratwork")
addPerson("HuKeping", "hukeping@huawei.com", "@HuKeping")
addPerson("Huadcu Sulivan", "huadcu@gmail.com")
addPerson("Hugo Rut", "hugorut@gmail.com")
addPerson("Hugues Bruant", "hugues.bruant@gmail.com", "17586@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hugues Bruant", "hugues.bruant@gmail.com", "@huguesb")
addPerson("Hyang-Ah Hana Kim", "hyangah@gmail.com", "5190@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Hyang-Ah Hana Kim", "hyangah@gmail.com", "hakim@google.com", "@hyangah")
addPerson("Håvard Haugen", "havard.haugen@gmail.com", "5505@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Håvard Haugen", "havard.haugen@gmail.com", "@osocurioso")
addPerson("INADA Naoki", "songofacandy@gmail.com", "@methane")
addPerson("Ian Cottrell", "iancottrell@google.com", "9711@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ian Cottrell", "iancottrell@google.com", "@ianthehat")
addPerson("Ian Davis", "nospam@iandavis.com")
addPerson("Ian Davis", "nospam@iandavis.com", "@iand")
addPerson("Ian Ennis", "michaelian.ennis@gmail.com")
addPerson("Ian Gudger", "igudger@google.com", "12625@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ian Gudger", "igudger@google.com", "ian@loosescre.ws", "@iangudger")
addPerson("Ian Haken", "ihaken@netflix.com")
addPerson("Ian Johnson", "person.uwsome@gmail.com")
addPerson("Ian Kent", "iankent85@gmail.com", "@ian-kent")
addPerson("Ian Lance Taylor", "iant@golang.org", "@ianlancetaylor", "5206@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ibrahim AshShohail", "ibra.sho@gmail.com", "@ibrasho")
addPerson("Ibrahim AshShohail", "me@ibrasho.com")
addPerson("Iccha Sethi", "icchasethi@gmail.com", "@isethi")
addPerson("Idora Shinatose", "idora.shinatose@gmail.com", "@idora")
addPerson("Igor Bernstein", "igorbernstein@google.com")
addPerson("Igor Dolzhikov", "bluesriverz@gmail.com", "@takama")
addPerson("Igor Vashyst", "ivashyst@gmail.com", "@ivashyst")
addPerson("Igor Zhilianin", "igor.zhilianin@gmail.com", "@igorzhilianin")
addPerson("Ilan Pillemer", "ilan.pillemer@gmail.com")
addPerson("Ilia Filippov", "ilia.filippov@intel.com")
addPerson("Ilya Tocar", "ilya.tocar@intel.com", "26817@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ilya Tocar", "ilya.tocar@intel.com", "8585@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ilya Tocar", "ilya.tocar@intel.com", "@TocarIP")
addPerson("Inanc Gumus", "m@inanc.io", "25354@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Inanc Gumus", "m@inanc.io", "@inancgumus")
addPerson("Ingo Gottwald", "in.gottwald@gmail.com")
addPerson("Ingo Krabbe", "ikrabbe.ask@gmail.com", "@ikrabbe")
addPerson("Ingo Oeser", "nightlyone@googlemail.com", "5021@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ingo Oeser", "nightlyone@googlemail.com", "@nightlyone")
addPerson("Ioannis Georgoulas", "geototti21@hotmail.com", "@geototti21")
addPerson("Ishani Garg", "ishani.garg@gmail.com")
addPerson("Iskander Sharipov", "iskander.sharipov@intel.com", "24037@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Iskander Sharipov", "iskander.sharipov@intel.com", "@Quasilyte")
addPerson("Iskander Sharipov", "quasilyte@gmail.com", "25422@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Issac Trotts", "issac.trotts@gmail.com")
addPerson("Issac Trotts", "issactrotts@google.com", "@ijt")
addPerson("Ivan Babrou", "ivan@cloudflare.com", "@bobrik")
addPerson("Ivan Bertona", "ivan.bertona@gmail.com", "@ibrt")
addPerson("Ivan Jovanovic", "ivan@loopthrough.ch")
addPerson("Ivan Krasin", "krasin@golang.org", "@krasin2")
addPerson("Ivan Kruglov", "ivan.kruglov@yahoo.com")
addPerson("Ivan Kutuzov", "arbrix@gmail.com")
addPerson("Ivan Kutuzov", "arbrix@gmail.com", "@arbrix")
addPerson("Ivan Markin", "sw@nogoegst.net")
addPerson("Ivan Markin", "twim@riseup.net")
addPerson("Ivan Moscoso", "moscoso@gmail.com", "@ivan3bx")
addPerson("Ivan Sharavuev", "shpiwan@gmail.com", "@Shiwin")
addPerson("Ivan Ukhov", "ivan.ukhov@gmail.com", "@IvanUkhov")
addPerson("Ivy Evans", "ivy@ivyevans.net", "@ivy")
addPerson("J. Mroz", "nalik.nal@gmail.com")
addPerson("Jay Conrod", "jayconrod@google.com", "@jayconrod")
addPerson("JBD (DO NOT USE)", "jbd@golang.org", "10107@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("JBD", "jbd@google.com", "5040@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("JP Sugarbroad", "jpsugar@google.com", "@taralx")
addPerson("JT Olds", "hello@jtolds.com", "@jtolds")
addPerson("Jaana Burcu Dogan", "jbd@google.com", "jbd@golang.org", "@rakyll")
addPerson("Jack Christensen", "jack@jackchristensen.com")
addPerson("Jack Lindamood", "jlindamo@justin.tv", "@cep21")
addPerson("Jack Parkinson", "jdparkinson93@gmail.com")
addPerson("Jack", "jackxbritton@gmail.com", "@jackxbritton")
addPerson("Jackson Owens", "jackson_owens@alumni.brown.edu")
addPerson("Jacob H. Haven", "jacob@cloudflare.com")
addPerson("Jacob H. Haven", "jacob@jhaven.me", "@jacobhaven")
addPerson("Jacob Haven", "jacob@cloudflare.com", "5346@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jacob Hoffman-Andrews", "github@hoffman-andrews.com", "10927@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jacob Hoffman-Andrews", "github@hoffman-andrews.com", "@jsha")
addPerson("Jacob Kobernik", "jkobernik@gmail.com")
addPerson("Jacob Marble", "jacobmarble@google.com")
addPerson("Jacob Walker", "jacobwalker0814@gmail.com")
addPerson("Jade Auer", "jda@tapodi.net")
addPerson("Jae Kwon", "jae@tendermint.com", "@jaekwon")
addPerson("Jaime Geiger", "jaime@grimm-co.com")
addPerson("Jake B", "doogie1012@gmail.com")
addPerson("Jake B", "doogie1012@gmail.com", "@silbinarywolf")
addPerson("Jake Burkhead", "jake.b@socialcodeinc.com")
addPerson("Jakob Borg", "jakob@nym.se", "@calmh")
addPerson("Jakob Weisblat", "jakobw@mit.edu", "@jakob223")
addPerson("Jakub Katarzynski", "kkatarzynski@gmail.com")
addPerson("Jakub Ryszard Czarnowicz", "j.czarnowicz@gmail.com", "@Naranim")
addPerson("Jakub Čajka", "jcajka@redhat.com", "11002@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jakub Čajka", "jcajka@redhat.com", "@jcajka")
addPerson("James Abley", "james.abley@gmail.com")
addPerson("James Bardin", "j.bardin@gmail.com", "@jbardin")
addPerson("James Chacon", "jchacon@google.com")
addPerson("James Clarke", "jrtc27@jrtc27.com", "15676@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("James Clarke", "jrtc27@jrtc27.com", "@jrtc27")
addPerson("James Cowgill", "James.Cowgill@imgtec.com")
addPerson("James Cowgill", "james.cowgill@mips.com", "17679@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("James Cowgill", "james.cowgill@mips.com", "@jcowgill")
addPerson("James Craig Burley", "james-github@burleyarch.com", "@jcburley")
addPerson("James F. Carter", "jfc.org.uk@gmail.com")
addPerson("James Fysh", "james.fysh@gmail.com", "@JamesFysh")
addPerson("James Gray", "james@james4k.com", "@james4k")
addPerson("James Greenhill", "fuziontech@gmail.com")
addPerson("James Hall", "james.hall@shopify.com")
addPerson("James Hartig", "fastest963@gmail.com", "17920@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("James Hartig", "fastest963@gmail.com", "@fastest963")
addPerson("James Lawrence", "jljatone@gmail.com", "@james-lawrence")
addPerson("James Munnelly", "james@munnelly.eu")
addPerson("James Myers", "jfmyers9@gmail.com", "@jfmyers9")
addPerson("James Neve", "jamesoneve@gmail.com", "@jamesneve")
addPerson("James Robinson", "jamesr@google.com", "@jamesr")
addPerson("James Schofield", "james@shoeboxapp.com", "@jamesshoebox")
addPerson("James Smith", "jrs1995@icloud.com", "@jimmysmith95")
addPerson("James Sweet", "james.sweet88@googlemail.com", "@Omegaice")
addPerson("James Toy", "nil@opensesame.st", "@jamestoy")
addPerson("James Treanor", "jtreanor3@gmail.com")
addPerson("James Tucker", "raggi@google.com", "@raggi")
addPerson("James Whitehead", "jnwhiteh@gmail.com", "@jnwhiteh")
addPerson("Jamie Barnett", "jamiebarnett1992@gmail.com")
addPerson("Jamie Beverly", "jamie.r.beverly@gmail.com", "@jbeverly")
addPerson("Jamie Hall", "jamiehall@google.com")
addPerson("Jamie Kerr", "jkerr113@googlemail.com")
addPerson("Jamie Liu", "jamieliu@google.com", "@nixprime")
addPerson("Jamie Stackhouse", "contin673@gmail.com", "@itsjamie")
addPerson("Jamie Wilkinson", "jaq@spacepants.org", "@jaqx0r")
addPerson("Jamil Djadala", "djadala@gmail.com", "@djadala")
addPerson("Jan Berktold", "jan@berktold.co")
addPerson("Jan Berktold", "jan@berktold.co", "@JanBerktold")
addPerson("Jan H. Hosang", "jan.hosang@gmail.com", "@hosang")
addPerson("Jan Kratochvil", "jan.kratochvil@redhat.com", "@jankratochvil")
addPerson("Jan Lehnardt", "jan@apache.org", "@janl")
addPerson("Jan Mercl", "0xjnml@gmail.com", "5295@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jan Mercl", "0xjnml@gmail.com", "@cznic")
addPerson("Jan Mercl", "befelemepeseveze@gmail.com", "@bflm")
addPerson("Jan Pilzer", "jan.pilzer@gmx.de")
addPerson("Jan Ziak", "0xe2.0x9a.0x9b@gmail.com", "@atomsymbol")
addPerson("Janne Snabb", "snabb@epipe.com", "@snabb")
addPerson("Jason A. Donenfeld", "jason@zx2c4.com", "@zx2c4")
addPerson("Jason Barnett", "jason.w.barnett@gmail.com", "@jasonwbarnett")
addPerson("Jason Buberel", "jbuberel@google.com", "8445@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jason Buberel", "jbuberel@google.com", "jason@buberel.org", "@jbuberel")
addPerson("Jason Chu", "jasonchujc@gmail.com", "@1lann")
addPerson("Jason Cwik", "jason@cwik.org")
addPerson("Jason Del Ponte", "delpontej@gmail.com", "@jasdel")
addPerson("Jason Donenfeld", "jason.donenfeld@gmail.com", "20556@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jason E. Aten", "j.e.aten@gmail.com")
addPerson("Jason Hall", "imjasonh@gmail.com")
addPerson("Jason Hall", "jasonhall@google.com")
addPerson("Jason Hewes", "jasonhewes5@gmail.com")
addPerson("Jason Keene", "jasonkeene@gmail.com")
addPerson("Jason Keene", "jasonkeene@gmail.com", "@jasonkeene")
addPerson("Jason LeBrun", "jblebrun@gmail.com", "@jblebrun")
addPerson("Jason McVetta", "jason.mcvetta@gmail.com", "@jmcvetta")
addPerson("Jason Murray", "jason@chaosaffe.io")
addPerson("Jason Smale", "jsmale@zendesk.com")
addPerson("Jason Travis", "infomaniac7@gmail.com", "@corburn")
addPerson("Jason Wangsadinata", "jwangsadinata@gmail.com", "@jwangsadinata")
addPerson("Jason Wilder", "mail@jasonwilder.com")
addPerson("Javier Kohen", "jkohen@google.com", "@jkohen")
addPerson("Javier Segura", "javism@gmail.com", "@jsegura")
addPerson("Jay Conrod", "jayconrod@google.com", "17092@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jay Conrod", "jayconrod@google.com", "@jayconrod")
addPerson("Jay Satiro", "raysatiro@yahoo.com")
addPerson("Jay Stramel", "js@ionactual.com")
addPerson("Jay Weisskopf", "jay@jayschwa.net", "@jayschwa")
addPerson("Jayabaskar Rajagopal", "jayabaskar.rajagopal@gmail.com")
addPerson("Jean de Klerk", "deklerk@google.com", "26615@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jean de Klerk", "deklerk@google.com", "@jadekler")
addPerson("Jean de Klerk", "jadekler@gmail.com")
addPerson("Jean-André Santoni", "jean.andre.santoni@gmail.com")
addPerson("Jean-Francois Cantin", "jfcantin@gmail.com", "@jfcantin")
addPerson("Jean-Marc Eurin", "jmeurin@google.com", "@jmeurin")
addPerson("Jean-Nicolas Moal", "jn.moal@gmail.com", "@jnmoal")
addPerson("Jed Denlea", "jed@fastly.com", "5550@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jed Denlea", "jed@fastly.com", "@jeddenlea")
addPerson("Jeet Parekh", "jeetparekh96@gmail.com", "24716@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jeet Parekh", "jeetparekh96@gmail.com", "@jeet-parekh")
addPerson("Jeevanandam M", "jeeva@myjeeva.com")
addPerson("Jeff (Zhefu) Jiang", "jeffjiang@google.com")
addPerson("Jeff Buchbinder", "jeff@ourexchange.net")
addPerson("Jeff Craig", "jeffcraig@google.com", "@foxxtrot")
addPerson("Jeff Dupont", "jeff.dupont@gmail.com", "@jeffdupont")
addPerson("Jeff Grafton", "jgrafton@google.com")
addPerson("Jeff Hodges", "jeff@somethingsimilar.com", "@jmhodges")
addPerson("Jeff Johnson", "jrjohnson@google.com", "16958@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jeff Johnson", "jrjohnson@google.com", "@johnsonj")
addPerson("Jeff R. Allen", "jra@nella.org", "5646@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jeff R. Allen", "jra@nella.org", "@jeffallen")
addPerson("Jeff Sickel", "jas@corpus-callosum.com", "@vat9")
addPerson("Jeff Wendling", "jeff@spacemonkey.com", "@zeebo")
addPerson("Jeff Williams", "jefesaurus@google.com")
addPerson("Jeff", "jeffreyh192@gmail.com", "@jeffizhungry")
addPerson("Jeffrey Yong", "jeffreyyong10@gmail.com")
addPerson("Jelte Fennema", "github-tech@jeltef.nl", "@JelteF")
addPerson("Jens Frederich", "jfrederich@gmail.com", "@frederich")
addPerson("Jeremiah Harmsen", "jeremiah@google.com", "@jharmsen")
addPerson("Jeremy Baumont", "jeremy.baumont@gmail.com")
addPerson("Jeremy Jackins", "jeremyjackins@gmail.com", "5300@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jeremy Jackins", "jeremyjackins@gmail.com", "@jnjackins")
addPerson("Jeremy Jay", "jeremy@pbnjay.com")
addPerson("Jeremy Jay", "jeremy@pbnjay.com", "@pbnjay")
addPerson("Jeremy Loy", "jeremy.b.loy@icloud.com")
addPerson("Jeremy Schlatter", "jeremy.schlatter@gmail.com", "@jeremyschlatter")
addPerson("Jeremy", "jcanady@gmail.com")
addPerson("Jeroen Bobbeldijk", "jerbob92@gmail.com", "@jerbob92")
addPerson("Jerrin Shaji George", "jerrinsg@gmail.com", "@jerrinsg")
addPerson("Jess Frazelle", "acidburn@google.com", "@jessfraz")
addPerson("Jess Frazelle", "me@jessfraz.com", "@jessfraz")
addPerson("Jesse Szwedko", "jesse.szwedko@gmail.com", "@jszwedko")
addPerson("Jessie Frazelle", "me@jessfraz.com", "6071@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jesús Espino", "jespinog@gmail.com")
addPerson("Jian Zhen", "zhenjl@gmail.com")
addPerson("Jianing Yu", "jnyu@google.com")
addPerson("Jianqiao Li", "jianqiaoli@google.com")
addPerson("Jianqiao Li", "jianqiaoli@jianqiaoli.svl.corp.google.com")
addPerson("Jihyun Yu", "yjh0502@gmail.com", "@yjh0502")
addPerson("Jille Timmermans", "quis@google.com")
addPerson("Jim Cote", "jfcote87@gmail.com", "5320@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jim Cote", "jfcote87@gmail.com", "@jfcote87")
addPerson("Jim Kingdon", "jim@bolt.me", "@jkingdon")
addPerson("Jim McGrath", "jimmc2@gmail.com", "@mcgoo")
addPerson("Jim Minter", "jminter@redhat.com")
addPerson("Jim Myers", "jfmyers9@gmail.com", "16855@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jimmy Zelinskie", "jimmyzelinskie@gmail.com", "@jzelinskie")
addPerson("Jin-wook Jeong", "jeweljar@hanmail.net", "@jeweljar")
addPerson("Jingcheng Zhang", "diogin@gmail.com", "@diogin")
addPerson("Jingguo Yao", "yaojingguo@gmail.com", "@yaojingguo")
addPerson("Jiong Du", "londevil@gmail.com", "@lodevil")
addPerson("Jirka Daněk", "dnk@mail.muni.cz", "@jirkadanek")
addPerson("Jiulong Wang", "jiulongw@gmail.com")
addPerson("Jizhong Jiang", "jiangjizhong@gmail.com")
addPerson("Joakim Sernbrant", "serbaut@gmail.com", "@serbaut")
addPerson("Joe Cortopassi", "joe@joecortopassi.com", "@JoeCortopassi")
addPerson("Joe Farrell", "joe2farrell@gmail.com", "@joe2far")
addPerson("Joe Harrison", "joehazzers@gmail.com", "@sigwinch28")
addPerson("Joe Henke", "joed.henke@gmail.com", "@jdhenke")
addPerson("Joe Kyo", "xunianzu@gmail.com", "21935@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joe Kyo", "xunianzu@gmail.com", "@joekyo")
addPerson("Joe Poirier", "jdpoirier@gmail.com", "@jpoirier")
addPerson("Joe Richey", "joerichey@google.com", "17411@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joe Shaw", "joe@joeshaw.org")
addPerson("Joe Shaw", "joe@joeshaw.org", "5185@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joe Shaw", "joe@joeshaw.org", "@joeshaw")
addPerson("Joe Sylve", "joe.sylve@gmail.com", "11851@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joe Sylve", "joe.sylve@gmail.com", "@jtsylve")
addPerson("Joe Tsai", "joetsai@google.com", "joetsai@digital-static.net", "thebrokentoaster@gmail.com", "@dsnet")
addPerson("Joe Tsai", "joetsai@digital-static.net", "8495@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joe Tsai", "joetsai@google.com", "12850@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joe Tsai", "thebrokentoaster@gmail.com", "9735@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joel Sing", "joel@sing.id.au", "13640@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joel Sing", "joel@sing.id.au", "jsing@google.com", "@4a6f656c")
addPerson("Joel Sing", "jsing@google.com", "5770@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Johan Brandhorst", "johan.brandhorst@gmail.com", "16585@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Johan Brandhorst", "johan.brandhorst@gmail.com", "@johanbrandhorst")
addPerson("Johan Brandhorst", "johan@cognitivelogic.com")
addPerson("Johan Brandhorst", "johan@infosum.com")
addPerson("Johan Euphrosine", "proppy@google.com", "5480@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Johan Euphrosine", "proppy@google.com", "@proppy")
addPerson("Johan Sageryd", "j@1616.se", "@jsageryd")
addPerson("Johan Schuijt-Li", "johan@300.nl")
addPerson("Johanna Mantilla Duque", "johanna1431@gmail.com")
addPerson("Johannes Ebke", "johannes@ebke.org")
addPerson("John Asmuth", "jasmuth@gmail.com", "jasmuth@google.com", "@skelterjohn")
addPerson("John Beisley", "huin@google.com", "@huin-google")
addPerson("John Dethridge", "jcd@golang.org", "5515@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("John Dethridge", "jcd@golang.org", "@jcd2")
addPerson("John Eikenberry", "jae@zhar.net")
addPerson("John Gibb", "johngibb@gmail.com", "@johngibb")
addPerson("John Howard Palevich", "jack.palevich@gmail.com", "@jackpal")
addPerson("John Jeffery", "jjeffery@sp.com.au", "@jjeffery")
addPerson("John Leidegren", "john.leidegren@gmail.com")
addPerson("John Paul Adrian Glaubitz", "glaubitz@physik.fu-berlin.de")
addPerson("John Potocny", "johnp@vividcortex.com", "@potocnyj")
addPerson("John R. Lenton", "jlenton@gmail.com", "@chipaca")
addPerson("John Schnake", "schnake.john@gmail.com", "@johnSchnake")
addPerson("John ShaggyTwoDope Jenkins", "twodopeshaggy@gmail.com", "@shaggytwodope")
addPerson("John Shahid", "jvshahid@gmail.com", "@jvshahid")
addPerson("John Starks", "jostarks@microsoft.com")
addPerson("John Tuley", "john@tuley.org", "@jmtuley")
addPerson("JohnCGriffin", "griffinish@gmail.com")
addPerson("Johnny Boursiquot", "jboursiquot@gmail.com")
addPerson("Johnny Luo", "johnnyluo1980@gmail.com", "19155@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Johnny Luo", "johnnyluo1980@gmail.com", "@johnnyluo")
addPerson("Jon Chen", "jchen@justin.tv", "@bsdlp")
addPerson("Jon Jenkins", "invultussolis@gmail.com")
addPerson("Jon Jenkins", "jon@mj12.su")
addPerson("Jonathan Amsterdam", "jba@google.com", "14570@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jonathan Amsterdam", "jba@google.com", "@jba")
addPerson("Jonathan Anderson", "jonathan.anderson@mun.ca")
addPerson("Jonathan Boulle", "jonathanboulle@gmail.com", "@jonboulle")
addPerson("Jonathan Chen", "dijonkitchen@users.noreply.github.com", "@dijonkitchen")
addPerson("Jonathan Doklovic", "doklovic@atlassian.com")
addPerson("Jonathan ES Lin", "ernsheong@gmail.com")
addPerson("Jonathan Feinberg", "feinberg@google.com", "@google-feinberg")
addPerson("Jonathan Hseu", "jhseu@google.com", "@jhseu")
addPerson("Jonathan Lloyd", "j.lloyd.email@gmail.com")
addPerson("Jonathan Mark", "jhmark@xenops.com", "@jhmark")
addPerson("Jonathan Mayer", "jonmayer@google.com")
addPerson("Jonathan Nieder", "jrn@google.com", "@jrn")
addPerson("Jonathan Pentecost", "pentecostjonathan@gmail.com")
addPerson("Jonathan Pittman", "jmpittman@google.com", "@jonathanpittman")
addPerson("Jonathan Rudenberg", "jonathan@titanous.com", "5431@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jonathan Rudenberg", "jonathan@titanous.com", "@titanous")
addPerson("Jonathan Turner", "jt@jtnet.co.uk")
addPerson("Jonathan Wills", "runningwild@gmail.com", "@runningwild")
addPerson("Jongmin Kim", "atomaths@gmail.com", "@atomaths")
addPerson("Jongmin Kim", "jmkim@pukyong.ac.kr", "@jmkim")
addPerson("Jono Gould", "jono.gould@gmail.com")
addPerson("Joonas Kuorilehto", "joneskoo@derbian.fi", "14770@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joonas Kuorilehto", "joneskoo@derbian.fi", "@joneskoo")
addPerson("Joop Kiefte", "joop@kiefte.net", "@LaPingvino")
addPerson("Jordan Lewis", "jordanthelewis@gmail.com", "@jordanlewis")
addPerson("Jordan Liggitt", "jliggitt@redhat.com")
addPerson("Jordan Rhee", "jordanrh@microsoft.com", "28473@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Jordan Rhee", "jordanrh@microsoft.com", "@jordanrh1")
addPerson("Jos Visser", "josv@google.com", "@gosbisser")
addPerson("Jose Luis Vázquez González", "josvazg@gmail.com", "@josvazg")
addPerson("Joseph Herlant", "herlantj@gmail.com")
addPerson("Joseph Holsten", "joseph@josephholsten.com", "@josephholsten")
addPerson("Joseph Poirier", "jdpoirier@gmail.com")
addPerson("Joseph Richey", "joerichey@google.com", "@josephlr")
addPerson("Joseph Spurrier", "code@josephspurrier.com")
addPerson("Josh Bleecher Snyder", "josharian@gmail.com", "5143@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Josh Bleecher Snyder", "josharian@gmail.com", "@josharian")
addPerson("Josh Chorlton", "jchorlton@gmail.com", "@jchorl")
addPerson("Josh Deprez", "josh.deprez@gmail.com", "@DrJosh9000")
addPerson("Josh Goebel", "dreamer3@gmail.com", "@yyyc514")
addPerson("Josh Hoak", "jhoak@google.com", "@Kashomon")
addPerson("Josh Lubawy", "jlubawy@gmail.com")
addPerson("Josh Roppo", "joshroppo@gmail.com", "@Ropes")
addPerson("Josh Varga", "josh.varga@gmail.com")
addPerson("Joshua Blakeley", "jtblakeley@gmail.com", "27898@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Joshua Boelter", "joshua.boelter@intel.com")
addPerson("Joshua Boelter", "joshua.boelter@intel.com", "@duckized")
addPerson("Joshua Humphries", "jhumphries131@gmail.com")
addPerson("Joshua Rubin", "joshua@rubixconsulting.com", "@joshuarubin")
addPerson("Joshua T Corbin", "joshua@uber.com")
addPerson("Josselin Costanzi", "josselin@costanzi.fr")
addPerson("Josselin Costanzi", "josselin@costanzi.fr", "16720@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Josselin Costanzi", "josselin@costanzi.fr", "@josselin-c")
addPerson("Jostein Stuhaug", "js@solidsystem.no")
addPerson("José Carlos Nieto", "jose.carlos@menteslibres.net")
addPerson("João Lucas Melo Brasio", "jaumlucas@gmail.com")
addPerson("Joël Stemmer", "jstemmer@google.com", "@jstemmer")
addPerson("Joël Stemmer", "stemmertech@gmail.com", "@jstemmer")
addPerson("Juan Carlos", "juanjcsr@gmail.com", "@juanjcsr")
addPerson("JuciÊ Dias Andrade", "ojucie@gmail.com")
addPerson("Jude Pereira", "judebpereira@gmail.com")
addPerson("Jukka-Pekka Kekkonen", "karatepekka@gmail.com", "@madari")
addPerson("Julia Hansbrough", "flowerhack@google.com", "@flowerhack")
addPerson("Julian Kornberger", "jk+github@digineo.de", "@corny")
addPerson("Julian Pastarmov", "pastarmovj@google.com")
addPerson("Julian Phillips", "julian@quantumfyre.co.uk", "@qur")
addPerson("Julie Qiu", "julieyeqiu@gmail.com", "@julieqiu", "julieqiu@google.com", "julie@golang.org")
addPerson("Julien Kauffmann", "julien.kauffmann@freelan.org")
addPerson("Julien Salleyron", "julien.salleyron@gmail.com", "@Juliens")
addPerson("Julien Schmidt", "google@julienschmidt.com", "@julienschmidt")
addPerson("Julio Montes", "julio.montes@intel.com", "@devimc")
addPerson("Junda Liu", "junda@celer.network")
addPerson("Jungho Ahn", "jhahn@google.com", "@jhahn21")
addPerson("Junya Hayashi", "ledmonster@gmail.com")
addPerson("Juraj Sukop", "sukop@users.noreply.github.com", "@sukop")
addPerson("Jure Ham", "jure.ham@zemanta.com", "@hamaxx")
addPerson("Jurgen De Commer", "jurgen.decommer@gmail.com")
addPerson("Justin Gracenin", "jgracenin@gmail.com")
addPerson("Justin Li", "git@justinli.net", "@pushrax")
addPerson("Justin Nuß", "nuss.justin@gmail.com", "5475@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Justin Nuß", "nuss.justin@gmail.com", "@nussjustin")
addPerson("Justyn Temme", "justyntemme@gmail.com", "@justyntemme")
addPerson("KB Sriram", "kbsriram@google.com", "@kbsriram")
addPerson("Kaarthik Rao Bekal Radhakrishna", "karthik.0703@gmail.com")
addPerson("Kale Blankenship", "kale@lemnisys.com", "15430@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kale Blankenship", "kale@lemnisys.com", "@vcabbage")
addPerson("Kaleb Elwert", "kelwert@atlassian.com", "@belak")
addPerson("Kamal Aboul-Hosn", "aboulhosn@google.com")
addPerson("Kamal Aboul-Hosn", "kamal.aboulhosn@gmail.com")
addPerson("Kamil Kisiel", "kamil@kamilkisiel.net", "@kisielk")
addPerson("Kamil Rytarowski", "krytarowski@users.noreply.github.com", "@krytarowski")
addPerson("Kang Hu", "hukangustc@gmail.com", "@mkhu")
addPerson("Kanitkorn S", "k.sujautra@gmail.com")
addPerson("Karan Misra", "kidoman@gmail.com")
addPerson("Karel Pazdera", "pazderak@gmail.com", "@pazderak")
addPerson("Karsten Köhler", "karsten.koehler95@gmail.com", "@SchiffFlieger")
addPerson("Karthik Karanth", "karanth.karthik@gmail.com")
addPerson("Kashav Madan", "kshvmdn@gmail.com", "@kshvmdn")
addPerson("Kasper Nilsson", "kaspern@google.com")
addPerson("Kate Manson", "kate.manson@izettle.com", "@kamanson")
addPerson("Katie Hockman", "katie@golang.org", "28759@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Katie Hockman", "katie@golang.org", "@katiehockman")
addPerson("Kato Kazuyoshi", "kato.kazuyoshi@gmail.com", "@kzys")
addPerson("Katrina Owen", "katrina.owen@gmail.com", "10395@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Katrina Owen", "katrina.owen@gmail.com", "@kytrinyx")
addPerson("Katsuya Miyachi", "kattu0426@gmail.com")
addPerson("Kaviraj", "kavirajkanagaraj@gmail.com", "@kavirajk")
addPerson("Kazuhiro Kubota", "k2.wanko@gmail.com")
addPerson("Kazuhiro Sera", "seratch@gmail.com", "@seratch")
addPerson("Keegan Carruthers-Smith", "keegan.csmith@gmail.com", "@keegancsmith")
addPerson("Kei Son", "hey.calmdown@gmail.com", "@heycalmdown")
addPerson("Keiji Yoshida", "keijiyoshida.mail@gmail.com", "@keijiyoshida")
addPerson("Keith Ball", "inflatablewoman@gmail.com")
addPerson("Keith Randall", "khr@golang.org", "5200@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Keith Randall", "khr@golang.org", "khr@google.com", "keithr@alum.mit.edu", "@randall77")
addPerson("Keith Rarick", "kr@xph.us", "@kr")
addPerson("Kelsey Hightower", "kelsey.hightower@gmail.com", "5491@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kelsey Hightower", "kelsey.hightower@gmail.com", "@kelseyhightower")
addPerson("Ken Friedenbach", "kenliz@cruzio.com", "@Ken1JF")
addPerson("Ken Rockot", "ken@oz.gs", "@krockot")
addPerson("Ken Sedgwick", "ken@bonsai.com", "@ksedgwic")
addPerson("Ken Thompson", "ken@golang.org", "@ken")
addPerson("Kenji Kaneda", "kenji.kaneda@gmail.com", "@kkaneda")
addPerson("Kenji Yano", "kenji.yano@gmail.com", "@yanolab")
addPerson("Kenneth Shaw", "kenshaw@gmail.com", "@kenshaw")
addPerson("Kenny Grant", "kennygrant@gmail.com", "10235@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kenny Grant", "kennygrant@gmail.com", "@kennygrant")
addPerson("Ketan Parmar", "ketanbparmar@gmail.com")
addPerson("Ketan Parmar", "ketanbparmar@gmail.com", "@kpbird")
addPerson("Kevin Ballard", "kevin@sb.org", "@kballard")
addPerson("Kevin Burke", "kev@inburke.com", "13437@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kevin Burke", "kev@inburke.com", "@kevinburke")
addPerson("Kevin Kirsche", "kev.kirsche@gmail.com", "@kkirsche")
addPerson("Kevin Klues", "klueska@gmail.com", "@klueska")
addPerson("Kevin Lozandier", "lozandier@gmail.com")
addPerson("Kevin Malachowski", "chowski@google.com")
addPerson("Kevin Ruffin", "kruffin@gmail.com")
addPerson("Kevin Vu", "kevin.m.vu@gmail.com", "@kvu787")
addPerson("Kevin Wang", "kevin@kevinwang.com")
addPerson("Kevin Zita", "bleedgreenandgold@gmail.com", "@kzisme")
addPerson("Khramov Anton", "anton@endocode.com")
addPerson("Kieran Colford", "kieran@kcolford.com")
addPerson("Kim Shrier", "kshrier@racktopsystems.com", "@kim-racktop")
addPerson("Kim YongBin", "kybinz@gmail.com", "5154@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kir Kolyshkin", "kolyshkin@gmail.com", "@kolyshkin")
addPerson("Kirill Korotaev", "kirillx@gmail.com")
addPerson("Kirill Smelkov", "kirr@nexedi.com", "16286@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kirill Smelkov", "kirr@nexedi.com", "@navytux")
addPerson("Kirklin McDonald", "kirklin.mcdonald@gmail.com", "@KirkMcDonald")
addPerson("Klaus Post", "klauspost@gmail.com", "6545@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Klaus Post", "klauspost@gmail.com", "@klauspost")
addPerson("Koala Yeung", "koalay@gmail.com")
addPerson("Kodie", "kodiegoodwin@gmail.com")
addPerson("Koen Rouwhorst", "info@koenrouwhorst.nl")
addPerson("Koichi Shiraishi", "zchee.io@gmail.com", "10420@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Koichi Shiraishi", "zchee.io@gmail.com", "@zchee")
addPerson("Koki Ide", "niconegoto@yahoo.co.jp", "@niconegoto")
addPerson("Konstantin Shaposhnikov", "k.shaposhnikov@gmail.com", "8065@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Konstantin Shaposhnikov", "k.shaposhnikov@gmail.com", "@kostya-sh")
addPerson("Kris Kwiatkowski", "kris@cloudflare.com", "27471@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kris Nova", "kris@nivenly.com", "@kris-nova")
addPerson("Kris", "krousey@google.com")
addPerson("Krish Munot", "krishmunot@gmail.com")
addPerson("Kristopher Watts", "traetox@gmail.com", "@traetox")
addPerson("Kropekk", "kamilkropiewnicki@gmail.com")
addPerson("Kun", "likunarmstrong@gmail.com", "@cnbuff410")
addPerson("Kunpei Sakai", "namusyaka@gmail.com", "23250@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Kunpei Sakai", "namusyaka@gmail.com", "@namusyaka")
addPerson("Kyle Consalus", "consalus@gmail.com", "@kcons")
addPerson("Kyle Isom", "kyle@gokyle.net", "@kisom")
addPerson("Kyle Jones", "kyle@kyledj.com")
addPerson("Kyle Lemons", "kyle@kylelemons.net", "@kylelemons")
addPerson("Kyle Shannon", "kyle@pobox.com", "@ksshannon")
addPerson("Kyle Spiers", "eiais@google.com")
addPerson("Kyle Wood", "kyle@kylewood.cc", "@DemonWav")
addPerson("Kyohei Kadota", "lufia@lufia.org")
addPerson("Kyrylo Silin", "silin@kyrylo.org", "@kyrylo")
addPerson("L Campbell", "unpantsu@gmail.com", "@lye")
addPerson("L. Alberto Giménez", "lagimenez@gmail.com")
addPerson("LE Manh Cuong", "cuong.manhle.vn@gmail.com", "@Gnouc")
addPerson("Lai Jiangshan", "eag0628@gmail.com", "@laijs")
addPerson("Lakshay Garg", "lakshay.garg.1996@gmail.com", "21860@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Lakshay Garg", "lakshay.garg.1996@gmail.com", "@lakshayg")
addPerson("Landon Jones", "lbj@landonjones.com")
addPerson("Lann Martin", "lannm@google.com")
addPerson("Lanre Adelowo", "yo@lanre.wtf")
addPerson("Lantao Liu", "lantaol@google.com")
addPerson("Larry Clapp", "larry@theclapp.org", "@theclapp")
addPerson("Larry Hosken", "lahosken@gmail.com", "@lahosken")
addPerson("Lars Jeppesen", "jeppesen.lars@gmail.com")
addPerson("Lars Lehtonen", "lars.lehtonen@gmail.com")
addPerson("Lars Wiegman", "lars@namsral.com", "@namsral")
addPerson("Larz Conwell", "larzconwell@gmail.com", "@larzconwell")
addPerson("Laurent Voisin", "lpvoisin@gmail.com")
addPerson("Laurie Clark-Michalek", "laurie@qubit.com", "@lclarkmichalek")
addPerson("Laurynas", "LaurynasUsas@gmail.com")
addPerson("Lee Hinman", "hinman@gmail.com", "@hinman")
addPerson("Lee Packham", "lpackham@gmail.com", "@leepa")
addPerson("Lehner Florian", "dev@der-flo.net")
addPerson("Lehner Florian", "dev@der-flo.net", "@florianl")
addPerson("Leigh McCulloch", "leigh@mcchouse.com")
addPerson("Leigh McCulloch", "leighmcc@gmail.com", "21426@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Leigh McCulloch", "leighmcc@gmail.com", "@leighmcculloch")
addPerson("Leo Antunes", "leo@costela.net")
addPerson("Leo Rudberg", "ljr@google.com")
addPerson("Leon Klingele", "git@leonklingele.de", "16005@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Leon Klingele", "git@leonklingele.de", "@leonklingele")
addPerson("Leonel Quinteros", "leonel.quinteros@gmail.com")
addPerson("Letian Yi", "letian0805@gmail.com")
addPerson("Lev Shamardin", "shamardin@gmail.com", "@abbot")
addPerson("Lewin Bormann", "lbo@spheniscida.de")
addPerson("Lewin Bormann", "lewin.bormann@gmail.com", "@dermesser")
addPerson("Liam Missin", "liam.missin@gmail.com")
addPerson("Lifu Huang", "lifu.hlf@gmail.com")
addPerson("Lin Haowen", "linhaowen99@gmail.com")
addPerson("Lineu Felipe", "lineufelipe@gmail.com")
addPerson("Lion Yang", "lion@aosc.xyz", "@LionNatsu")
addPerson("Liz Rice", "liz@lizrice.com")
addPerson("Lloyd Dewolf", "foolswisdom@gmail.com", "@lloydde")
addPerson("Logan", "businesspapers@gmail.com")
addPerson("Lorenz Bauer", "lmb@cloudflare.com", "14200@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Lorenz Bauer", "lmb@cloudflare.com", "@lmb")
addPerson("Lorenzo Masini", "rugginoso@develer.com", "17340@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Lorenzo Masini", "rugginoso@develer.com", "@rugginoso")
addPerson("Lorenzo Stoakes", "lstoakes@gmail.com", "@lorenzo-stoakes")
addPerson("LotusFenn", "fenn.lotus@gmail.com")
addPerson("LotusFenn", "fenn.lotus@gmail.com", "@LotusFenn")
addPerson("Luan Santos", "cfcluan@gmail.com", "@luan")
addPerson("Lubomir I. Ivanov (VMware)", "neolit123@gmail.com", "@neolit123")
addPerson("Lubomir I. Ivanov", "neolit123@gmail.com", "26534@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Luca Bruno", "luca.bruno@coreos.com", "@lucab")
addPerson("Luca Greco", "luca.greco@alcacoop.it", "@rpl")
addPerson("Lucas Bremgartner", "lucas.bremgartner@gmail.com", "16630@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Lucas Bremgartner", "lucas.bremgartner@gmail.com", "@breml")
addPerson("Lucas Clemente", "lclemente@google.com", "@lucas-clemente")
addPerson("Lucas Garron", "lgarron@chromium.org", "*goog")
addPerson("Lucas Halim", "luketheflyingcoder@gmail.com")
addPerson("Lucien Stuker", "lucien.stuker@gmail.com", "@LStuker")
addPerson("Lucio De Re", "lucio.dere@gmail.com", "@lootch")
addPerson("Ludi Rehak", "ludi317@gmail.com", "@ludi317")
addPerson("Luigi Riefolo", "luigi.riefolo@gmail.com", "@luigi-riefolo")
addPerson("Luit van Drongelen", "luit@luit.it", "@Luit")
addPerson("Luit van Drongelen", "luitvd@gmail.com", "@Luit")
addPerson("Luka Zakrajšek", "tr00.g33k@gmail.com")
addPerson("Luka", "luka@blow.sh")
addPerson("Lukasz Dobrzanski", "lukasz.m.dobrzanski@gmail.com")
addPerson("Lukasz Milewski", "lmmilewski@gmail.com", "lmilewski@google.com", "@LMMilewski")
addPerson("Luke Curley", "qpingu@gmail.com", "@kixelated")
addPerson("Luke Granger-Brown", "git@lukegb.com")
addPerson("Luna Duclos", "luna.duclos@palmstonegames.com")
addPerson("Lyle Franklin", "lylejfranklin@gmail.com", "@ljfranklin")
addPerson("Lynn Boger", "laboger@linux.vnet.ibm.com", "6320@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Lynn Boger", "laboger@linux.vnet.ibm.com", "@laboger")
addPerson("Lyoness", "carmen.andoh@gmail.com")
addPerson("Maarten Bezemer", "maarten.bezemer@gmail.com")
addPerson("Maciej Dębski", "maciejd@google.com", "26521@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Maciej Galkowski", "maciejgalkowski@gmail.com")
addPerson("Maggie Nolan", "nolanmar@google.com")
addPerson("Magnus Hiie", "magnus.hiie@gmail.com")
addPerson("Mahmoud Bassiouny", "mbassiouny@google.com")
addPerson("Mak Kolybabi", "mak@kolybabi.com", "@mogigoma")
addPerson("Maksym Trykur", "maksym.trykur@gmail.com", "@mak73kur")
addPerson("Mal Curtis", "mal@mal.co.nz", "@snikch")
addPerson("Malcolm Rebughini", "malcolm.rebughini@gmail.com")
addPerson("Malhar Vora", "mlvora.2010@gmail.com")
addPerson("Manfred Touron", "m@42.am", "@moul")
addPerson("Maniacal", "mike.glenney@gmail.com")
addPerson("Manigandan Dharmalingam", "manigandan.jeff@gmail.com")
addPerson("Manish Goregaokar", "manishsmail@gmail.com", "@Manishearth")
addPerson("Mansour Rahimi", "rahimi.mnr@gmail.com", "25524@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mansour Rahimi", "rahimi.mnr@gmail.com", "@m4ns0ur")
addPerson("Manu Garg", "manugarg@gmail.com")
addPerson("Manu S Ajith", "neo@codingarena.in", "@manusajith")
addPerson("Manuel FernandezaaZ", "sourvivor@gmail.com")
addPerson("Marc Coury", "gloriphobia@gmail.com")
addPerson("Marc Lopez", "marc5.12@outlook.com")
addPerson("Marc Vandenbosch", "marc.vandenbosch@gmail.com")
addPerson("Marc-Antoine Ruel", "maruel@chromium.org", "7845@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Marc-Antoine Ruel", "maruel@chromium.org", "@maruel", "*goog")
addPerson("Marcel Edmund Franke", "marcel.edmund.franke@gmail.com", "@donutloop")
addPerson("Marcel Lanz", "marcel.lanz@n-1.ch")
addPerson("Marcel van Lohuizen", "mpvl@golang.org", "5182@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Marcel van Lohuizen", "mpvl@golang.org", "@mpvl")
addPerson("Marcelo Cantos", "marcelo.cantos@gmail.com")
addPerson("Marcio Feroni", "consultoria.feroni@gmail.com")
addPerson("Marco Hennings", "marco.hennings@freiheit.com", "@mhennings")
addPerson("Marco Peereboom", "marco@peereboom.us")
addPerson("Marcos Minond", "minond.marcos@gmail.com")
addPerson("Marcus Comstedt", "marcus@mc.pp.se")
addPerson("Marcus Willock", "crazcalm@gmail.com", "@crazcalm")
addPerson("Marek Polacek", "polacek@redhat.com", "@mpolacek")
addPerson("Marga Manterola", "marga@google.com", "@margamanterola")
addPerson("Marin", "marin.basic02@gmail.com", "@MarinX")
addPerson("Mario Arranz", "marioarranzr@gmail.com", "@marioarranzr")
addPerson("Marius Kittler", "mariuskittler@gmx.de")
addPerson("Marius Nuennerich", "mnu@google.com")
addPerson("Mark Adams", "mark@markadams.me", "@mark-adams")
addPerson("Mark Bucciarelli", "mkbucc@gmail.com", "@mbucc")
addPerson("Mark Harrison", "marhar@google.com")
addPerson("Mark Lee", "code0x9@gmail.com")
addPerson("Mark Pulford", "mark@kyne.com.au", "15920@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mark Pulford", "mark@kyne.com.au", "@mpx")
addPerson("Mark Rushakoff", "mark.rushakoff@gmail.com", "@mark-rushakoff")
addPerson("Mark Rushakoff", "mark@influxdata.com")
addPerson("Mark Ryan", "mark.d.ryan@intel.com", "@markdryan")
addPerson("Mark Severson", "miquella@gmail.com", "11540@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mark Severson", "miquella@gmail.com", "@miquella")
addPerson("Mark Theunissen", "mark.theunissen@gmail.com", "@marktheunissen")
addPerson("Mark Wolfe", "mark@wolfe.id.au", "@wolfeidau")
addPerson("Mark Zavislak", "zavislak@google.com", "@zavislak")
addPerson("Marko Juhani Silokunnas", "marko.silokunnas@gmail.com", "@marant")
addPerson("Marko Kevac", "marko@kevac.org")
addPerson("Marko Kevac", "marko@kevac.org", "@mkevac")
addPerson("Marko Mudrinic", "mudrinic.mare@gmail.com", "@xmudrii")
addPerson("Marko Mudrinić", "mudrinic.mare@gmail.com", "17318@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Marko Tiikkaja", "marko@joh.to", "5446@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Marko Tiikkaja", "marko@joh.to", "@johto")
addPerson("Markus Sonderegger", "marraison@gmail.com", "@mars9")
addPerson("Markus Zimmermann", "markus.zimmermann@nethead.at")
addPerson("Markus Zimmermann", "zimmski@gmail.com", "@zimmski")
addPerson("Markus", "m.walther97@gmail.com", "@markus-wa")
addPerson("Marten Seemann", "martenseemann@gmail.com")
addPerson("Marten Seemann", "martenseemann@gmail.com", "@marten-seemann")
addPerson("Martijn Janssen", "martijn9612+github@gmail.com")
addPerson("Martin Bertschler", "mbertschler@gmail.com", "@mbertschler")
addPerson("Martin Drlik", "martadrlik@gmail.com")
addPerson("Martin Garton", "garton@gmail.com", "13346@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Martin Garton", "garton@gmail.com", "@MartinGarton")
addPerson("Martin Garton", "garton@gmail.com", "@mjgarton")
addPerson("Martin Habbecke", "marhab@google.com")
addPerson("Martin Hamrle", "martin.hamrle@gmail.com", "@mhamrle")
addPerson("Martin Hoefling", "martin.hoefling@gmx.de")
addPerson("Martin Kreichgauer", "martinkr@google.com", "16331@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Martin Kreichgauer", "martinkr@google.com", "@kreichgauer")
addPerson("Martin Kunc", "martinkunc@users.noreply.github.com")
addPerson("Martin Kunc", "mk@Martins-MacBook-Pro.local")
addPerson("Martin Lee", "martin@martinlee.org")
addPerson("Martin Lindhe", "martin.j.lindhe@gmail.com", "@martinlindhe")
addPerson("Martin Möhrmann", "moehrmann@google.com", "martisch@uos.de", "@martisch")
addPerson("Martin Möhrmann", "martisch@uos.de", "5846@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Martin Möhrmann", "moehrmann@google.com", "16006@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Martin Olsen", "github.com@martinolsen.net", "@martinolsen")
addPerson("Martin Olsson", "martin@minimum.se", "@mo")
addPerson("Martin Probst", "martin@probst.io")
addPerson("Martin Redmond", "mrtodo@gmail.com")
addPerson("Martin Sucha", "anty.sk+git@gmail.com", "@martin-sucha")
addPerson("Martin Tournoij", "martin@arp242.net")
addPerson("Martins Sipenko", "martins.sipenko@gmail.com", "@martinssipenko")
addPerson("Martynas Budriūnas", "mabu@google.com", "@mabu")
addPerson("Marvin Stenger", "marvin.stenger94@gmail.com")
addPerson("Marvin Stenger", "marvin.stenger94@gmail.com", "9850@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Masa Sekimura", "sekimura@gmail.com")
addPerson("Masahiro Furudate", "masahiro.furudate@gmail.com")
addPerson("Massimiliano Ghilardi", "massimiliano.ghilardi@gmail.com")
addPerson("Mat Byczkowski", "mbyczkowski@gmail.com", "@mbyczkowski")
addPerson("Mat Evans", "matzhouse@gmail.com")
addPerson("Mat Ryer", "thatmatryer@gmail.com")
addPerson("Matej Baćo", "matejbaco@gmail.com", "@matejb")
addPerson("Mateus Amin", "mateus.amin@gmail.com")
addPerson("Mateusz Czapliński", "czapkofan@gmail.com", "10525@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mateusz Czapliński", "czapkofan@gmail.com", "@akavel")
addPerson("Mathias Beke", "git@denbeke.be", "7490@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mathias Hall-Andersen", "mathias@hall-andersen.dk")
addPerson("Mathias Leppich", "mleppich@muhqu.de", "@muhqu")
addPerson("MathiasB", "git@denbeke.be", "@DenBeke")
addPerson("Mathieu Lonjaret", "mathieu.lonjaret@gmail.com", "8466@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mats Lidell", "mats.lidell@cag.se", "@matsl")
addPerson("Matt Aimonetti", "mattaimonetti@gmail.com", "13882@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matt Aimonetti", "mattaimonetti@gmail.com", "@mattetti")
addPerson("Matt Blair", "me@matthewblair.net", "@mblair")
addPerson("Matt Bostock", "matt@mattbostock.com", "@mattbostock")
addPerson("Matt Dee", "mdee@hioscar.com")
addPerson("Matt Drollette", "matt@drollette.com", "@MDrollette")
addPerson("Matt DuVall", "matt@stripe.com")
addPerson("Matt Harden", "matt.harden@gmail.com")
addPerson("Matt Harden", "matt.harden@gmail.com", "8785@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matt Harden", "matt.harden@gmail.com", "@nerdatmath")
addPerson("Matt Jibson", "matt.jibson@gmail.com", "@mjibson")
addPerson("Matt Joiner", "anacrolix@gmail.com", "@anacrolix")
addPerson("Matt Jones", "matt@mhjones.org")
addPerson("Matt Juran", "thepciet@gmail.com", "@pciet")
addPerson("Matt Keenan", "github@mattkeenan.net", "@mattkeenan")
addPerson("Matt Layher", "mdlayher@gmail.com", "7860@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matt Layher", "mdlayher@gmail.com", "@mdlayher")
addPerson("Matt Proud", "matt.proud@gmail.com", "6400@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matt Reiferson", "mreiferson@gmail.com", "@mreiferson")
addPerson("Matt Robenolt", "matt@ydekproductions.com", "@mattrobenolt")
addPerson("Matt T. Proud", "matt.proud@gmail.com", "@matttproud")
addPerson("Matteo Croce", "matteo.croce@canonical.com")
addPerson("Matthew Allen Moltzau", "Matthew_Moltzau@comcast.com")
addPerson("Matthew Brennan", "matty.brennan@gmail.com", "@mattyb")
addPerson("Matthew Broberg", "gogetmb@gmail.com", "@mbbroberg")
addPerson("Matthew Broberg", "matthewbbroberg@gmail.com")
addPerson("Matthew Byrne", "mjw.byrne@gmail.com")
addPerson("Matthew Cottingham", "mattcottingham@gmail.com", "@mattrco")
addPerson("Matthew Dempsky", "mdempsky@google.com", "@mdempsky")
addPerson("Matthew Dempsky", "matthew@dempsky.org", "8715@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matthew Dempsky", "mdempsky@google.com", "5440@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matthew Denton", "mdenton@skyportsystems.com", "@mdentonSkyport")
addPerson("Matthew Endsley", "mendsley@gmail.com")
addPerson("Matthew Herrmann", "mherr@google.com")
addPerson("Matthew Holt", "matthew.holt+git@gmail.com", "@mholt")
addPerson("Matthew Holt", "matthew.holt@gmail.com", "7611@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matthew Hooker", "mwhooker@gmail.com")
addPerson("Matthew Horsnell", "matthew.horsnell@gmail.com", "@matt2909")
addPerson("Matthew LJ Smith", "matthew.lj.smith@gmail.com")
addPerson("Matthew Rudy Jacobs", "matthewrudyjacobs@gmail.com")
addPerson("Matthew Waters", "mwwaters@gmail.com", "@mwwaters")
addPerson("Matthew Whisenhunt", "matt.whisenhunt@gmail.com")
addPerson("Matthieu Hauglustaine", "matt.hauglustaine@gmail.com", "@MattHauglustaine")
addPerson("Matthieu Sarter", "matthieu.sarter.external@atos.net", "16325@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Matthieu Sarter", "matthieu.sarter.external@atos.net", "@MatthieuSarter")
addPerson("Matthijs Kooijman", "matthijs@stdin.nl", "@matthijskooijman")
addPerson("Max Moroz", "maxmoroz@gmail.com")
addPerson("Max Renaud", "maxrd@google.com")
addPerson("Max Riveiro", "kavu13@gmail.com", "@kavu")
addPerson("Max Schmitt", "max@schmitt.mx")
addPerson("Max Ushakov", "ushmax@gmail.com", "@ushakov")
addPerson("Maxime de Roucy", "maxime.deroucy@gmail.com", "@tchernomax")
addPerson("Maxwell Krohn", "themax@gmail.com", "@maxtaco")
addPerson("Mayank Kumar", "krmayankk@gmail.com", "@krmayankk")
addPerson("Mayank Sharma", "maksharma231@gmail.com")
addPerson("Mayank Sharma", "mayank.sharma@tokopedia.com")
addPerson("Mayank Sharma", "mayank@qlogic.io")
addPerson("Mayra Cabrera", "mcabrera1087@gmail.com")
addPerson("Mehul Choube", "mchoube@gmail.com")
addPerson("Meir Fischer", "meirfischer@gmail.com", "@meirf")
addPerson("Meng Zhuo", "mengzhuo1203@gmail.com", "@mengzhuo")
addPerson("Menghan Li", "menghanl@google.com")
addPerson("Mhd Sulhan", "m.shulhan@gmail.com", "@shuLhan")
addPerson("MiLk", "hello@emilienkenler.com", "@MiLk")
addPerson("Michael Andersen", "michael@steelcode.com")
addPerson("Michael Anthony Knyszek", "mknyszek@google.com", "@mknyszek")
addPerson("Michael Brandenburg", "mbrandenburg@bolste.com")
addPerson("Michael Chaten", "mchaten@gmail.com", "@chaten")
addPerson("Michael D Henderson", "mdhender@mdhender.com")
addPerson("Michael Darakananda", "pongad@gmail.com")
addPerson("Michael Darakananda", "pongad@google.com")
addPerson("Michael Darakananda", "pongad@google.com", "@pongad")
addPerson("Michael Darakananda", "pongad@pongad-linuxworkstation1.sea.corp.google.com")
addPerson("Michael Dorner", "mail@michaeldorner.de")
addPerson("Michael Edwards", "medwards@walledcity.ca")
addPerson("Michael Elkins", "michael.elkins@gmail.com", "@sigpipe")
addPerson("Michael Ellis", "micellis@justin.tv", "@mellis")
addPerson("Michael Folkson", "michael@riskbazaar.org")
addPerson("Michael Fraenkel", "michael.fraenkel@gmail.com", "5889@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Fraenkel", "michael.fraenkel@gmail.com", "@fraenkel")
addPerson("Michael Gehring", "mg@ebfe.org", "6715@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Gehring", "mg@ebfe.org", "@ebfe")
addPerson("Michael Henderson", "mdhender@users.noreply.github.com", "@mdhender")
addPerson("Michael Hendricks", "michael@ndrix.org", "@mndrix")
addPerson("Michael Hoisie", "hoisie@gmail.com", "@hoisie")
addPerson("Michael Hudson-Doyle", "michael.hudson@canonical.com", "5153@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Hudson-Doyle", "michael.hudson@canonical.com", "@mwhudson")
addPerson("Michael Kasch", "michael.kasch@gmail.com", "@MyChaOS87")
addPerson("Michael Kuryshev", "me@mk9.name")
addPerson("Michael Käufl", "golang@c.michael-kaeufl.de", "@michael-k")
addPerson("Michael Lewis", "mikelikespie@gmail.com", "@mikelikespie")
addPerson("Michael MacInnis", "michael.p.macinnis@gmail.com", "6355@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael MacInnis", "michael.p.macinnis@gmail.com", "@michaelmacinnis")
addPerson("Michael Marineau", "michael.marineau@coreos.com", "mike@marineau.org", "@marineam")
addPerson("Michael Matloob", "matloob@golang.org", "10033@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Matloob", "matloob@golang.org", "matloob@google.com", "@matloob")
addPerson("Michael Matloob", "michaelmatloob@gmail.com", "5270@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael McConville", "momcconville@gmail.com")
addPerson("Michael McGreevy", "mcgreevy@golang.org", "@mcgreevy")
addPerson("Michael McLoughlin", "mmcloughlin@gmail.com", "@mmcloughlin")
addPerson("Michael Munday", "mike.munday@ibm.com", "11990@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Munday", "mike.munday@ibm.com", "munday@ca.ibm.com", "@mundaym")
addPerson("Michael Pearson", "mipearson@gmail.com", "@mipearson")
addPerson("Michael Pratt", "mpratt@google.com", "12120@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Pratt", "mpratt@google.com", "@prattmic")
addPerson("Michael Schaller", "michael@5challer.de", "@michael-schaller")
addPerson("Michael Schurter", "michael.schurter@gmail.com", "@schmichael")
addPerson("Michael Shields", "mshields@google.com", "@shields")
addPerson("Michael Smith", "mikejsmitty@gmail.com")
addPerson("Michael Spiegel", "michael.m.spiegel@gmail.com")
addPerson("Michael Stapelberg", "stapelberg@google.com", "8470@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michael Stapelberg", "stapelberg@google.com", "@stapelberg")
addPerson("Michael Steinert", "mike.steinert@gmail.com", "@msteinert")
addPerson("Michael Sterle-Contala", "mike.sterle@gmail.com")
addPerson("Michael Teichgräber", "mteichgraeber@gmx.de", "@knieriem")
addPerson("Michael Vetter", "g.bluehut@gmail.com", "@jubalh")
addPerson("Michal Bohuslávek", "mbohuslavek@gmail.com", "9715@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michal Bohuslávek", "mbohuslavek@gmail.com", "@mibk")
addPerson("Michal Cierniak", "cierniak@google.com", "@cierniak")
addPerson("Michal Franc", "lam.michal.franc@gmail.com")
addPerson("Michal Pristas", "michal.pristas@gmail.com")
addPerson("Michalis Kargakis", "michaliskargakis@gmail.com")
addPerson("Michel Lespinasse", "walken@google.com", "12938@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Michel Lespinasse", "walken@google.com", "@walken-google")
addPerson("Miek Gieben", "miek@miek.nl", "@miekg", "*goog")
addPerson("Miguel Molina", "hi@mvader.me", "@erizocosmico")
addPerson("Miguel Perez", "miguel250@gmail.com")
addPerson("Mihai Borobocea", "MihaiBorobocea@gmail.com")
addPerson("Mihai Todor", "todormihai@gmail.com", "@mihaitodor")
addPerson("Mike Appleby", "mike@app.leby.org", "14930@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mike Appleby", "mike@app.leby.org", "@appleby")
addPerson("Mike Danese", "mikedanese@google.com", "@mikedanese")
addPerson("Mike Dour", "mdour@google.com")
addPerson("Mike Graf", "mikegraf000@gmail.com")
addPerson("Mike Houston", "mike@kothar.net", "@kothar")
addPerson("Mike Kabischev", "kabischev@gmail.com")
addPerson("Mike Lloyd", "kevin.michael.lloyd@gmail.com")
addPerson("Mike Lloyd", "kevin.michael.lloyd@gmail.com", "10091@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mike Rosset", "mike.rosset@gmail.com", "@mrosset")
addPerson("Mike Samuel", "mikesamuel@gmail.com", "17511@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mike Samuel", "mikesamuel@gmail.com", "@mikesamuel")
addPerson("Mike Solomon", "msolo@gmail.com", "@msolo")
addPerson("Mike Strosaker", "strosake@us.ibm.com")
addPerson("Mike Tsao", "mike@sowbug.com")
addPerson("Mike Wiacek", "mjwiacek@google.com", "@mikewiacek")
addPerson("Mike Wiacek", "mjwiacek@mjwiacek-macbookpro.roam.corp.google.com")
addPerson("Mikhail Gusarov", "dottedmag@dottedmag.net", "@dottedmag")
addPerson("Miki Habryn", "dichro@rcpt.to")
addPerson("Miki Tebeka", "miki.tebeka@gmail.com", "@tebeka")
addPerson("Mikio Hara", "mikioh.public.networking@gmail.com", "mikioh.mikioh@gmail.com", "@mikioh", "29736@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mikkel Krautz", "mikkel@krautz.dk", "@mkrautz")
addPerson("Mikołaj Baranowski", "m.baranowski@travelaudience.com")
addPerson("Milan Knezevic", "milan.knezevic@mips.com", "@milanknezevic")
addPerson("Minaev Mike", "minaev.mike@gmail.com", "23800@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Minaev Mike", "minaev.mike@gmail.com", "@minaevmike")
addPerson("Minux Ma", "minux@golang.org", "5055@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Miquel Sabaté Solà", "mikisabate@gmail.com", "@mssola")
addPerson("Miroslav Genov", "mgenov@gmail.com", "@mgenov")
addPerson("Misty De Meo", "mistydemeo@gmail.com", "@mistydemeo")
addPerson("Mitchell-Riley", "tug72074@temple.edu")
addPerson("Mithun Sasidharan", "mithunsasidharan89@gmail.com")
addPerson("Miyakawa Taku", "miyakawa.taku@gmail.com")
addPerson("Mofizur Rahman", "moficodes@gmail.com")
addPerson("Mohan Pawar", "mohanpawary1990@gmail.com")
addPerson("Mohit Agarwal", "mohit@sdf.org", "10715@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mohit Agarwal", "mohit@sdf.org", "@0xmohit")
addPerson("Mohit kumar Bajoria", "mohitbajo36@gmail.com", "@mbj36")
addPerson("Momchil Velikov", "momchil.velikov@gmail.com", "@momchil-velikov")
addPerson("Monis Khan", "mkhan@redhat.com", "@enj")
addPerson("Monty Taylor", "mordred@inaugust.com", "@emonty")
addPerson("Moriyoshi Koizumi", "mozo@mozo.jp", "@moriyoshi")
addPerson("Morten Siebuhr", "sbhr@sbhr.dk", "10928@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Morten Siebuhr", "sbhr@sbhr.dk", "@msiebuhr")
addPerson("Mostyn Bramley-Moore", "mostyn@antipode.se")
addPerson("Mostyn Bramley-Moore", "mostyn@antipode.se", "18980@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mostyn Bramley-Moore", "mostyn@antipode.se", "@mostynb")
addPerson("Mrunal Patel", "mrunalp@gmail.com", "@mrunalp")
addPerson("Muhammad Falak R Wani", "falakreyaz@gmail.com", "23560@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Muhammad Falak R Wani", "falakreyaz@gmail.com", "@mfrw")
addPerson("Muhammed Uluyol", "uluyol0@gmail.com", "@uluyol")
addPerson("Mura Li", "mura_li@castech.com.tw", "10925@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Mura Li", "mura_li@castech.com.tw", "@typeless")
addPerson("Máximo Cuadros Ortiz", "mcuadros@gmail.com", "@mcuadros")
addPerson("NODA, Kai", "nodakai@gmail.com")
addPerson("Nan Deng", "monnand@gmail.com", "@monnand")
addPerson("Naoki INADA", "songofacandy@gmail.com", "5895@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Naoki Kanatani", "k12naoki@gmail.com", "@kanata2")
addPerson("Nate Wilkinson", "nathanwilk7@gmail.com", "@nathanwilk7")
addPerson("Nathan Cantelmo", "n.cantelmo@gmail.com", "@ncantelmo")
addPerson("Nathan Caza", "mastercactapus@gmail.com", "@mastercactapus")
addPerson("Nathan Davies", "nathanjamesdavies@gmail.com")
addPerson("Nathan John Youngman", "nj@nathany.com", "@nathany")
addPerson("Nathan Otterness", "otternes@cs.unc.edu")
addPerson("Nathan P Finch", "nate.finch@gmail.com", "@natefinch")
addPerson("Nathan VanBenschoten", "nvanbenschoten@gmail.com", "@nvanbenschoten")
addPerson("Nathan Youngman", "git@nathany.com", "@nathany")
addPerson("Nathan Youngman", "hello@nathany.com", "5235@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nathan(yinian) Hu", "nathanhu@google.com", "@nathandfox")
addPerson("Nathaniel Caza", "mastercactapus@gmail.com", "17183@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Naveen Kumar Sangi", "naveenkumarsangi@protonmail.com")
addPerson("Neelesh Chandola", "neelesh.c98@gmail.com")
addPerson("Neil Basu", "nbasu02@gmail.com")
addPerson("Neil Lyons", "nwjlyons@googlemail.com", "@nwjlyons")
addPerson("Neil Owen", "neil.anthony.owen@gmail.com")
addPerson("Nelz", "nelz9999@gmail.com")
addPerson("Nemin Sun", "sunnemin@gmail.com")
addPerson("Neven Sajko", "nsajko@gmail.com", "15048@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Neven Sajko", "nsajko@gmail.com", "@nsajko")
addPerson("Nevins Bartolomeo", "nevins.bartolomeo@gmail.com", "@nevins-b")
addPerson("Niall Sheridan", "nsheridan@gmail.com", "13755@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Niall Sheridan", "nsheridan@gmail.com", "@nsheridan")
addPerson("Nic Day", "nic.day@me.com", "@nicday")
addPerson("Nicholas Anderson", "nick@miletwo.net")
addPerson("Nicholas Katsaros", "nick@nickkatsaros.com", "@nkatsaros")
addPerson("Nicholas Maniscalco", "nicholas@maniscalco.com")
addPerson("Nicholas Maniscalco", "nicholas@maniscalco.com", "@nicholasmaniscalco")
addPerson("Nicholas Ng", "nickng@nickng.io")
addPerson("Nicholas Presta", "nick@nickpresta.ca", "@nickpresta")
addPerson("Nicholas Rawlings", "nicholasorenrawlings@gmail.com")
addPerson("Nicholas Waples", "nwaples@gmail.com", "@nwaples")
addPerson("Nick Cooper", "nmvc@google.com")
addPerson("Nick Cooper", "nmvc@google.com", "5776@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nick Craig-Wood", "nick@craig-wood.com", "@ncw")
addPerson("Nick Craig-Wood", "nickcw@gmail.com", "5175@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nick Harper", "nharper@google.com")
addPerson("Nick Kubala", "nkubala@google.com", "@nkubala")
addPerson("Nick McCrory", "nickmhc14@gmail.com")
addPerson("Nick Miyake", "nmiyake@gmail.com")
addPerson("Nick Patavalis", "nick.patavalis@gmail.com", "9880@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nick Patavalis", "nick.patavalis@gmail.com", "@npat-efault")
addPerson("Nick Petroni", "npetroni@cs.umd.edu")
addPerson("Nick Sullivan", "nicholas.sullivan@gmail.com", "@grittygrease")
addPerson("Nickolay Turpitko", "nikolay@turpitko.com", "7015@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nicolas BRULEZ", "n.brulez@gmail.com", "@N-Bz")
addPerson("Nicolas S. Dade", "nic.dade@gmail.com", "@nsd20463")
addPerson("Niek Sanders", "niek.sanders@gmail.com", "19925@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Niek Sanders", "niek.sanders@gmail.com", "@nieksand")
addPerson("Nigel Kerr", "nigel.kerr@gmail.com", "@nigelkerr")
addPerson("Nigel Tao", "nigeltao@golang.org", "5899@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nigel Tao", "nigeltao@golang.org", "@nigeltao")
addPerson("Nik Nyby", "nnyby@columbia.edu", "@nikolas")
addPerson("Nikhil Benesch", "nikhil.benesch@gmail.com", "25418@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nikhil Benesch", "nikhil.benesch@gmail.com", "@benesch")
addPerson("Nikhita Raghunath", "nikitaraghunath@gmail.com")
addPerson("Niklas Lindblad", "niklas@lindblad.info")
addPerson("Niklas Schnelle", "niklas.schnelle@gmail.com", "@niklas88")
addPerson("Niko Dziemba", "niko@dziemba.com", "@dziemba")
addPerson("Nikolay Ponomarev", "itsnikolay@gmail.com")
addPerson("Nikolay Turpitko", "nikolay@turpitko.com", "@nikolay-turpitko")
addPerson("Nilesh Jagnik", "nileshj@google.com")
addPerson("Nils Larsgård", "nilsmagnus@gmail.com", "@nilsmagnus")
addPerson("Nir Soffer", "nirsof@gmail.com")
addPerson("Nishanth Shanmugham", "nishanth.gerrard@gmail.com")
addPerson("Nitin Patil", "patil16nit@gmail.com")
addPerson("Noah Campbell", "noahcampbell@gmail.com")
addPerson("Noble Johnson", "noblepoly@gmail.com")
addPerson("Nodir Turakulov", "nodir@google.com", "7877@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Nodir Turakulov", "nodir@google.com", "@nodirt")
addPerson("Noel Georgi", "git@frezbo.com")
addPerson("Norberto Lopes", "nlopes.ml@gmail.com", "@nlopes")
addPerson("Nyah Check", "check.nyah@gmail.com", "22747@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("ObesePenguin", "srburnham@gmail.com")
addPerson("Odin Ugedal", "odin@ugedal.com", "@odinuge")
addPerson("Oleg Bulatov", "dmage@yandex-team.ru", "@dmage")
addPerson("Oleg Bulatov", "oleg@bulatov.me", "@dmage")
addPerson("Oleg Vakheta", "helginet@gmail.com", "@helginet")
addPerson("Oleku Konko", "oleku.konko@gmail.com", "@olekukonko")
addPerson("OlgaVlPetrova", "ovppetrova@gmail.com", "@OlgaVlPetrova")
addPerson("Oling Cat", "olingcat@gmail.com", "5136@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Oling Cat", "olingcat@gmail.com", "@OlingCat")
addPerson("Oliver Hookins", "ohookins@gmail.com", "@ohookins")
addPerson("Oliver Skånberg-Tippen", "oliverskanbergtippen@gmail.com")
addPerson("Oliver Stenbom", "ostenbom@pivotal.io", "@ostenbom")
addPerson("Oliver Tonnhofer", "olt@bogosoft.com", "@olt")
addPerson("Olivier Duperray", "duperray.olivier@gmail.com", "@dupoxy")
addPerson("Olivier Mengué", "olivier.mengue@gmail.com")
addPerson("Olivier Poitrey", "rs@netflix.com", "rs@dailymotion.com", "rs@rhapsodyk.net", "10610@62eb7196-b449-3ce5-99f1-c037f21e1705", "@rs")
addPerson("Olivier Saingre", "osaingre@gmail.com", "@osaingre")
addPerson("Olivier", "desylva@gmail.com")
addPerson("Omar Jarjur", "ojarjur@google.com")
addPerson("OneOfOne", "oneofone@gmail.com", "@OneOfOne")
addPerson("Oryan Moshe", "iamoryanmoshe@gmail.com", "28422@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Oryan Moshe", "iamoryanmoshe@gmail.com", "@oryanmoshe")
addPerson("Osamu TONOMORI", "osamingo@gmail.com")
addPerson("Osamu TONOMORI", "osamingo@gmail.com", "@osamingo")
addPerson("Oscar Forner Martinez", "oscar.forner.martinez@gmail.com")
addPerson("Otto Giron", "ottog2486@gmail.com")
addPerson("Owen Marshall", "om@om.ht")
addPerson("Pablo Lalloni", "plalloni@gmail.com", "@lalloni")
addPerson("Pablo Santiago Blum de Aguiar", "scorphus@gmail.com", "@scorphus")
addPerson("Paddy Foran", "paddy@secondbit.org", "@paddyforan")
addPerson("Paddy Steed", "jarktasaa@gmail.com")
addPerson("Padraig Kitterick", "padraigkitterick@gmail.com", "@padraigkitterick")
addPerson("Panagiotis Xynos", "panagiotis.xinos@gmail.com")
addPerson("Paolo Giarrusso", "p.giarrusso@gmail.com", "@Blaisorblade")
addPerson("Paolo Martini", "mrtnpaolo@gmail.com", "@ear")
addPerson("Parker Moore", "parkrmoore@gmail.com", "6501@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Parker Moore", "parkrmoore@gmail.com", "@parkr")
addPerson("Pascal Corpet", "lascap@google.com")
addPerson("Pascal Muetschard", "pmuetschard@google.com")
addPerson("Pascal S. de Kloe", "pascal@quies.net", "@pascaldekloe")
addPerson("Pascal de Kloe", "pascal@quies.net", "8310@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Pat Moroney", "pat@pat.email", "@pmoroney")
addPerson("Patrick Bennett", "patrick@thebennetts.com")
addPerson("Patrick Edgett", "pedgett@gmail.com")
addPerson("Patrick Gavlin", "pgavlin@gmail.com", "@pgavlin")
addPerson("Patrick Higgins", "patrick.allen.higgins@gmail.com", "@patrick-higgins")
addPerson("Patrick Lee", "pattyshack101@gmail.com", "@pattyshack")
addPerson("Patrick Mezard", "patrick@mezard.eu", "@pmezard")
addPerson("Patrick Mylund Nielsen", "patrick@patrickmn.com", "@patrickmn")
addPerson("Patrick Mézard", "patrick@mezard.eu", "7915@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Patrick Pelletier", "pp.pelletier@gmail.com", "@skinp")
addPerson("Patrick Riley", "pfr@google.com", "@pfrstg")
addPerson("Patrick Smith", "pat42smith@gmail.com", "@pat42smith")
addPerson("Patrick Uiterwijk", "patrick@puiterwijk.org")
addPerson("Patrik Nyblom", "pnyb@google.com")
addPerson("Paul A Querna", "paul.querna@gmail.com", "@pquerna")
addPerson("Paul Borman", "borman@google.com", "@pborman")
addPerson("Paul Boyd", "boyd.paul2@gmail.com", "@pboyd")
addPerson("Paul Chang", "paulchang@google.com", "@pchx")
addPerson("Paul Gier", "pgier@redhat.com")
addPerson("Paul Hankin", "paulhankin@google.com")
addPerson("Paul Jolly", "paul@myitcv.org.uk", "@myitcv")
addPerson("Paul Jolly", "paul@myitcv.io", "@myitcv")
addPerson("Paul Jolly", "paul@myitcv.org.uk", "16375@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Paul Lalonde", "paul.a.lalonde@gmail.com", "@paul-lalonde")
addPerson("Paul M Furley", "paul@paulfurley.com")
addPerson("Paul Marks", "pmarks@google.com", "6050@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Paul Marks", "pmarks@google.com", "@pmarks-net")
addPerson("Paul Meyer", "paul.meyer@microsoft.com", "@paulmey")
addPerson("Paul Nasrat", "pnasrat@google.com")
addPerson("Paul PISCUC", "paul.piscuc@gmail.com", "@ppiscuc")
addPerson("Paul Querna", "pquerna@apache.org", "@pquerna", "paul@querna.org", "14273@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Paul Rosania", "paul.rosania@gmail.com", "@paulrosania")
addPerson("Paul Sbarra", "sbarra.paul@gmail.com", "@tones111")
addPerson("Paul Smith", "paulsmith@pobox.com", "@paulsmith")
addPerson("Paul Tyng", "paul@paultyng.net")
addPerson("Paul Tyng", "ptyng@underarmour.com")
addPerson("Paul Wankadia", "junyer@google.com", "@junyer")
addPerson("Paul van Brouwershaven", "paul@vanbrouwershaven.com", "5920@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Paul van Brouwershaven", "paul@vanbrouwershaven.com", "@vanbroup")
addPerson("Paulo Casaretto", "pcasaretto@gmail.com", "@pcasaretto")
addPerson("Paulo Flabiano Smorigo", "pfsmorigo@linux.vnet.ibm.com")
addPerson("Paulo Flabiano Smorigo", "pfsmorigo@linux.vnet.ibm.com", "@pfsmorigo")
addPerson("Pavlo Sumkin", "psumkin@mirantis.com")
addPerson("Pavlo Sumkin", "ymkins@gmail.com")
addPerson("Pawel Knap", "pawelknap88@gmail.com", "@ppknap")
addPerson("Pawel Pisarzewski", "morris@morris.io")
addPerson("Peng Gao", "peng.gao.dut@gmail.com")
addPerson("Percy Wegmann", "ox.to.a.cart@gmail.com", "@oxtoacart")
addPerson("Perry Abbott", "perry.j.abbott@gmail.com", "@pabbott0")
addPerson("Petar Maymounkov", "petarm@gmail.com", "@petar")
addPerson("Peter Armitage", "peter.armitage@gmail.com", "@pja")
addPerson("Peter Armitage", "pja@google.com")
addPerson("Peter Bourgon", "peter@bourgon.org", "@peterbourgon")
addPerson("Peter Collingbourne", "pcc@google.com", "5535@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Collingbourne", "pcc@google.com", "@pcc")
addPerson("Peter Ebden", "peter.ebden@gmail.com")
addPerson("Peter Froehlich", "peter.hans.froehlich@gmail.com", "@phf")
addPerson("Peter Gonda", "pgonda@google.com")
addPerson("Peter Gonda", "pgonda@google.com", "@pgonda")
addPerson("Peter Gonda", "ptrgonda@gmail.com")
addPerson("Peter Götz", "peter.gtz@gmail.com")
addPerson("Peter Hoyes", "pahoyes@gmail.com")
addPerson("Peter Kieltyka", "peter.kieltyka@pressly.com")
addPerson("Peter Kleiweg", "pkleiweg@xs4all.nl", "@pebbe")
addPerson("Peter Mattis", "petermattis@gmail.com")
addPerson("Peter McKenzie", "petermck@google.com", "@peter-mckenzie")
addPerson("Peter Moody", "peter.moody@gmail.com")
addPerson("Peter Moody", "pmoody@uber.com", "8905@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Moody", "pmoody@uber.com", "@pmoody-")
addPerson("Peter Morjan", "pmorjan@gmail.com", "@pmorjan")
addPerson("Peter Mundy", "go.peter.90@gmail.com", "@peterGo")
addPerson("Peter Nguyen", "peter@mictis.com", "@pengux")
addPerson("Peter S", "speter.go1@gmail.com", "5351@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Sanford", "psanford@sanford.io")
addPerson("Peter Sutherland", "peter@pedrosland.co.uk")
addPerson("Peter Teichman", "peter@teichman.org")
addPerson("Peter Teichman", "pteichman@fastly.com", "@pteichman")
addPerson("Peter Tseng", "ptseng@squareup.com", "@petertseng")
addPerson("Peter Waldschmidt", "peter@waldschmidt.com", "6340@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Waldschmidt", "peter@waldschmidt.com", "@peterwald")
addPerson("Peter Waller", "p@pwaller.net", "5822@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Waller", "p@pwaller.net", "@pwaller")
addPerson("Peter Wathall", "peter.wathall@gmail.com")
addPerson("Peter Weinberger", "pjw@golang.org", "pjw@google.com", "@pjweinb", "@pjweinbgo")
addPerson("Peter Weinberger", "pjw@google.com", "5260@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Williams", "pwil3058@gmail.com", "@pwil3058")
addPerson("Peter Wu", "peter@lekensteyn.nl", "24681@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Peter Wu", "pwu@cloudflare.com", "@Lekensteyn")
addPerson("Peterson, David", "davidtpeterson@gmail.com")
addPerson("Phil Pearl", "philip.j.r.pearl@gmail.com", "@philpearl")
addPerson("Philip Brown", "phil@bolthole.com")
addPerson("Philip Børgesen", "philip.borgesen@gmail.com", "@PhilipBorgesen")
addPerson("Philip Børgesen", "philipborgesen@users.noreply.github.com", "@PhilipBorgesen")
addPerson("Philip Hofer", "phofer@umich.edu")
addPerson("Philip Hofer", "phofer@umich.edu", "9055@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Philip K. Warren", "pkwarren@gmail.com", "@pkwarren")
addPerson("Philip Pearl", "philip.j.r.pearl@gmail.com", "27852@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Philipp Kern", "pkern@google.com")
addPerson("Philippe Lafoucrière", "philippe.lafoucriere@gmail.com")
addPerson("Pierre Durand", "pierredurand@gmail.com", "@pierrre")
addPerson("Pierre Prinetti", "pierreprinetti@gmail.com", "@pierreprinetti")
addPerson("Pierre Roullon", "pierre.roullon@gmail.com", "@proullon")
addPerson("Pieter Droogendijk", "pieter@binky.org.uk", "@PieterD")
addPerson("Pieterjan Lambein", "pieterjan@otainsight.com")
addPerson("Pietro Gagliardi", "pietro10@mac.com", "9190@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Pietro Gagliardi", "pietro10@mac.com", "@andlabs")
addPerson("Piotr Kowalczuk", "p.kowalczuk.priv@gmail.com")
addPerson("Piyush Mishra", "piyush@codeitout.com", "@ofpiyush")
addPerson("Plekhanov Maxim", "kishtatix@gmail.com")
addPerson("Plekhanov Maxim", "kishtatix@gmail.com", "@kishtatik")
addPerson("Pontus Leitzler", "leitzler@gmail.com")
addPerson("Pontus Leitzler", "leitzler@users.noreply.github.com", "@leitzler")
addPerson("Pradeep Singh", "rautelap@gmail.com")
addPerson("Prasanna Swaminathan", "prasanna@mediamath.com", "@pswaminathan")
addPerson("Prasanna V. Loganathar", "pvl@prasannavl.com")
addPerson("Prashant Varanasi", "prashant@prashantv.com", "@prashantv")
addPerson("Prashanth Pai", "ppai@redhat.com")
addPerson("Praveen Bathala", "praveen.bathala@gmail.com")
addPerson("Pravendra Singh", "hackpravj@gmail.com", "@pravj")
addPerson("Preetam Jinka", "pj@preet.am", "@Preetam")
addPerson("ProhtMeyhet", "sebastian@prohtmeyhet.de")
addPerson("Péter Surányi", "speter.go1@gmail.com", "@speter")
addPerson("Péter Szilágyi", "peterke@gmail.com", "5786@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Péter Szilágyi", "peterke@gmail.com", "@karalabe")
addPerson("Qais Patankar", "qaisjp@gmail.com", "@qaisjp")
addPerson("Qi Zhao", "zhaoq@google.com")
addPerson("Qi Zhao", "zhaoq@google.com", "9480@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Qiu", "ilsh1022@gmail.com", "21240@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Qiuxuan Zhu", "ilsh1022@gmail.com", "@Kinghack")
addPerson("QtRoS", "mrqtros@gmail.com", "@QtRoS")
addPerson("Quan Yong Zhai", "qyzhai@gmail.com", "@qyzhai")
addPerson("Quentin Perez", "qperez@ocs.online.net", "@QuentinPerez")
addPerson("Quentin Perez", "quentin@zen.ly")
addPerson("Quentin Renard", "contact@asticode.com", "12775@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Quentin Renard", "contact@asticode.com", "@asticode")
addPerson("Quentin Smith", "quentin@golang.org")
addPerson("Quentin Smith", "quentin@golang.org", "13020@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Quentin Smith", "quentin@golang.org", "@quentinmit")
addPerson("Quinn Slack", "sqs@sourcegraph.com", "@sqs")
addPerson("Quoc-Viet Nguyen", "afelion@gmail.com", "@nqv")
addPerson("R Primus", "rprimus@gmail.com")
addPerson("Radek Sohlich", "sohlich@gmail.com", "@sohlich")
addPerson("Radu Berinde", "radu@cockroachlabs.com", "12530@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Radu Berinde", "radu@cockroachlabs.com", "@RaduBerinde")
addPerson("Rafal Jeczalik", "rjeczalik@gmail.com", "@rjeczalik")
addPerson("Raghavendra Nagaraj", "jamdagni86@gmail.com")
addPerson("Raghavendra Nagaraj", "jamdagni86@gmail.com", "@jamdagni86")
addPerson("Rahul Chaudhry", "rahulchaudhry@chromium.org", "@rahulchaudhry", "*goog")
addPerson("Rahul Chaudhry", "rahulchaudhry@google.com", "5211@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Raj Mahey", "raj.axisos@gmail.com")
addPerson("Rajat Goel", "rajat.goel2010@gmail.com", "@rajatgoel")
addPerson("Rajath Agasthya", "rajathagasthya@gmail.com", "24258@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rajath Agasthya", "rajathagasthya@gmail.com", "@rajathagasthya")
addPerson("Ralph Corderoy", "ralph@inputplus.co.uk")
addPerson("Ralph Corderoy", "ralph.corderoy@gmail.com", "7020@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ralph Corderoy", "ralph@inputplus.co.uk", "10961@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ralph Corderoy", "ralph@inputplus.co.uk", "@RalphCorderoy")
addPerson("Ralph Ligtenberg", "ralph.ligtenberg@gmail.com")
addPerson("Ramazan AYYILDIZ", "rayyildiz@gmail.com", "@rayyildiz")
addPerson("Ramesh Dharan", "dharan@google.com")
addPerson("Ramon Nogueira", "rmn@google.com")
addPerson("Randy Reddig", "ydnar@shaderlab.com")
addPerson("Raph Levien", "raph@google.com", "@raphlinus")
addPerson("Raphael Geronimi", "raphael.geronimi@gmail.com", "@rgeronimi")
addPerson("Raul Silvera", "rsilvera@google.com", "@rauls5382")
addPerson("Raul Silvera", "rauls5382@gmail.com")
addPerson("Raul Silvera", "rsilvera@google.com", "10031@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("RaviTeja", "ravi.tezu@gmail.com")
addPerson("Ray Tung", "rtung@thoughtworks.com", "@raytung")
addPerson("Raymond Kazlauskas", "raima220@gmail.com", "@Rhymond")
addPerson("Rebecca Stambler", "rstambler@golang.org", "16140@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rebecca Stambler", "rstambler@golang.org", "@stamblerre")
addPerson("Reese Wilson", "reese@shinymayhem.com")
addPerson("Reilly Watson", "reillywatson@gmail.com", "@reillywatson")
addPerson("Reinaldo de Souza Jr", "juniorz@gmail.com", "@juniorz")
addPerson("Remi Gillig", "remigillig@gmail.com", "@speps")
addPerson("Rens Rikkerink", "ikkerens@users.noreply.github.com", "@ikkerens")
addPerson("Rhett Garber", "rhettg@gmail.com")
addPerson("Rhys Hiltner", "rhys@justin.tv", "9210@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rhys Hiltner", "rhys@justin.tv", "@rhysh")
addPerson("Ricardo Padilha", "ricardospadilha@gmail.com", "@ricardopadilha")
addPerson("Ricardo Rey", "rrey@google.com")
addPerson("Ricardo Smania", "ricsmania@gmail.com")
addPerson("Ricardo Vegas", "ricardovegas@gmail.com")
addPerson("Riccardo Paccagnella", "ricpacca@gmail.com")
addPerson("Richard Barnes", "rlb@ipv.sx")
addPerson("Richard Crowley", "r@rcrowley.org", "@rcrowley")
addPerson("Richard Dingwall", "rdingwall@gmail.com", "@rdingwall")
addPerson("Richard Eric Gavaletz", "gavaletz@gmail.com", "@gavaletz")
addPerson("Richard Gibson", "richard.gibson@gmail.com", "13081@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Richard Gibson", "richard.gibson@gmail.com", "@gibson042")
addPerson("Richard Miller", "millerresearch@gmail.com", "12217@62eb7196-b449-3ce5-99f1-c037f21e1705", "miller.research@gmail.com")
addPerson("Richard Musiol", "mail@richard-musiol.de", "@neelance", "neelance@gmail.com", "13620@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rick Arnold", "rickarnoldjr@gmail.com", "@rickar")
addPerson("Rick Hudson", "rlh@golang.org", "5186@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rick Hudson", "rlh@golang.org", "@RLH")
addPerson("Rick Sayre", "whorfin@gmail.com")
addPerson("Rijnard van Tonder", "rvantonder@gmail.com", "@rvantonder")
addPerson("Rik van der Heijden", "rikvdh@users.noreply.github.com")
addPerson("Riku Voipio", "riku.voipio@linaro.org", "12765@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Riku Voipio", "riku.voipio@linaro.org", "@suihkulokki")
addPerson("Risto Jaakko Saarelma", "rsaarelm@gmail.com", "@rsaarelm")
addPerson("Rob Bradford", "robert.bradford@intel.com")
addPerson("Rob Earhart", "earhart@google.com")
addPerson("Rob Phoenix", "rob@robphoenix.com", "17946@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rob Phoenix", "rob@robphoenix.com", "@robphoenix")
addPerson("Rob Pike", "r@golang.org", "5015@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Rob Pike", "r@golang.org", "@robpike")
addPerson("Rob Strong", "robert.a.strong@gmail.com")
addPerson("Robert Anthony Bellamy", "rabellamy@gmail.com")
addPerson("Robert Bittle", "guywithnose@gmail.com")
addPerson("Robert Figueiredo", "robfig@gmail.com", "@robfig")
addPerson("Robert Gogolok", "gogolok@gmail.com")
addPerson("Robert Griesemer", "gri@golang.org", "5210@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Robert Griesemer", "gri@golang.org", "gri@gri-macbookair.roam.corp.google.com", "@griesemer")
addPerson("Robert Hencke", "robert.hencke@gmail.com", "@rhencke")
addPerson("Robert Iannucci", "iannucci@google.com")
addPerson("Robert Kuska", "rkuska@gmail.com")
addPerson("Robert Obryk", "robryk@gmail.com", "@robryk")
addPerson("Robert Sesek", "rsesek@google.com", "@rsesek")
addPerson("Robert Stepanek", "robert.stepanek@gmail.com", "6062@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Robert Stepanek", "robert.stepanek@gmail.com", "@rsto")
addPerson("Robert Weber", "robertweber95@gmail.com")
addPerson("Robert Xu", "robxu9@gmail.com")
addPerson("Robert-André Mauchin", "zebob.m@gmail.com")
addPerson("Roberto Clapis", "robclap8@gmail.com", "@empijei")
addPerson("Roberto Lublinerman Reitzes", "rluble@google.com", "@rluble")
addPerson("Roberto Selbach", "roberto@selbach.ca")
addPerson("Roberto Selbach", "roberto@selbach.ca", "@robteix")
addPerson("Roberto", "empijei@users.noreply.github.com", "@empijei")
addPerson("Robin Eklind", "r.eklind.87@gmail.com")
addPerson("Rodolfo Carvalho", "rhcarvalho@gmail.com", "@rhcarvalho")
addPerson("Rodolfo Rodriguez", "rodolfobgibson@gmail.com", "@techmexdev")
addPerson("Rodrigo Moraes de Oliveira", "rodrigo.moraes@gmail.com", "@moraes")
addPerson("Rodrigo Rafael Monti Kochenburger", "divoxx@gmail.com", "@divoxx")
addPerson("Roger Guldbrandsen", "roger@kinbiko.com")
addPerson("Roger Pau Monné", "royger@gmail.com", "@royger")
addPerson("Roger Peppe", "rogpeppe@gmail.com", "@rogpeppe")
addPerson("Roger Simms", "roger.simms@gmail.com")
addPerson("Rohit Agarwal", "agarwalrohit@google.com")
addPerson("Roland Illig", "roland.illig@gmx.de")
addPerson("Roland Illig", "roland.illig@gmx.de", "@rillig")
addPerson("Roland Shoemaker", "rolandshoemaker@gmail.com", "12545@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Roland Shoemaker", "rolandshoemaker@gmail.com", "@rolandshoemaker")
addPerson("Rollie Ma", "rollie.ma@gmail.com")
addPerson("Roman Budnikov", "romanyx90@yandex.ru", "23999@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ron Hashimoto", "mail@h2so5.net", "@h2so5")
addPerson("Ronald G. Minnich", "rminnich@gmail.com", "@rminnich")
addPerson("Ronan Guilloux", "ronan.guilloux@gmail.com")
addPerson("Ross Chater", "rdchater@gmail.com", "@rdcx")
addPerson("Ross Light", "light@google.com", "8285@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ross Light", "light@google.com", "@zombiezen")
addPerson("Rowan Marshall", "rowanajmarshall@gmail.com", "@RowanAJMarshall")
addPerson("Rowan Worth", "sqweek@gmail.com", "@sqweek")
addPerson("Ruben Vermeersch", "ruben@rocketeer.be")
addPerson("Rudi Kramer", "rudi.kramer@gmail.com")
addPerson("Rui Ueyama", "ruiu@google.com", "@rui314")
addPerson("Ruslan Nigmatullin", "elessar@dropbox.com")
addPerson("Russ Cox", "rsc@golang.org", "5056@62eb7196-b449-3ce5-99f1-c037f21e1705", "@rsc")
addPerson("Russell Haering", "russellhaering@gmail.com", "@russellhaering")
addPerson("Ryan Boehning", "ryan.boehning@apcera.com", "@y0ssar1an")
addPerson("Ryan Brown", "ribrdb@google.com", "6136@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ryan Brown", "ribrdb@google.com", "@ribrdb")
addPerson("Ryan Canty", "jrcanty@gmail.com")
addPerson("Ryan Dahl", "ry@tinyclouds.org", "@ry")
addPerson("Ryan Hitchman", "hitchmanr@gmail.com", "@rmmh")
addPerson("Ryan Lower", "rpjlower@gmail.com", "@ryanlower")
addPerson("Ryan Seys", "ryan@ryanseys.com", "@ryanseys")
addPerson("Ryan Slade", "ryanslade@gmail.com", "@ryanslade")
addPerson("Ryan Thomas", "rnt@google.com")
addPerson("Ryan Zhang", "ryan.zhang@docker.com")
addPerson("Ryoichi KATO", "ryo1kato@gmail.com", "@ryo1kato")
addPerson("Ryuji Iwata", "qt.luigi@gmail.com", "@qt-luigi")
addPerson("Ryuma Yoshida", "ryuma.y1117@gmail.com", "@ryysud")
addPerson("Ryuzo Yamamoto", "ryuzo.yamamoto@gmail.com", "@dragon3")
addPerson("Rémy Oudompheng", "oudomphe@phare.normalesup.org", "@remyoudompheng")
addPerson("S.Çağlar Onur", "caglar@10ur.org", "@caglar10ur")
addPerson("Sabin Mihai Rapan", "sabin.rapan@gmail.com", "@sabin-rapan")
addPerson("Sagiv Ofek", "sagiv4@gmail.com")
addPerson("Sai Cheemalapati", "saicheems@google.com")
addPerson("Salman Aljammaz", "s@0x65.net", "5220@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Salman Aljammaz", "s@0x65.net", "@saljam")
addPerson("Sam Broughton", "sambroughton@hotmail.co.uk")
addPerson("Sam Hug", "samuel.b.hug@gmail.com", "@samuelhug")
addPerson("Sam Mulube", "sam@thingful.net")
addPerson("Sam Sendelbach", "sbsends@gmail.com")
addPerson("Sam Thorogood", "thorogood@google.com", "@samthor")
addPerson("Sam Whited", "sam@samwhited.com", "11106@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Sam Whited", "sam@samwhited.com", "@SamWhited")
addPerson("Saman Barghi", "saman.b@gmail.com")
addPerson("Sameer Ajmani", "sameer@golang.org", "5265@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Sameer Ajmani", "sameer@golang.org", "@Sajmani")
addPerson("Sami Commerot", "samic@google.com")
addPerson("Sami Pönkänen", "sami.ponkanen@gmail.com")
addPerson("Samuel Cochran", "sj26@sj26.com")
addPerson("Samuel Kelemen", "sckelemen@users.noreply.github.com", "@SCKelemen")
addPerson("Samuel Tan", "samueltan@google.com", "16020@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Samuel Tan", "samueltan@google.com", "@stjj89")
addPerson("Samuele Pedroni", "pedronis@lucediurna.net", "@pedronis")
addPerson("Sandip Bhattacharya", "sandipb@sandipb.net")
addPerson("Sandy McPherson", "sandyzwin6@gmail.com")
addPerson("Sandy", "openset.wang@gmail.com", "@openset")
addPerson("Sanjay Menakuru", "balasanjay@gmail.com", "@balasanjay")
addPerson("Santhosh Kumar Tekuri", "santhosh.tekuri@gmail.com", "@santhosh-tekuri")
addPerson("Santosh Ananthakrishnan", "santosh@dropbox.com")
addPerson("Sarah Adams", "shadams@google.com", "16850@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Sarah Adams", "shadams@google.com", "shadams@shadams0.mtv.corp.google.com", "@adams-sarah")
addPerson("Sarah Chacko", "SJC1982.1992@gmail.com")
addPerson("Sascha Brawer", "sascha@brawer.ch", "@brawer")
addPerson("Sasha Lionheart", "lionhearts@google.com")
addPerson("Satyajit Ranjeev", "s@ranjeev.in")
addPerson("Sawood Alam", "ibnesayeed@gmail.com")
addPerson("Scott Bell", "scott@sctsm.com", "13380@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Scott Bell", "scott@sctsm.com", "@sctb")
addPerson("Scott Crunkleton", "crunk1@gmail.com")
addPerson("Scott Ferguson", "scottwferg@gmail.com", "@scottferg")
addPerson("Scott Lawrence", "bytbox@gmail.com", "@bytbox")
addPerson("Scott Mansfield", "smansfield@netflix.com", "@ScottMansfield")
addPerson("Scott Pakin", "scott+gpg@pakin.org")
addPerson("Sean Chittenden", "seanc@joyent.com", "@sean-")
addPerson("Sean Christopherson", "sean.j.christopherson@intel.com", "@sean-jc")
addPerson("Sean Dolphin", "Sean.Dolphin@kpcompass.com")
addPerson("Sean Harger", "sharger@google.com")
addPerson("Sean Rees", "sean@erifax.org", "@seanrees")
addPerson("SeanBurford", "sburford@google.com", "@sburford")
addPerson("Sebastiaan van Stijn", "github@gone.nl", "@thaJeztah")
addPerson("Sebastian Schmidt", "mrschmidt@google.com")
addPerson("Sebastian Schuberth", "sschuberth@gmail.com")
addPerson("Sebastian Willing", "sewi.de@gmail.com")
addPerson("Sebastien Binet", "seb.binet@gmail.com", "@sbinet")
addPerson("Sebastien Binet", "binet@cern.ch")
addPerson("Sebastien Binet", "seb.binet@gmail.com", "5810@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Seebs", "seebs@sourcegraph.com", "@seebs")
addPerson("Seiji Takahashi", "timaki.st@gmail.com", "15570@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Seiji Takahashi", "timaki.st@gmail.com", "@timakin")
addPerson("Sergey 'SnakE' Gromov", "snake.scaly@gmail.com", "@snake-scaly")
addPerson("Sergey Frolov", "sfrolov@google.com")
addPerson("Sergey Lemeshkin", "sergeilem@gmail.com")
addPerson("Sergey Lukjanov", "me@slukjanov.name")
addPerson("Sergey Mishin", "sergeymishine@gmail.com", "@dartkron")
addPerson("Sergey Mudrik", "sergey.mudrik@gmail.com")
addPerson("Sergey Rogulenko", "rogulenko@laserlike.com")
addPerson("Sergey Semin", "gray12511@gmail.com", "@Gray5")
addPerson("Sergiusz Bazanski", "bazanski@gmail.com")
addPerson("Serhat Şevki Dinçer", "jfcgauss@gmail.com")
addPerson("Serhii Aheienko", "serhii.aheienko@gmail.com")
addPerson("Serhii Bratus", "sergiibratus@gmail.com")
addPerson("Seshachalam Malisetti", "abbiya@gmail.com")
addPerson("Seth Greenstein", "sgreens@google.com")
addPerson("Seth Hoenig", "seth.a.hoenig@gmail.com", "@shoenig")
addPerson("Seth Hollyman", "shollyman@google.com")
addPerson("Seth Shelnutt", "Shelnutt2@gmail.com")
addPerson("Seth Vargo", "sethvargo@gmail.com", "@sethvargo")
addPerson("Shahar Kohanim", "skohanim@gmail.com")
addPerson("Shahar Kohanim", "skohanim@gmail.com", "12700@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Shamil Garatuev", "garatuev@gmail.com", "@FluorescentTouch")
addPerson("Shane Hansen", "shanemhansen@gmail.com", "@shanemhansen")
addPerson("Shannon Wynter", "freman@users.noreply.github.com")
addPerson("Shaun Dunning", "shaun.dunning@uservoice.com")
addPerson("Shawn Pearce", "sop@google.com")
addPerson("Shawn Smith", "shawn.p.smith@gmail.com", "7245@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Shawn Walker-Salas", "shawn.walker@oracle.com", "7291@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Shawn Walker-Salas", "shawn.walker@oracle.com", "@binarycrusader")
addPerson("Shenghou Ma", "minux@golang.org", "minux.ma@gmail.com", "@minux")
addPerson("Shengyu Zhang", "shengyu.zhang@chaitin.com", "@SilverRainZ")
addPerson("Shi Han Ng", "shihanng@gmail.com")
addPerson("Shinichi Nishimura", "nshmura.s@gmail.com")
addPerson("Shinji Tanaka", "shinji.tanaka@gmail.com", "@stanaka")
addPerson("Shintaro Kaneko", "kaneshin0120@gmail.com", "@kaneshin")
addPerson("Shivakumar GN", "shivakumar.gn@gmail.com", "@shivakumargn")
addPerson("Shivansh Rai", "shivansh@freebsd.org", "@shivansh")
addPerson("Shubheksha Jalan", "jshubheksha@gmail.com")
addPerson("Shun Fan", "sfan@google.com")
addPerson("Shushan Chai", "chaishushan@gmail.com", "5095@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Shuvo Debnath", "shuvo.debnath@gmail.com")
addPerson("Silvan Jegen", "s.jegen@gmail.com", "@Shugyousha")
addPerson("Simon Inman", "simoninman@google.com")
addPerson("Simon Jefford", "simon.jefford@gmail.com", "@simonjefford")
addPerson("Simon Johansson", "simon@simonjohansson.com")
addPerson("Simon Ordish", "simon.ordish@masagi.co.uk")
addPerson("Simon Rawet", "simon@rawet.se", "@KilledKenny")
addPerson("Simon Thulbourn", "simon+github@thulbourn.com", "@sthulb")
addPerson("Simon Whitehead", "chemnova@gmail.com", "@simon-whitehead")
addPerson("Simone Carletti", "weppos@gmail.com")
addPerson("Sina Siadat", "siadat@gmail.com", "14140@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Sina Siadat", "siadat@gmail.com", "@siadat")
addPerson("Sokolov Yura", "funny.falcon@gmail.com", "@funny-falcon")
addPerson("Song Gao", "song@gao.io", "@songgao")
addPerson("Spencer Nelson", "s@spenczar.com", "10000@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Spencer Nelson", "s@spenczar.com", "@spenczar")
addPerson("Spencer Tung", "spencertung@google.com", "20245@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Spring Mc", "heresy.mc@gmail.com", "@mcspring")
addPerson("Srdjan Petrovic", "spetrovic@google.com", "6605@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Srdjan Petrovic", "spetrovic@google.com", "@spetrovic77")
addPerson("Sridhar Venkatakrishnan", "sridhar@laddoo.net", "9665@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Sridhar Venkatakrishnan", "sridhar@laddoo.net", "@sridharv")
addPerson("StalkR", "stalkr@stalkr.net")
addPerson("Stan Chan", "stanchan@gmail.com")
addPerson("Stan Schwertly", "stan@schwertly.com", "@Stantheman")
addPerson("Stanislav Afanasev", "php.progger@gmail.com", "@superstas")
addPerson("Stanislav Paskalev", "kshorg@gmail.com")
addPerson("Stanislav Petrov", "s.e.petrov@gmail.com")
addPerson("Steeve Morin", "steeve.morin@gmail.com", "@steeve")
addPerson("Stefan Schmidt", "stschmidt@google.com")
addPerson("Stepan Shabalin", "neverliberty@gmail.com", "@Neverik")
addPerson("Stephan Renatus", "srenatus@chef.io", "@srenatus")
addPerson("Stephen Gutekanst", "stephen.gutekanst@gmail.com")
addPerson("Stephen L", "36011612+steuhs@users.noreply.github.com", "@steuhs")
addPerson("Stephen Lewis", "stephen@sock.org.uk")
addPerson("Stephen McQuay (smcquay)", "stephen@mcquay.me", "@smcquay")
addPerson("Stephen McQuay", "stephen@mcquay.me", "13960@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Stephen Searles", "stephens2424@gmail.com", "stephen.searles@gmail.com", "@stephens2424")
addPerson("Stephen Solka", "stephen0q@gmail.com")
addPerson("Stephen Sugden", "glurgle@gmail.com")
addPerson("Stephen Weinberg", "stephen@q5comm.com", "stephenmw@google.com", "@stephenmw")
addPerson("Stephen Weinberg", "stephenmw@google.com", "13156@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Steve Francia", "spf@golang.org", "14840@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Steve Francia", "spf@golang.org", "@spf13")
addPerson("Steve Gilbert", "stevegilbert23@gmail.com")
addPerson("Steve McCoy", "mccoyst@gmail.com", "@mccoyst")
addPerson("Steve Phillips", "steve@tryingtobeawesome.com", "@elimisteve")
addPerson("Steve Reed", "sreed@zulily.com")
addPerson("Steve Streeting", "steve@stevestreeting.com", "@sinbad")
addPerson("Steve Wills", "steve@mouf.net")
addPerson("Steven Berlanga", "zabawaba99@gmail.com")
addPerson("Steven Buss", "sbuss@google.com")
addPerson("Steven Elliot Harris", "seharris@gmail.com", "@seh")
addPerson("Steven Erenst", "stevenerenst@gmail.com")
addPerson("Steven Hartland", "steven.hartland@multiplay.co.uk", "10210@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Steven Hartland", "steven.hartland@multiplay.co.uk", "@stevenh")
addPerson("Steven Kabbes", "stevenkabbes@gmail.com")
addPerson("Steven Ruckdashel", "steve.ruckdashel@gmail.com")
addPerson("Steven Selph", "sselph@google.com")
addPerson("Steven Wilkin", "stevenwilkin@gmail.com", "@stevenwilkin")
addPerson("Stéphane Travostino", "stephane.travostino@gmail.com", "@1player")
addPerson("Sue Spence", "virtuallysue@gmail.com")
addPerson("Sugu Sougoumarane", "ssougou@gmail.com", "@sougou")
addPerson("Suharsh Sivakumar", "suharshs@google.com", "@suharshs")
addPerson("Suriyaa Sundararuban", "isc.suriyaa@gmail.com")
addPerson("Suriyaa Sundararuban", "suriyaasundararuban@gmail.com", "27899@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Suriyaa Sundararuban", "suriyaasundararuban@gmail.com", "@SuriyaaKudoIsc")
addPerson("Surma Surma", "surma@google.com")
addPerson("Sutton Yamanashi", "syamanashi@gmail.com")
addPerson("Suyash", "dextrous93@gmail.com", "15015@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Suyash", "dextrous93@gmail.com", "@suyash")
addPerson("Suzy Mueller", "suzmue@golang.org", "21300@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Suzy Mueller", "suzmue@golang.org", "@suzmue")
addPerson("Sven Almgren", "sven@tras.se", "@blindmatrix")
addPerson("Sven Blumenstein", "svbl@google.com")
addPerson("Sven Dowideit", "svendowideit@home.org.au")
addPerson("Sylvain Zimmer", "sylvain@sylvainzimmer.com", "@sylvinus")
addPerson("Syohei YOSHIDA", "syohex@gmail.com", "@syohex")
addPerson("Sébastien Paolacci", "sebastien.paolacci@gmail.com", "@spaolacci")
addPerson("Sébastien Portebois", "sportebois@gmail.com")
addPerson("TSUYUSATO Kitsune", "make.just.on@gmail.com")
addPerson("Tad Fisher", "tadfisher@gmail.com")
addPerson("Tad Glines", "tad.glines@gmail.com", "@tadglines")
addPerson("Taesu Pyo", "pyotaesu@gmail.com", "@bigflood")
addPerson("Tair Sabirgaliev", "tair.sabirgaliev@gmail.com")
addPerson("Taj Khattra", "taj.khattra@gmail.com", "@tkhattra")
addPerson("Takashi Matsuo", "tmatsuo@google.com")
addPerson("Takayoshi Nishida", "takayoshi.nishida@gmail.com", "@takp")
addPerson("Takuto Ikuta", "tikuta@google.com", "@atetubou")
addPerson("Takuya Sato", "takuya0219@gmail.com")
addPerson("Takuya Ueda", "uedatakuya@gmail.com", "@tenntenn")
addPerson("Tal Shprecher", "tshprecher@gmail.com", "11915@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tal Shprecher", "tshprecher@gmail.com", "@tshprecher")
addPerson("Tamir Duberstein", "tamird@gmail.com", "7955@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tamir Duberstein", "tamird@gmail.com", "@tamird")
addPerson("Tamás Gulácsi", "tgulacsi78@gmail.com")
addPerson("Tao Wang", "twang2218@gmail.com")
addPerson("Tardis Xu", "xiaoxubeii@gmail.com")
addPerson("Tarmigan Casebolt", "tarmigan@gmail.com")
addPerson("Tarmigan Casebolt", "tarmigan@gmail.com", "9697@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Taro Aoki", "aizu.s1230022@gmail.com", "@ktr0731")
addPerson("Tarrant", "tarrant@keyneston.com", "@tarrant")
addPerson("Taru Karttunen", "taruti@taruti.net", "@taruti")
addPerson("Tatsuhiro Tsujikawa", "tatsuhiro.t@gmail.com", "@tatsuhiro-t")
addPerson("Taufiq Rahman", "taufiqrx8@gmail.com", "@Inconnu08")
addPerson("Ted Hahn", "teh@uber.com")
addPerson("Ted Kornish", "golang@tedkornish.com", "@tedkornish")
addPerson("Tejasvi Nareddy", "tejunareddy@gmail.com")
addPerson("Terin Stock", "terinjokes@gmail.com", "25203@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Terin Stock", "terinjokes@gmail.com", "@terinjokes")
addPerson("Terrel Shumway", "gopher@shumway.us")
addPerson("Terry Wong", "terry.wong2@yahoo.com")
addPerson("Tess Rinearson", "tess.rinearson@gmail.com")
addPerson("Tetsuo Kiso", "tetsuokiso9@gmail.com", "@tetsuok")
addPerson("Than McIntosh", "thanm@google.com", "14020@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Than McIntosh", "thanm@google.com", "@thanm")
addPerson("Thanabodee Charoenpiriyakij", "wingyminus@gmail.com", "19095@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Thanabodee Charoenpiriyakij", "wingyminus@gmail.com", "@wingyplus")
addPerson("Theo Schlossnagle", "jesus@lethargy.org")
addPerson("Thiago Farina", "tfarina@chromium.org")
addPerson("Thiago Fransosi Farina", "thiago.farina@gmail.com", "@thiagofarina")
addPerson("Thibault Falque", "thibault_falque@ens.univ-artois.fr")
addPerson("Thibaut Colar", "tcolar@colar.net")
addPerson("Thomas Alan Copeland", "talan.copeland@gmail.com", "@talanc")
addPerson("Thomas Bonfort", "thomas.bonfort@gmail.com", "@tbonfort")
addPerson("Thomas Bouldin", "inlined@google.com")
addPerson("Thomas Bruyelle", "thomas.bruyelle@gmail.com", "@tbruyelle")
addPerson("Thomas Bushnell, BSG", "tbushnell@google.com")
addPerson("Thomas Desrosiers", "thomasdesr@gmail.com", "@thomaso-mirodin")
addPerson("Thomas Habets", "habets@google.com", "@ThomasHabets")
addPerson("Thomas Johnson", "NTmatter@gmail.com")
addPerson("Thomas Kappler", "tkappler@gmail.com", "@thomas11")
addPerson("Thomas Meson", "zllak@hycik.org")
addPerson("Thomas Sauvaget", "sauvaget.thomas@gmail.com")
addPerson("Thomas Wanielista", "tomwans@gmail.com", "@tomwans")
addPerson("Thomas de Zeeuw", "thomasdezeeuw@gmail.com", "@Thomasdezeeuw")
addPerson("Thorben Krueger", "thorben.krueger@gmail.com", "@benthor")
addPerson("Thordur Bjornsson", "thorduri@secnorth.net", "@thorduri")
addPerson("Tiago Queiroz", "contato@tiago.eti.br")
addPerson("Tilman Dilo", "tilman.dilo@gmail.com", "@tdilo")
addPerson("Tim 'mithro' Ansell", "tansell@google.com")
addPerson("Tim Burks", "timburks@google.com")
addPerson("Tim Cooijmans", "timcooijmans@gmail.com", "@timcooijmans")
addPerson("Tim Cooper", "tim.cooper@layeh.com", "24935@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tim Cooper", "tim.cooper@layeh.com", "@bontibon")
addPerson("Tim Ebringer", "tim.ebringer@gmail.com")
addPerson("Tim Heckman", "t@heckman.io", "@theckman")
addPerson("Tim Henderson", "tim.tadh@gmail.com", "@timtadh")
addPerson("Tim Hockin", "thockin@google.com", "@thockin")
addPerson("Tim Shen", "timshen@google.com", "@timshen91")
addPerson("Tim St. Clair", "stclair@google.com")
addPerson("Tim Swast", "swast@google.com", "@tswast")
addPerson("Tim Wright", "tenortim@gmail.com", "25424@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tim Wright", "tenortim@gmail.com", "@tenortim")
addPerson("Tim Xu", "xiaoxubeii@gmail.com", "@xiaoxubeii")
addPerson("Tim", "tdhutt@gmail.com", "@Timmmm")
addPerson("Timo Savola", "timo.savola@gmail.com", "@tsavola")
addPerson("Timothy Raymond", "xtjraymondx@gmail.com")
addPerson("Timothy Studd", "tim@timstudd.com", "@timstudd")
addPerson("Tipp Moseley", "tipp@google.com", "@tippjammer")
addPerson("Tobias Assarsson", "tobias.assarsson@gmail.com")
addPerson("Tobias Columbus", "tobias.columbus@gmail.com", "@tc-0")
addPerson("Tobias Klauser", "tobias.klauser@gmail.com", "@tklauser")
addPerson("Tobias Klauser", "tklauser@distanz.ch", "@tklauser")
addPerson("Tobias Klauser", "tobias.klauser@gmail.com", "19560@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tobias Schottdorf", "tobias.schottdorf@gmail.com")
addPerson("Toby Burress", "kurin@google.com")
addPerson("Todd Neal", "todd@tneal.org", "12836@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Todd Neal", "todd@tneal.org", "@tzneal")
addPerson("Todd Neal", "tolchz@gmail.com", "8481@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Todd Rafferty", "webRat@gmail.com")
addPerson("Todd Wang", "toddwang@gmail.com", "@tatatodd")
addPerson("Tom Bergan", "tombergan@google.com", "10820@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tom Bergan", "tombergan@google.com", "@tombergan")
addPerson("Tom Elliott", "tom.w.elliott@gmail.com")
addPerson("Tom Heng", "zhm20070928@gmail.com", "7380@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tom Heng", "zhm20070928@gmail.com", "@tomheng")
addPerson("Tom Holmes", "tom@wandb.com")
addPerson("Tom Lanyon", "tomlanyon@google.com", "@tomlanyon")
addPerson("Tom Levy", "tomlevy93@gmail.com", "@tom93")
addPerson("Tom Limoncelli", "tal@whatexit.org", "@TomOnTime")
addPerson("Tom Linford", "tomlinford@gmail.com", "@tomlinford")
addPerson("Tom Thorogood", "me+google@tomthorogood.co.uk")
addPerson("Tom Thorogood", "me+google@tomthorogood.co.uk", "@tmthrgd")
addPerson("Tom Wilkie", "tom.wilkie@gmail.com", "tom@weave.works", "@tomwilkie")
addPerson("Tomas Basham", "tomasbasham@gmail.com")
addPerson("Tommy Schaefer", "tommy.schaefer@teecom.com", "@tommyschaefer")
addPerson("Tonis Tiigi", "tonistiigi@gmail.com", "@tonistiigi")
addPerson("Tony Reix", "Tony.Reix@bull.net", "16326@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tony Walker", "walkert.uk@gmail.com", "@walkert")
addPerson("Tooru Takahashi", "tooru.takahashi134@gmail.com", "@tooru")
addPerson("Tor Andersson", "tor.andersson@gmail.com", "@ccxvii")
addPerson("Tormod Erevik Lea", "tormodlea@gmail.com", "@tormoder")
addPerson("Toshiki Shima", "haya14busa@gmail.com", "16861@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Totoro W", "tw19881113@gmail.com", "5975@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Travis Beatty", "travisby@gmail.com")
addPerson("Travis Bischel", "travis.bischel@gmail.com", "26898@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Travis Bischel", "travis.bischel@gmail.com", "@twmb")
addPerson("Travis Cline", "travis.cline@gmail.com", "@tmc")
addPerson("Trevor Prater", "trevor.prater@gmail.com")
addPerson("Trey Lawrence", "lawrence.trey@gmail.com", "@TreyLawrence")
addPerson("Tristan Colgate", "tcolgate@gmail.com", "@tcolgate")
addPerson("Tristan Ooohry", "ooohry@gmail.com", "@golantrevize")
addPerson("Tristan Rice", "rice@fn.lc")
addPerson("Troels Thomsen", "troels@thomsen.io", "@tt")
addPerson("Trung Nguyen", "trung.n.k@gmail.com")
addPerson("Tugdual Saunier", "tucksaun@gmail.com", "23797@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tugdual Saunier", "tugdual.saunier@gmail.com")
addPerson("Tuo Shan", "shantuo@google.com", "12855@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tuo Shan", "shantuo@google.com", "@shantuo")
addPerson("Tuo Shan", "sturbo89@gmail.com")
addPerson("Tuo Shan", "sturbo89@gmail.com", "12857@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Tw", "tw19881113@gmail.com", "@tw4452852")
addPerson("Tyler Bui-Palsulich", "tbp@google.com")
addPerson("Tyler Bui-Palsulich", "tpalsulich@google.com", "@tbpg")
addPerson("Tyler Bunnell", "tylerbunnell@gmail.com", "@tylerb")
addPerson("Tyler Compton", "xaviosx@gmail.com")
addPerson("Tyler Treat", "ttreat31@gmail.com")
addPerson("Tyler Treat", "tyler.treat@apcera.com")
addPerson("Tyler Yahn", "tyler.yahn@urbanairship.com")
addPerson("Tzu-Jung Lee", "roylee17@currant.com", "@roylee17")
addPerson("Ugorji Nwoke", "ugorji@gmail.com", "@ugorji")
addPerson("Ulderico Cirello", "uldericofilho@gmail.com", "7250@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ulrich Kunitz", "uli.kunitz@gmail.com", "@ulikunitz")
addPerson("Umang Parmar", "umangjparmar@gmail.com", "@darkLord19")
addPerson("Uriel Mangado", "uriel@berlinblue.org", "@uriel")
addPerson("Urvil Patel", "patelurvil38@gmail.com", "@urvil38")
addPerson("Uttam C Pawar", "uttam.c.pawar@intel.com", "@uttampawar")
addPerson("Vadim Grek", "vadimprog@gmail.com", "@brainiac84")
addPerson("Val Polouchkine", "vpolouch@justin.tv")
addPerson("Valentin Vidic", "vvidic@valentin-vidic.from.hr", "@vvidic")
addPerson("Vanesa", "mail@vanesaortiz.com")
addPerson("Vega Garcia Luis Alfonso", "vegacom@gmail.com", "@vegacom")
addPerson("Venil Noronha", "veniln@vmware.com", "@venilnoronha")
addPerson("Veselkov Konstantin", "kostozyb@gmail.com", "@KosToZyB")
addPerson("Viacheslav Poturaev", "vearutop@gmail.com", "@vearutop")
addPerson("Vicki Niu", "vicki.niu@gmail.com")
addPerson("Victor Chudnovsky", "vchudnov@google.com")
addPerson("Victor Vrantchan", "vrancean+github@gmail.com", "@groob")
addPerson("Vignesh Ramachandra", "vickyramachandra@gmail.com")
addPerson("Vikas Kedia", "vikask@google.com")
addPerson("Vikram Jadhav", "vikramcse.10@gmail.com")
addPerson("Vince0000", "522341976@qq.com")
addPerson("Vincent Batts", "vbatts@hashbangbash.com", "@vbatts")
addPerson("Vincent Bernat", "vincent@bernat.ch")
addPerson("Vincent Demeester", "vinc.demeester@gmail.com")
addPerson("Vincent Vanackere", "vincent.vanackere@gmail.com", "@vanackere")
addPerson("Vincenzo Pupillo", "v.pupillo@gmail.com", "24134@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Vinu Rajashekhar", "vinutheraj@gmail.com", "@vinuraja")
addPerson("Vishvananda Ishaya", "vishvananda@gmail.com", "@vishvananda")
addPerson("Vitor De Mario", "vitordemario@gmail.com", "@vdemario")
addPerson("Vitor De Mario", "vitor.demario@mendelics.com.br")
addPerson("Vivek Ayer", "vivek@restlessbandit.com")
addPerson("Vivek Sekhar", "vivek@viveksekhar.ca")
addPerson("Vlad Krasnov", "vlad@cloudflare.com", "7601@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Vlad Krasnov", "vlad@cloudflare.com", "@vkrasnov")
addPerson("Vladimir Kovpak", "cn007b@gmail.com", "@cn007b")
addPerson("Vladimir Kuzmin", "vkuzmin@uber.com", "26409@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Vladimir Kuzmin", "vkuzmin@uber.com", "@vkuzmin-uber")
addPerson("Vladimir Mezentsev", "vladimir.mezentsev@oracle.com")
addPerson("Vladimir Mihailenco", "vladimir.webdev@gmail.com", "@vmihailenco")
addPerson("Vladimir Nikishenko", "vova616@gmail.com", "@vova616")
addPerson("Vladimir Stefanovic", "vladimir.stefanovic@imgtec.com", "@vstefanovic")
addPerson("Vladimir Stefanovic", "vladimir.stefanovic@mips.com", "15150@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Vladimir Varankin", "nek.narqo@gmail.com")
addPerson("Vladimir Varankin", "vladimir@varank.in", "@narqo")
addPerson("Volker Dobler", "dr.volker.dobler@gmail.com", "5050@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Volker Dobler", "dr.volker.dobler@gmail.com", "@vdobler")
addPerson("Volodymyr Paprotski", "vpaprots@ca.ibm.com", "@vpaprots")
addPerson("W. Mark Kubacki", "wmark@hurrikane.de")
addPerson("W. Trevor King", "wking@tremily.us")
addPerson("Wade Simmons", "wade@wades.im", "@wadey")
addPerson("Waldemar Quevedo", "waldemar.quevedo@gmail.com")
addPerson("Walter Poupore", "wpoupore@google.com")
addPerson("Wander Lairson Costa", "wcosta@mozilla.com", "@walac")
addPerson("Warren Fernandes", "warren.f.fernandes@gmail.com")
addPerson("Warren Fernandes", "warren.f.fernandes@gmail.com", "@wfernandes")
addPerson("Warren Harper", "warrenjharper@gmail.com")
addPerson("Wayne Ashley Berry", "wayneashleyberry@gmail.com", "@wayneashleyberry")
addPerson("Wedson Almeida Filho", "wedsonaf@google.com", "12200@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Wedson Almeida Filho", "wedsonaf@google.com", "@wedsonaf")
addPerson("Weerasak Chongnguluam", "singpor@gmail.com")
addPerson("Weerasak Chongnguluam", "singpor@gmail.com", "@iporsut")
addPerson("Wei Fu", "fhfuwei@163.com")
addPerson("Wei Guangjing", "vcc.163@gmail.com", "@wgj-zz")
addPerson("Wei Xiao", "Wei.Xiao@arm.com", "16227@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Wei Xiao", "wei.xiao@arm.com", "@williamweixiao")
addPerson("Weichao Tang", "tevic.tt@gmail.com")
addPerson("Weichao Tang", "tevic.tt@gmail.com", "@Tevic")
addPerson("Wembley G. Leach, Jr", "wembley.gl@gmail.com", "@wemgl")
addPerson("Wes Widner", "kai5263499@gmail.com")
addPerson("Wesley Hill", "hakobyte@gmail.com")
addPerson("WhisperRain", "2516435583@qq.com", "@WhisperRain")
addPerson("Wil Selwood", "wselwood@gmail.com")
addPerson("Wil Selwood", "wselwood@gmail.com", "@wselwood")
addPerson("Wilfried Teiken", "wteiken@google.com")
addPerson("Will Beason", "willbeason@gmail.com", "@willbeason")
addPerson("Will Bond", "will@wbond.net", "9815@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Will Chan", "willchan@google.com")
addPerson("Will Faught", "will.faught@gmail.com", "@willfaught")
addPerson("Will Madison", "wmadisonDev@GMail.com")
addPerson("Will Morrow", "wmorrow.qdt@qualcommdatacenter.com")
addPerson("Will Norris", "willnorris@google.com", "@willnorris")
addPerson("Will Storey", "will@summercat.com", "@horgh")
addPerson("Will", "willow.pine.2011@gmail.com")
addPerson("Willem van der Schyff", "willemvds@gmail.com", "@willemvds")
addPerson("William Chan", "willchan@chromium.org", "@willchan", "*goog")
addPerson("William Chang", "mr.williamchang@gmail.com", "27627@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("William Orr", "will@worrbase.com", "@worr")
addPerson("Wisdom Omuya", "deafgoat@gmail.com", "@deafgoat")
addPerson("Wèi Cōngruì", "crvv.mail@gmail.com", "22895@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Wèi Cōngruì", "crvv.mail@gmail.com", "@crvv")
addPerson("XAX", "xaxiclouddev@gmail.com")
addPerson("Xargin", "cao1988228@163.com")
addPerson("Xi Ruoyao", "xry23333@gmail.com")
addPerson("Xia Bin", "snyh@snyh.org", "12161@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Xia Bin", "snyh@snyh.org", "@snyh")
addPerson("Xing Xing", "mikespook@gmail.com", "@mikespook")
addPerson("Xudong Zhang", "felixmelon@gmail.com")
addPerson("Xudong Zheng", "7pkvm5aw@slicealias.com", "@xudongzheng")
addPerson("Xuyang Kang", "xuyang@google.com")
addPerson("Xuyang Kang", "xuyangkang@gmail.com", "@xuyangkang")
addPerson("Yaacov Akiba Slama", "yaslama@gmail.com")
addPerson("Yamagishi Kazutoshi", "ykzts@desire.sh")
addPerson("Yann Hodique", "yhodique@google.com", "@sigma")
addPerson("Yann Kerhervé", "yann.kerherve@gmail.com", "@yannk")
addPerson("Yaron de Leeuw", "jarondl@google.com")
addPerson("Yasha Bubnov", "girokompass@gmail.com")
addPerson("Yasha Bubnov", "girokompass@gmail.com", "@ybubnov")
addPerson("Yasser Abdolmaleki", "yasser@yasser.ca", "@spring1843")
addPerson("Yasuharu Goto", "matope.ono@gmail.com", "8070@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Yasuharu Goto", "matope.ono@gmail.com", "@matope")
addPerson("Yasuhiro MATSUMOTO", "mattn.jp@gmail.com", "5025@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Yasuhiro Matsumoto", "mattn.jp@gmail.com", "@mattn")
addPerson("Yazen2017", "yazen.shunnar@gmail.com", "@yazsh")
addPerson("Yestin", "ylh@pdx.edu", "@ylih")
addPerson("Yesudeep Mangalapilly", "yesudeep@google.com", "@gorakhargosh")
addPerson("Ying Zou", "xpzouying@gmail.com")
addPerson("Yissakhar Z. Beck", "yissakhar.beck@gmail.com", "@DeedleFake")
addPerson("Yogesh Desai", "er.yogeshdesai@gmail.com")
addPerson("Yongjian Xu", "i3dmaster@gmail.com", "@i3d")
addPerson("Yoon", "learder@gmail.com")
addPerson("Yoshi Yamaguchi", "ymotongpoo@gmail.com")
addPerson("Yoshiya Hinosawa", "stibium121@gmail.com")
addPerson("Yoshiyuki Kanno", "nekotaroh@gmail.com", "@mocchira")
addPerson("Yuki Yugui Sonoda", "yugui@google.com")
addPerson("Yury Smolsky", "yury@smolsky.by", "26536@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Yury Smolsky", "yury@smolsky.by", "@ysmolsky")
addPerson("Yusuke Kagiwada", "block.rxckin.beats@gmail.com", "@Jxck")
addPerson("Yuusei Kuwana", "kuwana@kumama.org", "@kumama")
addPerson("Yuval Pavel Zholkover", "paulzhol@gmail.com", "5781@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Yuval Pavel Zholkover", "paulzhol@gmail.com", "@paulzhol")
addPerson("Yuwei Ba", "xiaobayuwei@gmail.com")
addPerson("Yuya Kusakabe", "yuya.kusakabe@gmail.com")
addPerson("Yves Junqueira", "yves.junqueira@gmail.com", "@nictuku")
addPerson("ZZMarquis", "zhonglingjian3821@163.com", "@ZZMarquis")
addPerson("Zac Bergquist", "zbergquist99@gmail.com", "9250@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Zac Bergquist", "zbergquist99@gmail.com", "@zmb3")
addPerson("Zach Auclair", "zach101@gmail.com")
addPerson("Zach Bintliff", "zbintliff@gmail.com", "@zbintliff")
addPerson("Zach Gershman", "zachgersh@gmail.com", "6360@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Zachary Madigan", "zachary.madigan@apollovideo.com")
addPerson("Zachary Amsden", "zach@thundertoken.com")
addPerson("Zachary Amsden", "zach@thundertoken.com", "@zamsden")
addPerson("Zachary Gershman", "zgershman@pivotal.io")
addPerson("Zachary Madigan", "zacharywmadigan@gmail.com", "25899@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Zachary Romero", "zacromero3@gmail.com")
addPerson("Zacharya", "zacharya19@gmail.com")
addPerson("Zaq? Wiedmann", "zaquestion@gmail.com")
addPerson("Zero King", "l2d4y3@gmail.com")
addPerson("Zev Goldstein", "zev.goldstein@gmail.com", "@zevdg")
addPerson("Zezhou Yu", "ray.zezhou@gmail.com")
addPerson("Zhang Qiang", "dotslash.lu@gmail.com")
addPerson("Zhang Wei", "zhangwei198900@gmail.com")
addPerson("Zheng Dayu", "davidzheng23@gmail.com", "@ceshihao")
addPerson("Zheng Xu", "zheng.xu@arm.com")
addPerson("Zheng Xu", "zheng.xu@arm.com", "@Zheng-Xu")
addPerson("Zheng Yang", "zhengyang4k@gmail.com")
addPerson("Zhengyu He", "hzy@google.com")
addPerson("ZhiFeng Hu", "hufeng1987@gmail.com")
addPerson("Zhongpeng Lin", "zplin@uber.com", "@linzhp")
addPerson("Zhongwei Yao", "zhongwei.yao@arm.com", "@zhongweiy")
addPerson("Zhou Peng", "p@ctriple.cn", "26955@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Zhou Peng", "p@ctriple.cn", "@ctriple")
addPerson("Zhuo Meng", "mengzhuo1203@gmail.com", "7530@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("Ziad Hatahet", "hatahet@gmail.com", "@hatahet")
addPerson("Zorion Arrizabalaga", "zorionk@gmail.com", "@zorion")
addPerson("a.lukinykh", "a.lukinykh@xsolla.com")
addPerson("abdul.mannan", "abdul.mannan@thirdbridge.com")
addPerson("acoshift", "acoshift@gmail.com")
addPerson("adrienpetel", "peteladrien@gmail.com", "@feliixx")
addPerson("aecdanjun", "aeciodantasjunior@gmail.com", "@aecdanjun")
addPerson("ajackura", "ajackura@localhost")
addPerson("ajnirp", "ajnirp@users.noreply.github.com", "@ajnirp")
addPerson("akushman", "zeusakm@gmail.com")
addPerson("alexpantyukhin", "apantykhin@gmail.com", "@alexpantyukhin")
addPerson("alkesh26", "alkesh26@gmail.com", "@alkesh26")
addPerson("alokc", "alokkr1090@gmail.com")
addPerson("alpha.wong", "alpha.wong@lalamove.com")
addPerson("amandhora", "aman.usa07@gmail.com")
addPerson("amirrezaask", "raskarpour@gmail.com")
addPerson("anatoly techtonik", "techtonik@gmail.com")
addPerson("andrew werner", "andrew@upthere.com")
addPerson("andrey mirtchovski", "mirtchovski@gmail.com", "@mirtchovski")
addPerson("andrius4669", "andrius4669@gmail.com", "@andrius4669")
addPerson("andy", "andyjgarfield@gmail.com")
addPerson("apoorvam", "app.apoorva@gmail.com")
addPerson("areski", "areski@gmail.com", "@areski")
addPerson("as", "as.utf8@gmail.com", "@as")
addPerson("asgaines", "andrew.s.gaines@gmail.com")
addPerson("avi", "hi@avi.im")
addPerson("aviau", "alexandre@alexandreviau.net")
addPerson("avsharapov", "analytics.kzn@gmail.com", "@avsharapov")
addPerson("awaw fumin", "awawfumin@gmail.com", "@fumin")
addPerson("ayanamist", "ayanamist@gmail.com", "@ayanamist")
addPerson("azat", "kaumov.a.r@gmail.com", "@akaumov")
addPerson("azretkenzhaliev", "azret.kenzhaliev@gmail.com")
addPerson("bbrodriges", "bender.rodriges@gmail.com")
addPerson("benjamin-rood", "bisr@icloud.com")
addPerson("berkant ipek", "41230766+0xbkt@users.noreply.github.com", "@0xbkt")
addPerson("bogem", "albertnigma@gmail.com", "@bogem")
addPerson("bontequero", "bontequero@gmail.com", "@bontequero")
addPerson("boreq", "boreq@sourcedrops.com")
addPerson("buddhamagnet", "buddhamagnet@gmail.com")
addPerson("c9s", "yoanlin93@gmail.com", "@c9s")
addPerson("calerogers", "cale.rogers.m@gmail.com")
addPerson("caosz", "cszznbb@gmail.com")
addPerson("catatsuy", "m.ddotx.f@gmail.com", "@catatsuy")
addPerson("cch123", "buaa.cch@gmail.com", "@cch123")
addPerson("chanxuehong", "chanxuehong@gmail.com", "@chanxuehong")
addPerson("christopher-henderson", "chris@chenderson.org", "@christopher-henderson")
addPerson("cia-rana", "kiwamura0314@gmail.com", "@cia-rana")
addPerson("closs", "the.cody.oss@gmail.com", "@codyoss")
addPerson("conorbroderick", "cjayjayb@gmail.com")
addPerson("cyacco", "cyacco@gmail.com")
addPerson("dalyk", "dalyk@google.com")
addPerson("danoscarmike", "danom@google.com")
addPerson("datianshi", "dsz0111@gmail.com", "@datianshi")
addPerson("dchenk", "dcherchenko@gmail.com", "@dchenk")
addPerson("dechen-sherpa", "Dechen.Sherpa@dal.ca")
addPerson("delioda", "delioda@consenteye.com")
addPerson("diana ortega", "dicaormu@gmail.com")
addPerson("diplozoon", "huyuumi.dev@gmail.com", "@JohnTitor")
addPerson("djherbis", "djherbis@gmail.com", "@djherbis")
addPerson("dsivalingam", "dayansivalingam@gmail.com")
addPerson("dupoxy", "dupoxy@users.noreply.github.com", "@dupoxy")
addPerson("elmar", "ktye78@gmail.com")
addPerson("elpinal", "6elpinal@gmail.com", "@elpinal")
addPerson("emersion", "contact@emersion.fr")
addPerson("epkann", "epkann@gmail.com")
addPerson("erdi", "erdi@google.com")
addPerson("eric fang", "eric.fang@arm.com", "24534@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("erifan01", "eric.fang@arm.com", "@erifan")
addPerson("esell", "eujon.sellers@gmail.com")
addPerson("esell", "eujon.sellers@gmail.com", "@esell")
addPerson("fannie zhang", "Fannie.Zhang@arm.com", "21345@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("fanzha02", "fannie.zhang@arm.com", "@zhangfannie")
addPerson("feilengcui008", "feilengcui008@gmail.com", "@feilengcui008")
addPerson("feng pengfei", "mountainfpf@gmail.com")
addPerson("fenwickelliott", "charles@fenwickelliott.io")
addPerson("ferhat elmas", "elmas.ferhat@gmail.com")
addPerson("filewalkwithme", "maiconscosta@gmail.com", "@filewalkwithme")
addPerson("gangachris", "ganga.chris@gmail.com")
addPerson("garrickevans", "garrick@google.com")
addPerson("gbbr", "ga@stripetree.co.uk", "@gbbr")
addPerson("glorieux", "lorieux.g@gmail.com", "@glorieux")
addPerson("gmarik", "gmarik@gmail.com", "@gmarik")
addPerson("go101", "tapir.liu@gmail.com", "@TapirLiu")
addPerson("guitarbum722", "johnkenneth.moore@gmail.com")
addPerson("gulyasm", "mgulyas86@gmail.com", "@gulyasm")
addPerson("guyfedwards", "guyfedwards@gmail.com")
addPerson("hagen1778", "hagen1778@gmail.com", "@hagen1778")
addPerson("halfcrazy", "hackzhuyan@gmail.com")
addPerson("halgrimur", "douga@google.com")
addPerson("hanyang.tay", "htay@wesleyan.edu")
addPerson("haormj", "haormj@gmail.com", "@haormj")
addPerson("harshit777", "harshit.g.0702@gmail.com")
addPerson("haya14busa", "haya14busa@gmail.com", "@haya14busa")
addPerson("haya14busa", "hayabusa1419@gmail.com", "@haya14busa")
addPerson("hearot", "gabriel@hearot.it", "@hearot")
addPerson("helloPiers", "google@hellopiers.pro")
addPerson("hellozee", "hellozee@disroot.org", "@hellozee")
addPerson("hengwu0", "41297446+hengwu0@users.noreply.github.com", "@hengwu0")
addPerson("hertzbach", "rhertzbach@gmail.com")
addPerson("hezhenwei", "3711971@qq.com")
addPerson("hsinhoyeh", "yhh92u@gmail.com")
addPerson("huangyonglin", "1249107551@qq.com")
addPerson("ia", "isaac.ardis@gmail.com")
addPerson("iamqizhao", "toqizhao@gmail.com")
addPerson("ianzapolsky", "ianzapolsky@gmail.com", "@ianzapolsky")
addPerson("irfan sharif", "irfanmahmoudsharif@gmail.com")
addPerson("ivan parra", "ivantrips1@gmail.com")
addPerson("jaredculp", "jculp14@gmail.com", "@jaredculp")
addPerson("jerome-laforge", "jerome.laforge@gmail.Com")
addPerson("jimmy frasche", "soapboxcicero@gmail.com", "13220@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("jimmyfrasche", "soapboxcicero@gmail.com", "@jimmyfrasche")
addPerson("jirawat001", "paoji@icloud.com")
addPerson("joshua stein", "jcs@jcs.org")
addPerson("kanapuliAthavan", "athavankanapuli@gmail.com")
addPerson("kargakis", "mkargaki@redhat.com", "@kargakis")
addPerson("khr", "khr@khr-glaptop.roam.corp.google.com")
addPerson("kim yongbin", "kybinz@gmail.com", "@kybin")
addPerson("kirinrastogi", "kirin.rastogi@shopify.com")
addPerson("kirk", "kirk91.han@gmail.com", "@kirk91")
addPerson("knqyf263", "knqyf263@gmail.com")
addPerson("komuW", "komuw05@gmail.com", "@komuw")
addPerson("komuw", "komuw05@gmail.com")
addPerson("konstantin8105", "konstantin8105@gmail.com", "@Konstantin8105")
addPerson("kortschak", "dan.kortschak@adelaide.edu.au", "@kortschak")
addPerson("kujenga", "ataylor0123@gmail.com")
addPerson("lcd1232", "8745863+lcd1232@users.noreply.github.com")
addPerson("leigh schrandt", "leigh@null.net")
addPerson("linatiantamade", "linqiyo@gmail.com")
addPerson("lotus.wu", "lotus.wu@outlook.com")
addPerson("lsytj0413", "511121939@qq.com")
addPerson("ltnwgl", "ltnwgl@gmail.com")
addPerson("ltnwgl", "ltnwgl@gmail.com", "@gengliangwang")
addPerson("lucor", "lu.corbo@gmail.com")
addPerson("ludweeg", "mursalimovemeel@gmail.com", "@ludweeg")
addPerson("lukechampine", "luke.champine@gmail.com")
addPerson("lukechampine", "luke.champine@gmail.com", "@lukechampine")
addPerson("maiyang", "yangwen.yw@gmail.com")
addPerson("majiang", "ma.jiang@zte.com.cn", "@zte-majiang")
addPerson("mapeiqi", "mapeiqi2017@gmail.com")
addPerson("marwan-at-work", "marwan.sameer@gmail.com", "@marwan-at-work")
addPerson("matematik7", "domen@ipavec.net")
addPerson("mattyw", "gh@mattyw.net", "@mattyw")
addPerson("mdp", "m@mdp.im")
addPerson("meir fischer", "meirfischer@gmail.com", "8955@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("mewmew", "rnd0x00@gmail.com", "@mewmew")
addPerson("mihasya", "m@mihasya.com", "@mihasya")
addPerson("mike andrews", "mra@xoba.com", "@xoba")
addPerson("milad arabi", "milad.arabi@gmail.com")
addPerson("mingrammer", "mingrammer@gmail.com", "@mingrammer")
addPerson("mischief", "mischief@offblast.org", "@mischief")
addPerson("mmaldo329", "michael_maldonado@comcast.com")
addPerson("molivier", "olivier.matthieu@gmail.com", "@molivier")
addPerson("monkeybutter", "pablo.larraondo@anu.edu.au")
addPerson("moznion", "moznion@gmail.com")
addPerson("mpl", "mathieu.lonjaret@gmail.com", "@mpl")
addPerson("mstrong", "mstrong1341@gmail.com", "@xmattstrongx")
addPerson("musgravejw", "musgravejw@gmail.com")
addPerson("nicerobot", "golang@nicerobot.org")
addPerson("nick.grange", "nicolas.grange@retrievercommunications.com")
addPerson("nkhumphreys", "nkhumphreys@gmail.com")
addPerson("nobonobo", "irieda@gmail.com", "@nobonobo")
addPerson("nogoegst", "nogoegst@users.noreply.github.com", "@nogoegst")
addPerson("nwidger", "niels.widger@gmail.com", "@nwidger")
addPerson("oiooj", "nototon@gmail.com")
addPerson("omarvides", "omarvides@gmail.com")
addPerson("pallat", "yod.pallat@gmail.com")
addPerson("pamelin", "amelin.paul@gmail.com")
addPerson("pankona", "yosuke.akatsuka@gmail.com")
addPerson("pavel-paulau", "pavel.paulau@gmail.com", "@pavel-paulau")
addPerson("pbberlin", "peter.buchmann@web.de")
addPerson("peter zhang", "i@ddatsh.com")
addPerson("phayes", "patrick.d.hayes@gmail.com")
addPerson("philhofer", "phofer@umich.edu", "@philhofer")
addPerson("pityonline", "pityonline@gmail.com", "@pityonline")
addPerson("prateekgogia", "prateekgogia42@gmail.com")
addPerson("pvoicu", "pvoicu@paypal.com", "@pvoicu")
addPerson("pytimer", "lixin20101023@gmail.com")
addPerson("qeed", "qeed.quan@gmail.com", "@qeedquan")
addPerson("ragavendra", "ragavendra.bn@gmail.com")
addPerson("rajender", "rajenderreddykompally@gmail.com", "@rajender")
addPerson("rajnikant", "rajnikant12345@gmail.com")
addPerson("rhysd", "lin90162@yahoo.co.jp")
addPerson("robnorman", "rob.norman@infinitycloud.com", "@robnorman")
addPerson("roger peppe", "rogpeppe@gmail.com", "6010@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("romanyx", "romanyx90@yandex.ru", "@romanyx")
addPerson("ron minnich", "rminnich@gmail.com", "12935@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("rubyist", "scott.barron@github.com", "@rubyist")
addPerson("rust", "pruest@gmail.com", "@gazed")
addPerson("rwaweber", "rwaweber@gmail.com")
addPerson("saberuster", "saberuster@gmail.com")
addPerson("sagarkrkv", "sagarkrkv@gmail.com")
addPerson("sam boyer", "tech@samboyer.org", "@sdboyer")
addPerson("sandyskies", "chenmingjie0828@163.com")
addPerson("sasha-s", "sasha@scaledinference.com")
addPerson("sayden", "mariocaster@gmail.com")
addPerson("sbramin", "s@sbramin.com")
addPerson("sdheisenberg", "nicholasleli@gmail.com", "@smugcloud")
addPerson("sergey", "sngasuan@gmail.com", "@Asuan")
addPerson("sergey.arseev", "sergey.arseev@intel.com", "@sergeyarseev")
addPerson("sergey.dobrodey", "sergey.dobrodey@synesis.ru")
addPerson("sevki", "s@sevki.org", "@sevki")
addPerson("shaharko", "skohanim@gmail.com", "@skohanim")
addPerson("shawnps", "shawnpsmith@gmail.com", "@shawnps")
addPerson("shinofara", "shinofara@gmail.com")
addPerson("shogo-ma", "choroma194@gmail.com", "@shogo-ma")
addPerson("shwsun", "jethro.sun7@gmail.com")
addPerson("slene", "vslene@gmail.com")
addPerson("softctrl", "carlostimoshenkorodrigueslopes@gmail.com")
addPerson("soluchok", "isoluchok@gmail.com", "@soluchok")
addPerson("spring1843", "yasser@yasser.ca")
addPerson("stephane benoit", "stefb965@gmail.com")
addPerson("stxmendez", "stxmendez@gmail.com")
addPerson("sukrithanda", "sukrit.handa@utoronto.ca")
addPerson("tal@whatexit.org", "tal@whatexit.org")
addPerson("taylorza", "taylorza@gmail.com")
addPerson("tbunyk", "tbunyk@gmail.com", "@bunyk")
addPerson("teague", "tnc1443@gmail.com", "@teaguecole")
addPerson("telecoda", "robbaines@gmail.com")
addPerson("templexxx", "lucas1x1x@gmail.com", "@templexxx")
addPerson("tengufromsky", "nick27surgut@gmail.com", "@tengufromsky")
addPerson("theairkit", "theairkit@gmail.com")
addPerson("themester", "Garriga975@gmail.com")
addPerson("themester", "dgrripoll@gmail.com")
addPerson("themihai", "mihai@epek.com")
addPerson("thoeni", "thoeni@gmail.com")
addPerson("thoeni", "thoeni@gmail.com", "@thoeni")
addPerson("thor wolpert", "thor@wolpert.ca")
addPerson("tkivisik", "taavi.kivisik@gmail.com", "@tkivisik")
addPerson("tliu", "terry.liu.y@gmail.com")
addPerson("tnt", "alkaloid.btx@gmail.com", "@trtstm")
addPerson("tom", "tommiemeyer290@gmail.com")
addPerson("tro3", "trey.roessig@gmail.com", "@tro3")
addPerson("ttacon", "ttacon@gmail.com", "@ttacon")
addPerson("ttyh061", "ttyh061@gmail.com")
addPerson("tuxpy", "q8886888@qq.com")
addPerson("unknown", "daria.kolistratova@intel.com", "@DarKol13")
addPerson("unknown", "geon0250@gmail.com", "@KimMachineGun")
addPerson("unknown", "nonamezeil@gmail.com", "@zeil")
addPerson("uropek", "uropek@gmail.com", "@uropek")
addPerson("vabr-g", "vabr@google.com")
addPerson("viswesr", "r.visweswara@gmail.com")
addPerson("voutasaurus", "voutasaurus@gmail.com", "@voutasaurus")
addPerson("vvakame", "vvakame+dev@gmail.com")
addPerson("wbond", "will@wbond.net")
addPerson("weeellz", "weeellz12@gmail.com", "@weeellz")
addPerson("wheelcomplex yin", "wheelcomplex@gmail.com")
addPerson("woodsaj", "awoods@raintank.io", "@woodsaj")
addPerson("wozz", "wozz@users.noreply.github.com")
addPerson("wrfly", "mr.wrfly@gmail.com")
addPerson("wu-heng", "41297446+wu-heng@users.noreply.github.com")
addPerson("wuyunzhou", "yunzhouwu@gmail.com", "@wuyunzhou")
addPerson("wzshiming", "wzshiming@foxmail.com")
addPerson("xiezhenye", "xiezhenye@gmail.com")
addPerson("xufei_Alex", "badgangkiller@gmail.com", "18915@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("xufei_Alex", "badgangkiller@gmail.com", "@knightXun")
addPerson("yansal", "yannsalaun1@gmail.com", "@yansal")
addPerson("yanyiwu", "wuyanyi09@gmail.com")
addPerson("yazver", "ya.zver@gmail.com")
addPerson("yo-tak", "yo.tak0812@gmail.com", "@yo-tak")
addPerson("yuuji.yaginuma", "yuuji.yaginuma@gmail.com", "@y-yagi")
addPerson("zachgersh", "zachgersh@gmail.com")
addPerson("zaq1tomo", "zaq1tomo@gmail.com", "@zaq1tomo")
addPerson("zhongtao.chen", "chenzhongtao@126.com", "@chenzhongtao")
addPerson("zhoujun", "dev.zhoujun@gmail.com")
addPerson("Özgür Kesim", "oec-go@kesim.org")
addPerson("Максим Федосеев", "max.faceless.frei@gmail.com", "@codesenberg")
addPerson("Фахриддин Балтаев", "faxriddinjon@gmail.com", "@faxriddin")
addPerson("Юрий Соколов", "funny.falcon@gmail.com", "7215@62eb7196-b449-3ce5-99f1-c037f21e1705")
addPerson("一痕 刘", "liuyihen@gmail.com")
addPerson("张嵩", "zs349596@gmail.com", "@zs1379")
addPerson("沈涛", "shentaoskyking@gmail.com", "@smileusd")
addPerson("祥曦 徐", "lucas1x1x@gmail.com", "28434@62eb7196-b449-3ce5-99f1-c037f21e1705")
}
// GithubOfGomoteUser returns the GitHub username for the provided gomote user.
// Gomote users without a known mapping are returned unchanged.
func GithubOfGomoteUser(gomoteUser string) (githubUser string) {
	// Known gomote-to-GitHub username mappings; anyone not listed here
	// uses the same name on both systems.
	mapping := map[string]string{
		"austin":    "aclements",
		"cbro":      "broady",
		"cherryyz":  "cherrymui",
		"cmang":     "paranoiacblack",
		"drchase":   "dr2chase",
		"gri":       "griesemer",
		"hakim":     "hyangah",
		"herbie":    "cybrcodr",
		"iant":      "ianlancetaylor",
		"jbd":       "rakyll",
		"joetsai":   "dsnet",
		"jrjohnson": "johnsonj",
		"khr":       "randall77",
		"lazard":    "davidlazar",
		"pjw":       "pjweinbgo",
		"r":         "robpike",
		"rstambler": "stamblerre",
		"sameer":    "Sajmani",
		"shadams":   "adams-sarah",
		"spf":       "spf13",
		"valsorda":  "FiloSottile",
	}
	if gh, ok := mapping[gomoteUser]; ok {
		return gh
	}
	return gomoteUser
}
|
/*
Copyright 2019 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"context"
"errors"
"fmt"
"github.com/ceph/ceph-csi/internal/util"
)
// validateNonEmptyField returns an error identifying fieldName within
// structName when field is the empty string; otherwise it returns nil.
func validateNonEmptyField(field, fieldName, structName string) error {
	if len(field) != 0 {
		return nil
	}
	return fmt.Errorf("value '%s' in '%s' structure cannot be empty", fieldName, structName)
}
// validateRbdSnap verifies that the fields of rbdSnap required to locate a
// snapshot on the backend are populated: the CSI request name, the cluster
// connection details (Monitors, ClusterID), the pool, and the parent image
// name. It returns a descriptive error for the first missing field.
func validateRbdSnap(rbdSnap *rbdSnapshot) error {
	if err := validateNonEmptyField(rbdSnap.RequestName, "RequestName", "rbdSnapshot"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdSnap.Monitors, "Monitors", "rbdSnapshot"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdSnap.Pool, "Pool", "rbdSnapshot"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdSnap.RbdImageName, "RbdImageName", "rbdSnapshot"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdSnap.ClusterID, "ClusterID", "rbdSnapshot"); err != nil {
		return err
	}
	// All mandatory fields are present. Return nil explicitly rather than the
	// earlier `return err`, which misleadingly suggested err could be non-nil
	// at this point.
	return nil
}
// validateRbdVol verifies that the fields of rbdVol required to locate a
// volume on the backend are populated: the CSI request name, the cluster
// connection details (Monitors, ClusterID), the pool, and a non-zero size.
// It returns a descriptive error for the first missing field.
func validateRbdVol(rbdVol *rbdVolume) error {
	if err := validateNonEmptyField(rbdVol.RequestName, "RequestName", "rbdVolume"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdVol.Monitors, "Monitors", "rbdVolume"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdVol.Pool, "Pool", "rbdVolume"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdVol.ClusterID, "ClusterID", "rbdVolume"); err != nil {
		return err
	}
	if rbdVol.VolSize == 0 {
		return errors.New("value 'VolSize' in 'rbdVolume' structure cannot be 0")
	}
	// All mandatory fields are present. Return nil explicitly rather than the
	// earlier `return err`, which misleadingly suggested err could be non-nil
	// at this point.
	return nil
}
/*
checkSnapCloneExists, and its counterpart checkVolExists, function checks if
the passed in rbdSnapshot or rbdVolume exists on the backend.
**NOTE:** These functions manipulate the rados omaps that hold information
regarding volume names as requested by the CSI drivers. Hence, these need to be
invoked only when the respective CSI driver generated snapshot or volume name
based locks are held, as otherwise racy access to these omaps may end up
leaving them in an inconsistent state.
These functions need enough information about cluster and pool (ie, Monitors,
Pool, IDs filled in) to operate. They further require that the RequestName
element of the structure have a valid value to operate on and determine if the
said RequestName already exists on the backend.
These functions populate the snapshot or the image name, its attributes and the
CSI snapshot/volume ID for the same when successful.
These functions also cleanup omap reservations that are stale. I.e when omap
entries exist and backing images or snapshots are missing, or one of the omaps
exist and the next is missing. This is because, the order of omap creation and
deletion are inverse of each other, and protected by the request name lock, and
hence any stale omaps are leftovers from incomplete transactions and are hence
safe to garbage collect.
*/
// checkSnapCloneExists checks whether the snapshot-backed clone described by
// rbdSnap already exists on the backend, rolling an interrupted snapshot
// creation transaction forward or back as needed (see the block comment above
// for the locking and omap-consistency requirements). On success it fills in
// the snapshot name, attributes and the CSI snapshot ID in rbdSnap.
func checkSnapCloneExists(ctx context.Context, parentVol *rbdVolume, rbdSnap *rbdSnapshot, cr *util.Credentials) (bool, error) {
	err := validateRbdSnap(rbdSnap)
	if err != nil {
		return false, err
	}
	j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
	if err != nil {
		return false, err
	}
	defer j.Destroy()
	// Look up an existing name reservation for this CSI request.
	snapData, err := j.CheckReservation(ctx, rbdSnap.JournalPool,
		rbdSnap.RequestName, rbdSnap.NamePrefix, rbdSnap.RbdImageName, "")
	if err != nil {
		return false, err
	}
	if snapData == nil {
		// No reservation: the snapshot does not exist.
		return false, nil
	}
	snapUUID := snapData.ImageUUID
	rbdSnap.RbdSnapName = snapData.ImageAttributes.ImageName
	rbdSnap.ImageID = snapData.ImageAttributes.ImageID
	// it should never happen that this disagrees, but check
	if rbdSnap.Pool != snapData.ImagePool {
		return false, fmt.Errorf("stored snapshot pool (%s) and expected snapshot pool (%s) mismatch",
			snapData.ImagePool, rbdSnap.Pool)
	}
	// vol represents the rbd clone image that backs the snapshot.
	vol := generateVolFromSnap(rbdSnap)
	defer vol.Destroy()
	err = vol.Connect(cr)
	if err != nil {
		return false, err
	}
	vol.ReservedID = snapUUID
	// Fetch on-disk image attributes
	err = vol.getImageInfo()
	if err != nil {
		if errors.Is(err, ErrImageNotFound) {
			// Clone image is missing: roll the transaction back by removing
			// the snapshot on the parent (if any) and undoing the cloning.
			err = parentVol.deleteSnapshot(ctx, rbdSnap)
			if err != nil {
				if !errors.Is(err, ErrSnapNotFound) {
					util.ErrorLog(ctx, "failed to delete snapshot %s: %v", rbdSnap, err)
					return false, err
				}
			}
			// NOTE(review): vol is passed both as the clone and as its parent
			// here — presumably intentional for this cleanup path; confirm
			// against undoSnapshotCloning's signature.
			err = undoSnapshotCloning(ctx, vol, rbdSnap, vol, cr)
		}
		return false, err
	}
	// Snapshot creation transaction is rolled forward if rbd clone image
	// representing the snapshot is found. Any failures till finding the image
	// causes a roll back of the snapshot creation transaction.
	// Code from here on, rolls the transaction forward.
	rbdSnap.CreatedAt = vol.CreatedAt
	rbdSnap.SizeBytes = vol.VolSize
	// found a snapshot already available, process and return its information
	rbdSnap.SnapID, err = util.GenerateVolID(ctx, rbdSnap.Monitors, cr, snapData.ImagePoolID, rbdSnap.Pool,
		rbdSnap.ClusterID, snapUUID, volIDVersion)
	if err != nil {
		return false, err
	}
	// check snapshot exists if not create it
	err = vol.checkSnapExists(rbdSnap)
	if errors.Is(err, ErrSnapNotFound) {
		// create snapshot
		sErr := vol.createSnapshot(ctx, rbdSnap)
		if sErr != nil {
			util.ErrorLog(ctx, "failed to create snapshot %s: %v", rbdSnap, sErr)
			err = undoSnapshotCloning(ctx, vol, rbdSnap, vol, cr)
			return false, err
		}
	}
	// NOTE(review): when the snapshot was missing and createSnapshot
	// succeeded above, err still holds ErrSnapNotFound here, so this returns
	// an error despite the successful roll-forward. Looks like err should be
	// reset to nil after a successful createSnapshot — confirm intent.
	if err != nil {
		return false, err
	}
	if vol.ImageID == "" {
		// Older reservations may predate image-ID storage; fetch and persist it.
		sErr := vol.getImageID()
		if sErr != nil {
			util.ErrorLog(ctx, "failed to get image id %s: %v", vol, sErr)
			err = undoSnapshotCloning(ctx, vol, rbdSnap, vol, cr)
			return false, err
		}
		sErr = j.StoreImageID(ctx, vol.JournalPool, vol.ReservedID, vol.ImageID, cr)
		if sErr != nil {
			util.ErrorLog(ctx, "failed to store volume id %s: %v", vol, sErr)
			err = undoSnapshotCloning(ctx, vol, rbdSnap, vol, cr)
			return false, err
		}
	}
	// NOTE(review): err is nil on every path reaching this point (each failure
	// above returns), so this check appears dead — candidate for removal.
	if err != nil {
		return false, err
	}
	util.DebugLog(ctx, "found existing image (%s) with name (%s) for request (%s)",
		rbdSnap.SnapID, rbdSnap.RbdSnapName, rbdSnap.RequestName)
	return true, nil
}
/*
Check comment on checkSnapExists, to understand how this function behaves
**NOTE:** These functions manipulate the rados omaps that hold information
regarding volume names as requested by the CSI drivers. Hence, these need to be
invoked only when the respective CSI snapshot or volume name based locks are
held, as otherwise racy access to these omaps may end up leaving the omaps in
an inconsistent state.
parentVol is required to check the clone is created from the requested parent
image or not, if temporary snapshots and clones created for the volume when the
content source is volume we need to recover from the stale entries or complete
the pending operations.
*/
// Exists checks whether a volume reserved under rv.RequestName is already
// present on the backend (see the block comment above for the locking
// requirements, and for the role of parentVol when recovering from stale
// clone entries). On success it populates rv with the image name, attributes
// and the CSI volume ID, and returns true.
func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, error) {
	err := validateRbdVol(rv)
	if err != nil {
		return false, err
	}
	kmsID := ""
	if rv.Encrypted {
		// Encrypted volumes record the KMS ID along with the name reservation.
		kmsID = rv.KMS.GetID()
	}
	j, err := volJournal.Connect(rv.Monitors, rv.RadosNamespace, rv.conn.Creds)
	if err != nil {
		return false, err
	}
	defer j.Destroy()
	imageData, err := j.CheckReservation(
		ctx, rv.JournalPool, rv.RequestName, rv.NamePrefix, "", kmsID)
	if err != nil {
		return false, err
	}
	if imageData == nil {
		// No reservation exists, hence no volume.
		return false, nil
	}
	rv.ReservedID = imageData.ImageUUID
	rv.RbdImageName = imageData.ImageAttributes.ImageName
	rv.ImageID = imageData.ImageAttributes.ImageID
	// check if topology constraints match what is found
	rv.Topology, err = util.MatchTopologyForPool(rv.TopologyPools, rv.TopologyRequirement,
		imageData.ImagePool)
	if err != nil {
		// TODO check if need any undo operation here, or ErrVolNameConflict
		return false, err
	}
	// update Pool, if it was topology constrained
	if rv.Topology != nil {
		rv.Pool = imageData.ImagePool
	}
	// NOTE: Return volsize should be on-disk volsize, not request vol size, so
	// save it for size checks before fetching image data
	requestSize := rv.VolSize
	// Fetch on-disk image attributes and compare against request
	err = rv.getImageInfo()
	if err != nil {
		if errors.Is(err, ErrImageNotFound) {
			// Need to check cloned info here not on createvolume,
			if parentVol != nil {
				found, cErr := rv.checkCloneImage(ctx, parentVol)
				if found && cErr == nil {
					return true, nil
				}
				if cErr != nil {
					return false, cErr
				}
			}
			// Stale reservation without a backing image: clean up the omaps.
			err = j.UndoReservation(ctx, rv.JournalPool, rv.Pool,
				rv.RbdImageName, rv.RequestName)
			return false, err
		}
		return false, err
	}
	if rv.ImageID == "" {
		// Older reservations may predate image-ID storage; fetch and persist it.
		err = rv.getImageID()
		if err != nil {
			util.ErrorLog(ctx, "failed to get image id %s: %v", rv, err)
			return false, err
		}
		err = j.StoreImageID(ctx, rv.JournalPool, rv.ReservedID, rv.ImageID, rv.conn.Creds)
		if err != nil {
			util.ErrorLog(ctx, "failed to store volume id %s: %v", rv, err)
			return false, err
		}
	}
	// The former `if err != nil` check here was dead code: err is nil on every
	// path reaching this point (each failure path above returns), so it has
	// been removed.
	// size checks
	if rv.VolSize < requestSize {
		return false, fmt.Errorf("%w: image with the same name (%s) but with different size already exists",
			ErrVolNameConflict, rv.RbdImageName)
	}
	// TODO: We should also ensure image features and format is the same
	// found a volume already available, process and return it!
	rv.VolID, err = util.GenerateVolID(ctx, rv.Monitors, rv.conn.Creds, imageData.ImagePoolID, rv.Pool,
		rv.ClusterID, rv.ReservedID, volIDVersion)
	if err != nil {
		return false, err
	}
	util.DebugLog(ctx, "found existing volume (%s) with image name (%s) for request (%s)",
		rv.VolID, rv.RbdImageName, rv.RequestName)
	return true, nil
}
// reserveSnap is a helper routine to request a rbdSnapshot name reservation
// and generate the volume ID for the generated name. rbdVol is the parent
// image the snapshot is taken from; its name is recorded with the reservation.
func reserveSnap(ctx context.Context, rbdSnap *rbdSnapshot, rbdVol *rbdVolume, cr *util.Credentials) error {
	// The redundant `var ( err error )` declaration was removed; err is
	// introduced by the first short variable declaration below.
	journalPoolID, imagePoolID, err := util.GetPoolIDs(ctx, rbdSnap.Monitors, rbdSnap.JournalPool, rbdSnap.Pool, cr)
	if err != nil {
		return err
	}
	j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
	if err != nil {
		return err
	}
	defer j.Destroy()
	// Reserve a snapshot name in the journal, tied to the CSI request name.
	rbdSnap.ReservedID, rbdSnap.RbdSnapName, err = j.ReserveName(
		ctx, rbdSnap.JournalPool, journalPoolID, rbdSnap.Pool, imagePoolID,
		rbdSnap.RequestName, rbdSnap.NamePrefix, rbdVol.RbdImageName, "")
	if err != nil {
		return err
	}
	rbdSnap.SnapID, err = util.GenerateVolID(ctx, rbdSnap.Monitors, cr, imagePoolID, rbdSnap.Pool,
		rbdSnap.ClusterID, rbdSnap.ReservedID, volIDVersion)
	if err != nil {
		return err
	}
	util.DebugLog(ctx, "generated Volume ID (%s) and image name (%s) for request name (%s)",
		rbdSnap.SnapID, rbdSnap.RbdSnapName, rbdSnap.RequestName)
	return nil
}
// updateTopologyConstraints fills in the pool and topology of rbdVol from
// topology constrained parameters, when present. When rbdSnap is non-nil the
// volume is instead pinned to the snapshot's pool, after verifying that pool
// satisfies the topology requirement.
func updateTopologyConstraints(rbdVol *rbdVolume, rbdSnap *rbdSnapshot) error {
	if rbdSnap == nil {
		// No snapshot source: derive pool/topology from the topology
		// constrained parameters (if any are present).
		poolName, dataPoolName, topology, err := util.FindPoolAndTopology(rbdVol.TopologyPools, rbdVol.TopologyRequirement)
		if err != nil {
			return err
		}
		if poolName != "" {
			rbdVol.Pool = poolName
			rbdVol.DataPool = dataPoolName
			rbdVol.Topology = topology
		}
		return nil
	}
	// Restoring from a snapshot: the volume has to live in the snapshot's
	// pool, so check that pool against the topology requirement.
	var err error
	rbdVol.Topology, err = util.MatchTopologyForPool(rbdVol.TopologyPools,
		rbdVol.TopologyRequirement, rbdSnap.Pool)
	if err != nil {
		return err
	}
	// Adopt the snapshot's pool only when a topology constraint matched.
	if rbdVol.Topology != nil {
		rbdVol.Pool = rbdSnap.Pool
	}
	return nil
}
// reserveVol is a helper routine to request a rbdVolume name reservation and
// generate the volume ID for the generated name. rbdSnap, when non-nil,
// constrains the volume to the snapshot's pool via updateTopologyConstraints.
func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
	// The redundant `var ( err error )` declaration was removed; err is
	// introduced by the short variable declaration below.
	// Pin the volume to a topology-constrained pool (or the snapshot's pool)
	// before reserving a name in it.
	err := updateTopologyConstraints(rbdVol, rbdSnap)
	if err != nil {
		return err
	}
	journalPoolID, imagePoolID, err := util.GetPoolIDs(ctx, rbdVol.Monitors, rbdVol.JournalPool, rbdVol.Pool, cr)
	if err != nil {
		return err
	}
	kmsID := ""
	if rbdVol.Encrypted {
		// Encrypted volumes record the KMS ID along with the reservation.
		kmsID = rbdVol.KMS.GetID()
	}
	j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
	if err != nil {
		return err
	}
	defer j.Destroy()
	// Reserve an image name in the journal, tied to the CSI request name.
	rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName(
		ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID,
		rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID)
	if err != nil {
		return err
	}
	rbdVol.VolID, err = util.GenerateVolID(ctx, rbdVol.Monitors, cr, imagePoolID, rbdVol.Pool,
		rbdVol.ClusterID, rbdVol.ReservedID, volIDVersion)
	if err != nil {
		return err
	}
	util.DebugLog(ctx, "generated Volume ID (%s) and image name (%s) for request name (%s)",
		rbdVol.VolID, rbdVol.RbdImageName, rbdVol.RequestName)
	return nil
}
// undoSnapReservation is a helper routine to undo a name reservation for
// rbdSnapshot, removing the corresponding journal entries.
func undoSnapReservation(ctx context.Context, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
	j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
	if err != nil {
		return err
	}
	defer j.Destroy()
	// Release the reserved snapshot name from the journal.
	return j.UndoReservation(
		ctx, rbdSnap.JournalPool, rbdSnap.Pool, rbdSnap.RbdSnapName,
		rbdSnap.RequestName)
}
// undoVolReservation is a helper routine to undo a name reservation for
// rbdVolume, removing the corresponding journal entries.
func undoVolReservation(ctx context.Context, rbdVol *rbdVolume, cr *util.Credentials) error {
	j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
	if err != nil {
		return err
	}
	defer j.Destroy()
	// Release the reserved image name from the journal.
	return j.UndoReservation(ctx, rbdVol.JournalPool, rbdVol.Pool,
		rbdVol.RbdImageName, rbdVol.RequestName)
}
rbd: remove false error condition check in rbdVol.Exists()
Signed-off-by: Humble Chirammal <dc8ba7e82a7df22725dedec9b10c7aaab9ec713f@redhat.com>
/*
Copyright 2019 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"context"
"errors"
"fmt"
"github.com/ceph/ceph-csi/internal/util"
)
// validateNonEmptyField returns an error naming fieldName and structName when
// field is empty; otherwise it returns nil.
func validateNonEmptyField(field, fieldName, structName string) error {
	if field != "" {
		return nil
	}
	return fmt.Errorf("value '%s' in '%s' structure cannot be empty", fieldName, structName)
}
// validateRbdSnap verifies that the fields of rbdSnap required to locate a
// snapshot on the backend are populated: the CSI request name, the cluster
// connection details (Monitors, ClusterID), the pool, and the parent image
// name. It returns a descriptive error for the first missing field.
func validateRbdSnap(rbdSnap *rbdSnapshot) error {
	if err := validateNonEmptyField(rbdSnap.RequestName, "RequestName", "rbdSnapshot"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdSnap.Monitors, "Monitors", "rbdSnapshot"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdSnap.Pool, "Pool", "rbdSnapshot"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdSnap.RbdImageName, "RbdImageName", "rbdSnapshot"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdSnap.ClusterID, "ClusterID", "rbdSnapshot"); err != nil {
		return err
	}
	// All mandatory fields are present. Return nil explicitly rather than the
	// earlier `return err`, which misleadingly suggested err could be non-nil
	// at this point.
	return nil
}
// validateRbdVol verifies that the fields of rbdVol required to locate a
// volume on the backend are populated: the CSI request name, the cluster
// connection details (Monitors, ClusterID), the pool, and a non-zero size.
// It returns a descriptive error for the first missing field.
func validateRbdVol(rbdVol *rbdVolume) error {
	if err := validateNonEmptyField(rbdVol.RequestName, "RequestName", "rbdVolume"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdVol.Monitors, "Monitors", "rbdVolume"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdVol.Pool, "Pool", "rbdVolume"); err != nil {
		return err
	}
	if err := validateNonEmptyField(rbdVol.ClusterID, "ClusterID", "rbdVolume"); err != nil {
		return err
	}
	if rbdVol.VolSize == 0 {
		return errors.New("value 'VolSize' in 'rbdVolume' structure cannot be 0")
	}
	// All mandatory fields are present. Return nil explicitly rather than the
	// earlier `return err`, which misleadingly suggested err could be non-nil
	// at this point.
	return nil
}
/*
checkSnapCloneExists, and its counterpart checkVolExists, function checks if
the passed in rbdSnapshot or rbdVolume exists on the backend.
**NOTE:** These functions manipulate the rados omaps that hold information
regarding volume names as requested by the CSI drivers. Hence, these need to be
invoked only when the respective CSI driver generated snapshot or volume name
based locks are held, as otherwise racy access to these omaps may end up
leaving them in an inconsistent state.
These functions need enough information about cluster and pool (ie, Monitors,
Pool, IDs filled in) to operate. They further require that the RequestName
element of the structure have a valid value to operate on and determine if the
said RequestName already exists on the backend.
These functions populate the snapshot or the image name, its attributes and the
CSI snapshot/volume ID for the same when successful.
These functions also cleanup omap reservations that are stale. I.e when omap
entries exist and backing images or snapshots are missing, or one of the omaps
exist and the next is missing. This is because, the order of omap creation and
deletion are inverse of each other, and protected by the request name lock, and
hence any stale omaps are leftovers from incomplete transactions and are hence
safe to garbage collect.
*/
// checkSnapCloneExists checks whether the snapshot-backed clone described by
// rbdSnap already exists on the backend, rolling the snapshot-creation
// transaction forward or back as needed. See the block comment above for the
// locking and omap-consistency requirements.
func checkSnapCloneExists(ctx context.Context, parentVol *rbdVolume, rbdSnap *rbdSnapshot, cr *util.Credentials) (bool, error) {
	err := validateRbdSnap(rbdSnap)
	if err != nil {
		return false, err
	}
	j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
	if err != nil {
		return false, err
	}
	defer j.Destroy()
	// Look up an existing name reservation for this request.
	snapData, err := j.CheckReservation(ctx, rbdSnap.JournalPool,
		rbdSnap.RequestName, rbdSnap.NamePrefix, rbdSnap.RbdImageName, "")
	if err != nil {
		return false, err
	}
	if snapData == nil {
		// No reservation: the snapshot does not exist yet.
		return false, nil
	}
	snapUUID := snapData.ImageUUID
	rbdSnap.RbdSnapName = snapData.ImageAttributes.ImageName
	rbdSnap.ImageID = snapData.ImageAttributes.ImageID
	// it should never happen that this disagrees, but check
	if rbdSnap.Pool != snapData.ImagePool {
		return false, fmt.Errorf("stored snapshot pool (%s) and expected snapshot pool (%s) mismatch",
			snapData.ImagePool, rbdSnap.Pool)
	}
	vol := generateVolFromSnap(rbdSnap)
	defer vol.Destroy()
	err = vol.Connect(cr)
	if err != nil {
		return false, err
	}
	vol.ReservedID = snapUUID
	// Fetch on-disk image attributes
	err = vol.getImageInfo()
	if err != nil {
		if errors.Is(err, ErrImageNotFound) {
			// Reservation exists but the clone image is missing: this is a
			// stale/incomplete transaction, so roll it back.
			err = parentVol.deleteSnapshot(ctx, rbdSnap)
			if err != nil {
				if !errors.Is(err, ErrSnapNotFound) {
					util.ErrorLog(ctx, "failed to delete snapshot %s: %v", rbdSnap, err)
					return false, err
				}
			}
			err = undoSnapshotCloning(ctx, vol, rbdSnap, vol, cr)
		}
		return false, err
	}
	// Snapshot creation transaction is rolled forward if rbd clone image
	// representing the snapshot is found. Any failures till finding the image
	// causes a roll back of the snapshot creation transaction.
	// Code from here on, rolls the transaction forward.
	rbdSnap.CreatedAt = vol.CreatedAt
	rbdSnap.SizeBytes = vol.VolSize
	// found a snapshot already available, process and return its information
	rbdSnap.SnapID, err = util.GenerateVolID(ctx, rbdSnap.Monitors, cr, snapData.ImagePoolID, rbdSnap.Pool,
		rbdSnap.ClusterID, snapUUID, volIDVersion)
	if err != nil {
		return false, err
	}
	// check snapshot exists if not create it
	err = vol.checkSnapExists(rbdSnap)
	if errors.Is(err, ErrSnapNotFound) {
		// create snapshot
		sErr := vol.createSnapshot(ctx, rbdSnap)
		if sErr != nil {
			util.ErrorLog(ctx, "failed to create snapshot %s: %v", rbdSnap, sErr)
			err = undoSnapshotCloning(ctx, vol, rbdSnap, vol, cr)
			return false, err
		}
	}
	// NOTE(review): if the snapshot was just created successfully above, err
	// still holds ErrSnapNotFound here and is returned — confirm this is the
	// intended behavior (the caller would retry and succeed next time).
	if err != nil {
		return false, err
	}
	if vol.ImageID == "" {
		// Image ID is missing from the reservation; fetch it from the image
		// and persist it in the journal.
		sErr := vol.getImageID()
		if sErr != nil {
			util.ErrorLog(ctx, "failed to get image id %s: %v", vol, sErr)
			err = undoSnapshotCloning(ctx, vol, rbdSnap, vol, cr)
			return false, err
		}
		sErr = j.StoreImageID(ctx, vol.JournalPool, vol.ReservedID, vol.ImageID, cr)
		if sErr != nil {
			util.ErrorLog(ctx, "failed to store volume id %s: %v", vol, sErr)
			err = undoSnapshotCloning(ctx, vol, rbdSnap, vol, cr)
			return false, err
		}
	}
	if err != nil {
		return false, err
	}
	util.DebugLog(ctx, "found existing image (%s) with name (%s) for request (%s)",
		rbdSnap.SnapID, rbdSnap.RbdSnapName, rbdSnap.RequestName)
	return true, nil
}
/*
Check the comment on checkSnapCloneExists to understand how this function behaves
**NOTE:** These functions manipulate the rados omaps that hold information
regarding volume names as requested by the CSI drivers. Hence, these need to be
invoked only when the respective CSI snapshot or volume name based locks are
held, as otherwise racy access to these omaps may end up leaving the omaps in
an inconsistent state.
parentVol is required to check the clone is created from the requested parent
image or not, if temporary snapshots and clones created for the volume when the
content source is volume we need to recover from the stale entries or complete
the pending operations.
*/
// Exists checks whether this volume's request name already has a journal
// reservation and a backing image. See the block comment above for the
// locking requirements and the role of parentVol.
func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, error) {
	err := validateRbdVol(rv)
	if err != nil {
		return false, err
	}
	// Encrypted volumes record their KMS ID in the reservation.
	kmsID := ""
	if rv.Encrypted {
		kmsID = rv.KMS.GetID()
	}
	j, err := volJournal.Connect(rv.Monitors, rv.RadosNamespace, rv.conn.Creds)
	if err != nil {
		return false, err
	}
	defer j.Destroy()
	imageData, err := j.CheckReservation(
		ctx, rv.JournalPool, rv.RequestName, rv.NamePrefix, "", kmsID)
	if err != nil {
		return false, err
	}
	if imageData == nil {
		// No reservation: the volume does not exist yet.
		return false, nil
	}
	rv.ReservedID = imageData.ImageUUID
	rv.RbdImageName = imageData.ImageAttributes.ImageName
	rv.ImageID = imageData.ImageAttributes.ImageID
	// check if topology constraints match what is found
	rv.Topology, err = util.MatchTopologyForPool(rv.TopologyPools, rv.TopologyRequirement,
		imageData.ImagePool)
	if err != nil {
		// TODO check if need any undo operation here, or ErrVolNameConflict
		return false, err
	}
	// update Pool, if it was topology constrained
	if rv.Topology != nil {
		rv.Pool = imageData.ImagePool
	}
	// NOTE: Return volsize should be on-disk volsize, not request vol size, so
	// save it for size checks before fetching image data
	requestSize := rv.VolSize
	// Fetch on-disk image attributes and compare against request
	err = rv.getImageInfo()
	if err != nil {
		if errors.Is(err, ErrImageNotFound) {
			// Need to check cloned info here not on createvolume,
			if parentVol != nil {
				found, cErr := rv.checkCloneImage(ctx, parentVol)
				if found && cErr == nil {
					return true, nil
				}
				if cErr != nil {
					return false, cErr
				}
			}
			// Reservation without a backing image: stale entry, clean it up.
			err = j.UndoReservation(ctx, rv.JournalPool, rv.Pool,
				rv.RbdImageName, rv.RequestName)
			return false, err
		}
		return false, err
	}
	if rv.ImageID == "" {
		// Image ID is missing from the reservation; fetch and persist it.
		err = rv.getImageID()
		if err != nil {
			util.ErrorLog(ctx, "failed to get image id %s: %v", rv, err)
			return false, err
		}
		err = j.StoreImageID(ctx, rv.JournalPool, rv.ReservedID, rv.ImageID, rv.conn.Creds)
		if err != nil {
			util.ErrorLog(ctx, "failed to store volume id %s: %v", rv, err)
			return false, err
		}
	}
	// size checks
	if rv.VolSize < requestSize {
		return false, fmt.Errorf("%w: image with the same name (%s) but with different size already exists",
			ErrVolNameConflict, rv.RbdImageName)
	}
	// TODO: We should also ensure image features and format is the same
	// found a volume already available, process and return it!
	rv.VolID, err = util.GenerateVolID(ctx, rv.Monitors, rv.conn.Creds, imageData.ImagePoolID, rv.Pool,
		rv.ClusterID, rv.ReservedID, volIDVersion)
	if err != nil {
		return false, err
	}
	util.DebugLog(ctx, "found existing volume (%s) with image name (%s) for request (%s)",
		rv.VolID, rv.RbdImageName, rv.RequestName)
	return true, nil
}
// reserveSnap is a helper routine to request a rbdSnapshot name reservation
// and generate the volume ID for the generated name.
func reserveSnap(ctx context.Context, rbdSnap *rbdSnapshot, rbdVol *rbdVolume, cr *util.Credentials) error {
	// The redundant `var (err error)` declaration was dropped; := below
	// declares err along with the pool IDs.
	journalPoolID, imagePoolID, err := util.GetPoolIDs(ctx, rbdSnap.Monitors, rbdSnap.JournalPool, rbdSnap.Pool, cr)
	if err != nil {
		return err
	}
	j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
	if err != nil {
		return err
	}
	defer j.Destroy()
	// Reserve a snapshot name against the source image.
	rbdSnap.ReservedID, rbdSnap.RbdSnapName, err = j.ReserveName(
		ctx, rbdSnap.JournalPool, journalPoolID, rbdSnap.Pool, imagePoolID,
		rbdSnap.RequestName, rbdSnap.NamePrefix, rbdVol.RbdImageName, "")
	if err != nil {
		return err
	}
	// Encode the CSI snapshot ID from the reservation.
	rbdSnap.SnapID, err = util.GenerateVolID(ctx, rbdSnap.Monitors, cr, imagePoolID, rbdSnap.Pool,
		rbdSnap.ClusterID, rbdSnap.ReservedID, volIDVersion)
	if err != nil {
		return err
	}
	util.DebugLog(ctx, "generated Volume ID (%s) and image name (%s) for request name (%s)",
		rbdSnap.SnapID, rbdSnap.RbdSnapName, rbdSnap.RequestName)
	return nil
}
// updateTopologyConstraints updates the volume's Pool (and DataPool/Topology)
// from topology constraints: matched against the snapshot's pool when a
// snapshot source is given, otherwise derived from the topology requirement.
func updateTopologyConstraints(rbdVol *rbdVolume, rbdSnap *rbdSnapshot) error {
	var err error
	if rbdSnap != nil {
		// check if topology constraints matches snapshot pool
		rbdVol.Topology, err = util.MatchTopologyForPool(rbdVol.TopologyPools,
			rbdVol.TopologyRequirement, rbdSnap.Pool)
		if err != nil {
			return err
		}
		// update Pool, if it was topology constrained
		if rbdVol.Topology != nil {
			rbdVol.Pool = rbdSnap.Pool
		}
		return nil
	}
	// update request based on topology constrained parameters (if present)
	poolName, dataPoolName, topology, err := util.FindPoolAndTopology(rbdVol.TopologyPools, rbdVol.TopologyRequirement)
	if err != nil {
		return err
	}
	if poolName != "" {
		rbdVol.Pool = poolName
		rbdVol.DataPool = dataPoolName
		rbdVol.Topology = topology
	}
	return nil
}
// reserveVol is a helper routine to request a rbdVolume name reservation and
// generate the volume ID for the generated name.
func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
	// The redundant `var (err error)` declaration was dropped; := declares err.
	err := updateTopologyConstraints(rbdVol, rbdSnap)
	if err != nil {
		return err
	}
	journalPoolID, imagePoolID, err := util.GetPoolIDs(ctx, rbdVol.Monitors, rbdVol.JournalPool, rbdVol.Pool, cr)
	if err != nil {
		return err
	}
	// Encrypted volumes record their KMS ID in the reservation.
	kmsID := ""
	if rbdVol.Encrypted {
		kmsID = rbdVol.KMS.GetID()
	}
	j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
	if err != nil {
		return err
	}
	defer j.Destroy()
	rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName(
		ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID,
		rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID)
	if err != nil {
		return err
	}
	// Encode the CSI volume ID from the reservation.
	rbdVol.VolID, err = util.GenerateVolID(ctx, rbdVol.Monitors, cr, imagePoolID, rbdVol.Pool,
		rbdVol.ClusterID, rbdVol.ReservedID, volIDVersion)
	if err != nil {
		return err
	}
	util.DebugLog(ctx, "generated Volume ID (%s) and image name (%s) for request name (%s)",
		rbdVol.VolID, rbdVol.RbdImageName, rbdVol.RequestName)
	return nil
}
// undoSnapReservation is a helper routine to undo a name reservation for rbdSnapshot.
func undoSnapReservation(ctx context.Context, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
	j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
	if err != nil {
		return err
	}
	defer j.Destroy()
	// Release the reserved snapshot name directly; the journal handles
	// removing the omap entries.
	return j.UndoReservation(ctx, rbdSnap.JournalPool, rbdSnap.Pool,
		rbdSnap.RbdSnapName, rbdSnap.RequestName)
}
// undoVolReservation is a helper routine to undo a name reservation for rbdVolume.
func undoVolReservation(ctx context.Context, rbdVol *rbdVolume, cr *util.Credentials) error {
	j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
	if err != nil {
		return err
	}
	defer j.Destroy()
	// Release the reserved image name directly; the journal handles removing
	// the omap entries.
	return j.UndoReservation(ctx, rbdVol.JournalPool, rbdVol.Pool,
		rbdVol.RbdImageName, rbdVol.RequestName)
}
|
// Copyright 2020 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
package opa
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"strconv"
"time"
wasm "github.com/wasmerio/go-ext-wasm/wasmer"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/metrics"
"github.com/open-policy-agent/opa/topdown"
"github.com/open-policy-agent/opa/topdown/builtins"
)
// vm wraps a single wasmer instance of a compiled OPA policy together with
// the exported wasm functions and the bookkeeping needed to evaluate it.
type vm struct {
	instance             *wasm.Instance // Pointer to avoid unintended destruction (triggering finalizers within).
	policy               []byte
	data                 []byte
	memory               *wasm.Memory
	memoryMin            uint32
	memoryMax            uint32
	bctx                 *topdown.BuiltinContext // lazily created per evaluation; see Builtin and Eval
	builtins             map[int32]topdown.BuiltinFunc
	builtinResult        *ast.Term // result of the last builtin call, delivered via iter
	entrypointIDs        map[string]EntrypointID
	baseHeapPtr          int32 // heap pointer right after instantiation
	dataAddr             int32 // address of the data document in wasm memory (0 if none)
	evalHeapPtr          int32 // heap pointer after data loading; Eval resets to this
	eval                 func(...interface{}) (wasm.Value, error)
	evalCtxGetResult     func(...interface{}) (wasm.Value, error)
	evalCtxNew           func(...interface{}) (wasm.Value, error)
	evalCtxSetData       func(...interface{}) (wasm.Value, error)
	evalCtxSetInput      func(...interface{}) (wasm.Value, error)
	evalCtxSetEntrypoint func(...interface{}) (wasm.Value, error)
	heapPtrGet           func(...interface{}) (wasm.Value, error)
	heapPtrSet           func(...interface{}) (wasm.Value, error)
	heapTopGet           func(...interface{}) (wasm.Value, error)
	heapTopSet           func(...interface{}) (wasm.Value, error)
	jsonDump             func(...interface{}) (wasm.Value, error)
	jsonParse            func(...interface{}) (wasm.Value, error)
	valueDump            func(...interface{}) (wasm.Value, error)
	valueParse           func(...interface{}) (wasm.Value, error)
	malloc               func(...interface{}) (wasm.Value, error)
	free                 func(...interface{}) (wasm.Value, error)
	valueAddPath         func(...interface{}) (wasm.Value, error)
	valueRemovePath      func(...interface{}) (wasm.Value, error)
}
// vmOpts carries the construction parameters for a vm. Either parsedData
// (with parsedDataAddr, used when cloning a vm) or data (raw JSON) may be
// supplied; parsedData takes precedence.
type vmOpts struct {
	policy         []byte
	data           []byte
	parsedData     []byte
	parsedDataAddr int32
	memoryMin      uint32
	memoryMax      uint32
}
// newVM creates a fresh wasm VM for the given policy, optionally loading
// either a pre-parsed data segment (fast clone path) or raw JSON data, and
// builds the builtin and entrypoint lookup tables.
func newVM(opts vmOpts) (*vm, error) {
	memory, err := wasm.NewMemory(opts.memoryMin, opts.memoryMax)
	if err != nil {
		return nil, err
	}
	imports, err := opaFunctions(wasm.NewImports())
	if err != nil {
		return nil, err
	}
	imports, err = imports.AppendMemory("memory", memory)
	if err != nil {
		// Return the error instead of panicking so callers can handle it
		// like every other construction failure.
		return nil, err
	}
	i, err := wasm.NewInstanceWithImports(opts.policy, imports)
	if err != nil {
		return nil, err
	}
	v := &vm{
		instance:             &i,
		policy:               opts.policy,
		memory:               memory,
		memoryMin:            opts.memoryMin,
		memoryMax:            opts.memoryMax,
		builtins:             make(map[int32]topdown.BuiltinFunc),
		entrypointIDs:        make(map[string]EntrypointID),
		dataAddr:             0,
		eval:                 i.Exports["eval"],
		evalCtxGetResult:     i.Exports["opa_eval_ctx_get_result"],
		evalCtxNew:           i.Exports["opa_eval_ctx_new"],
		evalCtxSetData:       i.Exports["opa_eval_ctx_set_data"],
		evalCtxSetInput:      i.Exports["opa_eval_ctx_set_input"],
		evalCtxSetEntrypoint: i.Exports["opa_eval_ctx_set_entrypoint"],
		free:                 i.Exports["opa_free"],
		heapPtrGet:           i.Exports["opa_heap_ptr_get"],
		heapPtrSet:           i.Exports["opa_heap_ptr_set"],
		heapTopGet:           i.Exports["opa_heap_top_get"],
		heapTopSet:           i.Exports["opa_heap_top_set"],
		jsonDump:             i.Exports["opa_json_dump"],
		jsonParse:            i.Exports["opa_json_parse"],
		valueDump:            i.Exports["opa_value_dump"],
		valueParse:           i.Exports["opa_value_parse"],
		malloc:               i.Exports["opa_malloc"],
		valueAddPath:         i.Exports["opa_value_add_path"],
		valueRemovePath:      i.Exports["opa_value_remove_path"],
	}
	// Initialize the heap.
	if _, err := v.malloc(0); err != nil {
		return nil, err
	}
	if v.baseHeapPtr, err = v.getHeapState(); err != nil {
		return nil, err
	}
	// Optimization for cloning a vm, if provided a parsed data memory buffer
	// insert it directly into the new vm's buffer and set pointers accordingly.
	// This only works because the placement is deterministic (eg, for a given policy
	// the base heap pointer and parsed data layout will always be the same).
	if opts.parsedData != nil {
		if memory.Length()-uint32(v.baseHeapPtr) < uint32(len(opts.parsedData)) {
			delta := uint32(len(opts.parsedData)) - (memory.Length() - uint32(v.baseHeapPtr))
			err := memory.Grow(pages(delta))
			if err != nil {
				return nil, err
			}
		}
		mem := memory.Data()
		for src, dest := 0, v.baseHeapPtr; src < len(opts.parsedData); src, dest = src+1, dest+1 {
			mem[dest] = opts.parsedData[src]
		}
		v.dataAddr = opts.parsedDataAddr
		v.evalHeapPtr = v.baseHeapPtr + int32(len(opts.parsedData))
		err := v.setHeapState(v.evalHeapPtr)
		if err != nil {
			return nil, err
		}
	} else if opts.data != nil {
		if v.dataAddr, err = v.toRegoJSON(opts.data, true); err != nil {
			return nil, err
		}
	}
	if v.evalHeapPtr, err = v.getHeapState(); err != nil {
		return nil, err
	}
	// For the opa builtin functions to access the instance.
	i.SetContextData(v)
	// Construct the builtin id to name mappings.
	val, err := i.Exports["builtins"]()
	if err != nil {
		return nil, err
	}
	builtins, err := v.fromRegoJSON(val.ToI32(), true)
	if err != nil {
		return nil, err
	}
	for name, id := range builtins.(map[string]interface{}) {
		f := topdown.GetBuiltin(name)
		if f == nil {
			return nil, fmt.Errorf("builtin '%s' not found", name)
		}
		n, err := id.(json.Number).Int64()
		if err != nil {
			// Return instead of panicking: a malformed builtin table is a
			// construction failure the caller should see as an error.
			return nil, err
		}
		v.builtins[int32(n)] = f
	}
	// Extract the entrypoint ID's
	val, err = i.Exports["entrypoints"]()
	if err != nil {
		return nil, err
	}
	epMap, err := v.fromRegoJSON(val.ToI32(), true)
	if err != nil {
		return nil, err
	}
	for ep, value := range epMap.(map[string]interface{}) {
		id, err := value.(json.Number).Int64()
		if err != nil {
			return nil, err
		}
		v.entrypointIDs[ep] = EntrypointID(id)
	}
	return v, nil
}
// Eval performs an evaluation of the specified entrypoint, with any provided
// input, and returns the resulting value dumped to a string.
func (i *vm) Eval(ctx context.Context, entrypoint EntrypointID, input *interface{}, metrics metrics.Metrics) ([]byte, error) {
	metrics.Timer("wasm_vm_eval").Start()
	defer metrics.Timer("wasm_vm_eval").Stop()
	metrics.Timer("wasm_vm_eval_prepare_input").Start()
	// Reset the heap so each evaluation starts from the same state; any
	// allocations made below are discarded on the next Eval.
	err := i.setHeapState(i.evalHeapPtr)
	if err != nil {
		return nil, err
	}
	// The builtin context is lazily created per evaluation (see Builtin);
	// clear it when this evaluation is done.
	defer func() {
		i.bctx = nil
	}()
	// Parse the input JSON and activate it with the data.
	addr, err := i.evalCtxNew()
	if err != nil {
		return nil, err
	}
	ctxAddr := addr.ToI32()
	if i.dataAddr != 0 {
		if _, err := i.evalCtxSetData(ctxAddr, i.dataAddr); err != nil {
			return nil, err
		}
	}
	_, err = i.evalCtxSetEntrypoint(ctxAddr, int32(entrypoint))
	if err != nil {
		return nil, err
	}
	if input != nil {
		// The input is deliberately not freed; the heap reset reclaims it.
		inputAddr, err := i.toRegoJSON(*input, false)
		if err != nil {
			return nil, err
		}
		if _, err := i.evalCtxSetInput(ctxAddr, inputAddr); err != nil {
			return nil, err
		}
	}
	metrics.Timer("wasm_vm_eval_prepare_input").Stop()
	// Evaluate the policy.
	metrics.Timer("wasm_vm_eval_execute").Start()
	// Abort and Builtin report failures by panicking; recover them here and
	// surface them through the captured err variable.
	func() {
		defer func() {
			if e := recover(); e != nil {
				switch e := e.(type) {
				case abortError:
					err = errors.New(e.message)
				case builtinError:
					err = e.err
				default:
					panic(e)
				}
			}
		}()
		_, err = i.eval(ctxAddr)
	}()
	metrics.Timer("wasm_vm_eval_execute").Stop()
	if err != nil {
		return nil, err
	}
	metrics.Timer("wasm_vm_eval_prepare_result").Start()
	resultAddr, err := i.evalCtxGetResult(ctxAddr)
	if err != nil {
		return nil, err
	}
	serialized, err := i.valueDump(resultAddr)
	if err != nil {
		return nil, err
	}
	// The dumped value is a NUL-terminated string in linear memory.
	data := i.memory.Data()[serialized.ToI32():]
	n := bytes.IndexByte(data, 0)
	if n < 0 {
		n = 0
	}
	metrics.Timer("wasm_vm_eval_prepare_result").Stop()
	// Skip free'ing input and result JSON as the heap will be reset next round anyway.
	return data[0:n], err
}
// SetPolicyData resets the VM with the given policy and data. If the policy
// differs from the current one a whole new VM is constructed and swapped in;
// otherwise the heap is reset and the data re-initialized in place.
func (i *vm) SetPolicyData(opts vmOpts) error {
	if !bytes.Equal(opts.policy, i.policy) {
		// Swap the instance to a new one, with new policy.
		n, err := newVM(opts)
		if err != nil {
			return err
		}
		i.Close()
		*i = *n
		return nil
	}
	i.dataAddr = 0
	var err error
	if err = i.setHeapState(i.baseHeapPtr); err != nil {
		return err
	}
	if opts.parsedData != nil {
		// Copy a pre-parsed data segment directly into linear memory,
		// growing it first if required (mirrors the logic in newVM).
		if i.memory.Length()-uint32(i.baseHeapPtr) < uint32(len(opts.parsedData)) {
			delta := uint32(len(opts.parsedData)) - (i.memory.Length() - uint32(i.baseHeapPtr))
			err := i.memory.Grow(pages(delta))
			if err != nil {
				return err
			}
		}
		mem := i.memory.Data()
		for src, dest := 0, i.baseHeapPtr; src < len(opts.parsedData); src, dest = src+1, dest+1 {
			mem[dest] = opts.parsedData[src]
		}
		i.dataAddr = opts.parsedDataAddr
		i.evalHeapPtr = i.baseHeapPtr + int32(len(opts.parsedData))
		err := i.setHeapState(i.evalHeapPtr)
		if err != nil {
			return err
		}
	} else if opts.data != nil {
		// Parse raw JSON data into the VM heap.
		if i.dataAddr, err = i.toRegoJSON(opts.data, true); err != nil {
			return err
		}
	}
	// Everything allocated so far survives the heap resets done by Eval.
	if i.evalHeapPtr, err = i.getHeapState(); err != nil {
		return err
	}
	return nil
}
// Close releases the wasm memory and instance. The vm must not be used
// after Close returns.
func (i *vm) Close() {
	i.memory.Close()
	i.instance.Close()
}
// abortError carries the message of a policy abort; it is panicked by Abort
// and recovered by Eval.
type abortError struct {
	message string
}
// Abort is invoked by the policy if an internal error occurs during
// the policy execution.
func (i *vm) Abort(arg int32) {
	// arg points at a NUL-terminated message in linear memory.
	msg := i.memory.Data()[arg:]
	end := bytes.IndexByte(msg, 0)
	if end < 0 {
		panic("invalid abort argument")
	}
	panic(abortError{message: string(msg[:end])})
}
// builtinError wraps an error from a builtin call; it is panicked by Builtin
// and recovered by Eval.
type builtinError struct {
	err error
}
// Builtin executes a builtin for the policy.
// It is called from within the wasm policy; failures are reported by
// panicking with builtinError, which Eval recovers and converts to an error.
func (i *vm) Builtin(builtinID, ctx int32, args ...int32) int32 {
	// TODO: Returning proper errors instead of panicing.
	// TODO: To avoid growing the heap with every built-in call, recycle the JSON buffers since the free implementation is no-op.
	// Convert wasm value addresses into AST terms for topdown.
	convertedArgs := make([]*ast.Term, len(args))
	for j, arg := range args {
		x, err := i.fromRegoJSON(arg, true)
		if err != nil {
			panic(builtinError{err: err})
		}
		y, err := ast.InterfaceToValue(x)
		if err != nil {
			panic(builtinError{err: err})
		}
		convertedArgs[j] = ast.NewTerm(y)
	}
	// Lazily create one builtin context per evaluation; Eval clears it.
	if i.bctx == nil {
		i.bctx = &topdown.BuiltinContext{
			Context:  context.Background(),
			Cancel:   nil,
			Runtime:  nil,
			Time:     ast.NumberTerm(json.Number(strconv.FormatInt(time.Now().UnixNano(), 10))),
			Metrics:  metrics.New(),
			Cache:    make(builtins.Cache),
			Location: nil,
			Tracers:  nil,
			QueryID:  0,
			ParentID: 0,
		}
	}
	// The builtin delivers its result through i.iter into i.builtinResult.
	err := i.builtins[builtinID](*i.bctx, convertedArgs, i.iter)
	if err != nil {
		panic(builtinError{err: err})
	}
	result, err := ast.JSON(i.builtinResult.Value)
	if err != nil {
		panic(builtinError{err: err})
	}
	addr, err := i.toRegoJSON(result, true)
	if err != nil {
		panic(builtinError{err: err})
	}
	return addr
}
// Entrypoints returns a mapping of entrypoint name to ID for use by Eval().
// The returned map is the vm's internal map; callers must not mutate it.
func (i *vm) Entrypoints() map[string]EntrypointID {
	return i.entrypointIDs
}
// SetDataPath writes value into the VM's data document at the given path.
func (i *vm) SetDataPath(path []string, value interface{}) error {
	// Reset the heap ptr before patching the vm to try and keep any
	// new allocations safe from subsequent heap resets on eval.
	err := i.setHeapState(i.evalHeapPtr)
	if err != nil {
		return err
	}
	valueAddr, err := i.toRegoJSON(value, true)
	if err != nil {
		return err
	}
	pathAddr, err := i.toRegoJSON(path, true)
	if err != nil {
		return err
	}
	result, err := i.valueAddPath(i.dataAddr, pathAddr, valueAddr)
	if err != nil {
		return err
	}
	// We don't need to free the value, assume it is "owned" as part of the
	// overall data object now.
	// We do need to free the path
	_, err = i.free(pathAddr)
	if err != nil {
		return err
	}
	// Update the eval heap pointer to accommodate for any new allocations done
	// while patching.
	i.evalHeapPtr, err = i.getHeapState()
	if err != nil {
		return err
	}
	// A non-zero result code from opa_value_add_path signals failure.
	errc := result.ToI32()
	if errc != 0 {
		return fmt.Errorf("unable to set data value for path %v, err=%d", path, errc)
	}
	return nil
}
// RemoveDataPath removes the value at the given path from the VM's data
// document.
func (i *vm) RemoveDataPath(path []string) error {
	pathAddr, err := i.toRegoJSON(path, true)
	if err != nil {
		return err
	}
	// NOTE(review): unlike SetDataPath, the path value is not freed after the
	// call — confirm whether it should be.
	result, err := i.valueRemovePath(i.dataAddr, pathAddr)
	if err != nil {
		return err
	}
	errc := result.ToI32()
	if errc != 0 {
		// The message previously said "set"; this is the remove operation.
		return fmt.Errorf("unable to remove data value for path %v, err=%d", path, errc)
	}
	return nil
}
// iter is the topdown result iterator passed to builtin calls; it captures
// the builtin's result for Builtin to convert and return.
func (i *vm) iter(result *ast.Term) error {
	i.builtinResult = result
	return nil
}
// fromRegoJSON converts Rego JSON to go native JSON.
// addr is the address of an opa value in wasm memory; when free is true the
// serialized buffer is released after decoding.
func (i *vm) fromRegoJSON(addr int32, free bool) (interface{}, error) {
	serialized, err := i.jsonDump(addr)
	if err != nil {
		return nil, err
	}
	// jsonDump returns a NUL-terminated string in linear memory.
	data := i.memory.Data()[serialized.ToI32():]
	n := bytes.IndexByte(data, 0)
	if n < 0 {
		n = 0
	}
	// Parse the result into go types.
	// UseNumber keeps numbers as json.Number instead of lossy float64.
	decoder := json.NewDecoder(bytes.NewReader(data[0:n]))
	decoder.UseNumber()
	var result interface{}
	if err := decoder.Decode(&result); err != nil {
		return nil, err
	}
	if free {
		if _, err := i.free(serialized.ToI32()); err != nil {
			return nil, err
		}
	}
	return result, nil
}
// toRegoJSON converts go native JSON to Rego JSON.
// v may already be serialized JSON ([]byte); otherwise it is marshaled.
// Returns the address of the parsed opa value in wasm memory. When free is
// true the intermediate serialization buffer is released after parsing.
func (i *vm) toRegoJSON(v interface{}, free bool) (int32, error) {
	raw, ok := v.([]byte)
	if !ok {
		var err error
		raw, err = json.Marshal(v)
		if err != nil {
			return 0, err
		}
	}
	// Copy the serialized bytes into wasm memory and parse them there.
	n := int32(len(raw))
	pos, err := i.malloc(n)
	if err != nil {
		return 0, err
	}
	p := pos.ToI32()
	copy(i.memory.Data()[p:p+n], raw)
	addr, err := i.valueParse(p, n)
	if err != nil {
		return 0, err
	}
	if free {
		if _, err := i.free(p); err != nil {
			return 0, err
		}
	}
	return addr.ToI32(), nil
}
// getHeapState returns the current wasm heap pointer.
func (i *vm) getHeapState() (int32, error) {
	v, err := i.heapPtrGet()
	if err != nil {
		return 0, err
	}
	return v.ToI32(), nil
}
// setHeapState moves the wasm heap pointer to ptr.
func (i *vm) setHeapState(ptr int32) error {
	if _, err := i.heapPtrSet(ptr); err != nil {
		return err
	}
	return nil
}
// cloneDataSegment copies out the parsed data segment and its address so a
// new vm can be seeded without re-parsing (see vmOpts.parsedData).
func (i *vm) cloneDataSegment() (int32, []byte) {
	// The parsed data values sit between the base heap address and end
	// at the eval heap pointer address.
	srcData := i.memory.Data()[i.baseHeapPtr:i.evalHeapPtr]
	patchedData := make([]byte, len(srcData))
	copy(patchedData, srcData)
	return i.dataAddr, patchedData
}
internal/wasm/sdk: Stringify ast values instead of JSON marshal
This allows preserving set values, which would otherwise be
converted into arrays by the JSON marshal.
Signed-off-by: Patrick East <1a015b2c287ee8e96f2ec59ce4953b89b37de4a8@gmail.com>
// Copyright 2020 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
package opa
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"strconv"
"time"
wasm "github.com/wasmerio/go-ext-wasm/wasmer"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/metrics"
"github.com/open-policy-agent/opa/topdown"
"github.com/open-policy-agent/opa/topdown/builtins"
)
// vm wraps a single wasmer instance of a compiled OPA policy together with
// the exported wasm functions and the bookkeeping needed to evaluate it.
type vm struct {
	instance             *wasm.Instance // Pointer to avoid unintended destruction (triggering finalizers within).
	policy               []byte
	data                 []byte
	memory               *wasm.Memory
	memoryMin            uint32
	memoryMax            uint32
	bctx                 *topdown.BuiltinContext // lazily created per evaluation; see Builtin and Eval
	builtins             map[int32]topdown.BuiltinFunc
	builtinResult        *ast.Term // result of the last builtin call, delivered via iter
	entrypointIDs        map[string]EntrypointID
	baseHeapPtr          int32 // heap pointer right after instantiation
	dataAddr             int32 // address of the data document in wasm memory (0 if none)
	evalHeapPtr          int32 // heap pointer after data loading; Eval resets to this
	eval                 func(...interface{}) (wasm.Value, error)
	evalCtxGetResult     func(...interface{}) (wasm.Value, error)
	evalCtxNew           func(...interface{}) (wasm.Value, error)
	evalCtxSetData       func(...interface{}) (wasm.Value, error)
	evalCtxSetInput      func(...interface{}) (wasm.Value, error)
	evalCtxSetEntrypoint func(...interface{}) (wasm.Value, error)
	heapPtrGet           func(...interface{}) (wasm.Value, error)
	heapPtrSet           func(...interface{}) (wasm.Value, error)
	heapTopGet           func(...interface{}) (wasm.Value, error)
	heapTopSet           func(...interface{}) (wasm.Value, error)
	jsonDump             func(...interface{}) (wasm.Value, error)
	jsonParse            func(...interface{}) (wasm.Value, error)
	valueDump            func(...interface{}) (wasm.Value, error)
	valueParse           func(...interface{}) (wasm.Value, error)
	malloc               func(...interface{}) (wasm.Value, error)
	free                 func(...interface{}) (wasm.Value, error)
	valueAddPath         func(...interface{}) (wasm.Value, error)
	valueRemovePath      func(...interface{}) (wasm.Value, error)
}
// vmOpts carries the construction parameters for a vm. Either parsedData
// (with parsedDataAddr, used when cloning a vm) or data (raw JSON) may be
// supplied; parsedData takes precedence.
type vmOpts struct {
	policy         []byte
	data           []byte
	parsedData     []byte
	parsedDataAddr int32
	memoryMin      uint32
	memoryMax      uint32
}
// newVM creates a fresh wasm VM for the given policy, optionally loading
// either a pre-parsed data segment (fast clone path) or raw JSON data, and
// builds the builtin and entrypoint lookup tables.
func newVM(opts vmOpts) (*vm, error) {
	memory, err := wasm.NewMemory(opts.memoryMin, opts.memoryMax)
	if err != nil {
		return nil, err
	}
	imports, err := opaFunctions(wasm.NewImports())
	if err != nil {
		return nil, err
	}
	imports, err = imports.AppendMemory("memory", memory)
	if err != nil {
		// Return the error instead of panicking so callers can handle it
		// like every other construction failure.
		return nil, err
	}
	i, err := wasm.NewInstanceWithImports(opts.policy, imports)
	if err != nil {
		return nil, err
	}
	v := &vm{
		instance:             &i,
		policy:               opts.policy,
		memory:               memory,
		memoryMin:            opts.memoryMin,
		memoryMax:            opts.memoryMax,
		builtins:             make(map[int32]topdown.BuiltinFunc),
		entrypointIDs:        make(map[string]EntrypointID),
		dataAddr:             0,
		eval:                 i.Exports["eval"],
		evalCtxGetResult:     i.Exports["opa_eval_ctx_get_result"],
		evalCtxNew:           i.Exports["opa_eval_ctx_new"],
		evalCtxSetData:       i.Exports["opa_eval_ctx_set_data"],
		evalCtxSetInput:      i.Exports["opa_eval_ctx_set_input"],
		evalCtxSetEntrypoint: i.Exports["opa_eval_ctx_set_entrypoint"],
		free:                 i.Exports["opa_free"],
		heapPtrGet:           i.Exports["opa_heap_ptr_get"],
		heapPtrSet:           i.Exports["opa_heap_ptr_set"],
		heapTopGet:           i.Exports["opa_heap_top_get"],
		heapTopSet:           i.Exports["opa_heap_top_set"],
		jsonDump:             i.Exports["opa_json_dump"],
		jsonParse:            i.Exports["opa_json_parse"],
		valueDump:            i.Exports["opa_value_dump"],
		valueParse:           i.Exports["opa_value_parse"],
		malloc:               i.Exports["opa_malloc"],
		valueAddPath:         i.Exports["opa_value_add_path"],
		valueRemovePath:      i.Exports["opa_value_remove_path"],
	}
	// Initialize the heap.
	if _, err := v.malloc(0); err != nil {
		return nil, err
	}
	if v.baseHeapPtr, err = v.getHeapState(); err != nil {
		return nil, err
	}
	// Optimization for cloning a vm, if provided a parsed data memory buffer
	// insert it directly into the new vm's buffer and set pointers accordingly.
	// This only works because the placement is deterministic (eg, for a given policy
	// the base heap pointer and parsed data layout will always be the same).
	if opts.parsedData != nil {
		if memory.Length()-uint32(v.baseHeapPtr) < uint32(len(opts.parsedData)) {
			delta := uint32(len(opts.parsedData)) - (memory.Length() - uint32(v.baseHeapPtr))
			err := memory.Grow(pages(delta))
			if err != nil {
				return nil, err
			}
		}
		mem := memory.Data()
		for src, dest := 0, v.baseHeapPtr; src < len(opts.parsedData); src, dest = src+1, dest+1 {
			mem[dest] = opts.parsedData[src]
		}
		v.dataAddr = opts.parsedDataAddr
		v.evalHeapPtr = v.baseHeapPtr + int32(len(opts.parsedData))
		err := v.setHeapState(v.evalHeapPtr)
		if err != nil {
			return nil, err
		}
	} else if opts.data != nil {
		if v.dataAddr, err = v.toRegoJSON(opts.data, true); err != nil {
			return nil, err
		}
	}
	if v.evalHeapPtr, err = v.getHeapState(); err != nil {
		return nil, err
	}
	// For the opa builtin functions to access the instance.
	i.SetContextData(v)
	// Construct the builtin id to name mappings.
	val, err := i.Exports["builtins"]()
	if err != nil {
		return nil, err
	}
	builtins, err := v.fromRegoJSON(val.ToI32(), true)
	if err != nil {
		return nil, err
	}
	for name, id := range builtins.(map[string]interface{}) {
		f := topdown.GetBuiltin(name)
		if f == nil {
			return nil, fmt.Errorf("builtin '%s' not found", name)
		}
		n, err := id.(json.Number).Int64()
		if err != nil {
			// Return instead of panicking: a malformed builtin table is a
			// construction failure the caller should see as an error.
			return nil, err
		}
		v.builtins[int32(n)] = f
	}
	// Extract the entrypoint ID's
	val, err = i.Exports["entrypoints"]()
	if err != nil {
		return nil, err
	}
	epMap, err := v.fromRegoJSON(val.ToI32(), true)
	if err != nil {
		return nil, err
	}
	for ep, value := range epMap.(map[string]interface{}) {
		id, err := value.(json.Number).Int64()
		if err != nil {
			return nil, err
		}
		v.entrypointIDs[ep] = EntrypointID(id)
	}
	return v, nil
}
// Eval performs an evaluation of the specified entrypoint, with any provided
// input, and returns the resulting value dumped to a string.
func (i *vm) Eval(ctx context.Context, entrypoint EntrypointID, input *interface{}, metrics metrics.Metrics) ([]byte, error) {
	metrics.Timer("wasm_vm_eval").Start()
	defer metrics.Timer("wasm_vm_eval").Stop()
	metrics.Timer("wasm_vm_eval_prepare_input").Start()
	// Reset the heap so each evaluation starts from the same state; any
	// allocations made below are discarded on the next Eval.
	err := i.setHeapState(i.evalHeapPtr)
	if err != nil {
		return nil, err
	}
	// The builtin context is lazily created per evaluation (see Builtin);
	// clear it when this evaluation is done.
	defer func() {
		i.bctx = nil
	}()
	// Parse the input JSON and activate it with the data.
	addr, err := i.evalCtxNew()
	if err != nil {
		return nil, err
	}
	ctxAddr := addr.ToI32()
	if i.dataAddr != 0 {
		if _, err := i.evalCtxSetData(ctxAddr, i.dataAddr); err != nil {
			return nil, err
		}
	}
	_, err = i.evalCtxSetEntrypoint(ctxAddr, int32(entrypoint))
	if err != nil {
		return nil, err
	}
	if input != nil {
		// The input is deliberately not freed; the heap reset reclaims it.
		inputAddr, err := i.toRegoJSON(*input, false)
		if err != nil {
			return nil, err
		}
		if _, err := i.evalCtxSetInput(ctxAddr, inputAddr); err != nil {
			return nil, err
		}
	}
	metrics.Timer("wasm_vm_eval_prepare_input").Stop()
	// Evaluate the policy.
	metrics.Timer("wasm_vm_eval_execute").Start()
	// Abort and Builtin report failures by panicking; recover them here and
	// surface them through the captured err variable.
	func() {
		defer func() {
			if e := recover(); e != nil {
				switch e := e.(type) {
				case abortError:
					err = errors.New(e.message)
				case builtinError:
					err = e.err
				default:
					panic(e)
				}
			}
		}()
		_, err = i.eval(ctxAddr)
	}()
	metrics.Timer("wasm_vm_eval_execute").Stop()
	if err != nil {
		return nil, err
	}
	metrics.Timer("wasm_vm_eval_prepare_result").Start()
	resultAddr, err := i.evalCtxGetResult(ctxAddr)
	if err != nil {
		return nil, err
	}
	serialized, err := i.valueDump(resultAddr)
	if err != nil {
		return nil, err
	}
	// The dumped value is a NUL-terminated string in linear memory.
	data := i.memory.Data()[serialized.ToI32():]
	n := bytes.IndexByte(data, 0)
	if n < 0 {
		n = 0
	}
	metrics.Timer("wasm_vm_eval_prepare_result").Stop()
	// Skip free'ing input and result JSON as the heap will be reset next round anyway.
	return data[0:n], err
}
// SetPolicyData resets the VM with the given policy and data. If the policy
// differs from the current one a whole new VM is constructed and swapped in;
// otherwise the heap is reset and the data re-initialized in place.
func (i *vm) SetPolicyData(opts vmOpts) error {
	if !bytes.Equal(opts.policy, i.policy) {
		// Swap the instance to a new one, with new policy.
		n, err := newVM(opts)
		if err != nil {
			return err
		}
		i.Close()
		*i = *n
		return nil
	}
	i.dataAddr = 0
	var err error
	if err = i.setHeapState(i.baseHeapPtr); err != nil {
		return err
	}
	if opts.parsedData != nil {
		// Copy a pre-parsed data segment directly into linear memory,
		// growing it first if required (mirrors the logic in newVM).
		if i.memory.Length()-uint32(i.baseHeapPtr) < uint32(len(opts.parsedData)) {
			delta := uint32(len(opts.parsedData)) - (i.memory.Length() - uint32(i.baseHeapPtr))
			err := i.memory.Grow(pages(delta))
			if err != nil {
				return err
			}
		}
		mem := i.memory.Data()
		for src, dest := 0, i.baseHeapPtr; src < len(opts.parsedData); src, dest = src+1, dest+1 {
			mem[dest] = opts.parsedData[src]
		}
		i.dataAddr = opts.parsedDataAddr
		i.evalHeapPtr = i.baseHeapPtr + int32(len(opts.parsedData))
		err := i.setHeapState(i.evalHeapPtr)
		if err != nil {
			return err
		}
	} else if opts.data != nil {
		// Parse raw JSON data into the VM heap.
		if i.dataAddr, err = i.toRegoJSON(opts.data, true); err != nil {
			return err
		}
	}
	// Everything allocated so far survives the heap resets done by Eval.
	if i.evalHeapPtr, err = i.getHeapState(); err != nil {
		return err
	}
	return nil
}
// Close releases the wasm linear memory and the instance backing this vm.
// The vm must not be used after Close returns.
func (i *vm) Close() {
	i.memory.Close()
	i.instance.Close()
}
// abortError carries the message passed to Abort by the policy. It is
// raised via panic and recovered during evaluation, where it is converted
// into a regular error.
type abortError struct {
	message string
}
// Abort is invoked by the policy if an internal error occurs during
// the policy execution. The argument is the wasm address of a
// NUL-terminated message; Abort panics with an abortError carrying it.
func (i *vm) Abort(arg int32) {
	mem := i.memory.Data()[arg:]
	end := bytes.IndexByte(mem, 0)
	if end < 0 {
		panic("invalid abort argument")
	}
	panic(abortError{message: string(mem[:end])})
}
// builtinError wraps an error produced while executing a built-in. It is
// raised via panic inside Builtin and recovered during evaluation.
type builtinError struct {
	err error
}
// Builtin executes a builtin for the policy.
//
// The args are wasm addresses of Rego JSON values; the returned value is
// the wasm address of the builtin's result serialized back to Rego JSON.
// Any failure is reported by panicking with builtinError, which the
// evaluation path recovers.
//
// TODO: Returning proper errors instead of panicing.
// TODO: To avoid growing the heap with every built-in call, recycle the JSON buffers since the free implementation is no-op.
func (i *vm) Builtin(builtinID, ctx int32, args ...int32) int32 {
	// Convert each wasm argument into an AST term usable by topdown.
	convertedArgs := make([]*ast.Term, len(args))
	for j, arg := range args {
		x, err := i.fromRegoJSON(arg, true)
		if err != nil {
			panic(builtinError{err: err})
		}
		y, err := ast.InterfaceToValue(x)
		if err != nil {
			panic(builtinError{err: err})
		}
		convertedArgs[j] = ast.NewTerm(y)
	}
	// Lazily construct a builtin context on first use; it is reused for
	// subsequent builtin calls on this vm.
	if i.bctx == nil {
		i.bctx = &topdown.BuiltinContext{
			Context:  context.Background(),
			Cancel:   nil,
			Runtime:  nil,
			Time:     ast.NumberTerm(json.Number(strconv.FormatInt(time.Now().UnixNano(), 10))),
			Metrics:  metrics.New(),
			Cache:    make(builtins.Cache),
			Location: nil,
			Tracers:  nil,
			QueryID:  0,
			ParentID: 0,
		}
	}
	// The builtin delivers its result through i.iter, which stores it in
	// i.builtinResult.
	err := i.builtins[builtinID](*i.bctx, convertedArgs, i.iter)
	if err != nil {
		panic(builtinError{err: err})
	}
	result, err := ast.JSON(i.builtinResult.Value)
	if err != nil {
		panic(builtinError{err: err})
	}
	// Serialize the result back onto the wasm heap and hand its address
	// to the policy.
	addr, err := i.toRegoJSON(result, true)
	if err != nil {
		panic(builtinError{err: err})
	}
	return addr
}
// Entrypoints returns a mapping of entrypoint name to ID for use by Eval().
// The returned map is the vm's internal map, not a copy; callers must not
// mutate it.
func (i *vm) Entrypoints() map[string]EntrypointID {
	return i.entrypointIDs
}
// SetDataPath inserts value at the given path into the currently loaded
// data document, growing the persistent heap region to cover the new
// allocations. A non-zero error code from the wasm side is surfaced as an
// error.
func (i *vm) SetDataPath(path []string, value interface{}) error {
	// Reset the heap ptr before patching the vm to try and keep any
	// new allocations safe from subsequent heap resets on eval.
	err := i.setHeapState(i.evalHeapPtr)
	if err != nil {
		return err
	}
	valueAddr, err := i.toRegoJSON(value, true)
	if err != nil {
		return err
	}
	pathAddr, err := i.toRegoJSON(path, true)
	if err != nil {
		return err
	}
	result, err := i.valueAddPath(i.dataAddr, pathAddr, valueAddr)
	if err != nil {
		return err
	}
	// We don't need to free the value, assume it is "owned" as part of the
	// overall data object now.
	// We do need to free the path
	_, err = i.free(pathAddr)
	if err != nil {
		return err
	}
	// Update the eval heap pointer to accommodate for any new allocations done
	// while patching.
	i.evalHeapPtr, err = i.getHeapState()
	if err != nil {
		return err
	}
	errc := result.ToI32()
	if errc != 0 {
		return fmt.Errorf("unable to set data value for path %v, err=%d", path, errc)
	}
	return nil
}
// RemoveDataPath removes the value at the given path from the currently
// loaded data document. A non-zero error code from the wasm side is
// surfaced as an error.
func (i *vm) RemoveDataPath(path []string) error {
	pathAddr, err := i.toRegoJSON(path, true)
	if err != nil {
		return err
	}
	result, err := i.valueRemovePath(i.dataAddr, pathAddr)
	if err != nil {
		return err
	}
	// The serialized path is only needed for the duration of the call.
	// Free it, mirroring SetDataPath, so it does not linger on the wasm
	// heap.
	if _, err := i.free(pathAddr); err != nil {
		return err
	}
	errc := result.ToI32()
	if errc != 0 {
		// Fixed copy-pasted message: this is a remove, not a set.
		return fmt.Errorf("unable to remove data value for path %v, err=%d", path, errc)
	}
	return nil
}
// iter is the iterator callback handed to builtins. It captures the
// builtin's result term so Builtin can serialize it back to the policy.
func (i *vm) iter(result *ast.Term) error {
	i.builtinResult = result
	return nil
}
// fromRegoJSON converts Rego JSON to go native JSON. The value at addr is
// serialized to a NUL-terminated JSON string on the wasm heap and decoded
// into native go types. If free is true, the serialization buffer is
// released afterwards.
func (i *vm) fromRegoJSON(addr int32, free bool) (interface{}, error) {
	serialized, err := i.jsonDump(addr)
	if err != nil {
		return nil, err
	}
	start := serialized.ToI32()
	buf := i.memory.Data()[start:]
	end := bytes.IndexByte(buf, 0)
	if end < 0 {
		end = 0
	}
	// Decode into native go types, keeping numbers as json.Number to
	// avoid float64 precision loss.
	var result interface{}
	dec := json.NewDecoder(bytes.NewReader(buf[:end]))
	dec.UseNumber()
	if err := dec.Decode(&result); err != nil {
		return nil, err
	}
	if free {
		if _, err := i.free(start); err != nil {
			return nil, err
		}
	}
	return result, nil
}
// toRegoJSON converts go native JSON to Rego JSON and returns the wasm
// address of the parsed value. If free is true, the intermediate raw JSON
// buffer on the wasm heap is released after parsing.
func (i *vm) toRegoJSON(v interface{}, free bool) (int32, error) {
	var raw []byte
	switch x := v.(type) {
	case []byte:
		raw = x
	case ast.Value:
		raw = []byte(x.String())
	default:
		b, err := json.Marshal(x)
		if err != nil {
			return 0, err
		}
		raw = b
	}
	size := int32(len(raw))
	pos, err := i.malloc(size)
	if err != nil {
		return 0, err
	}
	ptr := pos.ToI32()
	copy(i.memory.Data()[ptr:ptr+size], raw)
	addr, err := i.valueParse(ptr, size)
	if err != nil {
		return 0, err
	}
	if free {
		if _, err := i.free(ptr); err != nil {
			return 0, err
		}
	}
	return addr.ToI32(), nil
}
// getHeapState reads the wasm heap pointer's current value.
func (i *vm) getHeapState() (int32, error) {
	v, err := i.heapPtrGet()
	if err != nil {
		return 0, err
	}
	return v.ToI32(), nil
}
// setHeapState moves the wasm heap pointer to ptr.
func (i *vm) setHeapState(ptr int32) error {
	if _, err := i.heapPtrSet(ptr); err != nil {
		return err
	}
	return nil
}
// cloneDataSegment returns the address of the loaded data document along
// with a copy of the heap region that holds it.
func (i *vm) cloneDataSegment() (int32, []byte) {
	// The parsed data values sit between the base heap address and end
	// at the eval heap pointer address.
	src := i.memory.Data()[i.baseHeapPtr:i.evalHeapPtr]
	cloned := append([]byte(nil), src...)
	return i.dataAddr, cloned
}
|
package invoices
import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/clock"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/queue"
"github.com/lightningnetwork/lnd/record"
)
// Sentinel errors returned by the invoice registry.
var (
	// ErrInvoiceExpiryTooSoon is returned when an invoice is attempted to be
	// accepted or settled with not enough blocks remaining.
	ErrInvoiceExpiryTooSoon = errors.New("invoice expiry too soon")

	// ErrInvoiceAmountTooLow is returned when an invoice is attempted to be
	// accepted or settled with an amount that is too low.
	ErrInvoiceAmountTooLow = errors.New("paid amount less than invoice amount")

	// ErrShuttingDown is returned when an operation failed because the
	// invoice registry is shutting down.
	ErrShuttingDown = errors.New("invoice registry shutting down")
)
const (
	// DefaultHtlcHoldDuration defines the default for how long mpp htlcs
	// are held while waiting for the other set members to arrive.
	// NOTE(review): presumably the default for
	// RegistryConfig.HtlcHoldDuration — confirm at the config call site.
	DefaultHtlcHoldDuration = 120 * time.Second
)
// RegistryConfig contains the configuration parameters for the invoice
// registry.
type RegistryConfig struct {
	// FinalCltvRejectDelta defines the number of blocks before the expiry
	// of the htlc where we no longer settle it as an exit hop and instead
	// cancel it back. Normally this value should be lower than the cltv
	// expiry of any invoice we create and the code effectuating this should
	// not be hit.
	FinalCltvRejectDelta int32

	// HtlcHoldDuration defines for how long mpp htlcs are held while
	// waiting for the other set members to arrive.
	HtlcHoldDuration time.Duration

	// Clock holds the clock implementation that is used to provide
	// Now() and TickAfter() and is useful to stub out the clock functions
	// during testing.
	Clock clock.Clock

	// AcceptKeySend indicates whether we want to accept spontaneous key
	// send payments.
	AcceptKeySend bool

	// AcceptAMP indicates whether we want to accept spontaneous AMP
	// payments.
	AcceptAMP bool

	// GcCanceledInvoicesOnStartup if set, we'll attempt to garbage collect
	// all canceled invoices upon start.
	GcCanceledInvoicesOnStartup bool

	// GcCanceledInvoicesOnTheFly if set, we'll garbage collect all newly
	// canceled invoices on the fly.
	GcCanceledInvoicesOnTheFly bool

	// KeysendHoldTime indicates for how long we want to accept and hold
	// spontaneous keysend payments.
	KeysendHoldTime time.Duration
}
// htlcReleaseEvent describes an htlc auto-release event. It is used to release
// mpp htlcs for which the complete set didn't arrive in time.
type htlcReleaseEvent struct {
	// invoiceRef identifies the invoice this htlc belongs to.
	invoiceRef channeldb.InvoiceRef

	// key is the circuit key of the htlc to release.
	key channeldb.CircuitKey

	// releaseTime is the time at which to release the htlc.
	releaseTime time.Time
}
// Less is used to order PriorityQueueItem's by their release time such that
// items with the older release time are at the top of the queue.
//
// Note that the type assertion panics if other is not an
// *htlcReleaseEvent; the queue is only ever populated with this type.
//
// NOTE: Part of the queue.PriorityQueueItem interface.
func (r *htlcReleaseEvent) Less(other queue.PriorityQueueItem) bool {
	return r.releaseTime.Before(other.(*htlcReleaseEvent).releaseTime)
}
// InvoiceRegistry is a central registry of all the outstanding invoices
// created by the daemon. The registry is a thin wrapper around a map in order
// to ensure that all updates/reads are thread safe.
type InvoiceRegistry struct {
	sync.RWMutex

	// cdb is the persistent on-disk invoice database.
	cdb *channeldb.DB

	// cfg contains the registry's configuration parameters.
	cfg *RegistryConfig

	// clientMtx guards nextClientID. NOTE(review): the accesses are
	// outside this chunk — confirm against the subscription code.
	clientMtx sync.Mutex

	// nextClientID is the id that will be handed to the next
	// notification client — presumably; confirm where ids are issued.
	nextClientID uint32

	// notificationClients is the set of clients subscribed to all
	// invoice updates, keyed by client id.
	notificationClients map[uint32]*InvoiceSubscription

	// singleNotificationClients is the set of clients subscribed to a
	// single invoice, keyed by client id.
	singleNotificationClients map[uint32]*SingleInvoiceSubscription

	// newSubscriptions delivers newly registered all-invoice
	// subscriptions to the event loop.
	newSubscriptions chan *InvoiceSubscription

	// subscriptionCancels delivers ids of clients cancelling their
	// subscription to the event loop.
	subscriptionCancels chan uint32

	// invoiceEvents is a single channel over which both invoice updates and
	// new single invoice subscriptions are carried.
	invoiceEvents chan interface{}

	// hodlSubscriptions is a map from a circuit key to a list of
	// subscribers. It is used for efficient notification of links.
	hodlSubscriptions map[channeldb.CircuitKey]map[chan<- interface{}]struct{}

	// hodlReverseSubscriptions tracks circuit keys subscribed to per
	// subscriber. This is used to unsubscribe from all hashes efficiently.
	hodlReverseSubscriptions map[chan<- interface{}]map[channeldb.CircuitKey]struct{}

	// htlcAutoReleaseChan contains the new htlcs that need to be
	// auto-released.
	htlcAutoReleaseChan chan *htlcReleaseEvent

	// expiryWatcher watches invoice expiry heights/times and cancels
	// expired invoices via cancelInvoiceImpl (see Start).
	expiryWatcher *InvoiceExpiryWatcher

	// wg tracks the event loop goroutine; quit signals shutdown.
	wg   sync.WaitGroup
	quit chan struct{}
}
// NewRegistry creates a new invoice registry. The invoice registry
// wraps the persistent on-disk invoice storage with an additional in-memory
// layer. The in-memory layer is in place such that debug invoices can be added
// which are volatile yet available system wide within the daemon.
func NewRegistry(cdb *channeldb.DB, expiryWatcher *InvoiceExpiryWatcher,
	cfg *RegistryConfig) *InvoiceRegistry {

	registry := &InvoiceRegistry{
		cdb:           cdb,
		cfg:           cfg,
		expiryWatcher: expiryWatcher,

		// Client bookkeeping.
		notificationClients:       make(map[uint32]*InvoiceSubscription),
		singleNotificationClients: make(map[uint32]*SingleInvoiceSubscription),
		newSubscriptions:          make(chan *InvoiceSubscription),
		subscriptionCancels:       make(chan uint32),

		// Event plumbing. The invoice event channel is buffered so
		// producers aren't blocked on slow dispatch.
		invoiceEvents:            make(chan interface{}, 100),
		hodlSubscriptions:        make(map[channeldb.CircuitKey]map[chan<- interface{}]struct{}),
		hodlReverseSubscriptions: make(map[chan<- interface{}]map[channeldb.CircuitKey]struct{}),
		htlcAutoReleaseChan:      make(chan *htlcReleaseEvent),

		quit: make(chan struct{}),
	}

	return registry
}
// scanInvoicesOnStart will scan all invoices on start and add active invoices
// to the invoice expiry watcher while also attempting to delete all canceled
// invoices.
func (i *InvoiceRegistry) scanInvoicesOnStart() error {
	var (
		pending   []invoiceExpiry
		removable []channeldb.InvoiceDeleteRef
	)

	reset := func() {
		// Zero out our results on start and if the scan is ever run
		// more than once. This latter case can happen if the kvdb
		// layer needs to retry the View transaction underneath (eg.
		// using the etcd driver, where all transactions are allowed
		// to retry for serializability).
		pending = nil
		removable = make([]channeldb.InvoiceDeleteRef, 0)
	}

	// scanFunc sorts each invoice into either the expiry-watch list
	// (still pending) or the delete list (already canceled).
	scanFunc := func(
		paymentHash lntypes.Hash, invoice *channeldb.Invoice) error {

		if invoice.IsPending() {
			expiryRef := makeInvoiceExpiry(paymentHash, invoice)
			if expiryRef != nil {
				pending = append(pending, expiryRef)
			}
		} else if i.cfg.GcCanceledInvoicesOnStartup &&
			invoice.State == channeldb.ContractCanceled {

			// Consider invoice for removal if it is already
			// canceled. Invoices that are expired but not yet
			// canceled, will be queued up for cancellation after
			// startup and will be deleted afterwards.
			ref := channeldb.InvoiceDeleteRef{
				PayHash:     paymentHash,
				AddIndex:    invoice.AddIndex,
				SettleIndex: invoice.SettleIndex,
			}

			// Only set the payment address for deletion when the
			// invoice actually has one (blank means unindexed).
			if invoice.Terms.PaymentAddr != channeldb.BlankPayAddr {
				ref.PayAddr = &invoice.Terms.PaymentAddr
			}

			removable = append(removable, ref)
		}
		return nil
	}

	err := i.cdb.ScanInvoices(scanFunc, reset)
	if err != nil {
		return err
	}

	log.Debugf("Adding %d pending invoices to the expiry watcher",
		len(pending))
	i.expiryWatcher.AddInvoices(pending...)

	// Deletion failures are only logged: garbage collection is
	// best-effort and must not prevent startup.
	if len(removable) > 0 {
		log.Infof("Attempting to delete %v canceled invoices",
			len(removable))
		if err := i.cdb.DeleteInvoice(removable); err != nil {
			log.Warnf("Deleting canceled invoices failed: %v", err)
		} else {
			log.Infof("Deleted %v canceled invoices",
				len(removable))
		}
	}

	return nil
}
// Start starts the registry and all goroutines it needs to carry out its task.
func (i *InvoiceRegistry) Start() error {
	// Start InvoiceExpiryWatcher and prepopulate it with existing active
	// invoices.
	err := i.expiryWatcher.Start(i.cancelInvoiceImpl)
	if err != nil {
		return err
	}

	// The event loop must be running before the scan below, since the
	// scan may feed work into it.
	i.wg.Add(1)
	go i.invoiceEventLoop()

	// Now scan all pending and removable invoices to the expiry watcher or
	// delete them.
	err = i.scanInvoicesOnStart()
	if err != nil {
		// Best-effort teardown of what was already started.
		_ = i.Stop()
		return err
	}

	return nil
}
// Stop signals the registry for a graceful shutdown. It stops the expiry
// watcher, closes the quit channel to unblock all goroutines, and waits
// for the event loop to exit.
func (i *InvoiceRegistry) Stop() error {
	log.Info("InvoiceRegistry shutting down")

	i.expiryWatcher.Stop()

	close(i.quit)
	i.wg.Wait()

	return nil
}
// invoiceEvent represents a new event that has modified an invoice on disk.
// Only two event types are currently supported: newly created invoices, and
// instances where invoices are settled.
type invoiceEvent struct {
	// hash is the payment hash of the invoice the event applies to.
	hash lntypes.Hash

	// invoice is the invoice's state after the modification.
	invoice *channeldb.Invoice
}
// tickAt returns a channel that ticks at the specified time. If the time has
// already passed, it will tick immediately.
func (i *InvoiceRegistry) tickAt(t time.Time) <-chan time.Time {
	return i.cfg.Clock.TickAfter(t.Sub(i.cfg.Clock.Now()))
}
// invoiceEventLoop is the dedicated goroutine responsible for accepting
// new notification subscriptions, cancelling old subscriptions, and
// dispatching new invoice events. It also drives the auto-release of held
// mpp htlcs whose set did not complete in time.
func (i *InvoiceRegistry) invoiceEventLoop() {
	defer i.wg.Done()

	// Set up a heap for htlc auto-releases, ordered by release time
	// (see htlcReleaseEvent.Less).
	autoReleaseHeap := &queue.PriorityQueue{}

	for {
		// If there is something to release, set up a release tick
		// channel. With an empty heap, nextReleaseTick stays nil and
		// its select case never fires.
		var nextReleaseTick <-chan time.Time
		if autoReleaseHeap.Len() > 0 {
			head := autoReleaseHeap.Top().(*htlcReleaseEvent)
			nextReleaseTick = i.tickAt(head.releaseTime)
		}

		select {
		// A new invoice subscription for all invoices has just arrived!
		// We'll query for any backlog notifications, then add it to the
		// set of clients.
		case newClient := <-i.newSubscriptions:
			log.Infof("New invoice subscription "+
				"client: id=%v", newClient.id)

			// With the backlog notifications delivered (if any),
			// we'll add this to our active subscriptions and
			// continue.
			i.notificationClients[newClient.id] = newClient

		// A client no longer wishes to receive invoice notifications.
		// So we'll remove them from the set of active clients.
		case clientID := <-i.subscriptionCancels:
			log.Infof("Cancelling invoice subscription for "+
				"client=%v", clientID)

			delete(i.notificationClients, clientID)
			delete(i.singleNotificationClients, clientID)

		// An invoice event has come in. This can either be an update to
		// an invoice or a new single invoice subscriber. Both type of
		// events are passed in via the same channel, to make sure that
		// subscribers get a consistent view of the event sequence.
		case event := <-i.invoiceEvents:
			switch e := event.(type) {
			// A sub-systems has just modified the invoice state, so
			// we'll dispatch notifications to all registered
			// clients.
			case *invoiceEvent:
				// For backwards compatibility, do not notify
				// all invoice subscribers of cancel and accept
				// events.
				state := e.invoice.State
				if state != channeldb.ContractCanceled &&
					state != channeldb.ContractAccepted {

					i.dispatchToClients(e)
				}
				i.dispatchToSingleClients(e)

			// A new single invoice subscription has arrived. Add it
			// to the set of clients. It is important to do this in
			// sequence with any other invoice events, because an
			// initial invoice update has already been sent out to
			// the subscriber.
			case *SingleInvoiceSubscription:
				log.Infof("New single invoice subscription "+
					"client: id=%v, ref=%v", e.id,
					e.invoiceRef)

				i.singleNotificationClients[e.id] = e
			}

		// A new htlc came in for auto-release.
		case event := <-i.htlcAutoReleaseChan:
			log.Debugf("Scheduling auto-release for htlc: "+
				"ref=%v, key=%v at %v",
				event.invoiceRef, event.key, event.releaseTime)

			// We use an independent timer for every htlc rather
			// than a set timer that is reset with every htlc coming
			// in. Otherwise the sender could keep resetting the
			// timer until the broadcast window is entered and our
			// channel is force closed.
			autoReleaseHeap.Push(event)

		// The htlc at the top of the heap needs to be auto-released.
		case <-nextReleaseTick:
			event := autoReleaseHeap.Pop().(*htlcReleaseEvent)
			err := i.cancelSingleHtlc(
				event.invoiceRef, event.key, ResultMppTimeout,
			)
			if err != nil {
				log.Errorf("HTLC timer: %v", err)
			}

		case <-i.quit:
			return
		}
	}
}
// dispatchToSingleClients passes the supplied event to all single-invoice
// notification clients that subscribed to the invoice this event applies
// to, matched by payment hash.
func (i *InvoiceRegistry) dispatchToSingleClients(event *invoiceEvent) {
	// Dispatch to single invoice subscribers.
	for _, client := range i.singleNotificationClients {
		payHash := client.invoiceRef.PayHash()

		// Skip subscriptions without a payment hash, or for a
		// different invoice.
		if payHash == nil || *payHash != event.hash {
			continue
		}

		client.notify(event)
	}
}
// dispatchToClients passes the supplied event to all notification clients that
// subscribed to all invoices. Add and settle indices are used to make sure that
// clients don't receive duplicate or unwanted events.
func (i *InvoiceRegistry) dispatchToClients(event *invoiceEvent) {
	invoice := event.invoice

	for clientID, client := range i.notificationClients {
		// Before we dispatch this event, we'll check
		// to ensure that this client hasn't already
		// received this notification in order to
		// ensure we don't duplicate any events.

		// TODO(joostjager): Refactor switches.
		state := event.invoice.State
		switch {
		// If we've already sent this settle event to
		// the client, then we can skip this.
		case state == channeldb.ContractSettled &&
			client.settleIndex >= invoice.SettleIndex:
			continue

		// Similarly, if we've already sent this add to
		// the client then we can skip this one.
		case state == channeldb.ContractOpen &&
			client.addIndex >= invoice.AddIndex:
			continue

		// These two states should never happen, but we
		// log them just in case so we can detect this
		// instance.
		case state == channeldb.ContractOpen &&
			client.addIndex+1 != invoice.AddIndex:
			log.Warnf("client=%v for invoice "+
				"notifications missed an update, "+
				"add_index=%v, new add event index=%v",
				clientID, client.addIndex,
				invoice.AddIndex)

		case state == channeldb.ContractSettled &&
			client.settleIndex+1 != invoice.SettleIndex:
			log.Warnf("client=%v for invoice "+
				"notifications missed an update, "+
				"settle_index=%v, new settle event index=%v",
				clientID, client.settleIndex,
				invoice.SettleIndex)
		}

		// Hand the event to the client's notification queue, bailing
		// out entirely if the registry is shutting down.
		select {
		case client.ntfnQueue.ChanIn() <- &invoiceEvent{
			invoice: invoice,
		}:
		case <-i.quit:
			return
		}

		// Each time we send a notification to a client, we'll record
		// the latest add/settle index it has. We'll use this to ensure
		// we don't send a notification twice, which can happen if a new
		// event is added while we're catching up a new client.
		switch event.invoice.State {
		case channeldb.ContractSettled:
			client.settleIndex = invoice.SettleIndex
		case channeldb.ContractOpen:
			client.addIndex = invoice.AddIndex
		default:
			log.Errorf("unexpected invoice state: %v",
				event.invoice.State)
		}
	}
}
// deliverBacklogEvents attempts to query the invoice database for any
// notifications that the client has missed since it reconnected last.
func (i *InvoiceRegistry) deliverBacklogEvents(client *InvoiceSubscription) error {
	addEvents, err := i.cdb.InvoicesAddedSince(client.addIndex)
	if err != nil {
		return err
	}

	settleEvents, err := i.cdb.InvoicesSettledSince(client.settleIndex)
	if err != nil {
		return err
	}

	// If we have any to deliver, then we'll append them to the end of the
	// notification queue in order to catch up the client before delivering
	// any new notifications.
	for _, addEvent := range addEvents {
		// We re-bind the loop variable to ensure we don't hold onto
		// the loop reference causing it to point to the same item.
		addEvent := addEvent

		select {
		case client.ntfnQueue.ChanIn() <- &invoiceEvent{
			invoice: &addEvent,
		}:
		case <-i.quit:
			return ErrShuttingDown
		}
	}

	for _, settleEvent := range settleEvents {
		// We re-bind the loop variable to ensure we don't hold onto
		// the loop reference causing it to point to the same item.
		settleEvent := settleEvent

		select {
		case client.ntfnQueue.ChanIn() <- &invoiceEvent{
			invoice: &settleEvent,
		}:
		case <-i.quit:
			return ErrShuttingDown
		}
	}

	return nil
}
// deliverSingleBacklogEvents will attempt to query the invoice database to
// retrieve the current invoice state and deliver this to the subscriber.
// Single invoice subscribers will always receive the current state right
// after subscribing. Only in case the invoice does not yet exist, nothing
// is sent yet.
func (i *InvoiceRegistry) deliverSingleBacklogEvents(
	client *SingleInvoiceSubscription) error {

	invoice, err := i.cdb.LookupInvoice(client.invoiceRef)
	switch {
	// It is possible that the invoice does not exist yet, but the client
	// is already watching it in anticipation. That is not an error.
	case err == channeldb.ErrInvoiceNotFound,
		err == channeldb.ErrNoInvoicesCreated:

		return nil

	case err != nil:
		return err
	}

	// Without a payment hash there is nothing to key the event on.
	payHash := client.invoiceRef.PayHash()
	if payHash == nil {
		return nil
	}

	return client.notify(&invoiceEvent{
		hash:    *payHash,
		invoice: &invoice,
	})
}
// AddInvoice adds a regular invoice for the specified amount, identified by
// the passed preimage. Additionally, any memo or receipt data provided will
// also be stored on-disk. Once this invoice is added, subsystems within the
// daemon add/forward HTLCs are able to obtain the proper preimage required for
// redemption in the case that we're the final destination. We also return the
// addIndex of the newly created invoice which monotonically increases for each
// new invoice added. A side effect of this function is that it also sets
// AddIndex on the invoice argument.
func (i *InvoiceRegistry) AddInvoice(invoice *channeldb.Invoice,
	paymentHash lntypes.Hash) (uint64, error) {

	i.Lock()

	ref := channeldb.InvoiceRefByHash(paymentHash)
	log.Debugf("Invoice%v: added with terms %v", ref, invoice.Terms)

	addIndex, err := i.cdb.AddInvoice(invoice, paymentHash)
	if err != nil {
		i.Unlock()
		return 0, err
	}

	// Now that we've added the invoice, we'll dispatch a message to
	// notify the clients of this new invoice.
	i.notifyClients(paymentHash, invoice)
	i.Unlock()

	// InvoiceExpiryWatcher.AddInvoices must not be locked by InvoiceRegistry
	// to avoid deadlock when a new invoice is added while an other is being
	// canceled.
	invoiceExpiryRef := makeInvoiceExpiry(paymentHash, invoice)
	if invoiceExpiryRef != nil {
		i.expiryWatcher.AddInvoices(invoiceExpiryRef)
	}

	return addIndex, nil
}
// LookupInvoice looks up an invoice by its payment hash (R-Hash), if found
// then we're able to pull the funds pending within an HTLC.
//
// TODO(roasbeef): ignore if settled?
func (i *InvoiceRegistry) LookupInvoice(rHash lntypes.Hash) (channeldb.Invoice,
	error) {

	// We'll check the database to see if there's an existing matching
	// invoice.
	ref := channeldb.InvoiceRefByHash(rHash)
	return i.cdb.LookupInvoice(ref)
}
// LookupInvoiceByRef looks up an invoice by the given reference, if found
// then we're able to pull the funds pending within an HTLC. It is a thin
// wrapper around the database lookup.
func (i *InvoiceRegistry) LookupInvoiceByRef(
	ref channeldb.InvoiceRef) (channeldb.Invoice, error) {

	return i.cdb.LookupInvoice(ref)
}
// startHtlcTimer starts a new timer via the invoice registry main loop that
// cancels a single htlc on an invoice when the htlc hold duration has passed.
// The event is handed to the event loop over htlcAutoReleaseChan; if the
// registry is shutting down, ErrShuttingDown is returned instead.
func (i *InvoiceRegistry) startHtlcTimer(invoiceRef channeldb.InvoiceRef,
	key channeldb.CircuitKey, acceptTime time.Time) error {

	releaseTime := acceptTime.Add(i.cfg.HtlcHoldDuration)
	event := &htlcReleaseEvent{
		invoiceRef:  invoiceRef,
		key:         key,
		releaseTime: releaseTime,
	}

	select {
	case i.htlcAutoReleaseChan <- event:
		return nil

	case <-i.quit:
		return ErrShuttingDown
	}
}
// cancelSingleHtlc cancels a single accepted htlc on an invoice. It takes
// a resolution result which will be used to notify subscribed links and
// resolvers of the details of the htlc cancellation.
//
// Cancellation is a no-op (nil error) if the invoice is no longer open or
// the htlc was already resolved.
func (i *InvoiceRegistry) cancelSingleHtlc(invoiceRef channeldb.InvoiceRef,
	key channeldb.CircuitKey, result FailResolutionResult) error {

	i.Lock()
	defer i.Unlock()

	// updateInvoice returns an update descriptor cancelling just this
	// htlc, or nil when no update should be applied.
	updateInvoice := func(invoice *channeldb.Invoice) (
		*channeldb.InvoiceUpdateDesc, error) {

		// Only allow individual htlc cancelation on open invoices.
		if invoice.State != channeldb.ContractOpen {
			log.Debugf("cancelSingleHtlc: invoice %v no longer "+
				"open", invoiceRef)

			return nil, nil
		}

		// Lookup the current status of the htlc in the database.
		htlc, ok := invoice.Htlcs[key]
		if !ok {
			return nil, fmt.Errorf("htlc %v not found", key)
		}

		// Cancelation is only possible if the htlc wasn't already
		// resolved.
		if htlc.State != channeldb.HtlcStateAccepted {
			log.Debugf("cancelSingleHtlc: htlc %v on invoice %v "+
				"is already resolved", key, invoiceRef)

			return nil, nil
		}

		log.Debugf("cancelSingleHtlc: cancelling htlc %v on invoice %v",
			key, invoiceRef)

		// Return an update descriptor that cancels htlc and keeps
		// invoice open.
		canceledHtlcs := map[channeldb.CircuitKey]struct{}{
			key: {},
		}

		return &channeldb.InvoiceUpdateDesc{
			CancelHtlcs: canceledHtlcs,
		}, nil
	}

	// Try to mark the specified htlc as canceled in the invoice database.
	// Intercept the update descriptor to set the local updated variable. If
	// no invoice update is performed, we can return early.
	var updated bool
	invoice, err := i.cdb.UpdateInvoice(invoiceRef, nil,
		func(invoice *channeldb.Invoice) (
			*channeldb.InvoiceUpdateDesc, error) {

			updateDesc, err := updateInvoice(invoice)
			if err != nil {
				return nil, err
			}
			updated = updateDesc != nil

			return updateDesc, err
		},
	)
	if err != nil {
		return err
	}
	if !updated {
		return nil
	}

	// The invoice has been updated. Notify subscribers of the htlc
	// resolution.
	htlc, ok := invoice.Htlcs[key]
	if !ok {
		return fmt.Errorf("htlc %v not found", key)
	}
	if htlc.State == channeldb.HtlcStateCanceled {
		resolution := NewFailResolution(
			key, int32(htlc.AcceptHeight), result,
		)

		i.notifyHodlSubscribers(resolution)
	}
	return nil
}
// processKeySend just-in-time inserts an invoice if this htlc is a keysend
// htlc. If the htlc carries no keysend record, it is a no-op.
func (i *InvoiceRegistry) processKeySend(ctx invoiceUpdateCtx) error {
	// Retrieve keysend record if present.
	preimageSlice, ok := ctx.customRecords[record.KeySendType]
	if !ok {
		return nil
	}

	// Cancel the htlc if the preimage is invalid.
	preimage, err := lntypes.MakePreimage(preimageSlice)
	if err != nil {
		return err
	}
	if preimage.Hash() != ctx.hash {
		return fmt.Errorf("invalid keysend preimage %v for hash %v",
			preimage, ctx.hash)
	}

	// Only allow keysend for non-mpp payments.
	if ctx.mpp != nil {
		return errors.New("no mpp keysend supported")
	}

	// Create an invoice for the htlc amount.
	amt := ctx.amtPaid

	// Set tlv optional feature vector on the invoice. Otherwise we wouldn't
	// be able to pay to it with keysend.
	rawFeatures := lnwire.NewRawFeatureVector(
		lnwire.TLVOnionPayloadOptional,
	)
	features := lnwire.NewFeatureVector(rawFeatures, lnwire.Features)

	// Use the minimum block delta that we require for settling htlcs.
	finalCltvDelta := i.cfg.FinalCltvRejectDelta

	// Pre-check expiry here to prevent inserting an invoice that will not
	// be settled.
	if ctx.expiry < uint32(ctx.currentHeight+finalCltvDelta) {
		return errors.New("final expiry too soon")
	}

	// The invoice database indexes all invoices by payment address, however
	// legacy keysend payment do not have one. In order to avoid a new
	// payment type on-disk wrt. to indexing, we'll continue to insert a
	// blank payment address which is special cased in the insertion logic
	// to not be indexed. In the future, once AMP is merged, this should be
	// replaced by generating a random payment address on the behalf of the
	// sender.
	payAddr := channeldb.BlankPayAddr

	// Create placeholder invoice.
	invoice := &channeldb.Invoice{
		CreationDate: i.cfg.Clock.Now(),
		Terms: channeldb.ContractTerm{
			FinalCltvDelta:  finalCltvDelta,
			Value:           amt,
			PaymentPreimage: &preimage,
			PaymentAddr:     payAddr,
			Features:        features,
		},
	}

	// When a hold time is configured, the invoice is created as a hodl
	// invoice with the configured expiry.
	if i.cfg.KeysendHoldTime != 0 {
		invoice.HodlInvoice = true
		invoice.Terms.Expiry = i.cfg.KeysendHoldTime
	}

	// Insert invoice into database. Ignore duplicates, because this
	// may be a replay.
	_, err = i.AddInvoice(invoice, ctx.hash)
	if err != nil && err != channeldb.ErrDuplicateInvoice {
		return err
	}

	return nil
}
// processAMP just-in-time inserts an invoice if this htlc carries an AMP
// record. AMP htlcs must also carry an MPP record supplying the total
// amount and payment address.
func (i *InvoiceRegistry) processAMP(ctx invoiceUpdateCtx) error {
	// AMP payments MUST also include an MPP record.
	if ctx.mpp == nil {
		return errors.New("no MPP record for AMP")
	}

	// Create an invoice for the total amount expected, provided in the MPP
	// record.
	amt := ctx.mpp.TotalMsat()

	// Set the TLV and MPP optional features on the invoice. We'll also make
	// the AMP features required so that it can't be paid by legacy or MPP
	// htlcs.
	rawFeatures := lnwire.NewRawFeatureVector(
		lnwire.TLVOnionPayloadOptional,
		lnwire.PaymentAddrOptional,
		lnwire.AMPRequired,
	)
	features := lnwire.NewFeatureVector(rawFeatures, lnwire.Features)

	// Use the minimum block delta that we require for settling htlcs.
	finalCltvDelta := i.cfg.FinalCltvRejectDelta

	// Pre-check expiry here to prevent inserting an invoice that will not
	// be settled.
	if ctx.expiry < uint32(ctx.currentHeight+finalCltvDelta) {
		return errors.New("final expiry too soon")
	}

	// We'll use the sender-generated payment address provided in the HTLC
	// to create our AMP invoice.
	payAddr := ctx.mpp.PaymentAddr()

	// Create placeholder invoice. AMP invoices have no preimage up front.
	invoice := &channeldb.Invoice{
		CreationDate: i.cfg.Clock.Now(),
		Terms: channeldb.ContractTerm{
			FinalCltvDelta:  finalCltvDelta,
			Value:           amt,
			PaymentPreimage: nil,
			PaymentAddr:     payAddr,
			Features:        features,
		},
	}

	// Insert invoice into database. Ignore duplicates payment hashes and
	// payment addrs, this may be a replay or a different HTLC for the AMP
	// invoice.
	_, err := i.AddInvoice(invoice, ctx.hash)
	switch {
	case err == channeldb.ErrDuplicateInvoice:
		return nil
	case err == channeldb.ErrDuplicatePayAddr:
		return nil
	default:
		return err
	}
}
// NotifyExitHopHtlc attempts to mark an invoice as settled. The return value
// describes how the htlc should be resolved.
//
// When the preimage of the invoice is not yet known (hodl invoice), this
// function moves the invoice to the accepted state. When SettleHoldInvoice is
// called later, a resolution message will be send back to the caller via the
// provided hodlChan. Invoice registry sends on this channel what action needs
// to be taken on the htlc (settle or cancel). The caller needs to ensure that
// the channel is either buffered or received on from another goroutine to
// prevent deadlock.
//
// In the case that the htlc is part of a larger set of htlcs that pay to the
// same invoice (multi-path payment), the htlc is held until the set is
// complete. If the set doesn't fully arrive in time, a timer will cancel the
// held htlc.
func (i *InvoiceRegistry) NotifyExitHopHtlc(rHash lntypes.Hash,
	amtPaid lnwire.MilliSatoshi, expiry uint32, currentHeight int32,
	circuitKey channeldb.CircuitKey, hodlChan chan<- interface{},
	payload Payload) (HtlcResolution, error) {

	// Create the update context containing the relevant details of the
	// incoming htlc.
	ctx := invoiceUpdateCtx{
		hash:                 rHash,
		circuitKey:           circuitKey,
		amtPaid:              amtPaid,
		expiry:               expiry,
		currentHeight:        currentHeight,
		finalCltvRejectDelta: i.cfg.FinalCltvRejectDelta,
		customRecords:        payload.CustomRecords(),
		mpp:                  payload.MultiPath(),
		amp:                  payload.AMPRecord(),
	}

	// Failures in the just-in-time invoice insertion below are turned
	// into fail resolutions rather than errors.
	switch {
	// If we are accepting spontaneous AMP payments and this payload
	// contains an AMP record, create an AMP invoice that will be settled
	// below.
	case i.cfg.AcceptAMP && ctx.amp != nil:
		err := i.processAMP(ctx)
		if err != nil {
			ctx.log(fmt.Sprintf("amp error: %v", err))

			return NewFailResolution(
				circuitKey, currentHeight, ResultAmpError,
			), nil
		}

	// If we are accepting spontaneous keysend payments, create a regular
	// invoice that will be settled below. We also enforce that this is only
	// done when no AMP payload is present since it will only be settle-able
	// by regular HTLCs.
	case i.cfg.AcceptKeySend && ctx.amp == nil:
		err := i.processKeySend(ctx)
		if err != nil {
			ctx.log(fmt.Sprintf("keysend error: %v", err))

			return NewFailResolution(
				circuitKey, currentHeight, ResultKeySendError,
			), nil
		}
	}

	// Execute locked notify exit hop logic.
	i.Lock()
	resolution, err := i.notifyExitHopHtlcLocked(&ctx, hodlChan)
	i.Unlock()
	if err != nil {
		return nil, err
	}

	switch r := resolution.(type) {
	// The htlc is held. Start a timer outside the lock if the htlc should
	// be auto-released, because otherwise a deadlock may happen with the
	// main event loop.
	case *htlcAcceptResolution:
		if r.autoRelease {
			err := i.startHtlcTimer(
				ctx.invoiceRef(), circuitKey, r.acceptTime,
			)
			if err != nil {
				return nil, err
			}
		}

		// We return a nil resolution because htlc acceptances are
		// represented as nil resolutions externally.
		// TODO(carla) update calling code to handle accept resolutions.
		return nil, nil

	// A direct resolution was received for this htlc.
	case HtlcResolution:
		return r, nil

	// Fail if an unknown resolution type was received.
	default:
		return nil, errors.New("invalid resolution type")
	}
}
// notifyExitHopHtlcLocked is the internal implementation of NotifyExitHopHtlc
// that should be executed inside the registry lock.
func (i *InvoiceRegistry) notifyExitHopHtlcLocked(
	ctx *invoiceUpdateCtx, hodlChan chan<- interface{}) (
	HtlcResolution, error) {

	// We'll attempt to settle an invoice matching this rHash on disk (if
	// one exists). The callback will update the invoice state and/or htlcs.
	var (
		resolution        HtlcResolution
		updateSubscribers bool
	)
	invoice, err := i.cdb.UpdateInvoice(
		ctx.invoiceRef(),
		(*channeldb.SetID)(ctx.setID()),
		func(inv *channeldb.Invoice) (
			*channeldb.InvoiceUpdateDesc, error) {

			updateDesc, res, err := updateInvoice(ctx, inv)
			if err != nil {
				return nil, err
			}

			// Only send an update if the invoice state was changed.
			updateSubscribers = updateDesc != nil &&
				updateDesc.State != nil

			// Assign resolution to outer scope variable.
			resolution = res

			return updateDesc, nil
		},
	)
	switch err {
	case channeldb.ErrInvoiceNotFound:
		// If the invoice was not found, return a failure resolution
		// with an invoice not found result.
		return NewFailResolution(
			ctx.circuitKey, ctx.currentHeight,
			ResultInvoiceNotFound,
		), nil

	// A nil error means the invoice was updated; fall through to inspect
	// the resolution type below.
	case nil:

	default:
		ctx.log(err.Error())
		return nil, err
	}

	switch res := resolution.(type) {
	case *HtlcFailResolution:
		// Inspect latest htlc state on the invoice. If it is found,
		// we will update the accept height as it was recorded in the
		// invoice database (which occurs in the case where the htlc
		// reached the database in a previous call). If the htlc was
		// not found on the invoice, it was immediately failed so we
		// send the failure resolution as is, which has the current
		// height set as the accept height.
		invoiceHtlc, ok := invoice.Htlcs[ctx.circuitKey]
		if ok {
			res.AcceptHeight = int32(invoiceHtlc.AcceptHeight)
		}

		ctx.log(fmt.Sprintf("failure resolution result "+
			"outcome: %v, at accept height: %v",
			res.Outcome, res.AcceptHeight))

		// Some failures apply to the entire HTLC set. Break here if
		// this isn't one of them.
		if !res.Outcome.IsSetFailure() {
			break
		}

		// Also cancel any HTLCs in the HTLC set that are also in the
		// canceled state with the same failure result.
		setID := ctx.setID()
		canceledHtlcSet := invoice.HTLCSet(setID, channeldb.HtlcStateCanceled)
		for key, htlc := range canceledHtlcSet {
			htlcFailResolution := NewFailResolution(
				key, int32(htlc.AcceptHeight), res.Outcome,
			)
			i.notifyHodlSubscribers(htlcFailResolution)
		}

	// If the htlc was settled, we will settle any previously accepted
	// htlcs and notify our peer to settle them.
	case *HtlcSettleResolution:
		ctx.log(fmt.Sprintf("settle resolution result "+
			"outcome: %v, at accept height: %v",
			res.Outcome, res.AcceptHeight))

		// Also settle any previously accepted htlcs. If a htlc is
		// marked as settled, we should follow now and settle the htlc
		// with our peer.
		setID := ctx.setID()
		settledHtlcSet := invoice.HTLCSet(setID, channeldb.HtlcStateSettled)
		for key, htlc := range settledHtlcSet {
			// AMP htlcs may carry their own preimage; use it
			// instead of the invoice-level one when present.
			preimage := res.Preimage
			if htlc.AMP != nil && htlc.AMP.Preimage != nil {
				preimage = *htlc.AMP.Preimage
			}

			// Notify subscribers that the htlcs should be settled
			// with our peer. Note that the outcome of the
			// resolution is set based on the outcome of the single
			// htlc that we just settled, so may not be accurate
			// for all htlcs.
			htlcSettleResolution := NewSettleResolution(
				preimage, key,
				int32(htlc.AcceptHeight), res.Outcome,
			)

			// Notify subscribers that the htlc should be settled
			// with our peer.
			i.notifyHodlSubscribers(htlcSettleResolution)
		}

		// If concurrent payments were attempted to this invoice before
		// the current one was ultimately settled, cancel back any of
		// the HTLCs immediately. As a result of the settle, the HTLCs
		// in other HTLC sets are automatically converted to a canceled
		// state when updating the invoice.
		canceledHtlcSet := invoice.HTLCSetCompliment(
			setID, channeldb.HtlcStateCanceled,
		)
		for key, htlc := range canceledHtlcSet {
			htlcFailResolution := NewFailResolution(
				key, int32(htlc.AcceptHeight),
				ResultInvoiceAlreadySettled,
			)
			i.notifyHodlSubscribers(htlcFailResolution)
		}

	// If we accepted the htlc, subscribe to the hodl invoice and return
	// an accept resolution with the htlc's accept time on it.
	case *htlcAcceptResolution:
		invoiceHtlc, ok := invoice.Htlcs[ctx.circuitKey]
		if !ok {
			return nil, fmt.Errorf("accepted htlc: %v not"+
				" present on invoice: %x", ctx.circuitKey,
				ctx.hash[:])
		}

		// Determine accepted height of this htlc. If the htlc reached
		// the invoice database (possibly in a previous call to the
		// invoice registry), we'll take the original accepted height
		// as it was recorded in the database.
		acceptHeight := int32(invoiceHtlc.AcceptHeight)

		ctx.log(fmt.Sprintf("accept resolution result "+
			"outcome: %v, at accept height: %v",
			res.outcome, acceptHeight))

		// Auto-release the htlc if the invoice is still open. It can
		// only happen for mpp payments that there are htlcs in state
		// Accepted while the invoice is Open.
		if invoice.State == channeldb.ContractOpen {
			res.acceptTime = invoiceHtlc.AcceptTime
			res.autoRelease = true
		}

		// If we have fully accepted the set of htlcs for this invoice,
		// we can now add it to our invoice expiry watcher. We do not
		// add invoices before they are fully accepted, because it is
		// possible that we MppTimeout the htlcs, and then our relevant
		// expiry height could change.
		if res.outcome == resultAccepted {
			expiry := makeInvoiceExpiry(ctx.hash, invoice)
			i.expiryWatcher.AddInvoices(expiry)
		}

		i.hodlSubscribe(hodlChan, ctx.circuitKey)

	default:
		panic("unknown action")
	}

	// Now that the links have been notified of any state changes to their
	// HTLCs, we'll go ahead and notify any clients waiting on the invoice
	// state changes.
	if updateSubscribers {
		i.notifyClients(ctx.hash, invoice)
	}

	return resolution, nil
}
// SettleHodlInvoice sets the preimage of a hodl invoice.
func (i *InvoiceRegistry) SettleHodlInvoice(preimage lntypes.Preimage) error {
	i.Lock()
	defer i.Unlock()

	// The update callback validates that the invoice can transition to
	// settled: still-open, already-canceled and already-settled invoices
	// are each rejected with a distinct error.
	updateInvoice := func(invoice *channeldb.Invoice) (
		*channeldb.InvoiceUpdateDesc, error) {

		switch invoice.State {
		case channeldb.ContractOpen:
			return nil, channeldb.ErrInvoiceStillOpen

		case channeldb.ContractCanceled:
			return nil, channeldb.ErrInvoiceAlreadyCanceled

		case channeldb.ContractSettled:
			return nil, channeldb.ErrInvoiceAlreadySettled
		}

		return &channeldb.InvoiceUpdateDesc{
			State: &channeldb.InvoiceStateUpdateDesc{
				NewState: channeldb.ContractSettled,
				Preimage: &preimage,
			},
		}, nil
	}

	hash := preimage.Hash()
	invoiceRef := channeldb.InvoiceRefByHash(hash)
	invoice, err := i.cdb.UpdateInvoice(invoiceRef, nil, updateInvoice)
	if err != nil {
		log.Errorf("SettleHodlInvoice with preimage %v: %v",
			preimage, err)

		return err
	}

	log.Debugf("Invoice%v: settled with preimage %v", invoiceRef,
		invoice.Terms.PaymentPreimage)

	// In the callback, we marked the invoice as settled. UpdateInvoice will
	// have seen this and should have moved all htlcs that were accepted to
	// the settled state. In the loop below, we go through all of these and
	// notify links and resolvers that are waiting for resolution. Any htlcs
	// that were already settled before, will be notified again. This isn't
	// necessary but doesn't hurt either.
	for key, htlc := range invoice.Htlcs {
		if htlc.State != channeldb.HtlcStateSettled {
			continue
		}

		resolution := NewSettleResolution(
			preimage, key, int32(htlc.AcceptHeight), ResultSettled,
		)
		i.notifyHodlSubscribers(resolution)
	}

	i.notifyClients(hash, invoice)

	return nil
}
// CancelInvoice attempts to cancel the invoice corresponding to the passed
// payment hash.
func (i *InvoiceRegistry) CancelInvoice(payHash lntypes.Hash) error {
	// Pass cancelAccepted=true so that invoices in the accepted state are
	// force-canceled as well.
	return i.cancelInvoiceImpl(payHash, true)
}
// shouldCancel examines the state of an invoice and whether we want to
// cancel already accepted invoices, taking our force cancel boolean into
// account. This is pulled out into its own function so that tests that mock
// cancelInvoiceImpl can reuse this logic.
func shouldCancel(state channeldb.ContractState, cancelAccepted bool) bool {
	// Any state other than accepted may always be canceled; an accepted
	// invoice is only canceled when cancellation of accepted invoices was
	// explicitly requested.
	return state != channeldb.ContractAccepted || cancelAccepted
}
// cancelInvoiceImpl attempts to cancel the invoice corresponding to the
// passed payment hash. Accepted invoices will only be canceled if explicitly
// requested to do so via cancelAccepted. It notifies subscribing links and
// resolvers that the associated htlcs were canceled if they change state.
func (i *InvoiceRegistry) cancelInvoiceImpl(payHash lntypes.Hash,
	cancelAccepted bool) error {

	i.Lock()
	defer i.Unlock()

	ref := channeldb.InvoiceRefByHash(payHash)
	log.Debugf("Invoice%v: canceling invoice", ref)

	updateInvoice := func(invoice *channeldb.Invoice) (
		*channeldb.InvoiceUpdateDesc, error) {

		// Bail out with a no-op update if this invoice shouldn't be
		// canceled (it is accepted and force cancelation wasn't
		// requested).
		if !shouldCancel(invoice.State, cancelAccepted) {
			return nil, nil
		}

		// Move invoice to the canceled state. Rely on validation in
		// channeldb to return an error if the invoice is already
		// settled or canceled.
		return &channeldb.InvoiceUpdateDesc{
			State: &channeldb.InvoiceStateUpdateDesc{
				NewState: channeldb.ContractCanceled,
			},
		}, nil
	}

	// NOTE: ref is reused here rather than constructing a second
	// identical InvoiceRefByHash value.
	invoice, err := i.cdb.UpdateInvoice(ref, nil, updateInvoice)

	// Implement idempotency by returning success if the invoice was already
	// canceled.
	if err == channeldb.ErrInvoiceAlreadyCanceled {
		log.Debugf("Invoice%v: already canceled", ref)
		return nil
	}
	if err != nil {
		return err
	}

	// Return without cancellation if the invoice state is ContractAccepted.
	if invoice.State == channeldb.ContractAccepted {
		// Fixed format string: the two concatenated literals were
		// missing a separating space ("wasn'texplicitly").
		log.Debugf("Invoice%v: remains accepted as cancel wasn't "+
			"explicitly requested.", ref)
		return nil
	}

	log.Debugf("Invoice%v: canceled", ref)

	// In the callback, some htlcs may have been moved to the canceled
	// state. We now go through all of these and notify links and resolvers
	// that are waiting for resolution. Any htlcs that were already canceled
	// before, will be notified again. This isn't necessary but doesn't hurt
	// either.
	for key, htlc := range invoice.Htlcs {
		if htlc.State != channeldb.HtlcStateCanceled {
			continue
		}

		i.notifyHodlSubscribers(
			NewFailResolution(
				key, int32(htlc.AcceptHeight), ResultCanceled,
			),
		)
	}
	i.notifyClients(payHash, invoice)

	// Attempt to also delete the invoice if requested through the registry
	// config.
	if i.cfg.GcCanceledInvoicesOnTheFly {
		// Assemble the delete reference and attempt to delete the
		// invoice from the DB.
		deleteRef := channeldb.InvoiceDeleteRef{
			PayHash:     payHash,
			AddIndex:    invoice.AddIndex,
			SettleIndex: invoice.SettleIndex,
		}
		if invoice.Terms.PaymentAddr != channeldb.BlankPayAddr {
			deleteRef.PayAddr = &invoice.Terms.PaymentAddr
		}

		err = i.cdb.DeleteInvoice(
			[]channeldb.InvoiceDeleteRef{deleteRef},
		)

		// If by any chance deletion failed, then log it instead of
		// returning the error, as the invoice itself has already been
		// canceled.
		if err != nil {
			log.Warnf("Invoice%v could not be deleted: %v",
				ref, err)
		}
	}

	return nil
}
// notifyClients notifies all currently registered invoice notification clients
// of a newly added/settled invoice.
func (i *InvoiceRegistry) notifyClients(hash lntypes.Hash,
	invoice *channeldb.Invoice) {

	// Hand the event to the central event loop, bailing out if the
	// registry is shutting down.
	select {
	case i.invoiceEvents <- &invoiceEvent{
		hash:    hash,
		invoice: invoice,
	}:

	case <-i.quit:
	}
}
// invoiceSubscriptionKit holds the fields that are common to both all-invoice
// subscribers and single invoice subscribers.
type invoiceSubscriptionKit struct {
	// id uniquely identifies this subscription within the registry.
	id uint32

	// inv is the registry this subscription is registered with.
	inv *InvoiceRegistry

	// ntfnQueue buffers events on their way to the client goroutine.
	ntfnQueue *queue.ConcurrentQueue

	// canceled indicates whether Cancel has already been called. To be
	// used atomically.
	canceled uint32

	// cancelChan is closed when the subscription is canceled.
	cancelChan chan struct{}

	// wg tracks the proxy goroutines spawned for this subscription.
	wg sync.WaitGroup
}
// InvoiceSubscription represents an intent to receive updates for newly added
// or settled invoices. For each newly added invoice, a copy of the invoice
// will be sent over the NewInvoices channel. Similarly, for each newly settled
// invoice, a copy of the invoice will be sent over the SettledInvoices
// channel.
type InvoiceSubscription struct {
	invoiceSubscriptionKit

	// NewInvoices is a channel that we'll use to send all newly created
	// invoices with an invoice index greater than the specified
	// StartingInvoiceIndex field.
	NewInvoices chan *channeldb.Invoice

	// SettledInvoices is a channel that we'll use to send all settled
	// invoices with an invoice index greater than the specified
	// StartingInvoiceIndex field.
	SettledInvoices chan *channeldb.Invoice

	// addIndex is the highest add index the caller knows of. We'll use
	// this information to send out an event backlog to the notifications
	// subscriber. Any new add events with an index greater than this will
	// be dispatched before any new notifications are sent out.
	addIndex uint64

	// settleIndex is the highest settle index the caller knows of. We'll
	// use this information to send out an event backlog to the
	// notifications subscriber. Any new settle events with an index
	// greater than this will be dispatched before any new notifications
	// are sent out.
	settleIndex uint64
}
// SingleInvoiceSubscription represents an intent to receive updates for a
// specific invoice.
type SingleInvoiceSubscription struct {
	invoiceSubscriptionKit

	// invoiceRef identifies the invoice this subscription is interested
	// in.
	invoiceRef channeldb.InvoiceRef

	// Updates is a channel that we'll use to send all invoice events for
	// the invoice that is subscribed to.
	Updates chan *channeldb.Invoice
}
// Cancel unregisters the InvoiceSubscription, freeing any previously allocated
// resources.
func (i *invoiceSubscriptionKit) Cancel() {
	// Make cancellation idempotent: only the first caller proceeds.
	if !atomic.CompareAndSwapUint32(&i.canceled, 0, 1) {
		return
	}

	// Inform the registry's event loop that this client id is going away,
	// unless the registry itself is shutting down.
	select {
	case i.inv.subscriptionCancels <- i.id:
	case <-i.inv.quit:
	}

	// Stop the queue and signal the proxy goroutine before waiting for it
	// to exit.
	i.ntfnQueue.Stop()
	close(i.cancelChan)

	i.wg.Wait()
}
// notify enqueues the given event for delivery to this subscriber, returning
// ErrShuttingDown if the registry quits before the event can be enqueued.
func (i *invoiceSubscriptionKit) notify(event *invoiceEvent) error {
	select {
	case i.ntfnQueue.ChanIn() <- event:
		return nil

	case <-i.inv.quit:
		return ErrShuttingDown
	}
}
// SubscribeNotifications returns an InvoiceSubscription which allows the
// caller to receive async notifications when any invoices are settled or
// added. The invoiceIndex parameter is a streaming "checkpoint". We'll start
// by first sending out all new events with an invoice index _greater_ than
// this value. Afterwards, we'll send out real-time notifications.
func (i *InvoiceRegistry) SubscribeNotifications(
	addIndex, settleIndex uint64) (*InvoiceSubscription, error) {

	client := &InvoiceSubscription{
		NewInvoices:     make(chan *channeldb.Invoice),
		SettledInvoices: make(chan *channeldb.Invoice),
		addIndex:        addIndex,
		settleIndex:     settleIndex,
		invoiceSubscriptionKit: invoiceSubscriptionKit{
			inv:        i,
			ntfnQueue:  queue.NewConcurrentQueue(20),
			cancelChan: make(chan struct{}),
		},
	}
	client.ntfnQueue.Start()

	// Assign a unique client id under the client mutex.
	i.clientMtx.Lock()
	client.id = i.nextClientID
	i.nextClientID++
	i.clientMtx.Unlock()

	// Before we register this new invoice subscription, we'll launch a new
	// goroutine that will proxy all notifications appended to the end of
	// the concurrent queue to the two client-side channels the caller will
	// feed off of.
	i.wg.Add(1)
	go func() {
		defer i.wg.Done()

		for {
			select {
			// A new invoice event has been sent by the
			// invoiceRegistry! We'll figure out if this is an add
			// event or a settle event, then dispatch the event to
			// the client.
			case ntfn := <-client.ntfnQueue.ChanOut():
				invoiceEvent := ntfn.(*invoiceEvent)

				var targetChan chan *channeldb.Invoice
				state := invoiceEvent.invoice.State
				switch state {
				case channeldb.ContractOpen:
					targetChan = client.NewInvoices

				case channeldb.ContractSettled:
					targetChan = client.SettledInvoices

				// Cancel/accept events are not forwarded to
				// all-invoice subscribers.
				default:
					log.Errorf("unknown invoice "+
						"state: %v", state)

					continue
				}

				select {
				case targetChan <- invoiceEvent.invoice:

				case <-client.cancelChan:
					return

				case <-i.quit:
					return
				}

			case <-client.cancelChan:
				return

			case <-i.quit:
				return
			}
		}
	}()

	i.Lock()
	defer i.Unlock()

	// Query the database to see if based on the provided addIndex and
	// settledIndex we need to deliver any backlog notifications.
	err := i.deliverBacklogEvents(client)
	if err != nil {
		return nil, err
	}

	// Register the client with the event loop, unless we are shutting
	// down.
	select {
	case i.newSubscriptions <- client:
	case <-i.quit:
		return nil, ErrShuttingDown
	}

	return client, nil
}
// SubscribeSingleInvoice returns a SingleInvoiceSubscription which allows the
// caller to receive async notifications for a specific invoice.
func (i *InvoiceRegistry) SubscribeSingleInvoice(
	hash lntypes.Hash) (*SingleInvoiceSubscription, error) {

	client := &SingleInvoiceSubscription{
		Updates: make(chan *channeldb.Invoice),
		invoiceSubscriptionKit: invoiceSubscriptionKit{
			inv:        i,
			ntfnQueue:  queue.NewConcurrentQueue(20),
			cancelChan: make(chan struct{}),
		},
		invoiceRef: channeldb.InvoiceRefByHash(hash),
	}
	client.ntfnQueue.Start()

	// Assign a unique client id under the client mutex.
	i.clientMtx.Lock()
	client.id = i.nextClientID
	i.nextClientID++
	i.clientMtx.Unlock()

	// Before we register this new invoice subscription, we'll launch a new
	// goroutine that will proxy all notifications appended to the end of
	// the concurrent queue to the client-side Updates channel the caller
	// will feed off of.
	i.wg.Add(1)
	go func() {
		defer i.wg.Done()

		for {
			select {
			// A new invoice event has been sent by the
			// invoiceRegistry. We will dispatch the event to the
			// client.
			case ntfn := <-client.ntfnQueue.ChanOut():
				invoiceEvent := ntfn.(*invoiceEvent)

				select {
				case client.Updates <- invoiceEvent.invoice:

				case <-client.cancelChan:
					return

				case <-i.quit:
					return
				}

			case <-client.cancelChan:
				return

			case <-i.quit:
				return
			}
		}
	}()

	// Within the lock, we both query the invoice state and pass the client
	// subscription to the invoiceEvents channel. This is to make sure that
	// the client receives a consistent stream of events.
	i.Lock()
	defer i.Unlock()

	err := i.deliverSingleBacklogEvents(client)
	if err != nil {
		return nil, err
	}

	select {
	case i.invoiceEvents <- client:
	case <-i.quit:
		return nil, ErrShuttingDown
	}

	return client, nil
}
// notifyHodlSubscribers sends out the htlc resolution to all current
// subscribers.
func (i *InvoiceRegistry) notifyHodlSubscribers(htlcResolution HtlcResolution) {
	subscribers, ok := i.hodlSubscriptions[htlcResolution.CircuitKey()]
	if !ok {
		return
	}

	// Notify all interested subscribers and remove subscription from both
	// maps. The subscription can be removed as there only ever will be a
	// single resolution for each hash.
	for subscriber := range subscribers {
		select {
		case subscriber <- htlcResolution:
		case <-i.quit:
			// Shutting down: abort notification and skip the
			// remaining map cleanup.
			return
		}

		delete(
			i.hodlReverseSubscriptions[subscriber],
			htlcResolution.CircuitKey(),
		)
	}

	delete(i.hodlSubscriptions, htlcResolution.CircuitKey())
}
// hodlSubscribe adds a new invoice subscription.
func (i *InvoiceRegistry) hodlSubscribe(subscriber chan<- interface{},
	circuitKey channeldb.CircuitKey) {

	log.Debugf("Hodl subscribe for %v", circuitKey)

	// Lazily create the forward map entry for this circuit key, then
	// record the subscriber in it.
	if i.hodlSubscriptions[circuitKey] == nil {
		i.hodlSubscriptions[circuitKey] =
			make(map[chan<- interface{}]struct{})
	}
	i.hodlSubscriptions[circuitKey][subscriber] = struct{}{}

	// Mirror the registration in the reverse index so that unsubscribing
	// this subscriber from all keys later is efficient.
	if i.hodlReverseSubscriptions[subscriber] == nil {
		i.hodlReverseSubscriptions[subscriber] =
			make(map[channeldb.CircuitKey]struct{})
	}
	i.hodlReverseSubscriptions[subscriber][circuitKey] = struct{}{}
}
// HodlUnsubscribeAll cancels the subscription.
func (i *InvoiceRegistry) HodlUnsubscribeAll(subscriber chan<- interface{}) {
	i.Lock()
	defer i.Unlock()

	// Remove the subscriber from every circuit key it was registered for,
	// then drop its reverse index entry entirely.
	for key := range i.hodlReverseSubscriptions[subscriber] {
		delete(i.hodlSubscriptions[key], subscriber)
	}
	delete(i.hodlReverseSubscriptions, subscriber)
}
invoices: recognize AMP invoice settles during ntfn dispatch+catchup
In this commit, we add the setID to the invoiceEvent struct as it will
be useful when we need to be able to distinguish a new open invoice,
from an AMP invoice that's being settled for the first time.
We then update the logic during notification dispatch to utilize the new
field, allowing it to detect the repeated settles of AMP invoices.
package invoices
import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/clock"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/queue"
"github.com/lightningnetwork/lnd/record"
)
var (
	// ErrInvoiceExpiryTooSoon is returned when an invoice is attempted to be
	// accepted or settled with not enough blocks remaining.
	ErrInvoiceExpiryTooSoon = errors.New("invoice expiry too soon")

	// ErrInvoiceAmountTooLow is returned when an invoice is attempted to be
	// accepted or settled with an amount that is too low.
	ErrInvoiceAmountTooLow = errors.New("paid amount less than invoice amount")

	// ErrShuttingDown is returned when an operation failed because the
	// invoice registry is shutting down.
	ErrShuttingDown = errors.New("invoice registry shutting down")
)
const (
	// DefaultHtlcHoldDuration defines the default for how long mpp htlcs
	// are held while waiting for the other set members to arrive.
	DefaultHtlcHoldDuration = 120 * time.Second
)
// RegistryConfig contains the configuration parameters for invoice registry.
type RegistryConfig struct {
	// FinalCltvRejectDelta defines the number of blocks before the expiry
	// of the htlc where we no longer settle it as an exit hop and instead
	// cancel it back. Normally this value should be lower than the cltv
	// expiry of any invoice we create and the code effectuating this should
	// not be hit.
	FinalCltvRejectDelta int32

	// HtlcHoldDuration defines for how long mpp htlcs are held while
	// waiting for the other set members to arrive.
	HtlcHoldDuration time.Duration

	// Clock holds the clock implementation that is used to provide
	// Now() and TickAfter() and is useful to stub out the clock functions
	// during testing.
	Clock clock.Clock

	// AcceptKeySend indicates whether we want to accept spontaneous key
	// send payments.
	AcceptKeySend bool

	// AcceptAMP indicates whether we want to accept spontaneous AMP
	// payments.
	AcceptAMP bool

	// GcCanceledInvoicesOnStartup if set, we'll attempt to garbage collect
	// all canceled invoices upon start.
	GcCanceledInvoicesOnStartup bool

	// GcCanceledInvoicesOnTheFly if set, we'll garbage collect all newly
	// canceled invoices on the fly.
	GcCanceledInvoicesOnTheFly bool

	// KeysendHoldTime indicates for how long we want to accept and hold
	// spontaneous keysend payments.
	KeysendHoldTime time.Duration
}
// htlcReleaseEvent describes an htlc auto-release event. It is used to release
// mpp htlcs for which the complete set didn't arrive in time.
type htlcReleaseEvent struct {
	// invoiceRef identifies the invoice this htlc belongs to.
	invoiceRef channeldb.InvoiceRef

	// key is the circuit key of the htlc to release.
	key channeldb.CircuitKey

	// releaseTime is the time at which to release the htlc.
	releaseTime time.Time
}
// Less is used to order PriorityQueueItem's by their release time such that
// items with the older release time are at the top of the queue.
//
// NOTE: Part of the queue.PriorityQueueItem interface.
func (r *htlcReleaseEvent) Less(other queue.PriorityQueueItem) bool {
	otherEvent := other.(*htlcReleaseEvent)
	return r.releaseTime.Before(otherEvent.releaseTime)
}
// InvoiceRegistry is a central registry of all the outstanding invoices
// created by the daemon. The registry is a thin wrapper around a map in order
// to ensure that all updates/reads are thread safe.
type InvoiceRegistry struct {
	sync.RWMutex

	// cdb is the database backing the registry's invoices.
	cdb *channeldb.DB

	// cfg contains the registry's configuration parameters.
	cfg *RegistryConfig

	// clientMtx guards nextClientID.
	clientMtx sync.Mutex

	// nextClientID is the id assigned to the next subscription client.
	nextClientID uint32

	// notificationClients holds the active all-invoice subscriptions,
	// keyed by client id.
	notificationClients map[uint32]*InvoiceSubscription

	// singleNotificationClients holds the active single-invoice
	// subscriptions, keyed by client id.
	singleNotificationClients map[uint32]*SingleInvoiceSubscription

	// newSubscriptions carries newly registered all-invoice subscriptions
	// to the event loop.
	newSubscriptions chan *InvoiceSubscription

	// subscriptionCancels carries the ids of canceled subscriptions to
	// the event loop.
	subscriptionCancels chan uint32

	// invoiceEvents is a single channel over which both invoice updates and
	// new single invoice subscriptions are carried.
	invoiceEvents chan interface{}

	// hodlSubscriptions is a map from a circuit key to a list of
	// subscribers. It is used for efficient notification of links.
	hodlSubscriptions map[channeldb.CircuitKey]map[chan<- interface{}]struct{}

	// hodlReverseSubscriptions tracks circuit keys subscribed to per
	// subscriber. This is used to unsubscribe from all hashes efficiently.
	hodlReverseSubscriptions map[chan<- interface{}]map[channeldb.CircuitKey]struct{}

	// htlcAutoReleaseChan contains the new htlcs that need to be
	// auto-released.
	htlcAutoReleaseChan chan *htlcReleaseEvent

	// expiryWatcher cancels invoices once they expire; it is fed from the
	// startup scan and from newly accepted invoices.
	expiryWatcher *InvoiceExpiryWatcher

	// wg tracks the registry's goroutines and quit signals them to exit.
	wg   sync.WaitGroup
	quit chan struct{}
}
// NewRegistry creates a new invoice registry. The invoice registry
// wraps the persistent on-disk invoice storage with an additional in-memory
// layer. The in-memory layer is in place such that debug invoices can be added
// which are volatile yet available system wide within the daemon.
func NewRegistry(cdb *channeldb.DB, expiryWatcher *InvoiceExpiryWatcher,
	cfg *RegistryConfig) *InvoiceRegistry {

	registry := &InvoiceRegistry{
		cdb:           cdb,
		cfg:           cfg,
		expiryWatcher: expiryWatcher,

		// Subscription bookkeeping.
		notificationClients:       make(map[uint32]*InvoiceSubscription),
		singleNotificationClients: make(map[uint32]*SingleInvoiceSubscription),
		newSubscriptions:          make(chan *InvoiceSubscription),
		subscriptionCancels:       make(chan uint32),
		invoiceEvents:             make(chan interface{}, 100),

		// Hodl htlc subscription indexes.
		hodlSubscriptions:        make(map[channeldb.CircuitKey]map[chan<- interface{}]struct{}),
		hodlReverseSubscriptions: make(map[chan<- interface{}]map[channeldb.CircuitKey]struct{}),

		htlcAutoReleaseChan: make(chan *htlcReleaseEvent),
		quit:                make(chan struct{}),
	}

	return registry
}
// scanInvoicesOnStart will scan all invoices on start and add active invoices
// to the invoice expiry watcher while also attempting to delete all canceled
// invoices.
func (i *InvoiceRegistry) scanInvoicesOnStart() error {
	var (
		pending   []invoiceExpiry
		removable []channeldb.InvoiceDeleteRef
	)

	reset := func() {
		// Zero out our results on start and if the scan is ever run
		// more than once. This latter case can happen if the kvdb
		// layer needs to retry the View transaction underneath (eg.
		// using the etcd driver, where all transactions are allowed
		// to retry for serializability).
		pending = nil
		removable = make([]channeldb.InvoiceDeleteRef, 0)
	}

	scanFunc := func(
		paymentHash lntypes.Hash, invoice *channeldb.Invoice) error {

		if invoice.IsPending() {
			// Still-pending invoices are watched so they can be
			// canceled once they expire.
			expiryRef := makeInvoiceExpiry(paymentHash, invoice)
			if expiryRef != nil {
				pending = append(pending, expiryRef)
			}
		} else if i.cfg.GcCanceledInvoicesOnStartup &&
			invoice.State == channeldb.ContractCanceled {

			// Consider invoice for removal if it is already
			// canceled. Invoices that are expired but not yet
			// canceled, will be queued up for cancellation after
			// startup and will be deleted afterwards.
			ref := channeldb.InvoiceDeleteRef{
				PayHash:     paymentHash,
				AddIndex:    invoice.AddIndex,
				SettleIndex: invoice.SettleIndex,
			}

			if invoice.Terms.PaymentAddr != channeldb.BlankPayAddr {
				ref.PayAddr = &invoice.Terms.PaymentAddr
			}

			removable = append(removable, ref)
		}
		return nil
	}

	err := i.cdb.ScanInvoices(scanFunc, reset)
	if err != nil {
		return err
	}

	log.Debugf("Adding %d pending invoices to the expiry watcher",
		len(pending))
	i.expiryWatcher.AddInvoices(pending...)

	if len(removable) > 0 {
		log.Infof("Attempting to delete %v canceled invoices",
			len(removable))

		// Deletion failures are logged rather than returned, since
		// garbage collection is best effort.
		if err := i.cdb.DeleteInvoice(removable); err != nil {
			log.Warnf("Deleting canceled invoices failed: %v", err)
		} else {
			log.Infof("Deleted %v canceled invoices",
				len(removable))
		}
	}

	return nil
}
// Start starts the registry and all goroutines it needs to carry out its task.
func (i *InvoiceRegistry) Start() error {
	// Start InvoiceExpiryWatcher and prepopulate it with existing active
	// invoices.
	if err := i.expiryWatcher.Start(i.cancelInvoiceImpl); err != nil {
		return err
	}

	i.wg.Add(1)
	go i.invoiceEventLoop()

	// Now scan all pending and removable invoices to the expiry watcher or
	// delete them.
	if err := i.scanInvoicesOnStart(); err != nil {
		_ = i.Stop()
		return err
	}

	return nil
}
// Stop signals the registry for a graceful shutdown.
func (i *InvoiceRegistry) Stop() error {
	log.Info("InvoiceRegistry shutting down")

	// Stop the expiry watcher first, then signal all registry goroutines
	// to exit and wait for them to finish.
	i.expiryWatcher.Stop()

	close(i.quit)

	i.wg.Wait()

	return nil
}
// invoiceEvent represents a new event that has modified an invoice on disk.
// Only two event types are currently supported: newly created invoices, and
// instances where invoices are settled.
type invoiceEvent struct {
	// hash is the payment hash of the invoice this event refers to.
	hash lntypes.Hash

	// invoice is the invoice after the modification was applied.
	invoice *channeldb.Invoice

	// setID, when non-nil, identifies the HTLC set this event applies to.
	// It is used to distinguish a newly added invoice from an AMP invoice
	// that is being settled.
	setID *[32]byte
}
// tickAt returns a channel that ticks at the specified time. If the time has
// already passed, it will tick immediately.
func (i *InvoiceRegistry) tickAt(t time.Time) <-chan time.Time {
	// A non-positive duration makes TickAfter fire right away.
	return i.cfg.Clock.TickAfter(t.Sub(i.cfg.Clock.Now()))
}
// invoiceEventLoop is the dedicated goroutine responsible for accepting
// new notification subscriptions, cancelling old subscriptions, and
// dispatching new invoice events.
func (i *InvoiceRegistry) invoiceEventLoop() {
	defer i.wg.Done()

	// Set up a heap for htlc auto-releases.
	autoReleaseHeap := &queue.PriorityQueue{}

	for {
		// If there is something to release, set up a release tick
		// channel.
		var nextReleaseTick <-chan time.Time
		if autoReleaseHeap.Len() > 0 {
			head := autoReleaseHeap.Top().(*htlcReleaseEvent)
			nextReleaseTick = i.tickAt(head.releaseTime)
		}

		select {
		// A new invoice subscription for all invoices has just arrived!
		// We'll query for any backlog notifications, then add it to the
		// set of clients.
		case newClient := <-i.newSubscriptions:
			log.Infof("New invoice subscription "+
				"client: id=%v", newClient.id)

			// With the backlog notifications delivered (if any),
			// we'll add this to our active subscriptions and
			// continue.
			i.notificationClients[newClient.id] = newClient

		// A client no longer wishes to receive invoice notifications.
		// So we'll remove them from the set of active clients.
		case clientID := <-i.subscriptionCancels:
			log.Infof("Cancelling invoice subscription for "+
				"client=%v", clientID)

			delete(i.notificationClients, clientID)
			delete(i.singleNotificationClients, clientID)

		// An invoice event has come in. This can either be an update to
		// an invoice or a new single invoice subscriber. Both types of
		// events are passed in via the same channel, to make sure that
		// subscribers get a consistent view of the event sequence.
		case event := <-i.invoiceEvents:
			switch e := event.(type) {
			// A sub-system has just modified the invoice state, so
			// we'll dispatch notifications to all registered
			// clients.
			case *invoiceEvent:
				// For backwards compatibility, do not notify
				// all invoice subscribers of cancel and accept
				// events.
				state := e.invoice.State
				if state != channeldb.ContractCanceled &&
					state != channeldb.ContractAccepted {

					i.dispatchToClients(e)
				}

				i.dispatchToSingleClients(e)

			// A new single invoice subscription has arrived. Add it
			// to the set of clients. It is important to do this in
			// sequence with any other invoice events, because an
			// initial invoice update has already been sent out to
			// the subscriber.
			case *SingleInvoiceSubscription:
				log.Infof("New single invoice subscription "+
					"client: id=%v, ref=%v", e.id,
					e.invoiceRef)

				i.singleNotificationClients[e.id] = e
			}

		// A new htlc came in for auto-release.
		case event := <-i.htlcAutoReleaseChan:
			log.Debugf("Scheduling auto-release for htlc: "+
				"ref=%v, key=%v at %v",
				event.invoiceRef, event.key, event.releaseTime)

			// We use an independent timer for every htlc rather
			// than a set timer that is reset with every htlc coming
			// in. Otherwise the sender could keep resetting the
			// timer until the broadcast window is entered and our
			// channel is force closed.
			autoReleaseHeap.Push(event)

		// The htlc at the top of the heap needs to be auto-released.
		case <-nextReleaseTick:
			event := autoReleaseHeap.Pop().(*htlcReleaseEvent)
			err := i.cancelSingleHtlc(
				event.invoiceRef, event.key, ResultMppTimeout,
			)
			if err != nil {
				log.Errorf("HTLC timer: %v", err)
			}

		case <-i.quit:
			return
		}
	}
}
// dispatchToSingleClients passes the supplied event to every single-invoice
// subscriber whose watched payment hash matches the hash carried by the
// event. Subscribers watching other invoices are left untouched.
func (i *InvoiceRegistry) dispatchToSingleClients(event *invoiceEvent) {
	// Walk all single invoice subscriptions and notify only those that
	// reference the invoice this event belongs to.
	for _, sub := range i.singleNotificationClients {
		hash := sub.invoiceRef.PayHash()
		if hash != nil && *hash == event.hash {
			sub.notify(event)
		}
	}
}
// dispatchToClients passes the supplied event to all notification clients that
// subscribed to all invoices. Add and settle indices are used to make sure that
// clients don't receive duplicate or unwanted events. For AMP invoices, a
// non-nil event.setID marks a sub-set settle on an otherwise open invoice.
func (i *InvoiceRegistry) dispatchToClients(event *invoiceEvent) {
	invoice := event.invoice

	for clientID, client := range i.notificationClients {
		// Before we dispatch this event, we'll check
		// to ensure that this client hasn't already
		// received this notification in order to
		// ensure we don't duplicate any events.

		// TODO(joostjager): Refactor switches.
		state := event.invoice.State
		switch {
		// If we've already sent this settle event to
		// the client, then we can skip this.
		case state == channeldb.ContractSettled &&
			client.settleIndex >= invoice.SettleIndex:
			continue

		// Similarly, if we've already sent this add to
		// the client then we can skip this one, but only if this isn't
		// an AMP invoice. AMP invoices always remain in the settle
		// state as a base invoice.
		case event.setID == nil && state == channeldb.ContractOpen &&
			client.addIndex >= invoice.AddIndex:
			continue

		// These two states should never happen, but we
		// log them just in case so we can detect this
		// instance.
		case state == channeldb.ContractOpen &&
			client.addIndex+1 != invoice.AddIndex:
			log.Warnf("client=%v for invoice "+
				"notifications missed an update, "+
				"add_index=%v, new add event index=%v",
				clientID, client.addIndex,
				invoice.AddIndex)

		case state == channeldb.ContractSettled &&
			client.settleIndex+1 != invoice.SettleIndex:
			log.Warnf("client=%v for invoice "+
				"notifications missed an update, "+
				"settle_index=%v, new settle event index=%v",
				clientID, client.settleIndex,
				invoice.SettleIndex)
		}

		// Hand the event to the client's queue, bailing out if the
		// registry is shutting down before the send completes.
		select {
		case client.ntfnQueue.ChanIn() <- &invoiceEvent{
			invoice: invoice,
			setID:   event.setID,
		}:
		case <-i.quit:
			return
		}

		// Each time we send a notification to a client, we'll record
		// the latest add/settle index it has. We'll use this to ensure
		// we don't send a notification twice, which can happen if a new
		// event is added while we're catching up a new client.
		invState := event.invoice.State
		switch {
		case invState == channeldb.ContractSettled:
			client.settleIndex = invoice.SettleIndex

		case invState == channeldb.ContractOpen && event.setID == nil:
			client.addIndex = invoice.AddIndex

		// If this is an AMP invoice, then we'll need to use the set ID
		// to keep track of the settle index of the client. AMP
		// invoices never go to the open state, but if a setID is
		// passed, then we know it was just settled and will track the
		// highest settle index so far.
		case invState == channeldb.ContractOpen && event.setID != nil:
			setID := *event.setID
			client.settleIndex = invoice.AMPState[setID].SettleIndex

		default:
			log.Errorf("unexpected invoice state: %v",
				event.invoice.State)
		}
	}
}
// deliverBacklogEvents attempts to query the invoice database for any
// notifications that the client has missed since it reconnected last. Add
// events are delivered before settle events so the client catches up in
// order. Returns ErrShuttingDown if the registry quits mid-delivery.
func (i *InvoiceRegistry) deliverBacklogEvents(client *InvoiceSubscription) error {
	addEvents, err := i.cdb.InvoicesAddedSince(client.addIndex)
	if err != nil {
		return err
	}

	settleEvents, err := i.cdb.InvoicesSettledSince(client.settleIndex)
	if err != nil {
		return err
	}

	// If we have any to deliver, then we'll append them to the end of the
	// notification queue in order to catch up the client before delivering
	// any new notifications.
	for _, addEvent := range addEvents {
		// We re-bind the loop variable to ensure we don't hold onto
		// the loop reference causing it to point to the same item.
		addEvent := addEvent

		select {
		case client.ntfnQueue.ChanIn() <- &invoiceEvent{
			invoice: &addEvent,
		}:
		case <-i.quit:
			return ErrShuttingDown
		}
	}

	for _, settleEvent := range settleEvents {
		// We re-bind the loop variable to ensure we don't hold onto
		// the loop reference causing it to point to the same item.
		settleEvent := settleEvent

		select {
		case client.ntfnQueue.ChanIn() <- &invoiceEvent{
			invoice: &settleEvent,
		}:
		case <-i.quit:
			return ErrShuttingDown
		}
	}

	return nil
}
// deliverSingleBacklogEvents attempts to query the invoice database to
// retrieve the current invoice state and deliver this to the subscriber.
// Single invoice subscribers will always receive the current state right
// after subscribing. Only in case the invoice does not yet exist, nothing is
// sent yet.
func (i *InvoiceRegistry) deliverSingleBacklogEvents(
	client *SingleInvoiceSubscription) error {

	invoice, err := i.cdb.LookupInvoice(client.invoiceRef)
	switch {
	// It is possible that the invoice does not exist yet, but the client
	// is already watching it in anticipation. That is not an error.
	case err == channeldb.ErrInvoiceNotFound,
		err == channeldb.ErrNoInvoicesCreated:

		return nil

	case err != nil:
		return err
	}

	// Only hash-based references can be matched against events; without a
	// payment hash there is nothing to deliver.
	hash := client.invoiceRef.PayHash()
	if hash == nil {
		return nil
	}

	return client.notify(&invoiceEvent{
		hash:    *hash,
		invoice: &invoice,
	})
}
// AddInvoice adds a regular invoice for the specified amount, identified by
// the passed preimage. Additionally, any memo or receipt data provided will
// also be stored on-disk. Once this invoice is added, subsystems within the
// daemon add/forward HTLCs are able to obtain the proper preimage required for
// redemption in the case that we're the final destination. We also return the
// addIndex of the newly created invoice which monotonically increases for each
// new invoice added. A side effect of this function is that it also sets
// AddIndex on the invoice argument.
func (i *InvoiceRegistry) AddInvoice(invoice *channeldb.Invoice,
	paymentHash lntypes.Hash) (uint64, error) {

	// Note: the lock is deliberately not deferred here; it must be
	// released before touching the expiry watcher below.
	i.Lock()

	ref := channeldb.InvoiceRefByHash(paymentHash)
	log.Debugf("Invoice%v: added with terms %v", ref, invoice.Terms)

	addIndex, err := i.cdb.AddInvoice(invoice, paymentHash)
	if err != nil {
		i.Unlock()
		return 0, err
	}

	// Now that we've added the invoice, we'll dispatch a message to
	// notify the clients of this new invoice.
	i.notifyClients(paymentHash, invoice, nil)
	i.Unlock()

	// InvoiceExpiryWatcher.AddInvoice must not be locked by InvoiceRegistry
	// to avoid deadlock when a new invoice is added while another is being
	// canceled.
	invoiceExpiryRef := makeInvoiceExpiry(paymentHash, invoice)
	if invoiceExpiryRef != nil {
		i.expiryWatcher.AddInvoices(invoiceExpiryRef)
	}

	return addIndex, nil
}
// LookupInvoice looks up an invoice by its payment hash (R-Hash), if found
// then we're able to pull the funds pending within an HTLC.
//
// TODO(roasbeef): ignore if settled?
func (i *InvoiceRegistry) LookupInvoice(rHash lntypes.Hash) (channeldb.Invoice,
	error) {

	// Build a hash-based reference and query the database for a matching
	// invoice directly.
	return i.cdb.LookupInvoice(channeldb.InvoiceRefByHash(rHash))
}
// LookupInvoiceByRef looks up an invoice by the given reference, if found
// then we're able to pull the funds pending within an HTLC. The invoice is
// returned by value; a failed lookup is reported via the error.
func (i *InvoiceRegistry) LookupInvoiceByRef(
	ref channeldb.InvoiceRef) (channeldb.Invoice, error) {
	return i.cdb.LookupInvoice(ref)
}
// startHtlcTimer starts a new timer via the invoice registry main loop that
// cancels a single htlc on an invoice when the htlc hold duration has passed.
// Returns ErrShuttingDown when the registry quits before the event is
// accepted.
func (i *InvoiceRegistry) startHtlcTimer(invoiceRef channeldb.InvoiceRef,
	key channeldb.CircuitKey, acceptTime time.Time) error {

	// The release deadline is a fixed hold duration past the moment the
	// htlc was accepted.
	event := &htlcReleaseEvent{
		invoiceRef:  invoiceRef,
		key:         key,
		releaseTime: acceptTime.Add(i.cfg.HtlcHoldDuration),
	}

	// Hand the event off to the main loop, respecting shutdown.
	select {
	case <-i.quit:
		return ErrShuttingDown

	case i.htlcAutoReleaseChan <- event:
		return nil
	}
}
// cancelSingleHtlc cancels a single accepted htlc on an invoice. It takes
// a resolution result which will be used to notify subscribed links and
// resolvers of the details of the htlc cancellation. Cancelling is a no-op
// (nil error) when the invoice is no longer open or the htlc was already
// resolved.
func (i *InvoiceRegistry) cancelSingleHtlc(invoiceRef channeldb.InvoiceRef,
	key channeldb.CircuitKey, result FailResolutionResult) error {

	i.Lock()
	defer i.Unlock()

	// updateInvoice is the callback run inside the db update. Returning a
	// nil descriptor means "no change"; the caller detects that below.
	updateInvoice := func(invoice *channeldb.Invoice) (
		*channeldb.InvoiceUpdateDesc, error) {

		// Only allow individual htlc cancelation on open invoices.
		if invoice.State != channeldb.ContractOpen {
			log.Debugf("cancelSingleHtlc: invoice %v no longer "+
				"open", invoiceRef)

			return nil, nil
		}

		// Lookup the current status of the htlc in the database.
		htlc, ok := invoice.Htlcs[key]
		if !ok {
			return nil, fmt.Errorf("htlc %v not found", key)
		}

		// Cancelation is only possible if the htlc wasn't already
		// resolved.
		if htlc.State != channeldb.HtlcStateAccepted {
			log.Debugf("cancelSingleHtlc: htlc %v on invoice %v "+
				"is already resolved", key, invoiceRef)

			return nil, nil
		}

		log.Debugf("cancelSingleHtlc: cancelling htlc %v on invoice %v",
			key, invoiceRef)

		// Return an update descriptor that cancels htlc and keeps
		// invoice open.
		canceledHtlcs := map[channeldb.CircuitKey]struct{}{
			key: {},
		}

		return &channeldb.InvoiceUpdateDesc{
			CancelHtlcs: canceledHtlcs,
		}, nil
	}

	// Try to mark the specified htlc as canceled in the invoice database.
	// Intercept the update descriptor to set the local updated variable. If
	// no invoice update is performed, we can return early.
	var updated bool
	invoice, err := i.cdb.UpdateInvoice(invoiceRef, nil,
		func(invoice *channeldb.Invoice) (
			*channeldb.InvoiceUpdateDesc, error) {

			updateDesc, err := updateInvoice(invoice)
			if err != nil {
				return nil, err
			}

			updated = updateDesc != nil

			return updateDesc, err
		},
	)
	if err != nil {
		return err
	}
	if !updated {
		return nil
	}

	// The invoice has been updated. Notify subscribers of the htlc
	// resolution.
	htlc, ok := invoice.Htlcs[key]
	if !ok {
		return fmt.Errorf("htlc %v not found", key)
	}
	if htlc.State == channeldb.HtlcStateCanceled {
		resolution := NewFailResolution(
			key, int32(htlc.AcceptHeight), result,
		)

		i.notifyHodlSubscribers(resolution)
	}
	return nil
}
// processKeySend just-in-time inserts an invoice if this htlc is a keysend
// htlc. Non-keysend htlcs (no keysend custom record) pass through untouched.
func (i *InvoiceRegistry) processKeySend(ctx invoiceUpdateCtx) error {
	// Retrieve keysend record if present.
	preimageSlice, ok := ctx.customRecords[record.KeySendType]
	if !ok {
		return nil
	}

	// Cancel the htlc if the preimage is invalid.
	preimage, err := lntypes.MakePreimage(preimageSlice)
	if err != nil {
		return err
	}
	if preimage.Hash() != ctx.hash {
		return fmt.Errorf("invalid keysend preimage %v for hash %v",
			preimage, ctx.hash)
	}

	// Only allow keysend for non-mpp payments.
	if ctx.mpp != nil {
		return errors.New("no mpp keysend supported")
	}

	// Create an invoice for the htlc amount.
	amt := ctx.amtPaid

	// Set tlv optional feature vector on the invoice. Otherwise we wouldn't
	// be able to pay to it with keysend.
	rawFeatures := lnwire.NewRawFeatureVector(
		lnwire.TLVOnionPayloadOptional,
	)
	features := lnwire.NewFeatureVector(rawFeatures, lnwire.Features)

	// Use the minimum block delta that we require for settling htlcs.
	finalCltvDelta := i.cfg.FinalCltvRejectDelta

	// Pre-check expiry here to prevent inserting an invoice that will not
	// be settled.
	if ctx.expiry < uint32(ctx.currentHeight+finalCltvDelta) {
		return errors.New("final expiry too soon")
	}

	// The invoice database indexes all invoices by payment address, however
	// legacy keysend payment do not have one. In order to avoid a new
	// payment type on-disk wrt. to indexing, we'll continue to insert a
	// blank payment address which is special cased in the insertion logic
	// to not be indexed. In the future, once AMP is merged, this should be
	// replaced by generating a random payment address on the behalf of the
	// sender.
	payAddr := channeldb.BlankPayAddr

	// Create placeholder invoice.
	invoice := &channeldb.Invoice{
		CreationDate: i.cfg.Clock.Now(),
		Terms: channeldb.ContractTerm{
			FinalCltvDelta:  finalCltvDelta,
			Value:           amt,
			PaymentPreimage: &preimage,
			PaymentAddr:     payAddr,
			Features:        features,
		},
	}

	// A non-zero hold time turns the placeholder into a hodl invoice with
	// the configured expiry.
	if i.cfg.KeysendHoldTime != 0 {
		invoice.HodlInvoice = true
		invoice.Terms.Expiry = i.cfg.KeysendHoldTime
	}

	// Insert invoice into database. Ignore duplicates, because this
	// may be a replay.
	_, err = i.AddInvoice(invoice, ctx.hash)
	if err != nil && err != channeldb.ErrDuplicateInvoice {
		return err
	}

	return nil
}
// processAMP just-in-time inserts an invoice if this htlc is an AMP htlc.
// (The prior comment mistakenly said "keysend".)
func (i *InvoiceRegistry) processAMP(ctx invoiceUpdateCtx) error {
	// AMP payments MUST also include an MPP record.
	if ctx.mpp == nil {
		return errors.New("no MPP record for AMP")
	}

	// Create an invoice for the total amount expected, provided in the MPP
	// record.
	amt := ctx.mpp.TotalMsat()

	// Set the TLV and MPP optional features on the invoice. We'll also make
	// the AMP features required so that it can't be paid by legacy or MPP
	// htlcs.
	rawFeatures := lnwire.NewRawFeatureVector(
		lnwire.TLVOnionPayloadOptional,
		lnwire.PaymentAddrOptional,
		lnwire.AMPRequired,
	)
	features := lnwire.NewFeatureVector(rawFeatures, lnwire.Features)

	// Use the minimum block delta that we require for settling htlcs.
	finalCltvDelta := i.cfg.FinalCltvRejectDelta

	// Pre-check expiry here to prevent inserting an invoice that will not
	// be settled.
	if ctx.expiry < uint32(ctx.currentHeight+finalCltvDelta) {
		return errors.New("final expiry too soon")
	}

	// We'll use the sender-generated payment address provided in the HTLC
	// to create our AMP invoice.
	payAddr := ctx.mpp.PaymentAddr()

	// Create placeholder invoice. Note there is no preimage: AMP preimages
	// live on the individual htlcs.
	invoice := &channeldb.Invoice{
		CreationDate: i.cfg.Clock.Now(),
		Terms: channeldb.ContractTerm{
			FinalCltvDelta:  finalCltvDelta,
			Value:           amt,
			PaymentPreimage: nil,
			PaymentAddr:     payAddr,
			Features:        features,
		},
	}

	// Insert invoice into database. Ignore duplicates payment hashes and
	// payment addrs, this may be a replay or a different HTLC for the AMP
	// invoice.
	_, err := i.AddInvoice(invoice, ctx.hash)
	switch {
	case err == channeldb.ErrDuplicateInvoice:
		return nil
	case err == channeldb.ErrDuplicatePayAddr:
		return nil
	default:
		return err
	}
}
// NotifyExitHopHtlc attempts to mark an invoice as settled. The return value
// describes how the htlc should be resolved.
//
// When the preimage of the invoice is not yet known (hodl invoice), this
// function moves the invoice to the accepted state. When SettleHoldInvoice is
// called later, a resolution message will be send back to the caller via the
// provided hodlChan. Invoice registry sends on this channel what action needs
// to be taken on the htlc (settle or cancel). The caller needs to ensure that
// the channel is either buffered or received on from another goroutine to
// prevent deadlock.
//
// In the case that the htlc is part of a larger set of htlcs that pay to the
// same invoice (multi-path payment), the htlc is held until the set is
// complete. If the set doesn't fully arrive in time, a timer will cancel the
// held htlc.
func (i *InvoiceRegistry) NotifyExitHopHtlc(rHash lntypes.Hash,
	amtPaid lnwire.MilliSatoshi, expiry uint32, currentHeight int32,
	circuitKey channeldb.CircuitKey, hodlChan chan<- interface{},
	payload Payload) (HtlcResolution, error) {

	// Create the update context containing the relevant details of the
	// incoming htlc.
	ctx := invoiceUpdateCtx{
		hash:                 rHash,
		circuitKey:           circuitKey,
		amtPaid:              amtPaid,
		expiry:               expiry,
		currentHeight:        currentHeight,
		finalCltvRejectDelta: i.cfg.FinalCltvRejectDelta,
		customRecords:        payload.CustomRecords(),
		mpp:                  payload.MultiPath(),
		amp:                  payload.AMPRecord(),
	}

	// Spontaneous-payment pre-processing: JIT-insert an invoice before
	// taking the registry lock. Errors here fail the htlc, not the call.
	switch {
	// If we are accepting spontaneous AMP payments and this payload
	// contains an AMP record, create an AMP invoice that will be settled
	// below.
	case i.cfg.AcceptAMP && ctx.amp != nil:
		err := i.processAMP(ctx)
		if err != nil {
			ctx.log(fmt.Sprintf("amp error: %v", err))

			return NewFailResolution(
				circuitKey, currentHeight, ResultAmpError,
			), nil
		}

	// If we are accepting spontaneous keysend payments, create a regular
	// invoice that will be settled below. We also enforce that this is only
	// done when no AMP payload is present since it will only be settle-able
	// by regular HTLCs.
	case i.cfg.AcceptKeySend && ctx.amp == nil:
		err := i.processKeySend(ctx)
		if err != nil {
			ctx.log(fmt.Sprintf("keysend error: %v", err))

			return NewFailResolution(
				circuitKey, currentHeight, ResultKeySendError,
			), nil
		}
	}

	// Execute locked notify exit hop logic. The lock is released before
	// the timer below is started to avoid a deadlock with the main loop.
	i.Lock()
	resolution, err := i.notifyExitHopHtlcLocked(&ctx, hodlChan)
	i.Unlock()
	if err != nil {
		return nil, err
	}

	switch r := resolution.(type) {
	// The htlc is held. Start a timer outside the lock if the htlc should
	// be auto-released, because otherwise a deadlock may happen with the
	// main event loop.
	case *htlcAcceptResolution:
		if r.autoRelease {
			err := i.startHtlcTimer(
				ctx.invoiceRef(), circuitKey, r.acceptTime,
			)
			if err != nil {
				return nil, err
			}
		}

		// We return a nil resolution because htlc acceptances are
		// represented as nil resolutions externally.
		// TODO(carla) update calling code to handle accept resolutions.
		return nil, nil

	// A direct resolution was received for this htlc.
	case HtlcResolution:
		return r, nil

	// Fail if an unknown resolution type was received.
	default:
		return nil, errors.New("invalid resolution type")
	}
}
// notifyExitHopHtlcLocked is the internal implementation of NotifyExitHopHtlc
// that should be executed inside the registry lock. It updates the invoice in
// the database, then fans out the resulting resolution to hodl subscribers
// and (when the invoice state changed) to invoice notification clients.
func (i *InvoiceRegistry) notifyExitHopHtlcLocked(
	ctx *invoiceUpdateCtx, hodlChan chan<- interface{}) (
	HtlcResolution, error) {

	// We'll attempt to settle an invoice matching this rHash on disk (if
	// one exists). The callback will update the invoice state and/or htlcs.
	var (
		resolution        HtlcResolution
		updateSubscribers bool
	)
	invoice, err := i.cdb.UpdateInvoice(
		ctx.invoiceRef(),
		(*channeldb.SetID)(ctx.setID()),
		func(inv *channeldb.Invoice) (
			*channeldb.InvoiceUpdateDesc, error) {

			updateDesc, res, err := updateInvoice(ctx, inv)
			if err != nil {
				return nil, err
			}

			// Only send an update if the invoice state was changed.
			updateSubscribers = updateDesc != nil &&
				updateDesc.State != nil

			// Assign resolution to outer scope variable.
			resolution = res

			return updateDesc, nil
		},
	)
	switch err {
	case channeldb.ErrInvoiceNotFound:
		// If the invoice was not found, return a failure resolution
		// with an invoice not found result.
		return NewFailResolution(
			ctx.circuitKey, ctx.currentHeight,
			ResultInvoiceNotFound,
		), nil

	case nil:

	default:
		ctx.log(err.Error())
		return nil, err
	}

	switch res := resolution.(type) {
	case *HtlcFailResolution:
		// Inspect latest htlc state on the invoice. If it is found,
		// we will update the accept height as it was recorded in the
		// invoice database (which occurs in the case where the htlc
		// reached the database in a previous call). If the htlc was
		// not found on the invoice, it was immediately failed so we
		// send the failure resolution as is, which has the current
		// height set as the accept height.
		invoiceHtlc, ok := invoice.Htlcs[ctx.circuitKey]
		if ok {
			res.AcceptHeight = int32(invoiceHtlc.AcceptHeight)
		}

		ctx.log(fmt.Sprintf("failure resolution result "+
			"outcome: %v, at accept height: %v",
			res.Outcome, res.AcceptHeight))

		// Some failures apply to the entire HTLC set. Break here if
		// this isn't one of them.
		if !res.Outcome.IsSetFailure() {
			break
		}

		// Also cancel any HTLCs in the HTLC set that are also in the
		// canceled state with the same failure result.
		setID := ctx.setID()
		canceledHtlcSet := invoice.HTLCSet(setID, channeldb.HtlcStateCanceled)
		for key, htlc := range canceledHtlcSet {
			htlcFailResolution := NewFailResolution(
				key, int32(htlc.AcceptHeight), res.Outcome,
			)

			i.notifyHodlSubscribers(htlcFailResolution)
		}

	// If the htlc was settled, we will settle any previously accepted
	// htlcs and notify our peer to settle them.
	case *HtlcSettleResolution:
		ctx.log(fmt.Sprintf("settle resolution result "+
			"outcome: %v, at accept height: %v",
			res.Outcome, res.AcceptHeight))

		// Also settle any previously accepted htlcs. If a htlc is
		// marked as settled, we should follow now and settle the htlc
		// with our peer.
		setID := ctx.setID()
		settledHtlcSet := invoice.HTLCSet(setID, channeldb.HtlcStateSettled)
		for key, htlc := range settledHtlcSet {
			// AMP htlcs carry their own per-htlc preimage; prefer
			// it over the resolution's preimage when present.
			preimage := res.Preimage
			if htlc.AMP != nil && htlc.AMP.Preimage != nil {
				preimage = *htlc.AMP.Preimage
			}

			// Notify subscribers that the htlcs should be settled
			// with our peer. Note that the outcome of the
			// resolution is set based on the outcome of the single
			// htlc that we just settled, so may not be accurate
			// for all htlcs.
			htlcSettleResolution := NewSettleResolution(
				preimage, key,
				int32(htlc.AcceptHeight), res.Outcome,
			)

			// Notify subscribers that the htlc should be settled
			// with our peer.
			i.notifyHodlSubscribers(htlcSettleResolution)
		}

		// If concurrent payments were attempted to this invoice before
		// the current one was ultimately settled, cancel back any of
		// the HTLCs immediately. As a result of the settle, the HTLCs
		// in other HTLC sets are automatically converted to a canceled
		// state when updating the invoice.
		//
		// TODO(roasbeef): can remove now??
		canceledHtlcSet := invoice.HTLCSetCompliment(
			setID, channeldb.HtlcStateCanceled,
		)
		for key, htlc := range canceledHtlcSet {
			htlcFailResolution := NewFailResolution(
				key, int32(htlc.AcceptHeight),
				ResultInvoiceAlreadySettled,
			)

			i.notifyHodlSubscribers(htlcFailResolution)
		}

	// If we accepted the htlc, subscribe to the hodl invoice and return
	// an accept resolution with the htlc's accept time on it.
	case *htlcAcceptResolution:
		invoiceHtlc, ok := invoice.Htlcs[ctx.circuitKey]
		if !ok {
			return nil, fmt.Errorf("accepted htlc: %v not"+
				" present on invoice: %x", ctx.circuitKey,
				ctx.hash[:])
		}

		// Determine accepted height of this htlc. If the htlc reached
		// the invoice database (possibly in a previous call to the
		// invoice registry), we'll take the original accepted height
		// as it was recorded in the database.
		acceptHeight := int32(invoiceHtlc.AcceptHeight)

		ctx.log(fmt.Sprintf("accept resolution result "+
			"outcome: %v, at accept height: %v",
			res.outcome, acceptHeight))

		// Auto-release the htlc if the invoice is still open. It can
		// only happen for mpp payments that there are htlcs in state
		// Accepted while the invoice is Open.
		if invoice.State == channeldb.ContractOpen {
			res.acceptTime = invoiceHtlc.AcceptTime
			res.autoRelease = true
		}

		// If we have fully accepted the set of htlcs for this invoice,
		// we can now add it to our invoice expiry watcher. We do not
		// add invoices before they are fully accepted, because it is
		// possible that we MppTimeout the htlcs, and then our relevant
		// expiry height could change.
		if res.outcome == resultAccepted {
			expiry := makeInvoiceExpiry(ctx.hash, invoice)
			i.expiryWatcher.AddInvoices(expiry)
		}

		i.hodlSubscribe(hodlChan, ctx.circuitKey)

	default:
		panic("unknown action")
	}

	// Now that the links have been notified of any state changes to their
	// HTLCs, we'll go ahead and notify any clients waiting on the invoice
	// state changes.
	if updateSubscribers {
		// We'll add a setID onto the notification, but only if this is
		// an AMP invoice being settled.
		var setID *[32]byte
		if _, ok := resolution.(*HtlcSettleResolution); ok {
			setID = ctx.setID()
		}

		i.notifyClients(ctx.hash, invoice, setID)
	}

	return resolution, nil
}
// SettleHodlInvoice sets the preimage of a hodl invoice. The invoice must be
// in the accepted state; open, canceled and settled invoices are rejected
// with the corresponding channeldb error.
func (i *InvoiceRegistry) SettleHodlInvoice(preimage lntypes.Preimage) error {
	i.Lock()
	defer i.Unlock()

	// updateInvoice runs inside the db transaction and validates the
	// current contract state before flipping it to settled.
	updateInvoice := func(invoice *channeldb.Invoice) (
		*channeldb.InvoiceUpdateDesc, error) {

		switch invoice.State {
		case channeldb.ContractOpen:
			return nil, channeldb.ErrInvoiceStillOpen
		case channeldb.ContractCanceled:
			return nil, channeldb.ErrInvoiceAlreadyCanceled
		case channeldb.ContractSettled:
			return nil, channeldb.ErrInvoiceAlreadySettled
		}

		return &channeldb.InvoiceUpdateDesc{
			State: &channeldb.InvoiceStateUpdateDesc{
				NewState: channeldb.ContractSettled,
				Preimage: &preimage,
			},
		}, nil
	}

	hash := preimage.Hash()
	invoiceRef := channeldb.InvoiceRefByHash(hash)
	invoice, err := i.cdb.UpdateInvoice(invoiceRef, nil, updateInvoice)
	if err != nil {
		log.Errorf("SettleHodlInvoice with preimage %v: %v",
			preimage, err)

		return err
	}

	log.Debugf("Invoice%v: settled with preimage %v", invoiceRef,
		invoice.Terms.PaymentPreimage)

	// In the callback, we marked the invoice as settled. UpdateInvoice will
	// have seen this and should have moved all htlcs that were accepted to
	// the settled state. In the loop below, we go through all of these and
	// notify links and resolvers that are waiting for resolution. Any htlcs
	// that were already settled before, will be notified again. This isn't
	// necessary but doesn't hurt either.
	for key, htlc := range invoice.Htlcs {
		if htlc.State != channeldb.HtlcStateSettled {
			continue
		}

		resolution := NewSettleResolution(
			preimage, key, int32(htlc.AcceptHeight), ResultSettled,
		)

		i.notifyHodlSubscribers(resolution)
	}
	i.notifyClients(hash, invoice, nil)

	return nil
}
// CancelInvoice attempts to cancel the invoice corresponding to the passed
// payment hash. Invoices in the accepted state are canceled as well, since
// cancelAccepted is passed as true.
func (i *InvoiceRegistry) CancelInvoice(payHash lntypes.Hash) error {
	return i.cancelInvoiceImpl(payHash, true)
}
// shouldCancel examines the state of an invoice and whether we want to
// cancel already accepted invoices, taking our force cancel boolean into
// account. This is pulled out into its own function so that tests that mock
// cancelInvoiceImpl can reuse this logic.
func shouldCancel(state channeldb.ContractState, cancelAccepted bool) bool {
	// Any non-accepted invoice may be canceled unconditionally; an
	// accepted one only when forced cancelation was requested.
	return state != channeldb.ContractAccepted || cancelAccepted
}
// cancelInvoiceImpl attempts to cancel the invoice corresponding to the
// passed payment hash. Accepted invoices will only be canceled if explicitly
// requested to do so via cancelAccepted. It notifies subscribing links and
// resolvers that the associated htlcs were canceled if they change state.
//
// Fixes: the "remains accepted" debug message previously rendered as
// "wasn'texplicitly" due to a missing space between concatenated string
// parts; the invoice reference is now also derived only once.
func (i *InvoiceRegistry) cancelInvoiceImpl(payHash lntypes.Hash,
	cancelAccepted bool) error {

	i.Lock()
	defer i.Unlock()

	ref := channeldb.InvoiceRefByHash(payHash)
	log.Debugf("Invoice%v: canceling invoice", ref)

	// updateInvoice is run inside the db update. Returning a nil
	// descriptor leaves the invoice untouched (accepted invoice without
	// forced cancelation).
	updateInvoice := func(invoice *channeldb.Invoice) (
		*channeldb.InvoiceUpdateDesc, error) {

		if !shouldCancel(invoice.State, cancelAccepted) {
			return nil, nil
		}

		// Move invoice to the canceled state. Rely on validation in
		// channeldb to return an error if the invoice is already
		// settled or canceled.
		return &channeldb.InvoiceUpdateDesc{
			State: &channeldb.InvoiceStateUpdateDesc{
				NewState: channeldb.ContractCanceled,
			},
		}, nil
	}

	// Reuse the reference computed above rather than deriving it a second
	// time from the same payment hash.
	invoice, err := i.cdb.UpdateInvoice(ref, nil, updateInvoice)

	// Implement idempotency by returning success if the invoice was already
	// canceled.
	if err == channeldb.ErrInvoiceAlreadyCanceled {
		log.Debugf("Invoice%v: already canceled", ref)
		return nil
	}
	if err != nil {
		return err
	}

	// Return without cancellation if the invoice state is ContractAccepted.
	if invoice.State == channeldb.ContractAccepted {
		log.Debugf("Invoice%v: remains accepted as cancel wasn't "+
			"explicitly requested.", ref)
		return nil
	}

	log.Debugf("Invoice%v: canceled", ref)

	// In the callback, some htlcs may have been moved to the canceled
	// state. We now go through all of these and notify links and resolvers
	// that are waiting for resolution. Any htlcs that were already canceled
	// before, will be notified again. This isn't necessary but doesn't hurt
	// either.
	for key, htlc := range invoice.Htlcs {
		if htlc.State != channeldb.HtlcStateCanceled {
			continue
		}

		i.notifyHodlSubscribers(
			NewFailResolution(
				key, int32(htlc.AcceptHeight), ResultCanceled,
			),
		)
	}
	i.notifyClients(payHash, invoice, nil)

	// Attempt to also delete the invoice if requested through the registry
	// config.
	if i.cfg.GcCanceledInvoicesOnTheFly {
		// Assemble the delete reference and attempt to delete the
		// invoice from the DB.
		deleteRef := channeldb.InvoiceDeleteRef{
			PayHash:     payHash,
			AddIndex:    invoice.AddIndex,
			SettleIndex: invoice.SettleIndex,
		}
		if invoice.Terms.PaymentAddr != channeldb.BlankPayAddr {
			deleteRef.PayAddr = &invoice.Terms.PaymentAddr
		}

		err = i.cdb.DeleteInvoice(
			[]channeldb.InvoiceDeleteRef{deleteRef},
		)
		// If by any chance deletion failed, then log it instead of
		// returning the error, as the invoice itself has already been
		// canceled.
		if err != nil {
			log.Warnf("Invoice%v could not be deleted: %v",
				ref, err)
		}
	}

	return nil
}
// notifyClients notifies all currently registered invoice notification clients
// of a newly added/settled invoice by handing an event to the registry's
// event channel. The send is abandoned if the registry shuts down first.
func (i *InvoiceRegistry) notifyClients(hash lntypes.Hash,
	invoice *channeldb.Invoice, setID *[32]byte) {

	select {
	case i.invoiceEvents <- &invoiceEvent{
		invoice: invoice,
		hash:    hash,
		setID:   setID,
	}:

	case <-i.quit:
	}
}
// invoiceSubscriptionKit defines the fields that are common to both all-invoice
// subscribers and single invoice subscribers.
type invoiceSubscriptionKit struct {
	// id uniquely identifies this subscription within the registry.
	id uint32

	// inv is the registry this subscription belongs to; used to deliver
	// cancel requests and observe shutdown.
	inv *InvoiceRegistry

	// ntfnQueue decouples event producers from the (possibly slow)
	// client-side consumer.
	ntfnQueue *queue.ConcurrentQueue

	// canceled is set to 1 by the first Cancel call. To be used atomically.
	canceled uint32

	// cancelChan is closed on Cancel to signal the consumer goroutine to
	// exit.
	cancelChan chan struct{}

	// wg tracks the subscription's goroutine so Cancel can wait for it.
	wg sync.WaitGroup
}
// InvoiceSubscription represents an intent to receive updates for newly added
// or settled invoices. For each newly added invoice, a copy of the invoice
// will be sent over the NewInvoices channel. Similarly, for each newly settled
// invoice, a copy of the invoice will be sent over the SettledInvoices
// channel.
type InvoiceSubscription struct {
	invoiceSubscriptionKit

	// NewInvoices is a channel that we'll use to send all newly created
	// invoices with an invoice index greater than the specified
	// StartingInvoiceIndex field.
	NewInvoices chan *channeldb.Invoice

	// SettledInvoices is a channel that we'll use to send all settled
	// invoices with an invoice index greater than the specified
	// StartingInvoiceIndex field.
	SettledInvoices chan *channeldb.Invoice

	// addIndex is the highest add index the caller knows of. We'll use
	// this information to send out an event backlog to the notifications
	// subscriber. Any new add events with an index greater than this will
	// be dispatched before any new notifications are sent out.
	addIndex uint64

	// settleIndex is the highest settle index the caller knows of. We'll
	// use this information to send out an event backlog to the
	// notifications subscriber. Any new settle events with an index
	// greater than this will be dispatched before any new notifications
	// are sent out.
	settleIndex uint64
}
// SingleInvoiceSubscription represents an intent to receive updates for a
// specific invoice.
type SingleInvoiceSubscription struct {
	invoiceSubscriptionKit

	// invoiceRef identifies the single invoice being watched.
	invoiceRef channeldb.InvoiceRef

	// Updates is a channel that we'll use to send all invoice events for
	// the invoice that is subscribed to.
	Updates chan *channeldb.Invoice
}
// Cancel unregisters the InvoiceSubscription, freeing any previously allocated
// resources. It is safe to call multiple times and from multiple goroutines;
// only the first call performs the teardown.
func (i *invoiceSubscriptionKit) Cancel() {
	// Guard against repeated cancels: only the first caller wins the CAS.
	if !atomic.CompareAndSwapUint32(&i.canceled, 0, 1) {
		return
	}

	// Ask the registry's event loop to drop this client, unless the
	// registry itself is already shutting down.
	select {
	case i.inv.subscriptionCancels <- i.id:
	case <-i.inv.quit:
	}

	// Stop the queue, signal the subscription's goroutine to exit, and
	// wait for it to finish before returning.
	i.ntfnQueue.Stop()
	close(i.cancelChan)

	i.wg.Wait()
}
// notify enqueues the passed event for delivery to this subscriber, returning
// ErrShuttingDown if the registry quits before the event can be queued.
func (i *invoiceSubscriptionKit) notify(event *invoiceEvent) error {
	select {
	case i.ntfnQueue.ChanIn() <- event:
	case <-i.inv.quit:
		return ErrShuttingDown
	}

	return nil
}
// SubscribeNotifications returns an InvoiceSubscription which allows the
// caller to receive async notifications when any invoices are settled or
// added. The invoiceIndex parameter is a streaming "checkpoint". We'll start
// by first sending out all new events with an invoice index _greater_ than
// this value. Afterwards, we'll send out real-time notifications.
func (i *InvoiceRegistry) SubscribeNotifications(
	addIndex, settleIndex uint64) (*InvoiceSubscription, error) {

	client := &InvoiceSubscription{
		NewInvoices:     make(chan *channeldb.Invoice),
		SettledInvoices: make(chan *channeldb.Invoice),
		addIndex:        addIndex,
		settleIndex:     settleIndex,
		invoiceSubscriptionKit: invoiceSubscriptionKit{
			inv:        i,
			ntfnQueue:  queue.NewConcurrentQueue(20),
			cancelChan: make(chan struct{}),
		},
	}
	client.ntfnQueue.Start()

	// Assign the next free client ID under the client mutex.
	i.clientMtx.Lock()
	client.id = i.nextClientID
	i.nextClientID++
	i.clientMtx.Unlock()

	// Before we register this new invoice subscription, we'll launch a new
	// goroutine that will proxy all notifications appended to the end of
	// the concurrent queue to the two client-side channels the caller will
	// feed off of.
	i.wg.Add(1)
	go func() {
		defer i.wg.Done()

		for {
			select {
			// A new invoice event has been sent by the
			// invoiceRegistry! We'll figure out if this is an add
			// event or a settle event, then dispatch the event to
			// the client.
			case ntfn := <-client.ntfnQueue.ChanOut():
				invoiceEvent := ntfn.(*invoiceEvent)

				var targetChan chan *channeldb.Invoice
				state := invoiceEvent.invoice.State
				switch {
				// AMP invoices never move to settled, but will
				// be sent with a set ID if an HTLC set is
				// being settled.
				case state == channeldb.ContractOpen &&
					invoiceEvent.setID != nil:
					fallthrough

				case state == channeldb.ContractSettled:
					targetChan = client.SettledInvoices

				case state == channeldb.ContractOpen:
					targetChan = client.NewInvoices

				// Unknown states are logged and skipped rather
				// than delivered.
				default:
					log.Errorf("unknown invoice "+
						"state: %v", state)

					continue
				}

				// Deliver to the client, bailing out if the
				// client cancels or the registry quits.
				select {
				case targetChan <- invoiceEvent.invoice:

				case <-client.cancelChan:
					return

				case <-i.quit:
					return
				}

			case <-client.cancelChan:
				return

			case <-i.quit:
				return
			}
		}
	}()

	i.Lock()
	defer i.Unlock()

	// Query the database to see if based on the provided addIndex and
	// settledIndex we need to deliver any backlog notifications.
	err := i.deliverBacklogEvents(client)
	if err != nil {
		return nil, err
	}

	// Hand the subscription to the registry's main loop, unless we're
	// shutting down.
	select {
	case i.newSubscriptions <- client:

	case <-i.quit:
		return nil, ErrShuttingDown
	}

	return client, nil
}
// SubscribeSingleInvoice returns an SingleInvoiceSubscription which allows the
// caller to receive async notifications for a specific invoice.
func (i *InvoiceRegistry) SubscribeSingleInvoice(
	hash lntypes.Hash) (*SingleInvoiceSubscription, error) {

	client := &SingleInvoiceSubscription{
		Updates: make(chan *channeldb.Invoice),
		invoiceSubscriptionKit: invoiceSubscriptionKit{
			inv:        i,
			ntfnQueue:  queue.NewConcurrentQueue(20),
			cancelChan: make(chan struct{}),
		},
		invoiceRef: channeldb.InvoiceRefByHash(hash),
	}
	client.ntfnQueue.Start()

	// Assign the next free client ID under the client mutex.
	i.clientMtx.Lock()
	client.id = i.nextClientID
	i.nextClientID++
	i.clientMtx.Unlock()

	// Before we register this new invoice subscription, we'll launch a new
	// goroutine that will proxy all notifications appended to the end of
	// the concurrent queue to the client-side Updates channel the caller
	// will feed off of.
	i.wg.Add(1)
	go func() {
		defer i.wg.Done()

		for {
			select {
			// A new invoice event has been sent by the
			// invoiceRegistry. We will dispatch the event to the
			// client.
			case ntfn := <-client.ntfnQueue.ChanOut():
				invoiceEvent := ntfn.(*invoiceEvent)

				select {
				case client.Updates <- invoiceEvent.invoice:

				case <-client.cancelChan:
					return

				case <-i.quit:
					return
				}

			case <-client.cancelChan:
				return

			case <-i.quit:
				return
			}
		}
	}()

	// Within the lock, we both query the invoice state and pass the client
	// subscription to the invoiceEvents channel. This is to make sure that
	// the client receives a consistent stream of events.
	i.Lock()
	defer i.Unlock()

	err := i.deliverSingleBacklogEvents(client)
	if err != nil {
		return nil, err
	}

	select {
	case i.invoiceEvents <- client:

	case <-i.quit:
		return nil, ErrShuttingDown
	}

	return client, nil
}
// notifyHodlSubscribers sends out the htlc resolution to all current
// subscribers.
//
// NOTE(review): this reads and mutates hodlSubscriptions without taking
// i.Lock here — presumably the caller holds the registry lock; confirm.
func (i *InvoiceRegistry) notifyHodlSubscribers(htlcResolution HtlcResolution) {
	subscribers, ok := i.hodlSubscriptions[htlcResolution.CircuitKey()]
	if !ok {
		return
	}

	// Notify all interested subscribers and remove subscription from both
	// maps. The subscription can be removed as there only ever will be a
	// single resolution for each hash.
	for subscriber := range subscribers {
		select {
		case subscriber <- htlcResolution:
		case <-i.quit:
			return
		}

		delete(
			i.hodlReverseSubscriptions[subscriber],
			htlcResolution.CircuitKey(),
		)
	}

	delete(i.hodlSubscriptions, htlcResolution.CircuitKey())
}
// hodlSubscribe adds a new invoice subscription.
func (i *InvoiceRegistry) hodlSubscribe(subscriber chan<- interface{},
	circuitKey channeldb.CircuitKey) {

	log.Debugf("Hodl subscribe for %v", circuitKey)

	// Lazily create the forward index entry for this circuit key, then
	// record the subscriber in it.
	subs := i.hodlSubscriptions[circuitKey]
	if subs == nil {
		subs = make(map[chan<- interface{}]struct{})
		i.hodlSubscriptions[circuitKey] = subs
	}
	subs[subscriber] = struct{}{}

	// Mirror the registration in the reverse index so HodlUnsubscribeAll
	// can locate every circuit key this subscriber is attached to.
	revSubs := i.hodlReverseSubscriptions[subscriber]
	if revSubs == nil {
		revSubs = make(map[channeldb.CircuitKey]struct{})
		i.hodlReverseSubscriptions[subscriber] = revSubs
	}
	revSubs[circuitKey] = struct{}{}
}
// HodlUnsubscribeAll cancels the subscription.
func (i *InvoiceRegistry) HodlUnsubscribeAll(subscriber chan<- interface{}) {
	i.Lock()
	defer i.Unlock()

	// Walk every circuit key this subscriber registered for via the
	// reverse index, removing the subscriber from the forward map.
	for circuitKey := range i.hodlReverseSubscriptions[subscriber] {
		delete(i.hodlSubscriptions[circuitKey], subscriber)
	}

	// Finally, drop the reverse-index entry itself.
	delete(i.hodlReverseSubscriptions, subscriber)
}
|
package govector
import (
"fmt"
"testing"
"github.com/bmizerany/assert"
)
// TestVectors exercises the core Vector operations end to end: construction
// via AsVector, diff, max/min, the empirical CDF, weighted mean, quantiles,
// cumulative sum, ranks, shuffling, absolute values, apply, push, join,
// filter, smoothing, and sorting.
func TestVectors(t *testing.T) {
	x, err := AsVector([]int{2, 2, 2, 4, 2, 5})
	assert.Equal(t, nil, err, "Error casting integer array to vector")

	w, err := AsVector([]float64{1.0, 1.0, 1.0, 1.0, 1.0, 4.0})
	assert.Equal(t, nil, err, "Error casting float64 array to vector")

	// Fixed typo in the failure message: "casing" -> "casting".
	q, err := AsVector([]float64{0.05, 0.95})
	assert.Equal(t, nil, err, "Error casting float64 array to vector")

	d_x := x.Diff()
	d_w := w.Diff()

	max := x.Max()
	assert.Equal(t, 5.0, max, "Error calculating max")

	min := x.Min()
	assert.Equal(t, 2.0, min, "Error calculating min")

	// 2.4 sits above four of the six values, so the ECDF yields 4/6.
	empirical := x.Ecdf()
	percentile := empirical(2.4)
	assert.Equal(t, 2.0/3.0, percentile, "Error in CDF calculation")

	_, err = d_x.WeightedMean(d_w)
	assert.Equal(t, nil, err, "Error calculating weighted mean")

	_ = x.Quantiles(q)

	cumsum := x.Cumsum()
	assert.Equal(t, Vector{2, 4, 6, 10, 12, 17}, cumsum, "Error calculating cumulative sum")

	ranks := x.Rank()
	assert.Equal(t, Vector{3, 0, 0, 4, 0, 5}, ranks, "Error calculating ranks")

	// Shuffling must preserve the length.
	shuffled := x.Shuffle()
	assert.Equal(t, x.Len(), shuffled.Len(), "Error shuffling vector")

	y, err := AsVector([]int{-2, 2, -1, 4, 2, 5})
	assert.Equal(t, nil, err, "Error casting negative integer array to vector")

	abs := y.Abs()
	assert.Equal(t, Vector{2, 2, 1, 4, 2, 5}, abs, "Error finding absolute values")

	_ = x.Apply(empirical)

	n := x.Len()
	x.Push(50)
	assert.Equal(t, n+1, x.Len(), "Error appending value to vector")

	xw := Join(x, w)
	assert.Equal(t, x.Len()+w.Len(), xw.Len(), "Error joining vectors")

	filtered := xw.Filter(func(x float64) bool {
		if x < 10 {
			return false
		}
		return true
	})
	assert.Equal(t, 12, len(filtered), "Error filtering vector")

	z, err := AsVector([]int{0, 2, 4, 6, 8, 10, 12, 14, 16, 18})
	assert.Equal(t, nil, err)

	// A zero-width window is a no-op; a 1/1 window averages neighbors,
	// only changing the endpoints for this arithmetic sequence.
	smoothed := z.Smooth(0, 0)
	assert.Equal(t, z, smoothed)

	smoothed = z.Smooth(1, 1)
	expected := Vector{1, 2, 4, 6, 8, 10, 12, 14, 16, 17}
	assert.Equal(t, expected, smoothed, "Error smoothing vector")

	x.Sort()
	assert.Equal(t, Vector{2, 2, 2, 2, 4, 5, 50}, x)
}
// TestFixedPush verifies that PushFixed keeps the vector at its fixed
// length: once full, a push lands the new value at the end and shifts the
// remaining elements toward the front, evicting the oldest.
func TestFixedPush(t *testing.T) {
	arr := make([]float64, 3)
	v := Vector(arr)

	v.PushFixed(5.0)
	v.PushFixed(25.0)
	v.PushFixed(125.0)
	// The most recently pushed value sits at the end.
	assert.Equal(t, v[2], 125.0)

	// Pushing past capacity evicts the oldest value (5.0): 250.0 lands at
	// the end and 25.0 shifts to the front.
	v.PushFixed(250.0)
	assert.Equal(t, v[2], 250.0)
	assert.Equal(t, v[0], 25.0)

	// The vector must not grow beyond its fixed length. (Also removed a
	// leftover debug fmt.Printf that polluted test output.)
	assert.Equal(t, len(v), 3)
}
Improve test to check the array length and remove the debug print
package govector
import (
"testing"
"github.com/bmizerany/assert"
)
// TestVectors exercises the core Vector operations end to end: construction
// via AsVector, diff, max/min, the empirical CDF, weighted mean, quantiles,
// cumulative sum, ranks, shuffling, absolute values, apply, push, join,
// filter, smoothing, and sorting.
func TestVectors(t *testing.T) {
	x, err := AsVector([]int{2, 2, 2, 4, 2, 5})
	assert.Equal(t, nil, err, "Error casting integer array to vector")

	w, err := AsVector([]float64{1.0, 1.0, 1.0, 1.0, 1.0, 4.0})
	assert.Equal(t, nil, err, "Error casting float64 array to vector")

	// Fixed typo in the failure message: "casing" -> "casting".
	q, err := AsVector([]float64{0.05, 0.95})
	assert.Equal(t, nil, err, "Error casting float64 array to vector")

	d_x := x.Diff()
	d_w := w.Diff()

	max := x.Max()
	assert.Equal(t, 5.0, max, "Error calculating max")

	min := x.Min()
	assert.Equal(t, 2.0, min, "Error calculating min")

	// 2.4 sits above four of the six values, so the ECDF yields 4/6.
	empirical := x.Ecdf()
	percentile := empirical(2.4)
	assert.Equal(t, 2.0/3.0, percentile, "Error in CDF calculation")

	_, err = d_x.WeightedMean(d_w)
	assert.Equal(t, nil, err, "Error calculating weighted mean")

	_ = x.Quantiles(q)

	cumsum := x.Cumsum()
	assert.Equal(t, Vector{2, 4, 6, 10, 12, 17}, cumsum, "Error calculating cumulative sum")

	ranks := x.Rank()
	assert.Equal(t, Vector{3, 0, 0, 4, 0, 5}, ranks, "Error calculating ranks")

	// Shuffling must preserve the length.
	shuffled := x.Shuffle()
	assert.Equal(t, x.Len(), shuffled.Len(), "Error shuffling vector")

	y, err := AsVector([]int{-2, 2, -1, 4, 2, 5})
	assert.Equal(t, nil, err, "Error casting negative integer array to vector")

	abs := y.Abs()
	assert.Equal(t, Vector{2, 2, 1, 4, 2, 5}, abs, "Error finding absolute values")

	_ = x.Apply(empirical)

	n := x.Len()
	x.Push(50)
	assert.Equal(t, n+1, x.Len(), "Error appending value to vector")

	xw := Join(x, w)
	assert.Equal(t, x.Len()+w.Len(), xw.Len(), "Error joining vectors")

	filtered := xw.Filter(func(x float64) bool {
		if x < 10 {
			return false
		}
		return true
	})
	assert.Equal(t, 12, len(filtered), "Error filtering vector")

	z, err := AsVector([]int{0, 2, 4, 6, 8, 10, 12, 14, 16, 18})
	assert.Equal(t, nil, err)

	// A zero-width window is a no-op; a 1/1 window averages neighbors,
	// only changing the endpoints for this arithmetic sequence.
	smoothed := z.Smooth(0, 0)
	assert.Equal(t, z, smoothed)

	smoothed = z.Smooth(1, 1)
	expected := Vector{1, 2, 4, 6, 8, 10, 12, 14, 16, 17}
	assert.Equal(t, expected, smoothed, "Error smoothing vector")

	x.Sort()
	assert.Equal(t, Vector{2, 2, 2, 2, 4, 5, 50}, x)
}
// TestFixedPush verifies PushFixed behavior on a fixed-length vector.
func TestFixedPush(t *testing.T) {
	arr := make([]float64, 3, 3)
	v := Vector(arr)

	v.PushFixed(5.0)
	v.PushFixed(25.0)
	v.PushFixed(125.0)
	// The most recently pushed value sits at the end.
	assert.Equal(t, v[2], 125.0)

	// Pushing past capacity evicts the oldest value (5.0): 250.0 lands at
	// the end and 25.0 shifts to the front.
	v.PushFixed(250.0)
	assert.Equal(t, v[2], 250.0)
	assert.Equal(t, v[0], 25.0)
	// The vector's length stays fixed at 3.
	assert.Equal(t, len(v), 3)
}
|
package e2e
import (
"fmt"
"sync"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
)
// Reboot all nodes in cluster all at once. Wait for nodes to return. Run nginx
// workload.
func TestReboot(t *testing.T) {
	nodeList, err := client.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("rebooting %v nodes", len(nodeList.Items))

	// Reboot every node concurrently; the node value is passed as an
	// argument so each goroutine works on its own copy.
	var wg sync.WaitGroup
	for _, node := range nodeList.Items {
		wg.Add(1)
		go func(node v1.Node) {
			defer wg.Done()
			if err := newNode(&node).Reboot(); err != nil {
				t.Errorf("failed to reboot node: %v", err)
			}
		}(node)
	}
	wg.Wait()

	// Block until every node from the original list reports Ready again.
	if err := nodesReady(client, nodeList, t); err != nil {
		t.Fatalf("some or all nodes did not recover from reboot: %v", err)
	}
}
// nodesReady blocks until all nodes in list are ready based on Name. Safe
// against new unknown nodes joining while the original set reboots.
func nodesReady(c kubernetes.Interface, expectedNodes *v1.NodeList, t *testing.T) error {
	// Index the expected nodes by name so nodes that join mid-test can be
	// ignored with an O(1) lookup.
	var expectedNodeSet = make(map[string]struct{})
	for _, node := range expectedNodes.Items {
		expectedNodeSet[node.ObjectMeta.Name] = struct{}{}
	}

	// Poll up to 80 times with a 5 second backoff.
	return retry(80, 5*time.Second, func() error {
		list, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
		if err != nil {
			return err
		}

		var recoveredNodes int
		for _, node := range list.Items {
			_, ok := expectedNodeSet[node.ObjectMeta.Name]
			if !ok {
				// Not part of the original set; skip it.
				t.Logf("unexpected node checked in")
				continue
			}

			// Count the node only when its NodeReady condition is
			// true; an explicit not-ready status fails this
			// attempt immediately.
			for _, condition := range node.Status.Conditions {
				if condition.Type == v1.NodeReady {
					if condition.Status == v1.ConditionTrue {
						recoveredNodes++
					} else {
						return fmt.Errorf("one or more nodes not in the ready state: %v", node.Status.Phase)
					}
					break
				}
			}
		}

		if recoveredNodes != len(expectedNodeSet) {
			return fmt.Errorf("not enough nodes recovered, expected %v got %v", len(expectedNodeSet), recoveredNodes)
		}
		return nil
	})
}
e2e/reboot: wait for control plane to recover
package e2e
import (
"fmt"
"log"
"sort"
"strings"
"sync"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
)
// Reboot all nodes in cluster all at once. Wait for nodes to return. Run nginx
// workload.
func TestReboot(t *testing.T) {
	nodeList, err := client.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("rebooting %v nodes", len(nodeList.Items))

	// Reboot every node concurrently; the node value is passed as an
	// argument so each goroutine works on its own copy.
	var wg sync.WaitGroup
	for _, node := range nodeList.Items {
		wg.Add(1)
		go func(node v1.Node) {
			defer wg.Done()
			if err := newNode(&node).Reboot(); err != nil {
				t.Errorf("failed to reboot node: %v", err)
			}
		}(node)
	}
	wg.Wait()

	// Block until every node from the original list reports Ready again.
	if err := nodesReady(client, nodeList, t); err != nil {
		t.Fatalf("some or all nodes did not recover from reboot: %v", err)
	}

	// Additionally wait for the kube-system control plane pods to recover,
	// polling up to 120 times with a 5 second backoff.
	if err := controlPlaneReady(client, 120, 5*time.Second); err != nil {
		t.Fatalf("waiting for control plane: %v", err)
	}
}
// nodesReady blocks until all nodes in list are ready based on Name. Safe
// against new unknown nodes joining while the original set reboots.
func nodesReady(c kubernetes.Interface, expectedNodes *v1.NodeList, t *testing.T) error {
	// Index the expected nodes by name for O(1) membership checks.
	expected := make(map[string]struct{})
	for _, n := range expectedNodes.Items {
		expected[n.ObjectMeta.Name] = struct{}{}
	}

	return retry(80, 5*time.Second, func() error {
		current, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
		if err != nil {
			return err
		}

		ready := 0
		for _, n := range current.Items {
			if _, ok := expected[n.ObjectMeta.Name]; !ok {
				// A node we did not reboot has joined; ignore.
				t.Logf("unexpected node checked in")
				continue
			}

			// Only the NodeReady condition matters; stop scanning
			// the conditions once it has been examined.
			for _, cond := range n.Status.Conditions {
				if cond.Type != v1.NodeReady {
					continue
				}
				if cond.Status != v1.ConditionTrue {
					return fmt.Errorf("one or more nodes not in the ready state: %v", n.Status.Phase)
				}
				ready++
				break
			}
		}

		if ready != len(expected) {
			return fmt.Errorf("not enough nodes recovered, expected %v got %v", len(expected), ready)
		}
		return nil
	})
}
// checkpointAnnotation marks a pod as a checkpoint copy of another pod; its
// value is the name of the pod it was checkpointed from.
const checkpointAnnotation = "checkpointer.alpha.coreos.com/checkpoint-of"

// controlPlaneReady waits for API server availability and no checkpointed pods
// in kube-system.
func controlPlaneReady(c kubernetes.Interface, attempts int, backoff time.Duration) error {
	return retry(attempts, backoff, func() error {
		pods, err := c.CoreV1().Pods("kube-system").List(metav1.ListOptions{})
		if err != nil {
			return fmt.Errorf("get pods in kube-system: %v", err)
		}

		// Partition pod names into checkpoint pods still worth waiting
		// on versus regular (non-checkpoint) pods.
		var (
			waitablePods []string
			regularPods  []string
		)

		// Only wait on checkpoint pods that lack a parent, or whose
		// parent pod is not running.
		for _, pod := range pods.Items {
			if checkpointedPodName, ok := pod.Annotations[checkpointAnnotation]; ok {
				foundParent := false
				for _, possibleParentPod := range pods.Items {
					if possibleParentPod.Name == checkpointedPodName {
						foundParent = possibleParentPod.Status.Phase == "Running"
						break
					}
				}
				if !foundParent {
					waitablePods = append(waitablePods, pod.Name)
				}
			} else {
				regularPods = append(regularPods, pod.Name)
			}
		}

		// Still waiting on checkpointed pods: log the healthy set and
		// return an error so retry polls again.
		if len(waitablePods) > 0 {
			sort.Strings(waitablePods)
			sort.Strings(regularPods)
			waitablePodsStr := strings.Join(waitablePods, ",")
			regularPodsStr := strings.Join(regularPods, ",")
			log.Printf("waiting for control plane: running non-checkpoint pods: %s", regularPodsStr)
			return fmt.Errorf("waiting for control plane: waiting on checkpointed pods: %s", waitablePodsStr)
		}
		return nil
	})
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.