CombinedText stringlengths 4 3.42M |
|---|
package safefile
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
)
// ensureFileContains reports nil when the file at name holds exactly
// data, and an error otherwise (including read failures).
func ensureFileContains(name, data string) error {
	got, err := ioutil.ReadFile(name)
	if err != nil {
		return err
	}
	if s := string(got); s != data {
		return fmt.Errorf("wrong data in file: expected %s, got %s", data, s)
	}
	return nil
}
// tempFileName builds a unique path in the system temp directory from
// count and the current time in nanoseconds (hex-encoded).
func tempFileName(count int) string {
	base := fmt.Sprintf("safefile-test-%d-%x", count, time.Now().UnixNano())
	return filepath.Join(os.TempDir(), base)
}
var testData = "Hello, this is a test data"
// testInTempDir exercises the Create/Write/Commit/Close cycle: it
// writes testData to a safe file in the temp directory, commits it,
// and verifies the final on-disk contents. It returns the first error
// encountered.
func testInTempDir() error {
	name := tempFileName(0)
	defer os.Remove(name)
	f, err := Create(name, 0666)
	if err != nil {
		return err
	}
	if name != f.OrigName() {
		f.Close()
		return fmt.Errorf("name %q differs from OrigName: %q", name, f.OrigName())
	}
	_, err = io.WriteString(f, testData)
	if err != nil {
		f.Close()
		return err
	}
	err = f.Commit()
	if err != nil {
		// Close even when Commit fails so the temporary file backing
		// f is cleaned up instead of leaked (the original code returned
		// here without closing).
		f.Close()
		return err
	}
	err = f.Close()
	if err != nil {
		return err
	}
	return ensureFileContains(name, testData)
}
// TestFile runs the end-to-end safe-file round trip and fails on any error.
func TestFile(t *testing.T) {
	if err := testInTempDir(); err != nil {
		t.Fatalf("%s", err)
	}
}
// TestWriteFile verifies that WriteFile atomically writes testData and
// that the resulting file holds exactly that data.
func TestWriteFile(t *testing.T) {
	name := tempFileName(1)
	err := WriteFile(name, []byte(testData), 0666)
	if err != nil {
		t.Fatalf("%s", err)
	}
	// One deferred cleanup replaces the duplicated per-branch
	// os.Remove calls; it also runs when t.Fatalf exits the test.
	defer os.Remove(name)
	if err := ensureFileContains(name, testData); err != nil {
		t.Fatalf("%s", err)
	}
}
// TestAbandon checks that closing a safe file without committing
// abandons it: the temporary backing file must be removed.
func TestAbandon(t *testing.T) {
	// Use a count distinct from TestDoubleCommit's 3 so concurrent runs
	// cannot collide on the same target name.
	name := tempFileName(2)
	f, err := Create(name, 0666)
	if err != nil {
		t.Fatalf("%s", err)
	}
	err = f.Close()
	if err != nil {
		t.Fatalf("Abandon failed: %s", err)
	}
	// Make sure temporary file doesn't exist. The original check only
	// rejected unexpected Stat errors; err == nil means the temp file
	// was left behind, which is itself a failure.
	_, err = os.Stat(f.Name())
	if err == nil {
		t.Fatalf("temporary file %q still exists after abandon", f.Name())
	}
	if !os.IsNotExist(err) {
		t.Fatalf("%s", err)
	}
}
// TestDoubleCommit verifies that committing a safe file twice returns
// ErrAlreadyCommitted on the second call and that Close still succeeds.
func TestDoubleCommit(t *testing.T) {
	name := tempFileName(3)
	// One deferred cleanup replaces the os.Remove call duplicated in
	// every failure branch; defers run even when t.Fatalf exits.
	defer os.Remove(name)
	f, err := Create(name, 0666)
	if err != nil {
		t.Fatalf("%s", err)
	}
	if err := f.Commit(); err != nil {
		t.Fatalf("First commit failed: %s", err)
	}
	if err := f.Commit(); err != ErrAlreadyCommitted {
		t.Fatalf("Second commit didn't fail: %s", err)
	}
	if err := f.Close(); err != nil {
		t.Fatalf("Close failed: %s", err)
	}
}
Close the file in the test even when Commit fails.
package safefile
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
)
// ensureFileContains reports nil when the file at name holds exactly
// data, and an error otherwise (including read failures).
func ensureFileContains(name, data string) error {
	got, err := ioutil.ReadFile(name)
	if err != nil {
		return err
	}
	if s := string(got); s != data {
		return fmt.Errorf("wrong data in file: expected %s, got %s", data, s)
	}
	return nil
}
// tempFileName builds a unique path in the system temp directory from
// count and the current time in nanoseconds (hex-encoded).
func tempFileName(count int) string {
	base := fmt.Sprintf("safefile-test-%d-%x", count, time.Now().UnixNano())
	return filepath.Join(os.TempDir(), base)
}
var testData = "Hello, this is a test data"
// testInTempDir exercises the Create/Write/Commit/Close cycle: it
// writes testData to a safe file in the temp directory, commits it,
// and verifies the final on-disk contents. It returns the first error
// encountered.
func testInTempDir() error {
	name := tempFileName(0)
	// Remove the committed file (if any) when this helper returns.
	defer os.Remove(name)
	f, err := Create(name, 0666)
	if err != nil {
		return err
	}
	if name != f.OrigName() {
		f.Close()
		return fmt.Errorf("name %q differs from OrigName: %q", name, f.OrigName())
	}
	_, err = io.WriteString(f, testData)
	if err != nil {
		f.Close()
		return err
	}
	err = f.Commit()
	if err != nil {
		// Close even on a failed commit so the temporary backing file
		// is cleaned up rather than leaked.
		f.Close()
		return err
	}
	err = f.Close()
	if err != nil {
		return err
	}
	return ensureFileContains(name, testData)
}
// TestFile runs the end-to-end safe-file round trip and fails on any error.
func TestFile(t *testing.T) {
	if err := testInTempDir(); err != nil {
		t.Fatalf("%s", err)
	}
}
// TestWriteFile verifies that WriteFile atomically writes testData and
// that the resulting file holds exactly that data.
func TestWriteFile(t *testing.T) {
	name := tempFileName(1)
	err := WriteFile(name, []byte(testData), 0666)
	if err != nil {
		t.Fatalf("%s", err)
	}
	// One deferred cleanup replaces the duplicated per-branch
	// os.Remove calls; it also runs when t.Fatalf exits the test.
	defer os.Remove(name)
	if err := ensureFileContains(name, testData); err != nil {
		t.Fatalf("%s", err)
	}
}
// TestAbandon checks that closing a safe file without committing
// abandons it: the temporary backing file must be removed.
func TestAbandon(t *testing.T) {
	// Use a count distinct from TestDoubleCommit's 3 so concurrent runs
	// cannot collide on the same target name.
	name := tempFileName(2)
	f, err := Create(name, 0666)
	if err != nil {
		t.Fatalf("%s", err)
	}
	err = f.Close()
	if err != nil {
		t.Fatalf("Abandon failed: %s", err)
	}
	// Make sure temporary file doesn't exist. The original check only
	// rejected unexpected Stat errors; err == nil means the temp file
	// was left behind, which is itself a failure.
	_, err = os.Stat(f.Name())
	if err == nil {
		t.Fatalf("temporary file %q still exists after abandon", f.Name())
	}
	if !os.IsNotExist(err) {
		t.Fatalf("%s", err)
	}
}
// TestDoubleCommit verifies that committing a safe file twice returns
// ErrAlreadyCommitted on the second call and that Close still succeeds.
func TestDoubleCommit(t *testing.T) {
	name := tempFileName(3)
	// One deferred cleanup replaces the os.Remove call duplicated in
	// every failure branch; defers run even when t.Fatalf exits.
	defer os.Remove(name)
	f, err := Create(name, 0666)
	if err != nil {
		t.Fatalf("%s", err)
	}
	if err := f.Commit(); err != nil {
		t.Fatalf("First commit failed: %s", err)
	}
	if err := f.Commit(); err != ErrAlreadyCommitted {
		t.Fatalf("Second commit didn't fail: %s", err)
	}
	if err := f.Close(); err != nil {
		t.Fatalf("Close failed: %s", err)
	}
}
|
package transactionpool
import (
"testing"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/types"
)
// addSiacoinTransactionToPool creates a transaction with siacoin outputs and
// adds them to the pool, returning the transaction. A failure to spend
// aborts the enclosing test via tpt.t.Fatal.
func (tpt *tpoolTester) addSiacoinTransactionToPool() (txn types.Transaction) {
	// spendCoins will automatically add transaction(s) to the transaction pool.
	// They will contain siacoin output(s).
	txn, err := tpt.spendCoins(types.NewCurrency64(1), types.ZeroUnlockHash)
	if err != nil {
		tpt.t.Fatal(err)
	}
	return
}
// addDependentSiacoinTransactionToPool adds a transaction to the pool with a
// siacoin output, and then adds a second transaction to the pool that requires
// the unconfirmed siacoin output. Any wallet or spend failure aborts the
// enclosing test via tpt.t.Fatal.
func (tpt *tpoolTester) addDependentSiacoinTransactionToPool() (firstTxn, dependentTxn types.Transaction) {
	// Get an address to receive coins.
	addr, _, err := tpt.wallet.CoinAddress()
	if err != nil {
		tpt.t.Fatal(err)
	}
	// spendCoins will automatically add transaction(s) to the transaction
	// pool. They will contain siacoin output(s). We send all of our coins to
	// ourself to guarantee that the next transaction will depend on an
	// existing unconfirmed transaction.
	balance := tpt.wallet.Balance(false)
	firstTxn, err = tpt.spendCoins(balance, addr)
	if err != nil {
		tpt.t.Fatal(err)
	}
	// Send the full balance to ourselves again. The second transaction will
	// necessarily require the first transaction as a dependency, since we're
	// sending all of the coins again.
	dependentTxn, err = tpt.spendCoins(balance, addr)
	if err != nil {
		tpt.t.Fatal(err)
	}
	return
}
// TestAddSiacoinTransactionToPool creates a tpoolTester and uses it to call
// addSiacoinTransactionToPool.
func TestAddSiacoinTransactionToPool(t *testing.T) {
	newTpoolTester("TestAddSiacoinTransactionToPool", t).addSiacoinTransactionToPool()
}
// TestAddDependentSiacoinTransactionToPool creates a tpoolTester and uses it
// to call addDependentSiacoinTransactionToPool.
func TestAddDependentSiacoinTransactionToPool(t *testing.T) {
	newTpoolTester("TestAddDependentSiacoinTransactionToPool", t).addDependentSiacoinTransactionToPool()
}
// TestDuplicateTransaction checks that a duplicate transaction error is
// triggered when duplicate transactions are added to the transaction pool.
// This test won't be needed after the duplication prevention mechanism is
// removed, and that will be removed after fees are required in all
// transactions submitted to the pool.
func TestDuplicateTransaction(t *testing.T) {
	tpt := newTpoolTester("TestDuplicateTransaction", t)
	txn := tpt.addSiacoinTransactionToPool()
	err := tpt.tpool.AcceptTransaction(txn)
	if err != modules.ErrTransactionPoolDuplicate {
		// Name the actual sentinel in the failure message; the old text
		// referred to a nonexistent "ErrDuplicate".
		t.Fatal("expecting ErrTransactionPoolDuplicate, got:", err)
	}
}
Fix tpool test: pass the required argument to wallet.CoinAddress.
package transactionpool
import (
"testing"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/types"
)
// addSiacoinTransactionToPool creates a transaction with siacoin outputs and
// adds them to the pool, returning the transaction. A failure to spend
// aborts the enclosing test via tpt.t.Fatal.
func (tpt *tpoolTester) addSiacoinTransactionToPool() (txn types.Transaction) {
	// spendCoins will automatically add transaction(s) to the transaction pool.
	// They will contain siacoin output(s).
	txn, err := tpt.spendCoins(types.NewCurrency64(1), types.ZeroUnlockHash)
	if err != nil {
		tpt.t.Fatal(err)
	}
	return
}
// addDependentSiacoinTransactionToPool adds a transaction to the pool with a
// siacoin output, and then adds a second transaction to the pool that requires
// the unconfirmed siacoin output. Any wallet or spend failure aborts the
// enclosing test via tpt.t.Fatal.
func (tpt *tpoolTester) addDependentSiacoinTransactionToPool() (firstTxn, dependentTxn types.Transaction) {
	// Get an address to receive coins.
	addr, _, err := tpt.wallet.CoinAddress(false) // false means hide the address from the user; doesn't matter for test.
	if err != nil {
		tpt.t.Fatal(err)
	}
	// spendCoins will automatically add transaction(s) to the transaction
	// pool. They will contain siacoin output(s). We send all of our coins to
	// ourself to guarantee that the next transaction will depend on an
	// existing unconfirmed transaction.
	balance := tpt.wallet.Balance(false)
	firstTxn, err = tpt.spendCoins(balance, addr)
	if err != nil {
		tpt.t.Fatal(err)
	}
	// Send the full balance to ourselves again. The second transaction will
	// necessarily require the first transaction as a dependency, since we're
	// sending all of the coins again.
	dependentTxn, err = tpt.spendCoins(balance, addr)
	if err != nil {
		tpt.t.Fatal(err)
	}
	return
}
// TestAddSiacoinTransactionToPool creates a tpoolTester and uses it to call
// addSiacoinTransactionToPool.
func TestAddSiacoinTransactionToPool(t *testing.T) {
	newTpoolTester("TestAddSiacoinTransactionToPool", t).addSiacoinTransactionToPool()
}
// TestAddDependentSiacoinTransactionToPool creates a tpoolTester and uses it
// to call addDependentSiacoinTransactionToPool.
func TestAddDependentSiacoinTransactionToPool(t *testing.T) {
	newTpoolTester("TestAddDependentSiacoinTransactionToPool", t).addDependentSiacoinTransactionToPool()
}
// TestDuplicateTransaction checks that a duplicate transaction error is
// triggered when duplicate transactions are added to the transaction pool.
// This test won't be needed after the duplication prevention mechanism is
// removed, and that will be removed after fees are required in all
// transactions submitted to the pool.
func TestDuplicateTransaction(t *testing.T) {
	tpt := newTpoolTester("TestDuplicateTransaction", t)
	txn := tpt.addSiacoinTransactionToPool()
	err := tpt.tpool.AcceptTransaction(txn)
	if err != modules.ErrTransactionPoolDuplicate {
		// Name the actual sentinel in the failure message; the old text
		// referred to a nonexistent "ErrDuplicate".
		t.Fatal("expecting ErrTransactionPoolDuplicate, got:", err)
	}
}
|
package schema
import (
"bytes"
"crypto/md5"
"encoding/json"
"fmt"
"sort"
"strings"
)
//go:generate msgp
// MetricData contains all metric metadata and a datapoint.
type MetricData struct {
	Id         string  `json:"id"` // hash key "<OrgId>.<md5>" computed by SetId
	OrgId      int     `json:"org_id"`
	Name       string  `json:"name"`
	Metric     string  `json:"metric"`
	Interval   int     `json:"interval"`
	Value      float64 `json:"value"`
	Unit       string  `json:"unit"`
	Time       int64   `json:"time"` // presumably unix epoch seconds — confirm against producers
	TargetType string  `json:"target_type"`
	Tags       []string `json:"tags" elastic:"type:string,index:not_analyzed"` // sorted in place by SetId
}
// SetId computes and stores the hash key for this metric in the format
// OrgId.md5sum, where the md5 covers the series name followed by every
// tag (sorted alphabetically in place), each prefixed with ';'.
func (m *MetricData) SetId() {
	sort.Strings(m.Tags)
	var b bytes.Buffer
	b.WriteString(m.Name)
	for _, tag := range m.Tags {
		b.WriteByte(';')
		b.WriteString(tag)
	}
	m.Id = fmt.Sprintf("%d.%x", m.OrgId, md5.Sum(b.Bytes()))
}
// MetricDataArray is a batch of MetricData pointers; having a named
// slice type can be used by some encoders, such as msgp.
type MetricDataArray []*MetricData
// MetricDefinition is the Elasticsearch-oriented view of a metric:
// metadata only, no datapoint value.
type MetricDefinition struct {
	Id         string   `json:"id"` // hash key "<OrgId>.<md5>" computed by SetId
	OrgId      int      `json:"org_id"`
	Name       string   `json:"name" elastic:"type:string,index:not_analyzed"` // graphite format
	Metric     string   `json:"metric"`   // kairosdb format (like graphite, but not including some tags)
	Interval   int      `json:"interval"` // minimum 10
	Unit       string   `json:"unit"`
	TargetType string   `json:"target_type"` // an enum ["derive","gauge"] in nodejs
	Tags       []string `json:"tags" elastic:"type:string,index:not_analyzed"`
	LastUpdate int64    `json:"lastUpdate"` // unix epoch time, per the nodejs definition
	Nodes      map[string]string `json:"nodes"` // name split on '.', keyed n0, n1, ...
	NodeCount  int      `json:"node_count"`
}
// SetId computes and stores the hash key for this definition in the
// format OrgId.md5sum, where the md5 covers the series name followed by
// every tag (sorted alphabetically in place), each prefixed with ';'.
func (m *MetricDefinition) SetId() {
	sort.Strings(m.Tags)
	var b bytes.Buffer
	b.WriteString(m.Name)
	for _, tag := range m.Tags {
		b.WriteByte(';')
		b.WriteString(tag)
	}
	m.Id = fmt.Sprintf("%d.%x", m.OrgId, md5.Sum(b.Bytes()))
}
// Validate checks that the definition has its required fields set: a
// non-empty Name and non-zero OrgId and Interval. It returns an error
// naming the first failed check, or nil when the definition is valid.
// (The old single message "metric is not valid!" gave no hint which
// field was wrong.)
func (m *MetricDefinition) Validate() error {
	if m.Name == "" {
		return fmt.Errorf("metric name cannot be empty")
	}
	if m.OrgId == 0 {
		return fmt.Errorf("metric org-id cannot be 0")
	}
	if m.Interval == 0 {
		return fmt.Errorf("metric interval cannot be 0")
	}
	return nil
}
// MetricDefinitionFromJSON decodes b as JSON into a fresh
// MetricDefinition, returning any unmarshalling error.
func MetricDefinitionFromJSON(b []byte) (*MetricDefinition, error) {
	def := new(MetricDefinition)
	// def is already a pointer; passing it directly avoids the
	// needless **MetricDefinition indirection of the old &def.
	if err := json.Unmarshal(b, def); err != nil {
		return nil, err
	}
	return def, nil
}
// MetricDefinitionFromMetricData yields a MetricDefinition that has no
// references to the original MetricData: the tag slice is copied and
// the dotted name is exploded into a node map keyed n0, n1, ...
func MetricDefinitionFromMetricData(d *MetricData) *MetricDefinition {
	parts := strings.Split(d.Name, ".")
	nodesMap := make(map[string]string, len(parts))
	for i, part := range parts {
		nodesMap[fmt.Sprintf("n%d", i)] = part
	}
	tags := make([]string, len(d.Tags))
	copy(tags, d.Tags)
	return &MetricDefinition{
		Id:         d.Id,
		Name:       d.Name,
		OrgId:      d.OrgId,
		Metric:     d.Metric,
		TargetType: d.TargetType,
		Interval:   d.Interval,
		LastUpdate: d.Time,
		Unit:       d.Unit,
		Tags:       tags,
		Nodes:      nodesMap,
		NodeCount:  len(parts),
	}
}
Clearer MetricDefinition validation output: return a distinct sentinel error for each failed check.
package schema
import (
"bytes"
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"sort"
"strings"
)
// Sentinel validation errors returned by (*MetricDefinition).Validate.
var errInvalidIntervalzero = errors.New("interval cannot be 0")
var errInvalidOrgIdzero = errors.New("org-id cannot be 0")
var errInvalidEmptyName = errors.New("name cannot be empty")
//go:generate msgp
// MetricData contains all metric metadata and a datapoint.
type MetricData struct {
	Id         string  `json:"id"` // hash key "<OrgId>.<md5>" computed by SetId
	OrgId      int     `json:"org_id"`
	Name       string  `json:"name"`
	Metric     string  `json:"metric"`
	Interval   int     `json:"interval"`
	Value      float64 `json:"value"`
	Unit       string  `json:"unit"`
	Time       int64   `json:"time"` // presumably unix epoch seconds — confirm against producers
	TargetType string  `json:"target_type"`
	Tags       []string `json:"tags" elastic:"type:string,index:not_analyzed"` // sorted in place by SetId
}
// SetId computes and stores the hash key for this metric in the format
// OrgId.md5sum, where the md5 covers the series name followed by every
// tag (sorted alphabetically in place), each prefixed with ';'.
func (m *MetricData) SetId() {
	sort.Strings(m.Tags)
	var b bytes.Buffer
	b.WriteString(m.Name)
	for _, tag := range m.Tags {
		b.WriteByte(';')
		b.WriteString(tag)
	}
	m.Id = fmt.Sprintf("%d.%x", m.OrgId, md5.Sum(b.Bytes()))
}
// MetricDataArray is a batch of MetricData pointers; having a named
// slice type can be used by some encoders, such as msgp.
type MetricDataArray []*MetricData
// MetricDefinition is the Elasticsearch-oriented view of a metric:
// metadata only, no datapoint value.
type MetricDefinition struct {
	Id         string   `json:"id"` // hash key "<OrgId>.<md5>" computed by SetId
	OrgId      int      `json:"org_id"`
	Name       string   `json:"name" elastic:"type:string,index:not_analyzed"` // graphite format
	Metric     string   `json:"metric"`   // kairosdb format (like graphite, but not including some tags)
	Interval   int      `json:"interval"` // minimum 10
	Unit       string   `json:"unit"`
	TargetType string   `json:"target_type"` // an enum ["derive","gauge"] in nodejs
	Tags       []string `json:"tags" elastic:"type:string,index:not_analyzed"`
	LastUpdate int64    `json:"lastUpdate"` // unix epoch time, per the nodejs definition
	Nodes      map[string]string `json:"nodes"` // name split on '.', keyed n0, n1, ...
	NodeCount  int      `json:"node_count"`
}
// SetId computes and stores the hash key for this definition in the
// format OrgId.md5sum, where the md5 covers the series name followed by
// every tag (sorted alphabetically in place), each prefixed with ';'.
func (m *MetricDefinition) SetId() {
	sort.Strings(m.Tags)
	var b bytes.Buffer
	b.WriteString(m.Name)
	for _, tag := range m.Tags {
		b.WriteByte(';')
		b.WriteString(tag)
	}
	m.Id = fmt.Sprintf("%d.%x", m.OrgId, md5.Sum(b.Bytes()))
}
// Validate checks that the definition is usable: OrgId and Interval
// must be non-zero and Name non-empty. It returns the sentinel error
// for the first failed check, or nil when the definition is valid.
func (m *MetricDefinition) Validate() error {
	if m.OrgId == 0 {
		return errInvalidOrgIdzero
	}
	if m.Interval == 0 {
		return errInvalidIntervalzero
	}
	if m.Name == "" {
		return errInvalidEmptyName
	}
	return nil
}
// MetricDefinitionFromJSON decodes b as JSON into a fresh
// MetricDefinition, returning any unmarshalling error.
func MetricDefinitionFromJSON(b []byte) (*MetricDefinition, error) {
	def := new(MetricDefinition)
	// def is already a pointer; passing it directly avoids the
	// needless **MetricDefinition indirection of the old &def.
	if err := json.Unmarshal(b, def); err != nil {
		return nil, err
	}
	return def, nil
}
// MetricDefinitionFromMetricData yields a MetricDefinition that has no references
// to the original MetricData: the tag slice is copied and the dotted
// name is exploded into a node map keyed n0, n1, ...
func MetricDefinitionFromMetricData(d *MetricData) *MetricDefinition {
	nodesMap := make(map[string]string)
	nodes := strings.Split(d.Name, ".")
	for i, n := range nodes {
		key := fmt.Sprintf("n%d", i)
		nodesMap[key] = n
	}
	// Copy the tags so mutations of d.Tags don't leak into the definition.
	tags := make([]string, len(d.Tags))
	copy(tags, d.Tags)
	return &MetricDefinition{
		Id:         d.Id,
		Name:       d.Name,
		OrgId:      d.OrgId,
		Metric:     d.Metric,
		TargetType: d.TargetType,
		Interval:   d.Interval,
		LastUpdate: d.Time,
		Unit:       d.Unit,
		Tags:       tags,
		Nodes:      nodesMap,
		NodeCount:  len(nodes),
	}
}
|
package postgis
import (
"encoding/json"
"errors"
"fmt"
"database/sql"
gostErrors "github.com/geodan/gost/src/errors"
"github.com/geodan/gost/src/sensorthings/entities"
"github.com/geodan/gost/src/sensorthings/odata"
)
// totalDatastreams is an in-memory datastream counter, incremented by
// PostDatastream and decremented by DeleteDatastream.
// NOTE(review): it is never seeded from the database in this file —
// confirm it is initialized elsewhere at startup.
var totalDatastreams int

// dsMapping overrides the select expression for observedArea so the
// geometry column is returned as GeoJSON.
var dsMapping = map[string]string{"observedArea": "public.ST_AsGeoJSON(datastream.observedarea) AS observedarea"}
// GetTotalDatastreams returns the amount of datastreams in the database,
// as tracked by the in-memory totalDatastreams counter.
func (gdb *GostDatabase) GetTotalDatastreams() int {
	return totalDatastreams
}
// GetDatastream retrieves a datastream by id, or a not-found error when
// the id is not an integer or no row matches.
func (gdb *GostDatabase) GetDatastream(id interface{}, qo *odata.QueryOptions) (*entities.Datastream, error) {
	intID, ok := ToIntID(id)
	if !ok {
		return nil, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	// intID is an int, so the %v interpolation cannot inject SQL here.
	sql := fmt.Sprintf("select "+CreateSelectString(&entities.Datastream{}, qo, "", "", dsMapping)+" FROM %s.datastream where id = %v", gdb.Schema, intID)
	datastream, err := processDatastream(gdb.Db, sql, qo)
	if err != nil {
		return nil, err
	}
	return datastream, nil
}
// GetDatastreams retrieves all datastreams (paged via $top/$skip) plus
// the total row count.
func (gdb *GostDatabase) GetDatastreams(qo *odata.QueryOptions) ([]*entities.Datastream, int, error) {
	sql := fmt.Sprintf("select "+CreateSelectString(&entities.Datastream{}, qo, "", "", dsMapping)+" FROM %s.datastream order by id desc "+CreateTopSkipQueryString(qo), gdb.Schema)
	countSQL := fmt.Sprintf("select COUNT(*) FROM %s.datastream", gdb.Schema)
	return processDatastreams(gdb.Db, sql, qo, countSQL)
}
// GetDatastreamByObservation retrieves the datastream linked to the given
// observation, or a not-found error when the id is not an integer.
func (gdb *GostDatabase) GetDatastreamByObservation(observationID interface{}, qo *odata.QueryOptions) (*entities.Datastream, error) {
	tID, ok := ToIntID(observationID)
	if !ok {
		return nil, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	sql := fmt.Sprintf("select "+CreateSelectString(&entities.Datastream{}, qo, "datastream.", "", dsMapping)+" FROM %s.datastream inner join %s.observation on datastream.id = observation.stream_id where observation.id = %v", gdb.Schema, gdb.Schema, tID)
	return processDatastream(gdb.Db, sql, qo)
}
// GetDatastreamsByThing retrieves all datastreams linked to the given
// thing, paged via $top/$skip, plus the total matching row count.
func (gdb *GostDatabase) GetDatastreamsByThing(thingID interface{}, qo *odata.QueryOptions) ([]*entities.Datastream, int, error) {
	intID, ok := ToIntID(thingID)
	if !ok {
		return nil, 0, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	sql := fmt.Sprintf("select "+CreateSelectString(&entities.Datastream{}, qo, "datastream.", "", dsMapping)+" FROM %s.datastream inner join %s.thing on thing.id = datastream.thing_id where thing.id = %v order by id desc "+CreateTopSkipQueryString(qo), gdb.Schema, gdb.Schema, intID)
	countSQL := fmt.Sprintf("select COUNT(*) FROM %s.datastream inner join %s.thing on thing.id = datastream.thing_id where thing.id = %v", gdb.Schema, gdb.Schema, intID)
	return processDatastreams(gdb.Db, sql, qo, countSQL)
}
// GetDatastreamsBySensor retrieves all datastreams linked to the given
// sensor, paged via $top/$skip, plus the total matching row count.
func (gdb *GostDatabase) GetDatastreamsBySensor(sensorID interface{}, qo *odata.QueryOptions) ([]*entities.Datastream, int, error) {
	intID, ok := ToIntID(sensorID)
	if !ok {
		return nil, 0, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	sql := fmt.Sprintf("select "+CreateSelectString(&entities.Datastream{}, qo, "datastream.", "", dsMapping)+" FROM %s.datastream inner join %s.sensor on sensor.id = datastream.sensor_id where sensor.id = %v order by id desc "+CreateTopSkipQueryString(qo), gdb.Schema, gdb.Schema, intID)
	countSQL := fmt.Sprintf("select COUNT(*) FROM %s.datastream inner join %s.sensor on sensor.id = datastream.sensor_id where sensor.id = %v", gdb.Schema, gdb.Schema, intID)
	return processDatastreams(gdb.Db, sql, qo, countSQL)
}
// GetDatastreamsByObservedProperty retrieves all datastreams linked to
// the given ObservedProperty, paged via $top/$skip, plus the total
// matching row count.
func (gdb *GostDatabase) GetDatastreamsByObservedProperty(oID interface{}, qo *odata.QueryOptions) ([]*entities.Datastream, int, error) {
	intID, ok := ToIntID(oID)
	if !ok {
		return nil, 0, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	sql := fmt.Sprintf("select "+CreateSelectString(&entities.Datastream{}, qo, "datastream.", "", dsMapping)+" FROM %s.datastream inner join %s.observedproperty on observedproperty.id = datastream.observedproperty_id where observedproperty.id = %v order by id desc "+CreateTopSkipQueryString(qo), gdb.Schema, gdb.Schema, intID)
	// countSQL (lower camel case) matches the naming used by the other
	// GetDatastreamsBy* helpers; this was the only one spelled CountSQL.
	countSQL := fmt.Sprintf("select COUNT(*) FROM %s.datastream inner join %s.observedproperty on observedproperty.id = datastream.observedproperty_id where observedproperty.id = %v", gdb.Schema, gdb.Schema, intID)
	return processDatastreams(gdb.Db, sql, qo, countSQL)
}
// processDatastream runs a query expected to yield at most one
// datastream and returns it; an empty result maps to a not-found error.
func processDatastream(db *sql.DB, sql string, qo *odata.QueryOptions) (*entities.Datastream, error) {
	// An empty countSQL makes processDatastreams skip the count query.
	datastreams, _, err := processDatastreams(db, sql, qo, "")
	if err != nil {
		return nil, err
	}
	if len(datastreams) == 0 {
		return nil, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	return datastreams[0], nil
}
// processDatastreams runs the given query and maps each returned row to
// an entities.Datastream. When countSQL is non-empty it is also
// executed and its single value returned as the total count.
func processDatastreams(db *sql.DB, sql string, qo *odata.QueryOptions, countSQL string) ([]*entities.Datastream, int, error) {
	rows, err := db.Query(sql)
	if err != nil {
		return nil, 0, err
	}
	// Defer Close only after the error check: rows is nil when Query
	// fails, and the original deferred Close before checking err.
	defer rows.Close()
	var datastreams = []*entities.Datastream{}
	for rows.Next() {
		var id interface{}
		var name, description, unitofmeasurement string
		var observedarea *string
		var ot int
		var params []interface{}
		var qp []string
		// Scan either the $select-ed properties or, when absent, the
		// full Datastream property list.
		if qo == nil || qo.QuerySelect == nil || len(qo.QuerySelect.Params) == 0 {
			d := &entities.Datastream{}
			qp = d.GetPropertyNames()
		} else {
			qp = qo.QuerySelect.Params
		}
		// Build scan destinations in the same order as the selected columns.
		for _, p := range qp {
			if p == "id" {
				params = append(params, &id)
			}
			if p == "name" {
				params = append(params, &name)
			}
			if p == "description" {
				params = append(params, &description)
			}
			if p == "unitOfMeasurement" {
				params = append(params, &unitofmeasurement)
			}
			if p == "observationType" {
				params = append(params, &ot)
			}
			if p == "observedArea" {
				params = append(params, &observedarea)
			}
		}
		// The original assigned this error and then immediately
		// overwrote it without checking; surface scan failures.
		if err = rows.Scan(params...); err != nil {
			return nil, 0, err
		}
		unitOfMeasurementMap, err := JSONToMap(&unitofmeasurement)
		if err != nil {
			return nil, 0, err
		}
		observedAreaMap, err := JSONToMap(observedarea)
		if err != nil {
			return nil, 0, err
		}
		datastream := entities.Datastream{}
		datastream.ID = id
		datastream.Name = name
		datastream.Description = description
		datastream.UnitOfMeasurement = unitOfMeasurementMap
		datastream.ObservedArea = observedAreaMap
		if ot != 0 {
			// ot stays 0 when observationType was not selected.
			obs, _ := entities.GetObservationTypeByID(ot)
			datastream.ObservationType = obs.Value
		}
		datastreams = append(datastreams, &datastream)
	}
	// Report errors encountered during row iteration, which the
	// original silently dropped.
	if err = rows.Err(); err != nil {
		return nil, 0, err
	}
	var count int
	if len(countSQL) > 0 {
		// Count is best-effort: on error count stays 0, matching the
		// previous behavior.
		db.QueryRow(countSQL).Scan(&count)
	}
	return datastreams, count, nil
}
// PostDatastream inserts a new Datastream after validating that its
// linked Thing, Sensor and ObservedProperty exist, then increments the
// in-memory datastream counter.
// TODO: !!!!ADD phenomenonTime SUPPORT!!!!
// TODO: !!!!ADD resulttime SUPPORT!!!!
func (gdb *GostDatabase) PostDatastream(d *entities.Datastream) (*entities.Datastream, error) {
	var dsID, tID, sID, oID int
	var ok bool
	if tID, ok = ToIntID(d.Thing.ID); !ok || !gdb.ThingExists(tID) {
		return nil, gostErrors.NewBadRequestError(errors.New("Thing does not exist"))
	}
	if sID, ok = ToIntID(d.Sensor.ID); !ok || !gdb.SensorExists(sID) {
		return nil, gostErrors.NewBadRequestError(errors.New("Sensor does not exist"))
	}
	if oID, ok = ToIntID(d.ObservedProperty.ID); !ok || !gdb.ObservedPropertyExists(oID) {
		return nil, gostErrors.NewBadRequestError(errors.New("ObservedProperty does not exist"))
	}
	unitOfMeasurement, _ := json.Marshal(d.UnitOfMeasurement)
	geom := "NULL"
	if len(d.ObservedArea) != 0 {
		observedAreaBytes, _ := json.Marshal(d.ObservedArea)
		// NOTE(review): the marshalled GeoJSON is spliced directly into
		// the SQL string below; a value containing a quote could break
		// or inject into the statement — consider parameterizing.
		geom = fmt.Sprintf("ST_SetSRID(ST_GeomFromGeoJSON('%s'),4326)", string(observedAreaBytes[:]))
	}
	// get the ObservationType id in the lookup table
	observationType, err := entities.GetObservationTypeByValue(d.ObservationType)
	if err != nil {
		return nil, gostErrors.NewBadRequestError(errors.New("ObservationType does not exist"))
	}
	sql := fmt.Sprintf("INSERT INTO %s.datastream (name, description, unitofmeasurement, observedarea, thing_id, sensor_id, observedproperty_id, observationtype) VALUES ($1, $2, $3, %s, $4, $5, $6, $7) RETURNING id", gdb.Schema, geom)
	err = gdb.Db.QueryRow(sql, d.Name, d.Description, unitOfMeasurement, tID, sID, oID, observationType.Code).Scan(&dsID)
	if err != nil {
		return nil, err
	}
	d.ID = dsID
	// clear inner entities to serves links upon response
	d.Thing = nil
	d.Sensor = nil
	d.ObservedProperty = nil
	totalDatastreams++
	return d, nil
}
// PatchDatastream updates a Datastream in the database. Only non-empty
// fields of ds are written; the refreshed entity is re-read and returned.
func (gdb *GostDatabase) PatchDatastream(id interface{}, ds *entities.Datastream) (*entities.Datastream, error) {
	var err error
	var ok bool
	var intID int
	updates := make(map[string]interface{})
	if intID, ok = ToIntID(id); !ok || !gdb.DatastreamExists(intID) {
		return nil, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	if len(ds.Name) > 0 {
		updates["name"] = ds.Name
	}
	if len(ds.Description) > 0 {
		updates["description"] = ds.Description
	}
	if len(ds.ObservationType) > 0 {
		// Translate the observation type value to its lookup-table code.
		observationType, err := entities.GetObservationTypeByValue(ds.ObservationType)
		if err != nil {
			return nil, gostErrors.NewBadRequestError(errors.New("ObservationType does not exist"))
		}
		updates["observationtype"] = observationType.Code
	}
	if len(ds.UnitOfMeasurement) > 0 {
		j, _ := json.Marshal(ds.UnitOfMeasurement)
		updates["unitofmeasurement"] = string(j[:])
	}
	if len(ds.ObservedArea) > 0 {
		observedAreaBytes, _ := json.Marshal(ds.ObservedArea)
		updates["observedarea"] = fmt.Sprintf("ST_SetSRID(ST_GeomFromGeoJSON('%s'),4326)", string(observedAreaBytes[:]))
	}
	if err = gdb.updateEntityColumns("datastream", updates, intID); err != nil {
		return nil, err
	}
	// Best-effort re-read: a failure here returns a nil entity with nil error.
	nd, _ := gdb.GetDatastream(intID, nil)
	return nd, nil
}
// DeleteDatastream tries to delete a Datastream by the given id,
// decrementing the in-memory counter on success. A non-integer id or a
// delete that affects no rows yields a not-found error.
func (gdb *GostDatabase) DeleteDatastream(id interface{}) error {
	intID, ok := ToIntID(id)
	if !ok {
		return gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	r, err := gdb.Db.Exec(fmt.Sprintf("DELETE FROM %s.datastream WHERE id = $1", gdb.Schema), intID)
	if err != nil {
		return err
	}
	// Zero affected rows means no datastream had this id.
	if c, _ := r.RowsAffected(); c == 0 {
		return gostErrors.NewRequestNotFound(errors.New("Datastream not found"))
	}
	totalDatastreams--
	return nil
}
// DatastreamExists checks if a Datastream is present in the database
// based on a given id; query failures are treated as "not present".
func (gdb *GostDatabase) DatastreamExists(databaseID int) bool {
	query := fmt.Sprintf("SELECT exists (SELECT 1 FROM %s.datastream WHERE id = $1 LIMIT 1)", gdb.Schema)
	var present bool
	if err := gdb.Db.QueryRow(query, databaseID).Scan(&present); err != nil {
		return false
	}
	return present
}
Start datastream.observedArea support: derive the observed area from the datastream's observations.
package postgis
import (
"encoding/json"
"errors"
"fmt"
"database/sql"
gostErrors "github.com/geodan/gost/src/errors"
"github.com/geodan/gost/src/sensorthings/entities"
"github.com/geodan/gost/src/sensorthings/odata"
)
// totalDatastreams is an in-memory datastream counter, incremented by
// PostDatastream and decremented by DeleteDatastream.
// NOTE(review): it is never seeded from the database in this file —
// confirm it is initialized elsewhere at startup.
var totalDatastreams int

// dsMapping overrides the select expression for observedArea so the
// geometry column is returned as GeoJSON.
var dsMapping = map[string]string{"observedArea": "public.ST_AsGeoJSON(datastream.observedarea) AS observedarea"}
// GetTotalDatastreams returns the amount of datastreams in the database,
// as tracked by the in-memory totalDatastreams counter.
func (gdb *GostDatabase) GetTotalDatastreams() int {
	return totalDatastreams
}
// GetObservedArea returns, as a GeoJSON map, the convex hull of the
// features of interest of all observations belonging to the given
// datastream. A stream with no observations yields a nil map.
func (gdb *GostDatabase) GetObservedArea(id int) (map[string]interface{}, error) {
	sqlString := "select ST_AsGeoJSON(ST_ConvexHull(ST_Collect(feature))) as geom from %s.featureofinterest where id in (select distinct featureofinterest_id from %s.observation where stream_id=%v)"
	query := fmt.Sprintf(sqlString, gdb.Schema, gdb.Schema, id)
	rows, err := gdb.Db.Query(query)
	if err != nil {
		return nil, err
	}
	// Defer Close only after the error check: rows is nil when Query
	// fails, and the original deferred Close before checking err.
	defer rows.Close()
	var propMap map[string]interface{}
	for rows.Next() {
		var geom string
		if err := rows.Scan(&geom); err == nil {
			// Best-effort: an unparsable geometry leaves propMap nil.
			propMap, _ = JSONToMap(&geom)
		}
	}
	// Surface iteration errors, which the original silently dropped.
	return propMap, rows.Err()
}
// GetDatastream retrieves a datastream by id, or a not-found error when
// the id is not an integer or no row matches. The observed area is
// recomputed from the stream's observations and overrides the stored
// observedarea column.
func (gdb *GostDatabase) GetDatastream(id interface{}, qo *odata.QueryOptions) (*entities.Datastream, error) {
	intID, ok := ToIntID(id)
	if !ok {
		return nil, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	sql := fmt.Sprintf("select "+CreateSelectString(&entities.Datastream{}, qo, "", "", dsMapping)+" FROM %s.datastream where id = %v", gdb.Schema, intID)
	datastream, err := processDatastream(gdb.Db, sql, qo)
	if err != nil {
		return nil, err
	}
	// Best-effort: a failed area computation leaves ObservedArea nil.
	observedArea, _ := gdb.GetObservedArea(intID)
	datastream.ObservedArea = observedArea
	return datastream, nil
}
// GetDatastreams retrieves all datastreams (paged via $top/$skip) plus
// the total row count.
func (gdb *GostDatabase) GetDatastreams(qo *odata.QueryOptions) ([]*entities.Datastream, int, error) {
	sql := fmt.Sprintf("select "+CreateSelectString(&entities.Datastream{}, qo, "", "", dsMapping)+" FROM %s.datastream order by id desc "+CreateTopSkipQueryString(qo), gdb.Schema)
	countSQL := fmt.Sprintf("select COUNT(*) FROM %s.datastream", gdb.Schema)
	return processDatastreams(gdb.Db, sql, qo, countSQL)
}
// GetDatastreamByObservation retrieves the datastream linked to the given
// observation, or a not-found error when the id is not an integer.
func (gdb *GostDatabase) GetDatastreamByObservation(observationID interface{}, qo *odata.QueryOptions) (*entities.Datastream, error) {
	tID, ok := ToIntID(observationID)
	if !ok {
		return nil, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	sql := fmt.Sprintf("select "+CreateSelectString(&entities.Datastream{}, qo, "datastream.", "", dsMapping)+" FROM %s.datastream inner join %s.observation on datastream.id = observation.stream_id where observation.id = %v", gdb.Schema, gdb.Schema, tID)
	return processDatastream(gdb.Db, sql, qo)
}
// GetDatastreamsByThing retrieves all datastreams linked to the given
// thing, paged via $top/$skip, plus the total matching row count.
func (gdb *GostDatabase) GetDatastreamsByThing(thingID interface{}, qo *odata.QueryOptions) ([]*entities.Datastream, int, error) {
	intID, ok := ToIntID(thingID)
	if !ok {
		return nil, 0, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	sql := fmt.Sprintf("select "+CreateSelectString(&entities.Datastream{}, qo, "datastream.", "", dsMapping)+" FROM %s.datastream inner join %s.thing on thing.id = datastream.thing_id where thing.id = %v order by id desc "+CreateTopSkipQueryString(qo), gdb.Schema, gdb.Schema, intID)
	countSQL := fmt.Sprintf("select COUNT(*) FROM %s.datastream inner join %s.thing on thing.id = datastream.thing_id where thing.id = %v", gdb.Schema, gdb.Schema, intID)
	return processDatastreams(gdb.Db, sql, qo, countSQL)
}
// GetDatastreamsBySensor retrieves all datastreams linked to the given
// sensor, paged via $top/$skip, plus the total matching row count.
func (gdb *GostDatabase) GetDatastreamsBySensor(sensorID interface{}, qo *odata.QueryOptions) ([]*entities.Datastream, int, error) {
	intID, ok := ToIntID(sensorID)
	if !ok {
		return nil, 0, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	sql := fmt.Sprintf("select "+CreateSelectString(&entities.Datastream{}, qo, "datastream.", "", dsMapping)+" FROM %s.datastream inner join %s.sensor on sensor.id = datastream.sensor_id where sensor.id = %v order by id desc "+CreateTopSkipQueryString(qo), gdb.Schema, gdb.Schema, intID)
	countSQL := fmt.Sprintf("select COUNT(*) FROM %s.datastream inner join %s.sensor on sensor.id = datastream.sensor_id where sensor.id = %v", gdb.Schema, gdb.Schema, intID)
	return processDatastreams(gdb.Db, sql, qo, countSQL)
}
// GetDatastreamsByObservedProperty retrieves all datastreams linked to
// the given ObservedProperty, paged via $top/$skip, plus the total
// matching row count.
func (gdb *GostDatabase) GetDatastreamsByObservedProperty(oID interface{}, qo *odata.QueryOptions) ([]*entities.Datastream, int, error) {
	intID, ok := ToIntID(oID)
	if !ok {
		return nil, 0, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	sql := fmt.Sprintf("select "+CreateSelectString(&entities.Datastream{}, qo, "datastream.", "", dsMapping)+" FROM %s.datastream inner join %s.observedproperty on observedproperty.id = datastream.observedproperty_id where observedproperty.id = %v order by id desc "+CreateTopSkipQueryString(qo), gdb.Schema, gdb.Schema, intID)
	// countSQL (lower camel case) matches the naming used by the other
	// GetDatastreamsBy* helpers; this was the only one spelled CountSQL.
	countSQL := fmt.Sprintf("select COUNT(*) FROM %s.datastream inner join %s.observedproperty on observedproperty.id = datastream.observedproperty_id where observedproperty.id = %v", gdb.Schema, gdb.Schema, intID)
	return processDatastreams(gdb.Db, sql, qo, countSQL)
}
// processDatastream runs the given query and returns the single Datastream it
// yields, or a not-found error when the result set is empty.
func processDatastream(db *sql.DB, sql string, qo *odata.QueryOptions) (*entities.Datastream, error) {
	ds, _, err := processDatastreams(db, sql, qo, "")
	switch {
	case err != nil:
		return nil, err
	case len(ds) == 0:
		return nil, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	default:
		return ds[0], nil
	}
}
// processDatastreams executes the given query, scans each row into a
// Datastream entity and, when countSQL is non-empty, also executes the count
// query. It returns the entities, the total count (0 when countSQL is empty)
// and any error encountered.
func processDatastreams(db *sql.DB, sql string, qo *odata.QueryOptions, countSQL string) ([]*entities.Datastream, int, error) {
	rows, err := db.Query(sql)
	if err != nil {
		return nil, 0, err
	}
	// Close only after the error check: on a failed Query rows is nil and the
	// original's `defer rows.Close()` before the check would panic.
	defer rows.Close()
	var datastreams = []*entities.Datastream{}
	for rows.Next() {
		var id interface{}
		var name, description, unitofmeasurement string
		var observedarea *string
		var ot int
		var params []interface{}
		var qp []string
		// Bind scan targets in the same property order used to build the
		// select string; default to all properties when no $select was given.
		if qo == nil || qo.QuerySelect == nil || len(qo.QuerySelect.Params) == 0 {
			d := &entities.Datastream{}
			qp = d.GetPropertyNames()
		} else {
			qp = qo.QuerySelect.Params
		}
		for _, p := range qp {
			switch p {
			case "id":
				params = append(params, &id)
			case "name":
				params = append(params, &name)
			case "description":
				params = append(params, &description)
			case "unitOfMeasurement":
				params = append(params, &unitofmeasurement)
			case "observationType":
				params = append(params, &ot)
			case "observedArea":
				params = append(params, &observedarea)
			}
		}
		// The original discarded the Scan error and decoded zero values;
		// report it instead.
		if err = rows.Scan(params...); err != nil {
			return nil, 0, err
		}
		unitOfMeasurementMap, err := JSONToMap(&unitofmeasurement)
		if err != nil {
			return nil, 0, err
		}
		observedAreaMap, err := JSONToMap(observedarea)
		if err != nil {
			return nil, 0, err
		}
		datastream := entities.Datastream{}
		datastream.ID = id
		datastream.Name = name
		datastream.Description = description
		datastream.UnitOfMeasurement = unitOfMeasurementMap
		datastream.ObservedArea = observedAreaMap
		if ot != 0 {
			// ot stays 0 when "observationType" was not selected.
			obs, _ := entities.GetObservationTypeByID(ot)
			datastream.ObservationType = obs.Value
		}
		datastreams = append(datastreams, &datastream)
	}
	// Surface iteration errors (e.g. connection dropped mid-scan), which the
	// original never checked.
	if err = rows.Err(); err != nil {
		return nil, 0, err
	}
	var count int
	if len(countSQL) > 0 {
		if err = db.QueryRow(countSQL).Scan(&count); err != nil {
			return nil, 0, err
		}
	}
	return datastreams, count, nil
}
// PostDatastream inserts a new Datastream after validating that its linked
// Thing, Sensor and ObservedProperty exist, and returns the entity with its
// new id set and the inner entities cleared (so the response serves links).
// TODO: !!!!ADD phenomenonTime SUPPORT!!!!
// TODO: !!!!ADD resulttime SUPPORT!!!!
func (gdb *GostDatabase) PostDatastream(d *entities.Datastream) (*entities.Datastream, error) {
	var dsID, tID, sID, oID int
	var ok bool
	if tID, ok = ToIntID(d.Thing.ID); !ok || !gdb.ThingExists(tID) {
		return nil, gostErrors.NewBadRequestError(errors.New("Thing does not exist"))
	}
	if sID, ok = ToIntID(d.Sensor.ID); !ok || !gdb.SensorExists(sID) {
		return nil, gostErrors.NewBadRequestError(errors.New("Sensor does not exist"))
	}
	if oID, ok = ToIntID(d.ObservedProperty.ID); !ok || !gdb.ObservedPropertyExists(oID) {
		return nil, gostErrors.NewBadRequestError(errors.New("ObservedProperty does not exist"))
	}
	// get the ObservationType id in the lookup table
	observationType, err := entities.GetObservationTypeByValue(d.ObservationType)
	if err != nil {
		return nil, gostErrors.NewBadRequestError(errors.New("ObservationType does not exist"))
	}
	unitOfMeasurement, _ := json.Marshal(d.UnitOfMeasurement)
	geom := "NULL"
	args := []interface{}{d.Name, d.Description, unitOfMeasurement, tID, sID, oID, observationType.Code}
	if len(d.ObservedArea) != 0 {
		observedAreaBytes, _ := json.Marshal(d.ObservedArea)
		// Pass the GeoJSON as a bind parameter instead of splicing it into the
		// SQL text (as the original did): json.Marshal does not escape single
		// quotes, so a quote inside a property value would break the statement
		// and open an SQL injection hole.
		geom = "ST_SetSRID(ST_GeomFromGeoJSON($8),4326)"
		args = append(args, string(observedAreaBytes))
	}
	query := fmt.Sprintf("INSERT INTO %s.datastream (name, description, unitofmeasurement, observedarea, thing_id, sensor_id, observedproperty_id, observationtype) VALUES ($1, $2, $3, %s, $4, $5, $6, $7) RETURNING id", gdb.Schema, geom)
	if err = gdb.Db.QueryRow(query, args...).Scan(&dsID); err != nil {
		return nil, err
	}
	d.ID = dsID
	// clear inner entities to serves links upon response
	d.Thing = nil
	d.Sensor = nil
	d.ObservedProperty = nil
	totalDatastreams++
	return d, nil
}
// PatchDatastream updates the supplied (non-empty) fields of an existing
// Datastream and returns the freshly re-read entity.
func (gdb *GostDatabase) PatchDatastream(id interface{}, ds *entities.Datastream) (*entities.Datastream, error) {
	var err error
	var ok bool
	var intID int
	updates := make(map[string]interface{})
	if intID, ok = ToIntID(id); !ok || !gdb.DatastreamExists(intID) {
		return nil, gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	if len(ds.Name) > 0 {
		updates["name"] = ds.Name
	}
	if len(ds.Description) > 0 {
		updates["description"] = ds.Description
	}
	if len(ds.ObservationType) > 0 {
		// Map the observation type URI onto its lookup-table code.
		observationType, err := entities.GetObservationTypeByValue(ds.ObservationType)
		if err != nil {
			return nil, gostErrors.NewBadRequestError(errors.New("ObservationType does not exist"))
		}
		updates["observationtype"] = observationType.Code
	}
	if len(ds.UnitOfMeasurement) > 0 {
		j, _ := json.Marshal(ds.UnitOfMeasurement)
		updates["unitofmeasurement"] = string(j[:])
	}
	if len(ds.ObservedArea) > 0 {
		// NOTE(review): this assumes updateEntityColumns splices the value in as
		// a raw SQL expression; a single quote inside the GeoJSON would break
		// the statement — confirm and parameterize if possible.
		observedAreaBytes, _ := json.Marshal(ds.ObservedArea)
		updates["observedarea"] = fmt.Sprintf("ST_SetSRID(ST_GeomFromGeoJSON('%s'),4326)", string(observedAreaBytes[:]))
	}
	if err = gdb.updateEntityColumns("datastream", updates, intID); err != nil {
		return nil, err
	}
	// Propagate the re-read error; the original discarded it, which could
	// silently return (nil, nil) to the caller.
	return gdb.GetDatastream(intID, nil)
}
// DeleteDatastream removes the Datastream with the given id. It returns a
// not-found error when the id is not an integer or no row was deleted.
func (gdb *GostDatabase) DeleteDatastream(id interface{}) error {
	dsID, ok := ToIntID(id)
	if !ok {
		return gostErrors.NewRequestNotFound(errors.New("Datastream does not exist"))
	}
	query := fmt.Sprintf("DELETE FROM %s.datastream WHERE id = $1", gdb.Schema)
	res, err := gdb.Db.Exec(query, dsID)
	if err != nil {
		return err
	}
	if affected, _ := res.RowsAffected(); affected == 0 {
		return gostErrors.NewRequestNotFound(errors.New("Datastream not found"))
	}
	totalDatastreams--
	return nil
}
// DatastreamExists reports whether a Datastream row with the given database id
// exists. Query errors are treated as "does not exist".
func (gdb *GostDatabase) DatastreamExists(databaseID int) bool {
	query := fmt.Sprintf("SELECT exists (SELECT 1 FROM %s.datastream WHERE id = $1 LIMIT 1)", gdb.Schema)
	var exists bool
	if err := gdb.Db.QueryRow(query, databaseID).Scan(&exists); err != nil {
		return false
	}
	return exists
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pkix contains shared, low level structures used for ASN.1 parsing
// and serialization of X.509 certificates, CRL and OCSP.
package pkix
import (
"encoding/asn1"
"encoding/hex"
"fmt"
"math/big"
"time"
)
// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC
// 5280, section 4.1.1.2.
type AlgorithmIdentifier struct {
Algorithm asn1.ObjectIdentifier
// Parameters holds the raw, algorithm-specific parameters, if any.
Parameters asn1.RawValue `asn1:"optional"`
}
// RDNSequence is an ordered list of relative distinguished name sets — the
// ASN.1 form of an X.501 distinguished name (RFC 5280, section 4.1.2.4).
type RDNSequence []RelativeDistinguishedNameSET
// attributeTypeNames maps dotted attribute-type OIDs to the short names used
// by the RFC 2253 string representation (e.g. "2.5.4.3" -> "CN").
var attributeTypeNames = map[string]string{
"2.5.4.6": "C",
"2.5.4.10": "O",
"2.5.4.11": "OU",
"2.5.4.3": "CN",
"2.5.4.5": "SERIALNUMBER",
"2.5.4.7": "L",
"2.5.4.8": "ST",
"2.5.4.9": "STREET",
"2.5.4.17": "POSTALCODE",
}
// String returns a string representation of the sequence r,
// roughly following the RFC 2253 Distinguished Names syntax.
func (r RDNSequence) String() string {
s := ""
for i := 0; i < len(r); i++ {
// RFC 2253 renders RDNs in reverse order relative to the ASN.1 encoding.
rdn := r[len(r)-1-i]
if i > 0 {
s += ","
}
// Attributes within one (multi-valued) RDN are joined with '+'.
for j, tv := range rdn {
if j > 0 {
s += "+"
}
oidString := tv.Type.String()
typeName, ok := attributeTypeNames[oidString]
if !ok {
// Unknown attribute type: emit "<oid>=#<DER hex>" (RFC 2253, section 2.3).
derBytes, err := asn1.Marshal(tv.Value)
if err == nil {
s += oidString + "=#" + hex.EncodeToString(derBytes)
continue // No value escaping necessary.
}
// Marshaling failed; fall back to printing the OID with an escaped value.
typeName = oidString
}
valueString := fmt.Sprint(tv.Value)
escaped := make([]rune, 0, len(valueString))
// Escape per RFC 2253, section 2.4: specials always, spaces only at the
// ends, '#' only at the start.
for k, c := range valueString {
escape := false
switch c {
case ',', '+', '"', '\\', '<', '>', ';':
escape = true
case ' ':
escape = k == 0 || k == len(valueString)-1
case '#':
escape = k == 0
}
if escape {
escaped = append(escaped, '\\', c)
} else {
escaped = append(escaped, c)
}
}
s += typeName + "=" + string(escaped)
}
}
return s
}
// RelativeDistinguishedNameSET is one RDN: a set of attribute type/value
// pairs that together form a single level of a distinguished name.
type RelativeDistinguishedNameSET []AttributeTypeAndValue
// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
// RFC 5280, Section 4.1.2.4.
type AttributeTypeAndValue struct {
Type asn1.ObjectIdentifier
Value interface{}
}
// AttributeTypeAndValueSET represents a set of ASN.1 sequences of
// AttributeTypeAndValue sequences from RFC 2986 (PKCS #10).
type AttributeTypeAndValueSET struct {
Type asn1.ObjectIdentifier
Value [][]AttributeTypeAndValue `asn1:"set"`
}
// Extension represents the ASN.1 structure of the same name. See RFC
// 5280, section 4.2.
type Extension struct {
Id asn1.ObjectIdentifier
Critical bool `asn1:"optional"`
Value []byte
}
// Name represents an X.509 distinguished name. This only includes the common
// elements of a DN. When parsing, all elements are stored in Names and
// non-standard elements can be extracted from there. When marshaling, elements
// in ExtraNames are appended and override other values with the same OID.
type Name struct {
Country, Organization, OrganizationalUnit []string
Locality, Province []string
StreetAddress, PostalCode []string
SerialNumber, CommonName string
// Names holds every parsed attribute, including non-standard ones.
Names []AttributeTypeAndValue
// ExtraNames holds attributes copied raw into marshaled names; they
// override named fields with the same OID.
ExtraNames []AttributeTypeAndValue
}
// FillFromRDNSequence populates n from rdns. Every attribute is appended to
// n.Names; attributes under the X.520 arc (2.5.4.x) with string values are
// additionally copied into the corresponding named field.
func (n *Name) FillFromRDNSequence(rdns *RDNSequence) {
for _, rdn := range *rdns {
if len(rdn) == 0 {
continue
}
for _, atv := range rdn {
n.Names = append(n.Names, atv)
value, ok := atv.Value.(string)
if !ok {
// Non-string values are retained only in Names.
continue
}
t := atv.Type
// Only OIDs of the form 2.5.4.x map onto the named fields below.
if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 {
switch t[3] {
case 3:
n.CommonName = value
case 5:
n.SerialNumber = value
case 6:
n.Country = append(n.Country, value)
case 7:
n.Locality = append(n.Locality, value)
case 8:
n.Province = append(n.Province, value)
case 9:
n.StreetAddress = append(n.StreetAddress, value)
case 10:
n.Organization = append(n.Organization, value)
case 11:
n.OrganizationalUnit = append(n.OrganizationalUnit, value)
case 17:
n.PostalCode = append(n.PostalCode, value)
}
}
}
}
}
// OIDs for the standard X.520 attribute types handled by Name.
var (
oidCountry = []int{2, 5, 4, 6}
oidOrganization = []int{2, 5, 4, 10}
oidOrganizationalUnit = []int{2, 5, 4, 11}
oidCommonName = []int{2, 5, 4, 3}
oidSerialNumber = []int{2, 5, 4, 5}
oidLocality = []int{2, 5, 4, 7}
oidProvince = []int{2, 5, 4, 8}
oidStreetAddress = []int{2, 5, 4, 9}
oidPostalCode = []int{2, 5, 4, 17}
)
// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence
// and returns the new value. The relativeDistinguishedNameSET contains an
// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and
// search for AttributeTypeAndValue.
func (n Name) appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence {
// Skip empty values, and skip OIDs already present in ExtraNames — those
// take precedence when marshaling.
if len(values) == 0 || oidInAttributeTypeAndValue(oid, n.ExtraNames) {
return in
}
s := make([]AttributeTypeAndValue, len(values))
for i, value := range values {
s[i].Type = oid
s[i].Value = value
}
return append(in, s)
}
// ToRDNSequence converts n into a single RDNSequence. Multi-valued fields
// each become one multi-valued RDN; ExtraNames entries are appended last,
// one RDN per entry.
func (n Name) ToRDNSequence() (ret RDNSequence) {
ret = n.appendRDNs(ret, n.Country, oidCountry)
ret = n.appendRDNs(ret, n.Province, oidProvince)
ret = n.appendRDNs(ret, n.Locality, oidLocality)
ret = n.appendRDNs(ret, n.StreetAddress, oidStreetAddress)
ret = n.appendRDNs(ret, n.PostalCode, oidPostalCode)
ret = n.appendRDNs(ret, n.Organization, oidOrganization)
ret = n.appendRDNs(ret, n.OrganizationalUnit, oidOrganizationalUnit)
if len(n.CommonName) > 0 {
ret = n.appendRDNs(ret, []string{n.CommonName}, oidCommonName)
}
if len(n.SerialNumber) > 0 {
ret = n.appendRDNs(ret, []string{n.SerialNumber}, oidSerialNumber)
}
for _, atv := range n.ExtraNames {
ret = append(ret, []AttributeTypeAndValue{atv})
}
return ret
}
// String returns the string form of n, roughly following
// the RFC 2253 Distinguished Names syntax.
func (n Name) String() string {
return n.ToRDNSequence().String()
}
// oidInAttributeTypeAndValue reports whether any entry in atv carries the
// given attribute-type OID.
func oidInAttributeTypeAndValue(oid asn1.ObjectIdentifier, atv []AttributeTypeAndValue) bool {
	for i := range atv {
		if atv[i].Type.Equal(oid) {
			return true
		}
	}
	return false
}
// CertificateList represents the ASN.1 structure of the same name. See RFC
// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the
// signature.
type CertificateList struct {
TBSCertList TBSCertificateList
SignatureAlgorithm AlgorithmIdentifier
SignatureValue asn1.BitString
}
// HasExpired reports whether certList should have been updated by now.
// It is true when now is at or past the CRL's NextUpdate time.
func (certList *CertificateList) HasExpired(now time.Time) bool {
return !now.Before(certList.TBSCertList.NextUpdate)
}
// TBSCertificateList represents the ASN.1 structure of the same name. See RFC
// 5280, section 5.1.
type TBSCertificateList struct {
Raw asn1.RawContent
Version int `asn1:"optional,default:0"`
Signature AlgorithmIdentifier
Issuer RDNSequence
ThisUpdate time.Time
NextUpdate time.Time `asn1:"optional"`
RevokedCertificates []RevokedCertificate `asn1:"optional"`
Extensions []Extension `asn1:"tag:0,optional,explicit"`
}
// RevokedCertificate represents the ASN.1 structure of the same name. See RFC
// 5280, section 5.1.
type RevokedCertificate struct {
SerialNumber *big.Int
RevocationTime time.Time
Extensions []Extension `asn1:"optional"`
}
crypto/x509/pkix: improve docs and Name.String()
Previously, non-standard attributes in Name.Names were being
omitted when printed using Name.String(). Now, any non-standard
attributes that would not already be printed in Name.String()
are being added temporarily to Name.ExtraNames to be printed.
Fixes #33094
Fixes #23069
Change-Id: Id9829c20968e16db7194549f69c0eb5985044944
Reviewed-on: https://go-review.googlesource.com/c/go/+/229864
Run-TryBot: Katie Hockman <e321a8fd89c4908465ddaf8d5d0d62480efacb63@golang.org>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
Reviewed-by: Filippo Valsorda <4f40cda291c5f9634e1affd3db44947af61f705c@golang.org>
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pkix contains shared, low level structures used for ASN.1 parsing
// and serialization of X.509 certificates, CRL and OCSP.
package pkix
import (
"encoding/asn1"
"encoding/hex"
"fmt"
"math/big"
"time"
)
// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC
// 5280, section 4.1.1.2.
type AlgorithmIdentifier struct {
Algorithm asn1.ObjectIdentifier
// Parameters holds the raw, algorithm-specific parameters, if any.
Parameters asn1.RawValue `asn1:"optional"`
}
// RDNSequence is an ordered list of relative distinguished name sets — the
// ASN.1 form of an X.501 distinguished name (RFC 5280, section 4.1.2.4).
type RDNSequence []RelativeDistinguishedNameSET
// attributeTypeNames maps dotted attribute-type OIDs to the short names used
// by the RFC 2253 string representation (e.g. "2.5.4.3" -> "CN").
var attributeTypeNames = map[string]string{
"2.5.4.6": "C",
"2.5.4.10": "O",
"2.5.4.11": "OU",
"2.5.4.3": "CN",
"2.5.4.5": "SERIALNUMBER",
"2.5.4.7": "L",
"2.5.4.8": "ST",
"2.5.4.9": "STREET",
"2.5.4.17": "POSTALCODE",
}
// String returns a string representation of the sequence r,
// roughly following the RFC 2253 Distinguished Names syntax.
func (r RDNSequence) String() string {
s := ""
for i := 0; i < len(r); i++ {
// RFC 2253 renders RDNs in reverse order relative to the ASN.1 encoding.
rdn := r[len(r)-1-i]
if i > 0 {
s += ","
}
// Attributes within one (multi-valued) RDN are joined with '+'.
for j, tv := range rdn {
if j > 0 {
s += "+"
}
oidString := tv.Type.String()
typeName, ok := attributeTypeNames[oidString]
if !ok {
// Unknown attribute type: emit "<oid>=#<DER hex>" (RFC 2253, section 2.3).
derBytes, err := asn1.Marshal(tv.Value)
if err == nil {
s += oidString + "=#" + hex.EncodeToString(derBytes)
continue // No value escaping necessary.
}
// Marshaling failed; fall back to printing the OID with an escaped value.
typeName = oidString
}
valueString := fmt.Sprint(tv.Value)
escaped := make([]rune, 0, len(valueString))
// Escape per RFC 2253, section 2.4: specials always, spaces only at the
// ends, '#' only at the start.
for k, c := range valueString {
escape := false
switch c {
case ',', '+', '"', '\\', '<', '>', ';':
escape = true
case ' ':
escape = k == 0 || k == len(valueString)-1
case '#':
escape = k == 0
}
if escape {
escaped = append(escaped, '\\', c)
} else {
escaped = append(escaped, c)
}
}
s += typeName + "=" + string(escaped)
}
}
return s
}
// RelativeDistinguishedNameSET is one RDN: a set of attribute type/value
// pairs that together form a single level of a distinguished name.
type RelativeDistinguishedNameSET []AttributeTypeAndValue
// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
// RFC 5280, Section 4.1.2.4.
type AttributeTypeAndValue struct {
Type asn1.ObjectIdentifier
Value interface{}
}
// AttributeTypeAndValueSET represents a set of ASN.1 sequences of
// AttributeTypeAndValue sequences from RFC 2986 (PKCS #10).
type AttributeTypeAndValueSET struct {
Type asn1.ObjectIdentifier
Value [][]AttributeTypeAndValue `asn1:"set"`
}
// Extension represents the ASN.1 structure of the same name. See RFC
// 5280, section 4.2.
type Extension struct {
Id asn1.ObjectIdentifier
Critical bool `asn1:"optional"`
Value []byte
}
// Name represents an X.509 distinguished name. This only includes the common
// elements of a DN. Note that Name is only an approximation of the X.509
// structure. If an accurate representation is needed, asn1.Unmarshal the raw
// subject or issuer as an RDNSequence.
type Name struct {
Country, Organization, OrganizationalUnit []string
Locality, Province []string
StreetAddress, PostalCode []string
SerialNumber, CommonName string
// Names contains all parsed attributes. When parsing distinguished names,
// this can be used to extract non-standard attributes that are not parsed
// by this package. When marshaling to RDNSequences, the Names field is
// ignored, see ExtraNames.
Names []AttributeTypeAndValue
// ExtraNames contains attributes to be copied, raw, into any marshaled
// distinguished names. Values override any attributes with the same OID.
// The ExtraNames field is not populated when parsing, see Names.
ExtraNames []AttributeTypeAndValue
}
// FillFromRDNSequence populates n from the provided RDNSequence.
// Multi-entry RDNs are flattened, all entries are added to the
// relevant n fields, and the grouping is not preserved.
func (n *Name) FillFromRDNSequence(rdns *RDNSequence) {
for _, rdn := range *rdns {
if len(rdn) == 0 {
continue
}
for _, atv := range rdn {
n.Names = append(n.Names, atv)
value, ok := atv.Value.(string)
if !ok {
// Non-string values are retained only in Names.
continue
}
t := atv.Type
// Only OIDs of the form 2.5.4.x map onto the named fields below.
if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 {
switch t[3] {
case 3:
n.CommonName = value
case 5:
n.SerialNumber = value
case 6:
n.Country = append(n.Country, value)
case 7:
n.Locality = append(n.Locality, value)
case 8:
n.Province = append(n.Province, value)
case 9:
n.StreetAddress = append(n.StreetAddress, value)
case 10:
n.Organization = append(n.Organization, value)
case 11:
n.OrganizationalUnit = append(n.OrganizationalUnit, value)
case 17:
n.PostalCode = append(n.PostalCode, value)
}
}
}
}
}
// OIDs for the standard X.520 attribute types handled by Name.
var (
oidCountry = []int{2, 5, 4, 6}
oidOrganization = []int{2, 5, 4, 10}
oidOrganizationalUnit = []int{2, 5, 4, 11}
oidCommonName = []int{2, 5, 4, 3}
oidSerialNumber = []int{2, 5, 4, 5}
oidLocality = []int{2, 5, 4, 7}
oidProvince = []int{2, 5, 4, 8}
oidStreetAddress = []int{2, 5, 4, 9}
oidPostalCode = []int{2, 5, 4, 17}
)
// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence
// and returns the new value. The relativeDistinguishedNameSET contains an
// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and
// search for AttributeTypeAndValue.
func (n Name) appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence {
// Skip empty values, and skip OIDs already present in ExtraNames — those
// take precedence when marshaling.
if len(values) == 0 || oidInAttributeTypeAndValue(oid, n.ExtraNames) {
return in
}
s := make([]AttributeTypeAndValue, len(values))
for i, value := range values {
s[i].Type = oid
s[i].Value = value
}
return append(in, s)
}
// ToRDNSequence converts n into a single RDNSequence. The following
// attributes are encoded as multi-value RDNs:
//
// - Country
// - Organization
// - OrganizationalUnit
// - Locality
// - Province
// - StreetAddress
// - PostalCode
//
// Each ExtraNames entry is encoded as an individual RDN.
func (n Name) ToRDNSequence() (ret RDNSequence) {
ret = n.appendRDNs(ret, n.Country, oidCountry)
ret = n.appendRDNs(ret, n.Province, oidProvince)
ret = n.appendRDNs(ret, n.Locality, oidLocality)
ret = n.appendRDNs(ret, n.StreetAddress, oidStreetAddress)
ret = n.appendRDNs(ret, n.PostalCode, oidPostalCode)
ret = n.appendRDNs(ret, n.Organization, oidOrganization)
ret = n.appendRDNs(ret, n.OrganizationalUnit, oidOrganizationalUnit)
if len(n.CommonName) > 0 {
ret = n.appendRDNs(ret, []string{n.CommonName}, oidCommonName)
}
if len(n.SerialNumber) > 0 {
ret = n.appendRDNs(ret, []string{n.SerialNumber}, oidSerialNumber)
}
// ExtraNames go last, one single-valued RDN per entry.
for _, atv := range n.ExtraNames {
ret = append(ret, []AttributeTypeAndValue{atv})
}
return ret
}
// String returns the string form of n, roughly following
// the RFC 2253 Distinguished Names syntax.
func (n Name) String() string {
// n is a value receiver, so appending to n.ExtraNames below mutates only
// this call's copy; the caller's Name is untouched. The append makes
// non-standard parsed attributes show up in the output.
if len(n.ExtraNames) == 0 {
for _, atv := range n.Names {
t := atv.Type
if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 {
switch t[3] {
case 3, 5, 6, 7, 8, 9, 10, 11, 17:
// These attributes are already parsed into named fields.
continue
}
}
n.ExtraNames = append(n.ExtraNames, atv)
}
}
return n.ToRDNSequence().String()
}
// oidInAttributeTypeAndValue reports whether any entry in atv carries the
// given attribute-type OID.
func oidInAttributeTypeAndValue(oid asn1.ObjectIdentifier, atv []AttributeTypeAndValue) bool {
	for i := range atv {
		if atv[i].Type.Equal(oid) {
			return true
		}
	}
	return false
}
// CertificateList represents the ASN.1 structure of the same name. See RFC
// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the
// signature.
type CertificateList struct {
TBSCertList TBSCertificateList
SignatureAlgorithm AlgorithmIdentifier
SignatureValue asn1.BitString
}
// HasExpired reports whether certList should have been updated by now.
// It is true when now is at or past the CRL's NextUpdate time.
func (certList *CertificateList) HasExpired(now time.Time) bool {
return !now.Before(certList.TBSCertList.NextUpdate)
}
// TBSCertificateList represents the ASN.1 structure of the same name. See RFC
// 5280, section 5.1.
type TBSCertificateList struct {
Raw asn1.RawContent
Version int `asn1:"optional,default:0"`
Signature AlgorithmIdentifier
Issuer RDNSequence
ThisUpdate time.Time
NextUpdate time.Time `asn1:"optional"`
RevokedCertificates []RevokedCertificate `asn1:"optional"`
Extensions []Extension `asn1:"tag:0,optional,explicit"`
}
// RevokedCertificate represents the ASN.1 structure of the same name. See RFC
// 5280, section 5.1.
type RevokedCertificate struct {
SerialNumber *big.Int
RevocationTime time.Time
Extensions []Extension `asn1:"optional"`
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sql
import (
"context"
"database/sql/driver"
"errors"
"fmt"
"math/rand"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
)
// init installs a putConnHook used by the tests to catch double-frees of
// driver connections: every freed conn records the stack that freed it, and
// freeing a conn that is already in the free list panics with both stacks.
func init() {
type dbConn struct {
db *DB
c *driverConn
}
// freedFrom remembers, per (db, conn) pair, the stack trace of the free.
freedFrom := make(map[dbConn]string)
var mu sync.Mutex
getFreedFrom := func(c dbConn) string {
mu.Lock()
defer mu.Unlock()
return freedFrom[c]
}
setFreedFrom := func(c dbConn, s string) {
mu.Lock()
defer mu.Unlock()
freedFrom[c] = s
}
putConnHook = func(db *DB, c *driverConn) {
idx := -1
for i, v := range db.freeConn {
if v == c {
idx = i
break
}
}
if idx >= 0 {
// print before panic, as panic may get lost due to conflicting panic
// (all goroutines asleep) elsewhere, since we might not unlock
// the mutex in freeConn here.
println("double free of conn. conflicts are:\nA) " + getFreedFrom(dbConn{db, c}) + "\n\nand\nB) " + stack())
panic("double free of conn.")
}
setFreedFrom(dbConn{db, c}, stack())
}
}
// fakeDBName is the database name every test DB is opened with.
const fakeDBName = "foo"
// chrisBirthday is a fixed instant used as test row data.
var chrisBirthday = time.Unix(123456789, 0)
// newTestDB opens a fresh fake DB and seeds it with the named fixture.
func newTestDB(t testing.TB, name string) *DB {
return newTestDBConnector(t, &fakeConnector{name: fakeDBName}, name)
}
// newTestDBConnector opens a DB via fc, wipes it, and creates/populates the
// magic table selected by name ("people", "magicquery" or "tx_status").
func newTestDBConnector(t testing.TB, fc *fakeConnector, name string) *DB {
// The connector always targets fakeDBName regardless of its prior name.
fc.name = fakeDBName
db := OpenDB(fc)
if _, err := db.Exec("WIPE"); err != nil {
t.Fatalf("exec wipe: %v", err)
}
if name == "people" {
exec(t, db, "CREATE|people|name=string,age=int32,photo=blob,dead=bool,bdate=datetime")
exec(t, db, "INSERT|people|name=Alice,age=?,photo=APHOTO", 1)
exec(t, db, "INSERT|people|name=Bob,age=?,photo=BPHOTO", 2)
exec(t, db, "INSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?", 3, chrisBirthday)
}
if name == "magicquery" {
// Magic table name and column, known by fakedb_test.go.
exec(t, db, "CREATE|magicquery|op=string,millis=int32")
exec(t, db, "INSERT|magicquery|op=sleep,millis=10")
}
if name == "tx_status" {
// Magic table name and column, known by fakedb_test.go.
exec(t, db, "CREATE|tx_status|tx_status=string")
exec(t, db, "INSERT|tx_status|tx_status=invalid")
}
return db
}
// TestOpenDB verifies that a DB opened from a Connector reports that
// Connector's driver.
func TestOpenDB(t *testing.T) {
db := OpenDB(dsnConnector{dsn: fakeDBName, driver: fdriver})
if db.Driver() != fdriver {
t.Fatalf("OpenDB should return the driver of the Connector")
}
}
// TestDriverPanic triggers panics at each driver call site (via the fakedb
// PANIC directive) and checks the package neither deadlocks afterwards nor
// swallows the panic.
func TestDriverPanic(t *testing.T) {
// Test that if driver panics, database/sql does not deadlock.
db, err := Open("test", fakeDBName)
if err != nil {
t.Fatalf("Open: %v", err)
}
expectPanic := func(name string, f func()) {
defer func() {
err := recover()
if err == nil {
t.Fatalf("%s did not panic", name)
}
}()
f()
}
expectPanic("Exec Exec", func() { db.Exec("PANIC|Exec|WIPE") })
exec(t, db, "WIPE") // check not deadlocked
expectPanic("Exec NumInput", func() { db.Exec("PANIC|NumInput|WIPE") })
exec(t, db, "WIPE") // check not deadlocked
expectPanic("Exec Close", func() { db.Exec("PANIC|Close|WIPE") })
exec(t, db, "WIPE") // check not deadlocked
exec(t, db, "PANIC|Query|WIPE") // should run successfully: Exec does not call Query
exec(t, db, "WIPE") // check not deadlocked
exec(t, db, "CREATE|people|name=string,age=int32,photo=blob,dead=bool,bdate=datetime")
expectPanic("Query Query", func() { db.Query("PANIC|Query|SELECT|people|age,name|") })
expectPanic("Query NumInput", func() { db.Query("PANIC|NumInput|SELECT|people|age,name|") })
expectPanic("Query Close", func() {
rows, err := db.Query("PANIC|Close|SELECT|people|age,name|")
if err != nil {
t.Fatal(err)
}
rows.Close()
})
db.Query("PANIC|Exec|SELECT|people|age,name|") // should run successfully: Query does not call Exec
exec(t, db, "WIPE") // check not deadlocked
}
// exec runs query against db and fails the test immediately on error.
func exec(t testing.TB, db *DB, query string, args ...interface{}) {
t.Helper()
_, err := db.Exec(query, args...)
if err != nil {
t.Fatalf("Exec of %q: %v", query, err)
}
}
// closeDB closes db at the end of a test, verifying that no statements were
// leaked on free connections, that Close succeeds, and that every open
// connection eventually goes away.
func closeDB(t testing.TB, db *DB) {
// Re-panic any in-flight test panic (this runs as a defer) so it isn't
// hidden by the cleanup below.
if e := recover(); e != nil {
fmt.Printf("Panic: %v\n", e)
panic(e)
}
defer setHookpostCloseConn(nil)
setHookpostCloseConn(func(_ *fakeConn, err error) {
if err != nil {
t.Errorf("Error closing fakeConn: %v", err)
}
})
db.mu.Lock()
for i, dc := range db.freeConn {
if n := len(dc.openStmt); n > 0 {
// Just a sanity check. This is legal in
// general, but if we make the tests clean up
// their statements first, then we can safely
// verify this is always zero here, and any
// other value is a leak.
t.Errorf("while closing db, freeConn %d/%d had %d open stmts; want 0", i, len(db.freeConn), n)
}
}
db.mu.Unlock()
err := db.Close()
if err != nil {
t.Fatalf("error closing DB: %v", err)
}
// Connections close asynchronously; poll until the count drains to zero.
var numOpen int
if !waitCondition(5*time.Second, 5*time.Millisecond, func() bool {
numOpen = db.numOpenConns()
return numOpen == 0
}) {
t.Fatalf("%d connections still open after closing DB", numOpen)
}
}
// numPrepares assumes that db has exactly 1 idle conn and returns
// its count of calls to Prepare
// (read from the underlying *fakeConn of the single free connection).
func numPrepares(t *testing.T, db *DB) int {
if n := len(db.freeConn); n != 1 {
t.Fatalf("free conns = %d; want 1", n)
}
return db.freeConn[0].ci.(*fakeConn).numPrepare
}
// numDeps returns the number of entries in db's dependency map,
// under the DB mutex.
func (db *DB) numDeps() int {
db.mu.Lock()
defer db.mu.Unlock()
return len(db.dep)
}
// Dependencies are closed via a goroutine, so this polls waiting for
// numDeps to fall to want, waiting up to d.
func (db *DB) numDepsPollUntil(want int, d time.Duration) int {
deadline := time.Now().Add(d)
for {
n := db.numDeps()
if n <= want || time.Now().After(deadline) {
return n
}
time.Sleep(50 * time.Millisecond)
}
}
// numFreeConns returns the current size of the free-connection pool,
// under the DB mutex.
func (db *DB) numFreeConns() int {
db.mu.Lock()
defer db.mu.Unlock()
return len(db.freeConn)
}
// numOpenConns returns the current number of open connections,
// under the DB mutex.
func (db *DB) numOpenConns() int {
db.mu.Lock()
defer db.mu.Unlock()
return db.numOpen
}
// clearAllConns closes all connections in db.
func (db *DB) clearAllConns(t *testing.T) {
// Setting the idle limit to 0 forces every free connection to close.
db.SetMaxIdleConns(0)
if g, w := db.numFreeConns(), 0; g != w {
t.Errorf("free conns = %d; want %d", g, w)
}
if n := db.numDepsPollUntil(0, time.Second); n > 0 {
t.Errorf("number of dependencies = %d; expected 0", n)
db.dumpDeps(t)
}
}
// dumpDeps logs db's entire dependency graph, for debugging leaked deps.
func (db *DB) dumpDeps(t *testing.T) {
for fc := range db.dep {
db.dumpDep(t, 0, fc, map[finalCloser]bool{})
}
}
// dumpDep logs one dependency subtree, indented by depth; seen guards
// against cycles.
func (db *DB) dumpDep(t *testing.T, depth int, dep finalCloser, seen map[finalCloser]bool) {
seen[dep] = true
indent := strings.Repeat(" ", depth)
ds := db.dep[dep]
for k := range ds {
t.Logf("%s%T (%p) waiting for -> %T (%p)", indent, dep, dep, k, k)
if fc, ok := k.(finalCloser); ok {
if !seen[fc] {
db.dumpDep(t, depth+1, fc, seen)
}
}
}
}
// TestQuery verifies basic Query/Scan iteration over the "people" fixture,
// and that hitting EOF returns the connection to the free pool with exactly
// one Prepare having been executed.
func TestQuery(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
prepares0 := numPrepares(t, db)
rows, err := db.Query("SELECT|people|age,name|")
if err != nil {
t.Fatalf("Query: %v", err)
}
type row struct {
age int
name string
}
got := []row{}
for rows.Next() {
var r row
err = rows.Scan(&r.age, &r.name)
if err != nil {
t.Fatalf("Scan: %v", err)
}
got = append(got, r)
}
err = rows.Err()
if err != nil {
t.Fatalf("Err: %v", err)
}
want := []row{
{age: 1, name: "Alice"},
{age: 2, name: "Bob"},
{age: 3, name: "Chris"},
}
if !reflect.DeepEqual(got, want) {
t.Errorf("mismatch.\n got: %#v\nwant: %#v", got, want)
}
// And verify that the final rows.Next() call, which hit EOF,
// also closed the rows connection.
if n := db.numFreeConns(); n != 1 {
t.Fatalf("free conns after query hitting EOF = %d; want 1", n)
}
if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
t.Errorf("executed %d Prepare statements; want 1", prepares)
}
}
// TestQueryContext tests canceling the context while scanning the rows.
func TestQueryContext(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
prepares0 := numPrepares(t, db)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
rows, err := db.QueryContext(ctx, "SELECT|people|age,name|")
if err != nil {
t.Fatalf("Query: %v", err)
}
type row struct {
age int
name string
}
got := []row{}
index := 0
for rows.Next() {
// Cancel mid-iteration, before scanning the third row, and wait for
// the cancellation to actually close the rows.
if index == 2 {
cancel()
waitForRowsClose(t, rows, 5*time.Second)
}
var r row
err = rows.Scan(&r.age, &r.name)
if err != nil {
// The third Scan is expected to fail because of the cancel above.
if index == 2 {
break
}
t.Fatalf("Scan: %v", err)
}
if index == 2 && err != context.Canceled {
t.Fatalf("Scan: %v; want context.Canceled", err)
}
got = append(got, r)
index++
}
select {
case <-ctx.Done():
if err := ctx.Err(); err != context.Canceled {
t.Fatalf("context err = %v; want context.Canceled", err)
}
default:
t.Fatalf("context err = nil; want context.Canceled")
}
// Only the first two rows should have been collected.
want := []row{
{age: 1, name: "Alice"},
{age: 2, name: "Bob"},
}
if !reflect.DeepEqual(got, want) {
t.Errorf("mismatch.\n got: %#v\nwant: %#v", got, want)
}
// And verify that the final rows.Next() call, which hit EOF,
// also closed the rows connection.
waitForRowsClose(t, rows, 5*time.Second)
waitForFree(t, db, 5*time.Second, 1)
if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
t.Errorf("executed %d Prepare statements; want 1", prepares)
}
}
func waitCondition(waitFor, checkEvery time.Duration, fn func() bool) bool {
deadline := time.Now().Add(waitFor)
for time.Now().Before(deadline) {
if fn() {
return true
}
time.Sleep(checkEvery)
}
return false
}
// waitForFree checks db.numFreeConns until either it equals want or
// the maxWait time elapses.
func waitForFree(t *testing.T, db *DB, maxWait time.Duration, want int) {
var numFree int
if !waitCondition(maxWait, 5*time.Millisecond, func() bool {
numFree = db.numFreeConns()
return numFree == want
}) {
t.Fatalf("free conns after hitting EOF = %d; want %d", numFree, want)
}
}
// waitForRowsClose polls rows.closed (under its mutex) until the rows are
// closed or maxWait elapses, failing the test on timeout.
func waitForRowsClose(t *testing.T, rows *Rows, maxWait time.Duration) {
if !waitCondition(maxWait, 5*time.Millisecond, func() bool {
rows.closemu.RLock()
defer rows.closemu.RUnlock()
return rows.closed
}) {
t.Fatal("failed to close rows")
}
}
// TestQueryContextWait ensures that rows and all internal statements are closed when
// a query context is closed during execution.
func TestQueryContextWait(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
prepares0 := numPrepares(t, db)
// TODO(kardianos): convert this from using a timeout to using an explicit
// cancel when the query signals that it is "executing" the query.
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
defer cancel()
// This will trigger the *fakeConn.Prepare method which will take time
// performing the query. The ctxDriverPrepare func will check the context
// after this and close the rows and return an error.
_, err := db.QueryContext(ctx, "WAIT|1s|SELECT|people|age,name|")
if err != context.DeadlineExceeded {
t.Fatalf("expected QueryContext to error with context deadline exceeded but returned %v", err)
}
// Verify closed rows connection after error condition.
waitForFree(t, db, 5*time.Second, 1)
if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
// TODO(kardianos): if the context timeouts before the db.QueryContext
// executes this check may fail. After adjusting how the context
// is canceled above revert this back to a Fatal error.
t.Logf("executed %d Prepare statements; want 1", prepares)
}
}
// TestTxContextWait tests the transaction behavior when the tx context is canceled
// during execution of the query.
func TestTxContextWait(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		t.Fatal(err)
	}
	// Force the rollback path to discard the connection so we can assert
	// the pool ends up empty below.
	tx.keepConnOnRollback = false
	go func() {
		time.Sleep(15 * time.Millisecond)
		cancel()
	}()
	// This will trigger the *fakeConn.Prepare method which will take time
	// performing the query. The ctxDriverPrepare func will check the context
	// after this and close the rows and return an error.
	_, err = tx.QueryContext(ctx, "WAIT|1s|SELECT|people|age,name|")
	if err != context.Canceled {
		t.Fatalf("expected QueryContext to error with context canceled but returned %v", err)
	}
	// want == 0: the canceled tx's connection must have been discarded,
	// not returned to the free pool.
	waitForFree(t, db, 5*time.Second, 0)
}
// TestTxContextWaitNoDiscard is the same as TestTxContextWait, but should not discard
// the final connection.
func TestTxContextWaitNoDiscard(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Millisecond)
	defer cancel()
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		// Guard against the context being canceled before BeginTx completes.
		if err == context.DeadlineExceeded {
			t.Skip("tx context canceled prior to first use")
		}
		t.Fatal(err)
	}
	// This will trigger the *fakeConn.Prepare method which will take time
	// performing the query. The ctxDriverPrepare func will check the context
	// after this and close the rows and return an error.
	_, err = tx.QueryContext(ctx, "WAIT|1s|SELECT|people|age,name|")
	if err != context.DeadlineExceeded {
		t.Fatalf("expected QueryContext to error with context deadline exceeded but returned %v", err)
	}
	// want == 1: unlike TestTxContextWait, the connection is kept and
	// must be back in the free pool.
	waitForFree(t, db, 5*time.Second, 1)
}
// TestUnsupportedOptions checks that the database fails when a driver that
// doesn't implement ConnBeginTx is used with non-default options and an
// un-cancellable context.
func TestUnsupportedOptions(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	opts := &TxOptions{Isolation: LevelSerializable, ReadOnly: true}
	if _, err := db.BeginTx(context.Background(), opts); err == nil {
		t.Fatal("expected error when using unsupported options, got nil")
	}
}
// TestMultiResultSetQuery runs a two-statement query and verifies that both
// result sets can be consumed via NextResultSet, that a third set is not
// reported, and that the connection is released after EOF.
func TestMultiResultSetQuery(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	prepares0 := numPrepares(t, db)
	// Two SELECTs separated by ';' produce two result sets.
	rows, err := db.Query("SELECT|people|age,name|;SELECT|people|name|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}
	type row1 struct {
		age  int
		name string
	}
	type row2 struct {
		name string
	}
	// First result set: age,name rows.
	got1 := []row1{}
	for rows.Next() {
		var r row1
		err = rows.Scan(&r.age, &r.name)
		if err != nil {
			t.Fatalf("Scan: %v", err)
		}
		got1 = append(got1, r)
	}
	err = rows.Err()
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	want1 := []row1{
		{age: 1, name: "Alice"},
		{age: 2, name: "Bob"},
		{age: 3, name: "Chris"},
	}
	if !reflect.DeepEqual(got1, want1) {
		t.Errorf("mismatch.\n got1: %#v\nwant: %#v", got1, want1)
	}
	if !rows.NextResultSet() {
		t.Errorf("expected another result set")
	}
	// Second result set: name-only rows.
	got2 := []row2{}
	for rows.Next() {
		var r row2
		err = rows.Scan(&r.name)
		if err != nil {
			t.Fatalf("Scan: %v", err)
		}
		got2 = append(got2, r)
	}
	err = rows.Err()
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	want2 := []row2{
		{name: "Alice"},
		{name: "Bob"},
		{name: "Chris"},
	}
	if !reflect.DeepEqual(got2, want2) {
		t.Errorf("mismatch.\n got: %#v\nwant: %#v", got2, want2)
	}
	if rows.NextResultSet() {
		t.Errorf("expected no more result sets")
	}
	// And verify that the final rows.Next() call, which hit EOF,
	// also closed the rows connection.
	waitForFree(t, db, 5*time.Second, 1)
	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
		t.Errorf("executed %d Prepare statements; want 1", prepares)
	}
}
// TestQueryNamedArg verifies that named parameters (sql.Named) bind to
// placeholders by name rather than by positional order.
func TestQueryNamedArg(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	prepares0 := numPrepares(t, db)
	rows, err := db.Query(
		// Ensure the name and age parameters only match on placeholder name, not position.
		"SELECT|people|age,name|name=?name,age=?age",
		Named("age", 2),
		Named("name", "Bob"),
	)
	if err != nil {
		t.Fatalf("Query: %v", err)
	}
	type row struct {
		age  int
		name string
	}
	got := []row{}
	for rows.Next() {
		var r row
		err = rows.Scan(&r.age, &r.name)
		if err != nil {
			t.Fatalf("Scan: %v", err)
		}
		got = append(got, r)
	}
	err = rows.Err()
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	want := []row{
		{age: 2, name: "Bob"},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("mismatch.\n got: %#v\nwant: %#v", got, want)
	}
	// And verify that the final rows.Next() call, which hit EOF,
	// also closed the rows connection. The connection is returned to the
	// pool asynchronously, so poll with waitForFree (as the other query
	// tests do) instead of checking numFreeConns once, which is flaky.
	waitForFree(t, db, 5*time.Second, 1)
	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
		t.Errorf("executed %d Prepare statements; want 1", prepares)
	}
}
// TestPoolExhaustOnCancel saturates a bounded connection pool, then checks
// that requests waiting for a connection honor cancellation and that the
// pool recovers afterwards.
func TestPoolExhaustOnCancel(t *testing.T) {
	if testing.Short() {
		t.Skip("long test")
	}
	max := 3
	var saturate, saturateDone sync.WaitGroup
	saturate.Add(max)
	saturateDone.Add(max)
	donePing := make(chan bool)
	// NOTE(review): state is written on the main goroutine and read by
	// waiter on query goroutines without synchronization — looks racy
	// under -race; confirm whether the test relies on ordering here.
	state := 0
	// waiter will be called for all queries, including
	// initial setup queries. The state is only assigned when
	// no queries are made.
	//
	// Only allow the first batch of queries to finish once the
	// second batch of Ping queries have finished.
	waiter := func(ctx context.Context) {
		switch state {
		case 0:
			// Nothing. Initial database setup.
		case 1:
			saturate.Done()
			select {
			case <-ctx.Done():
			case <-donePing:
			}
		case 2:
		}
	}
	db := newTestDBConnector(t, &fakeConnector{waiter: waiter}, "people")
	defer closeDB(t, db)
	db.SetMaxOpenConns(max)
	// First saturate the connection pool.
	// Then start new requests for a connection that is canceled after it is requested.
	state = 1
	for i := 0; i < max; i++ {
		go func() {
			rows, err := db.Query("SELECT|people|name,photo|")
			if err != nil {
				t.Errorf("Query: %v", err)
				return
			}
			rows.Close()
			saturateDone.Done()
		}()
	}
	saturate.Wait()
	if t.Failed() {
		t.FailNow()
	}
	state = 2
	// Now cancel the request while it is waiting.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	for i := 0; i < max; i++ {
		ctxReq, cancelReq := context.WithCancel(ctx)
		go func() {
			time.Sleep(100 * time.Millisecond)
			cancelReq()
		}()
		err := db.PingContext(ctxReq)
		if err != context.Canceled {
			t.Fatalf("PingContext (Exhaust): %v", err)
		}
	}
	// Release the first batch of queries blocked in waiter.
	close(donePing)
	saturateDone.Wait()
	// Now try to open a normal connection.
	err := db.PingContext(ctx)
	if err != nil {
		t.Fatalf("PingContext (Normal): %v", err)
	}
}
// TestRowsColumns verifies that Rows.Columns reports the queried column
// names in order, and that closing the rows succeeds.
func TestRowsColumns(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}
	cols, err := rows.Columns()
	if err != nil {
		t.Fatalf("Columns: %v", err)
	}
	if want := []string{"age", "name"}; !reflect.DeepEqual(cols, want) {
		t.Errorf("got %#v; want %#v", cols, want)
	}
	if err := rows.Close(); err != nil {
		t.Errorf("error closing rows: %s", err)
	}
}
// TestRowsColumnTypes checks that ColumnTypes exposes usable ScanTypes:
// values allocated via reflection from those types must scan successfully,
// and a spot-checked row must carry the expected data.
func TestRowsColumnTypes(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}
	tt, err := rows.ColumnTypes()
	if err != nil {
		t.Fatalf("ColumnTypes: %v", err)
	}
	types := make([]reflect.Type, len(tt))
	for i, tp := range tt {
		st := tp.ScanType()
		if st == nil {
			t.Errorf("scantype is null for column %q", tp.Name())
			continue
		}
		types[i] = st
	}
	// Allocate one destination per column from its reported ScanType.
	values := make([]interface{}, len(tt))
	for i := range values {
		values[i] = reflect.New(types[i]).Interface()
	}
	ct := 0
	for rows.Next() {
		err = rows.Scan(values...)
		if err != nil {
			t.Fatalf("failed to scan values in %v", err)
		}
		// Spot-check the second row (Bob, age 2).
		if ct == 1 {
			if age := *values[0].(*int32); age != 2 {
				t.Errorf("Expected 2, got %v", age)
			}
			if name := *values[1].(*string); name != "Bob" {
				t.Errorf("Expected Bob, got %v", name)
			}
		}
		ct++
	}
	if ct != 3 {
		t.Errorf("expected 3 rows, got %d", ct)
	}
	if err := rows.Close(); err != nil {
		t.Errorf("error closing rows: %s", err)
	}
}
// TestQueryRow exercises QueryRow+Scan: destination-count mismatch errors,
// time.Time scanning, lookups by age and by name, and []byte scanning.
func TestQueryRow(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	var name string
	var age int
	var birthday time.Time
	// Two columns selected but only one destination: must error.
	err := db.QueryRow("SELECT|people|age,name|age=?", 3).Scan(&age)
	if err == nil || !strings.Contains(err.Error(), "expected 2 destination arguments") {
		t.Errorf("expected error from wrong number of arguments; actually got: %v", err)
	}
	err = db.QueryRow("SELECT|people|bdate|age=?", 3).Scan(&birthday)
	if err != nil || !birthday.Equal(chrisBirthday) {
		t.Errorf("chris birthday = %v, err = %v; want %v", birthday, err, chrisBirthday)
	}
	err = db.QueryRow("SELECT|people|age,name|age=?", 2).Scan(&age, &name)
	if err != nil {
		t.Fatalf("age QueryRow+Scan: %v", err)
	}
	if name != "Bob" {
		t.Errorf("expected name Bob, got %q", name)
	}
	if age != 2 {
		t.Errorf("expected age 2, got %d", age)
	}
	err = db.QueryRow("SELECT|people|age,name|name=?", "Alice").Scan(&age, &name)
	if err != nil {
		t.Fatalf("name QueryRow+Scan: %v", err)
	}
	if name != "Alice" {
		t.Errorf("expected name Alice, got %q", name)
	}
	if age != 1 {
		t.Errorf("expected age 1, got %d", age)
	}
	// Binary column scans into []byte.
	var photo []byte
	err = db.QueryRow("SELECT|people|photo|name=?", "Alice").Scan(&photo)
	if err != nil {
		t.Fatalf("photo QueryRow+Scan: %v", err)
	}
	want := []byte("APHOTO")
	if !reflect.DeepEqual(photo, want) {
		t.Errorf("photo = %q; want %q", photo, want)
	}
}
// TestRowErr verifies Row.Err: nil for a valid query, and a "context
// canceled" error when the query context was canceled before use.
func TestRowErr(t *testing.T) {
	db := newTestDB(t, "people")
	// Close the DB like every other test here does; previously it leaked.
	defer closeDB(t, db)
	err := db.QueryRowContext(context.Background(), "SELECT|people|bdate|age=?", 3).Err()
	if err != nil {
		t.Errorf("Unexpected err = %v; want %v", err, nil)
	}
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	err = db.QueryRowContext(ctx, "SELECT|people|bdate|age=?", 3).Err()
	exp := "context canceled"
	if err == nil || !strings.Contains(err.Error(), exp) {
		t.Errorf("Expected err = %v; got %v", exp, err)
	}
}
// TestTxRollbackCommitErr verifies that using a finished transaction fails
// with ErrTxDone: Commit after Rollback, and Rollback after Commit.
func TestTxRollbackCommitErr(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	err = tx.Rollback()
	if err != nil {
		t.Errorf("expected nil error from Rollback; got %v", err)
	}
	// The tx is done after Rollback, so Commit must report ErrTxDone.
	err = tx.Commit()
	if err != ErrTxDone {
		t.Errorf("expected %q from Commit; got %q", ErrTxDone, err)
	}
	tx, err = db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	err = tx.Commit()
	if err != nil {
		t.Errorf("expected nil error from Commit; got %v", err)
	}
	// Likewise, Rollback after Commit must report ErrTxDone.
	err = tx.Rollback()
	if err != ErrTxDone {
		t.Errorf("expected %q from Rollback; got %q", ErrTxDone, err)
	}
}
// TestStatementErrorAfterClose checks that a prepared statement refuses to
// run once it has been closed.
func TestStatementErrorAfterClose(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	stmt, err := db.Prepare("SELECT|people|age|name=?")
	if err != nil {
		t.Fatalf("Prepare: %v", err)
	}
	if err := stmt.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
	var name string
	if err := stmt.QueryRow("foo").Scan(&name); err == nil {
		t.Errorf("expected error from QueryRow.Scan after Stmt.Close")
	}
}
// TestStatementQueryRow runs one prepared statement against several inputs
// and checks each scanned result.
func TestStatementQueryRow(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	stmt, err := db.Prepare("SELECT|people|age|name=?")
	if err != nil {
		t.Fatalf("Prepare: %v", err)
	}
	defer stmt.Close()
	var age int
	// Table of name -> expected age.
	for n, tt := range []struct {
		name string
		want int
	}{
		{"Alice", 1},
		{"Bob", 2},
		{"Chris", 3},
	} {
		if err := stmt.QueryRow(tt.name).Scan(&age); err != nil {
			t.Errorf("%d: on %q, QueryRow/Scan: %v", n, tt.name, err)
		} else if age != tt.want {
			t.Errorf("%d: age=%d, want %d", n, age, tt.want)
		}
	}
}
// stubDriverStmt is a minimal driver.Stmt whose Close returns a
// configurable error; used to test error propagation from Stmt.Close.
type stubDriverStmt struct {
	err error
}
// Close returns the stubbed error.
func (s stubDriverStmt) Close() error {
	return s.err
}
// NumInput returns -1 (parameter count unknown).
func (s stubDriverStmt) NumInput() int {
	return -1
}
// Exec is a no-op stub.
func (s stubDriverStmt) Exec(args []driver.Value) (driver.Result, error) {
	return nil, nil
}
// Query is a no-op stub.
func (s stubDriverStmt) Query(args []driver.Value) (driver.Rows, error) {
	return nil, nil
}
// golang.org/issue/12798
// TestStatementClose checks that Stmt.Close surfaces both a sticky error
// and an error returned by the underlying driver statement's Close.
func TestStatementClose(t *testing.T) {
	want := errors.New("STMT ERROR")
	tests := []struct {
		stmt *Stmt
		msg  string
	}{
		{&Stmt{stickyErr: want}, "stickyErr not propagated"},
		{&Stmt{cg: &Tx{}, cgds: &driverStmt{Locker: &sync.Mutex{}, si: stubDriverStmt{want}}}, "driverStmt.Close() error not propagated"},
	}
	for _, test := range tests {
		if err := test.stmt.Close(); err != want {
			t.Errorf("%s. Got stmt.Close() = %v, want = %v", test.msg, err, want)
		}
	}
}
// golang.org/issue/3734
// TestStatementQueryRowConcurrent runs the same prepared statement from
// several goroutines at once and collects per-goroutine errors via a channel.
func TestStatementQueryRowConcurrent(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	stmt, err := db.Prepare("SELECT|people|age|name=?")
	if err != nil {
		t.Fatalf("Prepare: %v", err)
	}
	defer stmt.Close()
	const n = 10
	// Buffered so goroutines never block on send.
	ch := make(chan error, n)
	for i := 0; i < n; i++ {
		go func() {
			var age int
			err := stmt.QueryRow("Alice").Scan(&age)
			if err == nil && age != 1 {
				err = fmt.Errorf("unexpected age %d", age)
			}
			ch <- err
		}()
	}
	for i := 0; i < n; i++ {
		if err := <-ch; err != nil {
			t.Error(err)
		}
	}
}
// just a test of fakedb itself
func TestBogusPreboundParameters(t *testing.T) {
	db := newTestDB(t, "foo")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	// Preparing with a bogus prebound conversion must fail with a
	// specific fakedb error.
	_, err := db.Prepare("INSERT|t1|name=?,age=bogusconversion")
	if err == nil {
		t.Fatalf("expected error")
	}
	const wantErr = `fakedb: invalid conversion to int32 from "bogusconversion"`
	if err.Error() != wantErr {
		t.Errorf("unexpected error: %v", err)
	}
}
// TestExec drives a prepared INSERT through a table of argument lists,
// covering valid conversions, invalid conversions, and arity mismatches.
func TestExec(t *testing.T) {
	db := newTestDB(t, "foo")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Errorf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	type execTest struct {
		args    []interface{}
		wantErr string // "" means the Exec should succeed
	}
	execTests := []execTest{
		// Okay:
		{[]interface{}{"Brad", 31}, ""},
		{[]interface{}{"Brad", int64(31)}, ""},
		{[]interface{}{"Bob", "32"}, ""},
		{[]interface{}{7, 9}, ""},
		// Invalid conversions:
		{[]interface{}{"Brad", int64(0xFFFFFFFF)}, "sql: converting argument $2 type: sql/driver: value 4294967295 overflows int32"},
		{[]interface{}{"Brad", "strconv fail"}, `sql: converting argument $2 type: sql/driver: value "strconv fail" can't be converted to int32`},
		// Wrong number of args:
		{[]interface{}{}, "sql: expected 2 arguments, got 0"},
		{[]interface{}{1, 2, 3}, "sql: expected 2 arguments, got 3"},
	}
	for n, et := range execTests {
		_, err := stmt.Exec(et.args...)
		errStr := ""
		if err != nil {
			errStr = err.Error()
		}
		if errStr != et.wantErr {
			t.Errorf("stmt.Execute #%d: for %v, got error %q, want error %q",
				n, et.args, errStr, et.wantErr)
		}
	}
}
// TestTxPrepare prepares and executes a statement inside a transaction and
// verifies the statement is closed by Commit.
func TestTxPrepare(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	stmt, err := tx.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	_, err = stmt.Exec("Bobby", 7)
	if err != nil {
		t.Fatalf("Exec = %v", err)
	}
	err = tx.Commit()
	if err != nil {
		t.Fatalf("Commit = %v", err)
	}
	// Commit() should have closed the statement
	if !stmt.closed {
		t.Fatal("Stmt not closed after Commit")
	}
}
// TestTxStmt adopts a DB-prepared statement into a transaction via
// Tx.Stmt and verifies the tx-scoped copy is closed by Commit.
func TestTxStmt(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	// txs is the transaction-bound copy of stmt.
	txs := tx.Stmt(stmt)
	defer txs.Close()
	_, err = txs.Exec("Bobby", 7)
	if err != nil {
		t.Fatalf("Exec = %v", err)
	}
	err = tx.Commit()
	if err != nil {
		t.Fatalf("Commit = %v", err)
	}
	// Commit() should have closed the statement
	if !txs.closed {
		t.Fatal("Stmt not closed after Commit")
	}
}
// TestTxStmtPreparedOnce checks that adopting the same DB statement into a
// transaction twice reuses the single underlying prepared statement.
func TestTxStmtPreparedOnce(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32")
	prepares0 := numPrepares(t, db)
	// db.Prepare increments numPrepares.
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	// Both tx statements derive from the same prepared stmt.
	txs1 := tx.Stmt(stmt)
	txs2 := tx.Stmt(stmt)
	_, err = txs1.Exec("Go", 7)
	if err != nil {
		t.Fatalf("Exec = %v", err)
	}
	txs1.Close()
	// txs2 must still work after txs1 is closed.
	_, err = txs2.Exec("Gopher", 8)
	if err != nil {
		t.Fatalf("Exec = %v", err)
	}
	txs2.Close()
	err = tx.Commit()
	if err != nil {
		t.Fatalf("Commit = %v", err)
	}
	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
		t.Errorf("executed %d Prepare statements; want 1", prepares)
	}
}
// TestTxStmtClosedRePrepares checks that Tx.Stmt on an already-closed
// statement re-prepares the query instead of reusing the closed one.
func TestTxStmtClosedRePrepares(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32")
	prepares0 := numPrepares(t, db)
	// db.Prepare increments numPrepares.
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	// Close before adoption, forcing the re-prepare path below.
	err = stmt.Close()
	if err != nil {
		t.Fatalf("stmt.Close() = %v", err)
	}
	// tx.Stmt increments numPrepares because stmt is closed.
	txs := tx.Stmt(stmt)
	if txs.stickyErr != nil {
		t.Fatal(txs.stickyErr)
	}
	if txs.parentStmt != nil {
		t.Fatal("expected nil parentStmt")
	}
	_, err = txs.Exec(`Eric`, 82)
	if err != nil {
		t.Fatalf("txs.Exec = %v", err)
	}
	err = txs.Close()
	if err != nil {
		t.Fatalf("txs.Close = %v", err)
	}
	tx.Rollback()
	// One prepare from db.Prepare, one from the re-prepare in tx.Stmt.
	if prepares := numPrepares(t, db) - prepares0; prepares != 2 {
		t.Errorf("executed %d Prepare statements; want 2", prepares)
	}
}
// TestParentStmtOutlivesTxStmt verifies that closing a tx-derived statement
// and rolling back the tx leaves the parent DB statement usable.
func TestParentStmtOutlivesTxStmt(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32")
	// Make sure everything happens on the same connection.
	db.SetMaxOpenConns(1)
	prepares0 := numPrepares(t, db)
	// db.Prepare increments numPrepares.
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	txs := tx.Stmt(stmt)
	// The parent stmt should track exactly one connection-statement pair.
	if len(stmt.css) != 1 {
		t.Fatalf("len(stmt.css) = %v; want 1", len(stmt.css))
	}
	err = txs.Close()
	if err != nil {
		t.Fatalf("txs.Close() = %v", err)
	}
	err = tx.Rollback()
	if err != nil {
		t.Fatalf("tx.Rollback() = %v", err)
	}
	// txs must not be valid.
	_, err = txs.Exec("Suzan", 30)
	if err == nil {
		t.Fatalf("txs.Exec(), expected err")
	}
	// Stmt must still be valid.
	_, err = stmt.Exec("Janina", 25)
	if err != nil {
		t.Fatalf("stmt.Exec() = %v", err)
	}
	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
		t.Errorf("executed %d Prepare statements; want 1", prepares)
	}
}
// Test that tx.Stmt called with a statement already
// associated with tx as argument re-prepares the same
// statement again.
func TestTxStmtFromTxStmtRePrepares(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32")
	prepares0 := numPrepares(t, db)
	// db.Prepare increments numPrepares.
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	txs1 := tx.Stmt(stmt)
	// tx.Stmt(txs1) increments numPrepares because txs1 already
	// belongs to a transaction (albeit the same transaction).
	txs2 := tx.Stmt(txs1)
	if txs2.stickyErr != nil {
		t.Fatal(txs2.stickyErr)
	}
	if txs2.parentStmt != nil {
		t.Fatal("expected nil parentStmt")
	}
	_, err = txs2.Exec(`Eric`, 82)
	if err != nil {
		t.Fatal(err)
	}
	err = txs1.Close()
	if err != nil {
		t.Fatalf("txs1.Close = %v", err)
	}
	err = txs2.Close()
	if err != nil {
		// Fixed copy-paste typo: this failure is about txs2, not txs1.
		t.Fatalf("txs2.Close = %v", err)
	}
	err = tx.Rollback()
	if err != nil {
		t.Fatalf("tx.Rollback = %v", err)
	}
	if prepares := numPrepares(t, db) - prepares0; prepares != 2 {
		t.Errorf("executed %d Prepare statements; want 2", prepares)
	}
}
// Issue: https://golang.org/issue/2784
// This test didn't fail before because we got lucky with the fakedb driver.
// It was failing, and now not, in github.com/bradfitz/go-sql-test
func TestTxQuery(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	exec(t, db, "INSERT|t1|name=Alice")
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	defer tx.Rollback()
	r, err := tx.Query("SELECT|t1|name|")
	if err != nil {
		t.Fatal(err)
	}
	defer r.Close()
	// Exactly one row was inserted; distinguish iteration error from EOF.
	if !r.Next() {
		if r.Err() != nil {
			t.Fatal(r.Err())
		}
		t.Fatal("expected one row")
	}
	var x string
	err = r.Scan(&x)
	if err != nil {
		t.Fatal(err)
	}
}
// TestTxQueryInvalid confirms that querying a nonexistent table inside a
// transaction reports an error.
func TestTxQueryInvalid(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	defer tx.Rollback()
	if _, err := tx.Query("SELECT|t1|name|"); err == nil {
		t.Fatal("Error expected")
	}
}
// Tests fix for issue 4433, that retries in Begin happen when
// conn.Begin() returns ErrBadConn
func TestTxErrBadConn(t *testing.T) {
	// ";badConn" makes the fake driver return ErrBadConn, exercising retry.
	db, err := Open("test", fakeDBName+";badConn")
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	if _, err := db.Exec("WIPE"); err != nil {
		t.Fatalf("exec wipe: %v", err)
	}
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	txs := tx.Stmt(stmt)
	defer txs.Close()
	_, err = txs.Exec("Bobby", 7)
	if err != nil {
		t.Fatalf("Exec = %v", err)
	}
	err = tx.Commit()
	if err != nil {
		t.Fatalf("Commit = %v", err)
	}
}
// TestConnQuery exercises QueryRowContext and PingContext on a single
// reserved connection obtained from db.Conn.
func TestConnQuery(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	conn, err := db.Conn(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Avoid the fake driver marking the session dirty mid-test.
	conn.dc.ci.(*fakeConn).skipDirtySession = true
	defer conn.Close()
	var name string
	err = conn.QueryRowContext(ctx, "SELECT|people|name|age=?", 3).Scan(&name)
	if err != nil {
		t.Fatal(err)
	}
	if name != "Chris" {
		t.Fatalf("unexpected result, got %q want Chris", name)
	}
	err = conn.PingContext(ctx)
	if err != nil {
		t.Fatal(err)
	}
}
// TestConnRaw checks Conn.Raw: the callback receives the underlying driver
// connection, and a panic inside the callback closes the connection.
func TestConnRaw(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	conn, err := db.Conn(ctx)
	if err != nil {
		t.Fatal(err)
	}
	conn.dc.ci.(*fakeConn).skipDirtySession = true
	defer conn.Close()
	sawFunc := false
	err = conn.Raw(func(dc interface{}) error {
		sawFunc = true
		if _, ok := dc.(*fakeConn); !ok {
			return fmt.Errorf("got %T want *fakeConn", dc)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if !sawFunc {
		t.Fatal("Raw func not called")
	}
	// A panic inside the Raw callback must propagate to the caller and
	// leave the connection closed (conn.dc == nil).
	func() {
		defer func() {
			x := recover()
			if x == nil {
				t.Fatal("expected panic")
			}
			conn.closemu.Lock()
			closed := conn.dc == nil
			conn.closemu.Unlock()
			if !closed {
				t.Fatal("expected connection to be closed after panic")
			}
		}()
		err = conn.Raw(func(dc interface{}) error {
			panic("Conn.Raw panic should return an error")
		})
		t.Fatal("expected panic from Raw func")
	}()
}
// TestCursorFake scans a cursor-valued column into a *Rows and iterates
// the nested result set, checking the row count and the age column.
func TestCursorFake(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	exec(t, db, "CREATE|peoplecursor|list=table")
	exec(t, db, "INSERT|peoplecursor|list=people!name!age")
	rows, err := db.QueryContext(ctx, `SELECT|peoplecursor|list|`)
	if err != nil {
		t.Fatal(err)
	}
	defer rows.Close()
	if !rows.Next() {
		t.Fatal("no rows")
	}
	// Scanning into a *Rows materializes the cursor as a nested result set.
	var cursor = &Rows{}
	err = rows.Scan(cursor)
	if err != nil {
		t.Fatal(err)
	}
	defer cursor.Close()
	const expectedRows = 3
	var currentRow int64
	var n int64
	var s string // receives the name column; only the age (n) is checked
	for cursor.Next() {
		currentRow++
		err = cursor.Scan(&s, &n)
		if err != nil {
			t.Fatal(err)
		}
		// In the fixture, ages are 1..3 matching the row number.
		if n != currentRow {
			t.Errorf("expected number(Age)=%d, got %d", currentRow, n)
		}
	}
	if currentRow != expectedRows {
		t.Errorf("expected %d rows, got %d rows", expectedRows, currentRow)
	}
}
// TestInvalidNilValues verifies the error messages produced when a NULL
// column is scanned into destinations that cannot represent NULL.
func TestInvalidNilValues(t *testing.T) {
	var date1 time.Time
	var date2 int
	tests := []struct {
		name          string
		input         interface{}
		expectedError string
	}{
		{
			name:          "time.Time",
			input:         &date1,
			expectedError: `sql: Scan error on column index 0, name "bdate": unsupported Scan, storing driver.Value type <nil> into type *time.Time`,
		},
		{
			name:          "int",
			input:         &date2,
			expectedError: `sql: Scan error on column index 0, name "bdate": converting NULL to int is unsupported`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			db := newTestDB(t, "people")
			defer closeDB(t, db)
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			conn, err := db.Conn(ctx)
			if err != nil {
				t.Fatal(err)
			}
			conn.dc.ci.(*fakeConn).skipDirtySession = true
			defer conn.Close()
			// Alice (age=1) has a NULL bdate in the fixture.
			err = conn.QueryRowContext(ctx, "SELECT|people|bdate|age=?", 1).Scan(tt.input)
			if err == nil {
				t.Fatal("expected error when querying nil column, but succeeded")
			}
			if err.Error() != tt.expectedError {
				t.Fatalf("Expected error: %s\nReceived: %s", tt.expectedError, err.Error())
			}
			// The connection must remain usable after the scan error.
			err = conn.PingContext(ctx)
			if err != nil {
				t.Fatal(err)
			}
		})
	}
}
// TestConnTx begins a transaction on a reserved connection, inserts a row,
// commits, and reads the row back on the same connection.
func TestConnTx(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	conn, err := db.Conn(ctx)
	if err != nil {
		t.Fatal(err)
	}
	conn.dc.ci.(*fakeConn).skipDirtySession = true
	defer conn.Close()
	tx, err := conn.BeginTx(ctx, nil)
	if err != nil {
		t.Fatal(err)
	}
	insertName, insertAge := "Nancy", 33
	_, err = tx.ExecContext(ctx, "INSERT|people|name=?,age=?,photo=APHOTO", insertName, insertAge)
	if err != nil {
		t.Fatal(err)
	}
	err = tx.Commit()
	if err != nil {
		t.Fatal(err)
	}
	// Read back via the committed data.
	var selectName string
	err = conn.QueryRowContext(ctx, "SELECT|people|name|age=?", insertAge).Scan(&selectName)
	if err != nil {
		t.Fatal(err)
	}
	if selectName != insertName {
		t.Fatalf("got %q want %q", selectName, insertName)
	}
}
// TestConnIsValid verifies that a database connection that should be discarded,
// is actually discarded and does not re-enter the connection pool.
// If the IsValid method from *fakeConn is removed, this test will fail.
func TestConnIsValid(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	db.SetMaxOpenConns(1)
	ctx := context.Background()
	c, err := db.Conn(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Mark the driver connection permanently bad via Raw access.
	err = c.Raw(func(raw interface{}) error {
		dc := raw.(*fakeConn)
		dc.stickyBad = true
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	c.Close()
	// The bad connection must not be back in the free pool.
	if len(db.freeConn) > 0 && db.freeConn[0].ci.(*fakeConn).stickyBad {
		t.Fatal("bad connection returned to pool; expected bad connection to be discarded")
	}
}
// Tests fix for issue 2542, that we release a lock when querying on
// a closed connection.
func TestIssue2542Deadlock(t *testing.T) {
	db := newTestDB(t, "people")
	closeDB(t, db)
	// Query twice: if the first failure leaked a lock, the second
	// call would deadlock instead of returning an error.
	for i := 0; i < 2; i++ {
		if _, err := db.Query("SELECT|people|age,name|"); err == nil {
			t.Fatalf("expected error")
		}
	}
}
// From golang.org/issue/3865
// TestCloseStmtBeforeRows closes a statement while its Rows are still open,
// then closes the rows; neither close may fail.
func TestCloseStmtBeforeRows(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	s, err := db.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	r, err := s.Query()
	if err != nil {
		s.Close()
		t.Fatal(err)
	}
	// Close the statement first, while r is still open.
	err = s.Close()
	if err != nil {
		t.Fatal(err)
	}
	r.Close()
}
// Tests fix for issue 2788, that we bind nil to a []byte if the
// value in the column is sql null
func TestNullByteSlice(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t|id=int32,name=nullstring")
	exec(t, db, "INSERT|t|id=10,name=?", nil)
	var name []byte
	err := db.QueryRow("SELECT|t|name|id=?", 10).Scan(&name)
	if err != nil {
		t.Fatal(err)
	}
	// NULL column -> nil slice, not an empty one.
	if name != nil {
		t.Fatalf("name []byte should be nil for null column value, got: %#v", name)
	}
	exec(t, db, "INSERT|t|id=11,name=?", "bob")
	err = db.QueryRow("SELECT|t|name|id=?", 11).Scan(&name)
	if err != nil {
		t.Fatal(err)
	}
	if string(name) != "bob" {
		t.Fatalf("name []byte should be bob, got: %q", string(name))
	}
}
// TestPointerParamsAndScans checks that *string works both as a query
// parameter (nil meaning NULL) and as a scan destination (set to nil for
// NULL columns).
func TestPointerParamsAndScans(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t|id=int32,name=nullstring")
	bob := "bob"
	var name *string
	name = &bob
	exec(t, db, "INSERT|t|id=10,name=?", name)
	// nil pointer parameter inserts NULL.
	name = nil
	exec(t, db, "INSERT|t|id=20,name=?", name)
	err := db.QueryRow("SELECT|t|name|id=?", 10).Scan(&name)
	if err != nil {
		t.Fatalf("querying id 10: %v", err)
	}
	if name == nil {
		t.Errorf("id 10's name = nil; want bob")
	} else if *name != "bob" {
		t.Errorf("id 10's name = %q; want bob", *name)
	}
	err = db.QueryRow("SELECT|t|name|id=?", 20).Scan(&name)
	if err != nil {
		t.Fatalf("querying id 20: %v", err)
	}
	if name != nil {
		t.Errorf("id 20 = %q; want nil", *name)
	}
}
// TestQueryRowClosingStmt verifies that QueryRow's implicitly prepared
// statement is closed after the scan (stmtsMade == stmtsClosed).
func TestQueryRowClosingStmt(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	var name string
	var age int
	err := db.QueryRow("SELECT|people|age,name|age=?", 3).Scan(&age, &name)
	if err != nil {
		t.Fatal(err)
	}
	if len(db.freeConn) != 1 {
		t.Fatalf("expected 1 free conn")
	}
	fakeConn := db.freeConn[0].ci.(*fakeConn)
	if made, closed := fakeConn.stmtsMade, fakeConn.stmtsClosed; made != closed {
		t.Errorf("statement close mismatch: made %d, closed %d", made, closed)
	}
}
// atomicRowsCloseHook holds the current rows-close hook so tests can swap
// it concurrently and safely.
var atomicRowsCloseHook atomic.Value // of func(*Rows, *error)
func init() {
	// Bridge the package's rowsCloseHook to the atomic value above.
	rowsCloseHook = func() func(*Rows, *error) {
		fn, _ := atomicRowsCloseHook.Load().(func(*Rows, *error))
		return fn
	}
}
// setRowsCloseHook installs fn as the rows-close hook; passing nil resets
// the hook to a no-op.
func setRowsCloseHook(fn func(*Rows, *error)) {
	if fn == nil {
		// Can't change an atomic.Value back to nil, so set it to this
		// no-op func instead.
		fn = func(*Rows, *error) {}
	}
	atomicRowsCloseHook.Store(fn)
}
// Test issue 6651
// TestIssue6651 verifies that errors injected into rows.Next and rows.Close
// surface from QueryRow(...).Scan.
func TestIssue6651(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	var v string
	want := "error in rows.Next"
	rowsCursorNextHook = func(dest []driver.Value) error {
		// errors.New, not fmt.Errorf(want): want is not a format string
		// (go vet printf check; a literal '%' would corrupt the message).
		return errors.New(want)
	}
	defer func() { rowsCursorNextHook = nil }()
	err := db.QueryRow("SELECT|people|name|").Scan(&v)
	if err == nil || err.Error() != want {
		t.Errorf("error = %q; want %q", err, want)
	}
	rowsCursorNextHook = nil
	want = "error in rows.Close"
	setRowsCloseHook(func(rows *Rows, err *error) {
		*err = errors.New(want)
	})
	defer setRowsCloseHook(nil)
	err = db.QueryRow("SELECT|people|name|").Scan(&v)
	if err == nil || err.Error() != want {
		t.Errorf("error = %q; want %q", err, want)
	}
}
// nullTestRow is one row of a null-wrapper test: the value inserted into
// the nullable column, the value inserted into the non-null column, and
// the expected result of scanning the nullable column back.
type nullTestRow struct {
	nullParam    interface{}
	notNullParam interface{}
	scanNullVal  interface{}
}
// nullTestSpec describes a null-wrapper test: the fakedb column types to
// use and six rows of insert/scan expectations driven by nullTestRun.
type nullTestSpec struct {
	nullType    string
	notNullType string
	rows        [6]nullTestRow
}
func TestNullStringParam(t *testing.T) {
spec := nullTestSpec{"nullstring", "string", [6]nullTestRow{
{NullString{"aqua", true}, "", NullString{"aqua", true}},
{NullString{"brown", false}, "", NullString{"", false}},
{"chartreuse", "", NullString{"chartreuse", true}},
{NullString{"darkred", true}, "", NullString{"darkred", true}},
{NullString{"eel", false}, "", NullString{"", false}},
{"foo", NullString{"black", false}, nil},
}}
nullTestRun(t, spec)
}
func TestNullInt64Param(t *testing.T) {
spec := nullTestSpec{"nullint64", "int64", [6]nullTestRow{
{NullInt64{31, true}, 1, NullInt64{31, true}},
{NullInt64{-22, false}, 1, NullInt64{0, false}},
{22, 1, NullInt64{22, true}},
{NullInt64{33, true}, 1, NullInt64{33, true}},
{NullInt64{222, false}, 1, NullInt64{0, false}},
{0, NullInt64{31, false}, nil},
}}
nullTestRun(t, spec)
}
func TestNullInt32Param(t *testing.T) {
spec := nullTestSpec{"nullint32", "int32", [6]nullTestRow{
{NullInt32{31, true}, 1, NullInt32{31, true}},
{NullInt32{-22, false}, 1, NullInt32{0, false}},
{22, 1, NullInt32{22, true}},
{NullInt32{33, true}, 1, NullInt32{33, true}},
{NullInt32{222, false}, 1, NullInt32{0, false}},
{0, NullInt32{31, false}, nil},
}}
nullTestRun(t, spec)
}
func TestNullInt16Param(t *testing.T) {
spec := nullTestSpec{"nullint16", "int16", [6]nullTestRow{
{NullInt16{31, true}, 1, NullInt16{31, true}},
{NullInt16{-22, false}, 1, NullInt16{0, false}},
{22, 1, NullInt16{22, true}},
{NullInt16{33, true}, 1, NullInt16{33, true}},
{NullInt16{222, false}, 1, NullInt16{0, false}},
{0, NullInt16{31, false}, nil},
}}
nullTestRun(t, spec)
}
// TestNullByteParam exercises NullByte as both a bind parameter and a
// scan destination through the shared nullTestRun harness.
func TestNullByteParam(t *testing.T) {
	rows := [6]nullTestRow{
		{nullParam: NullByte{31, true}, notNullParam: 1, scanNullVal: NullByte{31, true}},
		{nullParam: NullByte{0, false}, notNullParam: 1, scanNullVal: NullByte{0, false}},
		{nullParam: 22, notNullParam: 1, scanNullVal: NullByte{22, true}},
		{nullParam: NullByte{33, true}, notNullParam: 1, scanNullVal: NullByte{33, true}},
		{nullParam: NullByte{222, false}, notNullParam: 1, scanNullVal: NullByte{0, false}},
		// Invalid NullByte bound to the NOT NULL column: insert must fail.
		{nullParam: 0, notNullParam: NullByte{31, false}, scanNullVal: nil},
	}
	nullTestRun(t, nullTestSpec{nullType: "nullbyte", notNullType: "byte", rows: rows})
}
// TestNullFloat64Param exercises NullFloat64 as both a bind parameter and
// a scan destination through the shared nullTestRun harness.
func TestNullFloat64Param(t *testing.T) {
	rows := [6]nullTestRow{
		{nullParam: NullFloat64{31.2, true}, notNullParam: 1, scanNullVal: NullFloat64{31.2, true}},
		{nullParam: NullFloat64{13.1, false}, notNullParam: 1, scanNullVal: NullFloat64{0, false}},
		{nullParam: -22.9, notNullParam: 1, scanNullVal: NullFloat64{-22.9, true}},
		{nullParam: NullFloat64{33.81, true}, notNullParam: 1, scanNullVal: NullFloat64{33.81, true}},
		{nullParam: NullFloat64{222, false}, notNullParam: 1, scanNullVal: NullFloat64{0, false}},
		// Invalid NullFloat64 bound to the NOT NULL column: insert must fail.
		{nullParam: 10, notNullParam: NullFloat64{31.2, false}, scanNullVal: nil},
	}
	nullTestRun(t, nullTestSpec{nullType: "nullfloat64", notNullType: "float64", rows: rows})
}
// TestNullBoolParam exercises NullBool as both a bind parameter and a
// scan destination through the shared nullTestRun harness.
func TestNullBoolParam(t *testing.T) {
	rows := [6]nullTestRow{
		{nullParam: NullBool{false, true}, notNullParam: true, scanNullVal: NullBool{false, true}},
		{nullParam: NullBool{true, false}, notNullParam: false, scanNullVal: NullBool{false, false}},
		{nullParam: true, notNullParam: true, scanNullVal: NullBool{true, true}},
		{nullParam: NullBool{true, true}, notNullParam: false, scanNullVal: NullBool{true, true}},
		{nullParam: NullBool{true, false}, notNullParam: true, scanNullVal: NullBool{false, false}},
		// Invalid NullBool bound to the NOT NULL column: insert must fail.
		{nullParam: true, notNullParam: NullBool{true, false}, scanNullVal: nil},
	}
	nullTestRun(t, nullTestSpec{nullType: "nullbool", notNullType: "bool", rows: rows})
}
// TestNullTimeParam exercises NullTime as both a bind parameter and a
// scan destination through the shared nullTestRun harness.
func TestNullTimeParam(t *testing.T) {
	var (
		zero  = time.Time{}
		early = time.Date(2000, 1, 1, 8, 9, 10, 11, time.UTC)
		late  = time.Date(2010, 1, 1, 8, 9, 10, 11, time.UTC)
	)
	rows := [6]nullTestRow{
		{nullParam: NullTime{early, true}, notNullParam: late, scanNullVal: NullTime{early, true}},
		{nullParam: NullTime{early, false}, notNullParam: late, scanNullVal: NullTime{zero, false}},
		{nullParam: early, notNullParam: late, scanNullVal: NullTime{early, true}},
		{nullParam: NullTime{early, true}, notNullParam: late, scanNullVal: NullTime{early, true}},
		{nullParam: NullTime{early, false}, notNullParam: late, scanNullVal: NullTime{zero, false}},
		// Invalid NullTime bound to the NOT NULL column: insert must fail.
		{nullParam: late, notNullParam: NullTime{early, false}, scanNullVal: nil},
	}
	nullTestRun(t, nullTestSpec{nullType: "nulldatetime", notNullType: "datetime", rows: rows})
}
// nullTestRun drives one nullTestSpec: it creates a table with a nullable
// and a NOT NULL column of the spec's types, inserts the six fixture rows
// (via Exec and via a prepared statement), then scans the nullable column
// back and compares it against each row's expected scanNullVal.
func nullTestRun(t *testing.T, spec nullTestSpec) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, fmt.Sprintf("CREATE|t|id=int32,name=string,nullf=%s,notnullf=%s", spec.nullType, spec.notNullType))
	// Inserts with db.Exec:
	exec(t, db, "INSERT|t|id=?,name=?,nullf=?,notnullf=?", 1, "alice", spec.rows[0].nullParam, spec.rows[0].notNullParam)
	exec(t, db, "INSERT|t|id=?,name=?,nullf=?,notnullf=?", 2, "bob", spec.rows[1].nullParam, spec.rows[1].notNullParam)
	// Inserts with a prepared statement:
	stmt, err := db.Prepare("INSERT|t|id=?,name=?,nullf=?,notnullf=?")
	if err != nil {
		t.Fatalf("prepare: %v", err)
	}
	defer stmt.Close()
	if _, err := stmt.Exec(3, "chris", spec.rows[2].nullParam, spec.rows[2].notNullParam); err != nil {
		t.Errorf("exec insert chris: %v", err)
	}
	if _, err := stmt.Exec(4, "dave", spec.rows[3].nullParam, spec.rows[3].notNullParam); err != nil {
		t.Errorf("exec insert dave: %v", err)
	}
	if _, err := stmt.Exec(5, "eleanor", spec.rows[4].nullParam, spec.rows[4].notNullParam); err != nil {
		t.Errorf("exec insert eleanor: %v", err)
	}
	// Can't put null val into non-null col
	if _, err := stmt.Exec(6, "bob", spec.rows[5].nullParam, spec.rows[5].notNullParam); err == nil {
		t.Errorf("expected error inserting nil val with prepared statement Exec")
	}
	_, err = db.Exec("INSERT|t|id=?,name=?,nullf=?", 999, nil, nil)
	if err == nil {
		// TODO: this test fails, but it's just because
		// fakeConn implements the optional Execer interface,
		// so arguably this is the correct behavior. But
		// maybe I should flesh out the fakeConn.Exec
		// implementation so this properly fails.
		// t.Errorf("expected error inserting nil name with Exec")
	}
	// Scan each inserted row's nullable column into a freshly allocated
	// value of the same Null* wrapper type as the row's nullParam.
	paramtype := reflect.TypeOf(spec.rows[0].nullParam)
	bindVal := reflect.New(paramtype).Interface()
	for i := 0; i < 5; i++ {
		id := i + 1
		if err := db.QueryRow("SELECT|t|nullf|id=?", id).Scan(bindVal); err != nil {
			t.Errorf("id=%d Scan: %v", id, err)
		}
		// Dereference the pointer so DeepEqual compares wrapper values.
		bindValDeref := reflect.ValueOf(bindVal).Elem().Interface()
		if !reflect.DeepEqual(bindValDeref, spec.rows[i].scanNullVal) {
			t.Errorf("id=%d got %#v, want %#v", id, bindValDeref, spec.rows[i].scanNullVal)
		}
	}
}
// golang.org/issue/4859
// TestQueryRowNilScanDest verifies that scanning into a nil pointer
// produces a descriptive error rather than a panic.
func TestQueryRowNilScanDest(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	var name *string // nil pointer
	err := db.QueryRow("SELECT|people|name|").Scan(name)
	want := `sql: Scan error on column index 0, name "name": destination pointer is nil`
	if err == nil || err.Error() != want {
		// Pass err itself, not err.Error(): this branch is reachable when
		// err == nil, and calling Error() on a nil error would panic.
		t.Errorf("error = %q; want %q", err, want)
	}
}
// TestIssue4902 prepares and closes the same statement repeatedly and
// checks that the pool reuses a single connection rather than opening a
// new one per Prepare. See golang.org/issue/4902.
func TestIssue4902(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	drv := db.Driver().(*fakeDriver)
	opensBefore := drv.openCount

	var lastStmt *Stmt
	for i := 0; i < 10; i++ {
		s, err := db.Prepare("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		lastStmt = s
		if err := s.Close(); err != nil {
			t.Fatal(err)
		}
	}

	if opened := drv.openCount - opensBefore; opened > 1 {
		t.Errorf("opens = %d; want <= 1", opened)
		t.Logf("db = %#v", db)
		t.Logf("driver = %#v", drv)
		t.Logf("stmt = %#v", lastStmt)
	}
}
// Issue 3857
// TestSimultaneousQueries holds two open result sets on the same
// transaction at once. This used to deadlock.
func TestSimultaneousQueries(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	defer tx.Rollback()

	// Both result sets must be open simultaneously, so the deferred
	// closes intentionally run only at test exit.
	for i := 0; i < 2; i++ {
		rows, err := tx.Query("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		defer rows.Close()
	}
}
// TestMaxIdleConns checks that SetMaxIdleConns(0) both discards the
// currently idle connection and prevents later connections from being
// cached when released.
func TestMaxIdleConns(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	beginCommit := func() {
		tx, err := db.Begin()
		if err != nil {
			t.Fatal(err)
		}
		tx.Commit()
	}

	beginCommit()
	if n := len(db.freeConn); n != 1 {
		t.Errorf("freeConns = %d; want 1", n)
	}

	// Lowering the idle limit to zero must drop the cached connection.
	db.SetMaxIdleConns(0)
	if n := len(db.freeConn); n != 0 {
		t.Errorf("freeConns after set to zero = %d; want 0", n)
	}

	// With a zero idle limit, a released connection must not be retained.
	beginCommit()
	if n := len(db.freeConn); n != 0 {
		t.Errorf("freeConns = %d; want 0", n)
	}
}
// TestMaxOpenConns hammers the pool with parallel slow queries while
// capped at 10 open connections, then verifies the driver never opened
// more than the cap and that lowering the cap trims the free list.
func TestMaxOpenConns(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v", err)
		}
	})
	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)
	driver := db.Driver().(*fakeDriver)
	// Force the number of open connections to 0 so we can get an accurate
	// count for the test
	db.clearAllConns(t)
	driver.mu.Lock()
	opens0 := driver.openCount
	closes0 := driver.closeCount
	driver.mu.Unlock()
	db.SetMaxIdleConns(10)
	db.SetMaxOpenConns(10)
	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		t.Fatal(err)
	}
	// Start 50 parallel slow queries.
	const (
		nquery      = 50
		sleepMillis = 25
		nbatch      = 2
	)
	var wg sync.WaitGroup
	for batch := 0; batch < nbatch; batch++ {
		for i := 0; i < nquery; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				var op string
				if err := stmt.QueryRow("sleep", sleepMillis).Scan(&op); err != nil && err != ErrNoRows {
					t.Error(err)
				}
			}()
		}
		// Sleep for twice the expected length of time for the
		// batch of 50 queries above to finish before starting
		// the next round.
		time.Sleep(2 * sleepMillis * time.Millisecond)
	}
	wg.Wait()
	if g, w := db.numFreeConns(), 10; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(20, time.Second); n > 20 {
		t.Errorf("number of dependencies = %d; expected <= 20", n)
		db.dumpDeps(t)
	}
	driver.mu.Lock()
	opens := driver.openCount - opens0
	closes := driver.closeCount - closes0
	driver.mu.Unlock()
	if opens > 10 {
		t.Logf("open calls = %d", opens)
		t.Logf("close calls = %d", closes)
		t.Errorf("db connections opened = %d; want <= 10", opens)
		db.dumpDeps(t)
	}
	if err := stmt.Close(); err != nil {
		t.Fatal(err)
	}
	if g, w := db.numFreeConns(), 10; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(10, time.Second); n > 10 {
		t.Errorf("number of dependencies = %d; expected <= 10", n)
		db.dumpDeps(t)
	}
	// Lowering the cap below the current free count must close the excess.
	db.SetMaxOpenConns(5)
	if g, w := db.numFreeConns(), 5; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(5, time.Second); n > 5 {
		// Message fixed: the condition enforces <= 5, not 0.
		t.Errorf("number of dependencies = %d; expected <= 5", n)
		db.dumpDeps(t)
	}
	// Zero means unlimited; the existing free conns must be left alone.
	db.SetMaxOpenConns(0)
	if g, w := db.numFreeConns(), 5; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(5, time.Second); n > 5 {
		// Message fixed: the condition enforces <= 5, not 0.
		t.Errorf("number of dependencies = %d; expected <= 5", n)
		db.dumpDeps(t)
	}
	db.clearAllConns(t)
}
// Issue 9453: tests that SetMaxOpenConns can be lowered at runtime
// and affects the subsequent release of connections.
func TestMaxOpenConnsOnBusy(t *testing.T) {
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v", err)
		}
	})
	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)
	db.SetMaxOpenConns(3)
	ctx := context.Background()
	// Check out all three permitted connections.
	conn0, err := db.conn(ctx, cachedOrNewConn)
	if err != nil {
		t.Fatalf("db open conn fail: %v", err)
	}
	conn1, err := db.conn(ctx, cachedOrNewConn)
	if err != nil {
		t.Fatalf("db open conn fail: %v", err)
	}
	conn2, err := db.conn(ctx, cachedOrNewConn)
	if err != nil {
		t.Fatalf("db open conn fail: %v", err)
	}
	// Messages below fixed to say "open conns": these assertions check
	// db.numOpen, not the free list.
	if g, w := db.numOpen, 3; g != w {
		t.Errorf("open conns = %d; want %d", g, w)
	}
	// Lowering the limit while all three are busy must not close any yet.
	db.SetMaxOpenConns(2)
	if g, w := db.numOpen, 3; g != w {
		t.Errorf("open conns = %d; want %d", g, w)
	}
	// Releasing two conns brings the count down to the new limit.
	conn0.releaseConn(nil)
	conn1.releaseConn(nil)
	if g, w := db.numOpen, 2; g != w {
		t.Errorf("open conns = %d; want %d", g, w)
	}
	// Releasing the third must not push the count past the limit.
	conn2.releaseConn(nil)
	if g, w := db.numOpen, 2; g != w {
		t.Errorf("open conns = %d; want %d", g, w)
	}
}
// Issue 10886: tests that all connection attempts return when more than
// DB.maxOpen connections are in flight and the first DB.maxOpen fail.
func TestPendingConnsAfterErr(t *testing.T) {
	const (
		maxOpen = 2
		tryOpen = maxOpen*2 + 2
	)
	// No queries will be run.
	db, err := Open("test", fakeDBName)
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	defer closeDB(t, db)
	defer func() {
		for k, v := range db.lastPut {
			t.Logf("%p: %v", k, v)
		}
	}()
	db.SetMaxOpenConns(maxOpen)
	db.SetMaxIdleConns(0)
	errOffline := errors.New("db offline")
	defer func() { setHookOpenErr(nil) }()
	errs := make(chan error, tryOpen)
	var opening sync.WaitGroup
	opening.Add(tryOpen)
	// Make every driver Open fail, but only after all tryOpen workers
	// have started, so the excess requests are queued behind the limit.
	setHookOpenErr(func() error {
		// Wait for all connections to enqueue.
		opening.Wait()
		return errOffline
	})
	for i := 0; i < tryOpen; i++ {
		go func() {
			opening.Done() // signal one connection is in flight
			_, err := db.Exec("will never run")
			errs <- err
		}()
	}
	opening.Wait() // wait for all workers to begin running
	const timeout = 5 * time.Second
	to := time.NewTimer(timeout)
	defer to.Stop()
	// check that all connections fail without deadlock
	for i := 0; i < tryOpen; i++ {
		select {
		case err := <-errs:
			if got, want := err, errOffline; got != want {
				t.Errorf("unexpected err: got %v, want %v", got, want)
			}
		case <-to.C:
			t.Fatalf("orphaned connection request(s), still waiting after %v", timeout)
		}
	}
	// Wait a reasonable time for the database to close all connections.
	tick := time.NewTicker(3 * time.Millisecond)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			db.mu.Lock()
			if db.numOpen == 0 {
				db.mu.Unlock()
				return
			}
			db.mu.Unlock()
		case <-to.C:
			// Closing the database will check for numOpen and fail the test.
			return
		}
	}
}
// TestSingleOpenConn runs two queries back to back with the pool capped
// at one connection; the second query must not deadlock waiting for the
// connection the first query released.
func TestSingleOpenConn(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	db.SetMaxOpenConns(1)

	queryAndClose := func() {
		rows, err := db.Query("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		if err := rows.Close(); err != nil {
			t.Fatal(err)
		}
	}

	queryAndClose()
	// shouldn't deadlock
	queryAndClose()
}
// TestStats verifies DB.Stats' OpenConnections count: one while the pool
// holds a live connection, zero after the DB is closed.
func TestStats(t *testing.T) {
	db := newTestDB(t, "people")
	if n := db.Stats().OpenConnections; n != 1 {
		t.Errorf("stats.OpenConnections = %d; want 1", n)
	}

	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx.Commit()

	closeDB(t, db)
	if n := db.Stats().OpenConnections; n != 0 {
		t.Errorf("stats.OpenConnections = %d; want 0", n)
	}
}
// TestConnMaxLifetime checks that SetConnMaxLifetime expires only the
// connections older than the limit, using a stubbed clock: two conns are
// opened one second apart, the clock is advanced past the lifetime of the
// first, and exactly one close plus one fresh open is expected.
func TestConnMaxLifetime(t *testing.T) {
	t0 := time.Unix(1000000, 0)
	offset := time.Duration(0)
	// Stub the package clock so connection ages are deterministic.
	nowFunc = func() time.Time { return t0.Add(offset) }
	defer func() { nowFunc = time.Now }()
	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)
	driver := db.Driver().(*fakeDriver)
	// Force the number of open connections to 0 so we can get an accurate
	// count for the test
	db.clearAllConns(t)
	driver.mu.Lock()
	opens0 := driver.openCount
	closes0 := driver.closeCount
	driver.mu.Unlock()
	db.SetMaxIdleConns(10)
	db.SetMaxOpenConns(10)
	// Open two connections one (stubbed) second apart.
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	offset = time.Second
	tx2, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx.Commit()
	tx2.Commit()
	driver.mu.Lock()
	opens := driver.openCount - opens0
	closes := driver.closeCount - closes0
	driver.mu.Unlock()
	if opens != 2 {
		t.Errorf("opens = %d; want 2", opens)
	}
	if closes != 0 {
		t.Errorf("closes = %d; want 0", closes)
	}
	if g, w := db.numFreeConns(), 2; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	// Expire first conn
	offset = 11 * time.Second
	db.SetConnMaxLifetime(10 * time.Second)
	// (Removed a leftover "if err != nil" check here: err had already been
	// checked and SetConnMaxLifetime returns nothing.)
	tx, err = db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx2, err = db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx.Commit()
	tx2.Commit()
	driver.mu.Lock()
	opens = driver.openCount - opens0
	closes = driver.closeCount - closes0
	driver.mu.Unlock()
	// The 11s-old conn is expired and replaced; the 10s-old conn survives.
	if opens != 3 {
		t.Errorf("opens = %d; want 3", opens)
	}
	if closes != 1 {
		t.Errorf("closes = %d; want 1", closes)
	}
}
// golang.org/issue/5323
// TestStmtCloseDeps runs many parallel queries through one prepared
// statement, then checks that the pool's dependency tracking and the
// statement's per-connection statement list (stmt.css) shrink as
// expected once the work finishes and the statement is closed.
func TestStmtCloseDeps(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v", err)
		}
	})
	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)
	driver := db.Driver().(*fakeDriver)
	driver.mu.Lock()
	opens0 := driver.openCount
	closes0 := driver.closeCount
	driver.mu.Unlock()
	openDelta0 := opens0 - closes0
	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		t.Fatal(err)
	}
	// Start 50 parallel slow queries.
	const (
		nquery      = 50
		sleepMillis = 25
		nbatch      = 2
	)
	var wg sync.WaitGroup
	for batch := 0; batch < nbatch; batch++ {
		for i := 0; i < nquery; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				var op string
				if err := stmt.QueryRow("sleep", sleepMillis).Scan(&op); err != nil && err != ErrNoRows {
					t.Error(err)
				}
			}()
		}
		// Sleep for twice the expected length of time for the
		// batch of 50 queries above to finish before starting
		// the next round.
		time.Sleep(2 * sleepMillis * time.Millisecond)
	}
	wg.Wait()
	if g, w := db.numFreeConns(), 2; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(4, time.Second); n > 4 {
		t.Errorf("number of dependencies = %d; expected <= 4", n)
		db.dumpDeps(t)
	}
	driver.mu.Lock()
	opens := driver.openCount - opens0
	closes := driver.closeCount - closes0
	openDelta := (driver.openCount - driver.closeCount) - openDelta0
	driver.mu.Unlock()
	if openDelta > 2 {
		t.Logf("open calls = %d", opens)
		t.Logf("close calls = %d", closes)
		t.Logf("open delta = %d", openDelta)
		t.Errorf("db connections opened = %d; want <= 2", openDelta)
		db.dumpDeps(t)
	}
	// The per-connection statement list shrinks asynchronously; poll.
	if !waitCondition(5*time.Second, 5*time.Millisecond, func() bool {
		return len(stmt.css) <= nquery
	}) {
		t.Errorf("len(stmt.css) = %d; want <= %d", len(stmt.css), nquery)
	}
	if err := stmt.Close(); err != nil {
		t.Fatal(err)
	}
	if g, w := db.numFreeConns(), 2; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(2, time.Second); n > 2 {
		t.Errorf("number of dependencies = %d; expected <= 2", n)
		db.dumpDeps(t)
	}
	db.clearAllConns(t)
}
// golang.org/issue/5046
// TestCloseConnBeforeStmts closes the DB while a prepared statement is
// still open, then verifies the driver connection is fully closed and
// that closing the statement afterwards is still safe.
func TestCloseConnBeforeStmts(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v; from %s", err, stack())
			db.dumpDeps(t)
			t.Errorf("DB = %#v", db)
		}
	})
	stmt, err := db.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	if len(db.freeConn) != 1 {
		t.Fatalf("expected 1 freeConn; got %d", len(db.freeConn))
	}
	// Grab the single idle driver connection to inspect its state.
	dc := db.freeConn[0]
	if dc.closed {
		t.Errorf("conn shouldn't be closed")
	}
	if n := len(dc.openStmt); n != 1 {
		t.Errorf("driverConn num openStmt = %d; want 1", n)
	}
	// Close the DB while the statement is still open.
	err = db.Close()
	if err != nil {
		t.Errorf("db Close = %v", err)
	}
	if !dc.closed {
		t.Errorf("after db.Close, driverConn should be closed")
	}
	if n := len(dc.openStmt); n != 0 {
		t.Errorf("driverConn num openStmt = %d; want 0", n)
	}
	// Closing the statement after the DB must not error.
	err = stmt.Close()
	if err != nil {
		t.Errorf("Stmt close = %v", err)
	}
	if !dc.closed {
		t.Errorf("conn should be closed")
	}
	if dc.ci != nil {
		t.Errorf("after Stmt Close, driverConn's Conn interface should be nil")
	}
}
// golang.org/issue/5283: don't release the Rows' connection in Close
// before calling Stmt.Close. Strict close checking in the fake driver
// turns an ordering violation into a test failure.
func TestRowsCloseOrder(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	db.SetMaxIdleConns(0)
	setStrictFakeConnClose(t)
	defer setStrictFakeConnClose(nil)

	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	if err := rows.Close(); err != nil {
		t.Fatal(err)
	}
}
// TestRowsImplicitClose injects an error partway through iteration and
// verifies that Rows.Next stops, Rows.Err reports the injected error,
// and the underlying cursor is implicitly closed.
func TestRowsImplicitClose(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	// Make the fake cursor fail after the second row.
	want, fail := 2, errors.New("fail")
	r := rows.rowsi.(*rowsCursor)
	r.errPos, r.err = want, fail
	got := 0
	for rows.Next() {
		got++
	}
	if got != want {
		t.Errorf("got %d rows, want %d", got, want)
	}
	if err := rows.Err(); err != fail {
		t.Errorf("got error %v, want %v", err, fail)
	}
	// Hitting the error must have closed the cursor without an explicit Close.
	if !r.closed {
		t.Errorf("r.closed is false, want true")
	}
}
// TestStmtCloseOrder queries a table that does not exist, under strict
// connection-close checking, and expects the query to fail cleanly.
func TestStmtCloseOrder(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	db.SetMaxIdleConns(0)
	setStrictFakeConnClose(t)
	defer setStrictFakeConnClose(nil)

	if _, err := db.Query("SELECT|non_existent|name|"); err == nil {
		t.Fatal("Querying non-existent table should fail")
	}
}
// Test cases where there's more than maxBadConnRetries bad connections in the
// pool (issue 8834)
func TestManyErrBadConn(t *testing.T) {
	// manyErrBadConnSetup returns a fresh DB whose entire free list is
	// poisoned with sticky-bad connections, forcing every code path below
	// to exhaust the bad-conn retry loop and fall back to a new conn.
	// Optional "first" callbacks run before the pool is filled (used to
	// prepare statements on a clean connection).
	manyErrBadConnSetup := func(first ...func(db *DB)) *DB {
		db := newTestDB(t, "people")
		for _, f := range first {
			f(db)
		}
		nconn := maxBadConnRetries + 1
		db.SetMaxIdleConns(nconn)
		db.SetMaxOpenConns(nconn)
		// open enough connections
		func() {
			for i := 0; i < nconn; i++ {
				rows, err := db.Query("SELECT|people|age,name|")
				if err != nil {
					t.Fatal(err)
				}
				defer rows.Close()
			}
		}()
		db.mu.Lock()
		defer db.mu.Unlock()
		if db.numOpen != nconn {
			t.Fatalf("unexpected numOpen %d (was expecting %d)", db.numOpen, nconn)
		} else if len(db.freeConn) != nconn {
			t.Fatalf("unexpected len(db.freeConn) %d (was expecting %d)", len(db.freeConn), nconn)
		}
		// Mark every idle connection permanently bad.
		for _, conn := range db.freeConn {
			conn.Lock()
			conn.ci.(*fakeConn).stickyBad = true
			conn.Unlock()
		}
		return db
	}
	// Each operation below must succeed despite the poisoned pool.
	// Query
	db := manyErrBadConnSetup()
	defer closeDB(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	if err = rows.Close(); err != nil {
		t.Fatal(err)
	}
	// Exec
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	_, err = db.Exec("INSERT|people|name=Julia,age=19")
	if err != nil {
		t.Fatal(err)
	}
	// Begin
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	if err = tx.Rollback(); err != nil {
		t.Fatal(err)
	}
	// Prepare
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	stmt, err := db.Prepare("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	if err = stmt.Close(); err != nil {
		t.Fatal(err)
	}
	// Stmt.Exec
	db = manyErrBadConnSetup(func(db *DB) {
		stmt, err = db.Prepare("INSERT|people|name=Julia,age=19")
		if err != nil {
			t.Fatal(err)
		}
	})
	defer closeDB(t, db)
	_, err = stmt.Exec()
	if err != nil {
		t.Fatal(err)
	}
	if err = stmt.Close(); err != nil {
		t.Fatal(err)
	}
	// Stmt.Query
	db = manyErrBadConnSetup(func(db *DB) {
		stmt, err = db.Prepare("SELECT|people|age,name|")
		if err != nil {
			t.Fatal(err)
		}
	})
	defer closeDB(t, db)
	rows, err = stmt.Query()
	if err != nil {
		t.Fatal(err)
	}
	if err = rows.Close(); err != nil {
		t.Fatal(err)
	}
	if err = stmt.Close(); err != nil {
		t.Fatal(err)
	}
	// Conn
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	conn, err := db.Conn(ctx)
	if err != nil {
		t.Fatal(err)
	}
	conn.dc.ci.(*fakeConn).skipDirtySession = true
	err = conn.Close()
	if err != nil {
		t.Fatal(err)
	}
	// Ping
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	err = db.PingContext(ctx)
	if err != nil {
		t.Fatal(err)
	}
}
// Issue 34775: Ensure that a Tx cannot commit after a rollback.
func TestTxCannotCommitAfterRollback(t *testing.T) {
	db := newTestDB(t, "tx_status")
	defer closeDB(t, db)
	// First check query reporting is correct.
	var txStatus string
	err := db.QueryRow("SELECT|tx_status|tx_status|").Scan(&txStatus)
	if err != nil {
		t.Fatal(err)
	}
	if g, w := txStatus, "autocommit"; g != w {
		t.Fatalf("tx_status=%q, wanted %q", g, w)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		t.Fatal(err)
	}
	// Ignore dirty session for this test.
	// A failing test should trigger the dirty session flag as well,
	// but that isn't exactly what this should test for.
	tx.txi.(*fakeTx).c.skipDirtySession = true
	defer tx.Rollback()
	err = tx.QueryRow("SELECT|tx_status|tx_status|").Scan(&txStatus)
	if err != nil {
		t.Fatal(err)
	}
	if g, w := txStatus, "transaction"; g != w {
		t.Fatalf("tx_status=%q, wanted %q", g, w)
	}
	// 1. Begin a transaction.
	// 2. (A) Start a query, (B) begin Tx rollback through a ctx cancel.
	// 3. Check if 2.A has committed in Tx (pass) or outside of Tx (fail).
	sendQuery := make(chan struct{})
	// The Tx status is returned through the row results, ensure
	// that the rows results are not canceled.
	bypassRowsAwaitDone = true
	// hookTxGrabConn fires as the query grabs its connection: cancel the
	// context (starting the rollback) and block until the rollback hook
	// releases sendQuery, interleaving query and rollback deliberately.
	hookTxGrabConn = func() {
		cancel()
		<-sendQuery
	}
	rollbackHook = func() {
		close(sendQuery)
	}
	defer func() {
		hookTxGrabConn = nil
		rollbackHook = nil
		bypassRowsAwaitDone = false
	}()
	err = tx.QueryRow("SELECT|tx_status|tx_status|").Scan(&txStatus)
	if err != nil {
		// A failure here would be expected if skipDirtySession was not set to true above.
		t.Fatal(err)
	}
	if g, w := txStatus, "transaction"; g != w {
		t.Fatalf("tx_status=%q, wanted %q", g, w)
	}
}
// Issue 40985 transaction statement deadlock while context cancel.
func TestTxStmtDeadlock(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	// A very short timeout makes the context cancel while the query loop
	// below is still running, reproducing the reported deadlock window.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Millisecond)
	defer cancel()
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		t.Fatal(err)
	}
	stmt, err := tx.Prepare("SELECT|people|name,age|age=?")
	if err != nil {
		t.Fatal(err)
	}
	// Run number of stmt queries to reproduce deadlock from context cancel
	for i := 0; i < 1e3; i++ {
		// Encounter any close related errors (e.g. ErrTxDone, stmt is closed)
		// is expected due to context cancel.
		_, err = stmt.Query(1)
		if err != nil {
			break
		}
	}
	// Rollback error is intentionally ignored: the tx may already be done.
	_ = tx.Rollback()
}
// Issue32530 encounters an issue where a connection may
// expire right after it comes out of a used connection pool
// even when a new connection is requested.
func TestConnExpiresFreshOutOfPool(t *testing.T) {
	execCases := []struct {
		expired  bool
		badReset bool
	}{
		{false, false},
		{true, false},
		{false, true},
	}
	// Stub the package clock; offset is adjusted per-case under offsetMu.
	t0 := time.Unix(1000000, 0)
	offset := time.Duration(0)
	offsetMu := sync.RWMutex{}
	nowFunc = func() time.Time {
		offsetMu.RLock()
		defer offsetMu.RUnlock()
		return t0.Add(offset)
	}
	defer func() { nowFunc = time.Now }()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)
	db.SetMaxOpenConns(1)
	for _, ec := range execCases {
		ec := ec
		name := fmt.Sprintf("expired=%t,badReset=%t", ec.expired, ec.badReset)
		t.Run(name, func(t *testing.T) {
			db.clearAllConns(t)
			db.SetMaxIdleConns(1)
			db.SetConnMaxLifetime(10 * time.Second)
			// Hold the pool's only connection so the goroutine below queues.
			conn, err := db.conn(ctx, alwaysNewConn)
			if err != nil {
				t.Fatal(err)
			}
			afterPutConn := make(chan struct{})
			waitingForConn := make(chan struct{})
			// This requester blocks until the held connection is put back.
			go func() {
				defer close(afterPutConn)
				conn, err := db.conn(ctx, alwaysNewConn)
				if err == nil {
					db.putConn(conn, err, false)
				} else {
					t.Errorf("db.conn: %v", err)
				}
			}()
			// Poll until the requester is actually queued in connRequests.
			go func() {
				defer close(waitingForConn)
				for {
					if t.Failed() {
						return
					}
					db.mu.Lock()
					ct := len(db.connRequests)
					db.mu.Unlock()
					if ct > 0 {
						return
					}
					time.Sleep(10 * time.Millisecond)
				}
			}()
			<-waitingForConn
			if t.Failed() {
				return
			}
			// Optionally age the held conn past its lifetime and/or mark it
			// bad, then hand it straight to the waiting requester.
			offsetMu.Lock()
			if ec.expired {
				offset = 11 * time.Second
			} else {
				offset = time.Duration(0)
			}
			offsetMu.Unlock()
			conn.ci.(*fakeConn).stickyBad = ec.badReset
			db.putConn(conn, err, true)
			<-afterPutConn
		})
	}
}
// TestIssue20575 ensures the Rows from query does not block
// closing a transaction. Ensure Rows is closed while closing a transaction.
func TestIssue20575(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	_, err = tx.QueryContext(ctx, "SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	// Do not close Rows from QueryContext.
	err = tx.Rollback()
	if err != nil {
		t.Fatal(err)
	}
	// Rollback must have returned promptly; if the context already
	// expired, it blocked on the unclosed Rows.
	select {
	default:
	case <-ctx.Done():
		t.Fatal("timeout: failed to rollback query without closing rows:", ctx.Err())
	}
}
// TestIssue20622 tests closing the transaction before rows is closed, requires
// the race detector to fail.
func TestIssue20622(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		t.Fatal(err)
	}
	rows, err := tx.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	count := 0
	for rows.Next() {
		count++
		var age int
		var name string
		if err := rows.Scan(&age, &name); err != nil {
			t.Fatal("scan failed", err)
		}
		// Cancel mid-iteration so the tx teardown races with Next/Scan;
		// the race detector flags any unsynchronized access.
		if count == 1 {
			cancel()
		}
		time.Sleep(100 * time.Millisecond)
	}
	rows.Close()
	tx.Commit()
}
// golang.org/issue/5718
// TestErrBadConnReconnect verifies that each operation retries on a
// simulated bad connection (exactly one forced failure, then success)
// without leaking connections.
func TestErrBadConnReconnect(t *testing.T) {
	db := newTestDB(t, "foo")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	// simulateBadConn installs a hook that reports a bad connection on the
	// first call only; op must then succeed on the retry, and the open
	// connection count must be unchanged afterwards.
	simulateBadConn := func(name string, hook *func() bool, op func() error) {
		broken, retried := false, false
		numOpen := db.numOpen
		// simulate a broken connection on the first try
		*hook = func() bool {
			if !broken {
				broken = true
				return true
			}
			retried = true
			return false
		}
		if err := op(); err != nil {
			t.Errorf(name+": %v", err)
			return
		}
		if !broken || !retried {
			t.Error(name + ": Failed to simulate broken connection")
		}
		*hook = nil
		if numOpen != db.numOpen {
			t.Errorf(name+": leaked %d connection(s)!", db.numOpen-numOpen)
			numOpen = db.numOpen
		}
	}
	// db.Exec
	dbExec := func() error {
		_, err := db.Exec("INSERT|t1|name=?,age=?,dead=?", "Gordon", 3, true)
		return err
	}
	simulateBadConn("db.Exec prepare", &hookPrepareBadConn, dbExec)
	simulateBadConn("db.Exec exec", &hookExecBadConn, dbExec)
	// db.Query
	dbQuery := func() error {
		rows, err := db.Query("SELECT|t1|age,name|")
		if err == nil {
			err = rows.Close()
		}
		return err
	}
	simulateBadConn("db.Query prepare", &hookPrepareBadConn, dbQuery)
	simulateBadConn("db.Query query", &hookQueryBadConn, dbQuery)
	// db.Prepare
	simulateBadConn("db.Prepare", &hookPrepareBadConn, func() error {
		stmt, err := db.Prepare("INSERT|t1|name=?,age=?,dead=?")
		if err != nil {
			return err
		}
		stmt.Close()
		return nil
	})
	// Provide a way to force a re-prepare of a statement on next execution
	forcePrepare := func(stmt *Stmt) {
		stmt.css = nil
	}
	// stmt.Exec
	stmt1, err := db.Prepare("INSERT|t1|name=?,age=?,dead=?")
	if err != nil {
		t.Fatalf("prepare: %v", err)
	}
	defer stmt1.Close()
	// make sure we must prepare the stmt first
	forcePrepare(stmt1)
	stmtExec := func() error {
		_, err := stmt1.Exec("Gopher", 3, false)
		return err
	}
	simulateBadConn("stmt.Exec prepare", &hookPrepareBadConn, stmtExec)
	simulateBadConn("stmt.Exec exec", &hookExecBadConn, stmtExec)
	// stmt.Query
	stmt2, err := db.Prepare("SELECT|t1|age,name|")
	if err != nil {
		t.Fatalf("prepare: %v", err)
	}
	defer stmt2.Close()
	// make sure we must prepare the stmt first
	forcePrepare(stmt2)
	stmtQuery := func() error {
		rows, err := stmt2.Query()
		if err == nil {
			err = rows.Close()
		}
		return err
	}
	simulateBadConn("stmt.Query prepare", &hookPrepareBadConn, stmtQuery)
	simulateBadConn("stmt.Query exec", &hookQueryBadConn, stmtQuery)
}
// golang.org/issue/11264
// TestTxEndBadConn verifies that a bad connection reported during
// Commit/Rollback surfaces driver.ErrBadConn to the caller (no silent
// retry at transaction end) and leaks no connections.
func TestTxEndBadConn(t *testing.T) {
	db := newTestDB(t, "foo")
	defer closeDB(t, db)
	db.SetMaxIdleConns(0)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	db.SetMaxIdleConns(1)
	// simulateBadConn makes the given hook report a bad connection and
	// expects op to fail with driver.ErrBadConn (unlike the reconnect
	// test, where op is expected to succeed on retry).
	simulateBadConn := func(name string, hook *func() bool, op func() error) {
		broken := false
		numOpen := db.numOpen
		*hook = func() bool {
			if !broken {
				broken = true
			}
			return broken
		}
		if err := op(); err != driver.ErrBadConn {
			t.Errorf(name+": %v", err)
			return
		}
		if !broken {
			t.Error(name + ": Failed to simulate broken connection")
		}
		*hook = nil
		if numOpen != db.numOpen {
			t.Errorf(name+": leaked %d connection(s)!", db.numOpen-numOpen)
		}
	}
	// db.Exec
	dbExec := func(endTx func(tx *Tx) error) func() error {
		return func() error {
			tx, err := db.Begin()
			if err != nil {
				return err
			}
			_, err = tx.Exec("INSERT|t1|name=?,age=?,dead=?", "Gordon", 3, true)
			if err != nil {
				return err
			}
			return endTx(tx)
		}
	}
	simulateBadConn("db.Tx.Exec commit", &hookCommitBadConn, dbExec((*Tx).Commit))
	simulateBadConn("db.Tx.Exec rollback", &hookRollbackBadConn, dbExec((*Tx).Rollback))
	// db.Query
	dbQuery := func(endTx func(tx *Tx) error) func() error {
		return func() error {
			tx, err := db.Begin()
			if err != nil {
				return err
			}
			rows, err := tx.Query("SELECT|t1|age,name|")
			if err == nil {
				err = rows.Close()
			} else {
				return err
			}
			return endTx(tx)
		}
	}
	simulateBadConn("db.Tx.Query commit", &hookCommitBadConn, dbQuery((*Tx).Commit))
	simulateBadConn("db.Tx.Query rollback", &hookRollbackBadConn, dbQuery((*Tx).Rollback))
}
// concurrentTest is one scenario for the concurrency stress tests:
// init sets up state on db, test runs one concurrent operation (called
// from many goroutines), and finish tears the scenario down.
type concurrentTest interface {
	init(t testing.TB, db *DB)
	finish(t testing.TB)
	test(t testing.TB) error
}
// concurrentDBQueryTest runs concurrent Query calls directly on the DB.
type concurrentDBQueryTest struct {
	db *DB
}
func (c *concurrentDBQueryTest) init(t testing.TB, db *DB) {
	c.db = db
}
func (c *concurrentDBQueryTest) finish(t testing.TB) {
	c.db = nil
}
// test issues one query and drains it; Scan errors are deliberately
// ignored here — only Query errors fail the scenario.
func (c *concurrentDBQueryTest) test(t testing.TB) error {
	rows, err := c.db.Query("SELECT|people|name|")
	if err != nil {
		t.Error(err)
		return err
	}
	var name string
	for rows.Next() {
		rows.Scan(&name)
	}
	rows.Close()
	return nil
}
// concurrentDBExecTest runs concurrent Exec calls directly on the DB.
type concurrentDBExecTest struct {
	db *DB
}
func (c *concurrentDBExecTest) init(t testing.TB, db *DB) {
	c.db = db
}
func (c *concurrentDBExecTest) finish(t testing.TB) {
	c.db = nil
}
// test issues one Exec against the fake driver's NOSERT verb.
func (c *concurrentDBExecTest) test(t testing.TB) error {
	_, err := c.db.Exec("NOSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?", 3, chrisBirthday)
	if err != nil {
		t.Error(err)
		return err
	}
	return nil
}
// concurrentStmtQueryTest runs concurrent Query calls through a single
// shared prepared statement.
type concurrentStmtQueryTest struct {
	db *DB
	stmt *Stmt
}
func (c *concurrentStmtQueryTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.stmt, err = db.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
}
func (c *concurrentStmtQueryTest) finish(t testing.TB) {
	if c.stmt != nil {
		c.stmt.Close()
		c.stmt = nil
	}
	c.db = nil
}
// test queries via the shared statement and drains the result; Scan
// errors are deliberately ignored.
func (c *concurrentStmtQueryTest) test(t testing.TB) error {
	rows, err := c.stmt.Query()
	if err != nil {
		t.Errorf("error on query: %v", err)
		return err
	}
	var name string
	for rows.Next() {
		rows.Scan(&name)
	}
	rows.Close()
	return nil
}
// concurrentStmtExecTest runs concurrent Exec calls through a single
// shared prepared statement.
type concurrentStmtExecTest struct {
	db *DB
	stmt *Stmt
}
func (c *concurrentStmtExecTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.stmt, err = db.Prepare("NOSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?")
	if err != nil {
		t.Fatal(err)
	}
}
func (c *concurrentStmtExecTest) finish(t testing.TB) {
	if c.stmt != nil {
		c.stmt.Close()
		c.stmt = nil
	}
	c.db = nil
}
// test executes the shared statement once.
func (c *concurrentStmtExecTest) test(t testing.TB) error {
	_, err := c.stmt.Exec(3, chrisBirthday)
	if err != nil {
		t.Errorf("error on exec: %v", err)
		return err
	}
	return nil
}
// concurrentTxQueryTest runs concurrent queries while a transaction is
// held open.
type concurrentTxQueryTest struct {
	db *DB
	tx *Tx
}
func (c *concurrentTxQueryTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.tx, err = c.db.Begin()
	if err != nil {
		t.Fatal(err)
	}
}
func (c *concurrentTxQueryTest) finish(t testing.TB) {
	if c.tx != nil {
		c.tx.Rollback()
		c.tx = nil
	}
	c.db = nil
}
// test queries and drains one result set; Scan errors are ignored.
// NOTE(review): this queries via c.db, not c.tx — so the queries run
// outside the held transaction. Confirm whether that is intentional.
func (c *concurrentTxQueryTest) test(t testing.TB) error {
	rows, err := c.db.Query("SELECT|people|name|")
	if err != nil {
		t.Error(err)
		return err
	}
	var name string
	for rows.Next() {
		rows.Scan(&name)
	}
	rows.Close()
	return nil
}
// concurrentTxExecTest runs concurrent Exec calls on a single shared
// transaction.
type concurrentTxExecTest struct {
	db *DB
	tx *Tx
}
func (c *concurrentTxExecTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.tx, err = c.db.Begin()
	if err != nil {
		t.Fatal(err)
	}
}
func (c *concurrentTxExecTest) finish(t testing.TB) {
	if c.tx != nil {
		c.tx.Rollback()
		c.tx = nil
	}
	c.db = nil
}
// test executes one insert inside the shared transaction.
func (c *concurrentTxExecTest) test(t testing.TB) error {
	_, err := c.tx.Exec("NOSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?", 3, chrisBirthday)
	if err != nil {
		t.Error(err)
		return err
	}
	return nil
}
// concurrentTxStmtQueryTest exercises concurrent Query calls on a
// statement prepared inside a shared transaction.
type concurrentTxStmtQueryTest struct {
	db   *DB
	tx   *Tx
	stmt *Stmt
}

func (c *concurrentTxStmtQueryTest) init(t testing.TB, db *DB) {
	c.db = db
	tx, err := c.db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	c.tx = tx
	stmt, err := c.tx.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	c.stmt = stmt
}

func (c *concurrentTxStmtQueryTest) finish(t testing.TB) {
	// Close the statement before rolling back its transaction.
	if s := c.stmt; s != nil {
		s.Close()
		c.stmt = nil
	}
	if tx := c.tx; tx != nil {
		tx.Rollback()
		c.tx = nil
	}
	c.db = nil
}

func (c *concurrentTxStmtQueryTest) test(t testing.TB) error {
	rows, err := c.stmt.Query()
	if err != nil {
		t.Errorf("error on query: %v", err)
		return err
	}
	defer rows.Close()
	var name string
	for rows.Next() {
		rows.Scan(&name)
	}
	return nil
}
// concurrentTxStmtExecTest exercises concurrent Exec calls on a
// statement prepared inside a shared transaction.
type concurrentTxStmtExecTest struct {
	db   *DB
	tx   *Tx
	stmt *Stmt
}

func (c *concurrentTxStmtExecTest) init(t testing.TB, db *DB) {
	c.db = db
	tx, err := c.db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	c.tx = tx
	stmt, err := c.tx.Prepare("NOSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?")
	if err != nil {
		t.Fatal(err)
	}
	c.stmt = stmt
}

func (c *concurrentTxStmtExecTest) finish(t testing.TB) {
	// Close the statement before rolling back its transaction.
	if s := c.stmt; s != nil {
		s.Close()
		c.stmt = nil
	}
	if tx := c.tx; tx != nil {
		tx.Rollback()
		c.tx = nil
	}
	c.db = nil
}

func (c *concurrentTxStmtExecTest) test(t testing.TB) error {
	if _, err := c.stmt.Exec(3, chrisBirthday); err != nil {
		t.Errorf("error on exec: %v", err)
		return err
	}
	return nil
}
// concurrentRandomTest aggregates every individual concurrent test and
// dispatches to one of them chosen uniformly at random per call.
type concurrentRandomTest struct {
	tests []concurrentTest
}

// init constructs one instance of each concurrent test variant and
// initializes all of them against the same DB.
func (c *concurrentRandomTest) init(t testing.TB, db *DB) {
	c.tests = []concurrentTest{
		new(concurrentDBQueryTest),
		new(concurrentDBExecTest),
		new(concurrentStmtQueryTest),
		new(concurrentStmtExecTest),
		new(concurrentTxQueryTest),
		new(concurrentTxExecTest),
		new(concurrentTxStmtQueryTest),
		new(concurrentTxStmtExecTest),
	}
	for _, ct := range c.tests {
		ct.init(t, db)
	}
}

// finish tears down every wrapped test.
func (c *concurrentRandomTest) finish(t testing.TB) {
	for _, ct := range c.tests {
		ct.finish(t)
	}
}

// test runs one randomly selected wrapped test.
func (c *concurrentRandomTest) test(t testing.TB) error {
	ct := c.tests[rand.Intn(len(c.tests))]
	return ct.test(t)
}
// doConcurrentTest runs ct.test numReqs times, distributed over a small
// pool of worker goroutines, to exercise the database under concurrent
// load. Failures inside ct.test are reported through t.
func doConcurrentTest(t testing.TB, ct concurrentTest) {
	maxProcs, numReqs := 1, 500
	if testing.Short() {
		maxProcs, numReqs = 4, 50
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs))

	db := newTestDB(t, "people")
	defer closeDB(t, db)

	ct.init(t, db)
	defer ct.finish(t)

	var wg sync.WaitGroup
	wg.Add(numReqs)

	reqs := make(chan bool)
	defer close(reqs)

	for i := 0; i < maxProcs*2; i++ {
		go func() {
			for range reqs {
				// ct.test reports failures via t itself; the request
				// counts as done regardless of the result, so the
				// previous error branch that duplicated wg.Done was
				// redundant.
				ct.test(t)
				wg.Done()
			}
		}()
	}

	for i := 0; i < numReqs; i++ {
		reqs <- true
	}
	wg.Wait()
}
// TestIssue6081 checks that when Rows.Close reports driver.ErrBadConn,
// the connection is discarded (forcing reopens) and the statement's
// per-connection list (stmt.css) does not accumulate dead entries.
func TestIssue6081(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	drv := db.Driver().(*fakeDriver)
	// Snapshot open/close counters so later deltas are meaningful.
	drv.mu.Lock()
	opens0 := drv.openCount
	closes0 := drv.closeCount
	drv.mu.Unlock()
	stmt, err := db.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	// Force every Rows.Close to report a bad connection.
	setRowsCloseHook(func(rows *Rows, err *error) {
		*err = driver.ErrBadConn
	})
	defer setRowsCloseHook(nil)
	for i := 0; i < 10; i++ {
		rows, err := stmt.Query()
		if err != nil {
			t.Fatal(err)
		}
		rows.Close()
	}
	// Bad connections must have been pruned from the statement.
	if n := len(stmt.css); n > 1 {
		t.Errorf("len(css slice) = %d; want <= 1", n)
	}
	stmt.Close()
	if n := len(stmt.css); n != 0 {
		t.Errorf("len(css slice) after Close = %d; want 0", n)
	}
	// Each bad conn should have been closed and replaced by a fresh open.
	drv.mu.Lock()
	opens := drv.openCount - opens0
	closes := drv.closeCount - closes0
	drv.mu.Unlock()
	if opens < 9 {
		t.Errorf("opens = %d; want >= 9", opens)
	}
	if closes < 9 {
		t.Errorf("closes = %d; want >= 9", closes)
	}
}
// TestIssue18429 attempts to stress rolling back the transaction from a
// context cancel while simultaneously calling Tx.Rollback. Rolling back from a
// context happens concurrently so tx.rollback and tx.Commit must guard against
// double entry.
//
// In the test, a context is canceled while the query is in process so
// the internal rollback will run concurrently with the explicitly called
// Tx.Rollback.
//
// The addition of calling rows.Next also tests
// Issue 21117.
func TestIssue18429(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx := context.Background()
	// sem bounds the number of in-flight goroutines to 20.
	sem := make(chan bool, 20)
	var wg sync.WaitGroup
	const milliWait = 30
	for i := 0; i < 100; i++ {
		sem <- true
		wg.Add(1)
		go func() {
			defer func() {
				<-sem
				wg.Done()
			}()
			// Randomize both the query's simulated duration and the
			// context deadline so cancellation lands at varying points
			// of query execution.
			qwait := (time.Duration(rand.Intn(milliWait)) * time.Millisecond).String()
			ctx, cancel := context.WithTimeout(ctx, time.Duration(rand.Intn(milliWait))*time.Millisecond)
			defer cancel()
			tx, err := db.BeginTx(ctx, nil)
			if err != nil {
				return
			}
			// This is expected to give a cancel error most, but not all the time.
			// Test failure will happen with a panic or other race condition being
			// reported.
			rows, _ := tx.QueryContext(ctx, "WAIT|"+qwait+"|SELECT|people|name|")
			if rows != nil {
				var name string
				// Call Next to test Issue 21117 and check for races.
				for rows.Next() {
					// Scan the buffer so it is read and checked for races.
					rows.Scan(&name)
				}
				rows.Close()
			}
			// This call will race with the context cancel rollback to complete
			// if the rollback itself isn't guarded.
			tx.Rollback()
		}()
	}
	wg.Wait()
}
// TestIssue20160 attempts to test a short context life on a stmt Query.
func TestIssue20160(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx := context.Background()
	// sem bounds the number of in-flight goroutines to 20.
	sem := make(chan bool, 20)
	var wg sync.WaitGroup
	const milliWait = 30
	// One shared prepared statement queried from many goroutines, each
	// with its own short-lived context.
	stmt, err := db.PrepareContext(ctx, "SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	defer stmt.Close()
	for i := 0; i < 100; i++ {
		sem <- true
		wg.Add(1)
		go func() {
			defer func() {
				<-sem
				wg.Done()
			}()
			// Random deadline so cancellation hits at varying stages.
			ctx, cancel := context.WithTimeout(ctx, time.Duration(rand.Intn(milliWait))*time.Millisecond)
			defer cancel()
			// This is expected to give a cancel error most, but not all the time.
			// Test failure will happen with a panic or other race condition being
			// reported.
			rows, _ := stmt.QueryContext(ctx)
			if rows != nil {
				rows.Close()
			}
		}()
	}
	wg.Wait()
}
// TestIssue18719 closes the context right before use. The sql.driverConn
// will nil out the ci on close in a lock, but if another process uses it right after
// it will panic with on the nil ref.
//
// See https://golang.org/cl/35550 .
func TestIssue18719(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		t.Fatal(err)
	}
	// The hook fires after the Tx has grabbed its connection: cancel the
	// context and block until the resulting rollback has finished, so the
	// query proceeds against an already-rolled-back tx.
	hookTxGrabConn = func() {
		cancel()
		// Wait for the context to cancel and tx to rollback.
		for tx.isDone() == false {
			time.Sleep(3 * time.Millisecond)
		}
	}
	defer func() { hookTxGrabConn = nil }()
	// This call will grab the connection and cancel the context
	// after it has done so. Code after must deal with the canceled state.
	_, err = tx.QueryContext(ctx, "SELECT|people|name|")
	if err != nil {
		t.Fatalf("expected error %v but got %v", nil, err)
	}
	// Rows may be ignored because it will be closed when the context is canceled.
	// Do not explicitly rollback. The rollback will happen from the
	// canceled context.
	cancel()
}
// TestIssue20647 verifies that a statement prepared on a specific Conn
// keeps using that same underlying driver connection for every query.
func TestIssue20647(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	conn, err := db.Conn(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Leaving result sets open would otherwise mark the session dirty.
	conn.dc.ci.(*fakeConn).skipDirtySession = true
	defer conn.Close()

	stmt, err := conn.PrepareContext(ctx, "SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	defer stmt.Close()

	rows1, err := stmt.QueryContext(ctx)
	if err != nil {
		t.Fatal("rows1", err)
	}
	defer rows1.Close()

	rows2, err := stmt.QueryContext(ctx)
	if err != nil {
		t.Fatal("rows2", err)
	}
	defer rows2.Close()

	// Both result sets must be bound to the same driver connection.
	if rows1.dc != rows2.dc {
		t.Fatal("stmt prepared on Conn does not use same connection")
	}
}
// TestConcurrency runs each concurrent test variant as its own subtest.
func TestConcurrency(t *testing.T) {
	cases := []struct {
		name string
		ct   concurrentTest
	}{
		{"Query", new(concurrentDBQueryTest)},
		{"Exec", new(concurrentDBExecTest)},
		{"StmtQuery", new(concurrentStmtQueryTest)},
		{"StmtExec", new(concurrentStmtExecTest)},
		{"TxQuery", new(concurrentTxQueryTest)},
		{"TxExec", new(concurrentTxExecTest)},
		{"TxStmtQuery", new(concurrentTxStmtQueryTest)},
		{"TxStmtExec", new(concurrentTxStmtExecTest)},
		{"Random", new(concurrentRandomTest)},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			doConcurrentTest(t, tc.ct)
		})
	}
}
// TestConnectionLeak verifies that a connection returned to the pool when
// the idle queue is already full is closed rather than leaked.
func TestConnectionLeak(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	// Start by opening defaultMaxIdleConns
	rows := make([]*Rows, defaultMaxIdleConns)
	// We need to SetMaxOpenConns > MaxIdleConns, so the DB can open
	// a new connection and we can fill the idle queue with the released
	// connections.
	db.SetMaxOpenConns(len(rows) + 1)
	for ii := range rows {
		r, err := db.Query("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		r.Next()
		if err := r.Err(); err != nil {
			t.Fatal(err)
		}
		// Keep the Rows open so its connection stays busy.
		rows[ii] = r
	}
	// Now we have defaultMaxIdleConns busy connections. Open
	// a new one, but wait until the busy connections are released
	// before returning control to DB.
	drv := db.Driver().(*fakeDriver)
	drv.waitCh = make(chan struct{}, 1)
	drv.waitingCh = make(chan struct{}, 1)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		r, err := db.Query("SELECT|people|name|")
		if err != nil {
			t.Error(err)
			return
		}
		r.Close()
		wg.Done()
	}()
	// Wait until the goroutine we've just created has started waiting.
	<-drv.waitingCh
	// Now close the busy connections. This provides a connection for
	// the blocked goroutine and then fills up the idle queue.
	for _, v := range rows {
		v.Close()
	}
	// At this point we give the new connection to DB. This connection is
	// now useless, since the idle queue is full and there are no pending
	// requests. DB should deal with this situation without leaking the
	// connection.
	drv.waitCh <- struct{}{}
	wg.Wait()
}
// TestStatsMaxIdleClosedZero checks that with idle capacity available
// (MaxIdleConns = 1), releasing connections never bumps MaxIdleClosed.
func TestStatsMaxIdleClosedZero(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	db.SetMaxOpenConns(1)
	db.SetMaxIdleConns(1)
	db.SetConnMaxLifetime(0)

	preMaxIdleClosed := db.Stats().MaxIdleClosed
	for i := 0; i < 10; i++ {
		rows, err := db.Query("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		rows.Close()
	}

	maxIdleClosed := db.Stats().MaxIdleClosed - preMaxIdleClosed
	t.Logf("MaxIdleClosed: %d", maxIdleClosed)
	if maxIdleClosed != 0 {
		t.Fatal("expected 0 max idle closed conns, got: ", maxIdleClosed)
	}
}
// TestStatsMaxIdleClosedTen checks that with no idle capacity
// (MaxIdleConns = 0), all 10 released connections are counted in
// MaxIdleClosed.
func TestStatsMaxIdleClosedTen(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	db.SetMaxOpenConns(1)
	db.SetMaxIdleConns(0)
	db.SetConnMaxLifetime(0)
	preMaxIdleClosed := db.Stats().MaxIdleClosed
	for i := 0; i < 10; i++ {
		rows, err := db.Query("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		rows.Close()
	}
	st := db.Stats()
	maxIdleClosed := st.MaxIdleClosed - preMaxIdleClosed
	t.Logf("MaxIdleClosed: %d", maxIdleClosed)
	if maxIdleClosed != 10 {
		// The failure message previously said "expected 0", contradicting
		// the condition above; report the actual expectation.
		t.Fatal("expected 10 max idle closed conns, got: ", maxIdleClosed)
	}
}
// TestMaxIdleTime verifies SetConnMaxIdleTime: with a tiny idle time the
// cleaner closes the idle connection; with a large one it does not. Time
// is controlled by swapping the package-level nowFunc.
func TestMaxIdleTime(t *testing.T) {
	list := []struct {
		wantMaxIdleTime time.Duration
		wantIdleClosed  int64
		timeOffset      time.Duration
	}{
		{time.Nanosecond, 1, 10 * time.Millisecond},
		{time.Hour, 0, 10 * time.Millisecond},
	}
	baseTime := time.Unix(0, 0)
	// Restore the real clock when the test finishes.
	defer func() {
		nowFunc = time.Now
	}()
	for _, item := range list {
		// Freeze the clock at baseTime while the connection is created.
		nowFunc = func() time.Time {
			return baseTime
		}
		t.Run(fmt.Sprintf("%v", item.wantMaxIdleTime), func(t *testing.T) {
			db := newTestDB(t, "people")
			defer closeDB(t, db)
			db.SetMaxOpenConns(1)
			db.SetMaxIdleConns(1)
			db.SetConnMaxIdleTime(item.wantMaxIdleTime)
			db.SetConnMaxLifetime(0)
			preMaxIdleClosed := db.Stats().MaxIdleTimeClosed
			// Ping opens one connection, which then sits idle.
			if err := db.Ping(); err != nil {
				t.Fatal(err)
			}
			// Advance the fake clock past (or not past) the idle limit.
			nowFunc = func() time.Time {
				return baseTime.Add(item.timeOffset)
			}
			// Run the connection cleaner manually and close what it returns.
			db.mu.Lock()
			closing := db.connectionCleanerRunLocked()
			db.mu.Unlock()
			for _, c := range closing {
				c.Close()
			}
			if g, w := int64(len(closing)), item.wantIdleClosed; g != w {
				t.Errorf("got: %d; want %d closed conns", g, w)
			}
			st := db.Stats()
			maxIdleClosed := st.MaxIdleTimeClosed - preMaxIdleClosed
			if g, w := maxIdleClosed, item.wantIdleClosed; g != w {
				t.Errorf(" got: %d; want %d max idle closed conns", g, w)
			}
		})
	}
}
// nvcDriver wraps fakeDriver so its connections implement
// driver.NamedValueChecker; skipNamedValueCheck makes the check return
// driver.ErrSkip, falling back to default argument handling.
type nvcDriver struct {
	fakeDriver
	skipNamedValueCheck bool
}

// Open opens a fakeDriver connection and wraps it in an nvcConn.
func (d *nvcDriver) Open(dsn string) (driver.Conn, error) {
	c, err := d.fakeDriver.Open(dsn)
	if err != nil {
		// Previously the error was ignored and c (nil on failure) was
		// type-asserted, which would panic; fail cleanly instead.
		return nil, err
	}
	fc := c.(*fakeConn)
	// Allow arbitrary Go values through the fake DB's type checks.
	fc.db.allowAny = true
	return &nvcConn{fc, d.skipNamedValueCheck}, nil
}
// nvcConn is a fakeConn that additionally implements
// driver.NamedValueChecker (see CheckNamedValue below).
type nvcConn struct {
	*fakeConn
	skipNamedValueCheck bool
}
// decimalInt is a custom argument type used to exercise
// CheckNamedValue's pass-through case.
type decimalInt struct {
	value int
}
// doNotInclude is an argument type that CheckNamedValue removes from the
// argument list via driver.ErrRemoveArgument.
type doNotInclude struct{}
// Compile-time check that nvcConn satisfies driver.NamedValueChecker.
var _ driver.NamedValueChecker = &nvcConn{}
// CheckNamedValue implements driver.NamedValueChecker. It handles Out
// parameters (writing a fixed value to the destination), passes
// decimalInt and []int64 through unchanged, strips doNotInclude
// arguments, and defers everything else to the default checker.
func (c *nvcConn) CheckNamedValue(nv *driver.NamedValue) error {
	if c.skipNamedValueCheck {
		// Fall back entirely to the default argument converter.
		return driver.ErrSkip
	}
	switch v := nv.Value.(type) {
	default:
		return driver.ErrSkip
	case Out:
		switch ov := v.Dest.(type) {
		default:
			return errors.New("unknown NameValueCheck OUTPUT type")
		case *string:
			// Simulate the server writing an output parameter.
			*ov = "from-server"
			nv.Value = "OUT:*string"
		}
		return nil
	case decimalInt, []int64:
		// Accept these custom types as-is.
		return nil
	case doNotInclude:
		// Drop this argument from the statement entirely.
		return driver.ErrRemoveArgument
	}
}
// TestNamedValueChecker verifies driver.NamedValueChecker handling:
// custom argument types, OUTPUT parameters, and removed arguments.
func TestNamedValueChecker(t *testing.T) {
	Register("NamedValueCheck", &nvcDriver{})
	db, err := Open("NamedValueCheck", "")
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	if _, err = db.ExecContext(ctx, "WIPE"); err != nil {
		t.Fatal("exec wipe", err)
	}
	if _, err = db.ExecContext(ctx, "CREATE|keys|dec1=any,str1=string,out1=string,array1=any"); err != nil {
		t.Fatal("exec create", err)
	}

	o1 := ""
	_, err = db.ExecContext(ctx, "INSERT|keys|dec1=?A,str1=?,out1=?O1,array1=?", Named("A", decimalInt{123}), "hello", Named("O1", Out{Dest: &o1}), []int64{42, 128, 707}, doNotInclude{})
	if err != nil {
		t.Fatal("exec insert", err)
	}

	var (
		str1 string
		dec1 decimalInt
		arr1 []int64
	)
	if err = db.QueryRowContext(ctx, "SELECT|keys|dec1,str1,array1|").Scan(&dec1, &str1, &arr1); err != nil {
		t.Fatal("select", err)
	}

	checks := []struct{ got, want interface{} }{
		{o1, "from-server"},
		{dec1, decimalInt{123}},
		{str1, "hello"},
		{arr1, []int64{42, 128, 707}},
	}
	for index, item := range checks {
		if !reflect.DeepEqual(item.got, item.want) {
			t.Errorf("got %#v wanted %#v for index %d", item.got, item.want, index)
		}
	}
}
// TestNamedValueCheckerSkip verifies that when CheckNamedValue returns
// driver.ErrSkip, the default converter runs and rejects an unsupported
// custom argument type.
func TestNamedValueCheckerSkip(t *testing.T) {
	Register("NamedValueCheckSkip", &nvcDriver{skipNamedValueCheck: true})
	db, err := Open("NamedValueCheckSkip", "")
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	if _, err = db.ExecContext(ctx, "WIPE"); err != nil {
		t.Fatal("exec wipe", err)
	}
	if _, err = db.ExecContext(ctx, "CREATE|keys|dec1=any"); err != nil {
		t.Fatal("exec create", err)
	}

	// decimalInt is only accepted by the (skipped) custom checker, so the
	// default converter must reject it.
	_, err = db.ExecContext(ctx, "INSERT|keys|dec1=?A", Named("A", decimalInt{123}))
	if err == nil {
		t.Fatalf("expected error with bad argument, got %v", err)
	}
}
// TestOpenConnector verifies that Open with a DriverContext driver goes
// through a Connector, and that DB.Close closes that connector.
func TestOpenConnector(t *testing.T) {
	Register("testctx", &fakeDriverCtx{})
	db, err := Open("testctx", "people")
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	c, ok := db.connector.(*fakeConnector)
	if !ok {
		t.Fatal("not using *fakeConnector")
	}

	if err := db.Close(); err != nil {
		t.Fatal(err)
	}
	if !c.closed {
		t.Fatal("connector is not closed")
	}
}
// ctxOnlyDriver wraps fakeDriver so its connections expose only the
// context-aware Queryer/Execer interfaces (see ctxOnlyConn).
type ctxOnlyDriver struct {
	fakeDriver
}
// Open opens a fakeDriver connection and wraps it in a ctxOnlyConn.
func (d *ctxOnlyDriver) Open(dsn string) (driver.Conn, error) {
	conn, err := d.fakeDriver.Open(dsn)
	if err != nil {
		return nil, err
	}
	return &ctxOnlyConn{fc: conn.(*fakeConn)}, nil
}
// Compile-time checks: ctxOnlyConn implements the context-aware driver
// interfaces (and, deliberately, NOT driver.Queryer/driver.Execer).
var (
	_ driver.Conn           = &ctxOnlyConn{}
	_ driver.QueryerContext = &ctxOnlyConn{}
	_ driver.ExecerContext  = &ctxOnlyConn{}
)
// ctxOnlyConn delegates to a fakeConn while recording whether the
// context-aware query/exec entry points were used.
type ctxOnlyConn struct {
	fc *fakeConn
	// queryCtxCalled/execCtxCalled record that the ...Context methods ran.
	queryCtxCalled bool
	execCtxCalled  bool
}
// Begin delegates to the wrapped fakeConn.
func (c *ctxOnlyConn) Begin() (driver.Tx, error) {
	return c.fc.Begin()
}
// Close delegates to the wrapped fakeConn.
func (c *ctxOnlyConn) Close() error {
	return c.fc.Close()
}
// Prepare is still part of the Conn interface, so while it isn't used
// must be defined for compatibility.
func (c *ctxOnlyConn) Prepare(q string) (driver.Stmt, error) {
	panic("not used")
}
// PrepareContext delegates to the wrapped fakeConn.
func (c *ctxOnlyConn) PrepareContext(ctx context.Context, q string) (driver.Stmt, error) {
	return c.fc.PrepareContext(ctx, q)
}
// QueryContext records the call and delegates to the wrapped fakeConn.
func (c *ctxOnlyConn) QueryContext(ctx context.Context, q string, args []driver.NamedValue) (driver.Rows, error) {
	c.queryCtxCalled = true
	return c.fc.QueryContext(ctx, q, args)
}
// ExecContext records the call and delegates to the wrapped fakeConn.
func (c *ctxOnlyConn) ExecContext(ctx context.Context, q string, args []driver.NamedValue) (driver.Result, error) {
	c.execCtxCalled = true
	return c.fc.ExecContext(ctx, q, args)
}
// TestQueryExecContextOnly ensures drivers only need to implement
// QueryContext and ExecContext methods.
func TestQueryExecContextOnly(t *testing.T) {
	// Ensure connection does not implement non-context interfaces.
	var connType driver.Conn = &ctxOnlyConn{}
	if _, ok := connType.(driver.Execer); ok {
		t.Fatalf("%T must not implement driver.Execer", connType)
	}
	if _, ok := connType.(driver.Queryer); ok {
		t.Fatalf("%T must not implement driver.Queryer", connType)
	}

	Register("ContextOnly", &ctxOnlyDriver{})
	db, err := Open("ContextOnly", "")
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	conn, err := db.Conn(ctx)
	if err != nil {
		t.Fatal("db.Conn", err)
	}
	defer conn.Close()
	coc := conn.dc.ci.(*ctxOnlyConn)
	coc.fc.skipDirtySession = true

	if _, err = conn.ExecContext(ctx, "WIPE"); err != nil {
		t.Fatal("exec wipe", err)
	}
	if _, err = conn.ExecContext(ctx, "CREATE|keys|v1=string"); err != nil {
		t.Fatal("exec create", err)
	}
	expectedValue := "value1"
	if _, err = conn.ExecContext(ctx, "INSERT|keys|v1=?", expectedValue); err != nil {
		t.Fatal("exec insert", err)
	}

	rows, err := conn.QueryContext(ctx, "SELECT|keys|v1|")
	if err != nil {
		t.Fatal("query select", err)
	}
	v1 := ""
	for rows.Next() {
		if err = rows.Scan(&v1); err != nil {
			t.Fatal("rows scan", err)
		}
	}
	rows.Close()

	if v1 != expectedValue {
		t.Fatalf("expected %q, got %q", expectedValue, v1)
	}
	if !coc.execCtxCalled {
		t.Error("ExecContext not called")
	}
	if !coc.queryCtxCalled {
		t.Error("QueryContext not called")
	}
}
// alwaysErrScanner is a Scanner whose Scan always fails with
// errTestScanWrap, used to test error wrapping in Rows.Scan.
type alwaysErrScanner struct{}
// errTestScanWrap is the sentinel error returned by alwaysErrScanner.
var errTestScanWrap = errors.New("errTestScanWrap")
// Scan implements the Scanner interface and always fails.
func (alwaysErrScanner) Scan(interface{}) error {
	return errTestScanWrap
}
// Issue 38099: Ensure that Rows.Scan properly wraps underlying errors.
func TestRowsScanProperlyWrapsErrors(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	rows, err := db.Query("SELECT|people|age|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}

	var res alwaysErrScanner
	for rows.Next() {
		err := rows.Scan(&res)
		switch {
		case err == nil:
			t.Fatal("expecting back an error")
		case !errors.Is(err, errTestScanWrap):
			t.Fatalf("errors.Is mismatch\n%v\nWant: %v", err, errTestScanWrap)
		case !strings.Contains(err.Error(), errTestScanWrap.Error()):
			// Ensure that error substring matching still correctly works.
			t.Fatalf("Error %v does not contain %v", err, errTestScanWrap)
		}
	}
}
// badConn implements a bad driver.Conn, for TestBadDriver.
// The Exec method panics.
type badConn struct{}
// Prepare always fails.
func (bc badConn) Prepare(query string) (driver.Stmt, error) {
	return nil, errors.New("badConn Prepare")
}
// Close is a no-op.
func (bc badConn) Close() error {
	return nil
}
// Begin always fails.
func (bc badConn) Begin() (driver.Tx, error) {
	return nil, errors.New("badConn Begin")
}
// Exec panics unconditionally; TestBadDriver asserts the panic value.
func (bc badConn) Exec(query string, args []driver.Value) (driver.Result, error) {
	panic("badConn.Exec")
}
// badDriver is a driver.Driver that uses badConn.
type badDriver struct{}
// Open always succeeds and returns a badConn.
func (bd badDriver) Open(name string) (driver.Conn, error) {
	return badConn{}, nil
}
// Issue 15901.
// TestBadDriver checks that a panic inside a driver's Exec propagates to
// the caller with its original panic value.
func TestBadDriver(t *testing.T) {
	Register("bad", badDriver{})
	db, err := Open("bad", "ignored")
	if err != nil {
		t.Fatal(err)
	}
	// Deferred last-registered-first: db.Close runs during the panic
	// unwind, then this recover inspects the panic value.
	defer func() {
		if r := recover(); r == nil {
			t.Error("expected panic")
		} else {
			if want := "badConn.Exec"; r.(string) != want {
				t.Errorf("panic was %v, expected %v", r, want)
			}
		}
	}()
	defer db.Close()
	db.Exec("ignored")
}
// pingDriver produces connections whose Ping result is controlled by the
// fails flag.
type pingDriver struct {
	fails bool
}
// pingConn embeds badConn for the unused Conn methods and adds Ping.
type pingConn struct {
	badConn
	driver *pingDriver
}
// pingError is returned by Ping when the driver is set to fail.
var pingError = errors.New("Ping failed")
// Ping implements driver.Pinger, failing iff pc.driver.fails is set.
func (pc pingConn) Ping(ctx context.Context) error {
	if pc.driver.fails {
		return pingError
	}
	return nil
}
// Compile-time check that pingConn implements driver.Pinger.
var _ driver.Pinger = pingConn{}
// Open always succeeds, returning a conn tied back to this driver.
func (pd *pingDriver) Open(name string) (driver.Conn, error) {
	return pingConn{driver: pd}, nil
}
// TestPing checks that DB.Ping reflects the driver's Pinger result both
// when it succeeds and when it fails.
func TestPing(t *testing.T) {
	driver := &pingDriver{}
	Register("ping", driver)

	db, err := Open("ping", "ignored")
	if err != nil {
		t.Fatal(err)
	}

	// Healthy driver: Ping must succeed.
	if err := db.Ping(); err != nil {
		t.Errorf("err was %#v, expected nil", err)
		return
	}

	// Failing driver: Ping must surface the driver's sentinel error.
	driver.fails = true
	if err := db.Ping(); err != pingError {
		t.Errorf("err was %#v, expected pingError", err)
	}
}
// Issue 18101.
// TestTypedString verifies Scan into a user-defined type whose
// underlying type is string.
func TestTypedString(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	type Str string
	var scanned Str
	if err := db.QueryRow("SELECT|people|name|name=?", "Alice").Scan(&scanned); err != nil {
		t.Fatal(err)
	}
	if expected := Str("Alice"); scanned != expected {
		t.Errorf("expected %+v, got %+v", expected, scanned)
	}
}
// The following benchmarks each run one concurrent-test variant through
// doConcurrentTest b.N times, reporting allocations.
func BenchmarkConcurrentDBExec(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentDBExecTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}
func BenchmarkConcurrentStmtQuery(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentStmtQueryTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}
func BenchmarkConcurrentStmtExec(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentStmtExecTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}
func BenchmarkConcurrentTxQuery(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentTxQueryTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}
func BenchmarkConcurrentTxExec(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentTxExecTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}
func BenchmarkConcurrentTxStmtQuery(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentTxStmtQueryTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}
func BenchmarkConcurrentTxStmtExec(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentTxStmtExecTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}
func BenchmarkConcurrentRandom(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentRandomTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}
// BenchmarkManyConcurrentQueries measures prepared-statement query
// throughput under heavy parallelism (exposes pool lock contention).
func BenchmarkManyConcurrentQueries(b *testing.B) {
	b.ReportAllocs()
	// To see lock contention in Go 1.4, 16~ cores and 128~ goroutines are required.
	const parallelism = 16
	db := newTestDB(b, "magicquery")
	defer closeDB(b, db)
	// Enough idle conns so workers rarely have to open a new one.
	db.SetMaxIdleConns(runtime.GOMAXPROCS(0) * parallelism)
	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		b.Fatal(err)
	}
	defer stmt.Close()
	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			rows, err := stmt.Query("sleep", 1)
			if err != nil {
				b.Error(err)
				return
			}
			rows.Close()
		}
	})
}
database/sql: do not rely on timeout for deadlock test
Fixes #46783
Change-Id: I8a8d1716279a041a7411c0c47a440a7997b39c80
Reviewed-on: https://go-review.googlesource.com/c/go/+/328649
Run-TryBot: Daniel Theophanes <14db39248b80c6688f7e9808ba5055f2e56426f3@gmail.com>
TryBot-Result: Go Bot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
Reviewed-by: Bryan C. Mills <1c8aad60184261ccede67f5c63a0d2a3bf3c9ff4@google.com>
Trust: Carlos Amedee <ab5e2bca84933118bbc9d48ffaccce3bac4eeb64@golang.org>
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sql
import (
"context"
"database/sql/driver"
"errors"
"fmt"
"math/rand"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
)
// init installs a putConnHook that detects double-free of a driver
// connection back into a DB's free list. On the second free of the same
// conn it prints both offending stack traces and panics.
func init() {
	type dbConn struct {
		db *DB
		c  *driverConn
	}
	// freedFrom remembers, per (db, conn), the stack of the last free.
	freedFrom := make(map[dbConn]string)
	var mu sync.Mutex
	getFreedFrom := func(c dbConn) string {
		mu.Lock()
		defer mu.Unlock()
		return freedFrom[c]
	}
	setFreedFrom := func(c dbConn, s string) {
		mu.Lock()
		defer mu.Unlock()
		freedFrom[c] = s
	}
	putConnHook = func(db *DB, c *driverConn) {
		// If c is already in the free list, this put is a double free.
		idx := -1
		for i, v := range db.freeConn {
			if v == c {
				idx = i
				break
			}
		}
		if idx >= 0 {
			// print before panic, as panic may get lost due to conflicting panic
			// (all goroutines asleep) elsewhere, since we might not unlock
			// the mutex in freeConn here.
			println("double free of conn. conflicts are:\nA) " + getFreedFrom(dbConn{db, c}) + "\n\nand\nB) " + stack())
			panic("double free of conn.")
		}
		setFreedFrom(dbConn{db, c}, stack())
	}
}
// fakeDBName is the database name used with the fake driver in tests.
const fakeDBName = "foo"
// chrisBirthday is a fixed time used as test data for the "people" table.
var chrisBirthday = time.Unix(123456789, 0)
// newTestDB opens a fake-driver DB and seeds it with the named fixture
// table ("people", "magicquery", or "tx_status").
func newTestDB(t testing.TB, name string) *DB {
	return newTestDBConnector(t, &fakeConnector{name: fakeDBName}, name)
}
// newTestDBConnector opens a DB over the given fake connector, wipes it,
// and populates the fixture table selected by name.
func newTestDBConnector(t testing.TB, fc *fakeConnector, name string) *DB {
	fc.name = fakeDBName
	db := OpenDB(fc)
	if _, err := db.Exec("WIPE"); err != nil {
		t.Fatalf("exec wipe: %v", err)
	}
	switch name {
	case "people":
		exec(t, db, "CREATE|people|name=string,age=int32,photo=blob,dead=bool,bdate=datetime")
		exec(t, db, "INSERT|people|name=Alice,age=?,photo=APHOTO", 1)
		exec(t, db, "INSERT|people|name=Bob,age=?,photo=BPHOTO", 2)
		exec(t, db, "INSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?", 3, chrisBirthday)
	case "magicquery":
		// Magic table name and column, known by fakedb_test.go.
		exec(t, db, "CREATE|magicquery|op=string,millis=int32")
		exec(t, db, "INSERT|magicquery|op=sleep,millis=10")
	case "tx_status":
		// Magic table name and column, known by fakedb_test.go.
		exec(t, db, "CREATE|tx_status|tx_status=string")
		exec(t, db, "INSERT|tx_status|tx_status=invalid")
	}
	return db
}
// TestOpenDB verifies that OpenDB reports the Connector's driver.
func TestOpenDB(t *testing.T) {
	db := OpenDB(dsnConnector{dsn: fakeDBName, driver: fdriver})
	if got := db.Driver(); got != fdriver {
		t.Fatalf("OpenDB should return the driver of the Connector")
	}
}
// TestDriverPanic provokes panics at various points inside the fake
// driver and checks that database/sql neither deadlocks nor leaks state
// afterwards.
func TestDriverPanic(t *testing.T) {
	// Test that if driver panics, database/sql does not deadlock.
	db, err := Open("test", fakeDBName)
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	// expectPanic runs f and fails the test unless f panics.
	expectPanic := func(name string, f func()) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("%s did not panic", name)
			}
		}()
		f()
	}
	expectPanic("Exec Exec", func() { db.Exec("PANIC|Exec|WIPE") })
	exec(t, db, "WIPE") // check not deadlocked
	expectPanic("Exec NumInput", func() { db.Exec("PANIC|NumInput|WIPE") })
	exec(t, db, "WIPE") // check not deadlocked
	expectPanic("Exec Close", func() { db.Exec("PANIC|Close|WIPE") })
	exec(t, db, "WIPE")             // check not deadlocked
	exec(t, db, "PANIC|Query|WIPE") // should run successfully: Exec does not call Query
	exec(t, db, "WIPE")             // check not deadlocked
	exec(t, db, "CREATE|people|name=string,age=int32,photo=blob,dead=bool,bdate=datetime")
	expectPanic("Query Query", func() { db.Query("PANIC|Query|SELECT|people|age,name|") })
	expectPanic("Query NumInput", func() { db.Query("PANIC|NumInput|SELECT|people|age,name|") })
	expectPanic("Query Close", func() {
		rows, err := db.Query("PANIC|Close|SELECT|people|age,name|")
		if err != nil {
			t.Fatal(err)
		}
		rows.Close()
	})
	db.Query("PANIC|Exec|SELECT|people|age,name|") // should run successfully: Query does not call Exec
	exec(t, db, "WIPE")                            // check not deadlocked
}
// exec runs query against db and fails the test on any error.
func exec(t testing.TB, db *DB, query string, args ...interface{}) {
	t.Helper()
	if _, err := db.Exec(query, args...); err != nil {
		t.Fatalf("Exec of %q: %v", query, err)
	}
}
// closeDB closes db at the end of a test, verifying that no free conn
// still holds open statements and that all connections actually close.
// It re-panics any in-flight test panic after logging it.
func closeDB(t testing.TB, db *DB) {
	// Surface a panic from the test body before doing cleanup checks.
	if e := recover(); e != nil {
		fmt.Printf("Panic: %v\n", e)
		panic(e)
	}
	defer setHookpostCloseConn(nil)
	// Fail the test if any fakeConn reports an error on close.
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v", err)
		}
	})
	db.mu.Lock()
	for i, dc := range db.freeConn {
		if n := len(dc.openStmt); n > 0 {
			// Just a sanity check. This is legal in
			// general, but if we make the tests clean up
			// their statements first, then we can safely
			// verify this is always zero here, and any
			// other value is a leak.
			t.Errorf("while closing db, freeConn %d/%d had %d open stmts; want 0", i, len(db.freeConn), n)
		}
	}
	db.mu.Unlock()
	err := db.Close()
	if err != nil {
		t.Fatalf("error closing DB: %v", err)
	}
	// Connections close asynchronously; poll until the count drains.
	var numOpen int
	if !waitCondition(5*time.Second, 5*time.Millisecond, func() bool {
		numOpen = db.numOpenConns()
		return numOpen == 0
	}) {
		t.Fatalf("%d connections still open after closing DB", numOpen)
	}
}
// numPrepares assumes that db has exactly 1 idle conn and returns
// its count of calls to Prepare
func numPrepares(t *testing.T, db *DB) int {
	if n := len(db.freeConn); n != 1 {
		t.Fatalf("free conns = %d; want 1", n)
	}
	return db.freeConn[0].ci.(*fakeConn).numPrepare
}
// numDeps returns the current number of tracked dependencies, for tests.
func (db *DB) numDeps() int {
	db.mu.Lock()
	defer db.mu.Unlock()
	return len(db.dep)
}
// Dependencies are closed via a goroutine, so this polls waiting for
// numDeps to fall to want, waiting up to d.
func (db *DB) numDepsPollUntil(want int, d time.Duration) int {
	deadline := time.Now().Add(d)
	n := db.numDeps()
	for n > want && !time.Now().After(deadline) {
		time.Sleep(50 * time.Millisecond)
		n = db.numDeps()
	}
	return n
}
// numFreeConns returns the size of the free-connection list, for tests.
func (db *DB) numFreeConns() int {
	db.mu.Lock()
	defer db.mu.Unlock()
	return len(db.freeConn)
}
// numOpenConns returns the total number of open connections, for tests.
func (db *DB) numOpenConns() int {
	db.mu.Lock()
	defer db.mu.Unlock()
	return db.numOpen
}
// clearAllConns closes all connections in db.
func (db *DB) clearAllConns(t *testing.T) {
	// Setting the idle limit to zero evicts every free connection.
	db.SetMaxIdleConns(0)

	if g, w := db.numFreeConns(), 0; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	n := db.numDepsPollUntil(0, time.Second)
	if n > 0 {
		t.Errorf("number of dependencies = %d; expected 0", n)
		db.dumpDeps(t)
	}
}
// dumpDeps logs the whole dependency graph rooted at each tracked closer.
func (db *DB) dumpDeps(t *testing.T) {
	for fc := range db.dep {
		db.dumpDep(t, 0, fc, map[finalCloser]bool{})
	}
}
// dumpDep logs one dependency edge per line, recursing into dependents;
// seen guards against cycles in the graph.
func (db *DB) dumpDep(t *testing.T, depth int, dep finalCloser, seen map[finalCloser]bool) {
	seen[dep] = true
	indent := strings.Repeat("  ", depth)
	ds := db.dep[dep]
	for k := range ds {
		t.Logf("%s%T (%p) waiting for -> %T (%p)", indent, dep, dep, k, k)
		if fc, ok := k.(finalCloser); ok {
			if !seen[fc] {
				db.dumpDep(t, depth+1, fc, seen)
			}
		}
	}
}
// TestQuery checks a basic multi-row query: results, EOF releasing the
// connection, and exactly one Prepare being issued.
func TestQuery(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	prepares0 := numPrepares(t, db)

	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}

	type row struct {
		age  int
		name string
	}
	var got []row
	got = []row{}
	for rows.Next() {
		var r row
		if err := rows.Scan(&r.age, &r.name); err != nil {
			t.Fatalf("Scan: %v", err)
		}
		got = append(got, r)
	}
	if err := rows.Err(); err != nil {
		t.Fatalf("Err: %v", err)
	}

	want := []row{
		{age: 1, name: "Alice"},
		{age: 2, name: "Bob"},
		{age: 3, name: "Chris"},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("mismatch.\n got: %#v\nwant: %#v", got, want)
	}

	// And verify that the final rows.Next() call, which hit EOF,
	// also closed the rows connection.
	if n := db.numFreeConns(); n != 1 {
		t.Fatalf("free conns after query hitting EOF = %d; want 1", n)
	}
	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
		t.Errorf("executed %d Prepare statements; want 1", prepares)
	}
}
// TestQueryContext tests canceling the context while scanning the rows.
func TestQueryContext(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	prepares0 := numPrepares(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	rows, err := db.QueryContext(ctx, "SELECT|people|age,name|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}
	type row struct {
		age  int
		name string
	}
	got := []row{}
	index := 0
	for rows.Next() {
		// Cancel mid-iteration (before the third row is scanned) and
		// wait for the async close to land, so Scan below sees a
		// closed Rows.
		if index == 2 {
			cancel()
			waitForRowsClose(t, rows, 5*time.Second)
		}
		var r row
		err = rows.Scan(&r.age, &r.name)
		if err != nil {
			// Only the canceled iteration is allowed to error.
			if index == 2 {
				break
			}
			t.Fatalf("Scan: %v", err)
		}
		if index == 2 && err != context.Canceled {
			t.Fatalf("Scan: %v; want context.Canceled", err)
		}
		got = append(got, r)
		index++
	}
	// The context must now be done, specifically with Canceled.
	select {
	case <-ctx.Done():
		if err := ctx.Err(); err != context.Canceled {
			t.Fatalf("context err = %v; want context.Canceled", err)
		}
	default:
		t.Fatalf("context err = nil; want context.Canceled")
	}
	// Only the two rows scanned before cancellation should be present.
	want := []row{
		{age: 1, name: "Alice"},
		{age: 2, name: "Bob"},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("mismatch.\n got: %#v\nwant: %#v", got, want)
	}
	// And verify that the final rows.Next() call, which hit EOF,
	// also closed the rows connection.
	waitForRowsClose(t, rows, 5*time.Second)
	waitForFree(t, db, 5*time.Second, 1)
	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
		t.Errorf("executed %d Prepare statements; want 1", prepares)
	}
}
func waitCondition(waitFor, checkEvery time.Duration, fn func() bool) bool {
deadline := time.Now().Add(waitFor)
for time.Now().Before(deadline) {
if fn() {
return true
}
time.Sleep(checkEvery)
}
return false
}
// waitForFree checks db.numFreeConns until either it equals want or
// the maxWait time elapses.
// It fails the test (Fatalf) with the last observed count on timeout.
func waitForFree(t *testing.T, db *DB, maxWait time.Duration, want int) {
	var numFree int
	if !waitCondition(maxWait, 5*time.Millisecond, func() bool {
		numFree = db.numFreeConns()
		return numFree == want
	}) {
		t.Fatalf("free conns after hitting EOF = %d; want %d", numFree, want)
	}
}
// waitForRowsClose polls rows.closed (under rows.closemu) until the rows
// are closed or maxWait elapses, failing the test on timeout.
func waitForRowsClose(t *testing.T, rows *Rows, maxWait time.Duration) {
	if !waitCondition(maxWait, 5*time.Millisecond, func() bool {
		rows.closemu.RLock()
		defer rows.closemu.RUnlock()
		return rows.closed
	}) {
		t.Fatal("failed to close rows")
	}
}
// TestQueryContextWait ensures that rows and all internal statements are closed when
// a query context is closed during execution.
func TestQueryContextWait(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	prepares0 := numPrepares(t, db)
	// TODO(kardianos): convert this from using a timeout to using an explicit
	// cancel when the query signals that it is "executing" the query.
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()
	// This will trigger the *fakeConn.Prepare method which will take time
	// performing the query. The ctxDriverPrepare func will check the context
	// after this and close the rows and return an error.
	_, err := db.QueryContext(ctx, "WAIT|1s|SELECT|people|age,name|")
	if err != context.DeadlineExceeded {
		t.Fatalf("expected QueryContext to error with context deadline exceeded but returned %v", err)
	}
	// Verify closed rows connection after error condition.
	waitForFree(t, db, 5*time.Second, 1)
	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
		// TODO(kardianos): if the context timeouts before the db.QueryContext
		// executes this check may fail. After adjusting how the context
		// is canceled above revert this back to a Fatal error.
		t.Logf("executed %d Prepare statements; want 1", prepares)
	}
}
// TestTxContextWait tests the transaction behavior when the tx context is canceled
// during execution of the query.
func TestTxContextWait(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		t.Fatal(err)
	}
	// Discard the connection on rollback so the free-conn count below is 0.
	tx.keepConnOnRollback = false
	// Cancel the context while the query below is still executing.
	go func() {
		time.Sleep(15 * time.Millisecond)
		cancel()
	}()
	// This will trigger the *fakeConn.Prepare method which will take time
	// performing the query. The ctxDriverPrepare func will check the context
	// after this and close the rows and return an error.
	_, err = tx.QueryContext(ctx, "WAIT|1s|SELECT|people|age,name|")
	if err != context.Canceled {
		t.Fatalf("expected QueryContext to error with context canceled but returned %v", err)
	}
	// The canceled tx's connection must be discarded, not returned to the pool.
	waitForFree(t, db, 5*time.Second, 0)
}
// TestTxContextWaitNoDiscard is the same as TestTxContextWait, but should not discard
// the final connection.
func TestTxContextWaitNoDiscard(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Millisecond)
	defer cancel()
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		// Guard against the context being canceled before BeginTx completes.
		if err == context.DeadlineExceeded {
			t.Skip("tx context canceled prior to first use")
		}
		t.Fatal(err)
	}
	// This will trigger the *fakeConn.Prepare method which will take time
	// performing the query. The ctxDriverPrepare func will check the context
	// after this and close the rows and return an error.
	_, err = tx.QueryContext(ctx, "WAIT|1s|SELECT|people|age,name|")
	if err != context.DeadlineExceeded {
		t.Fatalf("expected QueryContext to error with context deadline exceeded but returned %v", err)
	}
	// Unlike TestTxContextWait, the connection should return to the pool.
	waitForFree(t, db, 5*time.Second, 1)
}
// TestUnsupportedOptions checks that the database fails when a driver that
// doesn't implement ConnBeginTx is used with non-default options and an
// un-cancellable context.
func TestUnsupportedOptions(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	_, err := db.BeginTx(context.Background(), &TxOptions{
		Isolation: LevelSerializable, ReadOnly: true,
	})
	if err == nil {
		t.Fatal("expected error when using unsupported options, got nil")
	}
}
// TestMultiResultSetQuery runs a two-statement query (';'-separated in the
// fakedb syntax) and walks both result sets via Rows.NextResultSet.
func TestMultiResultSetQuery(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	prepares0 := numPrepares(t, db)
	rows, err := db.Query("SELECT|people|age,name|;SELECT|people|name|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}
	type row1 struct {
		age  int
		name string
	}
	type row2 struct {
		name string
	}
	// First result set: age and name columns.
	got1 := []row1{}
	for rows.Next() {
		var r row1
		err = rows.Scan(&r.age, &r.name)
		if err != nil {
			t.Fatalf("Scan: %v", err)
		}
		got1 = append(got1, r)
	}
	err = rows.Err()
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	want1 := []row1{
		{age: 1, name: "Alice"},
		{age: 2, name: "Bob"},
		{age: 3, name: "Chris"},
	}
	if !reflect.DeepEqual(got1, want1) {
		t.Errorf("mismatch.\n got1: %#v\nwant: %#v", got1, want1)
	}
	// Second result set: name column only.
	if !rows.NextResultSet() {
		t.Errorf("expected another result set")
	}
	got2 := []row2{}
	for rows.Next() {
		var r row2
		err = rows.Scan(&r.name)
		if err != nil {
			t.Fatalf("Scan: %v", err)
		}
		got2 = append(got2, r)
	}
	err = rows.Err()
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	want2 := []row2{
		{name: "Alice"},
		{name: "Bob"},
		{name: "Chris"},
	}
	if !reflect.DeepEqual(got2, want2) {
		t.Errorf("mismatch.\n got: %#v\nwant: %#v", got2, want2)
	}
	if rows.NextResultSet() {
		t.Errorf("expected no more result sets")
	}
	// And verify that the final rows.Next() call, which hit EOF,
	// also closed the rows connection.
	waitForFree(t, db, 5*time.Second, 1)
	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
		t.Errorf("executed %d Prepare statements; want 1", prepares)
	}
}
// TestQueryNamedArg verifies that Named arguments are matched to query
// placeholders by name rather than by position.
func TestQueryNamedArg(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	prepares0 := numPrepares(t, db)
	rows, err := db.Query(
		// Ensure the name and age parameters only match on placeholder name, not position.
		"SELECT|people|age,name|name=?name,age=?age",
		Named("age", 2),
		Named("name", "Bob"),
	)
	if err != nil {
		t.Fatalf("Query: %v", err)
	}
	type row struct {
		age  int
		name string
	}
	got := []row{}
	for rows.Next() {
		var r row
		err = rows.Scan(&r.age, &r.name)
		if err != nil {
			t.Fatalf("Scan: %v", err)
		}
		got = append(got, r)
	}
	err = rows.Err()
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	want := []row{
		{age: 2, name: "Bob"},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("mismatch.\n got: %#v\nwant: %#v", got, want)
	}
	// And verify that the final rows.Next() call, which hit EOF,
	// also closed the rows connection.
	if n := db.numFreeConns(); n != 1 {
		t.Fatalf("free conns after query hitting EOF = %d; want 1", n)
	}
	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
		t.Errorf("executed %d Prepare statements; want 1", prepares)
	}
}
// TestPoolExhaustOnCancel saturates the connection pool, then verifies that
// requests waiting for a connection fail with context.Canceled when their
// context is canceled, and that the pool recovers afterwards.
func TestPoolExhaustOnCancel(t *testing.T) {
	if testing.Short() {
		t.Skip("long test")
	}
	max := 3
	var saturate, saturateDone sync.WaitGroup
	saturate.Add(max)
	saturateDone.Add(max)
	donePing := make(chan bool)
	state := 0
	// waiter will be called for all queries, including
	// initial setup queries. The state is only assigned when
	// no queries are made.
	//
	// Only allow the first batch of queries to finish once the
	// second batch of Ping queries have finished.
	waiter := func(ctx context.Context) {
		switch state {
		case 0:
			// Nothing. Initial database setup.
		case 1:
			// Saturation phase: hold the connection until either the
			// query context ends or the Ping batch completes.
			saturate.Done()
			select {
			case <-ctx.Done():
			case <-donePing:
			}
		case 2:
		}
	}
	db := newTestDBConnector(t, &fakeConnector{waiter: waiter}, "people")
	defer closeDB(t, db)
	db.SetMaxOpenConns(max)
	// First saturate the connection pool.
	// Then start new requests for a connection that is canceled after it is requested.
	state = 1
	for i := 0; i < max; i++ {
		go func() {
			rows, err := db.Query("SELECT|people|name,photo|")
			if err != nil {
				t.Errorf("Query: %v", err)
				return
			}
			rows.Close()
			saturateDone.Done()
		}()
	}
	saturate.Wait()
	if t.Failed() {
		t.FailNow()
	}
	state = 2
	// Now cancel the request while it is waiting.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	for i := 0; i < max; i++ {
		ctxReq, cancelReq := context.WithCancel(ctx)
		go func() {
			time.Sleep(100 * time.Millisecond)
			cancelReq()
		}()
		err := db.PingContext(ctxReq)
		if err != context.Canceled {
			t.Fatalf("PingContext (Exhaust): %v", err)
		}
	}
	// Release the saturating queries and wait for them to drain.
	close(donePing)
	saturateDone.Wait()
	// Now try to open a normal connection.
	err := db.PingContext(ctx)
	if err != nil {
		t.Fatalf("PingContext (Normal): %v", err)
	}
}
// TestRowsColumns checks that Rows.Columns reports the queried column names
// in order.
func TestRowsColumns(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}
	cols, err := rows.Columns()
	if err != nil {
		t.Fatalf("Columns: %v", err)
	}
	want := []string{"age", "name"}
	if !reflect.DeepEqual(cols, want) {
		t.Errorf("got %#v; want %#v", cols, want)
	}
	if err := rows.Close(); err != nil {
		t.Errorf("error closing rows: %s", err)
	}
}
// TestRowsColumnTypes verifies Rows.ColumnTypes by allocating scan targets
// from each column's ScanType and scanning every row into them.
func TestRowsColumnTypes(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}
	tt, err := rows.ColumnTypes()
	if err != nil {
		t.Fatalf("ColumnTypes: %v", err)
	}
	types := make([]reflect.Type, len(tt))
	for i, tp := range tt {
		st := tp.ScanType()
		if st == nil {
			t.Errorf("scantype is null for column %q", tp.Name())
			continue
		}
		types[i] = st
	}
	// Build one pointer-to-ScanType value per column for Scan to fill.
	values := make([]interface{}, len(tt))
	for i := range values {
		values[i] = reflect.New(types[i]).Interface()
	}
	ct := 0
	for rows.Next() {
		err = rows.Scan(values...)
		if err != nil {
			t.Fatalf("failed to scan values in %v", err)
		}
		// Spot-check the second row (Bob, age 2).
		if ct == 1 {
			if age := *values[0].(*int32); age != 2 {
				t.Errorf("Expected 2, got %v", age)
			}
			if name := *values[1].(*string); name != "Bob" {
				t.Errorf("Expected Bob, got %v", name)
			}
		}
		ct++
	}
	if ct != 3 {
		t.Errorf("expected 3 rows, got %d", ct)
	}
	if err := rows.Close(); err != nil {
		t.Errorf("error closing rows: %s", err)
	}
}
// TestQueryRow exercises QueryRow+Scan: destination-count mismatch errors,
// time.Time scanning, int/string scanning, and []byte scanning.
func TestQueryRow(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	var name string
	var age int
	var birthday time.Time
	// Two columns selected but only one destination: must error.
	err := db.QueryRow("SELECT|people|age,name|age=?", 3).Scan(&age)
	if err == nil || !strings.Contains(err.Error(), "expected 2 destination arguments") {
		t.Errorf("expected error from wrong number of arguments; actually got: %v", err)
	}
	err = db.QueryRow("SELECT|people|bdate|age=?", 3).Scan(&birthday)
	if err != nil || !birthday.Equal(chrisBirthday) {
		t.Errorf("chris birthday = %v, err = %v; want %v", birthday, err, chrisBirthday)
	}
	err = db.QueryRow("SELECT|people|age,name|age=?", 2).Scan(&age, &name)
	if err != nil {
		t.Fatalf("age QueryRow+Scan: %v", err)
	}
	if name != "Bob" {
		t.Errorf("expected name Bob, got %q", name)
	}
	if age != 2 {
		t.Errorf("expected age 2, got %d", age)
	}
	err = db.QueryRow("SELECT|people|age,name|name=?", "Alice").Scan(&age, &name)
	if err != nil {
		t.Fatalf("name QueryRow+Scan: %v", err)
	}
	if name != "Alice" {
		t.Errorf("expected name Alice, got %q", name)
	}
	if age != 1 {
		t.Errorf("expected age 1, got %d", age)
	}
	var photo []byte
	err = db.QueryRow("SELECT|people|photo|name=?", "Alice").Scan(&photo)
	if err != nil {
		t.Fatalf("photo QueryRow+Scan: %v", err)
	}
	want := []byte("APHOTO")
	if !reflect.DeepEqual(photo, want) {
		t.Errorf("photo = %q; want %q", photo, want)
	}
}
// TestRowErr verifies Row.Err: it is nil after a successful QueryRowContext
// and reports a context error when the query's context was already canceled.
func TestRowErr(t *testing.T) {
	db := newTestDB(t, "people")
	// Close the test DB like every other test here does; previously this
	// test leaked the handle.
	defer closeDB(t, db)
	err := db.QueryRowContext(context.Background(), "SELECT|people|bdate|age=?", 3).Err()
	if err != nil {
		t.Errorf("Unexpected err = %v; want %v", err, nil)
	}
	// A pre-canceled context must surface through Row.Err.
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	err = db.QueryRowContext(ctx, "SELECT|people|bdate|age=?", 3).Err()
	exp := "context canceled"
	if err == nil || !strings.Contains(err.Error(), exp) {
		t.Errorf("Expected err = %v; got %v", exp, err)
	}
}
// TestTxRollbackCommitErr checks that Commit after Rollback and Rollback
// after Commit both fail with ErrTxDone.
func TestTxRollbackCommitErr(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	err = tx.Rollback()
	if err != nil {
		t.Errorf("expected nil error from Rollback; got %v", err)
	}
	// The tx is done after Rollback; Commit must report that.
	err = tx.Commit()
	if err != ErrTxDone {
		t.Errorf("expected %q from Commit; got %q", ErrTxDone, err)
	}
	tx, err = db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	err = tx.Commit()
	if err != nil {
		t.Errorf("expected nil error from Commit; got %v", err)
	}
	// Likewise Rollback after a successful Commit.
	err = tx.Rollback()
	if err != ErrTxDone {
		t.Errorf("expected %q from Rollback; got %q", ErrTxDone, err)
	}
}
// TestStatementErrorAfterClose ensures that using a Stmt after Close
// returns an error rather than succeeding.
func TestStatementErrorAfterClose(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	stmt, err := db.Prepare("SELECT|people|age|name=?")
	if err != nil {
		t.Fatalf("Prepare: %v", err)
	}
	err = stmt.Close()
	if err != nil {
		t.Fatalf("Close: %v", err)
	}
	var name string
	err = stmt.QueryRow("foo").Scan(&name)
	if err == nil {
		t.Errorf("expected error from QueryRow.Scan after Stmt.Close")
	}
}
// TestStatementQueryRow runs a prepared statement several times with
// different arguments and checks each scanned result.
func TestStatementQueryRow(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	stmt, err := db.Prepare("SELECT|people|age|name=?")
	if err != nil {
		t.Fatalf("Prepare: %v", err)
	}
	defer stmt.Close()
	var age int
	for n, tt := range []struct {
		name string
		want int
	}{
		{"Alice", 1},
		{"Bob", 2},
		{"Chris", 3},
	} {
		if err := stmt.QueryRow(tt.name).Scan(&age); err != nil {
			t.Errorf("%d: on %q, QueryRow/Scan: %v", n, tt.name, err)
		} else if age != tt.want {
			t.Errorf("%d: age=%d, want %d", n, age, tt.want)
		}
	}
}
type stubDriverStmt struct {
err error
}
func (s stubDriverStmt) Close() error {
return s.err
}
func (s stubDriverStmt) NumInput() int {
return -1
}
func (s stubDriverStmt) Exec(args []driver.Value) (driver.Result, error) {
return nil, nil
}
func (s stubDriverStmt) Query(args []driver.Value) (driver.Rows, error) {
return nil, nil
}
// golang.org/issue/12798
// TestStatementClose verifies that Stmt.Close propagates both a stickyErr
// and an error returned by the underlying driver statement's Close.
func TestStatementClose(t *testing.T) {
	want := errors.New("STMT ERROR")
	tests := []struct {
		stmt *Stmt
		msg  string
	}{
		{&Stmt{stickyErr: want}, "stickyErr not propagated"},
		{&Stmt{cg: &Tx{}, cgds: &driverStmt{Locker: &sync.Mutex{}, si: stubDriverStmt{want}}}, "driverStmt.Close() error not propagated"},
	}
	for _, test := range tests {
		if err := test.stmt.Close(); err != want {
			t.Errorf("%s. Got stmt.Close() = %v, want = %v", test.msg, err, want)
		}
	}
}
// golang.org/issue/3734
// TestStatementQueryRowConcurrent runs the same prepared statement from
// several goroutines at once and checks every result.
func TestStatementQueryRowConcurrent(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	stmt, err := db.Prepare("SELECT|people|age|name=?")
	if err != nil {
		t.Fatalf("Prepare: %v", err)
	}
	defer stmt.Close()
	const n = 10
	ch := make(chan error, n)
	for i := 0; i < n; i++ {
		go func() {
			var age int
			err := stmt.QueryRow("Alice").Scan(&age)
			if err == nil && age != 1 {
				err = fmt.Errorf("unexpected age %d", age)
			}
			ch <- err
		}()
	}
	// Collect one result per goroutine; the buffered channel doubles
	// as the synchronization point.
	for i := 0; i < n; i++ {
		if err := <-ch; err != nil {
			t.Error(err)
		}
	}
}
// just a test of fakedb itself
// TestBogusPreboundParameters checks that preparing a statement with an
// invalid prebound conversion fails with the expected fakedb error.
func TestBogusPreboundParameters(t *testing.T) {
	db := newTestDB(t, "foo")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	_, err := db.Prepare("INSERT|t1|name=?,age=bogusconversion")
	if err == nil {
		t.Fatalf("expected error")
	}
	if err.Error() != `fakedb: invalid conversion to int32 from "bogusconversion"` {
		t.Errorf("unexpected error: %v", err)
	}
}
// TestExec table-drives Stmt.Exec over valid argument conversions, invalid
// conversions, and wrong argument counts, comparing error strings.
func TestExec(t *testing.T) {
	db := newTestDB(t, "foo")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Errorf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	type execTest struct {
		args    []interface{}
		wantErr string
	}
	execTests := []execTest{
		// Okay:
		{[]interface{}{"Brad", 31}, ""},
		{[]interface{}{"Brad", int64(31)}, ""},
		{[]interface{}{"Bob", "32"}, ""},
		{[]interface{}{7, 9}, ""},
		// Invalid conversions:
		{[]interface{}{"Brad", int64(0xFFFFFFFF)}, "sql: converting argument $2 type: sql/driver: value 4294967295 overflows int32"},
		{[]interface{}{"Brad", "strconv fail"}, `sql: converting argument $2 type: sql/driver: value "strconv fail" can't be converted to int32`},
		// Wrong number of args:
		{[]interface{}{}, "sql: expected 2 arguments, got 0"},
		{[]interface{}{1, 2, 3}, "sql: expected 2 arguments, got 3"},
	}
	for n, et := range execTests {
		_, err := stmt.Exec(et.args...)
		errStr := ""
		if err != nil {
			errStr = err.Error()
		}
		if errStr != et.wantErr {
			t.Errorf("stmt.Execute #%d: for %v, got error %q, want error %q",
				n, et.args, errStr, et.wantErr)
		}
	}
}
// TestTxPrepare prepares a statement inside a transaction, executes it,
// and checks that Commit closes the tx-owned statement.
func TestTxPrepare(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	stmt, err := tx.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	_, err = stmt.Exec("Bobby", 7)
	if err != nil {
		t.Fatalf("Exec = %v", err)
	}
	err = tx.Commit()
	if err != nil {
		t.Fatalf("Commit = %v", err)
	}
	// Commit() should have closed the statement
	if !stmt.closed {
		t.Fatal("Stmt not closed after Commit")
	}
}
// TestTxStmt adopts a DB-prepared statement into a transaction via
// Tx.Stmt, executes it, and checks Commit closes the tx statement.
func TestTxStmt(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	txs := tx.Stmt(stmt)
	defer txs.Close()
	_, err = txs.Exec("Bobby", 7)
	if err != nil {
		t.Fatalf("Exec = %v", err)
	}
	err = tx.Commit()
	if err != nil {
		t.Fatalf("Commit = %v", err)
	}
	// Commit() should have closed the statement
	if !txs.closed {
		t.Fatal("Stmt not closed after Commit")
	}
}
// TestTxStmtPreparedOnce checks that two Tx.Stmt adoptions of the same
// open DB statement reuse the single underlying prepared statement.
func TestTxStmtPreparedOnce(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32")
	prepares0 := numPrepares(t, db)
	// db.Prepare increments numPrepares.
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	txs1 := tx.Stmt(stmt)
	txs2 := tx.Stmt(stmt)
	_, err = txs1.Exec("Go", 7)
	if err != nil {
		t.Fatalf("Exec = %v", err)
	}
	txs1.Close()
	_, err = txs2.Exec("Gopher", 8)
	if err != nil {
		t.Fatalf("Exec = %v", err)
	}
	txs2.Close()
	err = tx.Commit()
	if err != nil {
		t.Fatalf("Commit = %v", err)
	}
	// Exactly one Prepare despite two tx.Stmt adoptions.
	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
		t.Errorf("executed %d Prepare statements; want 1", prepares)
	}
}
// TestTxStmtClosedRePrepares checks that Tx.Stmt re-prepares the query
// (instead of linking to the parent) when the source Stmt is already closed.
func TestTxStmtClosedRePrepares(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32")
	prepares0 := numPrepares(t, db)
	// db.Prepare increments numPrepares.
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	err = stmt.Close()
	if err != nil {
		t.Fatalf("stmt.Close() = %v", err)
	}
	// tx.Stmt increments numPrepares because stmt is closed.
	txs := tx.Stmt(stmt)
	if txs.stickyErr != nil {
		t.Fatal(txs.stickyErr)
	}
	if txs.parentStmt != nil {
		t.Fatal("expected nil parentStmt")
	}
	_, err = txs.Exec(`Eric`, 82)
	if err != nil {
		t.Fatalf("txs.Exec = %v", err)
	}
	err = txs.Close()
	if err != nil {
		t.Fatalf("txs.Close = %v", err)
	}
	tx.Rollback()
	// One Prepare from db.Prepare plus one re-prepare inside tx.Stmt.
	if prepares := numPrepares(t, db) - prepares0; prepares != 2 {
		t.Errorf("executed %d Prepare statements; want 2", prepares)
	}
}
// TestParentStmtOutlivesTxStmt checks that closing a tx-derived statement
// and rolling back the tx leaves the parent DB statement usable.
func TestParentStmtOutlivesTxStmt(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32")
	// Make sure everything happens on the same connection.
	db.SetMaxOpenConns(1)
	prepares0 := numPrepares(t, db)
	// db.Prepare increments numPrepares.
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	txs := tx.Stmt(stmt)
	if len(stmt.css) != 1 {
		t.Fatalf("len(stmt.css) = %v; want 1", len(stmt.css))
	}
	err = txs.Close()
	if err != nil {
		t.Fatalf("txs.Close() = %v", err)
	}
	err = tx.Rollback()
	if err != nil {
		t.Fatalf("tx.Rollback() = %v", err)
	}
	// txs must not be valid.
	_, err = txs.Exec("Suzan", 30)
	if err == nil {
		t.Fatalf("txs.Exec(), expected err")
	}
	// Stmt must still be valid.
	_, err = stmt.Exec("Janina", 25)
	if err != nil {
		t.Fatalf("stmt.Exec() = %v", err)
	}
	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
		t.Errorf("executed %d Prepare statements; want 1", prepares)
	}
}
// Test that tx.Stmt called with a statement already
// associated with tx as argument re-prepares the same
// statement again.
func TestTxStmtFromTxStmtRePrepares(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32")
	prepares0 := numPrepares(t, db)
	// db.Prepare increments numPrepares.
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	txs1 := tx.Stmt(stmt)
	// tx.Stmt(txs1) increments numPrepares because txs1 already
	// belongs to a transaction (albeit the same transaction).
	txs2 := tx.Stmt(txs1)
	if txs2.stickyErr != nil {
		t.Fatal(txs2.stickyErr)
	}
	if txs2.parentStmt != nil {
		t.Fatal("expected nil parentStmt")
	}
	_, err = txs2.Exec(`Eric`, 82)
	if err != nil {
		t.Fatal(err)
	}
	err = txs1.Close()
	if err != nil {
		t.Fatalf("txs1.Close = %v", err)
	}
	err = txs2.Close()
	if err != nil {
		// Fixed copy-paste bug: this message previously said "txs1.Close".
		t.Fatalf("txs2.Close = %v", err)
	}
	err = tx.Rollback()
	if err != nil {
		t.Fatalf("tx.Rollback = %v", err)
	}
	// One Prepare from db.Prepare plus one re-prepare inside tx.Stmt(txs1).
	if prepares := numPrepares(t, db) - prepares0; prepares != 2 {
		t.Errorf("executed %d Prepare statements; want 2", prepares)
	}
}
// Issue: https://golang.org/issue/2784
// This test didn't fail before because we got lucky with the fakedb driver.
// It was failing, and now not, in github.com/bradfitz/go-sql-test
func TestTxQuery(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	exec(t, db, "INSERT|t1|name=Alice")
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	defer tx.Rollback()
	r, err := tx.Query("SELECT|t1|name|")
	if err != nil {
		t.Fatal(err)
	}
	defer r.Close()
	// Exactly one row was inserted above; Next must succeed once.
	if !r.Next() {
		if r.Err() != nil {
			t.Fatal(r.Err())
		}
		t.Fatal("expected one row")
	}
	var x string
	err = r.Scan(&x)
	if err != nil {
		t.Fatal(err)
	}
}
// TestTxQueryInvalid checks that querying a nonexistent table inside a
// transaction returns an error.
func TestTxQueryInvalid(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	defer tx.Rollback()
	// Table t1 was never created, so this must fail.
	_, err = tx.Query("SELECT|t1|name|")
	if err == nil {
		t.Fatal("Error expected")
	}
}
// Tests fix for issue 4433, that retries in Begin happen when
// conn.Begin() returns ErrBadConn
func TestTxErrBadConn(t *testing.T) {
	// The ";badConn" DSN suffix makes the fake driver return bad
	// connections, exercising the retry path.
	db, err := Open("test", fakeDBName+";badConn")
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	if _, err := db.Exec("WIPE"); err != nil {
		t.Fatalf("exec wipe: %v", err)
	}
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	txs := tx.Stmt(stmt)
	defer txs.Close()
	_, err = txs.Exec("Bobby", 7)
	if err != nil {
		t.Fatalf("Exec = %v", err)
	}
	err = tx.Commit()
	if err != nil {
		t.Fatalf("Commit = %v", err)
	}
}
// TestConnQuery exercises QueryRowContext and PingContext on a single
// reserved connection obtained via DB.Conn.
func TestConnQuery(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	conn, err := db.Conn(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Suppress the fake driver's dirty-session tracking for this conn.
	conn.dc.ci.(*fakeConn).skipDirtySession = true
	defer conn.Close()
	var name string
	err = conn.QueryRowContext(ctx, "SELECT|people|name|age=?", 3).Scan(&name)
	if err != nil {
		t.Fatal(err)
	}
	if name != "Chris" {
		t.Fatalf("unexpected result, got %q want Chris", name)
	}
	err = conn.PingContext(ctx)
	if err != nil {
		t.Fatal(err)
	}
}
// TestConnRaw checks Conn.Raw: the callback receives the underlying driver
// connection, and a panic inside the callback closes the connection while
// propagating the panic.
func TestConnRaw(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	conn, err := db.Conn(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Suppress the fake driver's dirty-session tracking for this conn.
	conn.dc.ci.(*fakeConn).skipDirtySession = true
	defer conn.Close()
	sawFunc := false
	err = conn.Raw(func(dc interface{}) error {
		sawFunc = true
		if _, ok := dc.(*fakeConn); !ok {
			return fmt.Errorf("got %T want *fakeConn", dc)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if !sawFunc {
		t.Fatal("Raw func not called")
	}
	// A panic inside the Raw callback must propagate to the caller and
	// leave the Conn closed (conn.dc == nil).
	func() {
		defer func() {
			x := recover()
			if x == nil {
				t.Fatal("expected panic")
			}
			conn.closemu.Lock()
			closed := conn.dc == nil
			conn.closemu.Unlock()
			if !closed {
				t.Fatal("expected connection to be closed after panic")
			}
		}()
		err = conn.Raw(func(dc interface{}) error {
			panic("Conn.Raw panic should return an error")
		})
		// Unreachable if the panic propagated as expected.
		t.Fatal("expected panic from Raw func")
	}()
}
// TestCursorFake scans a column whose value is itself a cursor (fakedb's
// "table" column type) into a *Rows and iterates the nested result.
func TestCursorFake(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	exec(t, db, "CREATE|peoplecursor|list=table")
	exec(t, db, "INSERT|peoplecursor|list=people!name!age")
	rows, err := db.QueryContext(ctx, `SELECT|peoplecursor|list|`)
	if err != nil {
		t.Fatal(err)
	}
	defer rows.Close()
	if !rows.Next() {
		t.Fatal("no rows")
	}
	// Scanning into a *Rows pulls the nested cursor out of the column.
	var cursor = &Rows{}
	err = rows.Scan(cursor)
	if err != nil {
		t.Fatal(err)
	}
	defer cursor.Close()
	const expectedRows = 3
	var currentRow int64
	var n int64
	var s string
	for cursor.Next() {
		currentRow++
		err = cursor.Scan(&s, &n)
		if err != nil {
			t.Fatal(err)
		}
		// The people fixture's ages are 1, 2, 3 — matching the row number.
		if n != currentRow {
			t.Errorf("expected number(Age)=%d, got %d", currentRow, n)
		}
	}
	if currentRow != expectedRows {
		t.Errorf("expected %d rows, got %d rows", expectedRows, currentRow)
	}
}
// TestInvalidNilValues checks the error messages produced when scanning a
// NULL column into destinations that cannot represent NULL.
func TestInvalidNilValues(t *testing.T) {
	var date1 time.Time
	var date2 int
	tests := []struct {
		name          string
		input         interface{}
		expectedError string
	}{
		{
			name:          "time.Time",
			input:         &date1,
			expectedError: `sql: Scan error on column index 0, name "bdate": unsupported Scan, storing driver.Value type <nil> into type *time.Time`,
		},
		{
			name:          "int",
			input:         &date2,
			expectedError: `sql: Scan error on column index 0, name "bdate": converting NULL to int is unsupported`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			db := newTestDB(t, "people")
			defer closeDB(t, db)
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			conn, err := db.Conn(ctx)
			if err != nil {
				t.Fatal(err)
			}
			// Suppress the fake driver's dirty-session tracking for this conn.
			conn.dc.ci.(*fakeConn).skipDirtySession = true
			defer conn.Close()
			// Alice (age=1) has a NULL bdate in the people fixture.
			err = conn.QueryRowContext(ctx, "SELECT|people|bdate|age=?", 1).Scan(tt.input)
			if err == nil {
				t.Fatal("expected error when querying nil column, but succeeded")
			}
			if err.Error() != tt.expectedError {
				t.Fatalf("Expected error: %s\nReceived: %s", tt.expectedError, err.Error())
			}
			// The connection must remain usable after the scan error.
			err = conn.PingContext(ctx)
			if err != nil {
				t.Fatal(err)
			}
		})
	}
}
// TestConnTx begins a transaction on a reserved Conn, inserts a row,
// commits, and reads the row back on the same connection.
func TestConnTx(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	conn, err := db.Conn(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Suppress the fake driver's dirty-session tracking for this conn.
	conn.dc.ci.(*fakeConn).skipDirtySession = true
	defer conn.Close()
	tx, err := conn.BeginTx(ctx, nil)
	if err != nil {
		t.Fatal(err)
	}
	insertName, insertAge := "Nancy", 33
	_, err = tx.ExecContext(ctx, "INSERT|people|name=?,age=?,photo=APHOTO", insertName, insertAge)
	if err != nil {
		t.Fatal(err)
	}
	err = tx.Commit()
	if err != nil {
		t.Fatal(err)
	}
	var selectName string
	err = conn.QueryRowContext(ctx, "SELECT|people|name|age=?", insertAge).Scan(&selectName)
	if err != nil {
		t.Fatal(err)
	}
	if selectName != insertName {
		t.Fatalf("got %q want %q", selectName, insertName)
	}
}
// TestConnIsValid verifies that a database connection that should be discarded,
// is actually discarded and does not re-enter the connection pool.
// If the IsValid method from *fakeConn is removed, this test will fail.
func TestConnIsValid(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	db.SetMaxOpenConns(1)
	ctx := context.Background()
	c, err := db.Conn(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Mark the underlying driver connection permanently bad.
	err = c.Raw(func(raw interface{}) error {
		dc := raw.(*fakeConn)
		dc.stickyBad = true
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	c.Close()
	// The bad connection must not be sitting in the free pool.
	if len(db.freeConn) > 0 && db.freeConn[0].ci.(*fakeConn).stickyBad {
		t.Fatal("bad connection returned to pool; expected bad connection to be discarded")
	}
}
// Tests fix for issue 2542, that we release a lock when querying on
// a closed connection.
func TestIssue2542Deadlock(t *testing.T) {
	db := newTestDB(t, "people")
	closeDB(t, db)
	// Two iterations: if the lock were not released, the second Query
	// would deadlock instead of returning an error.
	for i := 0; i < 2; i++ {
		_, err := db.Query("SELECT|people|age,name|")
		if err == nil {
			t.Fatalf("expected error")
		}
	}
}
// From golang.org/issue/3865
// TestCloseStmtBeforeRows checks that a Stmt can be closed while Rows
// derived from it are still open.
func TestCloseStmtBeforeRows(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	s, err := db.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	r, err := s.Query()
	if err != nil {
		s.Close()
		t.Fatal(err)
	}
	// Close the statement first, then the rows.
	err = s.Close()
	if err != nil {
		t.Fatal(err)
	}
	r.Close()
}
// Tests fix for issue 2788, that we bind nil to a []byte if the
// value in the column is sql null
func TestNullByteSlice(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t|id=int32,name=nullstring")
	exec(t, db, "INSERT|t|id=10,name=?", nil)
	var name []byte
	err := db.QueryRow("SELECT|t|name|id=?", 10).Scan(&name)
	if err != nil {
		t.Fatal(err)
	}
	if name != nil {
		t.Fatalf("name []byte should be nil for null column value, got: %#v", name)
	}
	// A non-NULL value scans into the same []byte destination normally.
	exec(t, db, "INSERT|t|id=11,name=?", "bob")
	err = db.QueryRow("SELECT|t|name|id=?", 11).Scan(&name)
	if err != nil {
		t.Fatal(err)
	}
	if string(name) != "bob" {
		t.Fatalf("name []byte should be bob, got: %q", string(name))
	}
}
// TestPointerParamsAndScans checks *string as both a query parameter
// (nil meaning NULL) and a scan destination (NULL scanning to nil).
func TestPointerParamsAndScans(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t|id=int32,name=nullstring")
	bob := "bob"
	var name *string
	name = &bob
	exec(t, db, "INSERT|t|id=10,name=?", name)
	// A nil *string parameter inserts NULL.
	name = nil
	exec(t, db, "INSERT|t|id=20,name=?", name)
	err := db.QueryRow("SELECT|t|name|id=?", 10).Scan(&name)
	if err != nil {
		t.Fatalf("querying id 10: %v", err)
	}
	if name == nil {
		t.Errorf("id 10's name = nil; want bob")
	} else if *name != "bob" {
		t.Errorf("id 10's name = %q; want bob", *name)
	}
	err = db.QueryRow("SELECT|t|name|id=?", 20).Scan(&name)
	if err != nil {
		t.Fatalf("querying id 20: %v", err)
	}
	if name != nil {
		t.Errorf("id 20 = %q; want nil", *name)
	}
}
// TestQueryRowClosingStmt verifies that QueryRow closes its implicit
// prepared statement: the connection's made/closed statement counts match.
func TestQueryRowClosingStmt(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	var name string
	var age int
	err := db.QueryRow("SELECT|people|age,name|age=?", 3).Scan(&age, &name)
	if err != nil {
		t.Fatal(err)
	}
	if len(db.freeConn) != 1 {
		t.Fatalf("expected 1 free conn")
	}
	fakeConn := db.freeConn[0].ci.(*fakeConn)
	if made, closed := fakeConn.stmtsMade, fakeConn.stmtsClosed; made != closed {
		t.Errorf("statement close mismatch: made %d, closed %d", made, closed)
	}
}
// atomicRowsCloseHook holds the current rows-close hook so tests can swap
// it concurrently without a data race.
var atomicRowsCloseHook atomic.Value // of func(*Rows, *error)

// init wires the package's rowsCloseHook to read from atomicRowsCloseHook.
func init() {
	rowsCloseHook = func() func(*Rows, *error) {
		fn, _ := atomicRowsCloseHook.Load().(func(*Rows, *error))
		return fn
	}
}
// setRowsCloseHook installs fn as the rows-close hook for tests.
// Passing nil installs a no-op function instead.
func setRowsCloseHook(fn func(*Rows, *error)) {
	if fn == nil {
		// Can't change an atomic.Value back to nil, so set it to this
		// no-op func instead.
		fn = func(*Rows, *error) {}
	}
	atomicRowsCloseHook.Store(fn)
}
// Test issue 6651
// TestIssue6651 verifies that errors injected from the driver's rows.Next
// and from rows.Close are surfaced by QueryRow(...).Scan.
func TestIssue6651(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	var v string
	want := "error in rows.Next"
	rowsCursorNextHook = func(dest []driver.Value) error {
		// errors.New instead of fmt.Errorf(want): want is a non-constant
		// format string, which go vet's printf check rejects.
		return errors.New(want)
	}
	defer func() { rowsCursorNextHook = nil }()
	err := db.QueryRow("SELECT|people|name|").Scan(&v)
	if err == nil || err.Error() != want {
		t.Errorf("error = %q; want %q", err, want)
	}
	rowsCursorNextHook = nil
	want = "error in rows.Close"
	setRowsCloseHook(func(rows *Rows, err *error) {
		*err = errors.New(want)
	})
	defer setRowsCloseHook(nil)
	err = db.QueryRow("SELECT|people|name|").Scan(&v)
	if err == nil || err.Error() != want {
		t.Errorf("error = %q; want %q", err, want)
	}
}
// nullTestRow is one row of a nullTestSpec: the value inserted into the
// nullable column, the value inserted into the non-null column, and the
// value expected when scanning the nullable column back.
type nullTestRow struct {
	nullParam    interface{}
	notNullParam interface{}
	scanNullVal  interface{}
}
// nullTestSpec describes a nullTestRun scenario: the fakedb column type
// names for the nullable and non-null columns, plus six test rows.
type nullTestSpec struct {
	nullType    string
	notNullType string
	rows        [6]nullTestRow
}
func TestNullStringParam(t *testing.T) {
spec := nullTestSpec{"nullstring", "string", [6]nullTestRow{
{NullString{"aqua", true}, "", NullString{"aqua", true}},
{NullString{"brown", false}, "", NullString{"", false}},
{"chartreuse", "", NullString{"chartreuse", true}},
{NullString{"darkred", true}, "", NullString{"darkred", true}},
{NullString{"eel", false}, "", NullString{"", false}},
{"foo", NullString{"black", false}, nil},
}}
nullTestRun(t, spec)
}
func TestNullInt64Param(t *testing.T) {
spec := nullTestSpec{"nullint64", "int64", [6]nullTestRow{
{NullInt64{31, true}, 1, NullInt64{31, true}},
{NullInt64{-22, false}, 1, NullInt64{0, false}},
{22, 1, NullInt64{22, true}},
{NullInt64{33, true}, 1, NullInt64{33, true}},
{NullInt64{222, false}, 1, NullInt64{0, false}},
{0, NullInt64{31, false}, nil},
}}
nullTestRun(t, spec)
}
func TestNullInt32Param(t *testing.T) {
spec := nullTestSpec{"nullint32", "int32", [6]nullTestRow{
{NullInt32{31, true}, 1, NullInt32{31, true}},
{NullInt32{-22, false}, 1, NullInt32{0, false}},
{22, 1, NullInt32{22, true}},
{NullInt32{33, true}, 1, NullInt32{33, true}},
{NullInt32{222, false}, 1, NullInt32{0, false}},
{0, NullInt32{31, false}, nil},
}}
nullTestRun(t, spec)
}
func TestNullInt16Param(t *testing.T) {
spec := nullTestSpec{"nullint16", "int16", [6]nullTestRow{
{NullInt16{31, true}, 1, NullInt16{31, true}},
{NullInt16{-22, false}, 1, NullInt16{0, false}},
{22, 1, NullInt16{22, true}},
{NullInt16{33, true}, 1, NullInt16{33, true}},
{NullInt16{222, false}, 1, NullInt16{0, false}},
{0, NullInt16{31, false}, nil},
}}
nullTestRun(t, spec)
}
func TestNullByteParam(t *testing.T) {
spec := nullTestSpec{"nullbyte", "byte", [6]nullTestRow{
{NullByte{31, true}, 1, NullByte{31, true}},
{NullByte{0, false}, 1, NullByte{0, false}},
{22, 1, NullByte{22, true}},
{NullByte{33, true}, 1, NullByte{33, true}},
{NullByte{222, false}, 1, NullByte{0, false}},
{0, NullByte{31, false}, nil},
}}
nullTestRun(t, spec)
}
func TestNullFloat64Param(t *testing.T) {
spec := nullTestSpec{"nullfloat64", "float64", [6]nullTestRow{
{NullFloat64{31.2, true}, 1, NullFloat64{31.2, true}},
{NullFloat64{13.1, false}, 1, NullFloat64{0, false}},
{-22.9, 1, NullFloat64{-22.9, true}},
{NullFloat64{33.81, true}, 1, NullFloat64{33.81, true}},
{NullFloat64{222, false}, 1, NullFloat64{0, false}},
{10, NullFloat64{31.2, false}, nil},
}}
nullTestRun(t, spec)
}
func TestNullBoolParam(t *testing.T) {
spec := nullTestSpec{"nullbool", "bool", [6]nullTestRow{
{NullBool{false, true}, true, NullBool{false, true}},
{NullBool{true, false}, false, NullBool{false, false}},
{true, true, NullBool{true, true}},
{NullBool{true, true}, false, NullBool{true, true}},
{NullBool{true, false}, true, NullBool{false, false}},
{true, NullBool{true, false}, nil},
}}
nullTestRun(t, spec)
}
func TestNullTimeParam(t *testing.T) {
t0 := time.Time{}
t1 := time.Date(2000, 1, 1, 8, 9, 10, 11, time.UTC)
t2 := time.Date(2010, 1, 1, 8, 9, 10, 11, time.UTC)
spec := nullTestSpec{"nulldatetime", "datetime", [6]nullTestRow{
{NullTime{t1, true}, t2, NullTime{t1, true}},
{NullTime{t1, false}, t2, NullTime{t0, false}},
{t1, t2, NullTime{t1, true}},
{NullTime{t1, true}, t2, NullTime{t1, true}},
{NullTime{t1, false}, t2, NullTime{t0, false}},
{t2, NullTime{t1, false}, nil},
}}
nullTestRun(t, spec)
}
// nullTestRun executes the shared null/not-null round-trip test: it
// creates a table whose nullf column has spec.nullType and notnullf
// has spec.notNullType, inserts spec.rows via Exec and via a prepared
// statement, then scans the nullable column back and compares it with
// each row's scanNullVal.
func nullTestRun(t *testing.T, spec nullTestSpec) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, fmt.Sprintf("CREATE|t|id=int32,name=string,nullf=%s,notnullf=%s", spec.nullType, spec.notNullType))
	// Inserts with db.Exec:
	exec(t, db, "INSERT|t|id=?,name=?,nullf=?,notnullf=?", 1, "alice", spec.rows[0].nullParam, spec.rows[0].notNullParam)
	exec(t, db, "INSERT|t|id=?,name=?,nullf=?,notnullf=?", 2, "bob", spec.rows[1].nullParam, spec.rows[1].notNullParam)
	// Inserts with a prepared statement:
	stmt, err := db.Prepare("INSERT|t|id=?,name=?,nullf=?,notnullf=?")
	if err != nil {
		t.Fatalf("prepare: %v", err)
	}
	defer stmt.Close()
	if _, err := stmt.Exec(3, "chris", spec.rows[2].nullParam, spec.rows[2].notNullParam); err != nil {
		t.Errorf("exec insert chris: %v", err)
	}
	if _, err := stmt.Exec(4, "dave", spec.rows[3].nullParam, spec.rows[3].notNullParam); err != nil {
		t.Errorf("exec insert dave: %v", err)
	}
	if _, err := stmt.Exec(5, "eleanor", spec.rows[4].nullParam, spec.rows[4].notNullParam); err != nil {
		t.Errorf("exec insert eleanor: %v", err)
	}
	// Can't put null val into non-null col
	if _, err := stmt.Exec(6, "bob", spec.rows[5].nullParam, spec.rows[5].notNullParam); err == nil {
		t.Errorf("expected error inserting nil val with prepared statement Exec")
	}
	_, err = db.Exec("INSERT|t|id=?,name=?,nullf=?", 999, nil, nil)
	if err == nil {
		// TODO: this test fails, but it's just because
		// fakeConn implements the optional Execer interface,
		// so arguably this is the correct behavior. But
		// maybe I should flesh out the fakeConn.Exec
		// implementation so this properly fails.
		// t.Errorf("expected error inserting nil name with Exec")
	}
	// Scan each nullable value back through a freshly allocated value
	// of the same dynamic type as the first row's nullParam, then
	// compare with reflect.DeepEqual (rows 1-5; the sixth row was
	// never inserted).
	paramtype := reflect.TypeOf(spec.rows[0].nullParam)
	bindVal := reflect.New(paramtype).Interface()
	for i := 0; i < 5; i++ {
		id := i + 1
		if err := db.QueryRow("SELECT|t|nullf|id=?", id).Scan(bindVal); err != nil {
			t.Errorf("id=%d Scan: %v", id, err)
		}
		bindValDeref := reflect.ValueOf(bindVal).Elem().Interface()
		if !reflect.DeepEqual(bindValDeref, spec.rows[i].scanNullVal) {
			t.Errorf("id=%d got %#v, want %#v", id, bindValDeref, spec.rows[i].scanNullVal)
		}
	}
}
// golang.org/issue/4859
// TestQueryRowNilScanDest verifies that scanning into a nil *string
// yields a descriptive error rather than panicking.
func TestQueryRowNilScanDest(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	var name *string // nil pointer
	err := db.QueryRow("SELECT|people|name|").Scan(name)
	want := `sql: Scan error on column index 0, name "name": destination pointer is nil`
	// Format err with %v rather than calling err.Error(): the old
	// failure path dereferenced err even when it was nil, so an
	// unexpected nil error would have panicked instead of reporting.
	if err == nil || err.Error() != want {
		t.Errorf("error = %v; want %q", err, want)
	}
}
// TestIssue4902 verifies that repeatedly preparing and closing the
// same statement reuses the pooled connection instead of opening a
// fresh driver connection on each cycle (golang.org/issue/4902).
func TestIssue4902(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	driver := db.Driver().(*fakeDriver)
	opens0 := driver.openCount
	var stmt *Stmt
	var err error
	for i := 0; i < 10; i++ {
		stmt, err = db.Prepare("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		err = stmt.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
	// Across all ten prepare/close cycles, at most one additional
	// driver-level connection open is acceptable.
	opens := driver.openCount - opens0
	if opens > 1 {
		t.Errorf("opens = %d; want <= 1", opens)
		t.Logf("db = %#v", db)
		t.Logf("driver = %#v", driver)
		t.Logf("stmt = %#v", stmt)
	}
}
// Issue 3857
// This used to deadlock.
func TestSimultaneousQueries(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	defer tx.Rollback()
	// Hold two result sets open at once on the same transaction;
	// both are closed (LIFO) when the test returns.
	for i := 0; i < 2; i++ {
		rows, err := tx.Query("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		defer rows.Close()
	}
}
// TestMaxIdleConns checks that lowering SetMaxIdleConns to zero both
// drops currently idle connections and prevents subsequently released
// connections from being kept in the free list.
func TestMaxIdleConns(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx.Commit()
	// The connection used by the transaction should now be idle.
	if got := len(db.freeConn); got != 1 {
		t.Errorf("freeConns = %d; want 1", got)
	}
	db.SetMaxIdleConns(0)
	if got := len(db.freeConn); got != 0 {
		t.Errorf("freeConns after set to zero = %d; want 0", got)
	}
	tx, err = db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx.Commit()
	// With a zero idle limit the released connection must be closed,
	// not pooled.
	if got := len(db.freeConn); got != 0 {
		t.Errorf("freeConns = %d; want 0", got)
	}
}
// TestMaxOpenConns runs 100 concurrent slow queries against a pool
// capped at 10 open connections, then lowers the cap and checks that
// free-connection and dependency counts shrink accordingly, and that
// the driver never saw more than 10 opens.
func TestMaxOpenConns(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v", err)
		}
	})
	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)
	driver := db.Driver().(*fakeDriver)
	// Force the number of open connections to 0 so we can get an accurate
	// count for the test
	db.clearAllConns(t)
	driver.mu.Lock()
	opens0 := driver.openCount
	closes0 := driver.closeCount
	driver.mu.Unlock()
	db.SetMaxIdleConns(10)
	db.SetMaxOpenConns(10)
	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		t.Fatal(err)
	}
	// Start 50 parallel slow queries.
	const (
		nquery      = 50
		sleepMillis = 25
		nbatch      = 2
	)
	var wg sync.WaitGroup
	for batch := 0; batch < nbatch; batch++ {
		for i := 0; i < nquery; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				var op string
				if err := stmt.QueryRow("sleep", sleepMillis).Scan(&op); err != nil && err != ErrNoRows {
					t.Error(err)
				}
			}()
		}
		// Sleep for twice the expected length of time for the
		// batch of 50 queries above to finish before starting
		// the next round.
		time.Sleep(2 * sleepMillis * time.Millisecond)
	}
	wg.Wait()
	if g, w := db.numFreeConns(), 10; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(20, time.Second); n > 20 {
		t.Errorf("number of dependencies = %d; expected <= 20", n)
		db.dumpDeps(t)
	}
	driver.mu.Lock()
	opens := driver.openCount - opens0
	closes := driver.closeCount - closes0
	driver.mu.Unlock()
	if opens > 10 {
		t.Logf("open calls = %d", opens)
		t.Logf("close calls = %d", closes)
		t.Errorf("db connections opened = %d; want <= 10", opens)
		db.dumpDeps(t)
	}
	if err := stmt.Close(); err != nil {
		t.Fatal(err)
	}
	if g, w := db.numFreeConns(), 10; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(10, time.Second); n > 10 {
		t.Errorf("number of dependencies = %d; expected <= 10", n)
		db.dumpDeps(t)
	}
	db.SetMaxOpenConns(5)
	if g, w := db.numFreeConns(), 5; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(5, time.Second); n > 5 {
		// Message fixed: it previously claimed "expected 0" although
		// the condition checks n > 5.
		t.Errorf("number of dependencies = %d; expected <= 5", n)
		db.dumpDeps(t)
	}
	// SetMaxOpenConns(0) removes the cap; the 5 idle conns remain.
	db.SetMaxOpenConns(0)
	if g, w := db.numFreeConns(), 5; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(5, time.Second); n > 5 {
		t.Errorf("number of dependencies = %d; expected <= 5", n)
		db.dumpDeps(t)
	}
	db.clearAllConns(t)
}
// Issue 9453: tests that SetMaxOpenConns can be lowered at runtime
// and affects the subsequent release of connections.
func TestMaxOpenConnsOnBusy(t *testing.T) {
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v", err)
		}
	})
	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)
	db.SetMaxOpenConns(3)
	ctx := context.Background()
	// Check out all three permitted connections.
	conn0, err := db.conn(ctx, cachedOrNewConn)
	if err != nil {
		t.Fatalf("db open conn fail: %v", err)
	}
	conn1, err := db.conn(ctx, cachedOrNewConn)
	if err != nil {
		t.Fatalf("db open conn fail: %v", err)
	}
	conn2, err := db.conn(ctx, cachedOrNewConn)
	if err != nil {
		t.Fatalf("db open conn fail: %v", err)
	}
	// Messages below fixed: these checks measure db.numOpen (open
	// connections), not the free list.
	if g, w := db.numOpen, 3; g != w {
		t.Errorf("open conns = %d; want %d", g, w)
	}
	// Lowering the limit must not force busy connections closed...
	db.SetMaxOpenConns(2)
	if g, w := db.numOpen, 3; g != w {
		t.Errorf("open conns = %d; want %d", g, w)
	}
	// ...but releasing them should shrink the pool to the new cap.
	conn0.releaseConn(nil)
	conn1.releaseConn(nil)
	if g, w := db.numOpen, 2; g != w {
		t.Errorf("open conns = %d; want %d", g, w)
	}
	conn2.releaseConn(nil)
	if g, w := db.numOpen, 2; g != w {
		t.Errorf("open conns = %d; want %d", g, w)
	}
}
// Issue 10886: tests that all connection attempts return when more than
// DB.maxOpen connections are in flight and the first DB.maxOpen fail.
func TestPendingConnsAfterErr(t *testing.T) {
	const (
		maxOpen = 2
		tryOpen = maxOpen*2 + 2
	)
	// No queries will be run.
	db, err := Open("test", fakeDBName)
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	defer closeDB(t, db)
	defer func() {
		for k, v := range db.lastPut {
			t.Logf("%p: %v", k, v)
		}
	}()
	db.SetMaxOpenConns(maxOpen)
	db.SetMaxIdleConns(0)
	errOffline := errors.New("db offline")
	defer func() { setHookOpenErr(nil) }()
	errs := make(chan error, tryOpen)
	var opening sync.WaitGroup
	opening.Add(tryOpen)
	// Every driver open fails, but only after all tryOpen workers
	// have started, so tryOpen-maxOpen requests are queued behind
	// the cap when the failures begin.
	setHookOpenErr(func() error {
		// Wait for all connections to enqueue.
		opening.Wait()
		return errOffline
	})
	for i := 0; i < tryOpen; i++ {
		go func() {
			opening.Done() // signal one connection is in flight
			_, err := db.Exec("will never run")
			errs <- err
		}()
	}
	opening.Wait() // wait for all workers to begin running
	const timeout = 5 * time.Second
	to := time.NewTimer(timeout)
	defer to.Stop()
	// check that all connections fail without deadlock
	for i := 0; i < tryOpen; i++ {
		select {
		case err := <-errs:
			if got, want := err, errOffline; got != want {
				t.Errorf("unexpected err: got %v, want %v", got, want)
			}
		case <-to.C:
			t.Fatalf("orphaned connection request(s), still waiting after %v", timeout)
		}
	}
	// Wait a reasonable time for the database to close all connections.
	tick := time.NewTicker(3 * time.Millisecond)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			db.mu.Lock()
			if db.numOpen == 0 {
				db.mu.Unlock()
				return
			}
			db.mu.Unlock()
		case <-to.C:
			// Closing the database will check for numOpen and fail the test.
			return
		}
	}
}
// TestSingleOpenConn verifies that with MaxOpenConns(1) a second
// query can run after the first result set is closed; this used to
// be a deadlock risk.
func TestSingleOpenConn(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	db.SetMaxOpenConns(1)
	// Two sequential query/close cycles; the second must not
	// deadlock waiting for the single connection.
	for i := 0; i < 2; i++ {
		rows, err := db.Query("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		if err = rows.Close(); err != nil {
			t.Fatal(err)
		}
	}
}
// TestStats checks that DB.Stats reports one open connection while
// the test DB's initial connection is pooled, and zero after Close.
func TestStats(t *testing.T) {
	db := newTestDB(t, "people")
	stats := db.Stats()
	if got := stats.OpenConnections; got != 1 {
		t.Errorf("stats.OpenConnections = %d; want 1", got)
	}
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx.Commit()
	// Closing the DB must release every connection.
	closeDB(t, db)
	stats = db.Stats()
	if got := stats.OpenConnections; got != 0 {
		t.Errorf("stats.OpenConnections = %d; want 0", got)
	}
}
// TestConnMaxLifetime verifies that SetConnMaxLifetime causes a
// connection older than the limit to be discarded (and reopened)
// the next time it would be reused. Time is simulated by overriding
// nowFunc with an adjustable offset.
func TestConnMaxLifetime(t *testing.T) {
	t0 := time.Unix(1000000, 0)
	offset := time.Duration(0)
	nowFunc = func() time.Time { return t0.Add(offset) }
	defer func() { nowFunc = time.Now }()
	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)
	driver := db.Driver().(*fakeDriver)
	// Force the number of open connections to 0 so we can get an accurate
	// count for the test
	db.clearAllConns(t)
	driver.mu.Lock()
	opens0 := driver.openCount
	closes0 := driver.closeCount
	driver.mu.Unlock()
	db.SetMaxIdleConns(10)
	db.SetMaxOpenConns(10)
	// Two overlapping transactions force two connections open, the
	// second created one simulated second after the first.
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	offset = time.Second
	tx2, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx.Commit()
	tx2.Commit()
	driver.mu.Lock()
	opens := driver.openCount - opens0
	closes := driver.closeCount - closes0
	driver.mu.Unlock()
	if opens != 2 {
		t.Errorf("opens = %d; want 2", opens)
	}
	if closes != 0 {
		t.Errorf("closes = %d; want 0", closes)
	}
	if g, w := db.numFreeConns(), 2; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	// Expire first conn: at offset 11s the first connection (age 11s)
	// is past the 10s lifetime while the second (age 10s) is not.
	offset = 11 * time.Second
	db.SetConnMaxLifetime(10 * time.Second)
	// (A stale "if err != nil" check that re-tested the already
	// handled error from tx2 was removed here; SetConnMaxLifetime
	// returns nothing.)
	tx, err = db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx2, err = db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx.Commit()
	tx2.Commit()
	driver.mu.Lock()
	opens = driver.openCount - opens0
	closes = driver.closeCount - closes0
	driver.mu.Unlock()
	// One new open replaces the one expired connection.
	if opens != 3 {
		t.Errorf("opens = %d; want 3", opens)
	}
	if closes != 1 {
		t.Errorf("closes = %d; want 1", closes)
	}
}
// golang.org/issue/5323
// TestStmtCloseDeps runs many concurrent queries through one prepared
// statement and verifies that the pool's dependency tracking and the
// statement's connStmt cache shrink back down, and that closing the
// statement releases its per-connection resources.
func TestStmtCloseDeps(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v", err)
		}
	})
	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)
	driver := db.Driver().(*fakeDriver)
	driver.mu.Lock()
	opens0 := driver.openCount
	closes0 := driver.closeCount
	driver.mu.Unlock()
	openDelta0 := opens0 - closes0
	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		t.Fatal(err)
	}
	// Start 50 parallel slow queries.
	const (
		nquery      = 50
		sleepMillis = 25
		nbatch      = 2
	)
	var wg sync.WaitGroup
	for batch := 0; batch < nbatch; batch++ {
		for i := 0; i < nquery; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				var op string
				if err := stmt.QueryRow("sleep", sleepMillis).Scan(&op); err != nil && err != ErrNoRows {
					t.Error(err)
				}
			}()
		}
		// Sleep for twice the expected length of time for the
		// batch of 50 queries above to finish before starting
		// the next round.
		time.Sleep(2 * sleepMillis * time.Millisecond)
	}
	wg.Wait()
	if g, w := db.numFreeConns(), 2; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(4, time.Second); n > 4 {
		t.Errorf("number of dependencies = %d; expected <= 4", n)
		db.dumpDeps(t)
	}
	driver.mu.Lock()
	opens := driver.openCount - opens0
	closes := driver.closeCount - closes0
	// openDelta is net new connections relative to before the test.
	openDelta := (driver.openCount - driver.closeCount) - openDelta0
	driver.mu.Unlock()
	if openDelta > 2 {
		t.Logf("open calls = %d", opens)
		t.Logf("close calls = %d", closes)
		t.Logf("open delta = %d", openDelta)
		t.Errorf("db connections opened = %d; want <= 2", openDelta)
		db.dumpDeps(t)
	}
	// The statement's per-connection statement cache should drain as
	// queries complete; poll since completion is asynchronous.
	if !waitCondition(5*time.Second, 5*time.Millisecond, func() bool {
		return len(stmt.css) <= nquery
	}) {
		t.Errorf("len(stmt.css) = %d; want <= %d", len(stmt.css), nquery)
	}
	if err := stmt.Close(); err != nil {
		t.Fatal(err)
	}
	if g, w := db.numFreeConns(), 2; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(2, time.Second); n > 2 {
		t.Errorf("number of dependencies = %d; expected <= 2", n)
		db.dumpDeps(t)
	}
	db.clearAllConns(t)
}
// golang.org/issue/5046
// TestCloseConnBeforeStmts closes the DB while a prepared statement
// is still open, then closes the statement, verifying the driver
// connection and its open statements are torn down in either order
// without double-close errors.
func TestCloseConnBeforeStmts(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v; from %s", err, stack())
			db.dumpDeps(t)
			t.Errorf("DB = %#v", db)
		}
	})
	stmt, err := db.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	if len(db.freeConn) != 1 {
		t.Fatalf("expected 1 freeConn; got %d", len(db.freeConn))
	}
	dc := db.freeConn[0]
	if dc.closed {
		t.Errorf("conn shouldn't be closed")
	}
	// The prepared statement is tracked on the idle connection.
	if n := len(dc.openStmt); n != 1 {
		t.Errorf("driverConn num openStmt = %d; want 1", n)
	}
	err = db.Close()
	if err != nil {
		t.Errorf("db Close = %v", err)
	}
	// Closing the DB must close the idle connection and clear its
	// statement tracking.
	if !dc.closed {
		t.Errorf("after db.Close, driverConn should be closed")
	}
	if n := len(dc.openStmt); n != 0 {
		t.Errorf("driverConn num openStmt = %d; want 0", n)
	}
	// Closing the statement afterwards must still succeed.
	err = stmt.Close()
	if err != nil {
		t.Errorf("Stmt close = %v", err)
	}
	if !dc.closed {
		t.Errorf("conn should be closed")
	}
	if dc.ci != nil {
		t.Errorf("after Stmt Close, driverConn's Conn interface should be nil")
	}
}
// golang.org/issue/5283: don't release the Rows' connection in Close
// before calling Stmt.Close.
func TestRowsCloseOrder(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	db.SetMaxIdleConns(0)
	// Strict close checking makes any out-of-order teardown fail.
	setStrictFakeConnClose(t)
	defer setStrictFakeConnClose(nil)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	if err := rows.Close(); err != nil {
		t.Fatal(err)
	}
}
// TestRowsImplicitClose injects a driver error partway through
// iteration and verifies Next stops, Err reports the injected error,
// and the underlying cursor was implicitly closed.
func TestRowsImplicitClose(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	// Arrange for the fake cursor to fail after `want` rows.
	want, fail := 2, errors.New("fail")
	r := rows.rowsi.(*rowsCursor)
	r.errPos, r.err = want, fail
	got := 0
	for rows.Next() {
		got++
	}
	if got != want {
		t.Errorf("got %d rows, want %d", got, want)
	}
	if err := rows.Err(); err != fail {
		t.Errorf("got error %v, want %v", err, fail)
	}
	if !r.closed {
		t.Errorf("r.closed is false, want true")
	}
}
// TestStmtCloseOrder checks, under strict close verification, that a
// failing query against a missing table reports an error and does not
// mis-order connection/statement teardown.
func TestStmtCloseOrder(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	db.SetMaxIdleConns(0)
	setStrictFakeConnClose(t)
	defer setStrictFakeConnClose(nil)
	if _, err := db.Query("SELECT|non_existent|name|"); err == nil {
		t.Fatal("Querying non-existent table should fail")
	}
}
// Test cases where there's more than maxBadConnRetries bad connections in the
// pool (issue 8834)
func TestManyErrBadConn(t *testing.T) {
	// manyErrBadConnSetup returns a DB whose entire free list is
	// marked sticky-bad, so every cached connection the operation
	// under test grabs will fail and force retries/new opens. The
	// optional `first` funcs run before the pool is poisoned (used
	// below to prepare statements on good connections).
	manyErrBadConnSetup := func(first ...func(db *DB)) *DB {
		db := newTestDB(t, "people")
		for _, f := range first {
			f(db)
		}
		nconn := maxBadConnRetries + 1
		db.SetMaxIdleConns(nconn)
		db.SetMaxOpenConns(nconn)
		// open enough connections
		func() {
			for i := 0; i < nconn; i++ {
				rows, err := db.Query("SELECT|people|age,name|")
				if err != nil {
					t.Fatal(err)
				}
				defer rows.Close()
			}
		}()
		db.mu.Lock()
		defer db.mu.Unlock()
		if db.numOpen != nconn {
			t.Fatalf("unexpected numOpen %d (was expecting %d)", db.numOpen, nconn)
		} else if len(db.freeConn) != nconn {
			t.Fatalf("unexpected len(db.freeConn) %d (was expecting %d)", len(db.freeConn), nconn)
		}
		// Poison every idle connection.
		for _, conn := range db.freeConn {
			conn.Lock()
			conn.ci.(*fakeConn).stickyBad = true
			conn.Unlock()
		}
		return db
	}
	// Each operation below must succeed despite the poisoned pool.
	// Query
	db := manyErrBadConnSetup()
	defer closeDB(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	if err = rows.Close(); err != nil {
		t.Fatal(err)
	}
	// Exec
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	_, err = db.Exec("INSERT|people|name=Julia,age=19")
	if err != nil {
		t.Fatal(err)
	}
	// Begin
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	if err = tx.Rollback(); err != nil {
		t.Fatal(err)
	}
	// Prepare
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	stmt, err := db.Prepare("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	if err = stmt.Close(); err != nil {
		t.Fatal(err)
	}
	// Stmt.Exec
	db = manyErrBadConnSetup(func(db *DB) {
		stmt, err = db.Prepare("INSERT|people|name=Julia,age=19")
		if err != nil {
			t.Fatal(err)
		}
	})
	defer closeDB(t, db)
	_, err = stmt.Exec()
	if err != nil {
		t.Fatal(err)
	}
	if err = stmt.Close(); err != nil {
		t.Fatal(err)
	}
	// Stmt.Query
	db = manyErrBadConnSetup(func(db *DB) {
		stmt, err = db.Prepare("SELECT|people|age,name|")
		if err != nil {
			t.Fatal(err)
		}
	})
	defer closeDB(t, db)
	rows, err = stmt.Query()
	if err != nil {
		t.Fatal(err)
	}
	if err = rows.Close(); err != nil {
		t.Fatal(err)
	}
	if err = stmt.Close(); err != nil {
		t.Fatal(err)
	}
	// Conn
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	conn, err := db.Conn(ctx)
	if err != nil {
		t.Fatal(err)
	}
	conn.dc.ci.(*fakeConn).skipDirtySession = true
	err = conn.Close()
	if err != nil {
		t.Fatal(err)
	}
	// Ping
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	err = db.PingContext(ctx)
	if err != nil {
		t.Fatal(err)
	}
}
// Issue 34775: Ensure that a Tx cannot commit after a rollback.
func TestTxCannotCommitAfterRollback(t *testing.T) {
	db := newTestDB(t, "tx_status")
	defer closeDB(t, db)
	// First check query reporting is correct.
	var txStatus string
	err := db.QueryRow("SELECT|tx_status|tx_status|").Scan(&txStatus)
	if err != nil {
		t.Fatal(err)
	}
	if g, w := txStatus, "autocommit"; g != w {
		t.Fatalf("tx_status=%q, wanted %q", g, w)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		t.Fatal(err)
	}
	// Ignore dirty session for this test.
	// A failing test should trigger the dirty session flag as well,
	// but that isn't exactly what this should test for.
	tx.txi.(*fakeTx).c.skipDirtySession = true
	defer tx.Rollback()
	err = tx.QueryRow("SELECT|tx_status|tx_status|").Scan(&txStatus)
	if err != nil {
		t.Fatal(err)
	}
	if g, w := txStatus, "transaction"; g != w {
		t.Fatalf("tx_status=%q, wanted %q", g, w)
	}
	// 1. Begin a transaction.
	// 2. (A) Start a query, (B) begin Tx rollback through a ctx cancel.
	// 3. Check if 2.A has committed in Tx (pass) or outside of Tx (fail).
	sendQuery := make(chan struct{})
	// The Tx status is returned through the row results, ensure
	// that the rows results are not canceled.
	bypassRowsAwaitDone = true
	// hookTxGrabConn fires when the query grabs the tx connection:
	// cancel the ctx (starting the async rollback) and block until
	// the rollback hook says the query may proceed.
	hookTxGrabConn = func() {
		cancel()
		<-sendQuery
	}
	rollbackHook = func() {
		close(sendQuery)
	}
	defer func() {
		hookTxGrabConn = nil
		rollbackHook = nil
		bypassRowsAwaitDone = false
	}()
	err = tx.QueryRow("SELECT|tx_status|tx_status|").Scan(&txStatus)
	if err != nil {
		// A failure here would be expected if skipDirtySession was not set to true above.
		t.Fatal(err)
	}
	// The racing query must still observe transaction status, i.e.
	// it ran inside the Tx and not on an autocommit connection.
	if g, w := txStatus, "transaction"; g != w {
		t.Fatalf("tx_status=%q, wanted %q", g, w)
	}
}
// Issue 40985 transaction statement deadlock while context cancel.
func TestTxStmtDeadlock(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tx, err := db.BeginTx(ctx, nil)
	// Cancel the context immediately so the Tx teardown races the
	// statement queries below.
	cancel()
	if err != nil {
		t.Fatal(err)
	}
	stmt, err := tx.Prepare("SELECT|people|name,age|age=?")
	if err != nil {
		t.Fatal(err)
	}
	// Run number of stmt queries to reproduce deadlock from context cancel
	for i := 0; i < 1e3; i++ {
		// Encounter any close related errors (e.g. ErrTxDone, stmt is closed)
		// is expected due to context cancel.
		_, err = stmt.Query(1)
		if err != nil {
			break
		}
	}
	_ = tx.Rollback()
}
// Issue32530 encounters an issue where a connection may
// expire right after it comes out of a used connection pool
// even when a new connection is requested.
func TestConnExpiresFreshOutOfPool(t *testing.T) {
	execCases := []struct {
		expired bool
		badReset bool
	}{
		{false, false},
		{true, false},
		{false, true},
	}
	// Simulated clock: offset is adjusted under offsetMu to expire
	// (or not) the connection handed back to the pool.
	t0 := time.Unix(1000000, 0)
	offset := time.Duration(0)
	offsetMu := sync.RWMutex{}
	nowFunc = func() time.Time {
		offsetMu.RLock()
		defer offsetMu.RUnlock()
		return t0.Add(offset)
	}
	defer func() { nowFunc = time.Now }()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)
	db.SetMaxOpenConns(1)
	for _, ec := range execCases {
		ec := ec
		name := fmt.Sprintf("expired=%t,badReset=%t", ec.expired, ec.badReset)
		t.Run(name, func(t *testing.T) {
			db.clearAllConns(t)
			db.SetMaxIdleConns(1)
			db.SetConnMaxLifetime(10 * time.Second)
			// Hold the only permitted connection...
			conn, err := db.conn(ctx, alwaysNewConn)
			if err != nil {
				t.Fatal(err)
			}
			afterPutConn := make(chan struct{})
			waitingForConn := make(chan struct{})
			// ...while a second goroutine blocks waiting for it.
			go func() {
				defer close(afterPutConn)
				conn, err := db.conn(ctx, alwaysNewConn)
				if err == nil {
					db.putConn(conn, err, false)
				} else {
					t.Errorf("db.conn: %v", err)
				}
			}()
			// Poll until that request is visibly queued.
			go func() {
				defer close(waitingForConn)
				for {
					if t.Failed() {
						return
					}
					db.mu.Lock()
					ct := len(db.connRequests)
					db.mu.Unlock()
					if ct > 0 {
						return
					}
					time.Sleep(10 * time.Millisecond)
				}
			}()
			<-waitingForConn
			if t.Failed() {
				return
			}
			// Optionally expire and/or poison the held connection,
			// then return it so it is handed straight to the waiter.
			offsetMu.Lock()
			if ec.expired {
				offset = 11 * time.Second
			} else {
				offset = time.Duration(0)
			}
			offsetMu.Unlock()
			conn.ci.(*fakeConn).stickyBad = ec.badReset
			db.putConn(conn, err, true)
			<-afterPutConn
		})
	}
}
// TestIssue20575 ensures the Rows from query does not block
// closing a transaction. Ensure Rows is closed while closing a transaction.
func TestIssue20575(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	_, err = tx.QueryContext(ctx, "SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	// Do not close Rows from QueryContext.
	err = tx.Rollback()
	if err != nil {
		t.Fatal(err)
	}
	// If Rollback had blocked on the open Rows, the 3s context would
	// have expired by now.
	select {
	default:
	case <-ctx.Done():
		t.Fatal("timeout: failed to rollback query without closing rows:", ctx.Err())
	}
}
// TestIssue20622 tests closing the transaction before rows is closed, requires
// the race detector to fail.
func TestIssue20622(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		t.Fatal(err)
	}
	rows, err := tx.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	count := 0
	for rows.Next() {
		count++
		var age int
		var name string
		if err := rows.Scan(&age, &name); err != nil {
			t.Fatal("scan failed", err)
		}
		// Cancel mid-iteration so the Tx teardown races the
		// still-open Rows; the race detector catches unsynchronized
		// access.
		if count == 1 {
			cancel()
		}
		time.Sleep(100 * time.Millisecond)
	}
	rows.Close()
	tx.Commit()
}
// golang.org/issue/5718
// TestErrBadConnReconnect simulates driver.ErrBadConn from each
// internal hook point and verifies the operation transparently
// retries on a fresh connection without leaking connections.
func TestErrBadConnReconnect(t *testing.T) {
	db := newTestDB(t, "foo")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	// simulateBadConn installs *hook to fail once (returning true =
	// report ErrBadConn) and then succeed, runs op, and checks that
	// the retry happened and no connection was leaked.
	simulateBadConn := func(name string, hook *func() bool, op func() error) {
		broken, retried := false, false
		numOpen := db.numOpen
		// simulate a broken connection on the first try
		*hook = func() bool {
			if !broken {
				broken = true
				return true
			}
			retried = true
			return false
		}
		if err := op(); err != nil {
			t.Errorf(name+": %v", err)
			return
		}
		if !broken || !retried {
			t.Error(name + ": Failed to simulate broken connection")
		}
		*hook = nil
		if numOpen != db.numOpen {
			t.Errorf(name+": leaked %d connection(s)!", db.numOpen-numOpen)
			numOpen = db.numOpen
		}
	}
	// db.Exec
	dbExec := func() error {
		_, err := db.Exec("INSERT|t1|name=?,age=?,dead=?", "Gordon", 3, true)
		return err
	}
	simulateBadConn("db.Exec prepare", &hookPrepareBadConn, dbExec)
	simulateBadConn("db.Exec exec", &hookExecBadConn, dbExec)
	// db.Query
	dbQuery := func() error {
		rows, err := db.Query("SELECT|t1|age,name|")
		if err == nil {
			err = rows.Close()
		}
		return err
	}
	simulateBadConn("db.Query prepare", &hookPrepareBadConn, dbQuery)
	simulateBadConn("db.Query query", &hookQueryBadConn, dbQuery)
	// db.Prepare
	simulateBadConn("db.Prepare", &hookPrepareBadConn, func() error {
		stmt, err := db.Prepare("INSERT|t1|name=?,age=?,dead=?")
		if err != nil {
			return err
		}
		stmt.Close()
		return nil
	})
	// Provide a way to force a re-prepare of a statement on next execution
	forcePrepare := func(stmt *Stmt) {
		stmt.css = nil
	}
	// stmt.Exec
	stmt1, err := db.Prepare("INSERT|t1|name=?,age=?,dead=?")
	if err != nil {
		t.Fatalf("prepare: %v", err)
	}
	defer stmt1.Close()
	// make sure we must prepare the stmt first
	forcePrepare(stmt1)
	stmtExec := func() error {
		_, err := stmt1.Exec("Gopher", 3, false)
		return err
	}
	simulateBadConn("stmt.Exec prepare", &hookPrepareBadConn, stmtExec)
	simulateBadConn("stmt.Exec exec", &hookExecBadConn, stmtExec)
	// stmt.Query
	stmt2, err := db.Prepare("SELECT|t1|age,name|")
	if err != nil {
		t.Fatalf("prepare: %v", err)
	}
	defer stmt2.Close()
	// make sure we must prepare the stmt first
	forcePrepare(stmt2)
	stmtQuery := func() error {
		rows, err := stmt2.Query()
		if err == nil {
			err = rows.Close()
		}
		return err
	}
	simulateBadConn("stmt.Query prepare", &hookPrepareBadConn, stmtQuery)
	simulateBadConn("stmt.Query exec", &hookQueryBadConn, stmtQuery)
}
// golang.org/issue/11264
// TestTxEndBadConn simulates driver.ErrBadConn at Commit/Rollback
// time and verifies the error is surfaced (not retried — the Tx is
// done) and the connection is not leaked.
func TestTxEndBadConn(t *testing.T) {
	db := newTestDB(t, "foo")
	defer closeDB(t, db)
	db.SetMaxIdleConns(0)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	db.SetMaxIdleConns(1)
	// Unlike TestErrBadConnReconnect's helper, the hook here keeps
	// returning true once tripped, and op is expected to FAIL with
	// driver.ErrBadConn (tx end is not retryable).
	simulateBadConn := func(name string, hook *func() bool, op func() error) {
		broken := false
		numOpen := db.numOpen
		*hook = func() bool {
			if !broken {
				broken = true
			}
			return broken
		}
		if err := op(); err != driver.ErrBadConn {
			t.Errorf(name+": %v", err)
			return
		}
		if !broken {
			t.Error(name + ": Failed to simulate broken connection")
		}
		*hook = nil
		if numOpen != db.numOpen {
			t.Errorf(name+": leaked %d connection(s)!", db.numOpen-numOpen)
		}
	}
	// db.Exec
	dbExec := func(endTx func(tx *Tx) error) func() error {
		return func() error {
			tx, err := db.Begin()
			if err != nil {
				return err
			}
			_, err = tx.Exec("INSERT|t1|name=?,age=?,dead=?", "Gordon", 3, true)
			if err != nil {
				return err
			}
			return endTx(tx)
		}
	}
	simulateBadConn("db.Tx.Exec commit", &hookCommitBadConn, dbExec((*Tx).Commit))
	simulateBadConn("db.Tx.Exec rollback", &hookRollbackBadConn, dbExec((*Tx).Rollback))
	// db.Query
	dbQuery := func(endTx func(tx *Tx) error) func() error {
		return func() error {
			tx, err := db.Begin()
			if err != nil {
				return err
			}
			rows, err := tx.Query("SELECT|t1|age,name|")
			if err == nil {
				err = rows.Close()
			} else {
				return err
			}
			return endTx(tx)
		}
	}
	simulateBadConn("db.Tx.Query commit", &hookCommitBadConn, dbQuery((*Tx).Commit))
	simulateBadConn("db.Tx.Query rollback", &hookRollbackBadConn, dbQuery((*Tx).Rollback))
}
// concurrentTest is the harness contract for the concurrency stress
// tests: init prepares any shared state against db, test is invoked
// from many goroutines at once, and finish releases the state.
type concurrentTest interface {
	init(t testing.TB, db *DB)
	finish(t testing.TB)
	test(t testing.TB) error
}
// concurrentDBQueryTest stresses DB.Query from many goroutines.
type concurrentDBQueryTest struct {
	db *DB
}
func (c *concurrentDBQueryTest) init(t testing.TB, db *DB) {
	c.db = db
}
func (c *concurrentDBQueryTest) finish(t testing.TB) {
	c.db = nil
}
// test runs one query and drains its rows; scan errors are
// deliberately ignored — only query/connection failures matter here.
func (c *concurrentDBQueryTest) test(t testing.TB) error {
	rows, err := c.db.Query("SELECT|people|name|")
	if err != nil {
		t.Error(err)
		return err
	}
	var name string
	for rows.Next() {
		rows.Scan(&name)
	}
	rows.Close()
	return nil
}
// concurrentDBExecTest stresses DB.Exec from many goroutines.
type concurrentDBExecTest struct {
	db *DB
}
func (c *concurrentDBExecTest) init(t testing.TB, db *DB) {
	c.db = db
}
func (c *concurrentDBExecTest) finish(t testing.TB) {
	c.db = nil
}
// test runs one NOSERT (a no-op insert recognized by fakedb).
func (c *concurrentDBExecTest) test(t testing.TB) error {
	_, err := c.db.Exec("NOSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?", 3, chrisBirthday)
	if err != nil {
		t.Error(err)
		return err
	}
	return nil
}
// concurrentStmtQueryTest stresses a single shared prepared
// statement's Query from many goroutines.
type concurrentStmtQueryTest struct {
	db *DB
	stmt *Stmt
}
func (c *concurrentStmtQueryTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.stmt, err = db.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
}
func (c *concurrentStmtQueryTest) finish(t testing.TB) {
	if c.stmt != nil {
		c.stmt.Close()
		c.stmt = nil
	}
	c.db = nil
}
// test runs one query on the shared statement and drains its rows.
func (c *concurrentStmtQueryTest) test(t testing.TB) error {
	rows, err := c.stmt.Query()
	if err != nil {
		t.Errorf("error on query: %v", err)
		return err
	}
	var name string
	for rows.Next() {
		rows.Scan(&name)
	}
	rows.Close()
	return nil
}
type concurrentStmtExecTest struct {
db *DB
stmt *Stmt
}
func (c *concurrentStmtExecTest) init(t testing.TB, db *DB) {
c.db = db
var err error
c.stmt, err = db.Prepare("NOSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?")
if err != nil {
t.Fatal(err)
}
}
func (c *concurrentStmtExecTest) finish(t testing.TB) {
if c.stmt != nil {
c.stmt.Close()
c.stmt = nil
}
c.db = nil
}
func (c *concurrentStmtExecTest) test(t testing.TB) error {
_, err := c.stmt.Exec(3, chrisBirthday)
if err != nil {
t.Errorf("error on exec: %v", err)
return err
}
return nil
}
// concurrentTxQueryTest queries inside a shared transaction from many
// goroutines.
type concurrentTxQueryTest struct {
	db *DB
	tx *Tx
}

func (c *concurrentTxQueryTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.tx, err = c.db.Begin()
	if err != nil {
		t.Fatal(err)
	}
}

func (c *concurrentTxQueryTest) finish(t testing.TB) {
	if c.tx != nil {
		c.tx.Rollback()
		c.tx = nil
	}
	c.db = nil
}

func (c *concurrentTxQueryTest) test(t testing.TB) error {
	// Query through the transaction, not the DB: the Tx created in init is
	// what this test type exists to exercise (the original queried c.db,
	// leaving the Tx path untested — compare concurrentTxExecTest).
	rows, err := c.tx.Query("SELECT|people|name|")
	if err != nil {
		t.Error(err)
		return err
	}
	var name string
	for rows.Next() {
		rows.Scan(&name)
	}
	rows.Close()
	return nil
}
// concurrentTxExecTest runs Exec inside a shared transaction from many
// goroutines.
type concurrentTxExecTest struct {
	db *DB
	tx *Tx
}

func (c *concurrentTxExecTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.tx, err = c.db.Begin()
	if err != nil {
		t.Fatal(err)
	}
}

func (c *concurrentTxExecTest) finish(t testing.TB) {
	if c.tx != nil {
		c.tx.Rollback()
		c.tx = nil
	}
	c.db = nil
}

func (c *concurrentTxExecTest) test(t testing.TB) error {
	_, err := c.tx.Exec("NOSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?", 3, chrisBirthday)
	if err != nil {
		t.Error(err)
		return err
	}
	return nil
}
// concurrentTxStmtQueryTest runs Query on a statement prepared within a
// shared transaction, from many goroutines.
type concurrentTxStmtQueryTest struct {
	db   *DB
	tx   *Tx
	stmt *Stmt
}

func (c *concurrentTxStmtQueryTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.tx, err = c.db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	c.stmt, err = c.tx.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
}

func (c *concurrentTxStmtQueryTest) finish(t testing.TB) {
	// Close the statement before rolling back the transaction it belongs to.
	if c.stmt != nil {
		c.stmt.Close()
		c.stmt = nil
	}
	if c.tx != nil {
		c.tx.Rollback()
		c.tx = nil
	}
	c.db = nil
}

func (c *concurrentTxStmtQueryTest) test(t testing.TB) error {
	rows, err := c.stmt.Query()
	if err != nil {
		t.Errorf("error on query: %v", err)
		return err
	}
	var name string
	for rows.Next() {
		rows.Scan(&name)
	}
	rows.Close()
	return nil
}

// concurrentTxStmtExecTest runs Exec on a statement prepared within a
// shared transaction, from many goroutines.
type concurrentTxStmtExecTest struct {
	db   *DB
	tx   *Tx
	stmt *Stmt
}

func (c *concurrentTxStmtExecTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.tx, err = c.db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	c.stmt, err = c.tx.Prepare("NOSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?")
	if err != nil {
		t.Fatal(err)
	}
}

func (c *concurrentTxStmtExecTest) finish(t testing.TB) {
	if c.stmt != nil {
		c.stmt.Close()
		c.stmt = nil
	}
	if c.tx != nil {
		c.tx.Rollback()
		c.tx = nil
	}
	c.db = nil
}

func (c *concurrentTxStmtExecTest) test(t testing.TB) error {
	_, err := c.stmt.Exec(3, chrisBirthday)
	if err != nil {
		t.Errorf("error on exec: %v", err)
		return err
	}
	return nil
}
// concurrentRandomTest wraps all the other harnesses and, on each call,
// runs one of them chosen at random, mixing operation types concurrently.
type concurrentRandomTest struct {
	tests []concurrentTest
}

func (c *concurrentRandomTest) init(t testing.TB, db *DB) {
	c.tests = []concurrentTest{
		new(concurrentDBQueryTest),
		new(concurrentDBExecTest),
		new(concurrentStmtQueryTest),
		new(concurrentStmtExecTest),
		new(concurrentTxQueryTest),
		new(concurrentTxExecTest),
		new(concurrentTxStmtQueryTest),
		new(concurrentTxStmtExecTest),
	}
	for _, ct := range c.tests {
		ct.init(t, db)
	}
}

func (c *concurrentRandomTest) finish(t testing.TB) {
	for _, ct := range c.tests {
		ct.finish(t)
	}
}

func (c *concurrentRandomTest) test(t testing.TB) error {
	// Pick a random sub-harness each invocation.
	ct := c.tests[rand.Intn(len(c.tests))]
	return ct.test(t)
}
// doConcurrentTest runs ct.test from a pool of worker goroutines, feeding
// numReqs requests through an unbuffered channel, and waits for all of
// them to finish. Errors are reported by ct.test itself via t.
func doConcurrentTest(t testing.TB, ct concurrentTest) {
	maxProcs, numReqs := 1, 500
	if testing.Short() {
		maxProcs, numReqs = 4, 50
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs))

	db := newTestDB(t, "people")
	defer closeDB(t, db)

	ct.init(t, db)
	defer ct.finish(t)

	var wg sync.WaitGroup
	wg.Add(numReqs)

	reqs := make(chan bool)
	defer close(reqs)

	// Twice as many workers as processors; each worker serves requests
	// until the channel is closed. The per-call error is already reported
	// inside ct.test, so it is not inspected again here.
	for worker := 0; worker < maxProcs*2; worker++ {
		go func() {
			for range reqs {
				ct.test(t)
				wg.Done()
			}
		}()
	}

	for sent := 0; sent < numReqs; sent++ {
		reqs <- true
	}
	wg.Wait()
}
// TestIssue6081 forces Rows.Close to report driver.ErrBadConn and verifies
// that the statement does not accumulate dead connection-statement pairs
// (stmt.css) and that the driver opens/closes replacement connections.
func TestIssue6081(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	drv := db.Driver().(*fakeDriver)
	drv.mu.Lock()
	opens0 := drv.openCount
	closes0 := drv.closeCount
	drv.mu.Unlock()
	stmt, err := db.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	// Make every Rows.Close look like the connection went bad.
	setRowsCloseHook(func(rows *Rows, err *error) {
		*err = driver.ErrBadConn
	})
	defer setRowsCloseHook(nil)
	for i := 0; i < 10; i++ {
		rows, err := stmt.Query()
		if err != nil {
			t.Fatal(err)
		}
		rows.Close()
	}
	if n := len(stmt.css); n > 1 {
		t.Errorf("len(css slice) = %d; want <= 1", n)
	}
	stmt.Close()
	if n := len(stmt.css); n != 0 {
		t.Errorf("len(css slice) after Close = %d; want 0", n)
	}
	// Each bad connection should have been closed and replaced.
	drv.mu.Lock()
	opens := drv.openCount - opens0
	closes := drv.closeCount - closes0
	drv.mu.Unlock()
	if opens < 9 {
		t.Errorf("opens = %d; want >= 9", opens)
	}
	if closes < 9 {
		t.Errorf("closes = %d; want >= 9", closes)
	}
}
// TestIssue18429 attempts to stress rolling back the transaction from a
// context cancel while simultaneously calling Tx.Rollback. Rolling back from a
// context happens concurrently so tx.rollback and tx.Commit must guard against
// double entry.
//
// In the test, a context is canceled while the query is in process so
// the internal rollback will run concurrently with the explicitly called
// Tx.Rollback.
//
// The addition of calling rows.Next also tests
// Issue 21117.
func TestIssue18429(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	ctx := context.Background()
	// Bound in-flight goroutines to 20 via a semaphore channel.
	sem := make(chan bool, 20)
	var wg sync.WaitGroup

	const milliWait = 30

	for i := 0; i < 100; i++ {
		sem <- true
		wg.Add(1)
		go func() {
			defer func() {
				<-sem
				wg.Done()
			}()
			// Random query duration and random context deadline so the
			// cancellation lands at varying points of the query lifecycle.
			qwait := (time.Duration(rand.Intn(milliWait)) * time.Millisecond).String()

			ctx, cancel := context.WithTimeout(ctx, time.Duration(rand.Intn(milliWait))*time.Millisecond)
			defer cancel()

			tx, err := db.BeginTx(ctx, nil)
			if err != nil {
				return
			}
			// This is expected to give a cancel error most, but not all the time.
			// Test failure will happen with a panic or other race condition being
			// reported.
			rows, _ := tx.QueryContext(ctx, "WAIT|"+qwait+"|SELECT|people|name|")
			if rows != nil {
				var name string
				// Call Next to test Issue 21117 and check for races.
				for rows.Next() {
					// Scan the buffer so it is read and checked for races.
					rows.Scan(&name)
				}
				rows.Close()
			}
			// This call will race with the context cancel rollback to complete
			// if the rollback itself isn't guarded.
			tx.Rollback()
		}()
	}
	wg.Wait()
}
// TestIssue20160 attempts to test a short context life on a stmt Query.
func TestIssue20160(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	ctx := context.Background()
	// Bound in-flight goroutines to 20 via a semaphore channel.
	sem := make(chan bool, 20)
	var wg sync.WaitGroup

	const milliWait = 30

	stmt, err := db.PrepareContext(ctx, "SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	defer stmt.Close()

	for i := 0; i < 100; i++ {
		sem <- true
		wg.Add(1)
		go func() {
			defer func() {
				<-sem
				wg.Done()
			}()
			// Random short deadline so cancellation lands at varying points.
			ctx, cancel := context.WithTimeout(ctx, time.Duration(rand.Intn(milliWait))*time.Millisecond)
			defer cancel()

			// This is expected to give a cancel error most, but not all the time.
			// Test failure will happen with a panic or other race condition being
			// reported.
			rows, _ := stmt.QueryContext(ctx)
			if rows != nil {
				rows.Close()
			}
		}()
	}
	wg.Wait()
}
// TestIssue18719 closes the context right before use. The sql.driverConn
// will nil out the ci on close in a lock, but if another process uses it right after
// it will panic with on the nil ref.
//
// See https://golang.org/cl/35550 .
func TestIssue18719(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		t.Fatal(err)
	}

	// The hook fires once the query has grabbed the Tx's connection; cancel
	// then and block until the context-driven rollback has completed.
	hookTxGrabConn = func() {
		cancel()

		// Wait for the context to cancel and tx to rollback.
		for tx.isDone() == false {
			time.Sleep(3 * time.Millisecond)
		}
	}
	defer func() { hookTxGrabConn = nil }()

	// This call will grab the connection and cancel the context
	// after it has done so. Code after must deal with the canceled state.
	_, err = tx.QueryContext(ctx, "SELECT|people|name|")
	if err != nil {
		t.Fatalf("expected error %v but got %v", nil, err)
	}

	// Rows may be ignored because it will be closed when the context is canceled.

	// Do not explicitly rollback. The rollback will happen from the
	// canceled context.

	cancel()
}
// TestIssue20647 verifies that two queries on a statement prepared on a
// dedicated Conn both run on that same underlying driver connection.
func TestIssue20647(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	conn, err := db.Conn(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Allow overlapping open Rows on the same fake connection.
	conn.dc.ci.(*fakeConn).skipDirtySession = true
	defer conn.Close()

	stmt, err := conn.PrepareContext(ctx, "SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	defer stmt.Close()

	rows1, err := stmt.QueryContext(ctx)
	if err != nil {
		t.Fatal("rows1", err)
	}
	defer rows1.Close()

	rows2, err := stmt.QueryContext(ctx)
	if err != nil {
		t.Fatal("rows2", err)
	}
	defer rows2.Close()

	if rows1.dc != rows2.dc {
		t.Fatal("stmt prepared on Conn does not use same connection")
	}
}
// TestConcurrency runs every concurrency harness as its own named subtest.
func TestConcurrency(t *testing.T) {
	cases := []struct {
		name string
		ct   concurrentTest
	}{
		{"Query", new(concurrentDBQueryTest)},
		{"Exec", new(concurrentDBExecTest)},
		{"StmtQuery", new(concurrentStmtQueryTest)},
		{"StmtExec", new(concurrentStmtExecTest)},
		{"TxQuery", new(concurrentTxQueryTest)},
		{"TxExec", new(concurrentTxExecTest)},
		{"TxStmtQuery", new(concurrentTxStmtQueryTest)},
		{"TxStmtExec", new(concurrentTxStmtExecTest)},
		{"Random", new(concurrentRandomTest)},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			doConcurrentTest(t, tc.ct)
		})
	}
}
// TestConnectionLeak arranges for a freshly opened connection to become
// useless (idle queue already full, no waiters) and checks that DB discards
// it rather than leaking it.
func TestConnectionLeak(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	// Start by opening defaultMaxIdleConns
	rows := make([]*Rows, defaultMaxIdleConns)
	// We need to SetMaxOpenConns > MaxIdleConns, so the DB can open
	// a new connection and we can fill the idle queue with the released
	// connections.
	db.SetMaxOpenConns(len(rows) + 1)
	for ii := range rows {
		r, err := db.Query("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		r.Next()
		if err := r.Err(); err != nil {
			t.Fatal(err)
		}
		rows[ii] = r
	}
	// Now we have defaultMaxIdleConns busy connections. Open
	// a new one, but wait until the busy connections are released
	// before returning control to DB.
	drv := db.Driver().(*fakeDriver)
	drv.waitCh = make(chan struct{}, 1)
	drv.waitingCh = make(chan struct{}, 1)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		r, err := db.Query("SELECT|people|name|")
		if err != nil {
			t.Error(err)
			return
		}
		r.Close()
		wg.Done()
	}()
	// Wait until the goroutine we've just created has started waiting.
	<-drv.waitingCh
	// Now close the busy connections. This provides a connection for
	// the blocked goroutine and then fills up the idle queue.
	for _, v := range rows {
		v.Close()
	}
	// At this point we give the new connection to DB. This connection is
	// now useless, since the idle queue is full and there are no pending
	// requests. DB should deal with this situation without leaking the
	// connection.
	drv.waitCh <- struct{}{}
	wg.Wait()
}
// TestStatsMaxIdleClosedZero verifies that with an idle slot available
// (SetMaxIdleConns(1)), released connections are kept and the
// MaxIdleClosed counter does not move.
func TestStatsMaxIdleClosedZero(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	db.SetMaxOpenConns(1)
	db.SetMaxIdleConns(1)
	db.SetConnMaxLifetime(0)

	preMaxIdleClosed := db.Stats().MaxIdleClosed

	for i := 0; i < 10; i++ {
		rows, err := db.Query("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		rows.Close()
	}

	st := db.Stats()
	maxIdleClosed := st.MaxIdleClosed - preMaxIdleClosed
	t.Logf("MaxIdleClosed: %d", maxIdleClosed)
	if maxIdleClosed != 0 {
		t.Fatal("expected 0 max idle closed conns, got: ", maxIdleClosed)
	}
}
// TestStatsMaxIdleClosedTen verifies that with no idle slots allowed
// (SetMaxIdleConns(0)), every one of the 10 released connections is closed
// and counted in MaxIdleClosed.
func TestStatsMaxIdleClosedTen(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	db.SetMaxOpenConns(1)
	db.SetMaxIdleConns(0)
	db.SetConnMaxLifetime(0)

	preMaxIdleClosed := db.Stats().MaxIdleClosed

	for i := 0; i < 10; i++ {
		rows, err := db.Query("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		rows.Close()
	}

	st := db.Stats()
	maxIdleClosed := st.MaxIdleClosed - preMaxIdleClosed
	t.Logf("MaxIdleClosed: %d", maxIdleClosed)
	if maxIdleClosed != 10 {
		// Failure message fixed: this test expects 10, not 0 (the original
		// message was copy-pasted from TestStatsMaxIdleClosedZero).
		t.Fatal("expected 10 max idle closed conns, got: ", maxIdleClosed)
	}
}
// TestMaxIdleTime checks SetConnMaxIdleTime by faking the clock: after
// advancing nowFunc past (or not past) the idle deadline, the connection
// cleaner must close exactly the expected number of idle connections and
// account for them in MaxIdleTimeClosed.
func TestMaxIdleTime(t *testing.T) {
	list := []struct {
		wantMaxIdleTime time.Duration
		wantIdleClosed  int64
		timeOffset      time.Duration
	}{
		// Deadline of 1ns with the clock advanced 10ms: the idle conn expires.
		{time.Nanosecond, 1, 10 * time.Millisecond},
		// Deadline of 1h with the clock advanced 10ms: nothing expires.
		{time.Hour, 0, 10 * time.Millisecond},
	}
	baseTime := time.Unix(0, 0)
	defer func() {
		nowFunc = time.Now
	}()
	for _, item := range list {
		nowFunc = func() time.Time {
			return baseTime
		}
		t.Run(fmt.Sprintf("%v", item.wantMaxIdleTime), func(t *testing.T) {
			db := newTestDB(t, "people")
			defer closeDB(t, db)

			db.SetMaxOpenConns(1)
			db.SetMaxIdleConns(1)
			db.SetConnMaxIdleTime(item.wantMaxIdleTime)
			db.SetConnMaxLifetime(0)

			preMaxIdleClosed := db.Stats().MaxIdleTimeClosed

			// Ping puts one connection into the idle queue at baseTime.
			if err := db.Ping(); err != nil {
				t.Fatal(err)
			}

			// Advance the fake clock, then run the cleaner by hand.
			nowFunc = func() time.Time {
				return baseTime.Add(item.timeOffset)
			}

			db.mu.Lock()
			closing := db.connectionCleanerRunLocked()
			db.mu.Unlock()
			for _, c := range closing {
				c.Close()
			}
			if g, w := int64(len(closing)), item.wantIdleClosed; g != w {
				t.Errorf("got: %d; want %d closed conns", g, w)
			}

			st := db.Stats()
			maxIdleClosed := st.MaxIdleTimeClosed - preMaxIdleClosed
			if g, w := maxIdleClosed, item.wantIdleClosed; g != w {
				t.Errorf(" got: %d; want %d max idle closed conns", g, w)
			}
		})
	}
}
// nvcDriver wraps fakeDriver so its connections implement
// driver.NamedValueChecker; skipNamedValueCheck makes the check return
// driver.ErrSkip so the default argument converter runs instead.
type nvcDriver struct {
	fakeDriver
	skipNamedValueCheck bool
}

// Open opens a fake connection and wraps it in an nvcConn.
func (d *nvcDriver) Open(dsn string) (driver.Conn, error) {
	c, err := d.fakeDriver.Open(dsn)
	// Check the error before the type assertion: on failure c may be nil
	// and the assertion would panic (the original asserted unconditionally).
	if err != nil {
		return nil, err
	}
	fc := c.(*fakeConn)
	fc.db.allowAny = true
	return &nvcConn{fc, d.skipNamedValueCheck}, nil
}
// nvcConn is a fakeConn that additionally implements
// driver.NamedValueChecker.
type nvcConn struct {
	*fakeConn
	skipNamedValueCheck bool
}

// decimalInt is a custom argument type only the NamedValueChecker accepts.
type decimalInt struct {
	value int
}

// doNotInclude is an argument type the checker strips from the final
// argument list via driver.ErrRemoveArgument.
type doNotInclude struct{}

var _ driver.NamedValueChecker = &nvcConn{}

// CheckNamedValue accepts decimalInt and []int64 as-is, handles Out output
// parameters by writing to their destination, removes doNotInclude
// arguments, and defers everything else to the default converter.
func (c *nvcConn) CheckNamedValue(nv *driver.NamedValue) error {
	if c.skipNamedValueCheck {
		return driver.ErrSkip
	}
	switch v := nv.Value.(type) {
	default:
		return driver.ErrSkip
	case Out:
		switch ov := v.Dest.(type) {
		default:
			return errors.New("unknown NameValueCheck OUTPUT type")
		case *string:
			// Simulate the server filling an output parameter.
			*ov = "from-server"
			nv.Value = "OUT:*string"
		}
		return nil
	case decimalInt, []int64:
		return nil
	case doNotInclude:
		return driver.ErrRemoveArgument
	}
}
// TestNamedValueChecker exercises every CheckNamedValue path: custom types
// passed through, an Out parameter written by the "server", an argument
// removed via ErrRemoveArgument, and round-trips the values back out.
func TestNamedValueChecker(t *testing.T) {
	Register("NamedValueCheck", &nvcDriver{})
	db, err := Open("NamedValueCheck", "")
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, err = db.ExecContext(ctx, "WIPE")
	if err != nil {
		t.Fatal("exec wipe", err)
	}

	_, err = db.ExecContext(ctx, "CREATE|keys|dec1=any,str1=string,out1=string,array1=any")
	if err != nil {
		t.Fatal("exec create", err)
	}

	o1 := ""
	_, err = db.ExecContext(ctx, "INSERT|keys|dec1=?A,str1=?,out1=?O1,array1=?", Named("A", decimalInt{123}), "hello", Named("O1", Out{Dest: &o1}), []int64{42, 128, 707}, doNotInclude{})
	if err != nil {
		t.Fatal("exec insert", err)
	}
	var (
		str1 string
		dec1 decimalInt
		arr1 []int64
	)
	err = db.QueryRowContext(ctx, "SELECT|keys|dec1,str1,array1|").Scan(&dec1, &str1, &arr1)
	if err != nil {
		t.Fatal("select", err)
	}

	list := []struct{ got, want interface{} }{
		{o1, "from-server"},
		{dec1, decimalInt{123}},
		{str1, "hello"},
		{arr1, []int64{42, 128, 707}},
	}

	for index, item := range list {
		if !reflect.DeepEqual(item.got, item.want) {
			t.Errorf("got %#v wanted %#v for index %d", item.got, item.want, index)
		}
	}
}

// TestNamedValueCheckerSkip verifies that when CheckNamedValue returns
// driver.ErrSkip, the default converter runs and rejects the custom
// decimalInt argument.
func TestNamedValueCheckerSkip(t *testing.T) {
	Register("NamedValueCheckSkip", &nvcDriver{skipNamedValueCheck: true})
	db, err := Open("NamedValueCheckSkip", "")
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, err = db.ExecContext(ctx, "WIPE")
	if err != nil {
		t.Fatal("exec wipe", err)
	}

	_, err = db.ExecContext(ctx, "CREATE|keys|dec1=any")
	if err != nil {
		t.Fatal("exec create", err)
	}

	_, err = db.ExecContext(ctx, "INSERT|keys|dec1=?A", Named("A", decimalInt{123}))
	if err == nil {
		t.Fatalf("expected error with bad argument, got %v", err)
	}
}
// TestOpenConnector verifies that opening a DriverContext-based driver goes
// through a Connector and that closing the DB also closes the connector.
func TestOpenConnector(t *testing.T) {
	Register("testctx", &fakeDriverCtx{})
	db, err := Open("testctx", "people")
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	c, ok := db.connector.(*fakeConnector)
	if !ok {
		t.Fatal("not using *fakeConnector")
	}

	if err := db.Close(); err != nil {
		t.Fatal(err)
	}

	if !c.closed {
		t.Fatal("connector is not closed")
	}
}
// ctxOnlyDriver produces connections that only implement the Context
// variants of the driver interfaces (QueryerContext/ExecerContext).
type ctxOnlyDriver struct {
	fakeDriver
}

func (d *ctxOnlyDriver) Open(dsn string) (driver.Conn, error) {
	conn, err := d.fakeDriver.Open(dsn)
	if err != nil {
		return nil, err
	}
	return &ctxOnlyConn{fc: conn.(*fakeConn)}, nil
}

// Compile-time checks that ctxOnlyConn implements exactly the intended
// context-based driver interfaces.
var (
	_ driver.Conn           = &ctxOnlyConn{}
	_ driver.QueryerContext = &ctxOnlyConn{}
	_ driver.ExecerContext  = &ctxOnlyConn{}
)

// ctxOnlyConn wraps fakeConn and records whether the Context methods were
// actually invoked by database/sql.
type ctxOnlyConn struct {
	fc *fakeConn

	queryCtxCalled bool
	execCtxCalled  bool
}

func (c *ctxOnlyConn) Begin() (driver.Tx, error) {
	return c.fc.Begin()
}

func (c *ctxOnlyConn) Close() error {
	return c.fc.Close()
}

// Prepare is still part of the Conn interface, so while it isn't used
// must be defined for compatibility.
func (c *ctxOnlyConn) Prepare(q string) (driver.Stmt, error) {
	panic("not used")
}

func (c *ctxOnlyConn) PrepareContext(ctx context.Context, q string) (driver.Stmt, error) {
	return c.fc.PrepareContext(ctx, q)
}

func (c *ctxOnlyConn) QueryContext(ctx context.Context, q string, args []driver.NamedValue) (driver.Rows, error) {
	c.queryCtxCalled = true
	return c.fc.QueryContext(ctx, q, args)
}

func (c *ctxOnlyConn) ExecContext(ctx context.Context, q string, args []driver.NamedValue) (driver.Result, error) {
	c.execCtxCalled = true
	return c.fc.ExecContext(ctx, q, args)
}
// TestQueryExecContextOnly ensures drivers only need to implement QueryContext
// and ExecContext methods.
func TestQueryExecContextOnly(t *testing.T) {
	// Ensure connection does not implement non-context interfaces.
	var connType driver.Conn = &ctxOnlyConn{}
	if _, ok := connType.(driver.Execer); ok {
		t.Fatalf("%T must not implement driver.Execer", connType)
	}
	if _, ok := connType.(driver.Queryer); ok {
		t.Fatalf("%T must not implement driver.Queryer", connType)
	}

	Register("ContextOnly", &ctxOnlyDriver{})
	db, err := Open("ContextOnly", "")
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Use a dedicated Conn so we can reach the underlying ctxOnlyConn and
	// inspect its call-tracking flags afterwards.
	conn, err := db.Conn(ctx)
	if err != nil {
		t.Fatal("db.Conn", err)
	}
	defer conn.Close()
	coc := conn.dc.ci.(*ctxOnlyConn)
	coc.fc.skipDirtySession = true

	_, err = conn.ExecContext(ctx, "WIPE")
	if err != nil {
		t.Fatal("exec wipe", err)
	}

	_, err = conn.ExecContext(ctx, "CREATE|keys|v1=string")
	if err != nil {
		t.Fatal("exec create", err)
	}
	expectedValue := "value1"
	_, err = conn.ExecContext(ctx, "INSERT|keys|v1=?", expectedValue)
	if err != nil {
		t.Fatal("exec insert", err)
	}
	rows, err := conn.QueryContext(ctx, "SELECT|keys|v1|")
	if err != nil {
		t.Fatal("query select", err)
	}
	v1 := ""
	for rows.Next() {
		err = rows.Scan(&v1)
		if err != nil {
			t.Fatal("rows scan", err)
		}
	}
	rows.Close()

	if v1 != expectedValue {
		t.Fatalf("expected %q, got %q", expectedValue, v1)
	}

	// Both context code paths must have been taken.
	if !coc.execCtxCalled {
		t.Error("ExecContext not called")
	}
	if !coc.queryCtxCalled {
		t.Error("QueryContext not called")
	}
}
// alwaysErrScanner is a sql.Scanner whose Scan always fails with
// errTestScanWrap, used to test error wrapping in Rows.Scan.
type alwaysErrScanner struct{}

var errTestScanWrap = errors.New("errTestScanWrap")

func (alwaysErrScanner) Scan(interface{}) error {
	return errTestScanWrap
}

// Issue 38099: Ensure that Rows.Scan properly wraps underlying errors.
func TestRowsScanProperlyWrapsErrors(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	rows, err := db.Query("SELECT|people|age|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}

	var res alwaysErrScanner

	for rows.Next() {
		err = rows.Scan(&res)
		if err == nil {
			t.Fatal("expecting back an error")
		}
		// The scanner's error must survive wrapping (errors.Is)...
		if !errors.Is(err, errTestScanWrap) {
			t.Fatalf("errors.Is mismatch\n%v\nWant: %v", err, errTestScanWrap)
		}
		// Ensure that error substring matching still correctly works.
		if !strings.Contains(err.Error(), errTestScanWrap.Error()) {
			t.Fatalf("Error %v does not contain %v", err, errTestScanWrap)
		}
	}
}
// badConn implements a bad driver.Conn, for TestBadDriver.
// The Exec method panics.
type badConn struct{}

func (bc badConn) Prepare(query string) (driver.Stmt, error) {
	return nil, errors.New("badConn Prepare")
}

func (bc badConn) Close() error {
	return nil
}

func (bc badConn) Begin() (driver.Tx, error) {
	return nil, errors.New("badConn Begin")
}

func (bc badConn) Exec(query string, args []driver.Value) (driver.Result, error) {
	panic("badConn.Exec")
}

// badDriver is a driver.Driver that uses badConn.
type badDriver struct{}

func (bd badDriver) Open(name string) (driver.Conn, error) {
	return badConn{}, nil
}

// Issue 15901.
// TestBadDriver checks that a panic raised inside a driver's Exec is
// propagated to the caller rather than swallowed by database/sql.
func TestBadDriver(t *testing.T) {
	Register("bad", badDriver{})
	db, err := Open("bad", "ignored")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if r := recover(); r == nil {
			t.Error("expected panic")
		} else {
			if want := "badConn.Exec"; r.(string) != want {
				t.Errorf("panic was %v, expected %v", r, want)
			}
		}
	}()
	defer db.Close()
	db.Exec("ignored")
}
// pingDriver produces connections whose Ping succeeds or fails depending
// on the driver's fails flag.
type pingDriver struct {
	fails bool
}

type pingConn struct {
	badConn
	driver *pingDriver
}

var pingError = errors.New("Ping failed")

func (pc pingConn) Ping(ctx context.Context) error {
	if pc.driver.fails {
		return pingError
	}
	return nil
}

// Compile-time check that pingConn implements driver.Pinger.
var _ driver.Pinger = pingConn{}

func (pd *pingDriver) Open(name string) (driver.Conn, error) {
	return pingConn{driver: pd}, nil
}

// TestPing checks that DB.Ping succeeds while the driver is healthy and
// returns the driver's error once the driver starts failing.
func TestPing(t *testing.T) {
	driver := &pingDriver{}
	Register("ping", driver)

	db, err := Open("ping", "ignored")
	if err != nil {
		t.Fatal(err)
	}

	if err := db.Ping(); err != nil {
		t.Errorf("err was %#v, expected nil", err)
		return
	}

	driver.fails = true
	if err := db.Ping(); err != pingError {
		t.Errorf("err was %#v, expected pingError", err)
	}
}
// Issue 18101.
// TestTypedString ensures a user-defined string type can be scanned
// directly from a string column.
func TestTypedString(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	type Str string
	const want = Str("Alice")

	var got Str
	if err := db.QueryRow("SELECT|people|name|name=?", "Alice").Scan(&got); err != nil {
		t.Fatal(err)
	}
	if got != want {
		t.Errorf("expected %+v, got %+v", want, got)
	}
}
// The benchmarks below reuse the concurrency harnesses, running one full
// doConcurrentTest pass per benchmark iteration.

func BenchmarkConcurrentDBExec(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentDBExecTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentStmtQuery(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentStmtQueryTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentStmtExec(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentStmtExecTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentTxQuery(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentTxQueryTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentTxExec(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentTxExecTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentTxStmtQuery(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentTxStmtQueryTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentTxStmtExec(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentTxStmtExecTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentRandom(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentRandomTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

// BenchmarkManyConcurrentQueries hammers one prepared statement from many
// parallel goroutines to surface lock contention in the connection pool.
func BenchmarkManyConcurrentQueries(b *testing.B) {
	b.ReportAllocs()
	// To see lock contention in Go 1.4, 16~ cores and 128~ goroutines are required.
	const parallelism = 16

	db := newTestDB(b, "magicquery")
	defer closeDB(b, db)
	db.SetMaxIdleConns(runtime.GOMAXPROCS(0) * parallelism)

	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		b.Fatal(err)
	}
	defer stmt.Close()

	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			rows, err := stmt.Query("sleep", 1)
			if err != nil {
				b.Error(err)
				return
			}
			rows.Close()
		}
	})
}
|
package main
import (
"bytes"
"encoding/json"
"net"
"net/http"
"sync"
)
// ipMap is a set of IP strings guarded by an RWMutex.
type ipMap struct {
	sync.RWMutex
	m map[string]bool
}

// hookService POSTs IP add/remove/sync notifications to the configured
// webhook endpoints and caches which IPs have already been announced.
type hookService struct {
	client http.Client

	AddIPsUri    string
	RemoveIPsUri string
	SyncIPsUri   string

	// hookedIPs tracks IPs already sent to the add hook, keyed by ip.String().
	hookedIPs ipMap
}
// sendHTTPHook POSTs the given IPs, JSON-encoded as an array, to endpoint u.
// NOTE(review): the response status code is never inspected and the body is
// closed without being drained — confirm whether non-2xx replies should be
// treated as errors and whether draining matters for connection reuse here.
func (h *hookService) sendHTTPHook(ips []net.IP, u string) error {
	buf := new(bytes.Buffer)
	err := json.NewEncoder(buf).Encode(ips)
	if err != nil {
		return err
	}

	req, err := http.NewRequest("POST", u, buf)
	if err != nil {
		return err
	}

	resp, err := h.client.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()

	return nil
}
// Add announces a new IP to the add-hook endpoint and records it so repeat
// announcements of the same IP are suppressed. The hook is only sent when
// AddIPsUri is configured; otherwise Add is a no-op (the original issued a
// POST to an empty URL when no add hook was set).
func (h *hookService) Add(ip net.IP) error {
	if h.AddIPsUri == "" {
		return nil
	}

	h.hookedIPs.Lock()
	defer h.hookedIPs.Unlock()

	// Short circuit already sent IPs
	if h.hookedIPs.m[ip.String()] {
		return nil
	}

	if err := h.sendHTTPHook([]net.IP{ip}, h.AddIPsUri); err != nil {
		return err
	}

	h.hookedIPs.m[ip.String()] = true
	return nil
}
// Remove announces a removed IP to the remove-hook endpoint and drops it
// from the local cache. The hook is only sent when RemoveIPsUri is
// configured; otherwise Remove is a no-op (the original issued a POST to an
// empty URL when no remove hook was set).
func (h *hookService) Remove(ip net.IP) error {
	if h.RemoveIPsUri == "" {
		return nil
	}

	if err := h.sendHTTPHook([]net.IP{ip}, h.RemoveIPsUri); err != nil {
		return err
	}

	h.hookedIPs.Lock()
	defer h.hookedIPs.Unlock()
	delete(h.hookedIPs.m, ip.String())
	return nil
}
// Sync pushes the complete IP set to the sync-hook endpoint and rebuilds
// the local cache to match. No-op unless SyncIPsUri is configured.
func (h *hookService) Sync(ips []net.IP) error {
	if h.SyncIPsUri != "" {
		err := h.sendHTTPHook(ips, h.SyncIPsUri)
		if err != nil {
			return err
		}

		// Replace the cache wholesale; every synced IP counts as announced.
		h.hookedIPs.Lock()
		defer h.hookedIPs.Unlock()
		h.hookedIPs.m = make(map[string]bool)
		for _, ip := range ips {
			h.hookedIPs.m[ip.String()] = true
		}
	}
	return nil
}
Only call add/remove HTTP hooks if URIs are set.
package main
import (
"bytes"
"encoding/json"
"net"
"net/http"
"sync"
)
// ipMap is a set of IP strings guarded by an RWMutex.
type ipMap struct {
	sync.RWMutex
	m map[string]bool
}

// hookService POSTs IP add/remove/sync notifications to the configured
// webhook endpoints and caches which IPs have already been announced.
type hookService struct {
	client http.Client

	AddIPsUri    string
	RemoveIPsUri string
	SyncIPsUri   string

	// hookedIPs tracks IPs already sent to the add hook, keyed by ip.String().
	hookedIPs ipMap
}
// sendHTTPHook POSTs the given IPs, JSON-encoded as an array, to endpoint u.
func (h *hookService) sendHTTPHook(ips []net.IP, u string) error {
	var body bytes.Buffer
	if err := json.NewEncoder(&body).Encode(ips); err != nil {
		return err
	}

	req, err := http.NewRequest("POST", u, &body)
	if err != nil {
		return err
	}

	resp, err := h.client.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	return nil
}
// Add announces a new IP to the add-hook endpoint and records it so repeat
// announcements are suppressed. No-op unless AddIPsUri is configured.
func (h *hookService) Add(ip net.IP) error {
	if h.AddIPsUri != "" {
		h.hookedIPs.Lock()
		defer h.hookedIPs.Unlock()

		// Short circuit already sent IPs
		if h.hookedIPs.m[ip.String()] == true {
			return nil
		}

		err := h.sendHTTPHook([]net.IP{ip}, h.AddIPsUri)
		if err != nil {
			return err
		}

		// Only mark as announced after a successful hook delivery.
		h.hookedIPs.m[ip.String()] = true
	}

	return nil
}

// Remove announces a removed IP to the remove-hook endpoint and drops it
// from the local cache. No-op unless RemoveIPsUri is configured.
func (h *hookService) Remove(ip net.IP) error {
	if h.RemoveIPsUri != "" {
		err := h.sendHTTPHook([]net.IP{ip}, h.RemoveIPsUri)
		if err != nil {
			return err
		}

		h.hookedIPs.Lock()
		defer h.hookedIPs.Unlock()
		delete(h.hookedIPs.m, ip.String())
	}

	return nil
}
// Sync pushes the complete IP set to the sync-hook endpoint and rebuilds
// the local cache to match. No-op unless SyncIPsUri is configured.
func (h *hookService) Sync(ips []net.IP) error {
	if h.SyncIPsUri == "" {
		return nil
	}

	if err := h.sendHTTPHook(ips, h.SyncIPsUri); err != nil {
		return err
	}

	h.hookedIPs.Lock()
	defer h.hookedIPs.Unlock()

	// Replace the cache wholesale; every synced IP counts as announced.
	fresh := make(map[string]bool, len(ips))
	for _, ip := range ips {
		fresh[ip.String()] = true
	}
	h.hookedIPs.m = fresh
	return nil
}
|
package rocserv
import (
"context"
"sync"
"time"
"github.com/shawnfeng/sutil/slog"
)
const (
	defaultCapacity    = 32 // initial number of connection wrappers; can be fairly small
	defaultMaxCapacity = 512
	defaultIdleTimeout = time.Second * 120
)
// ClientPool every addr has a connection pool, each backend server has more than one addr, in client side, it's ClientPool
type ClientPool struct {
	calleeServiceKey string
	capacity         int
	maxCapacity      int
	idleTimeout      time.Duration
	// clientPool maps addr -> *ConnectionPool.
	clientPool sync.Map
	rpcFactory func(addr string) (rpcClientConn, error)
}

// NewClientPool is the constructor of ClientPool. As the original comment
// promised ("如果连接数过低,修正为默认值" — correct to defaults when too low),
// non-positive capacities are now actually corrected to the package
// defaults, which were previously declared but never applied.
func NewClientPool(capacity, maxCapacity int, rpcFactory func(addr string) (rpcClientConn, error), calleeServiceKey string) *ClientPool {
	if capacity <= 0 {
		capacity = defaultCapacity
	}
	if maxCapacity <= 0 {
		maxCapacity = defaultMaxCapacity
	}
	return &ClientPool{capacity: capacity, maxCapacity: maxCapacity, rpcFactory: rpcFactory, calleeServiceKey: calleeServiceKey, idleTimeout: defaultIdleTimeout}
}
// Get get connection from pool, if reach max, create new connection and return
func (m *ClientPool) Get(addr string) (rpcClientConn, error) {
	fun := "ClientPool.Get -->"
	cp := m.getPool(addr)
	ctx, cancel := context.WithTimeout(context.Background(), getConnTimeout)
	defer cancel()
	c, err := cp.Get(ctx)
	if err != nil {
		// Log the callee service and the underlying error as well; the
		// original message dropped both, making failures hard to diagnose.
		slog.Errorf("%s get conn from connection pool failed, callee_service: %s, addr: %s, err: %v", fun, m.calleeServiceKey, addr, err)
		return nil, err
	}
	return c.(rpcClientConn), nil
}
// Put returns a connection to the pool. If the caller observed an error,
// the connection is closed instead of being recycled, and a nil is handed
// to the pool so its bookkeeping stays balanced.
func (m *ClientPool) Put(addr string, client rpcClientConn, err error) {
	fun := "ClientPool.Put -->"
	cp := m.getPool(addr)
	// close client and don't put to pool
	if err != nil {
		slog.Warnf("%s put rpc client to pool with err: %v, callee_service: %s, addr: %s", fun, err, m.calleeServiceKey, addr)
		client.Close()
		cp.Put(nil)
		return
	}
	cp.Put(client)
}
// Close closes every per-addr ConnectionPool held by this ClientPool.
func (m *ClientPool) Close() {
	m.clientPool.Range(func(_, value interface{}) bool {
		if pool, ok := value.(*ConnectionPool); ok {
			pool.Close()
		}
		return true
	})
}
// getPool returns the ConnectionPool for addr, creating and opening one on
// first use. LoadOrStore is used so two goroutines racing on the same addr
// cannot both install a pool: the original Load-then-Store sequence could
// overwrite a freshly stored pool, leaking its open connections.
func (m *ClientPool) getPool(addr string) *ConnectionPool {
	fun := "ClientPool.getPool -->"
	if value, ok := m.clientPool.Load(addr); ok {
		return value.(*ConnectionPool)
	}
	slog.Infof("%s not found connection pool of callee_service: %s, addr: %s, create it", fun, m.calleeServiceKey, addr)
	cp := NewConnectionPool(addr, m.capacity, m.maxCapacity, m.idleTimeout, m.rpcFactory, m.calleeServiceKey)
	cp.Open()
	if actual, loaded := m.clientPool.LoadOrStore(addr, cp); loaded {
		// Lost the race: discard our pool and use the one already stored.
		cp.Close()
		return actual.(*ConnectionPool)
	}
	return cp
}
optimize log in client pool Get by add callee_service, err info, etc.
package rocserv
import (
"context"
"sync"
"time"
"github.com/shawnfeng/sutil/slog"
)
const (
	defaultCapacity    = 32 // initial number of connection wrappers; can be fairly small
	defaultMaxCapacity = 512
	defaultIdleTimeout = time.Second * 120
)
// ClientPool every addr has a connection pool, each backend server has more than one addr, in client side, it's ClientPool
type ClientPool struct {
	calleeServiceKey string
	capacity         int
	maxCapacity      int
	idleTimeout      time.Duration
	// clientPool maps addr -> *ConnectionPool.
	clientPool sync.Map
	rpcFactory func(addr string) (rpcClientConn, error)
}

// NewClientPool is the constructor of ClientPool. The original comment says
// low connection counts are corrected to defaults ("如果连接数过低,修正为默认值"),
// but no such correction is performed here — NOTE(review): defaultCapacity
// and defaultMaxCapacity are never applied; confirm intended behavior.
func NewClientPool(capacity, maxCapacity int, rpcFactory func(addr string) (rpcClientConn, error), calleeServiceKey string) *ClientPool {
	return &ClientPool{capacity: capacity, maxCapacity: maxCapacity, rpcFactory: rpcFactory, calleeServiceKey: calleeServiceKey, idleTimeout: defaultIdleTimeout}
}
// Get get connection from pool, if reach max, create new connection and return
func (m *ClientPool) Get(addr string) (rpcClientConn, error) {
	fun := "ClientPool.Get -->"
	cp := m.getPool(addr)
	// Bound how long we wait for a free connection.
	ctx, cancel := context.WithTimeout(context.Background(), getConnTimeout)
	defer cancel()
	c, err := cp.Get(ctx)
	if err != nil {
		slog.Errorf("%s get conn from connection pool failed, callee_service: %s, addr: %s, err: %v", fun, m.calleeServiceKey, addr, err)
		return nil, err
	}
	return c.(rpcClientConn), nil
}

// Put returns a connection to the pool. If the caller observed an error,
// the connection is closed instead of being recycled, and a nil is handed
// to the pool so its bookkeeping stays balanced.
func (m *ClientPool) Put(addr string, client rpcClientConn, err error) {
	fun := "ClientPool.Put -->"
	cp := m.getPool(addr)
	// close client and don't put to pool
	if err != nil {
		slog.Warnf("%s put rpc client to pool with err: %v, callee_service: %s, addr: %s", fun, err, m.calleeServiceKey, addr)
		client.Close()
		cp.Put(nil)
		return
	}
	cp.Put(client)
}
// Close closes every per-addr ConnectionPool held by this ClientPool.
func (m *ClientPool) Close() {
	closeConnectionPool := func(key, value interface{}) bool {
		if connectionPool, ok := value.(*ConnectionPool); ok {
			connectionPool.Close()
		}
		// Always continue the Range iteration.
		return true
	}
	m.clientPool.Range(closeConnectionPool)
}
// getPool returns the ConnectionPool for addr, creating and opening one on
// first use. LoadOrStore is used so two goroutines racing on the same addr
// cannot both install a pool: the original Load-then-Store sequence could
// overwrite a freshly stored pool, leaking its open connections.
func (m *ClientPool) getPool(addr string) *ConnectionPool {
	fun := "ClientPool.getPool -->"
	if value, ok := m.clientPool.Load(addr); ok {
		return value.(*ConnectionPool)
	}
	slog.Infof("%s not found connection pool of callee_service: %s, addr: %s, create it", fun, m.calleeServiceKey, addr)
	cp := NewConnectionPool(addr, m.capacity, m.maxCapacity, m.idleTimeout, m.rpcFactory, m.calleeServiceKey)
	cp.Open()
	if actual, loaded := m.clientPool.LoadOrStore(addr, cp); loaded {
		// Lost the race: discard our pool and use the one already stored.
		cp.Close()
		return actual.(*ConnectionPool)
	}
	return cp
}
|
package mackerel
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
)
// Host represents a host object returned by the Mackerel API.
type Host struct {
	Id            string      `json:"id,omitempty"`
	Name          string      `json:"name,omitempty"`
	Type          string      `json:"type,omitempty"`
	Status        string      `json:"status,omitempty"`
	Memo          string      `json:"memo,omitempty"`
	Roles         Roles       `json:"roles,omitempty"`
	RoleFullnames []string    `json:"roleFullnames,omitempty"` // "service:role" strings
	IsRetired     bool        `json:"isRetired,omitempty"`
	CreatedAt     int32       `json:"createdAt,omitempty"` // presumably epoch seconds — confirm against API docs
	Meta          HostMeta    `json:"meta,omitempty"`
	Interfaces    []Interface `json:"interfaces,omitempty"`
}
// Roles maps a service name to the role names a host has within that service.
type Roles map[string][]string

// HostMeta carries agent- and system-level metadata reported for a host.
type HostMeta struct {
	AgentRevision string      `json:"agent-revision,omitempty"`
	AgentVersion  string      `json:"agent-version,omitempty"`
	BlockDevice   BlockDevice `json:"block_device,omitempty"`
	Cpu           CPU         `json:"cpu,omitempty"`
	Filesystem    FileSystem  `json:"filesystem,omitempty"`
	Kernel        Kernel      `json:"kernel,omitempty"`
	Memory        Memory      `json:"memory,omitempty"`
}

// BlockDevice maps a device name to its attribute map.
type BlockDevice map[string]map[string]interface{}

// CPU is a list of attribute maps describing CPUs.
type CPU []map[string]interface{}

// FileSystem maps filesystem identifiers to attributes.
type FileSystem map[string]interface{}

// Kernel holds kernel attributes as key/value strings.
type Kernel map[string]string

// Memory holds memory statistics as key/value strings.
type Memory map[string]string

// Interface describes one network interface of a host.
type Interface struct {
	Name       string `json:"name,omitempty"`
	IPAddress  string `json:"ipAddress,omitempty"`
	MacAddress string `json:"macAddress,omitempty"`
}
// FindHostsParam is the set of optional filters accepted by FindHosts;
// zero-valued fields are omitted from the query string.
type FindHostsParam struct {
	Service  string
	Roles    []string
	Name     string
	Statuses []string
}

// CreateHostParam is the request body for CreateHost.
type CreateHostParam struct {
	Name          string      `json:"name,omitempty"`
	Meta          HostMeta    `json:"meta,omitempty"`
	Interfaces    []Interface `json:"interfaces,omitempty"`
	RoleFullnames []string    `json:"roleFullnames,omitempty"`
}

// UpdateHostParam is the request body for UpdateHost (same shape as CreateHostParam).
type UpdateHostParam CreateHostParam
// GetRoleFullnames flattens the host's Roles map into "service:role"
// strings. It returns nil when the host has no roles at all.
func (h *Host) GetRoleFullnames() []string {
	if len(h.Roles) == 0 {
		return nil
	}
	var fullnames []string
	for service, roles := range h.Roles {
		for _, role := range roles {
			fullnames = append(fullnames, strings.Join([]string{service, role}, ":"))
		}
	}
	return fullnames
}
// FindHost fetches a single host by id via GET /api/v0/hosts/:id.
func (c *Client) FindHost(id string) (*Host, error) {
	req, err := http.NewRequest("GET", c.urlFor(fmt.Sprintf("/api/v0/hosts/%s", id)).String(), nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.Request(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return nil, errors.New("status code is not 200")
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	// The API wraps the host in a {"host": {...}} envelope.
	var data struct {
		Host *Host `json:"host"`
	}
	if err := json.Unmarshal(body, &data); err != nil {
		return nil, err
	}
	// Fixed: return an explicit nil instead of the stale err variable,
	// which is always nil at this point.
	return data.Host, nil
}
// FindHosts searches hosts via GET /api/v0/hosts.json, applying the optional
// filters in param. Zero-valued filter fields are omitted from the query.
func (c *Client) FindHosts(param *FindHostsParam) ([]*Host, error) {
	v := url.Values{}
	if param.Service != "" {
		v.Set("service", param.Service)
	}
	// Ranging over a nil/empty slice is a no-op, so the len guards the
	// original carried were redundant.
	for _, role := range param.Roles {
		v.Add("role", role)
	}
	if param.Name != "" {
		v.Set("name", param.Name)
	}
	for _, status := range param.Statuses {
		v.Add("status", status)
	}

	req, err := http.NewRequest("GET", fmt.Sprintf("%s?%s", c.urlFor("/api/v0/hosts.json").String(), v.Encode()), nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.Request(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return nil, errors.New("status code is not 200")
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	// The API wraps the result in a {"hosts": [...]} envelope.
	// Fixed: []*Host instead of the needlessly parenthesized []*(Host).
	var data struct {
		Hosts []*Host `json:"hosts"`
	}
	if err := json.Unmarshal(body, &data); err != nil {
		return nil, err
	}
	return data.Hosts, nil
}
// CreateHost registers a new host via POST /api/v0/hosts and returns the id
// the API assigned to it.
// NOTE(review): unlike FindHost, the response status code is not checked, so
// a non-200 response can yield an empty id with a nil error — confirm intended.
func (c *Client) CreateHost(param *CreateHostParam) (string, error) {
	requestJson, err := json.Marshal(param)
	if err != nil {
		return "", err
	}
	req, err := http.NewRequest(
		"POST",
		c.urlFor("/api/v0/hosts").String(),
		bytes.NewReader(requestJson),
	)
	if err != nil {
		return "", err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := c.Request(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	// The API responds with an {"id": "..."} envelope.
	var data struct {
		Id string `json:"id"`
	}
	err = json.Unmarshal(body, &data)
	if err != nil {
		return "", err
	}
	return data.Id, nil
}
// UpdateHost replaces a host's attributes via PUT /api/v0/hosts/:id and
// returns the id echoed back by the API.
// NOTE(review): the response status code is not checked — confirm intended.
func (c *Client) UpdateHost(hostId string, param *UpdateHostParam) (string, error) {
	requestJson, err := json.Marshal(param)
	if err != nil {
		return "", err
	}
	req, err := http.NewRequest(
		"PUT",
		c.urlFor(fmt.Sprintf("/api/v0/hosts/%s", hostId)).String(),
		bytes.NewReader(requestJson),
	)
	if err != nil {
		return "", err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := c.Request(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	// The API responds with an {"id": "..."} envelope.
	var data struct {
		Id string `json:"id"`
	}
	err = json.Unmarshal(body, &data)
	if err != nil {
		return "", err
	}
	return data.Id, nil
}
// UpdateHostStatus sets a host's status (e.g. working/standby) via
// PUT /api/v0/hosts/:id/status.
// NOTE(review): the response status code is not checked — confirm intended.
func (c *Client) UpdateHostStatus(hostId string, status string) error {
	requestJson, err := json.Marshal(map[string]string{
		"status": status,
	})
	if err != nil {
		return err
	}
	req, err := http.NewRequest(
		"PUT",
		c.urlFor(fmt.Sprintf("/api/v0/hosts/%s/status", hostId)).String(),
		bytes.NewReader(requestJson),
	)
	if err != nil {
		return err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := c.Request(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return nil
}
// RetireHost retires the host with the given id via
// POST /api/v0/hosts/:id/retire. The request body is an empty JSON object.
func (c *Client) RetireHost(id string) error {
	// Fixed: json.Marshal("{}") encodes the *string* "{}" as the JSON string
	// "\"{}\"", not an empty object. Send the literal object bytes instead.
	req, err := http.NewRequest(
		"POST",
		c.urlFor(fmt.Sprintf("/api/v0/hosts/%s/retire", id)).String(),
		bytes.NewReader([]byte("{}")),
	)
	if err != nil {
		return err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := c.Request(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return errors.New("status code is not 200")
	}
	return nil
}
Implement Host.DateFromCreatedAt and Host.DateStringFromCreatedAt
package mackerel
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
)
// Host represents a host object returned by the Mackerel API.
type Host struct {
	Id            string      `json:"id,omitempty"`
	Name          string      `json:"name,omitempty"`
	Type          string      `json:"type,omitempty"`
	Status        string      `json:"status,omitempty"`
	Memo          string      `json:"memo,omitempty"`
	Roles         Roles       `json:"roles,omitempty"`
	RoleFullnames []string    `json:"roleFullnames,omitempty"` // "service:role" strings
	IsRetired     bool        `json:"isRetired,omitempty"`
	CreatedAt     int32       `json:"createdAt,omitempty"` // epoch seconds (see DateFromCreatedAt)
	Meta          HostMeta    `json:"meta,omitempty"`
	Interfaces    []Interface `json:"interfaces,omitempty"`
}
// Roles maps a service name to the role names a host has within that service.
type Roles map[string][]string

// HostMeta carries agent- and system-level metadata reported for a host.
type HostMeta struct {
	AgentRevision string      `json:"agent-revision,omitempty"`
	AgentVersion  string      `json:"agent-version,omitempty"`
	BlockDevice   BlockDevice `json:"block_device,omitempty"`
	Cpu           CPU         `json:"cpu,omitempty"`
	Filesystem    FileSystem  `json:"filesystem,omitempty"`
	Kernel        Kernel      `json:"kernel,omitempty"`
	Memory        Memory      `json:"memory,omitempty"`
}

// BlockDevice maps a device name to its attribute map.
type BlockDevice map[string]map[string]interface{}

// CPU is a list of attribute maps describing CPUs.
type CPU []map[string]interface{}

// FileSystem maps filesystem identifiers to attributes.
type FileSystem map[string]interface{}

// Kernel holds kernel attributes as key/value strings.
type Kernel map[string]string

// Memory holds memory statistics as key/value strings.
type Memory map[string]string

// Interface describes one network interface of a host.
type Interface struct {
	Name       string `json:"name,omitempty"`
	IPAddress  string `json:"ipAddress,omitempty"`
	MacAddress string `json:"macAddress,omitempty"`
}
// FindHostsParam is the set of optional filters accepted by FindHosts;
// zero-valued fields are omitted from the query string.
type FindHostsParam struct {
	Service  string
	Roles    []string
	Name     string
	Statuses []string
}

// CreateHostParam is the request body for CreateHost.
type CreateHostParam struct {
	Name          string      `json:"name,omitempty"`
	Meta          HostMeta    `json:"meta,omitempty"`
	Interfaces    []Interface `json:"interfaces,omitempty"`
	RoleFullnames []string    `json:"roleFullnames,omitempty"`
}

// UpdateHostParam is the request body for UpdateHost (same shape as CreateHostParam).
type UpdateHostParam CreateHostParam
// GetRoleFullnames flattens the host's Roles map into "service:role"
// strings. It returns nil when the host has no roles at all.
func (h *Host) GetRoleFullnames() []string {
	if len(h.Roles) == 0 {
		return nil
	}
	var fullnames []string
	for service, roles := range h.Roles {
		for _, role := range roles {
			fullnames = append(fullnames, strings.Join([]string{service, role}, ":"))
		}
	}
	return fullnames
}
// DateFromCreatedAt interprets the host's CreatedAt field as Unix epoch
// seconds and returns it as a time.Time.
func (h *Host) DateFromCreatedAt() time.Time {
	return time.Unix(int64(h.CreatedAt), 0)
}
// DateStringFromCreatedAt renders the host's creation time in a fixed
// human-readable layout, e.g. "Jan 2, 2006 at 3:04pm (MST)".
func (h *Host) DateStringFromCreatedAt() string {
	const layout = "Jan 2, 2006 at 3:04pm (MST)"
	return h.DateFromCreatedAt().Format(layout)
}
// FindHost fetches a single host by id via GET /api/v0/hosts/:id.
func (c *Client) FindHost(id string) (*Host, error) {
	req, err := http.NewRequest("GET", c.urlFor(fmt.Sprintf("/api/v0/hosts/%s", id)).String(), nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.Request(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return nil, errors.New("status code is not 200")
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	// The API wraps the host in a {"host": {...}} envelope.
	var data struct {
		Host *Host `json:"host"`
	}
	if err := json.Unmarshal(body, &data); err != nil {
		return nil, err
	}
	// Fixed: return an explicit nil instead of the stale err variable,
	// which is always nil at this point.
	return data.Host, nil
}
// FindHosts searches hosts via GET /api/v0/hosts.json, applying the optional
// filters in param. Zero-valued filter fields are omitted from the query.
func (c *Client) FindHosts(param *FindHostsParam) ([]*Host, error) {
	v := url.Values{}
	if param.Service != "" {
		v.Set("service", param.Service)
	}
	// Ranging over a nil/empty slice is a no-op, so the len guards the
	// original carried were redundant.
	for _, role := range param.Roles {
		v.Add("role", role)
	}
	if param.Name != "" {
		v.Set("name", param.Name)
	}
	for _, status := range param.Statuses {
		v.Add("status", status)
	}

	req, err := http.NewRequest("GET", fmt.Sprintf("%s?%s", c.urlFor("/api/v0/hosts.json").String(), v.Encode()), nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.Request(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return nil, errors.New("status code is not 200")
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	// The API wraps the result in a {"hosts": [...]} envelope.
	// Fixed: []*Host instead of the needlessly parenthesized []*(Host).
	var data struct {
		Hosts []*Host `json:"hosts"`
	}
	if err := json.Unmarshal(body, &data); err != nil {
		return nil, err
	}
	return data.Hosts, nil
}
// CreateHost registers a new host via POST /api/v0/hosts and returns the id
// the API assigned to it.
func (c *Client) CreateHost(param *CreateHostParam) (string, error) {
	requestJson, err := json.Marshal(param)
	if err != nil {
		return "", err
	}
	req, err := http.NewRequest(
		"POST",
		c.urlFor("/api/v0/hosts").String(),
		bytes.NewReader(requestJson),
	)
	if err != nil {
		return "", err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := c.Request(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// Consistent with FindHost/RetireHost: reject non-200 responses instead
	// of silently decoding an error payload into an empty id.
	if resp.StatusCode != 200 {
		return "", errors.New("status code is not 200")
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}

	// The API responds with an {"id": "..."} envelope.
	var data struct {
		Id string `json:"id"`
	}
	if err := json.Unmarshal(body, &data); err != nil {
		return "", err
	}
	return data.Id, nil
}
// UpdateHost replaces a host's attributes via PUT /api/v0/hosts/:id and
// returns the id echoed back by the API.
func (c *Client) UpdateHost(hostId string, param *UpdateHostParam) (string, error) {
	requestJson, err := json.Marshal(param)
	if err != nil {
		return "", err
	}
	req, err := http.NewRequest(
		"PUT",
		c.urlFor(fmt.Sprintf("/api/v0/hosts/%s", hostId)).String(),
		bytes.NewReader(requestJson),
	)
	if err != nil {
		return "", err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := c.Request(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// Consistent with FindHost/RetireHost: reject non-200 responses instead
	// of silently decoding an error payload into an empty id.
	if resp.StatusCode != 200 {
		return "", errors.New("status code is not 200")
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}

	// The API responds with an {"id": "..."} envelope.
	var data struct {
		Id string `json:"id"`
	}
	if err := json.Unmarshal(body, &data); err != nil {
		return "", err
	}
	return data.Id, nil
}
// UpdateHostStatus sets a host's status (e.g. working/standby) via
// PUT /api/v0/hosts/:id/status.
func (c *Client) UpdateHostStatus(hostId string, status string) error {
	requestJson, err := json.Marshal(map[string]string{
		"status": status,
	})
	if err != nil {
		return err
	}
	req, err := http.NewRequest(
		"PUT",
		c.urlFor(fmt.Sprintf("/api/v0/hosts/%s/status", hostId)).String(),
		bytes.NewReader(requestJson),
	)
	if err != nil {
		return err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := c.Request(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Consistent with RetireHost: surface API-level failures rather than
	// reporting success for any response.
	if resp.StatusCode != 200 {
		return errors.New("status code is not 200")
	}
	return nil
}
// RetireHost retires the host with the given id via
// POST /api/v0/hosts/:id/retire. The request body is an empty JSON object.
func (c *Client) RetireHost(id string) error {
	// Fixed: json.Marshal("{}") encodes the *string* "{}" as the JSON string
	// "\"{}\"", not an empty object. Send the literal object bytes instead.
	req, err := http.NewRequest(
		"POST",
		c.urlFor(fmt.Sprintf("/api/v0/hosts/%s/retire", id)).String(),
		bytes.NewReader([]byte("{}")),
	)
	if err != nil {
		return err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := c.Request(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return errors.New("status code is not 200")
	}
	return nil
}
|
/*
Copyright 2017 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"archive/tar"
"encoding/json"
"path/filepath"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kubeerrs "k8s.io/apimachinery/pkg/util/errors"
api "github.com/heptio/velero/pkg/apis/velero/v1"
"github.com/heptio/velero/pkg/client"
"github.com/heptio/velero/pkg/cloudprovider"
"github.com/heptio/velero/pkg/discovery"
"github.com/heptio/velero/pkg/kuberesource"
"github.com/heptio/velero/pkg/podexec"
"github.com/heptio/velero/pkg/restic"
"github.com/heptio/velero/pkg/volume"
)
// itemBackupperFactory constructs ItemBackuppers; it is an interface so tests
// can substitute a fake factory.
type itemBackupperFactory interface {
	newItemBackupper(
		backup *Request,
		backedUpItems map[itemKey]struct{},
		podCommandExecutor podexec.PodCommandExecutor,
		tarWriter tarWriter,
		dynamicFactory client.DynamicFactory,
		discoveryHelper discovery.Helper,
		resticBackupper restic.Backupper,
		resticSnapshotTracker *pvcSnapshotTracker,
		blockStoreGetter BlockStoreGetter,
	) ItemBackupper
}
// defaultItemBackupperFactory is the production itemBackupperFactory.
type defaultItemBackupperFactory struct{}

// newItemBackupper wires together a defaultItemBackupper with its hook
// handler and dependencies. The returned backupper uses itself to back up
// any additional items reported by actions.
func (f *defaultItemBackupperFactory) newItemBackupper(
	backupRequest *Request,
	backedUpItems map[itemKey]struct{},
	podCommandExecutor podexec.PodCommandExecutor,
	tarWriter tarWriter,
	dynamicFactory client.DynamicFactory,
	discoveryHelper discovery.Helper,
	resticBackupper restic.Backupper,
	resticSnapshotTracker *pvcSnapshotTracker,
	blockStoreGetter BlockStoreGetter,
) ItemBackupper {
	ib := &defaultItemBackupper{
		backupRequest:         backupRequest,
		backedUpItems:         backedUpItems,
		tarWriter:             tarWriter,
		dynamicFactory:        dynamicFactory,
		discoveryHelper:       discoveryHelper,
		resticBackupper:       resticBackupper,
		resticSnapshotTracker: resticSnapshotTracker,
		blockStoreGetter:      blockStoreGetter,
		itemHookHandler: &defaultItemHookHandler{
			podCommandExecutor: podCommandExecutor,
		},
	}
	// this is for testing purposes: tests can replace additionalItemBackupper
	// with a mock to observe recursive backups.
	ib.additionalItemBackupper = ib
	return ib
}
// ItemBackupper backs up a single Kubernetes object into the backup tarball.
type ItemBackupper interface {
	backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error
}

// defaultItemBackupper is the production ItemBackupper implementation.
type defaultItemBackupper struct {
	backupRequest           *Request
	backedUpItems           map[itemKey]struct{} // de-dup set shared across the whole backup
	tarWriter               tarWriter
	dynamicFactory          client.DynamicFactory
	discoveryHelper         discovery.Helper
	resticBackupper         restic.Backupper
	resticSnapshotTracker   *pvcSnapshotTracker
	blockStoreGetter        BlockStoreGetter
	itemHookHandler         itemHookHandler
	additionalItemBackupper ItemBackupper // normally self; replaced in tests
	// snapshotLocationBlockStores caches initialized block stores per
	// volume snapshot location name (see blockStore below).
	snapshotLocationBlockStores map[string]cloudprovider.BlockStore
}
// backupItem backs up an individual item to tarWriter. The item may be excluded based on the
// namespaces IncludesExcludes list. Each (resource, namespace, name) triple is
// backed up at most once per backup; pre/post hooks are always executed around
// the item, and pod volume (restic) backups and PV snapshots are triggered for
// the corresponding resource types.
func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error {
	metadata, err := meta.Accessor(obj)
	if err != nil {
		return err
	}
	namespace := metadata.GetNamespace()
	name := metadata.GetName()
	// Scope the logger to this item for all subsequent messages.
	log := logger.WithField("name", name)
	if namespace != "" {
		log = log.WithField("namespace", namespace)
	}
	// NOTE: we have to re-check namespace & resource includes/excludes because it's possible that
	// backupItem can be invoked by a custom action.
	if namespace != "" && !ib.backupRequest.NamespaceIncludesExcludes.ShouldInclude(namespace) {
		log.Info("Excluding item because namespace is excluded")
		return nil
	}
	// NOTE: we specifically allow namespaces to be backed up even if IncludeClusterResources is
	// false.
	if namespace == "" && groupResource != kuberesource.Namespaces && ib.backupRequest.Spec.IncludeClusterResources != nil && !*ib.backupRequest.Spec.IncludeClusterResources {
		log.Info("Excluding item because resource is cluster-scoped and backup.spec.includeClusterResources is false")
		return nil
	}
	if !ib.backupRequest.ResourceIncludesExcludes.ShouldInclude(groupResource.String()) {
		log.Info("Excluding item because resource is excluded")
		return nil
	}
	if metadata.GetDeletionTimestamp() != nil {
		log.Info("Skipping item because it's being deleted.")
		return nil
	}
	// De-duplicate against everything already backed up in this run.
	key := itemKey{
		resource:  groupResource.String(),
		namespace: namespace,
		name:      name,
	}
	if _, exists := ib.backedUpItems[key]; exists {
		log.Info("Skipping item because it's already been backed up.")
		return nil
	}
	ib.backedUpItems[key] = struct{}{}
	log.Info("Backing up resource")
	log.Debug("Executing pre hooks")
	if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hookPhasePre); err != nil {
		return err
	}
	var (
		backupErrs            []error
		pod                   *corev1api.Pod
		resticVolumesToBackup []string
	)
	if groupResource == kuberesource.Pods {
		// pod needs to be initialized for the unstructured converter
		pod = new(corev1api.Pod)
		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pod); err != nil {
			backupErrs = append(backupErrs, errors.WithStack(err))
			// nil it on error since it's not valid
			pod = nil
		} else {
			// get the volumes to backup using restic, and add any of them that are PVCs to the pvc snapshot
			// tracker, so that when we backup PVCs/PVs via an item action in the next step, we don't snapshot
			// PVs that will have their data backed up with restic.
			resticVolumesToBackup = restic.GetVolumesToBackup(pod)
			ib.resticSnapshotTracker.Track(pod, resticVolumesToBackup)
		}
	}
	updatedObj, err := ib.executeActions(log, obj, groupResource, name, namespace, metadata)
	if err != nil {
		log.WithError(err).Error("Error executing item actions")
		backupErrs = append(backupErrs, err)
		// if there was an error running actions, execute post hooks and return
		log.Debug("Executing post hooks")
		if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hookPhasePost); err != nil {
			backupErrs = append(backupErrs, err)
		}
		return kubeerrs.NewAggregate(backupErrs)
	}
	obj = updatedObj
	// Actions may have mutated the object, so re-fetch its metadata accessor.
	if metadata, err = meta.Accessor(obj); err != nil {
		return errors.WithStack(err)
	}
	if groupResource == kuberesource.PersistentVolumes {
		if err := ib.takePVSnapshot(obj, log); err != nil {
			backupErrs = append(backupErrs, err)
		}
	}
	if groupResource == kuberesource.Pods && pod != nil {
		// this function will return partial results, so process volumeSnapshots
		// even if there are errors.
		volumeSnapshots, errs := ib.backupPodVolumes(log, pod, resticVolumesToBackup)
		// annotate the pod with the successful volume snapshots
		for volume, snapshot := range volumeSnapshots {
			restic.SetPodSnapshotAnnotation(metadata, volume, snapshot)
		}
		backupErrs = append(backupErrs, errs...)
	}
	log.Debug("Executing post hooks")
	if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hookPhasePost); err != nil {
		backupErrs = append(backupErrs, err)
	}
	if len(backupErrs) != 0 {
		return kubeerrs.NewAggregate(backupErrs)
	}
	// Write the item into the tar stream under
	// resources/<group-resource>/<namespace scope>/<name>.json.
	var filePath string
	if namespace != "" {
		filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.NamespaceScopedDir, namespace, name+".json")
	} else {
		filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.ClusterScopedDir, name+".json")
	}
	itemBytes, err := json.Marshal(obj.UnstructuredContent())
	if err != nil {
		return errors.WithStack(err)
	}
	hdr := &tar.Header{
		Name:     filePath,
		Size:     int64(len(itemBytes)),
		Typeflag: tar.TypeReg,
		Mode:     0755,
		ModTime:  time.Now(),
	}
	if err := ib.tarWriter.WriteHeader(hdr); err != nil {
		return errors.WithStack(err)
	}
	if _, err := ib.tarWriter.Write(itemBytes); err != nil {
		return errors.WithStack(err)
	}
	return nil
}
// backupPodVolumes triggers restic backups of the specified pod volumes, and returns a map of volume name -> snapshot ID
// for volumes that were successfully backed up, and a slice of any errors that were encountered.
func (ib *defaultItemBackupper) backupPodVolumes(log logrus.FieldLogger, pod *corev1api.Pod, volumes []string) (map[string]string, []error) {
	switch {
	case len(volumes) == 0:
		// Nothing to back up.
		return nil, nil
	case ib.resticBackupper == nil:
		log.Warn("No restic backupper, not backing up pod's volumes")
		return nil, nil
	}
	return ib.resticBackupper.BackupPodVolumes(ib.backupRequest.Backup, pod, log)
}
// executeActions runs every resolved backup action whose resource, namespace,
// and label selectors match this item. Actions may mutate the item (the
// updated object is returned) and may report additional related items, which
// are fetched and backed up recursively via additionalItemBackupper.
func (ib *defaultItemBackupper) executeActions(
	log logrus.FieldLogger,
	obj runtime.Unstructured,
	groupResource schema.GroupResource,
	name, namespace string,
	metadata metav1.Object,
) (runtime.Unstructured, error) {
	for _, action := range ib.backupRequest.ResolvedActions {
		if !action.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {
			log.Debug("Skipping action because it does not apply to this resource")
			continue
		}
		if namespace != "" && !action.namespaceIncludesExcludes.ShouldInclude(namespace) {
			log.Debug("Skipping action because it does not apply to this namespace")
			continue
		}
		if !action.selector.Matches(labels.Set(metadata.GetLabels())) {
			log.Debug("Skipping action because label selector does not match")
			continue
		}
		log.Info("Executing custom action")
		updatedItem, additionalItemIdentifiers, err := action.Execute(obj, ib.backupRequest.Backup)
		if err != nil {
			// We want this to show up in the log file at the place where the error occurs. When we return
			// the error, it get aggregated with all the other ones at the end of the backup, making it
			// harder to tell when it happened.
			log.WithError(err).Error("error executing custom action")
			return nil, errors.Wrapf(err, "error executing custom action (groupResource=%s, namespace=%s, name=%s)", groupResource.String(), namespace, name)
		}
		obj = updatedItem
		for _, additionalItem := range additionalItemIdentifiers {
			gvr, resource, err := ib.discoveryHelper.ResourceFor(additionalItem.GroupResource.WithVersion(""))
			if err != nil {
				return nil, err
			}
			client, err := ib.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), resource, additionalItem.Namespace)
			if err != nil {
				return nil, err
			}
			// NOTE: this ':=' deliberately shadows the loop variable with the
			// freshly fetched unstructured object for the same identifier.
			additionalItem, err := client.Get(additionalItem.Name, metav1.GetOptions{})
			if err != nil {
				return nil, err
			}
			if err = ib.additionalItemBackupper.backupItem(log, additionalItem, gvr.GroupResource()); err != nil {
				return nil, err
			}
		}
	}
	return obj, nil
}
// blockStore instantiates and initializes a BlockStore given a VolumeSnapshotLocation,
// or returns an existing one if one's already been initialized for the location.
func (ib *defaultItemBackupper) blockStore(snapshotLocation *api.VolumeSnapshotLocation) (cloudprovider.BlockStore, error) {
	if cached, ok := ib.snapshotLocationBlockStores[snapshotLocation.Name]; ok {
		return cached, nil
	}

	store, err := ib.blockStoreGetter.GetBlockStore(snapshotLocation.Spec.Provider)
	if err != nil {
		return nil, err
	}
	if err := store.Init(snapshotLocation.Spec.Config); err != nil {
		return nil, err
	}

	// Lazily allocate the cache on first successful initialization.
	if ib.snapshotLocationBlockStores == nil {
		ib.snapshotLocationBlockStores = make(map[string]cloudprovider.BlockStore)
	}
	ib.snapshotLocationBlockStores[snapshotLocation.Name] = store

	return store, nil
}
// zoneLabel is the label that stores availability-zone info
// on PVs; takePVSnapshot reads it to pass the zone to the block store.
const zoneLabel = "failure-domain.beta.kubernetes.io/zone"
// takePVSnapshot triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
// backup has volume snapshots enabled and the PV is of a compatible type. Also records cloud
// disk type and IOPS (if applicable) to be able to restore to current state later.
// The resulting snapshot record (successful or failed) is appended to the
// backup request's VolumeSnapshots.
func (ib *defaultItemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.FieldLogger) error {
	log.Info("Executing takePVSnapshot")
	if ib.backupRequest.Spec.SnapshotVolumes != nil && !*ib.backupRequest.Spec.SnapshotVolumes {
		log.Info("Backup has volume snapshots disabled; skipping volume snapshot action.")
		return nil
	}
	pv := new(corev1api.PersistentVolume)
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pv); err != nil {
		return errors.WithStack(err)
	}
	log = log.WithField("persistentVolume", pv.Name)
	// If this PV is claimed, see if we've already taken a (restic) snapshot of the contents
	// of this PV. If so, don't take a snapshot.
	if pv.Spec.ClaimRef != nil {
		if ib.resticSnapshotTracker.Has(pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name) {
			log.Info("Skipping Persistent Volume snapshot because volume has already been backed up.")
			return nil
		}
	}
	metadata, err := meta.Accessor(obj)
	if err != nil {
		return errors.WithStack(err)
	}
	// A missing zone label is logged but not fatal; an empty zone is passed through.
	pvFailureDomainZone := metadata.GetLabels()[zoneLabel]
	if pvFailureDomainZone == "" {
		log.Infof("label %q is not present on PersistentVolume", zoneLabel)
	}
	var (
		volumeID, location string
		blockStore         cloudprovider.BlockStore
	)
	// Try each configured snapshot location until one recognizes this volume.
	for _, snapshotLocation := range ib.backupRequest.SnapshotLocations {
		log := log.WithField("volumeSnapshotLocation", snapshotLocation.Name)
		bs, err := ib.blockStore(snapshotLocation)
		if err != nil {
			log.WithError(err).Error("Error getting block store for volume snapshot location")
			continue
		}
		if volumeID, err = bs.GetVolumeID(obj); err != nil {
			log.WithError(err).Errorf("Error attempting to get volume ID for persistent volume")
			continue
		}
		if volumeID == "" {
			log.Infof("No volume ID returned by block store for persistent volume")
			continue
		}
		log.Infof("Got volume ID for persistent volume")
		blockStore = bs
		location = snapshotLocation.Name
		break
	}
	if blockStore == nil {
		log.Info("PersistentVolume is not a supported volume type for snapshots, skipping.")
		return nil
	}
	log = log.WithField("volumeID", volumeID)
	tags := map[string]string{
		"velero.io/backup": ib.backupRequest.Name,
		"velero.io/pv":     metadata.GetName(),
	}
	log.Info("Getting volume information")
	volumeType, iops, err := blockStore.GetVolumeInfo(volumeID, pvFailureDomainZone)
	if err != nil {
		log.WithError(err).Error("error getting volume info")
		return errors.WithMessage(err, "error getting volume info")
	}
	log.Info("Snapshotting PersistentVolume")
	snapshot := volumeSnapshot(ib.backupRequest.Backup, metadata.GetName(), volumeID, volumeType, pvFailureDomainZone, location, iops)
	var errs []error
	snapshotID, err := blockStore.CreateSnapshot(snapshot.Spec.ProviderVolumeID, snapshot.Spec.VolumeAZ, tags)
	if err != nil {
		log.WithError(err).Error("error creating snapshot")
		errs = append(errs, errors.Wrap(err, "error taking snapshot of volume"))
		snapshot.Status.Phase = volume.SnapshotPhaseFailed
	} else {
		snapshot.Status.Phase = volume.SnapshotPhaseCompleted
		snapshot.Status.ProviderSnapshotID = snapshotID
	}
	ib.backupRequest.VolumeSnapshots = append(ib.backupRequest.VolumeSnapshots, snapshot)
	// nil errors are automatically removed
	return kubeerrs.NewAggregate(errs)
}
// volumeSnapshot builds a volume.Snapshot record for the given backup and
// volume details, starting in the New phase.
func volumeSnapshot(backup *api.Backup, volumeName, volumeID, volumeType, az, location string, iops *int64) *volume.Snapshot {
	spec := volume.SnapshotSpec{
		BackupName:           backup.Name,
		BackupUID:            string(backup.UID),
		Location:             location,
		PersistentVolumeName: volumeName,
		ProviderVolumeID:     volumeID,
		VolumeType:           volumeType,
		VolumeAZ:             az,
		VolumeIOPS:           iops,
	}
	return &volume.Snapshot{
		Spec:   spec,
		Status: volume.SnapshotStatus{Phase: volume.SnapshotPhaseNew},
	}
}
Remove extraneous use of meta.Accessor.
Signed-off-by: Steve Kriss <0fb57b59c5595f66e49bc4f1ca58af1a93526cb7@vmware.com>
/*
Copyright 2017 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"archive/tar"
"encoding/json"
"path/filepath"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kubeerrs "k8s.io/apimachinery/pkg/util/errors"
api "github.com/heptio/velero/pkg/apis/velero/v1"
"github.com/heptio/velero/pkg/client"
"github.com/heptio/velero/pkg/cloudprovider"
"github.com/heptio/velero/pkg/discovery"
"github.com/heptio/velero/pkg/kuberesource"
"github.com/heptio/velero/pkg/podexec"
"github.com/heptio/velero/pkg/restic"
"github.com/heptio/velero/pkg/volume"
)
// itemBackupperFactory constructs ItemBackuppers; it is an interface so tests
// can substitute a fake factory.
type itemBackupperFactory interface {
	newItemBackupper(
		backup *Request,
		backedUpItems map[itemKey]struct{},
		podCommandExecutor podexec.PodCommandExecutor,
		tarWriter tarWriter,
		dynamicFactory client.DynamicFactory,
		discoveryHelper discovery.Helper,
		resticBackupper restic.Backupper,
		resticSnapshotTracker *pvcSnapshotTracker,
		blockStoreGetter BlockStoreGetter,
	) ItemBackupper
}
// defaultItemBackupperFactory is the production itemBackupperFactory.
type defaultItemBackupperFactory struct{}

// newItemBackupper wires together a defaultItemBackupper with its hook
// handler and dependencies. The returned backupper uses itself to back up
// any additional items reported by actions.
func (f *defaultItemBackupperFactory) newItemBackupper(
	backupRequest *Request,
	backedUpItems map[itemKey]struct{},
	podCommandExecutor podexec.PodCommandExecutor,
	tarWriter tarWriter,
	dynamicFactory client.DynamicFactory,
	discoveryHelper discovery.Helper,
	resticBackupper restic.Backupper,
	resticSnapshotTracker *pvcSnapshotTracker,
	blockStoreGetter BlockStoreGetter,
) ItemBackupper {
	ib := &defaultItemBackupper{
		backupRequest:         backupRequest,
		backedUpItems:         backedUpItems,
		tarWriter:             tarWriter,
		dynamicFactory:        dynamicFactory,
		discoveryHelper:       discoveryHelper,
		resticBackupper:       resticBackupper,
		resticSnapshotTracker: resticSnapshotTracker,
		blockStoreGetter:      blockStoreGetter,
		itemHookHandler: &defaultItemHookHandler{
			podCommandExecutor: podCommandExecutor,
		},
	}
	// this is for testing purposes: tests can replace additionalItemBackupper
	// with a mock to observe recursive backups.
	ib.additionalItemBackupper = ib
	return ib
}
// ItemBackupper backs up a single Kubernetes object into the backup tarball.
type ItemBackupper interface {
	backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error
}

// defaultItemBackupper is the production ItemBackupper implementation.
type defaultItemBackupper struct {
	backupRequest           *Request
	backedUpItems           map[itemKey]struct{} // de-dup set shared across the whole backup
	tarWriter               tarWriter
	dynamicFactory          client.DynamicFactory
	discoveryHelper         discovery.Helper
	resticBackupper         restic.Backupper
	resticSnapshotTracker   *pvcSnapshotTracker
	blockStoreGetter        BlockStoreGetter
	itemHookHandler         itemHookHandler
	additionalItemBackupper ItemBackupper // normally self; replaced in tests
	// snapshotLocationBlockStores caches initialized block stores per
	// volume snapshot location name (see blockStore).
	snapshotLocationBlockStores map[string]cloudprovider.BlockStore
}
// backupItem backs up an individual item to tarWriter. The item may be excluded based on the
// namespaces IncludesExcludes list. The sequence is: filtering, de-duplication, pre hooks,
// restic volume tracking (pods), custom actions, PV snapshotting, pod volume backups,
// post hooks, then serialization into the tarball.
func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error {
	metadata, err := meta.Accessor(obj)
	if err != nil {
		return err
	}
	namespace := metadata.GetNamespace()
	name := metadata.GetName()
	// scope all subsequent log lines to the item being backed up
	log := logger.WithField("name", name)
	if namespace != "" {
		log = log.WithField("namespace", namespace)
	}
	// NOTE: we have to re-check namespace & resource includes/excludes because it's possible that
	// backupItem can be invoked by a custom action.
	if namespace != "" && !ib.backupRequest.NamespaceIncludesExcludes.ShouldInclude(namespace) {
		log.Info("Excluding item because namespace is excluded")
		return nil
	}
	// NOTE: we specifically allow namespaces to be backed up even if IncludeClusterResources is
	// false.
	if namespace == "" && groupResource != kuberesource.Namespaces && ib.backupRequest.Spec.IncludeClusterResources != nil && !*ib.backupRequest.Spec.IncludeClusterResources {
		log.Info("Excluding item because resource is cluster-scoped and backup.spec.includeClusterResources is false")
		return nil
	}
	if !ib.backupRequest.ResourceIncludesExcludes.ShouldInclude(groupResource.String()) {
		log.Info("Excluding item because resource is excluded")
		return nil
	}
	// items already marked for deletion are skipped entirely
	if metadata.GetDeletionTimestamp() != nil {
		log.Info("Skipping item because it's being deleted.")
		return nil
	}
	// de-duplicate: the same item may be visited again as an "additional item"
	// returned by a custom action
	key := itemKey{
		resource:  groupResource.String(),
		namespace: namespace,
		name:      name,
	}
	if _, exists := ib.backedUpItems[key]; exists {
		log.Info("Skipping item because it's already been backed up.")
		return nil
	}
	ib.backedUpItems[key] = struct{}{}
	log.Info("Backing up resource")
	log.Debug("Executing pre hooks")
	if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hookPhasePre); err != nil {
		return err
	}
	var (
		backupErrs            []error
		pod                   *corev1api.Pod
		resticVolumesToBackup []string
	)
	if groupResource == kuberesource.Pods {
		// pod needs to be initialized for the unstructured converter
		pod = new(corev1api.Pod)
		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pod); err != nil {
			backupErrs = append(backupErrs, errors.WithStack(err))
			// nil it on error since it's not valid
			pod = nil
		} else {
			// get the volumes to backup using restic, and add any of them that are PVCs to the pvc snapshot
			// tracker, so that when we backup PVCs/PVs via an item action in the next step, we don't snapshot
			// PVs that will have their data backed up with restic.
			resticVolumesToBackup = restic.GetVolumesToBackup(pod)
			ib.resticSnapshotTracker.Track(pod, resticVolumesToBackup)
		}
	}
	updatedObj, err := ib.executeActions(log, obj, groupResource, name, namespace, metadata)
	if err != nil {
		log.WithError(err).Error("Error executing item actions")
		backupErrs = append(backupErrs, err)
		// if there was an error running actions, execute post hooks and return
		log.Debug("Executing post hooks")
		if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hookPhasePost); err != nil {
			backupErrs = append(backupErrs, err)
		}
		return kubeerrs.NewAggregate(backupErrs)
	}
	obj = updatedObj
	// actions may have replaced the object, so refresh the metadata accessor
	if metadata, err = meta.Accessor(obj); err != nil {
		return errors.WithStack(err)
	}
	if groupResource == kuberesource.PersistentVolumes {
		if err := ib.takePVSnapshot(obj, log); err != nil {
			backupErrs = append(backupErrs, err)
		}
	}
	if groupResource == kuberesource.Pods && pod != nil {
		// this function will return partial results, so process volumeSnapshots
		// even if there are errors.
		volumeSnapshots, errs := ib.backupPodVolumes(log, pod, resticVolumesToBackup)
		// annotate the pod with the successful volume snapshots
		for volume, snapshot := range volumeSnapshots {
			restic.SetPodSnapshotAnnotation(metadata, volume, snapshot)
		}
		backupErrs = append(backupErrs, errs...)
	}
	log.Debug("Executing post hooks")
	if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hookPhasePost); err != nil {
		backupErrs = append(backupErrs, err)
	}
	if len(backupErrs) != 0 {
		return kubeerrs.NewAggregate(backupErrs)
	}
	// serialize the (possibly action-modified) item into the backup tarball under
	// resources/<group-resource>/<cluster-or-namespace dir>/.../<name>.json
	var filePath string
	if namespace != "" {
		filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.NamespaceScopedDir, namespace, name+".json")
	} else {
		filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.ClusterScopedDir, name+".json")
	}
	itemBytes, err := json.Marshal(obj.UnstructuredContent())
	if err != nil {
		return errors.WithStack(err)
	}
	hdr := &tar.Header{
		Name:     filePath,
		Size:     int64(len(itemBytes)),
		Typeflag: tar.TypeReg,
		Mode:     0755,
		ModTime:  time.Now(),
	}
	if err := ib.tarWriter.WriteHeader(hdr); err != nil {
		return errors.WithStack(err)
	}
	if _, err := ib.tarWriter.Write(itemBytes); err != nil {
		return errors.WithStack(err)
	}
	return nil
}
// backupPodVolumes triggers restic backups of the specified pod volumes, and returns a map of volume name -> snapshot ID
// for volumes that were successfully backed up, and a slice of any errors that were encountered.
func (ib *defaultItemBackupper) backupPodVolumes(log logrus.FieldLogger, pod *corev1api.Pod, volumes []string) (map[string]string, []error) {
	switch {
	case len(volumes) == 0:
		// nothing opted in to restic backup: no-op
		return nil, nil
	case ib.resticBackupper == nil:
		log.Warn("No restic backupper, not backing up pod's volumes")
		return nil, nil
	default:
		return ib.resticBackupper.BackupPodVolumes(ib.backupRequest.Backup, pod, log)
	}
}
// executeActions runs every resolved backup action whose resource/namespace
// includes-excludes and label selector match this item. An action may mutate
// the item (the updated object is carried forward to later actions) and may
// return identifiers of additional related items, which are fetched via a
// dynamic client and backed up recursively through ib.additionalItemBackupper.
// Returns the possibly-updated object, or an error on the first failure.
func (ib *defaultItemBackupper) executeActions(
	log logrus.FieldLogger,
	obj runtime.Unstructured,
	groupResource schema.GroupResource,
	name, namespace string,
	metadata metav1.Object,
) (runtime.Unstructured, error) {
	for _, action := range ib.backupRequest.ResolvedActions {
		if !action.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {
			log.Debug("Skipping action because it does not apply to this resource")
			continue
		}
		if namespace != "" && !action.namespaceIncludesExcludes.ShouldInclude(namespace) {
			log.Debug("Skipping action because it does not apply to this namespace")
			continue
		}
		if !action.selector.Matches(labels.Set(metadata.GetLabels())) {
			log.Debug("Skipping action because label selector does not match")
			continue
		}
		log.Info("Executing custom action")
		updatedItem, additionalItemIdentifiers, err := action.Execute(obj, ib.backupRequest.Backup)
		if err != nil {
			// We want this to show up in the log file at the place where the error occurs. When we return
			// the error, it get aggregated with all the other ones at the end of the backup, making it
			// harder to tell when it happened.
			log.WithError(err).Error("error executing custom action")
			return nil, errors.Wrapf(err, "error executing custom action (groupResource=%s, namespace=%s, name=%s)", groupResource.String(), namespace, name)
		}
		obj = updatedItem
		for _, additionalItem := range additionalItemIdentifiers {
			gvr, resource, err := ib.discoveryHelper.ResourceFor(additionalItem.GroupResource.WithVersion(""))
			if err != nil {
				return nil, err
			}
			client, err := ib.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), resource, additionalItem.Namespace)
			if err != nil {
				return nil, err
			}
			// NOTE: this `:=` deliberately shadows the loop's additionalItem
			// identifier with the fetched unstructured object.
			additionalItem, err := client.Get(additionalItem.Name, metav1.GetOptions{})
			if err != nil {
				return nil, err
			}
			if err = ib.additionalItemBackupper.backupItem(log, additionalItem, gvr.GroupResource()); err != nil {
				return nil, err
			}
		}
	}
	return obj, nil
}
// blockStore instantiates and initializes a BlockStore given a VolumeSnapshotLocation,
// or returns an existing one if one's already been initialized for the location.
func (ib *defaultItemBackupper) blockStore(snapshotLocation *api.VolumeSnapshotLocation) (cloudprovider.BlockStore, error) {
	// cache hit: reuse the store previously initialized for this location
	if cached, ok := ib.snapshotLocationBlockStores[snapshotLocation.Name]; ok {
		return cached, nil
	}
	store, err := ib.blockStoreGetter.GetBlockStore(snapshotLocation.Spec.Provider)
	if err != nil {
		return nil, err
	}
	if err := store.Init(snapshotLocation.Spec.Config); err != nil {
		return nil, err
	}
	// lazily allocate the cache on first successful initialization
	if ib.snapshotLocationBlockStores == nil {
		ib.snapshotLocationBlockStores = make(map[string]cloudprovider.BlockStore)
	}
	ib.snapshotLocationBlockStores[snapshotLocation.Name] = store
	return store, nil
}
// zoneLabel is the well-known Kubernetes label that stores availability-zone
// info on PVs; its value is passed to the block store when querying volumes.
const zoneLabel = "failure-domain.beta.kubernetes.io/zone"
// takePVSnapshot triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
// backup has volume snapshots enabled and the PV is of a compatible type. Also records cloud
// disk type and IOPS (if applicable) to be able to restore to current state later.
// The snapshot result (success or failure) is appended to backupRequest.VolumeSnapshots.
func (ib *defaultItemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.FieldLogger) error {
	log.Info("Executing takePVSnapshot")
	if ib.backupRequest.Spec.SnapshotVolumes != nil && !*ib.backupRequest.Spec.SnapshotVolumes {
		log.Info("Backup has volume snapshots disabled; skipping volume snapshot action.")
		return nil
	}
	pv := new(corev1api.PersistentVolume)
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pv); err != nil {
		return errors.WithStack(err)
	}
	log = log.WithField("persistentVolume", pv.Name)
	// If this PV is claimed, see if we've already taken a (restic) snapshot of the contents
	// of this PV. If so, don't take a snapshot.
	if pv.Spec.ClaimRef != nil {
		if ib.resticSnapshotTracker.Has(pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name) {
			log.Info("Skipping Persistent Volume snapshot because volume has already been backed up.")
			return nil
		}
	}
	// the zone is informational for some providers; a missing label is not an error
	pvFailureDomainZone := pv.Labels[zoneLabel]
	if pvFailureDomainZone == "" {
		log.Infof("label %q is not present on PersistentVolume", zoneLabel)
	}
	var (
		volumeID, location string
		blockStore         cloudprovider.BlockStore
	)
	// try each configured snapshot location in order until one's block store
	// recognizes this volume; errors here are logged and skipped, not fatal
	for _, snapshotLocation := range ib.backupRequest.SnapshotLocations {
		log := log.WithField("volumeSnapshotLocation", snapshotLocation.Name)
		bs, err := ib.blockStore(snapshotLocation)
		if err != nil {
			log.WithError(err).Error("Error getting block store for volume snapshot location")
			continue
		}
		if volumeID, err = bs.GetVolumeID(obj); err != nil {
			log.WithError(err).Errorf("Error attempting to get volume ID for persistent volume")
			continue
		}
		if volumeID == "" {
			log.Infof("No volume ID returned by block store for persistent volume")
			continue
		}
		log.Infof("Got volume ID for persistent volume")
		blockStore = bs
		location = snapshotLocation.Name
		break
	}
	// no location's block store claimed the volume: nothing to snapshot
	if blockStore == nil {
		log.Info("PersistentVolume is not a supported volume type for snapshots, skipping.")
		return nil
	}
	log = log.WithField("volumeID", volumeID)
	tags := map[string]string{
		"velero.io/backup": ib.backupRequest.Name,
		"velero.io/pv":     pv.Name,
	}
	log.Info("Getting volume information")
	volumeType, iops, err := blockStore.GetVolumeInfo(volumeID, pvFailureDomainZone)
	if err != nil {
		log.WithError(err).Error("error getting volume info")
		return errors.WithMessage(err, "error getting volume info")
	}
	log.Info("Snapshotting PersistentVolume")
	snapshot := volumeSnapshot(ib.backupRequest.Backup, pv.Name, volumeID, volumeType, pvFailureDomainZone, location, iops)
	var errs []error
	snapshotID, err := blockStore.CreateSnapshot(snapshot.Spec.ProviderVolumeID, snapshot.Spec.VolumeAZ, tags)
	if err != nil {
		log.WithError(err).Error("error creating snapshot")
		errs = append(errs, errors.Wrap(err, "error taking snapshot of volume"))
		snapshot.Status.Phase = volume.SnapshotPhaseFailed
	} else {
		snapshot.Status.Phase = volume.SnapshotPhaseCompleted
		snapshot.Status.ProviderSnapshotID = snapshotID
	}
	// record the snapshot attempt (failed or completed) on the backup request
	ib.backupRequest.VolumeSnapshots = append(ib.backupRequest.VolumeSnapshots, snapshot)
	// nil errors are automatically removed
	return kubeerrs.NewAggregate(errs)
}
// volumeSnapshot assembles a volume.Snapshot record for the given backup and
// volume details, with its status phase initialized to New.
func volumeSnapshot(backup *api.Backup, volumeName, volumeID, volumeType, az, location string, iops *int64) *volume.Snapshot {
	spec := volume.SnapshotSpec{
		BackupName:           backup.Name,
		BackupUID:            string(backup.UID),
		Location:             location,
		PersistentVolumeName: volumeName,
		ProviderVolumeID:     volumeID,
		VolumeType:           volumeType,
		VolumeAZ:             az,
		VolumeIOPS:           iops,
	}
	status := volume.SnapshotStatus{Phase: volume.SnapshotPhaseNew}
	return &volume.Snapshot{Spec: spec, Status: status}
}
|
package completion
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/zerowidth/gh-shorthand/pkg/alfred"
"github.com/zerowidth/gh-shorthand/pkg/config"
"github.com/zerowidth/gh-shorthand/pkg/parser"
"github.com/zerowidth/gh-shorthand/pkg/rpc"
)
const (
	// rerunAfter defines how soon (in seconds) the alfred filter is invoked again.
	// This number is an ideal, so true delay must be measured externally.
	rerunAfter = 0.1
	// how long in seconds to wait before requesting repo title or issue details
	delay = 0.1
	// how long in seconds to wait before issuing a search query
	searchDelay = 0.5
	// how long in seconds to wait before listing recent issues in a repo
	issueListDelay = 1.0
)
// Used internally to collect the input and output for completion
type completion struct {
	cfg       config.Config       // the gh-shorthand config
	env       Environment         // the runtime environment from alfred
	result    alfred.FilterResult // the filter result being assembled
	input     string              // the user's query with the mode prefix stripped
	parsed    parser.Result       // parse of input against cfg's repo/user maps
	retry     bool                // for RPC calls on idle query input
	rpcClient rpc.Client          // client for the gh-shorthand RPC backend
}
// Complete runs the main completion code: it splits the query into mode and
// input, parses the input against the configured repo/user maps, builds the
// per-mode items, and finalizes the filter result (rerun/variables for RPC).
func Complete(cfg config.Config, env Environment) alfred.FilterResult {
	mode, input, ok := extractMode(env.Query)
	if !ok {
		// this didn't have a valid mode, just skip it.
		return alfred.NewFilterResult()
	}
	// "p" (projects) accepts a bare user/org with no repo
	bareUser := mode == "p"
	// with a default repo configured, a bare number shouldn't parse as an issue
	ignoreNumeric := len(cfg.DefaultRepo) > 0
	parsed := parser.Parse(cfg.RepoMap, cfg.UserMap, input, bareUser, ignoreNumeric)
	c := completion{
		cfg:       cfg,
		env:       env,
		result:    alfred.NewFilterResult(),
		input:     input,
		parsed:    parsed,
		rpcClient: rpc.NewClient(cfg.SocketPath),
	}
	c.appendParsedItems(mode)
	c.finalizeResult()
	return c.result
}
// given an input query, extract the mode and input string. returns false if
// mode+input is invalid.
//
// mode is an optional single character, followed by a space.
func extractMode(input string) (string, string, bool) {
	switch {
	case len(input) == 0:
		// empty query: "no mode", empty input
		return "", "", true
	case len(input) == 1:
		// a lone character is a mode with no input yet
		return input[0:1], "", true
	case input[0:1] == " ":
		// leading space: the space itself is the mode, rest is input
		return " ", input[1:], true
	case input[1:2] == " ":
		// "<mode> <input>"
		return input[0:1], input[2:], true
	default:
		// more than one character before the separating space: invalid
		return "", "", false
	}
}
// appendParsedItems builds the result items for the given mode:
// "" default items, " " open repo/issue/path, "i" issues, "p" projects,
// "n" new issue, "e"/"t" edit/terminal in a project dir, "s" global search.
// RPC-decorated items may set c.retry to request re-invocation.
func (c *completion) appendParsedItems(mode string) {
	fullInput := c.env.Query
	// fall back to the configured default repo when the input names nothing
	if !c.parsed.HasRepo() && len(c.cfg.DefaultRepo) > 0 && !c.parsed.HasOwner() && !c.parsed.HasPath() {
		c.parsed.SetRepo(c.cfg.DefaultRepo)
	}
	switch mode {
	case "": // no input, show default items
		c.result.AppendItems(
			defaultItems...,
		)
	case " ": // open repo, issue, and/or path
		// repo required, no query allowed
		if c.parsed.HasRepo() &&
			(c.parsed.HasIssue() || c.parsed.HasPath() || c.parsed.EmptyQuery()) {
			item := openRepoItem(c.parsed)
			// decorate with issue title or repo description via RPC
			if c.parsed.HasIssue() {
				c.retrieveIssue(&item)
			} else {
				c.retrieveRepo(&item)
			}
			c.result.AppendItems(item)
		}
		if !c.parsed.HasRepo() && !c.parsed.HasOwner() && c.parsed.HasPath() {
			c.result.AppendItems(openPathItem(c.parsed.Path()))
		}
		c.result.AppendItems(
			autocompleteItems(c.cfg, c.input, c.parsed,
				autocompleteOpenItem, autocompleteUserOpenItem, openEndedOpenItem)...)
	case "i":
		// repo required
		if c.parsed.HasRepo() {
			if c.parsed.EmptyQuery() {
				// no query: list the repo's recently-updated issues
				issuesItem := openIssuesItem(c.parsed)
				matches := c.retrieveRecentIssues(&issuesItem)
				c.result.AppendItems(issuesItem)
				c.result.AppendItems(searchIssuesItem(c.parsed, fullInput))
				c.result.AppendItems(matches...)
			} else {
				// query present: search issues in the repo
				searchItem := searchIssuesItem(c.parsed, fullInput)
				matches := c.retrieveIssueSearchItems(&searchItem, c.parsed.Repo(), c.parsed.Query, false)
				c.result.AppendItems(searchItem)
				c.result.AppendItems(matches...)
			}
		}
		c.result.AppendItems(
			autocompleteItems(c.cfg, c.input, c.parsed,
				autocompleteIssueItem, autocompleteUserIssueItem, openEndedIssueItem)...)
	case "p":
		// projects for a repo, or for a bare user/org
		if c.parsed.HasOwner() && (c.parsed.HasIssue() || c.parsed.EmptyQuery()) {
			if c.parsed.HasRepo() {
				item := repoProjectsItem(c.parsed)
				if c.parsed.HasIssue() {
					c.retrieveRepoProject(&item)
					c.result.AppendItems(item)
				} else {
					projects := c.retrieveRepoProjects(&item)
					c.result.AppendItems(item)
					c.result.AppendItems(projects...)
				}
			} else {
				item := orgProjectsItem(c.parsed)
				if c.parsed.HasIssue() {
					c.retrieveOrgProject(&item)
					c.result.AppendItems(item)
				} else {
					projects := c.retrieveOrgProjects(&item)
					c.result.AppendItems(item)
					c.result.AppendItems(projects...)
				}
			}
		}
		if !strings.Contains(c.input, " ") {
			c.result.AppendItems(
				autocompleteRepoItems(c.cfg, c.input, autocompleteProjectItem)...)
			c.result.AppendItems(
				autocompleteUserItems(c.cfg, c.input, c.parsed, false, autocompleteOrgProjectItem)...)
			if len(c.input) == 0 || c.parsed.Repo() != c.input {
				c.result.AppendItems(openEndedProjectItem(c.input))
			}
		}
	case "n":
		// repo required
		if c.parsed.HasRepo() {
			c.result.AppendItems(newIssueItem(c.parsed))
		}
		c.result.AppendItems(
			autocompleteItems(c.cfg, c.input, c.parsed,
				autocompleteNewIssueItem, autocompleteUserNewIssueItem, openEndedNewIssueItem)...)
	case "e":
		c.result.AppendItems(
			projectDirItems(c.cfg.ProjectDirMap(), c.input, modeEdit)...)
	case "t":
		c.result.AppendItems(
			projectDirItems(c.cfg.ProjectDirMap(), c.input, modeTerm)...)
	case "s":
		searchItem := globalIssueSearchItem(c.input)
		matches := c.retrieveIssueSearchItems(&searchItem, "", c.input, true)
		c.result.AppendItems(searchItem)
		c.result.AppendItems(matches...)
	}
}
// openRepoItem builds the "Open <repo>" result for a parsed repo reference,
// optionally narrowed to an issue number or a path within the repo.
func openRepoItem(parsed parser.Result) alfred.Item {
	var (
		uid   = "gh:" + parsed.Repo()
		title = "Open " + parsed.Repo()
		arg   = "open https://github.com/" + parsed.Repo()
		icon  = repoIcon
		mods  *alfred.Mods
	)
	if parsed.HasIssue() {
		uid += "#" + parsed.Issue()
		title += "#" + parsed.Issue()
		arg += "/issues/" + parsed.Issue()
		icon = issueIcon
		mods = issueMods(parsed.Repo(), parsed.Issue(), "")
	}
	if parsed.HasPath() {
		uid += parsed.Path()
		title += parsed.Path()
		arg += parsed.Path()
		icon = pathIcon
	}
	if !parsed.HasIssue() && !parsed.HasPath() {
		// bare repo: cmd modifier pastes a markdown link to the repo
		mods = repoMods(parsed.Repo())
	}
	title += parsed.Annotation()
	return alfred.Item{
		UID:   uid,
		Title: title,
		Arg:   arg,
		Valid: true,
		Icon:  icon,
		Mods:  mods,
	}
}
// openPathItem builds an item that opens an absolute path on github.com.
func openPathItem(path string) alfred.Item {
	item := alfred.Item{
		UID:   "gh:" + path,
		Title: "Open " + path,
		Arg:   "open https://github.com" + path,
		Valid: true,
		Icon:  pathIcon,
	}
	return item
}
// openIssuesItem builds the "List issues for <repo>" item linking to the
// repo's issues index.
func openIssuesItem(parsed parser.Result) alfred.Item {
	return alfred.Item{
		UID:   "ghi:" + parsed.Repo(),
		Title: "List issues for " + parsed.Repo() + parsed.Annotation(),
		Arg:   "open https://github.com/" + parsed.Repo() + "/issues",
		Valid: true,
		Icon:  issueListIcon,
	}
}
// searchIssuesItem builds an issue-search entry for the parsed repo. With a
// query present it links to the repo's issue search; otherwise it returns an
// invalid placeholder whose autocomplete restores the full search prefix.
func searchIssuesItem(parsed parser.Result, fullInput string) alfred.Item {
	extra := parsed.Annotation()
	if len(parsed.Query) == 0 {
		return alfred.Item{
			Title:        "Search issues in " + parsed.Repo() + extra + " for...",
			Valid:        false,
			Icon:         searchIcon,
			Autocomplete: fullInput + " ",
		}
	}
	escaped := url.PathEscape(parsed.Query)
	return alfred.Item{
		UID:   "ghis:" + parsed.Repo(),
		Title: "Search issues in " + parsed.Repo() + extra + " for " + parsed.Query,
		Arg:   "open https://github.com/" + parsed.Repo() + "/search?utf8=✓&type=Issues&q=" + escaped,
		Valid: true,
		Icon:  searchIcon,
	}
}
// repoProjectsItem builds either a "list projects" entry for a repo, or an
// "open project #N" entry when the parse carries a project number.
func repoProjectsItem(parsed parser.Result) alfred.Item {
	if !parsed.HasIssue() {
		return alfred.Item{
			UID:   "ghp:" + parsed.Repo(),
			Title: "List projects in " + parsed.Repo() + parsed.Annotation(),
			Valid: true,
			Arg:   "open https://github.com/" + parsed.Repo() + "/projects",
			Icon:  projectIcon,
		}
	}
	return alfred.Item{
		UID:   "ghp:" + parsed.Repo() + "/" + parsed.Issue(),
		Title: "Open project #" + parsed.Issue() + " in " + parsed.Repo() + parsed.Annotation(),
		Valid: true,
		Arg:   "open https://github.com/" + parsed.Repo() + "/projects/" + parsed.Issue(),
		Icon:  projectIcon,
	}
}
// orgProjectsItem builds either a "list projects" entry for a user/org, or an
// "open project #N" entry when the parse carries a project number.
func orgProjectsItem(parsed parser.Result) alfred.Item {
	if !parsed.HasIssue() {
		return alfred.Item{
			UID:   "ghp:" + parsed.User,
			Title: "List projects for " + parsed.User + parsed.Annotation(),
			Valid: true,
			Arg:   "open https://github.com/orgs/" + parsed.User + "/projects",
			Icon:  projectIcon,
		}
	}
	return alfred.Item{
		UID:   "ghp:" + parsed.User + "/" + parsed.Issue(),
		Title: "Open project #" + parsed.Issue() + " for " + parsed.User + parsed.Annotation(),
		Valid: true,
		Arg:   "open https://github.com/orgs/" + parsed.User + "/projects/" + parsed.Issue(),
		Icon:  projectIcon,
	}
}
// newIssueItem builds a "New issue in <repo>" entry; when the parse carries a
// query it is pre-filled as the issue title via the URL's title parameter.
func newIssueItem(parsed parser.Result) alfred.Item {
	title := "New issue in " + parsed.Repo()
	title += parsed.Annotation()
	if parsed.EmptyQuery() {
		return alfred.Item{
			UID:   "ghn:" + parsed.Repo(),
			Title: title,
			Arg:   "open https://github.com/" + parsed.Repo() + "/issues/new",
			Valid: true,
			Icon:  newIssueIcon,
		}
	}
	newIssueURL := "https://github.com/" + parsed.Repo() + "/issues/new?title=" + url.PathEscape(parsed.Query)
	return alfred.Item{
		UID:   "ghn:" + parsed.Repo(),
		Title: title + ": " + parsed.Query,
		Arg:   "open " + newIssueURL,
		Valid: true,
		Icon:  newIssueIcon,
	}
}
// globalIssueSearchItem builds a GitHub-wide issue search entry, or an
// invalid "Search issues for..." placeholder when there's no input yet.
func globalIssueSearchItem(input string) alfred.Item {
	if len(input) == 0 {
		return alfred.Item{
			Title:        "Search issues for...",
			Valid:        false,
			Icon:         searchIcon,
			Autocomplete: "s ",
		}
	}
	escaped := url.PathEscape(input)
	return alfred.Item{
		UID:   "ghs:",
		Title: "Search issues for " + input,
		Arg:   "open https://github.com/search?utf8=✓&type=Issues&q=" + escaped,
		Valid: true,
		Icon:  searchIcon,
	}
}
// autocompleteOpenItem builds an autocomplete entry expanding a repo
// shorthand key into " <key>" for opening the repo.
func autocompleteOpenItem(key, repo string) alfred.Item {
	return alfred.Item{
		UID:          "gh:" + repo,
		Title:        "Open " + repo + " (" + key + ")",
		Arg:          "open https://github.com/" + repo,
		Valid:        true,
		Autocomplete: " " + key,
		Icon:         repoIcon,
	}
}
// autocompleteUserOpenItem builds an autocomplete entry expanding a user
// shorthand key into " <key>/" for opening one of the user's repos.
func autocompleteUserOpenItem(key, user string) alfred.Item {
	return alfred.Item{
		Title:        "Open " + user + "/... (" + key + ")",
		Autocomplete: " " + key + "/",
		Icon:         repoIcon,
	}
}
// autocompleteIssueItem builds an autocomplete entry expanding a repo
// shorthand key into "i <key>" for listing the repo's issues.
func autocompleteIssueItem(key, repo string) alfred.Item {
	return alfred.Item{
		UID:          "ghi:" + repo,
		Title:        "List issues for " + repo + " (" + key + ")",
		Arg:          "open https://github.com/" + repo + "/issues",
		Valid:        true,
		Autocomplete: "i " + key,
		Icon:         issueListIcon,
	}
}
// autocompleteUserIssueItem builds an autocomplete entry expanding a user
// shorthand key into "i <key>/" for listing issues in one of the user's
// repos. (The parameter is a user, not a repo, matching the other
// autocompleteUser* builders.)
func autocompleteUserIssueItem(key, user string) alfred.Item {
	return alfred.Item{
		Title:        fmt.Sprintf("List issues for %s/... (%s)", user, key),
		Autocomplete: "i " + key + "/",
		Icon:         issueListIcon,
	}
}
// autocompleteProjectItem builds an autocomplete entry expanding a repo
// shorthand key into "p <key>" for listing the repo's projects.
func autocompleteProjectItem(key, repo string) alfred.Item {
	return alfred.Item{
		UID:          "ghp:" + repo,
		Title:        "List projects in " + repo + " (" + key + ")",
		Arg:          "open https://github.com/" + repo + "/projects",
		Valid:        true,
		Autocomplete: "p " + key,
		Icon:         projectIcon,
	}
}
// autocompleteOrgProjectItem builds an autocomplete entry expanding a user
// shorthand key into "p <key>" for listing the org's projects.
func autocompleteOrgProjectItem(key, user string) alfred.Item {
	return alfred.Item{
		UID:          "ghp:" + user,
		Title:        "List projects for " + user + " (" + key + ")",
		Arg:          "open https://github.com/orgs/" + user + "/projects",
		Valid:        true,
		Autocomplete: "p " + key,
		Icon:         projectIcon,
	}
}
// autocompleteNewIssueItem builds an autocomplete entry expanding a repo
// shorthand key into "n <key>" for opening the repo's new-issue page.
func autocompleteNewIssueItem(key, repo string) alfred.Item {
	return alfred.Item{
		UID:          "ghn:" + repo,
		Title:        "New issue in " + repo + " (" + key + ")",
		Arg:          "open https://github.com/" + repo + "/issues/new",
		Valid:        true,
		Autocomplete: "n " + key,
		Icon:         newIssueIcon,
	}
}
// autocompleteUserNewIssueItem builds an autocomplete entry expanding a user
// shorthand key into "n <key>/" for a new issue in one of the user's repos.
func autocompleteUserNewIssueItem(key, user string) alfred.Item {
	return alfred.Item{
		Title:        "New issue in " + user + "/... (" + key + ")",
		Autocomplete: "n " + key + "/",
		Icon:         newIssueIcon,
	}
}
// openEndedOpenItem is the invalid "keep typing" placeholder for open mode.
func openEndedOpenItem(input string) alfred.Item {
	return alfred.Item{
		Title:        "Open " + input + "...",
		Autocomplete: " " + input,
		Valid:        false,
		Icon:         repoIcon,
	}
}
// openEndedIssueItem is the invalid "keep typing" placeholder for issue mode.
func openEndedIssueItem(input string) alfred.Item {
	return alfred.Item{
		Title:        "List issues for " + input + "...",
		Autocomplete: "i " + input,
		Valid:        false,
		Icon:         issueListIcon,
	}
}
// openEndedProjectItem is the invalid "keep typing" placeholder for project mode.
func openEndedProjectItem(input string) alfred.Item {
	return alfred.Item{
		Title:        "List projects for " + input + "...",
		Autocomplete: "p " + input,
		Valid:        false,
		Icon:         projectIcon,
	}
}
// openEndedNewIssueItem is the invalid "keep typing" placeholder for new-issue mode.
func openEndedNewIssueItem(input string) alfred.Item {
	return alfred.Item{
		Title:        "New issue in " + input + "...",
		Autocomplete: "n " + input,
		Valid:        false,
		Icon:         newIssueIcon,
	}
}
// autocompleteItems assembles repo-key, user-key, and open-ended autocomplete
// entries for the given input, using the mode-specific item builders passed in.
// It returns nothing once the input contains a space (shorthand stage is over).
func autocompleteItems(cfg config.Config, input string, parsed parser.Result,
	autocompleteRepoItem func(string, string) alfred.Item,
	autocompleteUserItem func(string, string) alfred.Item,
	openEndedItem func(string) alfred.Item) alfred.Items {
	if strings.Contains(input, " ") {
		return nil
	}
	var items alfred.Items
	items = append(items, autocompleteRepoItems(cfg, input, autocompleteRepoItem)...)
	items = append(items, autocompleteUserItems(cfg, input, parsed, true, autocompleteUserItem)...)
	// only add the open-ended placeholder when the input isn't already a full repo
	if len(input) == 0 || parsed.Repo() != input {
		items = append(items, openEndedItem(input))
	}
	return items
}
// autocompleteRepoItems returns one entry per configured repo shorthand key
// that strictly extends the input prefix.
func autocompleteRepoItems(cfg config.Config, input string,
	autocompleteRepoItem func(string, string) alfred.Item) alfred.Items {
	var items alfred.Items
	if len(input) == 0 {
		return items
	}
	for key, repo := range cfg.RepoMap {
		// strict prefix: the key must be longer than the input
		if strings.HasPrefix(key, input) && len(key) > len(input) {
			items = append(items, autocompleteRepoItem(key, repo))
		}
	}
	return items
}
// autocompleteUserItems returns one entry per configured user shorthand key
// that strictly extends the input prefix, plus (when includeMatchedUser is
// set) the key already matched by the parser as long as no repo was parsed.
func autocompleteUserItems(cfg config.Config, input string,
	parsed parser.Result, includeMatchedUser bool,
	autocompleteUserItem func(string, string) alfred.Item) alfred.Items {
	var items alfred.Items
	if len(input) == 0 {
		return items
	}
	for key, user := range cfg.UserMap {
		prefixed := strings.HasPrefix(key, input) && len(key) > len(input)
		matched := includeMatchedUser && key == parsed.UserMatch && !parsed.HasRepo()
		if prefixed || matched {
			items = append(items, autocompleteUserItem(key, user))
		}
	}
	return items
}
// findProjectDirs lists the entries of root that are directories, including
// symlinks that resolve to directories (relative link targets are resolved
// against root). Symlinks that cannot be read, resolved, or stat'd are
// silently skipped. An error is returned only when root itself is unreadable.
func findProjectDirs(root string) ([]string, error) {
	entries, err := ioutil.ReadDir(root)
	if err != nil {
		return nil, err
	}
	var dirs []string
	for _, entry := range entries {
		if entry.IsDir() {
			dirs = append(dirs, entry.Name())
			continue
		}
		if entry.Mode()&os.ModeSymlink == 0 {
			continue // plain file: not a project dir
		}
		link, err := os.Readlink(path.Join(root, entry.Name()))
		if err != nil {
			continue
		}
		// resolve relative link targets against root
		if !path.IsAbs(link) {
			if link, err = filepath.Abs(path.Join(root, link)); err != nil {
				continue
			}
		}
		// keep the symlink only if it ultimately points at a directory
		if info, err := os.Stat(link); err == nil && info.IsDir() {
			dirs = append(dirs, entry.Name())
		}
	}
	return dirs, nil
}
// rpcRequest issues a query to the gh-shorthand RPC backend, debounced by
// delay: until the input has been idle at least `delay` seconds, no call is
// made and an incomplete result is returned with c.retry set so the script
// is re-invoked. With no socket configured, RPC is treated as complete.
func (c *completion) rpcRequest(path, query string, delay float64) rpc.Result {
	if len(c.cfg.SocketPath) == 0 {
		return rpc.Result{Complete: true} // RPC isn't enabled, don't worry about it
	}
	// not idle long enough: request a re-run instead of querying now
	if c.env.Duration().Seconds() < delay {
		c.retry = true
		return rpc.Result{Complete: false}
	}
	res := c.rpcClient.Query(path, query)
	// incomplete but error-free: the backend is still working, so retry
	if !res.Complete && len(res.Error) == 0 {
		c.retry = true
	}
	return res
}
// ellipsis appends 0-3 dots to prefix, cycling every 250ms of the given
// duration, to animate "in progress" subtitles across script re-runs.
func ellipsis(prefix string, duration time.Duration) string {
	dots := int((duration.Nanoseconds() / 250000000) % 4)
	return prefix + strings.Repeat(".", dots)
}
// retrieveRepo adds the repo description to the "open repo" item
// using an RPC call. On success it also attaches a ctrl modifier that
// pastes a Markdown link including the description.
func (c *completion) retrieveRepo(item *alfred.Item) {
	res := c.rpcRequest("/repo", c.parsed.Repo(), delay)
	if len(res.Error) > 0 {
		item.Subtitle = res.Error
		return
	}
	// backend still working (or debounced): show the animated placeholder
	if !res.Complete {
		item.Subtitle = ellipsis("Retrieving description", c.env.Duration())
		return
	}
	if len(res.Repos) == 0 {
		item.Subtitle = "rpc error: missing repo in result"
		return
	}
	item.Subtitle = res.Repos[0].Description
	if item.Mods != nil {
		item.Mods.Ctrl = &alfred.ModItem{
			Valid: true,
			Arg: fmt.Sprintf("paste [%s: %s](https://github.com/%s)",
				c.parsed.Repo(), res.Repos[0].Description, c.parsed.Repo()),
			Subtitle: fmt.Sprintf("Insert Markdown link with description to %s",
				c.parsed.Repo()),
			Icon: markdownIcon,
		}
	}
}
// retrieveIssue adds the title and state to an "open issue" item via RPC:
// the original title moves to the subtitle, the issue's title becomes the
// title, the icon reflects issue/PR state, and a ctrl modifier pastes a
// Markdown link including the title.
func (c *completion) retrieveIssue(item *alfred.Item) {
	res := c.rpcRequest("/issue", c.parsed.Repo()+"#"+c.parsed.Issue(), delay)
	if len(res.Error) > 0 {
		item.Subtitle = res.Error
		return
	} else if c.retry {
		// still waiting (debounced or backend busy): show placeholder
		item.Subtitle = ellipsis("Retrieving issue title", c.env.Duration())
		return
	} else if len(res.Issues) == 0 {
		item.Subtitle = "rpc error: missing issue in result"
		return
	}
	issue := res.Issues[0]
	item.Subtitle = item.Title
	item.Title = issue.Title
	item.Icon = issueStateIcon(issue.Type, issue.State)
	if item.Mods != nil {
		item.Mods.Ctrl = &alfred.ModItem{
			Valid: true,
			Arg: fmt.Sprintf("paste [%s#%s: %s](https://github.com/%s/issues/%s)",
				c.parsed.Repo(), c.parsed.Issue(), issue.Title, c.parsed.Repo(), c.parsed.Issue()),
			Subtitle: fmt.Sprintf("Insert Markdown link with description to %s#%s",
				c.parsed.Repo(), c.parsed.Issue()),
			Icon: markdownIcon,
		}
	}
}
// retrieveRepoProject decorates a single repo project item ("repo/number")
// with its name and state via RPC.
func (c *completion) retrieveRepoProject(item *alfred.Item) {
	c.retrieveProject(item, c.parsed.Repo()+"/"+c.parsed.Issue())
}
// retrieveOrgProject decorates a single org project item ("user/number")
// with its name and state via RPC.
func (c *completion) retrieveOrgProject(item *alfred.Item) {
	c.retrieveProject(item, c.parsed.User+"/"+c.parsed.Issue())
}
// retrieveProject fetches a single project's name and state via RPC and
// applies them to the item: the original title moves to the subtitle, the
// project name becomes the title, and the icon reflects the project state.
func (c *completion) retrieveProject(item *alfred.Item, query string) {
	res := c.rpcRequest("/project", query, delay)
	if len(res.Error) > 0 {
		item.Subtitle = res.Error
		return
	} else if c.retry {
		// still waiting (debounced or backend busy): show placeholder
		item.Subtitle = ellipsis("Retrieving project name", c.env.Duration())
		return
	} else if len(res.Projects) == 0 {
		item.Subtitle = "rpc error: missing project in result"
		return
	}
	project := res.Projects[0]
	item.Subtitle = item.Title
	item.Title = project.Name
	item.Icon = projectStateIcon(project.State)
}
// retrieveOrgProjects lists the parsed user/org's projects via RPC.
func (c *completion) retrieveOrgProjects(item *alfred.Item) alfred.Items {
	return c.retrieveProjects(item, c.parsed.User)
}
// retrieveRepoProjects lists the parsed repo's projects via RPC.
func (c *completion) retrieveRepoProjects(item *alfred.Item) alfred.Items {
	return c.retrieveProjects(item, c.parsed.Repo())
}
// retrieveProjects fetches the project list for query via RPC, updating the
// header item's subtitle with progress/error state and returning one item
// per project found.
func (c *completion) retrieveProjects(item *alfred.Item, query string) (projects alfred.Items) {
	res := c.rpcRequest("/projects", query, delay)
	if len(res.Error) > 0 {
		item.Subtitle = res.Error
		return
	} else if c.retry {
		item.Subtitle = ellipsis("Retrieving projects", c.env.Duration())
		return
	} else if len(res.Projects) == 0 {
		item.Subtitle = "No projects found"
		return
	}
	// NOTE(review): the description always uses parsed.Repo(), even when this
	// is reached via retrieveOrgProjects with a user query — confirm whether
	// org project subtitles should say "in <user>" instead.
	projects = append(projects, projectItemsFromProjects(res.Projects, "in "+c.parsed.Repo())...)
	return
}
// projectItemsFromProjects converts RPC project records into alfred items
// that open the project URL. The items deliberately carry no UID so alfred
// doesn't remember them.
func projectItemsFromProjects(projects []rpc.Project, desc string) alfred.Items {
	var items alfred.Items
	for i := range projects {
		p := &projects[i]
		items = append(items, alfred.Item{
			Title:    p.Name,
			Subtitle: fmt.Sprintf("Open project #%d %s", p.Number, desc),
			Valid:    true,
			Arg:      "open " + p.URL,
			Icon:     projectStateIcon(p.State),
		})
	}
	return items
}
// retrieveIssueSearchItems runs an issue search, scoping the query with a
// "repo:" qualifier when a repo is given, and returns matching issue items.
func (c *completion) retrieveIssueSearchItems(item *alfred.Item, repo, query string, includeRepo bool) alfred.Items {
	if len(repo) > 0 {
		query += " repo:" + repo + " "
	}
	return c.searchIssues(item, query, includeRepo, searchDelay)
}
// retrieveRecentIssues lists the parsed repo's recently-updated issues,
// using the longer issue-list debounce rather than the search debounce.
func (c *completion) retrieveRecentIssues(item *alfred.Item) alfred.Items {
	return c.searchIssues(item, "repo:"+c.parsed.Repo()+" sort:updated-desc", false, issueListDelay)
}
// searchIssues runs an RPC issue search for query and returns matching issue
// items. The header item's subtitle is updated with error/progress/empty
// state; an invalid header item (placeholder with no query) disables the
// search entirely.
func (c *completion) searchIssues(item *alfred.Item, query string, includeRepo bool, delay float64) alfred.Items {
	var items alfred.Items
	if !item.Valid {
		// placeholder item: nothing to search for yet
		return items
	}
	res := c.rpcRequest("/issues", query, delay)
	if len(res.Error) > 0 {
		item.Subtitle = res.Error
		return items
	} else if c.retry {
		item.Subtitle = ellipsis("Searching issues", c.env.Duration())
		return items
	} else if len(res.Issues) == 0 {
		item.Subtitle = "No issues found"
		return items
	}
	items = append(items, issueItemsFromIssues(res.Issues, includeRepo)...)
	return items
}
// issueItemsFromIssues converts RPC issue records into alfred items, opening
// the issue or pull-request URL as appropriate. With includeRepo set, the
// repo name is prefixed to each title. Items deliberately carry no UID so
// alfred doesn't remember them.
func issueItemsFromIssues(issues []rpc.Issue, includeRepo bool) alfred.Items {
	var items alfred.Items
	for _, issue := range issues {
		title := fmt.Sprintf("#%s %s", issue.Number, issue.Title)
		if includeRepo {
			title = issue.Repo + title
		}
		// issues and PRs live under different URL segments
		segment := "pull"
		if issue.Type == "Issue" {
			segment = "issues"
		}
		items = append(items, alfred.Item{
			Title:    title,
			Subtitle: fmt.Sprintf("Open %s#%s", issue.Repo, issue.Number),
			Valid:    true,
			Arg:      "open https://github.com/" + issue.Repo + "/" + segment + "/" + issue.Number,
			Icon:     issueStateIcon(issue.Type, issue.State),
			Mods:     issueMods(issue.Repo, issue.Number, issue.Title),
		})
	}
	return items
}
// repoMods builds the modifier actions for a bare repo item: cmd pastes a
// Markdown link to the repo.
func repoMods(repo string) *alfred.Mods {
	link := fmt.Sprintf("paste [%s](https://github.com/%s)", repo, repo)
	return &alfred.Mods{
		Cmd: &alfred.ModItem{
			Valid:    true,
			Arg:      link,
			Subtitle: fmt.Sprintf("Insert Markdown link to %s", repo),
			Icon:     markdownIcon,
		},
	}
}
// issueMods builds the modifier actions for an issue reference: cmd pastes a
// Markdown link, alt pastes a plain repo#number reference, and, when a title
// is known, ctrl pastes a Markdown link including the title.
func issueMods(repo, number, title string) *alfred.Mods {
	mods := &alfred.Mods{
		Cmd: &alfred.ModItem{
			Valid:    true,
			Arg:      fmt.Sprintf("paste [%s#%s](https://github.com/%s/issues/%s)", repo, number, repo, number),
			Subtitle: fmt.Sprintf("Insert Markdown link to %s#%s", repo, number),
			Icon:     markdownIcon,
		},
		Alt: &alfred.ModItem{
			Valid:    true,
			Arg:      fmt.Sprintf("paste %s#%s", repo, number),
			Subtitle: fmt.Sprintf("Insert issue reference to %s#%s", repo, number),
			Icon:     issueIcon,
		},
	}
	if len(title) > 0 {
		mods.Ctrl = &alfred.ModItem{
			Valid:    true,
			Arg:      fmt.Sprintf("paste [%s#%s: %s](https://github.com/%s/issues/%s)", repo, number, title, repo, number),
			Subtitle: fmt.Sprintf("Insert Markdown link with description to %s#%s", repo, number),
			Icon:     markdownIcon,
		}
	}
	return mods
}
// ErrorItem returns an error message entry to display in alfred.
func ErrorItem(title, subtitle string) alfred.Item {
	item := alfred.Item{
		Title:    title,
		Subtitle: subtitle,
		Icon:     octicon("alert"),
		Valid:    false,
	}
	return item
}
func (c *completion) finalizeResult() {
// automatically set "open <url>" urls to copy/large text
for i, item := range c.result.Items {
if item.Text == nil && strings.HasPrefix(item.Arg, "open ") {
url := item.Arg[5:]
c.result.Items[i].Text = &alfred.Text{Copy: url, LargeType: url}
}
}
// if any RPC-decorated items require a re-invocation of the script, save that
// information in the environment for the next time
if c.retry {
c.result.SetVariable("query", c.env.Query)
c.result.SetVariable("s", fmt.Sprintf("%d", c.env.Start.Unix()))
c.result.SetVariable("ns", fmt.Sprintf("%d", c.env.Start.Nanosecond()))
c.result.Rerun = rerunAfter
}
}
Split up completion struct definition with more docs
package completion
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/zerowidth/gh-shorthand/pkg/alfred"
"github.com/zerowidth/gh-shorthand/pkg/config"
"github.com/zerowidth/gh-shorthand/pkg/parser"
"github.com/zerowidth/gh-shorthand/pkg/rpc"
)
const (
// rerunAfter defines how soon the alfred filter is invoked again.
// This number is an ideal, so true delay must be measured externally.
rerunAfter = 0.1
// how long in seconds to wait before requesting repo title or issue details
delay = 0.1
// how long to wait before issuing a search query
searchDelay = 0.5
// how long to wait before listing recent issues in a repo
issueListDelay = 1.0
)
// Used internally to collect the input and output for completion
type completion struct {
// input
cfg config.Config // the gh-shorthand config
env Environment // the runtime environment from alfred
input string // the input string from the user (minus mode)
rpcClient rpc.Client
// intermediate processing:
parsed parser.Result
// output
result alfred.FilterResult // the final assembled result
retry bool // should this script be re-invoked? (for RPC)
}
// Complete runs the main completion code
func Complete(cfg config.Config, env Environment) alfred.FilterResult {
mode, input, ok := extractMode(env.Query)
if !ok {
// this didn't have a valid mode, just skip it.
return alfred.NewFilterResult()
}
bareUser := mode == "p"
ignoreNumeric := len(cfg.DefaultRepo) > 0
parsed := parser.Parse(cfg.RepoMap, cfg.UserMap, input, bareUser, ignoreNumeric)
c := completion{
cfg: cfg,
env: env,
result: alfred.NewFilterResult(),
input: input,
parsed: parsed,
rpcClient: rpc.NewClient(cfg.SocketPath),
}
c.appendParsedItems(mode)
c.finalizeResult()
return c.result
}
// extractMode splits an alfred query into a mode character and the remaining
// input. The mode is an optional single character followed by a space; a
// leading space is itself the "open" mode and consumes only that one
// character. It reports ok=false when the second character is not a space.
func extractMode(input string) (string, string, bool) {
	switch {
	case len(input) == 0:
		// nothing typed yet: no mode, empty input
		return "", "", true
	case len(input) == 1:
		// a bare mode character with no input after it
		return input[0:1], "", true
	default:
		mode := input[0:1]
		if mode == " " {
			// space mode: everything after the first character is input
			return mode, input[1:], true
		}
		if input[1:2] != " " {
			// not a mode followed by a space: invalid query
			return "", "", false
		}
		return mode, input[2:], true
	}
}
// appendParsedItems fills c.result with the items for the given mode, using
// the parsed input (repo, user, issue, path, query) and decorating items with
// RPC-retrieved details where available.
//
// Modes: "" default items; " " open repo/issue/path; "i" issues; "p" projects;
// "n" new issue; "e"/"t" edit/terminal for project dirs; "s" global search.
func (c *completion) appendParsedItems(mode string) {
	fullInput := c.env.Query
	// fall back to the configured default repo when nothing more specific
	// (repo, owner, or path) was parsed from the input
	if !c.parsed.HasRepo() && len(c.cfg.DefaultRepo) > 0 && !c.parsed.HasOwner() && !c.parsed.HasPath() {
		c.parsed.SetRepo(c.cfg.DefaultRepo)
	}
	switch mode {
	case "": // no input, show default items
		c.result.AppendItems(
			defaultItems...,
		)
	case " ": // open repo, issue, and/or path
		// repo required, no query allowed
		if c.parsed.HasRepo() &&
			(c.parsed.HasIssue() || c.parsed.HasPath() || c.parsed.EmptyQuery()) {
			item := openRepoItem(c.parsed)
			if c.parsed.HasIssue() {
				c.retrieveIssue(&item)
			} else {
				c.retrieveRepo(&item)
			}
			c.result.AppendItems(item)
		}
		// a bare path with no repo/owner opens directly on github.com
		if !c.parsed.HasRepo() && !c.parsed.HasOwner() && c.parsed.HasPath() {
			c.result.AppendItems(openPathItem(c.parsed.Path()))
		}
		c.result.AppendItems(
			autocompleteItems(c.cfg, c.input, c.parsed,
				autocompleteOpenItem, autocompleteUserOpenItem, openEndedOpenItem)...)
	case "i":
		// repo required
		if c.parsed.HasRepo() {
			if c.parsed.EmptyQuery() {
				// no query: list the repo's recent issues plus a search stub
				issuesItem := openIssuesItem(c.parsed)
				matches := c.retrieveRecentIssues(&issuesItem)
				c.result.AppendItems(issuesItem)
				c.result.AppendItems(searchIssuesItem(c.parsed, fullInput))
				c.result.AppendItems(matches...)
			} else {
				// query given: search issues within the repo
				searchItem := searchIssuesItem(c.parsed, fullInput)
				matches := c.retrieveIssueSearchItems(&searchItem, c.parsed.Repo(), c.parsed.Query, false)
				c.result.AppendItems(searchItem)
				c.result.AppendItems(matches...)
			}
		}
		c.result.AppendItems(
			autocompleteItems(c.cfg, c.input, c.parsed,
				autocompleteIssueItem, autocompleteUserIssueItem, openEndedIssueItem)...)
	case "p":
		if c.parsed.HasOwner() && (c.parsed.HasIssue() || c.parsed.EmptyQuery()) {
			if c.parsed.HasRepo() {
				// repo projects: a single project when a number was given,
				// otherwise the repo's project list
				item := repoProjectsItem(c.parsed)
				if c.parsed.HasIssue() {
					c.retrieveRepoProject(&item)
					c.result.AppendItems(item)
				} else {
					projects := c.retrieveRepoProjects(&item)
					c.result.AppendItems(item)
					c.result.AppendItems(projects...)
				}
			} else {
				// org projects: same shape as the repo branch above
				item := orgProjectsItem(c.parsed)
				if c.parsed.HasIssue() {
					c.retrieveOrgProject(&item)
					c.result.AppendItems(item)
				} else {
					projects := c.retrieveOrgProjects(&item)
					c.result.AppendItems(item)
					c.result.AppendItems(projects...)
				}
			}
		}
		// autocomplete only while the user is still typing the first token
		if !strings.Contains(c.input, " ") {
			c.result.AppendItems(
				autocompleteRepoItems(c.cfg, c.input, autocompleteProjectItem)...)
			c.result.AppendItems(
				autocompleteUserItems(c.cfg, c.input, c.parsed, false, autocompleteOrgProjectItem)...)
			if len(c.input) == 0 || c.parsed.Repo() != c.input {
				c.result.AppendItems(openEndedProjectItem(c.input))
			}
		}
	case "n":
		// repo required
		if c.parsed.HasRepo() {
			c.result.AppendItems(newIssueItem(c.parsed))
		}
		c.result.AppendItems(
			autocompleteItems(c.cfg, c.input, c.parsed,
				autocompleteNewIssueItem, autocompleteUserNewIssueItem, openEndedNewIssueItem)...)
	case "e":
		// open a matching project directory in the editor
		c.result.AppendItems(
			projectDirItems(c.cfg.ProjectDirMap(), c.input, modeEdit)...)
	case "t":
		// open a matching project directory in a terminal
		c.result.AppendItems(
			projectDirItems(c.cfg.ProjectDirMap(), c.input, modeTerm)...)
	case "s":
		// global issue search across all repos
		searchItem := globalIssueSearchItem(c.input)
		matches := c.retrieveIssueSearchItems(&searchItem, "", c.input, true)
		c.result.AppendItems(searchItem)
		c.result.AppendItems(matches...)
	}
}
// openRepoItem builds the "Open owner/repo" action item, extending the UID,
// title, URL, and icon when the parse result also carries an issue number or
// a path, and attaching the matching markdown-paste modifiers.
func openRepoItem(parsed parser.Result) alfred.Item {
	var (
		uid   = "gh:" + parsed.Repo()
		title = "Open " + parsed.Repo()
		arg   = "open https://github.com/" + parsed.Repo()
		icon  = repoIcon
		mods  *alfred.Mods
	)
	switch {
	case parsed.HasIssue():
		uid += "#" + parsed.Issue()
		title += "#" + parsed.Issue()
		arg += "/issues/" + parsed.Issue()
		icon = issueIcon
		mods = issueMods(parsed.Repo(), parsed.Issue(), "")
	case !parsed.HasPath():
		// plain repo link: offer repo-level markdown modifiers
		mods = repoMods(parsed.Repo())
	}
	if parsed.HasPath() {
		uid += parsed.Path()
		title += parsed.Path()
		arg += parsed.Path()
		icon = pathIcon
	}
	title += parsed.Annotation()
	return alfred.Item{
		UID:   uid,
		Title: title,
		Arg:   arg,
		Valid: true,
		Icon:  icon,
		Mods:  mods,
	}
}
func openPathItem(path string) alfred.Item {
return alfred.Item{
UID: "gh:" + path,
Title: fmt.Sprintf("Open %s", path),
Arg: "open https://github.com" + path,
Valid: true,
Icon: pathIcon,
}
}
func openIssuesItem(parsed parser.Result) (item alfred.Item) {
return alfred.Item{
UID: "ghi:" + parsed.Repo(),
Title: "List issues for " + parsed.Repo() + parsed.Annotation(),
Arg: "open https://github.com/" + parsed.Repo() + "/issues",
Valid: true,
Icon: issueListIcon,
}
}
// searchIssuesItem creates an item to search issues within a repo. With a
// query present it links directly to the search results; otherwise it offers
// an autocompletion stub to type one.
func searchIssuesItem(parsed parser.Result, fullInput string) alfred.Item {
	extra := parsed.Annotation()
	if len(parsed.Query) == 0 {
		return alfred.Item{
			Title:        "Search issues in " + parsed.Repo() + extra + " for...",
			Valid:        false,
			Icon:         searchIcon,
			Autocomplete: fullInput + " ",
		}
	}
	// QueryEscape, not PathEscape: the query lands in a URL query string,
	// where '&', '+', and '=' must be percent-encoded or they split the
	// parameter.
	escaped := url.QueryEscape(parsed.Query)
	arg := "open https://github.com/" + parsed.Repo() + "/search?utf8=✓&type=Issues&q=" + escaped
	return alfred.Item{
		UID:   "ghis:" + parsed.Repo(),
		Title: "Search issues in " + parsed.Repo() + extra + " for " + parsed.Query,
		Arg:   arg,
		Valid: true,
		Icon:  searchIcon,
	}
}
func repoProjectsItem(parsed parser.Result) alfred.Item {
if parsed.HasIssue() {
return alfred.Item{
UID: "ghp:" + parsed.Repo() + "/" + parsed.Issue(),
Title: "Open project #" + parsed.Issue() + " in " + parsed.Repo() + parsed.Annotation(),
Valid: true,
Arg: "open https://github.com/" + parsed.Repo() + "/projects/" + parsed.Issue(),
Icon: projectIcon,
}
}
return alfred.Item{
UID: "ghp:" + parsed.Repo(),
Title: "List projects in " + parsed.Repo() + parsed.Annotation(),
Valid: true,
Arg: "open https://github.com/" + parsed.Repo() + "/projects",
Icon: projectIcon,
}
}
func orgProjectsItem(parsed parser.Result) alfred.Item {
if parsed.HasIssue() {
return alfred.Item{
UID: "ghp:" + parsed.User + "/" + parsed.Issue(),
Title: "Open project #" + parsed.Issue() + " for " + parsed.User + parsed.Annotation(),
Valid: true,
Arg: "open https://github.com/orgs/" + parsed.User + "/projects/" + parsed.Issue(),
Icon: projectIcon,
}
}
return alfred.Item{
UID: "ghp:" + parsed.User,
Title: "List projects for " + parsed.User + parsed.Annotation(),
Valid: true,
Arg: "open https://github.com/orgs/" + parsed.User + "/projects",
Icon: projectIcon,
}
}
// newIssueItem creates an item to open a new issue in a repo, pre-filling
// the issue title from the query when one was given.
func newIssueItem(parsed parser.Result) alfred.Item {
	title := "New issue in " + parsed.Repo()
	title += parsed.Annotation()
	if parsed.EmptyQuery() {
		return alfred.Item{
			UID:   "ghn:" + parsed.Repo(),
			Title: title,
			Arg:   "open https://github.com/" + parsed.Repo() + "/issues/new",
			Valid: true,
			Icon:  newIssueIcon,
		}
	}
	// QueryEscape, not PathEscape: the title is a query-string parameter, so
	// '&' and '=' in the typed title must be percent-encoded.
	escaped := url.QueryEscape(parsed.Query)
	arg := "open https://github.com/" + parsed.Repo() + "/issues/new?title=" + escaped
	return alfred.Item{
		UID:   "ghn:" + parsed.Repo(),
		Title: title + ": " + parsed.Query,
		Arg:   arg,
		Valid: true,
		Icon:  newIssueIcon,
	}
}
// globalIssueSearchItem creates an item that searches issues across all of
// GitHub, or an autocompletion stub when no query has been typed yet.
func globalIssueSearchItem(input string) alfred.Item {
	if len(input) == 0 {
		return alfred.Item{
			Title:        "Search issues for...",
			Valid:        false,
			Icon:         searchIcon,
			Autocomplete: "s ",
		}
	}
	// QueryEscape, not PathEscape: the search text is a query-string
	// parameter and must have '&', '+', and '=' percent-encoded.
	escaped := url.QueryEscape(input)
	arg := "open https://github.com/search?utf8=✓&type=Issues&q=" + escaped
	return alfred.Item{
		UID:   "ghs:",
		Title: "Search issues for " + input,
		Arg:   arg,
		Valid: true,
		Icon:  searchIcon,
	}
}
func autocompleteOpenItem(key, repo string) alfred.Item {
return alfred.Item{
UID: "gh:" + repo,
Title: fmt.Sprintf("Open %s (%s)", repo, key),
Arg: "open https://github.com/" + repo,
Valid: true,
Autocomplete: " " + key,
Icon: repoIcon,
}
}
func autocompleteUserOpenItem(key, user string) alfred.Item {
return alfred.Item{
Title: fmt.Sprintf("Open %s/... (%s)", user, key),
Autocomplete: " " + key + "/",
Icon: repoIcon,
}
}
func autocompleteIssueItem(key, repo string) alfred.Item {
return alfred.Item{
UID: "ghi:" + repo,
Title: fmt.Sprintf("List issues for %s (%s)", repo, key),
Arg: "open https://github.com/" + repo + "/issues",
Valid: true,
Autocomplete: "i " + key,
Icon: issueListIcon,
}
}
func autocompleteUserIssueItem(key, repo string) alfred.Item {
return alfred.Item{
Title: fmt.Sprintf("List issues for %s/... (%s)", repo, key),
Autocomplete: "i " + key + "/",
Icon: issueListIcon,
}
}
func autocompleteProjectItem(key, repo string) alfred.Item {
return alfred.Item{
UID: "ghp:" + repo,
Title: fmt.Sprintf("List projects in %s (%s)", repo, key),
Arg: "open https://github.com/" + repo + "/projects",
Valid: true,
Autocomplete: "p " + key,
Icon: projectIcon,
}
}
func autocompleteOrgProjectItem(key, user string) alfred.Item {
return alfred.Item{
UID: "ghp:" + user,
Title: fmt.Sprintf("List projects for %s (%s)", user, key),
Arg: "open https://github.com/orgs/" + user + "/projects",
Valid: true,
Autocomplete: "p " + key,
Icon: projectIcon,
}
}
func autocompleteNewIssueItem(key, repo string) alfred.Item {
return alfred.Item{
UID: "ghn:" + repo,
Title: fmt.Sprintf("New issue in %s (%s)", repo, key),
Arg: "open https://github.com/" + repo + "/issues/new",
Valid: true,
Autocomplete: "n " + key,
Icon: newIssueIcon,
}
}
func autocompleteUserNewIssueItem(key, user string) alfred.Item {
return alfred.Item{
Title: fmt.Sprintf("New issue in %s/... (%s)", user, key),
Autocomplete: "n " + key + "/",
Icon: newIssueIcon,
}
}
func openEndedOpenItem(input string) alfred.Item {
return alfred.Item{
Title: fmt.Sprintf("Open %s...", input),
Autocomplete: " " + input,
Valid: false,
Icon: repoIcon,
}
}
func openEndedIssueItem(input string) alfred.Item {
return alfred.Item{
Title: fmt.Sprintf("List issues for %s...", input),
Autocomplete: "i " + input,
Valid: false,
Icon: issueListIcon,
}
}
func openEndedProjectItem(input string) alfred.Item {
return alfred.Item{
Title: fmt.Sprintf("List projects for %s...", input),
Autocomplete: "p " + input,
Valid: false,
Icon: projectIcon,
}
}
func openEndedNewIssueItem(input string) alfred.Item {
return alfred.Item{
Title: fmt.Sprintf("New issue in %s...", input),
Autocomplete: "n " + input,
Valid: false,
Icon: newIssueIcon,
}
}
func autocompleteItems(cfg config.Config, input string, parsed parser.Result,
autocompleteRepoItem func(string, string) alfred.Item,
autocompleteUserItem func(string, string) alfred.Item,
openEndedItem func(string) alfred.Item) (items alfred.Items) {
if strings.Contains(input, " ") {
return
}
items = append(items,
autocompleteRepoItems(cfg, input, autocompleteRepoItem)...)
items = append(items,
autocompleteUserItems(cfg, input, parsed, true, autocompleteUserItem)...)
if len(input) == 0 || parsed.Repo() != input {
items = append(items, openEndedItem(input))
}
return
}
func autocompleteRepoItems(cfg config.Config, input string,
autocompleteRepoItem func(string, string) alfred.Item) (items alfred.Items) {
if len(input) > 0 {
for key, repo := range cfg.RepoMap {
if strings.HasPrefix(key, input) && len(key) > len(input) {
items = append(items, autocompleteRepoItem(key, repo))
}
}
}
return
}
func autocompleteUserItems(cfg config.Config, input string,
parsed parser.Result, includeMatchedUser bool,
autocompleteUserItem func(string, string) alfred.Item) (items alfred.Items) {
if len(input) > 0 {
for key, user := range cfg.UserMap {
prefixed := strings.HasPrefix(key, input) && len(key) > len(input)
matched := includeMatchedUser && key == parsed.UserMatch && !parsed.HasRepo()
if prefixed || matched {
items = append(items, autocompleteUserItem(key, user))
}
}
}
return
}
// findProjectDirs lists the names of directories directly under root,
// including symlinks that resolve to directories. Unreadable or dangling
// symlinks are skipped silently. It returns an error only when root itself
// cannot be read.
func findProjectDirs(root string) (dirs []string, err error) {
	entries, err := ioutil.ReadDir(root)
	if err != nil {
		return nil, err
	}
	for _, entry := range entries {
		if entry.IsDir() {
			dirs = append(dirs, entry.Name())
			continue
		}
		if entry.Mode()&os.ModeSymlink == 0 {
			continue
		}
		// resolve the symlink; filepath (not path) so separators are
		// correct for the host OS
		link, err := os.Readlink(filepath.Join(root, entry.Name()))
		if err != nil {
			continue
		}
		if !filepath.IsAbs(link) {
			if link, err = filepath.Abs(filepath.Join(root, link)); err != nil {
				continue
			}
		}
		// keep the symlink only if its target is a directory
		if info, err := os.Stat(link); err == nil && info.IsDir() {
			dirs = append(dirs, entry.Name())
		}
	}
	return dirs, nil
}
// rpcRequest issues a query against the gh-shorthand RPC backend. It returns
// an immediately-complete empty result when RPC is disabled, defers the
// request (marking the script for re-invocation) until the input has been
// pending for at least `delay` seconds, and flags a retry when the backend
// reports an incomplete but error-free result.
func (c *completion) rpcRequest(path, query string, delay float64) rpc.Result {
	if len(c.cfg.SocketPath) == 0 {
		return rpc.Result{Complete: true} // RPC isn't enabled, don't worry about it
	}
	// debounce: don't hit the backend until the query has been stable long enough
	if c.env.Duration().Seconds() < delay {
		c.retry = true
		return rpc.Result{Complete: false}
	}
	res := c.rpcClient.Query(path, query)
	// incomplete but not failed: have alfred re-run the script to poll again
	if !res.Complete && len(res.Error) == 0 {
		c.retry = true
	}
	return res
}
// ellipsis animates a trailing "", ".", "..", "..." suffix on prefix,
// advancing one step for every 250ms of the elapsed duration and wrapping
// every second.
func ellipsis(prefix string, duration time.Duration) string {
	dots := (duration.Nanoseconds() / 250000000) % 4
	var b strings.Builder
	b.WriteString(prefix)
	for i := int64(0); i < dots; i++ {
		b.WriteByte('.')
	}
	return b.String()
}
// retrieveRepo adds the repo description to the "open repo" item
// using an RPC call.
func (c *completion) retrieveRepo(item *alfred.Item) {
res := c.rpcRequest("/repo", c.parsed.Repo(), delay)
if len(res.Error) > 0 {
item.Subtitle = res.Error
return
}
if !res.Complete {
item.Subtitle = ellipsis("Retrieving description", c.env.Duration())
return
}
if len(res.Repos) == 0 {
item.Subtitle = "rpc error: missing repo in result"
return
}
item.Subtitle = res.Repos[0].Description
if item.Mods != nil {
item.Mods.Ctrl = &alfred.ModItem{
Valid: true,
Arg: fmt.Sprintf("paste [%s: %s](https://github.com/%s)",
c.parsed.Repo(), res.Repos[0].Description, c.parsed.Repo()),
Subtitle: fmt.Sprintf("Insert Markdown link with description to %s",
c.parsed.Repo()),
Icon: markdownIcon,
}
}
}
// retrieveIssue adds the title and state to an "open issue" item
func (c *completion) retrieveIssue(item *alfred.Item) {
res := c.rpcRequest("/issue", c.parsed.Repo()+"#"+c.parsed.Issue(), delay)
if len(res.Error) > 0 {
item.Subtitle = res.Error
return
} else if c.retry {
item.Subtitle = ellipsis("Retrieving issue title", c.env.Duration())
return
} else if len(res.Issues) == 0 {
item.Subtitle = "rpc error: missing issue in result"
return
}
issue := res.Issues[0]
item.Subtitle = item.Title
item.Title = issue.Title
item.Icon = issueStateIcon(issue.Type, issue.State)
if item.Mods != nil {
item.Mods.Ctrl = &alfred.ModItem{
Valid: true,
Arg: fmt.Sprintf("paste [%s#%s: %s](https://github.com/%s/issues/%s)",
c.parsed.Repo(), c.parsed.Issue(), issue.Title, c.parsed.Repo(), c.parsed.Issue()),
Subtitle: fmt.Sprintf("Insert Markdown link with description to %s#%s",
c.parsed.Repo(), c.parsed.Issue()),
Icon: markdownIcon,
}
}
}
func (c *completion) retrieveRepoProject(item *alfred.Item) {
c.retrieveProject(item, c.parsed.Repo()+"/"+c.parsed.Issue())
}
func (c *completion) retrieveOrgProject(item *alfred.Item) {
c.retrieveProject(item, c.parsed.User+"/"+c.parsed.Issue())
}
func (c *completion) retrieveProject(item *alfred.Item, query string) {
res := c.rpcRequest("/project", query, delay)
if len(res.Error) > 0 {
item.Subtitle = res.Error
return
} else if c.retry {
item.Subtitle = ellipsis("Retrieving project name", c.env.Duration())
return
} else if len(res.Projects) == 0 {
item.Subtitle = "rpc error: missing project in result"
return
}
project := res.Projects[0]
item.Subtitle = item.Title
item.Title = project.Name
item.Icon = projectStateIcon(project.State)
}
func (c *completion) retrieveOrgProjects(item *alfred.Item) alfred.Items {
return c.retrieveProjects(item, c.parsed.User)
}
func (c *completion) retrieveRepoProjects(item *alfred.Item) alfred.Items {
return c.retrieveProjects(item, c.parsed.Repo())
}
func (c *completion) retrieveProjects(item *alfred.Item, query string) (projects alfred.Items) {
res := c.rpcRequest("/projects", query, delay)
if len(res.Error) > 0 {
item.Subtitle = res.Error
return
} else if c.retry {
item.Subtitle = ellipsis("Retrieving projects", c.env.Duration())
return
} else if len(res.Projects) == 0 {
item.Subtitle = "No projects found"
return
}
projects = append(projects, projectItemsFromProjects(res.Projects, "in "+c.parsed.Repo())...)
return
}
func projectItemsFromProjects(projects []rpc.Project, desc string) alfred.Items {
var items alfred.Items
for _, project := range projects {
// no UID so alfred doesn't remember these
items = append(items, alfred.Item{
Title: project.Name,
Subtitle: fmt.Sprintf("Open project #%d %s", project.Number, desc),
Valid: true,
Arg: "open " + project.URL,
Icon: projectStateIcon(project.State),
})
}
return items
}
func (c *completion) retrieveIssueSearchItems(item *alfred.Item, repo, query string, includeRepo bool) alfred.Items {
if len(repo) > 0 {
query += " repo:" + repo + " "
}
return c.searchIssues(item, query, includeRepo, searchDelay)
}
func (c *completion) retrieveRecentIssues(item *alfred.Item) alfred.Items {
return c.searchIssues(item, "repo:"+c.parsed.Repo()+" sort:updated-desc", false, issueListDelay)
}
func (c *completion) searchIssues(item *alfred.Item, query string, includeRepo bool, delay float64) alfred.Items {
var items alfred.Items
if !item.Valid {
return items
}
res := c.rpcRequest("/issues", query, delay)
if len(res.Error) > 0 {
item.Subtitle = res.Error
return items
} else if c.retry {
item.Subtitle = ellipsis("Searching issues", c.env.Duration())
return items
} else if len(res.Issues) == 0 {
item.Subtitle = "No issues found"
return items
}
items = append(items, issueItemsFromIssues(res.Issues, includeRepo)...)
return items
}
// issueItemsFromIssues converts RPC issue results into alfred items, one per
// issue or pull request. When includeRepo is set, each title is prefixed with
// its repo name. Items intentionally carry no UID so alfred doesn't remember
// them across invocations.
func issueItemsFromIssues(issues []rpc.Issue, includeRepo bool) alfred.Items {
	var items alfred.Items
	for _, issue := range issues {
		title := fmt.Sprintf("#%s %s", issue.Number, issue.Title)
		if includeRepo {
			title = issue.Repo + title
		}
		// issues and pull requests live under different URL segments
		segment := "pull"
		if issue.Type == "Issue" {
			segment = "issues"
		}
		items = append(items, alfred.Item{
			Title:    title,
			Subtitle: fmt.Sprintf("Open %s#%s", issue.Repo, issue.Number),
			Valid:    true,
			Arg:      "open https://github.com/" + issue.Repo + "/" + segment + "/" + issue.Number,
			Icon:     issueStateIcon(issue.Type, issue.State),
			Mods:     issueMods(issue.Repo, issue.Number, issue.Title),
		})
	}
	return items
}
func repoMods(repo string) *alfred.Mods {
return &alfred.Mods{
Cmd: &alfred.ModItem{
Valid: true,
Arg: fmt.Sprintf("paste [%s](https://github.com/%s)", repo, repo),
Subtitle: fmt.Sprintf("Insert Markdown link to %s", repo),
Icon: markdownIcon,
},
}
}
func issueMods(repo, number, title string) *alfred.Mods {
mods := &alfred.Mods{
Cmd: &alfred.ModItem{
Valid: true,
Arg: fmt.Sprintf("paste [%s#%s](https://github.com/%s/issues/%s)", repo, number, repo, number),
Subtitle: fmt.Sprintf("Insert Markdown link to %s#%s", repo, number),
Icon: markdownIcon,
},
Alt: &alfred.ModItem{
Valid: true,
Arg: fmt.Sprintf("paste %s#%s", repo, number),
Subtitle: fmt.Sprintf("Insert issue reference to %s#%s", repo, number),
Icon: issueIcon,
},
}
if len(title) > 0 {
mods.Ctrl = &alfred.ModItem{
Valid: true,
Arg: fmt.Sprintf("paste [%s#%s: %s](https://github.com/%s/issues/%s)", repo, number, title, repo, number),
Subtitle: fmt.Sprintf("Insert Markdown link with description to %s#%s", repo, number),
Icon: markdownIcon,
}
}
return mods
}
// ErrorItem returns a non-actionable alfred entry used to surface an error
// message in the results list.
func ErrorItem(title, subtitle string) alfred.Item {
	item := alfred.Item{
		Icon:  octicon("alert"),
		Valid: false,
	}
	item.Title = title
	item.Subtitle = subtitle
	return item
}
// finalizeResult post-processes the assembled items: every "open <url>" item
// gets copy/large-type text set to its URL, and when an RPC call is still
// pending, the query and start time are stashed as variables so the re-run
// script can resume the same request.
func (c *completion) finalizeResult() {
	for i := range c.result.Items {
		item := &c.result.Items[i]
		if item.Text != nil || !strings.HasPrefix(item.Arg, "open ") {
			continue
		}
		url := strings.TrimPrefix(item.Arg, "open ")
		item.Text = &alfred.Text{Copy: url, LargeType: url}
	}
	if !c.retry {
		return
	}
	c.result.SetVariable("query", c.env.Query)
	c.result.SetVariable("s", fmt.Sprintf("%d", c.env.Start.Unix()))
	c.result.SetVariable("ns", fmt.Sprintf("%d", c.env.Start.Nanosecond()))
	c.result.Rerun = rerunAfter
}
|
package httphelper
import (
"encoding/json"
"fmt"
"log"
"net/http"
)
type ErrorCode string
const (
NotFoundError ErrorCode = "not_found"
ObjectNotFoundError = "object_not_found"
ObjectExistsError = "object_exists"
SyntaxError = "syntax_error"
ValidationError = "validation_error"
UnknownError = "unknown_error"
)
var errorResponseCodes = map[ErrorCode]int{
NotFoundError: 404,
ObjectNotFoundError: 404,
ObjectExistsError: 409,
SyntaxError: 400,
ValidationError: 400,
UnknownError: 500,
}
type JSONError struct {
Code ErrorCode `json:"code"`
Message string `json:"message"`
Detail json.RawMessage `json:"detail,omitempty"`
}
func (jsonError JSONError) Error() string {
return fmt.Sprintf("%s: %s", jsonError.Code, jsonError.Message)
}
func Error(w http.ResponseWriter, err error) {
var jsonError JSONError
switch err.(type) {
case *json.SyntaxError, *json.UnmarshalTypeError:
jsonError = JSONError{
Code: SyntaxError,
Message: "The provided JSON input is invalid",
}
case JSONError:
jsonError = err.(JSONError)
case *JSONError:
jsonError = *err.(*JSONError)
default:
log.Println(err)
jsonError = JSONError{
Code: UnknownError,
Message: "Something went wrong",
}
}
responseCode, ok := errorResponseCodes[jsonError.Code]
if !ok {
responseCode = 500
}
JSON(w, responseCode, jsonError)
}
func JSON(w http.ResponseWriter, status int, v interface{}) {
var result []byte
var err error
result, err = json.Marshal(v)
if err != nil {
http.Error(w, err.Error(), 500)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(status)
w.Write(result)
}
httphelper: Add CORSAllowAllHandler
Signed-off-by: Jesse Stuart <a5c95b3d7cb4d0ae05a15c79c79ab458dc2c8f9e@jessestuart.ca>
package httphelper
import (
"encoding/json"
"fmt"
"log"
"net/http"
"time"
"github.com/flynn/flynn/pkg/cors"
)
type ErrorCode string
// Known ErrorCode values for JSONError.Code; each maps to an HTTP status in
// errorResponseCodes. All constants are explicitly typed ErrorCode so they
// keep their type when boxed into an interface (the original left all but
// the first as untyped string constants).
const (
	NotFoundError       ErrorCode = "not_found"
	ObjectNotFoundError ErrorCode = "object_not_found"
	ObjectExistsError   ErrorCode = "object_exists"
	SyntaxError         ErrorCode = "syntax_error"
	ValidationError     ErrorCode = "validation_error"
	UnknownError        ErrorCode = "unknown_error"
)
var errorResponseCodes = map[ErrorCode]int{
NotFoundError: 404,
ObjectNotFoundError: 404,
ObjectExistsError: 409,
SyntaxError: 400,
ValidationError: 400,
UnknownError: 500,
}
type JSONError struct {
Code ErrorCode `json:"code"`
Message string `json:"message"`
Detail json.RawMessage `json:"detail,omitempty"`
}
// CORSAllowAllHandler is cross-origin middleware permitting credentialed
// requests from any origin with the common REST methods and headers, an
// exposed ETag header, and a one-hour preflight cache.
var CORSAllowAllHandler = cors.Allow(&cors.Options{
	AllowAllOrigins:  true,
	AllowMethods:     []string{"GET", "POST", "PUT", "PATCH", "DELETE", "HEAD"},
	AllowHeaders:     []string{"Authorization", "Accept", "Content-Type", "If-Match", "If-None-Match"},
	ExposeHeaders:    []string{"ETag"},
	AllowCredentials: true,
	MaxAge:           time.Hour,
})
// Error implements the error interface as "<code>: <message>".
func (jsonError JSONError) Error() string {
	return string(jsonError.Code) + ": " + jsonError.Message
}
// Error writes err to w as a JSON error response, mapping known error codes
// to HTTP status codes (unrecognized codes become 500). JSON decode errors
// become a syntax_error; anything unrecognized is logged and reported as a
// generic unknown_error so internal details don't leak to the client.
func Error(w http.ResponseWriter, err error) {
	var jsonError JSONError
	// bind the asserted value in the type switch instead of re-asserting in
	// each case, the idiomatic form
	switch e := err.(type) {
	case *json.SyntaxError, *json.UnmarshalTypeError:
		jsonError = JSONError{
			Code:    SyntaxError,
			Message: "The provided JSON input is invalid",
		}
	case JSONError:
		jsonError = e
	case *JSONError:
		jsonError = *e
	default:
		log.Println(err)
		jsonError = JSONError{
			Code:    UnknownError,
			Message: "Something went wrong",
		}
	}
	responseCode, ok := errorResponseCodes[jsonError.Code]
	if !ok {
		responseCode = 500
	}
	JSON(w, responseCode, jsonError)
}
// JSON marshals v and writes it to w with the given HTTP status and a JSON
// content type. A marshalling failure is reported as a plain-text 500 and
// nothing else is written.
func JSON(w http.ResponseWriter, status int, v interface{}) {
	result, err := json.Marshal(v)
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	// header must be set before WriteHeader commits the response
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	w.Write(result)
}
|
package http
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"time"
"github.com/golang/glog"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
extv1beta1listers "k8s.io/client-go/listers/extensions/v1beta1"
"github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha1"
"github.com/jetstack/cert-manager/pkg/issuer/acme/http/solver"
)
const (
// HTTP01Timeout is the max amount of time to wait for an HTTP01 challenge
// to succeed
HTTP01Timeout = time.Minute * 15
// acmeSolverListenPort is the port acmesolver should listen on
acmeSolverListenPort = 8089
// orderURLLabelKey is the key used for the order URL label on resources
// created by the HTTP01 solver
certNameLabelKey = "certmanager.k8s.io/certificate"
orderURLAnnotationKey = "certmanager.k8s.io/acme-order-url"
domainLabelKey = "certmanager.k8s.io/acme-http-domain"
)
var (
certificateGvk = v1alpha1.SchemeGroupVersion.WithKind("Certificate")
)
// Solver is an implementation of the acme http-01 challenge solver protocol
type Solver struct {
issuer v1alpha1.GenericIssuer
client kubernetes.Interface
solverImage string
podLister corev1listers.PodLister
serviceLister corev1listers.ServiceLister
ingressLister extv1beta1listers.IngressLister
testReachability reachabilityTest
requiredPasses int
}
type reachabilityTest func(ctx context.Context, domain, path, key string) (bool, error)
// NewSolver returns a new ACME HTTP01 solver for the given Issuer and client.
// TODO: refactor this to have fewer args
func NewSolver(issuer v1alpha1.GenericIssuer, client kubernetes.Interface, podLister corev1listers.PodLister, serviceLister corev1listers.ServiceLister, ingressLister extv1beta1listers.IngressLister, solverImage string) *Solver {
return &Solver{
issuer: issuer,
client: client,
podLister: podLister,
serviceLister: serviceLister,
ingressLister: ingressLister,
solverImage: solverImage,
testReachability: testReachability,
requiredPasses: 5,
}
}
// Present will realise the resources required to solve the given HTTP01
// challenge validation in the apiserver. If those resources already exist, it
// will return nil (i.e. this function is idempotent).
func (s *Solver) Present(ctx context.Context, crt *v1alpha1.Certificate, domain, token, key string) error {
_, podErr := s.ensurePod(crt, domain, token, key)
svc, svcErr := s.ensureService(crt, domain, token, key)
if svcErr != nil {
return utilerrors.NewAggregate([]error{podErr, svcErr})
}
_, ingressErr := s.ensureIngress(crt, svc.Name, domain, token)
return utilerrors.NewAggregate([]error{podErr, svcErr, ingressErr})
}
// Check polls the challenge URL and reports success only after the expected
// key has been served for requiredPasses consecutive attempts; the first
// failed attempt returns false immediately. The overall wait is bounded by
// HTTP01Timeout via the context passed to testReachability.
func (s *Solver) Check(domain, token, key string) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), HTTP01Timeout)
	defer cancel()
	for i := 0; i < s.requiredPasses; i++ {
		ok, err := s.testReachability(ctx, domain, fmt.Sprintf("%s/%s", solver.HTTPChallengePath, token), key)
		if err != nil {
			return false, err
		}
		if !ok {
			return false, nil
		}
		// brief pause between passes to confirm the endpoint stays up
		time.Sleep(time.Second * 2)
	}
	return true, nil
}
// CleanUp will ensure the created service, ingress and pod are clean/deleted of any
// cert-manager created data.
func (s *Solver) CleanUp(ctx context.Context, crt *v1alpha1.Certificate, domain, token, key string) error {
var errs []error
errs = append(errs, s.cleanupPods(crt, domain))
errs = append(errs, s.cleanupServices(crt, domain))
errs = append(errs, s.cleanupIngresses(crt, domain, token))
return utilerrors.NewAggregate(errs)
}
// testReachability will attempt to connect to the 'domain' with 'path' and
// check if the returned body equals 'key'. It returns true only when the
// response is a 200 whose body matches the expected key.
//
// NOTE(review): ctx is accepted but not wired into the request; consider
// http.NewRequest + req.WithContext(ctx) so cancellation is respected.
func testReachability(ctx context.Context, domain, path, key string) (bool, error) {
	u := &url.URL{
		Scheme: "http",
		Host:   domain,
		Path:   path,
	}
	response, err := http.Get(u.String())
	if err != nil {
		return false, err
	}
	// close the body on every path, including the non-200 early return below
	// (the original leaked it there)
	defer response.Body.Close()
	if response.StatusCode != http.StatusOK {
		// TODO: log this elsewhere
		glog.Infof("wrong status code '%d'", response.StatusCode)
		return false, nil
	}
	presentedKey, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return false, err
	}
	if string(presentedKey) != key {
		glog.Infof("presented key (%s) did not match expected (%s)", presentedKey, key)
		return false, nil
	}
	// all checks passed (the original returned false here, so the challenge
	// self-check could never succeed)
	return true, nil
}
Fix testReachability
package http
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"time"
"github.com/golang/glog"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
extv1beta1listers "k8s.io/client-go/listers/extensions/v1beta1"
"github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha1"
"github.com/jetstack/cert-manager/pkg/issuer/acme/http/solver"
)
const (
// HTTP01Timeout is the max amount of time to wait for an HTTP01 challenge
// to succeed
HTTP01Timeout = time.Minute * 15
// acmeSolverListenPort is the port acmesolver should listen on
acmeSolverListenPort = 8089
// orderURLLabelKey is the key used for the order URL label on resources
// created by the HTTP01 solver
certNameLabelKey = "certmanager.k8s.io/certificate"
orderURLAnnotationKey = "certmanager.k8s.io/acme-order-url"
domainLabelKey = "certmanager.k8s.io/acme-http-domain"
)
var (
certificateGvk = v1alpha1.SchemeGroupVersion.WithKind("Certificate")
)
// Solver is an implementation of the acme http-01 challenge solver protocol
type Solver struct {
issuer v1alpha1.GenericIssuer
client kubernetes.Interface
solverImage string
podLister corev1listers.PodLister
serviceLister corev1listers.ServiceLister
ingressLister extv1beta1listers.IngressLister
testReachability reachabilityTest
requiredPasses int
}
type reachabilityTest func(ctx context.Context, domain, path, key string) (bool, error)
// NewSolver returns a new ACME HTTP01 solver for the given Issuer and client.
// TODO: refactor this to have fewer args
func NewSolver(issuer v1alpha1.GenericIssuer, client kubernetes.Interface, podLister corev1listers.PodLister, serviceLister corev1listers.ServiceLister, ingressLister extv1beta1listers.IngressLister, solverImage string) *Solver {
	s := &Solver{
		issuer:        issuer,
		client:        client,
		solverImage:   solverImage,
		podLister:     podLister,
		serviceLister: serviceLister,
		ingressLister: ingressLister,
	}
	// Use the package-level reachability check, and require 5 consecutive
	// successful self-checks before declaring the challenge ready.
	s.testReachability = testReachability
	s.requiredPasses = 5
	return s
}
// Present will realise the resources required to solve the given HTTP01
// challenge validation in the apiserver. If those resources already exist, it
// will return nil (i.e. this function is idempotent).
func (s *Solver) Present(ctx context.Context, crt *v1alpha1.Certificate, domain, token, key string) error {
	var errs []error
	_, podErr := s.ensurePod(crt, domain, token, key)
	errs = append(errs, podErr)
	svc, svcErr := s.ensureService(crt, domain, token, key)
	errs = append(errs, svcErr)
	// The ingress references the service by name, so we cannot attempt to
	// create it if ensuring the service failed.
	if svcErr != nil {
		return utilerrors.NewAggregate(errs)
	}
	_, ingressErr := s.ensureIngress(crt, svc.Name, domain, token)
	errs = append(errs, ingressErr)
	return utilerrors.NewAggregate(errs)
}
// Check performs s.requiredPasses consecutive reachability self-checks that
// the challenge response served for 'domain' at the well-known path for
// 'token' matches 'key'. It returns (false, nil) as soon as a pass fails, and
// propagates any transport error from the reachability test. The whole run is
// bounded by HTTP01Timeout.
func (s *Solver) Check(domain, token, key string) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), HTTP01Timeout)
	defer cancel()
	for i := 0; i < s.requiredPasses; i++ {
		ok, err := s.testReachability(ctx, domain, fmt.Sprintf("%s/%s", solver.HTTPChallengePath, token), key)
		if err != nil {
			return false, err
		}
		if !ok {
			return false, nil
		}
		// Wait between passes — but not after the final one (the original
		// slept an extra 2s before returning success), and abort the wait
		// early if the overall timeout expires.
		if i < s.requiredPasses-1 {
			select {
			case <-ctx.Done():
				return false, ctx.Err()
			case <-time.After(time.Second * 2):
			}
		}
	}
	return true, nil
}
// CleanUp will ensure the created service, ingress and pod are clean/deleted of any
// cert-manager created data.
func (s *Solver) CleanUp(ctx context.Context, crt *v1alpha1.Certificate, domain, token, key string) error {
	// Attempt every cleanup step regardless of earlier failures, then report
	// all errors together.
	errs := []error{
		s.cleanupPods(crt, domain),
		s.cleanupServices(crt, domain),
		s.cleanupIngresses(crt, domain, token),
	}
	return utilerrors.NewAggregate(errs)
}
// testReachability will attempt to connect to the 'domain' with 'path' and
// check if the returned body equals 'key'.
//
// It returns (false, nil) for "soft" failures that should be retried (wrong
// status code, wrong body) and a non-nil error for transport failures. The
// request is bound to ctx so the caller's timeout/cancellation is honoured
// (the original ignored ctx entirely).
func testReachability(ctx context.Context, domain, path, key string) (bool, error) {
	url := &url.URL{
		Scheme: "http",
		Host:   domain,
		Path:   path,
	}
	req, err := http.NewRequest(http.MethodGet, url.String(), nil)
	if err != nil {
		return false, err
	}
	req = req.WithContext(ctx)
	response, err := http.DefaultClient.Do(req)
	if err != nil {
		return false, err
	}
	// Close the body on every return path — the original deferred the Close
	// after the status-code check, leaking the body on non-200 responses.
	defer response.Body.Close()
	if response.StatusCode != http.StatusOK {
		// TODO: log this elsewhere
		glog.Infof("wrong status code '%d'", response.StatusCode)
		return false, nil
	}
	presentedKey, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return false, err
	}
	if string(presentedKey) != key {
		glog.Infof("presented key (%s) did not match expected (%s)", presentedKey, key)
		return false, nil
	}
	return true, nil
}
|
package matterclient
import (
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/v5/model"
)
// parseResponse handles incoming websocket responses. These are mostly
// ping/status replies, so log them at debug rather than info level to avoid
// flooding the log.
func (m *Client) parseResponse(rmsg *model.WebSocketResponse) {
	m.logger.Debugf("getting response: %#v", rmsg)
}
// DeleteMessage removes the post with the given ID.
func (m *Client) DeleteMessage(postID string) error {
	if _, resp := m.Client.DeletePost(postID); resp.Error != nil {
		return resp.Error
	}
	return nil
}
// EditMessage replaces the text of an existing post and returns its ID.
func (m *Client) EditMessage(postID string, text string) (string, error) {
	updated, resp := m.Client.UpdatePost(postID, &model.Post{Message: text, Id: postID})
	if resp.Error != nil {
		return "", resp.Error
	}
	return updated.Id, nil
}
// GetFileLinks returns links for the given file IDs. When the server refuses
// to hand out a public link, a direct API URL is constructed instead.
func (m *Client) GetFileLinks(filenames []string) []string {
	scheme := "https://"
	if m.NoTLS {
		scheme = "http://"
	}
	var links []string
	for _, id := range filenames {
		link, resp := m.Client.GetFileLink(id)
		if resp.Error == nil {
			links = append(links, link)
			continue
		}
		// public links is probably disabled, create the link ourselves
		links = append(links, scheme+m.Credentials.Server+model.API_URL_SUFFIX_V4+"/files/"+id)
	}
	return links
}
// GetPosts fetches up to 'limit' posts from a channel, retrying while the
// server is rate limiting us. Returns nil on a non-ratelimit error.
func (m *Client) GetPosts(channelID string, limit int) *model.PostList {
	for {
		list, resp := m.Client.GetPostsForChannel(channelID, 0, limit, "")
		if resp.Error != nil {
			if err := m.HandleRatelimit("GetPostsForChannel", resp); err != nil {
				return nil
			}
			continue
		}
		return list
	}
}
// GetPostsSince fetches the posts in a channel newer than the given
// timestamp, retrying while rate limited. Returns nil on other errors.
func (m *Client) GetPostsSince(channelID string, time int64) *model.PostList {
	for {
		list, resp := m.Client.GetPostsSince(channelID, time)
		if resp.Error != nil {
			if err := m.HandleRatelimit("GetPostsSince", resp); err != nil {
				return nil
			}
			continue
		}
		return list
	}
}
// GetPublicLink returns the public link for a file, or "" when the lookup
// fails.
func (m *Client) GetPublicLink(filename string) string {
	link, resp := m.Client.GetFileLink(filename)
	if resp.Error == nil {
		return link
	}
	return ""
}
// GetPublicLinks returns public links for the given files, silently skipping
// any file the server refuses to link.
func (m *Client) GetPublicLinks(filenames []string) []string {
	var links []string
	for _, name := range filenames {
		if link, resp := m.Client.GetFileLink(name); resp.Error == nil {
			links = append(links, link)
		}
	}
	return links
}
// PostMessage creates a post in a channel (threaded under rootID when set)
// and returns the new post ID, retrying while rate limited.
func (m *Client) PostMessage(channelID string, text string, rootID string) (string, error) {
	newPost := &model.Post{
		ChannelId: channelID,
		Message:   text,
		RootId:    rootID,
	}
	for {
		created, resp := m.Client.CreatePost(newPost)
		if resp.Error != nil {
			if err := m.HandleRatelimit("CreatePost", resp); err != nil {
				return "", err
			}
			continue
		}
		return created.Id, nil
	}
}
// PostMessageWithFiles creates a post with attached (already uploaded) files
// and returns the new post ID, retrying while rate limited.
func (m *Client) PostMessageWithFiles(channelID string, text string, rootID string, fileIds []string) (string, error) {
	newPost := &model.Post{
		ChannelId: channelID,
		Message:   text,
		RootId:    rootID,
		FileIds:   fileIds,
	}
	for {
		created, resp := m.Client.CreatePost(newPost)
		if resp.Error != nil {
			if err := m.HandleRatelimit("CreatePost", resp); err != nil {
				return "", err
			}
			continue
		}
		return created.Id, nil
	}
}
// SearchPosts runs a (non-OR) post search within the current team; returns
// nil on error.
func (m *Client) SearchPosts(query string) *model.PostList {
	list, resp := m.Client.SearchPosts(m.Team.ID, query, false)
	if resp.Error == nil {
		return list
	}
	return nil
}
// SendDirectMessage sends a direct message to specified user
func (m *Client) SendDirectMessage(toUserID string, msg string, rootID string) error {
	// Delegate to SendDirectMessageProps with no additional message props.
	return m.SendDirectMessageProps(toUserID, msg, rootID, nil)
}
// SendDirectMessageProps sends a direct message (with optional message props)
// to the specified user, creating the DM channel first if needed. Both the
// channel creation and the post are retried while rate limited.
func (m *Client) SendDirectMessageProps(toUserID string, msg string, rootID string, props map[string]interface{}) error {
	m.logger.Debugf("SendDirectMessage to %s, msg %s", toUserID, msg)
	for {
		// create DM channel (only happens on first message)
		_, resp := m.Client.CreateDirectChannel(m.User.Id, toUserID)
		if resp.Error == nil {
			break
		}
		if err := m.HandleRatelimit("CreateDirectChannel", resp); err != nil {
			m.logger.Debugf("SendDirectMessage to %#v failed: %s", toUserID, err)
			return err
		}
	}
	channelName := model.GetDMNameFromIds(toUserID, m.User.Id)
	// update our channels
	// (best effort: a failure is logged but does not abort the send)
	if err := m.UpdateChannels(); err != nil {
		m.logger.Errorf("failed to update channels: %#v", err)
	}
	// build & send the message
	// strip carriage returns so CRLF input doesn't leak into the post body
	msg = strings.ReplaceAll(msg, "\r", "")
	post := &model.Post{
		ChannelId: m.GetChannelID(channelName, m.Team.ID),
		Message:   msg,
		RootId:    rootID,
	}
	post.SetProps(props)
	// retry the post while we are being rate limited
	for {
		_, resp := m.Client.CreatePost(post)
		if resp.Error == nil {
			return nil
		}
		if err := m.HandleRatelimit("CreatePost", resp); err != nil {
			return err
		}
	}
}
// UploadFile uploads data to a channel under the given filename and returns
// the resulting file ID.
func (m *Client) UploadFile(data []byte, channelID string, filename string) (string, error) {
	f, resp := m.Client.UploadFile(data, channelID, filename)
	if resp.Error != nil {
		return "", resp.Error
	}
	// Guard against an empty FileInfos slice so a malformed (but non-error)
	// server response cannot panic with an index-out-of-range.
	if len(f.FileInfos) == 0 {
		return "", fmt.Errorf("uploaded file %q but server returned no file info", filename)
	}
	return f.FileInfos[0].Id, nil
}
Move ping responses to debug loglevel
package matterclient
import (
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/v5/model"
)
// parseResponse handles an incoming websocket response; currently it is only
// logged (at debug level, since these are mostly ping/status replies).
func (m *Client) parseResponse(rmsg *model.WebSocketResponse) {
	m.logger.Debugf("getting response: %#v", rmsg)
}
// DeleteMessage removes the post with the given ID.
func (m *Client) DeleteMessage(postID string) error {
	if _, resp := m.Client.DeletePost(postID); resp.Error != nil {
		return resp.Error
	}
	return nil
}
// EditMessage replaces the text of an existing post and returns its ID.
func (m *Client) EditMessage(postID string, text string) (string, error) {
	updated, resp := m.Client.UpdatePost(postID, &model.Post{Message: text, Id: postID})
	if resp.Error != nil {
		return "", resp.Error
	}
	return updated.Id, nil
}
// GetFileLinks returns links for the given file IDs. When the server refuses
// to hand out a public link, a direct API URL is constructed instead.
func (m *Client) GetFileLinks(filenames []string) []string {
	scheme := "https://"
	if m.NoTLS {
		scheme = "http://"
	}
	var links []string
	for _, id := range filenames {
		link, resp := m.Client.GetFileLink(id)
		if resp.Error == nil {
			links = append(links, link)
			continue
		}
		// public links is probably disabled, create the link ourselves
		links = append(links, scheme+m.Credentials.Server+model.API_URL_SUFFIX_V4+"/files/"+id)
	}
	return links
}
// GetPosts fetches up to 'limit' posts from a channel, retrying while the
// server is rate limiting us. Returns nil on a non-ratelimit error.
func (m *Client) GetPosts(channelID string, limit int) *model.PostList {
	for {
		list, resp := m.Client.GetPostsForChannel(channelID, 0, limit, "")
		if resp.Error != nil {
			if err := m.HandleRatelimit("GetPostsForChannel", resp); err != nil {
				return nil
			}
			continue
		}
		return list
	}
}
// GetPostsSince fetches the posts in a channel newer than the given
// timestamp, retrying while rate limited. Returns nil on other errors.
func (m *Client) GetPostsSince(channelID string, time int64) *model.PostList {
	for {
		list, resp := m.Client.GetPostsSince(channelID, time)
		if resp.Error != nil {
			if err := m.HandleRatelimit("GetPostsSince", resp); err != nil {
				return nil
			}
			continue
		}
		return list
	}
}
// GetPublicLink returns the public link for a file, or "" when the lookup
// fails.
func (m *Client) GetPublicLink(filename string) string {
	link, resp := m.Client.GetFileLink(filename)
	if resp.Error == nil {
		return link
	}
	return ""
}
// GetPublicLinks returns public links for the given files, silently skipping
// any file the server refuses to link.
func (m *Client) GetPublicLinks(filenames []string) []string {
	var links []string
	for _, name := range filenames {
		if link, resp := m.Client.GetFileLink(name); resp.Error == nil {
			links = append(links, link)
		}
	}
	return links
}
// PostMessage creates a post in a channel (threaded under rootID when set)
// and returns the new post ID, retrying while rate limited.
func (m *Client) PostMessage(channelID string, text string, rootID string) (string, error) {
	newPost := &model.Post{
		ChannelId: channelID,
		Message:   text,
		RootId:    rootID,
	}
	for {
		created, resp := m.Client.CreatePost(newPost)
		if resp.Error != nil {
			if err := m.HandleRatelimit("CreatePost", resp); err != nil {
				return "", err
			}
			continue
		}
		return created.Id, nil
	}
}
// PostMessageWithFiles creates a post with attached (already uploaded) files
// and returns the new post ID, retrying while rate limited.
func (m *Client) PostMessageWithFiles(channelID string, text string, rootID string, fileIds []string) (string, error) {
	newPost := &model.Post{
		ChannelId: channelID,
		Message:   text,
		RootId:    rootID,
		FileIds:   fileIds,
	}
	for {
		created, resp := m.Client.CreatePost(newPost)
		if resp.Error != nil {
			if err := m.HandleRatelimit("CreatePost", resp); err != nil {
				return "", err
			}
			continue
		}
		return created.Id, nil
	}
}
// SearchPosts runs a (non-OR) post search within the current team; returns
// nil on error.
func (m *Client) SearchPosts(query string) *model.PostList {
	list, resp := m.Client.SearchPosts(m.Team.ID, query, false)
	if resp.Error == nil {
		return list
	}
	return nil
}
// SendDirectMessage sends a direct message to specified user
func (m *Client) SendDirectMessage(toUserID string, msg string, rootID string) error {
	// Delegate to SendDirectMessageProps with no additional message props.
	return m.SendDirectMessageProps(toUserID, msg, rootID, nil)
}
// SendDirectMessageProps sends a direct message (with optional message props)
// to the specified user, creating the DM channel first if needed. Both the
// channel creation and the post are retried while rate limited.
func (m *Client) SendDirectMessageProps(toUserID string, msg string, rootID string, props map[string]interface{}) error {
	m.logger.Debugf("SendDirectMessage to %s, msg %s", toUserID, msg)
	for {
		// create DM channel (only happens on first message)
		_, resp := m.Client.CreateDirectChannel(m.User.Id, toUserID)
		if resp.Error == nil {
			break
		}
		if err := m.HandleRatelimit("CreateDirectChannel", resp); err != nil {
			m.logger.Debugf("SendDirectMessage to %#v failed: %s", toUserID, err)
			return err
		}
	}
	channelName := model.GetDMNameFromIds(toUserID, m.User.Id)
	// update our channels
	// (best effort: a failure is logged but does not abort the send)
	if err := m.UpdateChannels(); err != nil {
		m.logger.Errorf("failed to update channels: %#v", err)
	}
	// build & send the message
	// strip carriage returns so CRLF input doesn't leak into the post body
	msg = strings.ReplaceAll(msg, "\r", "")
	post := &model.Post{
		ChannelId: m.GetChannelID(channelName, m.Team.ID),
		Message:   msg,
		RootId:    rootID,
	}
	post.SetProps(props)
	// retry the post while we are being rate limited
	for {
		_, resp := m.Client.CreatePost(post)
		if resp.Error == nil {
			return nil
		}
		if err := m.HandleRatelimit("CreatePost", resp); err != nil {
			return err
		}
	}
}
// UploadFile uploads data to a channel under the given filename and returns
// the resulting file ID.
func (m *Client) UploadFile(data []byte, channelID string, filename string) (string, error) {
	f, resp := m.Client.UploadFile(data, channelID, filename)
	if resp.Error != nil {
		return "", resp.Error
	}
	// Guard against an empty FileInfos slice so a malformed (but non-error)
	// server response cannot panic with an index-out-of-range.
	if len(f.FileInfos) == 0 {
		return "", fmt.Errorf("uploaded file %q but server returned no file info", filename)
	}
	return f.FileInfos[0].Id, nil
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// IAM Documentation: /docs/iam_roles.md
// TODO: We have a couple different code paths until we do lifecycles, and
// TODO: when we have a cluster or refactor some s3 code. The only code that
// TODO: is not shared by the different path is the s3 / state store stuff.
// TODO: Initial work has been done to lock down IAM actions based on resources
// TODO: and condition keys, but this can be extended further (with thorough testing).
package iam
import (
"bytes"
"encoding/json"
"fmt"
"io"
"sort"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"k8s.io/kops/pkg/apis/kops/model"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/util/stringorslice"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
"k8s.io/kops/util/pkg/vfs"
)
// PolicyDefaultVersion is the default version included in all policy documents
const PolicyDefaultVersion = "2012-10-17"
// Policy Struct is a collection of fields that form a valid AWS policy document
type Policy struct {
	// Version is the policy language version (see PolicyDefaultVersion).
	Version string
	// Statement is the list of IAM statements making up the document.
	Statement []*Statement
}
// AsJSON converts the policy document to JSON format (parsable by AWS)
func (p *Policy) AsJSON() (string, error) {
	data, err := json.MarshalIndent(p, "", " ")
	if err != nil {
		return "", fmt.Errorf("error marshaling policy to JSON: %v", err)
	}
	return string(data), nil
}
// StatementEffect is required and specifies what type of access the statement results in
type StatementEffect string
// StatementEffectAllow allows access for the given resources in the statement (based on conditions)
const StatementEffectAllow StatementEffect = "Allow"
// StatementEffectDeny denies access for the given resources in the statement (based on conditions)
const StatementEffectDeny StatementEffect = "Deny"
// Condition is a map of Conditions to be evaluated for a given IAM Statement
type Condition map[string]interface{}
// Statement is an AWS IAM Policy Statement Object:
// http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Statement
type Statement struct {
	Effect StatementEffect
	Action stringorslice.StringOrSlice
	Resource stringorslice.StringOrSlice
	// Condition is omitted from the marshaled JSON document when empty.
	Condition Condition `json:",omitempty"`
}
// Equal compares two IAM Statements and returns a bool
// TODO: Extend to support Condition Keys
func (l *Statement) Equal(r *Statement) bool {
	return l.Effect == r.Effect &&
		l.Action.Equal(r.Action) &&
		l.Resource.Equal(r.Resource)
}
// PolicyBuilder struct defines all valid fields to be used when building the
// AWS IAM policy document for a given instance group role.
type PolicyBuilder struct {
	Cluster *kops.Cluster
	// HostedZoneID is the Route53 zone to grant record-change access to
	// (empty disables the Route53 statements).
	HostedZoneID string
	// KMSKeys is populated from the etcd cluster members by BuildAWSPolicy.
	KMSKeys []string
	Region string
	ResourceARN *string
	Role kops.InstanceGroupRole
}
// BuildAWSPolicy builds a set of IAM policy statements based on the
// instance group type and IAM Legacy flag within the Cluster Spec
func (b *PolicyBuilder) BuildAWSPolicy() (*Policy, error) {
	var p *Policy
	var err error
	// Retrieve all the KMS Keys in use
	// NOTE(review): this appends to b.KMSKeys on every call, so invoking
	// BuildAWSPolicy twice on the same builder would duplicate keys —
	// confirm builders are single-use (Open copies the builder each time).
	for _, e := range b.Cluster.Spec.EtcdClusters {
		for _, m := range e.Members {
			if m.KmsKeyId != nil {
				b.KMSKeys = append(b.KMSKeys, *m.KmsKeyId)
			}
		}
	}
	// Delegate to the role-specific policy generator.
	switch b.Role {
	case kops.InstanceGroupRoleBastion:
		p, err = b.BuildAWSPolicyBastion()
		if err != nil {
			return nil, fmt.Errorf("failed to generate AWS IAM Policy for Bastion Instance Group: %v", err)
		}
	case kops.InstanceGroupRoleNode:
		p, err = b.BuildAWSPolicyNode()
		if err != nil {
			return nil, fmt.Errorf("failed to generate AWS IAM Policy for Node Instance Group: %v", err)
		}
	case kops.InstanceGroupRoleMaster:
		p, err = b.BuildAWSPolicyMaster()
		if err != nil {
			return nil, fmt.Errorf("failed to generate AWS IAM Policy for Master Instance Group: %v", err)
		}
	default:
		return nil, fmt.Errorf("unrecognised instance group type: %s", b.Role)
	}
	return p, nil
}
// BuildAWSPolicyMaster generates a custom policy for a Kubernetes master.
func (b *PolicyBuilder) BuildAWSPolicyMaster() (*Policy, error) {
	resource := createResource(b)
	p := &Policy{
		Version: PolicyDefaultVersion,
	}
	// Core cloudprovider permissions for the control plane.
	addMasterEC2Policies(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName())
	addMasterASPolicies(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName())
	addMasterELBPolicies(p, resource, b.Cluster.Spec.IAM.Legacy)
	addCertIAMPolicies(p, resource)
	var err error
	if p, err = b.AddS3Permissions(p); err != nil {
		return nil, fmt.Errorf("failed to generate AWS IAM S3 access statements: %v", err)
	}
	// Grant access to any KMS keys used by the etcd members.
	if b.KMSKeys != nil && len(b.KMSKeys) != 0 {
		addKMSIAMPolicies(p, stringorslice.Slice(b.KMSKeys), b.Cluster.Spec.IAM.Legacy)
	}
	if b.HostedZoneID != "" {
		b.addRoute53Permissions(p, b.HostedZoneID)
	}
	if b.Cluster.Spec.IAM.Legacy {
		addRoute53ListHostedZonesPermission(p)
	}
	if b.Cluster.Spec.IAM.Legacy || b.Cluster.Spec.IAM.AllowContainerRegistry {
		addECRPermissions(p)
	}
	// Networking-provider-specific permissions.
	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.AmazonVPC != nil {
		addAmazonVPCCNIPermissions(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName(), b.IAMPrefix())
	}
	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.LyftVPC != nil {
		addLyftVPCPermissions(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName())
	}
	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.Cilium != nil && b.Cluster.Spec.Networking.Cilium.Ipam == kops.CiliumIpamEni {
		addCiliumEniPermissions(p, resource, b.Cluster.Spec.IAM.Legacy)
	}
	return p, nil
}
// BuildAWSPolicyNode generates a custom policy for a Kubernetes node.
func (b *PolicyBuilder) BuildAWSPolicyNode() (*Policy, error) {
	resource := createResource(b)
	p := &Policy{
		Version: PolicyDefaultVersion,
	}
	addNodeEC2Policies(p, resource)
	var err error
	if p, err = b.AddS3Permissions(p); err != nil {
		return nil, fmt.Errorf("failed to generate AWS IAM S3 access statements: %v", err)
	}
	// Route53 access is only granted to nodes in legacy IAM mode.
	if b.Cluster.Spec.IAM.Legacy {
		if b.HostedZoneID != "" {
			b.addRoute53Permissions(p, b.HostedZoneID)
		}
		addRoute53ListHostedZonesPermission(p)
	}
	if b.Cluster.Spec.IAM.Legacy || b.Cluster.Spec.IAM.AllowContainerRegistry {
		addECRPermissions(p)
	}
	// Networking-provider-specific permissions.
	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.AmazonVPC != nil {
		addAmazonVPCCNIPermissions(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName(), b.IAMPrefix())
	}
	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.LyftVPC != nil {
		addLyftVPCPermissions(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName())
	}
	return p, nil
}
// BuildAWSPolicyBastion generates a custom policy for a bastion host.
func (b *PolicyBuilder) BuildAWSPolicyBastion() (*Policy, error) {
	// Bastion hosts currently don't require any specific permissions.
	// A trivial permission is granted, because empty policies are not allowed.
	stmt := &Statement{
		Effect:   StatementEffectAllow,
		Action:   stringorslice.Slice([]string{"ec2:DescribeRegions"}),
		Resource: createResource(b),
	}
	return &Policy{
		Version:   PolicyDefaultVersion,
		Statement: []*Statement{stmt},
	}, nil
}
// IAMPrefix returns the prefix for AWS ARNs in the current region, for use with IAM
// it is arn:aws everywhere but in cn-north and us-gov-west-1
func (b *PolicyBuilder) IAMPrefix() string {
	// The China and GovCloud partitions use their own ARN prefixes.
	switch b.Region {
	case "cn-north-1", "cn-northwest-1":
		return "arn:aws-cn"
	case "us-gov-east-1", "us-gov-west-1":
		return "arn:aws-us-gov"
	}
	return "arn:aws"
}
// AddS3Permissions updates an IAM Policy with statements granting tailored
// access to S3 assets, depending on the instance group role
func (b *PolicyBuilder) AddS3Permissions(p *Policy) (*Policy, error) {
	// For S3 IAM permissions we grant permissions to subtrees, so find the parents;
	// we don't need to grant mypath and mypath/child.
	var roots []string
	{
		var locations []string
		for _, p := range []string{
			b.Cluster.Spec.KeyStore,
			b.Cluster.Spec.SecretStore,
			b.Cluster.Spec.ConfigStore,
		} {
			if p == "" {
				continue
			}
			if !strings.HasSuffix(p, "/") {
				p = p + "/"
			}
			locations = append(locations, p)
		}
		for i, l := range locations {
			isTopLevel := true
			for j := range locations {
				if i == j {
					continue
				}
				if strings.HasPrefix(l, locations[j]) {
					klog.V(4).Infof("Ignoring location %q because found parent %q", l, locations[j])
					isTopLevel = false
				}
			}
			if isTopLevel {
				klog.V(4).Infof("Found root location %q", l)
				roots = append(roots, l)
			}
		}
	}
	sort.Strings(roots)
	s3Buckets := sets.NewString()
	for _, root := range roots {
		vfsPath, err := vfs.Context.BuildVfsPath(root)
		if err != nil {
			return nil, fmt.Errorf("cannot parse VFS path %q: %v", root, err)
		}
		if s3Path, ok := vfsPath.(*vfs.S3Path); ok {
			iamS3Path := s3Path.Bucket() + "/" + s3Path.Key()
			iamS3Path = strings.TrimSuffix(iamS3Path, "/")
			s3Buckets.Insert(s3Path.Bucket())
			if b.Cluster.Spec.IAM.Legacy {
				// Legacy mode: full s3:* access on the whole subtree.
				p.Statement = append(p.Statement, &Statement{
					Effect: StatementEffectAllow,
					Action: stringorslice.Slice([]string{"s3:*"}),
					Resource: stringorslice.Of(
						strings.Join([]string{b.IAMPrefix(), ":s3:::", iamS3Path, "/*"}, ""),
					),
				})
			} else {
				// Non-legacy: read-only access limited to the role-specific paths.
				resources, err := ReadableStatePaths(b.Cluster, b.Role)
				if err != nil {
					return nil, err
				}
				sort.Strings(resources)
				// Add the prefix for IAM
				for i, r := range resources {
					resources[i] = b.IAMPrefix() + ":s3:::" + iamS3Path + r
				}
				p.Statement = append(p.Statement, &Statement{
					Effect:   StatementEffectAllow,
					Action:   stringorslice.Slice([]string{"s3:Get*"}),
					Resource: stringorslice.Of(resources...),
				})
			}
		} else if _, ok := vfsPath.(*vfs.MemFSPath); ok {
			// Tests -ignore - nothing we can do in terms of IAM policy
			klog.Warningf("ignoring memfs path %q for IAM policy builder", vfsPath)
		} else if _, ok := vfsPath.(*vfs.VaultPath); ok {
			// Vault access needs to come from somewhere else
			// (typo fix: the message previously said "valult")
			klog.Warningf("ignoring vault path %q for IAM policy builder", vfsPath)
		} else {
			// We could implement this approach, but it seems better to
			// get all clouds using cluster-readable storage
			return nil, fmt.Errorf("path is not cluster readable: %v", root)
		}
	}
	writeablePaths, err := WriteableVFSPaths(b.Cluster, b.Role)
	if err != nil {
		return nil, err
	}
	for _, vfsPath := range writeablePaths {
		if s3Path, ok := vfsPath.(*vfs.S3Path); ok {
			iamS3Path := s3Path.Bucket() + "/" + s3Path.Key()
			iamS3Path = strings.TrimSuffix(iamS3Path, "/")
			p.Statement = append(p.Statement, &Statement{
				Effect: StatementEffectAllow,
				Action: stringorslice.Slice([]string{
					"s3:GetObject",
					"s3:DeleteObject",
					"s3:DeleteObjectVersion",
					"s3:PutObject",
				}),
				Resource: stringorslice.Of(
					strings.Join([]string{b.IAMPrefix(), ":s3:::", iamS3Path, "/*"}, ""),
				),
			})
			s3Buckets.Insert(s3Path.Bucket())
		} else {
			klog.Warningf("unknown writeable path, can't apply IAM policy: %q", vfsPath)
		}
	}
	// We need some permissions on the buckets themselves
	for _, s3Bucket := range s3Buckets.List() {
		p.Statement = append(p.Statement, &Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Of(
				"s3:GetBucketLocation",
				"s3:GetEncryptionConfiguration",
				"s3:ListBucket",
				"s3:ListBucketVersions",
			),
			Resource: stringorslice.Slice([]string{
				strings.Join([]string{b.IAMPrefix(), ":s3:::", s3Bucket}, ""),
			}),
		})
	}
	return p, nil
}
// WriteableVFSPaths returns the VFS paths the given role needs write access
// to. Currently only masters get write access, and only to any configured
// etcd backup stores.
func WriteableVFSPaths(cluster *kops.Cluster, role kops.InstanceGroupRole) ([]vfs.Path, error) {
	var paths []vfs.Path
	if role != kops.InstanceGroupRoleMaster {
		return paths, nil
	}
	seen := sets.NewString()
	for _, etcdCluster := range cluster.Spec.EtcdClusters {
		if etcdCluster.Backups == nil {
			continue
		}
		store := etcdCluster.Backups.BackupStore
		if store == "" || seen.Has(store) {
			continue
		}
		vfsPath, err := vfs.Context.BuildVfsPath(store)
		if err != nil {
			return nil, fmt.Errorf("cannot parse VFS path %q: %v", store, err)
		}
		paths = append(paths, vfsPath)
		seen.Insert(store)
	}
	return paths, nil
}
// ReadableStatePaths returns the file paths that should be readable in the cluster's state store "directory"
func ReadableStatePaths(cluster *kops.Cluster, role kops.InstanceGroupRole) ([]string, error) {
	var paths []string
	switch role {
	case kops.InstanceGroupRoleMaster:
		// Masters can read the entire state store.
		paths = append(paths, "/*")
	case kops.InstanceGroupRoleNode:
		paths = append(paths,
			"/addons/*",
			"/cluster.spec",
			"/config",
			"/instancegroup/*",
			"/pki/issued/*",
			"/pki/ssh/*",
			"/secrets/dockerconfig",
		)
		// @check if bootstrap tokens are enabled and if so enable access to client certificate
		// When kops-controller performs node bootstrap no extra key material
		// is required; otherwise grant the relevant client keys.
		if !model.UseKopsControllerForNodeBootstrap(cluster) {
			paths = append(paths, "/pki/private/kube-proxy/*")
			if useBootstrapTokens(cluster) {
				paths = append(paths, "/pki/private/node-authorizer-client/*")
			} else {
				paths = append(paths, "/pki/private/kubelet/*")
			}
		}
		if networkingSpec := cluster.Spec.Networking; networkingSpec != nil {
			// @check if kuberoute is enabled and permit access to the private key
			if networkingSpec.Kuberouter != nil && !model.UseKopsControllerForNodeBootstrap(cluster) {
				paths = append(paths, "/pki/private/kube-router/*")
			}
			// @check if calico is enabled as the CNI provider and permit access to the client TLS certificate by default
			if networkingSpec.Calico != nil {
				paths = append(paths, "/pki/private/calico-client/*")
			}
			// @check if cilium is enabled as the CNI provider and permit access to the cilium etc client TLS certificate by default
			// As long as the Cilium Etcd cluster exists, we should do this
			if networkingSpec.Cilium != nil && model.UseCiliumEtcd(cluster) && !model.UseKopsControllerForNodeBootstrap(cluster) {
				paths = append(paths, "/pki/private/etcd-clients-ca-cilium/*")
			}
		}
	}
	return paths, nil
}
// PolicyResource defines the PolicyBuilder and DNSZone to use when building the
// IAM policy document for a given instance group role
type PolicyResource struct {
	Builder *PolicyBuilder
	// DNSZone, when set, supplies the hosted zone ID at render time.
	DNSZone *awstasks.DNSZone
}
// Compile-time interface conformance checks.
var _ fi.Resource = &PolicyResource{}
var _ fi.HasDependencies = &PolicyResource{}
// GetDependencies adds the DNSZone task to the list of dependencies if set
func (b *PolicyResource) GetDependencies(tasks map[string]fi.Task) []fi.Task {
	if b.DNSZone == nil {
		return nil
	}
	return []fi.Task{b.DNSZone}
}
// Open produces the AWS IAM policy for the given role
func (b *PolicyResource) Open() (io.Reader, error) {
	// Work on a copy so the shared builder is never mutated.
	builder := *b.Builder
	if b.DNSZone != nil {
		zoneID := fi.StringValue(b.DNSZone.ZoneID)
		if zoneID == "" {
			// Dependency analysis failure?
			return nil, fmt.Errorf("DNS ZoneID not set")
		}
		builder.HostedZoneID = zoneID
	}
	policy, err := builder.BuildAWSPolicy()
	if err != nil {
		return nil, fmt.Errorf("error building IAM policy: %v", err)
	}
	j, err := policy.AsJSON()
	if err != nil {
		return nil, fmt.Errorf("error building IAM policy: %v", err)
	}
	return bytes.NewReader([]byte(j)), nil
}
// useBootstrapTokens check if we are using bootstrap tokens - @TODO, i don't like this we should probably pass in
// the kops model into the builder rather than duplicating the code. I'll leave for another PR
func useBootstrapTokens(cluster *kops.Cluster) bool {
	apiServer := cluster.Spec.KubeAPIServer
	if apiServer == nil {
		return false
	}
	return fi.BoolValue(apiServer.EnableBootstrapAuthToken)
}
// addECRPermissions appends read-only ECR statements (image pulls and the
// token needed to authenticate) to the policy.
func addECRPermissions(p *Policy) {
	// TODO - I think we can just have GetAuthorizationToken here, as we are not
	// TODO - making any API calls except for GetAuthorizationToken.
	// We provide ECR access on the nodes (naturally), but we also provide access on the master.
	// We shouldn't be running lots of pods on the master, but it is perfectly reasonable to run
	// a private logging pod or similar.
	// At this point we allow all regions with ECR, since ECR is region specific.
	p.Statement = append(p.Statement, &Statement{
		Effect: StatementEffectAllow,
		Action: stringorslice.Of(
			"ecr:GetAuthorizationToken",
			"ecr:BatchCheckLayerAvailability",
			"ecr:GetDownloadUrlForLayer",
			"ecr:GetRepositoryPolicy",
			"ecr:DescribeRepositories",
			"ecr:ListImages",
			"ecr:BatchGetImage",
		),
		Resource: stringorslice.Slice([]string{"*"}),
	})
}
// addRoute53Permissions grants record-change access scoped to the given
// hosted zone, plus the zone-wide calls (GetChange, ListHostedZones) that
// Route53 only supports on wildcard resources.
func (b *PolicyBuilder) addRoute53Permissions(p *Policy, hostedZoneID string) {
	// TODO: Route53 currently not supported in China, need to check and fail/return
	// Remove /hostedzone/ prefix (if present)
	hostedZoneID = strings.TrimPrefix(hostedZoneID, "/")
	hostedZoneID = strings.TrimPrefix(hostedZoneID, "hostedzone/")
	p.Statement = append(p.Statement, &Statement{
		Effect: StatementEffectAllow,
		Action: stringorslice.Of("route53:ChangeResourceRecordSets",
			"route53:ListResourceRecordSets",
			"route53:GetHostedZone"),
		Resource: stringorslice.Slice([]string{b.IAMPrefix() + ":route53:::hostedzone/" + hostedZoneID}),
	})
	p.Statement = append(p.Statement, &Statement{
		Effect:   StatementEffectAllow,
		Action:   stringorslice.Slice([]string{"route53:GetChange"}),
		Resource: stringorslice.Slice([]string{b.IAMPrefix() + ":route53:::change/*"}),
	})
	wildcard := stringorslice.Slice([]string{"*"})
	p.Statement = append(p.Statement, &Statement{
		Effect:   StatementEffectAllow,
		Action:   stringorslice.Slice([]string{"route53:ListHostedZones"}),
		Resource: wildcard,
	})
}
// addKMSIAMPolicies grants use of the given KMS keys (for etcd volume
// encryption); legacy mode additionally grants grant-management calls.
func addKMSIAMPolicies(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool) {
	if legacyIAM {
		p.Statement = append(p.Statement, &Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Of(
				"kms:ListGrants",
				"kms:RevokeGrant",
			),
			Resource: resource,
		})
	}
	// TODO could use "kms:ViaService" Condition Key here?
	p.Statement = append(p.Statement, &Statement{
		Effect: StatementEffectAllow,
		Action: stringorslice.Of(
			"kms:CreateGrant",
			"kms:Decrypt",
			"kms:DescribeKey",
			"kms:Encrypt",
			"kms:GenerateDataKey*",
			"kms:ReEncrypt*",
		),
		Resource: resource,
	})
}
// addNodeEC2Policies grants the read-only EC2 calls nodes need.
func addNodeEC2Policies(p *Policy, resource stringorslice.StringOrSlice) {
	// Protokube makes a DescribeInstances call, DescribeRegions when finding S3 State Bucket
	statement := &Statement{
		Effect:   StatementEffectAllow,
		Action:   stringorslice.Slice([]string{"ec2:DescribeInstances", "ec2:DescribeRegions"}),
		Resource: resource,
	}
	p.Statement = append(p.Statement, statement)
}
// addMasterEC2Policies grants the EC2 permissions the control plane needs;
// legacy mode grants ec2:*, non-legacy mode locks mutating calls down to
// resources tagged with the cluster name where the EC2 API supports it.
func addMasterEC2Policies(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool, clusterName string) {
	// The legacy IAM policy grants full ec2 API access
	if legacyIAM {
		p.Statement = append(p.Statement,
			&Statement{
				Effect:   StatementEffectAllow,
				Action:   stringorslice.Slice([]string{"ec2:*"}),
				Resource: resource,
			},
		)
	} else {
		// Describe* calls don't support any additional IAM restrictions
		// The non-Describe* ec2 calls support different types of filtering:
		// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ec2-api-permissions.html
		// We try to lock down the permissions here in non-legacy mode,
		// but there are still some improvements we can make:
		// CreateVolume - supports filtering on tags, but we need to switch to pass tags to CreateVolume
		// CreateTags - supports filtering on existing tags. Also supports filtering on VPC for some resources (e.g. security groups)
		// Network Routing Permissions - May not be required with the CNI Networking provider
		// Comments are which cloudprovider code file makes the call
		p.Statement = append(p.Statement,
			&Statement{
				Effect: StatementEffectAllow,
				Action: stringorslice.Slice([]string{
					"ec2:DescribeAccountAttributes", // aws.go
					"ec2:DescribeInstances",         // aws.go
					"ec2:DescribeInternetGateways",  // aws.go
					"ec2:DescribeRegions",           // s3context.go
					"ec2:DescribeRouteTables",       // aws.go
					"ec2:DescribeSecurityGroups",    // aws.go
					"ec2:DescribeSubnets",           // aws.go
					"ec2:DescribeVolumes",           // aws.go
				}),
				Resource: resource,
			},
			&Statement{
				Effect: StatementEffectAllow,
				Action: stringorslice.Slice([]string{
					"ec2:CreateSecurityGroup",          // aws.go
					"ec2:CreateTags",                   // aws.go, tag.go
					"ec2:CreateVolume",                 // aws.go
					"ec2:DescribeVolumesModifications", // aws.go
					"ec2:ModifyInstanceAttribute",      // aws.go
					"ec2:ModifyVolume",                 // aws.go
				}),
				Resource: resource,
			},
			&Statement{
				Effect: StatementEffectAllow,
				Action: stringorslice.Of(
					"ec2:AttachVolume",                  // aws.go
					"ec2:AuthorizeSecurityGroupIngress", // aws.go
					"ec2:CreateRoute",                   // aws.go
					"ec2:DeleteRoute",                   // aws.go
					"ec2:DeleteSecurityGroup",           // aws.go
					"ec2:DeleteVolume",                  // aws.go
					"ec2:DetachVolume",                  // aws.go
					"ec2:RevokeSecurityGroupIngress",    // aws.go
				),
				Resource: resource,
				// Mutating calls are only allowed on resources carrying this
				// cluster's KubernetesCluster tag.
				Condition: Condition{
					"StringEquals": map[string]string{
						"ec2:ResourceTag/KubernetesCluster": clusterName,
					},
				},
			},
		)
	}
}
// addMasterELBPolicies appends the Elastic Load Balancing permissions a
// Kubernetes master needs to p. In legacy mode it grants the broad
// elasticloadbalancing:* action; otherwise it grants two tailored statements,
// one for Classic ELB operations and one for NLB/target-group operations.
func addMasterELBPolicies(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool) {
	if legacyIAM {
		// Legacy mode: full ELB access, no action-level lockdown.
		p.Statement = append(p.Statement, &Statement{
			Effect:   StatementEffectAllow,
			Action:   stringorslice.Slice([]string{"elasticloadbalancing:*"}),
			Resource: resource,
		})
	} else {
		// Comments are which cloudprovider code file makes the call
		p.Statement = append(p.Statement, &Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Of(
				"elasticloadbalancing:AddTags",                                 // aws_loadbalancer.go
				"elasticloadbalancing:AttachLoadBalancerToSubnets",             // aws_loadbalancer.go
				"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",       // aws_loadbalancer.go
				"elasticloadbalancing:CreateLoadBalancer",                      // aws_loadbalancer.go
				"elasticloadbalancing:CreateLoadBalancerPolicy",                // aws_loadbalancer.go
				"elasticloadbalancing:CreateLoadBalancerListeners",             // aws_loadbalancer.go
				"elasticloadbalancing:ConfigureHealthCheck",                    // aws_loadbalancer.go
				"elasticloadbalancing:DeleteLoadBalancer",                      // aws.go
				"elasticloadbalancing:DeleteLoadBalancerListeners",             // aws_loadbalancer.go
				"elasticloadbalancing:DescribeLoadBalancers",                   // aws.go
				"elasticloadbalancing:DescribeLoadBalancerAttributes",          // aws.go
				"elasticloadbalancing:DetachLoadBalancerFromSubnets",           // aws_loadbalancer.go
				"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",     // aws_loadbalancer.go
				"elasticloadbalancing:ModifyLoadBalancerAttributes",            // aws_loadbalancer.go
				"elasticloadbalancing:RegisterInstancesWithLoadBalancer",       // aws_loadbalancer.go
				"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", // aws_loadbalancer.go
			),
			Resource: resource,
		})
		// NLB / target-group actions (plus ec2:DescribeVpcs, which the
		// loadbalancer code needs when resolving the VPC).
		p.Statement = append(p.Statement, &Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Of(
				"ec2:DescribeVpcs",                                         // aws_loadbalancer.go
				"elasticloadbalancing:AddTags",                             // aws_loadbalancer.go
				"elasticloadbalancing:CreateListener",                      // aws_loadbalancer.go
				"elasticloadbalancing:CreateTargetGroup",                   // aws_loadbalancer.go
				"elasticloadbalancing:DeleteListener",                      // aws_loadbalancer.go
				"elasticloadbalancing:DeleteTargetGroup",                   // aws_loadbalancer.go
				"elasticloadbalancing:DeregisterTargets",                   // aws_loadbalancer.go
				"elasticloadbalancing:DescribeListeners",                   // aws_loadbalancer.go
				"elasticloadbalancing:DescribeLoadBalancerPolicies",        // aws_loadbalancer.go
				"elasticloadbalancing:DescribeTargetGroups",                // aws_loadbalancer.go
				"elasticloadbalancing:DescribeTargetHealth",                // aws_loadbalancer.go
				"elasticloadbalancing:ModifyListener",                      // aws_loadbalancer.go
				"elasticloadbalancing:ModifyTargetGroup",                   // aws_loadbalancer.go
				"elasticloadbalancing:RegisterTargets",                     // aws_loadbalancer.go
				"elasticloadbalancing:SetLoadBalancerPoliciesOfListener",   // aws_loadbalancer.go
			),
			Resource: resource,
		})
	}
}
// addMasterASPolicies appends the Auto Scaling permissions a Kubernetes
// master (and the cluster-autoscaler running on it) needs to p. In legacy
// mode all actions share one unconditioned statement; otherwise the mutating
// actions are restricted by the KubernetesCluster resource tag.
func addMasterASPolicies(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool, clusterName string) {
	if legacyIAM {
		p.Statement = append(p.Statement, &Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Slice([]string{
				"autoscaling:DescribeAutoScalingGroups",
				"autoscaling:DescribeAutoScalingInstances",
				"autoscaling:DescribeLaunchConfigurations",
				"autoscaling:DescribeTags",
				"autoscaling:SetDesiredCapacity",
				"autoscaling:TerminateInstanceInAutoScalingGroup",
				"autoscaling:UpdateAutoScalingGroup",
				"ec2:DescribeLaunchTemplateVersions",
			}),
			Resource: resource,
		})
	} else {
		// Comments are which cloudprovider / autoscaler code file makes the call
		// TODO: Make optional only if using autoscalers
		p.Statement = append(p.Statement,
			// Read-only describe actions: no condition-key support.
			&Statement{
				Effect: StatementEffectAllow,
				Action: stringorslice.Of(
					"autoscaling:DescribeAutoScalingGroups",    // aws_instancegroups.go
					"autoscaling:DescribeLaunchConfigurations", // aws.go
					"autoscaling:DescribeTags",                 // auto_scaling.go
					"ec2:DescribeLaunchTemplateVersions",
				),
				Resource: resource,
			},
			// Mutating actions: only on ASGs tagged as belonging to this cluster.
			&Statement{
				Effect: StatementEffectAllow,
				Action: stringorslice.Of(
					"autoscaling:SetDesiredCapacity",                 // aws_manager.go
					"autoscaling:TerminateInstanceInAutoScalingGroup", // aws_manager.go
					"autoscaling:UpdateAutoScalingGroup",             // aws_instancegroups.go
				),
				Resource: resource,
				Condition: Condition{
					"StringEquals": map[string]string{
						"autoscaling:ResourceTag/KubernetesCluster": clusterName,
					},
				},
			},
		)
	}
}
// addCertIAMPolicies appends the IAM server-certificate read permissions
// needed when IAM SSL certificates are attached to ELBs.
func addCertIAMPolicies(p *Policy, resource stringorslice.StringOrSlice) {
	// TODO: Make optional only if using IAM SSL Certs on ELBs
	p.Statement = append(p.Statement, &Statement{
		Effect: StatementEffectAllow,
		Action: stringorslice.Of(
			"iam:ListServerCertificates",
			"iam:GetServerCertificate",
		),
		Resource: resource,
	})
}
// addRoute53ListHostedZonesPermission appends the route53:ListHostedZones
// permission to p. ListHostedZones does not support resource-level
// restrictions, so the resource is always the wildcard "*".
func addRoute53ListHostedZonesPermission(p *Policy) {
	wildcard := stringorslice.Slice([]string{"*"})
	p.Statement = append(p.Statement, &Statement{
		Effect:   StatementEffectAllow,
		Action:   stringorslice.Slice([]string{"route53:ListHostedZones"}),
		Resource: wildcard,
	})
}
// addLyftVPCPermissions appends the EC2 ENI/IP-management permissions
// required by the Lyft VPC CNI plugin. A no-op in legacy mode, which already
// grants ec2:*.
// NOTE(review): clusterName is accepted but not used here — presumably kept
// for signature symmetry with the other add*Permissions helpers; confirm.
func addLyftVPCPermissions(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool, clusterName string) {
	if legacyIAM {
		// Legacy IAM provides ec2:*, so no additional permissions required
		return
	}

	p.Statement = append(p.Statement,
		&Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Slice([]string{
				"ec2:AssignPrivateIpAddresses",
				"ec2:AttachNetworkInterface",
				"ec2:CreateNetworkInterface",
				"ec2:DeleteNetworkInterface",
				"ec2:DescribeInstanceTypes",
				"ec2:DescribeNetworkInterfaces",
				"ec2:DescribeSecurityGroups",
				"ec2:DescribeSubnets",
				"ec2:DescribeVpcPeeringConnections",
				"ec2:DescribeVpcs",
				"ec2:DetachNetworkInterface",
				"ec2:ModifyNetworkInterfaceAttribute",
				"ec2:UnassignPrivateIpAddresses",
			}),
			Resource: resource,
		},
	)
}
// addCiliumEniPermissions appends the EC2 ENI/IP-management permissions
// required when Cilium runs in ENI IPAM mode. A no-op in legacy mode, which
// already grants ec2:*.
func addCiliumEniPermissions(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool) {
	if legacyIAM {
		// Legacy IAM provides ec2:*, so no additional permissions required
		return
	}

	p.Statement = append(p.Statement,
		&Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Slice([]string{
				"ec2:DescribeSubnets",
				"ec2:AttachNetworkInterface",
				"ec2:AssignPrivateIpAddresses",
				"ec2:UnassignPrivateIpAddresses",
				"ec2:CreateNetworkInterface",
				"ec2:DescribeNetworkInterfaces",
				"ec2:DescribeVpcPeeringConnections",
				"ec2:DescribeSecurityGroups",
				"ec2:DetachNetworkInterface",
				"ec2:DeleteNetworkInterface",
				"ec2:ModifyNetworkInterfaceAttribute",
				"ec2:DescribeVpcs",
			}),
			Resource: resource,
		},
	)
}
// addAmazonVPCCNIPermissions appends the EC2 permissions required by the
// Amazon VPC CNI plugin: ENI/IP management on the given resource, plus
// ec2:CreateTags restricted to network-interface ARNs (built from iamPrefix
// so the partition is correct in China/GovCloud regions). A no-op in legacy
// mode, which already grants ec2:*.
// NOTE(review): clusterName is accepted but not used here — confirm whether a
// tag condition was intended.
func addAmazonVPCCNIPermissions(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool, clusterName string, iamPrefix string) {
	if legacyIAM {
		// Legacy IAM provides ec2:*, so no additional permissions required
		return
	}

	p.Statement = append(p.Statement,
		&Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Slice([]string{
				"ec2:AssignPrivateIpAddresses",
				"ec2:AttachNetworkInterface",
				"ec2:CreateNetworkInterface",
				"ec2:DeleteNetworkInterface",
				"ec2:DescribeInstances",
				"ec2:DescribeInstanceTypes",
				"ec2:DescribeTags",
				"ec2:DescribeNetworkInterfaces",
				"ec2:DetachNetworkInterface",
				"ec2:ModifyNetworkInterfaceAttribute",
				"ec2:UnassignPrivateIpAddresses",
			}),
			Resource: resource,
		},
		&Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Slice([]string{
				"ec2:CreateTags",
			}),
			// Tagging is limited to ENI ARNs in any region/account.
			Resource: stringorslice.Slice([]string{
				strings.Join([]string{iamPrefix, ":ec2:*:*:network-interface/*"}, ""),
			})},
	)
}
// createResource returns the Resource element used by generated statements:
// the builder's explicit ResourceARN when one is set, otherwise the
// wildcard "*".
func createResource(b *PolicyBuilder) stringorslice.StringOrSlice {
	arn := "*"
	if b.ResourceARN != nil {
		arn = *b.ResourceARN
	}
	return stringorslice.Slice([]string{arn})
}
Don't grant access to the calico-client private key when it is not needed
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// IAM Documentation: /docs/iam_roles.md
// TODO: We have a couple different code paths until we do lifecycles, and
// TODO: when we have a cluster or refactor some s3 code. The only code that
// TODO: is not shared by the different path is the s3 / state store stuff.
// TODO: Initial work has been done to lock down IAM actions based on resources
// TODO: and condition keys, but this can be extended further (with thorough testing).
package iam
import (
"bytes"
"encoding/json"
"fmt"
"io"
"sort"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"k8s.io/kops/pkg/apis/kops/model"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/util/stringorslice"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
"k8s.io/kops/util/pkg/vfs"
)
// PolicyDefaultVersion is the default version included in all policy documents
const PolicyDefaultVersion = "2012-10-17"

// Policy Struct is a collection of fields that form a valid AWS policy document
type Policy struct {
	Version   string       // policy language version; normally PolicyDefaultVersion
	Statement []*Statement // the individual allow/deny statements
}
// AsJSON converts the policy document to indented JSON format (parsable by
// AWS), returning an error if marshaling fails.
func (p *Policy) AsJSON() (string, error) {
	data, err := json.MarshalIndent(p, "", "  ")
	if err != nil {
		return "", fmt.Errorf("error marshaling policy to JSON: %v", err)
	}
	return string(data), nil
}
// StatementEffect is required and specifies what type of access the statement results in
type StatementEffect string

// StatementEffectAllow allows access for the given resources in the statement (based on conditions)
const StatementEffectAllow StatementEffect = "Allow"

// StatementEffectDeny denies access for the given resources in the statement (based on conditions)
const StatementEffectDeny StatementEffect = "Deny"

// Condition is a map of Conditions to be evaluated for a given IAM Statement
type Condition map[string]interface{}

// Statement is an AWS IAM Policy Statement Object:
// http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Statement
type Statement struct {
	Effect    StatementEffect             // Allow or Deny
	Action    stringorslice.StringOrSlice // API action(s) the statement covers
	Resource  stringorslice.StringOrSlice // ARN(s) the statement applies to
	Condition Condition                   `json:",omitempty"` // optional condition keys restricting the statement
}
// Equal compares two IAM Statements and returns a bool
// TODO: Extend to support Condition Keys
func (l *Statement) Equal(r *Statement) bool {
	return l.Effect == r.Effect &&
		l.Action.Equal(r.Action) &&
		l.Resource.Equal(r.Resource)
}
// PolicyBuilder struct defines all valid fields to be used when building the
// AWS IAM policy document for a given instance group role.
type PolicyBuilder struct {
	Cluster      *kops.Cluster          // cluster whose spec drives the policy
	HostedZoneID string                 // Route53 hosted zone to grant record access to (optional)
	KMSKeys      []string               // KMS key ARNs used by etcd volumes (populated in BuildAWSPolicy)
	Region       string                 // AWS region; determines the ARN partition prefix
	ResourceARN  *string                // explicit resource ARN; defaults to "*" when nil
	Role         kops.InstanceGroupRole // instance group role the policy is built for
}
// BuildAWSPolicy builds a set of IAM policy statements based on the
// instance group type and IAM Legacy flag within the Cluster Spec
func (b *PolicyBuilder) BuildAWSPolicy() (*Policy, error) {
	// Retrieve all the KMS Keys in use
	for _, etcdCluster := range b.Cluster.Spec.EtcdClusters {
		for _, member := range etcdCluster.Members {
			if member.KmsKeyId != nil {
				b.KMSKeys = append(b.KMSKeys, *member.KmsKeyId)
			}
		}
	}

	// Dispatch to the role-specific builder.
	switch b.Role {
	case kops.InstanceGroupRoleBastion:
		policy, err := b.BuildAWSPolicyBastion()
		if err != nil {
			return nil, fmt.Errorf("failed to generate AWS IAM Policy for Bastion Instance Group: %v", err)
		}
		return policy, nil
	case kops.InstanceGroupRoleNode:
		policy, err := b.BuildAWSPolicyNode()
		if err != nil {
			return nil, fmt.Errorf("failed to generate AWS IAM Policy for Node Instance Group: %v", err)
		}
		return policy, nil
	case kops.InstanceGroupRoleMaster:
		policy, err := b.BuildAWSPolicyMaster()
		if err != nil {
			return nil, fmt.Errorf("failed to generate AWS IAM Policy for Master Instance Group: %v", err)
		}
		return policy, nil
	default:
		return nil, fmt.Errorf("unrecognised instance group type: %s", b.Role)
	}
}
// BuildAWSPolicyMaster generates a custom policy for a Kubernetes master.
// It composes EC2, Auto Scaling, ELB, certificate, S3, KMS, Route53, ECR and
// CNI-specific permissions, each gated on the cluster spec (legacy IAM flag,
// networking provider, etc.).
func (b *PolicyBuilder) BuildAWSPolicyMaster() (*Policy, error) {
	resource := createResource(b)

	p := &Policy{
		Version: PolicyDefaultVersion,
	}

	addMasterEC2Policies(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName())
	addMasterASPolicies(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName())
	addMasterELBPolicies(p, resource, b.Cluster.Spec.IAM.Legacy)
	addCertIAMPolicies(p, resource)

	var err error
	if p, err = b.AddS3Permissions(p); err != nil {
		return nil, fmt.Errorf("failed to generate AWS IAM S3 access statements: %v", err)
	}

	// Only grant KMS access when etcd volumes are encrypted with CMKs.
	if b.KMSKeys != nil && len(b.KMSKeys) != 0 {
		addKMSIAMPolicies(p, stringorslice.Slice(b.KMSKeys), b.Cluster.Spec.IAM.Legacy)
	}

	if b.HostedZoneID != "" {
		b.addRoute53Permissions(p, b.HostedZoneID)
	}

	if b.Cluster.Spec.IAM.Legacy {
		addRoute53ListHostedZonesPermission(p)
	}

	if b.Cluster.Spec.IAM.Legacy || b.Cluster.Spec.IAM.AllowContainerRegistry {
		addECRPermissions(p)
	}

	// CNI-provider-specific permissions.
	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.AmazonVPC != nil {
		addAmazonVPCCNIPermissions(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName(), b.IAMPrefix())
	}

	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.LyftVPC != nil {
		addLyftVPCPermissions(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName())
	}

	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.Cilium != nil && b.Cluster.Spec.Networking.Cilium.Ipam == kops.CiliumIpamEni {
		addCiliumEniPermissions(p, resource, b.Cluster.Spec.IAM.Legacy)
	}

	return p, nil
}
// BuildAWSPolicyNode generates a custom policy for a Kubernetes node.
// Nodes get a much smaller grant than masters: basic EC2 describes, tailored
// S3 state-store access, and (optionally) Route53, ECR and CNI permissions.
func (b *PolicyBuilder) BuildAWSPolicyNode() (*Policy, error) {
	resource := createResource(b)

	p := &Policy{
		Version: PolicyDefaultVersion,
	}

	addNodeEC2Policies(p, resource)

	var err error
	if p, err = b.AddS3Permissions(p); err != nil {
		return nil, fmt.Errorf("failed to generate AWS IAM S3 access statements: %v", err)
	}

	// In legacy mode nodes also managed DNS records directly.
	if b.Cluster.Spec.IAM.Legacy {
		if b.HostedZoneID != "" {
			b.addRoute53Permissions(p, b.HostedZoneID)
		}
		addRoute53ListHostedZonesPermission(p)
	}

	if b.Cluster.Spec.IAM.Legacy || b.Cluster.Spec.IAM.AllowContainerRegistry {
		addECRPermissions(p)
	}

	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.AmazonVPC != nil {
		addAmazonVPCCNIPermissions(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName(), b.IAMPrefix())
	}

	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.LyftVPC != nil {
		addLyftVPCPermissions(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName())
	}

	return p, nil
}
// BuildAWSPolicyBastion generates a custom policy for a bastion host.
// Bastion hosts currently don't require any specific permissions, but AWS
// rejects empty policies, so a trivial read-only permission is granted.
func (b *PolicyBuilder) BuildAWSPolicyBastion() (*Policy, error) {
	trivial := &Statement{
		Effect:   StatementEffectAllow,
		Action:   stringorslice.Slice([]string{"ec2:DescribeRegions"}),
		Resource: createResource(b),
	}

	return &Policy{
		Version:   PolicyDefaultVersion,
		Statement: []*Statement{trivial},
	}, nil
}
// IAMPrefix returns the prefix for AWS ARNs in the current region, for use
// with IAM. It is "arn:aws" everywhere except the China ("arn:aws-cn") and
// GovCloud ("arn:aws-us-gov") partitions.
func (b *PolicyBuilder) IAMPrefix() string {
	switch b.Region {
	case "cn-north-1", "cn-northwest-1":
		return "arn:aws-cn"
	case "us-gov-east-1", "us-gov-west-1":
		return "arn:aws-us-gov"
	default:
		return "arn:aws"
	}
}
// AddS3Permissions updates an IAM Policy with statements granting tailored
// access to S3 assets, depending on the instance group role.
//
// It grants (a) read access to the cluster state store roots (everything for
// masters, a restricted path list for nodes), (b) read/write access to etcd
// backup stores, and (c) bucket-level list/location permissions for every
// bucket touched. Non-S3 VFS paths are either ignored (memfs, vault) or
// rejected with an error.
//
// Fix: the warning for Vault paths previously logged the typo "valult".
func (b *PolicyBuilder) AddS3Permissions(p *Policy) (*Policy, error) {
	// For S3 IAM permissions we grant permissions to subtrees, so find the parents;
	// we don't need to grant mypath and mypath/child.
	var roots []string
	{
		var locations []string

		for _, p := range []string{
			b.Cluster.Spec.KeyStore,
			b.Cluster.Spec.SecretStore,
			b.Cluster.Spec.ConfigStore,
		} {
			if p == "" {
				continue
			}

			if !strings.HasSuffix(p, "/") {
				p = p + "/"
			}
			locations = append(locations, p)
		}

		// Keep only locations that are not nested inside another location.
		for i, l := range locations {
			isTopLevel := true
			for j := range locations {
				if i == j {
					continue
				}
				if strings.HasPrefix(l, locations[j]) {
					klog.V(4).Infof("Ignoring location %q because found parent %q", l, locations[j])
					isTopLevel = false
				}
			}
			if isTopLevel {
				klog.V(4).Infof("Found root location %q", l)
				roots = append(roots, l)
			}
		}
	}

	// Sort so the emitted policy is deterministic.
	sort.Strings(roots)

	s3Buckets := sets.NewString()

	for _, root := range roots {
		vfsPath, err := vfs.Context.BuildVfsPath(root)
		if err != nil {
			return nil, fmt.Errorf("cannot parse VFS path %q: %v", root, err)
		}

		if s3Path, ok := vfsPath.(*vfs.S3Path); ok {
			iamS3Path := s3Path.Bucket() + "/" + s3Path.Key()
			iamS3Path = strings.TrimSuffix(iamS3Path, "/")

			s3Buckets.Insert(s3Path.Bucket())

			if b.Cluster.Spec.IAM.Legacy {
				// Legacy mode: full s3:* on the whole subtree.
				p.Statement = append(p.Statement, &Statement{
					Effect: StatementEffectAllow,
					Action: stringorslice.Slice([]string{"s3:*"}),
					Resource: stringorslice.Of(
						strings.Join([]string{b.IAMPrefix(), ":s3:::", iamS3Path, "/*"}, ""),
					),
				})
			} else {
				// Tailored mode: read-only access to the role-specific path list.
				resources, err := ReadableStatePaths(b.Cluster, b.Role)
				if err != nil {
					return nil, err
				}

				sort.Strings(resources)

				// Add the prefix for IAM
				for i, r := range resources {
					resources[i] = b.IAMPrefix() + ":s3:::" + iamS3Path + r
				}

				p.Statement = append(p.Statement, &Statement{
					Effect:   StatementEffectAllow,
					Action:   stringorslice.Slice([]string{"s3:Get*"}),
					Resource: stringorslice.Of(resources...),
				})
			}
		} else if _, ok := vfsPath.(*vfs.MemFSPath); ok {
			// Tests -ignore - nothing we can do in terms of IAM policy
			klog.Warningf("ignoring memfs path %q for IAM policy builder", vfsPath)
		} else if _, ok := vfsPath.(*vfs.VaultPath); ok {
			// Vault access needs to come from somewhere else
			klog.Warningf("ignoring vault path %q for IAM policy builder", vfsPath)
		} else {
			// We could implement this approach, but it seems better to
			// get all clouds using cluster-readable storage
			return nil, fmt.Errorf("path is not cluster readable: %v", root)
		}
	}

	// Grant read/write on etcd backup stores (masters only; see WriteableVFSPaths).
	writeablePaths, err := WriteableVFSPaths(b.Cluster, b.Role)
	if err != nil {
		return nil, err
	}

	for _, vfsPath := range writeablePaths {
		if s3Path, ok := vfsPath.(*vfs.S3Path); ok {
			iamS3Path := s3Path.Bucket() + "/" + s3Path.Key()
			iamS3Path = strings.TrimSuffix(iamS3Path, "/")

			p.Statement = append(p.Statement, &Statement{
				Effect: StatementEffectAllow,
				Action: stringorslice.Slice([]string{
					"s3:GetObject",
					"s3:DeleteObject",
					"s3:DeleteObjectVersion",
					"s3:PutObject",
				}),
				Resource: stringorslice.Of(
					strings.Join([]string{b.IAMPrefix(), ":s3:::", iamS3Path, "/*"}, ""),
				),
			})

			s3Buckets.Insert(s3Path.Bucket())
		} else {
			klog.Warningf("unknown writeable path, can't apply IAM policy: %q", vfsPath)
		}
	}

	// We need some permissions on the buckets themselves
	for _, s3Bucket := range s3Buckets.List() {
		p.Statement = append(p.Statement, &Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Of(
				"s3:GetBucketLocation",
				"s3:GetEncryptionConfiguration",
				"s3:ListBucket",
				"s3:ListBucketVersions",
			),
			Resource: stringorslice.Slice([]string{
				strings.Join([]string{b.IAMPrefix(), ":s3:::", s3Bucket}, ""),
			}),
		})
	}

	return p, nil
}
// WriteableVFSPaths returns the VFS paths an instance group role needs write
// access to. Only masters get write access, and only to configured etcd
// backup stores (deduplicated).
func WriteableVFSPaths(cluster *kops.Cluster, role kops.InstanceGroupRole) ([]vfs.Path, error) {
	// On the master, grant IAM permissions to the backup store, if it is configured
	if role != kops.InstanceGroupRoleMaster {
		return nil, nil
	}

	var paths []vfs.Path
	seen := sets.NewString()
	for _, etcdCluster := range cluster.Spec.EtcdClusters {
		if etcdCluster.Backups == nil {
			continue
		}
		store := etcdCluster.Backups.BackupStore
		if store == "" || seen.Has(store) {
			continue
		}

		vfsPath, err := vfs.Context.BuildVfsPath(store)
		if err != nil {
			return nil, fmt.Errorf("cannot parse VFS path %q: %v", store, err)
		}
		paths = append(paths, vfsPath)
		seen.Insert(store)
	}
	return paths, nil
}
// ReadableStatePaths returns the file paths that should be readable in the cluster's state store "directory"
// Masters may read everything; nodes get a restricted list, with private-key
// paths added only when the node actually needs them (e.g. the calico-client
// key is granted only when Calico talks to a TLS-enabled etcd that is NOT
// run by etcd-manager).
func ReadableStatePaths(cluster *kops.Cluster, role kops.InstanceGroupRole) ([]string, error) {
	var paths []string

	if role == kops.InstanceGroupRoleMaster {
		// Masters read the entire state store.
		paths = append(paths, "/*")
	} else if role == kops.InstanceGroupRoleNode {
		paths = append(paths,
			"/addons/*",
			"/cluster.spec",
			"/config",
			"/instancegroup/*",
			"/pki/issued/*",
			"/pki/ssh/*",
			"/secrets/dockerconfig",
		)

		// Give access to keys for client certificates as needed.
		if !model.UseKopsControllerForNodeBootstrap(cluster) {
			paths = append(paths, "/pki/private/kube-proxy/*")

			// Kubelet authenticates either via bootstrap tokens (node-authorizer)
			// or via its own client certificate.
			if useBootstrapTokens(cluster) {
				paths = append(paths, "/pki/private/node-authorizer-client/*")
			} else {
				paths = append(paths, "/pki/private/kubelet/*")
			}

			networkingSpec := cluster.Spec.Networking

			if networkingSpec != nil {
				// @check if kuberoute is enabled and permit access to the private key
				if networkingSpec.Kuberouter != nil {
					paths = append(paths, "/pki/private/kube-router/*")
				}

				// @check if calico is enabled as the CNI provider and permit access to the client TLS certificate by default
				if networkingSpec.Calico != nil {
					calicoClientCert := false
					for _, x := range cluster.Spec.EtcdClusters {
						if x.Provider == kops.EtcdProviderTypeManager {
							// etcd-manager handles certs itself; never expose the key.
							calicoClientCert = false
							break
						}
						if x.EnableEtcdTLS {
							calicoClientCert = true
						}
					}

					if calicoClientCert {
						paths = append(paths, "/pki/private/calico-client/*")
					}
				}

				// @check if cilium is enabled as the CNI provider and permit access to the cilium etc client TLS certificate by default
				// As long as the Cilium Etcd cluster exists, we should do this
				if networkingSpec.Cilium != nil && model.UseCiliumEtcd(cluster) {
					paths = append(paths, "/pki/private/etcd-clients-ca-cilium/*")
				}
			}
		}
	}
	return paths, nil
}
// PolicyResource defines the PolicyBuilder and DNSZone to use when building the
// IAM policy document for a given instance group role
type PolicyResource struct {
	Builder *PolicyBuilder    // builds the policy document
	DNSZone *awstasks.DNSZone // optional zone whose ID is injected at render time
}

// Compile-time interface conformance checks.
var _ fi.Resource = &PolicyResource{}
var _ fi.HasDependencies = &PolicyResource{}
// GetDependencies adds the DNSZone task to the list of dependencies if set
func (b *PolicyResource) GetDependencies(tasks map[string]fi.Task) []fi.Task {
	if b.DNSZone == nil {
		return nil
	}
	return []fi.Task{b.DNSZone}
}
// Open produces the AWS IAM policy for the given role, rendered as a JSON
// document. If a DNSZone is attached, its resolved ZoneID is injected into a
// copy of the builder first.
func (b *PolicyResource) Open() (io.Reader, error) {
	// Defensive copy before mutation
	builder := *b.Builder

	if b.DNSZone != nil {
		zoneID := fi.StringValue(b.DNSZone.ZoneID)
		if zoneID == "" {
			// Dependency analysis failure?
			return nil, fmt.Errorf("DNS ZoneID not set")
		}
		builder.HostedZoneID = zoneID
	}

	policy, err := builder.BuildAWSPolicy()
	if err != nil {
		return nil, fmt.Errorf("error building IAM policy: %v", err)
	}

	jsonDoc, err := policy.AsJSON()
	if err != nil {
		return nil, fmt.Errorf("error building IAM policy: %v", err)
	}

	return bytes.NewReader([]byte(jsonDoc)), nil
}
// useBootstrapTokens check if we are using bootstrap tokens - @TODO, i don't like this we should probably pass in
// the kops model into the builder rather than duplicating the code. I'll leave for another PR
func useBootstrapTokens(cluster *kops.Cluster) bool {
	apiServer := cluster.Spec.KubeAPIServer
	if apiServer == nil {
		return false
	}
	return fi.BoolValue(apiServer.EnableBootstrapAuthToken)
}
// addECRPermissions grants read-only ECR access so nodes (and the master,
// which may run a private logging pod or similar) can pull images from any
// region's registry.
func addECRPermissions(p *Policy) {
	// TODO - I think we can just have GetAuthorizationToken here, as we are not
	// TODO - making any API calls except for GetAuthorizationToken.

	// At this point we allow all regions with ECR, since ECR is region specific.
	ecrStatement := &Statement{
		Effect: StatementEffectAllow,
		Action: stringorslice.Of(
			"ecr:GetAuthorizationToken",
			"ecr:BatchCheckLayerAvailability",
			"ecr:GetDownloadUrlForLayer",
			"ecr:GetRepositoryPolicy",
			"ecr:DescribeRepositories",
			"ecr:ListImages",
			"ecr:BatchGetImage",
		),
		Resource: stringorslice.Slice([]string{"*"}),
	}
	p.Statement = append(p.Statement, ecrStatement)
}
// addRoute53Permissions grants record management on the given hosted zone,
// change-status polling, and zone listing (ListHostedZones supports no
// resource-level restriction, hence the wildcard).
func (b *PolicyBuilder) addRoute53Permissions(p *Policy, hostedZoneID string) {
	// TODO: Route53 currently not supported in China, need to check and fail/return

	// Remove /hostedzone/ prefix (if present)
	hostedZoneID = strings.TrimPrefix(hostedZoneID, "/")
	hostedZoneID = strings.TrimPrefix(hostedZoneID, "hostedzone/")

	p.Statement = append(p.Statement,
		&Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Of("route53:ChangeResourceRecordSets",
				"route53:ListResourceRecordSets",
				"route53:GetHostedZone"),
			Resource: stringorslice.Slice([]string{b.IAMPrefix() + ":route53:::hostedzone/" + hostedZoneID}),
		},
		&Statement{
			Effect:   StatementEffectAllow,
			Action:   stringorslice.Slice([]string{"route53:GetChange"}),
			Resource: stringorslice.Slice([]string{b.IAMPrefix() + ":route53:::change/*"}),
		},
		&Statement{
			Effect:   StatementEffectAllow,
			Action:   stringorslice.Slice([]string{"route53:ListHostedZones"}),
			Resource: stringorslice.Slice([]string{"*"}),
		},
	)
}
// addKMSIAMPolicies grants the KMS permissions needed to use encrypted EBS
// volumes (grant creation, encrypt/decrypt, key description). Legacy mode
// additionally grants grant listing/revocation.
func addKMSIAMPolicies(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool) {
	var stmts []*Statement

	if legacyIAM {
		stmts = append(stmts, &Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Of(
				"kms:ListGrants",
				"kms:RevokeGrant",
			),
			Resource: resource,
		})
	}

	// TODO could use "kms:ViaService" Condition Key here?
	stmts = append(stmts, &Statement{
		Effect: StatementEffectAllow,
		Action: stringorslice.Of(
			"kms:CreateGrant",
			"kms:Decrypt",
			"kms:DescribeKey",
			"kms:Encrypt",
			"kms:GenerateDataKey*",
			"kms:ReEncrypt*",
		),
		Resource: resource,
	})

	p.Statement = append(p.Statement, stmts...)
}
// addNodeEC2Policies grants the minimal EC2 describes a node needs:
// Protokube makes a DescribeInstances call, and DescribeRegions is used when
// finding the S3 state bucket.
func addNodeEC2Policies(p *Policy, resource stringorslice.StringOrSlice) {
	describeStatement := &Statement{
		Effect:   StatementEffectAllow,
		Action:   stringorslice.Slice([]string{"ec2:DescribeInstances", "ec2:DescribeRegions"}),
		Resource: resource,
	}
	p.Statement = append(p.Statement, describeStatement)
}
// addMasterEC2Policies appends the EC2 permissions a Kubernetes master needs
// to p. Legacy mode grants the broad ec2:* action; otherwise actions are
// split into three statements: unconditioned describes, unconditioned
// create/modify actions (no usable condition keys yet), and destructive
// actions restricted by the KubernetesCluster resource tag.
func addMasterEC2Policies(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool, clusterName string) {
	// The legacy IAM policy grants full ec2 API access
	if legacyIAM {
		p.Statement = append(p.Statement,
			&Statement{
				Effect:   StatementEffectAllow,
				Action:   stringorslice.Slice([]string{"ec2:*"}),
				Resource: resource,
			},
		)
	} else {
		// Describe* calls don't support any additional IAM restrictions
		// The non-Describe* ec2 calls support different types of filtering:
		// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ec2-api-permissions.html
		// We try to lock down the permissions here in non-legacy mode,
		// but there are still some improvements we can make:

		// CreateVolume - supports filtering on tags, but we need to switch to pass tags to CreateVolume
		// CreateTags - supports filtering on existing tags. Also supports filtering on VPC for some resources (e.g. security groups)
		// Network Routing Permissions - May not be required with the CNI Networking provider

		// Comments are which cloudprovider code file makes the call
		p.Statement = append(p.Statement,
			&Statement{
				Effect: StatementEffectAllow,
				Action: stringorslice.Slice([]string{
					"ec2:DescribeAccountAttributes", // aws.go
					"ec2:DescribeInstances",         // aws.go
					"ec2:DescribeInternetGateways",  // aws.go
					"ec2:DescribeRegions",           // s3context.go
					"ec2:DescribeRouteTables",       // aws.go
					"ec2:DescribeSecurityGroups",    // aws.go
					"ec2:DescribeSubnets",           // aws.go
					"ec2:DescribeVolumes",           // aws.go
				}),
				Resource: resource,
			},
			&Statement{
				Effect: StatementEffectAllow,
				Action: stringorslice.Slice([]string{
					"ec2:CreateSecurityGroup",          // aws.go
					"ec2:CreateTags",                   // aws.go, tag.go
					"ec2:CreateVolume",                 // aws.go
					"ec2:DescribeVolumesModifications", // aws.go
					"ec2:ModifyInstanceAttribute",      // aws.go
					"ec2:ModifyVolume",                 // aws.go
				}),
				Resource: resource,
			},
			// Destructive/attach actions are tag-scoped to this cluster.
			&Statement{
				Effect: StatementEffectAllow,
				Action: stringorslice.Of(
					"ec2:AttachVolume",                  // aws.go
					"ec2:AuthorizeSecurityGroupIngress", // aws.go
					"ec2:CreateRoute",                   // aws.go
					"ec2:DeleteRoute",                   // aws.go
					"ec2:DeleteSecurityGroup",           // aws.go
					"ec2:DeleteVolume",                  // aws.go
					"ec2:DetachVolume",                  // aws.go
					"ec2:RevokeSecurityGroupIngress",    // aws.go
				),
				Resource: resource,
				Condition: Condition{
					"StringEquals": map[string]string{
						"ec2:ResourceTag/KubernetesCluster": clusterName,
					},
				},
			},
		)
	}
}
// addMasterELBPolicies appends the Elastic Load Balancing permissions a
// Kubernetes master needs to p. In legacy mode it grants the broad
// elasticloadbalancing:* action; otherwise it grants two tailored statements,
// one for Classic ELB operations and one for NLB/target-group operations.
func addMasterELBPolicies(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool) {
	if legacyIAM {
		// Legacy mode: full ELB access, no action-level lockdown.
		p.Statement = append(p.Statement, &Statement{
			Effect:   StatementEffectAllow,
			Action:   stringorslice.Slice([]string{"elasticloadbalancing:*"}),
			Resource: resource,
		})
	} else {
		// Comments are which cloudprovider code file makes the call
		p.Statement = append(p.Statement, &Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Of(
				"elasticloadbalancing:AddTags",                                 // aws_loadbalancer.go
				"elasticloadbalancing:AttachLoadBalancerToSubnets",             // aws_loadbalancer.go
				"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",       // aws_loadbalancer.go
				"elasticloadbalancing:CreateLoadBalancer",                      // aws_loadbalancer.go
				"elasticloadbalancing:CreateLoadBalancerPolicy",                // aws_loadbalancer.go
				"elasticloadbalancing:CreateLoadBalancerListeners",             // aws_loadbalancer.go
				"elasticloadbalancing:ConfigureHealthCheck",                    // aws_loadbalancer.go
				"elasticloadbalancing:DeleteLoadBalancer",                      // aws.go
				"elasticloadbalancing:DeleteLoadBalancerListeners",             // aws_loadbalancer.go
				"elasticloadbalancing:DescribeLoadBalancers",                   // aws.go
				"elasticloadbalancing:DescribeLoadBalancerAttributes",          // aws.go
				"elasticloadbalancing:DetachLoadBalancerFromSubnets",           // aws_loadbalancer.go
				"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",     // aws_loadbalancer.go
				"elasticloadbalancing:ModifyLoadBalancerAttributes",            // aws_loadbalancer.go
				"elasticloadbalancing:RegisterInstancesWithLoadBalancer",       // aws_loadbalancer.go
				"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", // aws_loadbalancer.go
			),
			Resource: resource,
		})
		// NLB / target-group actions (plus ec2:DescribeVpcs, which the
		// loadbalancer code needs when resolving the VPC).
		p.Statement = append(p.Statement, &Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Of(
				"ec2:DescribeVpcs",                                         // aws_loadbalancer.go
				"elasticloadbalancing:AddTags",                             // aws_loadbalancer.go
				"elasticloadbalancing:CreateListener",                      // aws_loadbalancer.go
				"elasticloadbalancing:CreateTargetGroup",                   // aws_loadbalancer.go
				"elasticloadbalancing:DeleteListener",                      // aws_loadbalancer.go
				"elasticloadbalancing:DeleteTargetGroup",                   // aws_loadbalancer.go
				"elasticloadbalancing:DeregisterTargets",                   // aws_loadbalancer.go
				"elasticloadbalancing:DescribeListeners",                   // aws_loadbalancer.go
				"elasticloadbalancing:DescribeLoadBalancerPolicies",        // aws_loadbalancer.go
				"elasticloadbalancing:DescribeTargetGroups",                // aws_loadbalancer.go
				"elasticloadbalancing:DescribeTargetHealth",                // aws_loadbalancer.go
				"elasticloadbalancing:ModifyListener",                      // aws_loadbalancer.go
				"elasticloadbalancing:ModifyTargetGroup",                   // aws_loadbalancer.go
				"elasticloadbalancing:RegisterTargets",                     // aws_loadbalancer.go
				"elasticloadbalancing:SetLoadBalancerPoliciesOfListener",   // aws_loadbalancer.go
			),
			Resource: resource,
		})
	}
}
// addMasterASPolicies appends the Auto Scaling permissions a Kubernetes
// master (and the cluster-autoscaler running on it) needs to p. In legacy
// mode all actions share one unconditioned statement; otherwise the mutating
// actions are restricted by the KubernetesCluster resource tag.
func addMasterASPolicies(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool, clusterName string) {
	if legacyIAM {
		p.Statement = append(p.Statement, &Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Slice([]string{
				"autoscaling:DescribeAutoScalingGroups",
				"autoscaling:DescribeAutoScalingInstances",
				"autoscaling:DescribeLaunchConfigurations",
				"autoscaling:DescribeTags",
				"autoscaling:SetDesiredCapacity",
				"autoscaling:TerminateInstanceInAutoScalingGroup",
				"autoscaling:UpdateAutoScalingGroup",
				"ec2:DescribeLaunchTemplateVersions",
			}),
			Resource: resource,
		})
	} else {
		// Comments are which cloudprovider / autoscaler code file makes the call
		// TODO: Make optional only if using autoscalers
		p.Statement = append(p.Statement,
			// Read-only describe actions: no condition-key support.
			&Statement{
				Effect: StatementEffectAllow,
				Action: stringorslice.Of(
					"autoscaling:DescribeAutoScalingGroups",    // aws_instancegroups.go
					"autoscaling:DescribeLaunchConfigurations", // aws.go
					"autoscaling:DescribeTags",                 // auto_scaling.go
					"ec2:DescribeLaunchTemplateVersions",
				),
				Resource: resource,
			},
			// Mutating actions: only on ASGs tagged as belonging to this cluster.
			&Statement{
				Effect: StatementEffectAllow,
				Action: stringorslice.Of(
					"autoscaling:SetDesiredCapacity",                 // aws_manager.go
					"autoscaling:TerminateInstanceInAutoScalingGroup", // aws_manager.go
					"autoscaling:UpdateAutoScalingGroup",             // aws_instancegroups.go
				),
				Resource: resource,
				Condition: Condition{
					"StringEquals": map[string]string{
						"autoscaling:ResourceTag/KubernetesCluster": clusterName,
					},
				},
			},
		)
	}
}
// addCertIAMPolicies grants read access to IAM server certificates, used when
// SSL certificates stored in IAM are attached to ELBs.
func addCertIAMPolicies(p *Policy, resource stringorslice.StringOrSlice) {
	// TODO: Make optional only if using IAM SSL Certs on ELBs
	p.Statement = append(p.Statement, &Statement{
		Effect: StatementEffectAllow,
		Action: stringorslice.Of(
			"iam:ListServerCertificates",
			"iam:GetServerCertificate",
		),
		Resource: resource,
	})
}
// addRoute53ListHostedZonesPermission appends a statement allowing
// route53:ListHostedZones against the "*" wildcard resource.
func addRoute53ListHostedZonesPermission(p *Policy) {
	p.Statement = append(p.Statement, &Statement{
		Effect:   StatementEffectAllow,
		Action:   stringorslice.Slice([]string{"route53:ListHostedZones"}),
		Resource: stringorslice.Slice([]string{"*"}),
	})
}
// addLyftVPCPermissions grants the EC2 ENI/IP management permissions required
// by the Lyft VPC CNI plugin. No-op under legacy IAM, which already grants
// ec2:*.
//
// NOTE(review): clusterName is unused here — presumably kept for signature
// parity with the other add*Permissions helpers; confirm before removing.
func addLyftVPCPermissions(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool, clusterName string) {
	if legacyIAM {
		// Legacy IAM provides ec2:*, so no additional permissions required
		return
	}
	p.Statement = append(p.Statement,
		&Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Slice([]string{
				"ec2:AssignPrivateIpAddresses",
				"ec2:AttachNetworkInterface",
				"ec2:CreateNetworkInterface",
				"ec2:DeleteNetworkInterface",
				"ec2:DescribeInstanceTypes",
				"ec2:DescribeNetworkInterfaces",
				"ec2:DescribeSecurityGroups",
				"ec2:DescribeSubnets",
				"ec2:DescribeVpcPeeringConnections",
				"ec2:DescribeVpcs",
				"ec2:DetachNetworkInterface",
				"ec2:ModifyNetworkInterfaceAttribute",
				"ec2:UnassignPrivateIpAddresses",
			}),
			Resource: resource,
		},
	)
}
// addCiliumEniPermissions grants the EC2 ENI/IP management permissions needed
// by Cilium's ENI IPAM mode. No-op under legacy IAM, which already grants
// ec2:*.
func addCiliumEniPermissions(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool) {
	if legacyIAM {
		// Legacy IAM provides ec2:*, so no additional permissions required
		return
	}
	p.Statement = append(p.Statement,
		&Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Slice([]string{
				"ec2:DescribeSubnets",
				"ec2:AttachNetworkInterface",
				"ec2:AssignPrivateIpAddresses",
				"ec2:UnassignPrivateIpAddresses",
				"ec2:CreateNetworkInterface",
				"ec2:DescribeNetworkInterfaces",
				"ec2:DescribeVpcPeeringConnections",
				"ec2:DescribeSecurityGroups",
				"ec2:DetachNetworkInterface",
				"ec2:DeleteNetworkInterface",
				"ec2:ModifyNetworkInterfaceAttribute",
				"ec2:DescribeVpcs",
			}),
			Resource: resource,
		},
	)
}
// addAmazonVPCCNIPermissions grants the permissions required by the Amazon VPC
// CNI plugin: ENI/IP management on the given resource, plus ec2:CreateTags
// restricted to network-interface ARNs under the given IAM partition prefix
// (e.g. "arn:aws"). No-op under legacy IAM, which already grants ec2:*.
//
// NOTE(review): clusterName is unused here — presumably kept for signature
// parity with the other add*Permissions helpers; confirm before removing.
func addAmazonVPCCNIPermissions(p *Policy, resource stringorslice.StringOrSlice, legacyIAM bool, clusterName string, iamPrefix string) {
	if legacyIAM {
		// Legacy IAM provides ec2:*, so no additional permissions required
		return
	}
	p.Statement = append(p.Statement,
		&Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Slice([]string{
				"ec2:AssignPrivateIpAddresses",
				"ec2:AttachNetworkInterface",
				"ec2:CreateNetworkInterface",
				"ec2:DeleteNetworkInterface",
				"ec2:DescribeInstances",
				"ec2:DescribeInstanceTypes",
				"ec2:DescribeTags",
				"ec2:DescribeNetworkInterfaces",
				"ec2:DetachNetworkInterface",
				"ec2:ModifyNetworkInterfaceAttribute",
				"ec2:UnassignPrivateIpAddresses",
			}),
			Resource: resource,
		},
		// Tagging is scoped to network-interface ARNs only.
		&Statement{
			Effect: StatementEffectAllow,
			Action: stringorslice.Slice([]string{
				"ec2:CreateTags",
			}),
			Resource: stringorslice.Slice([]string{
				strings.Join([]string{iamPrefix, ":ec2:*:*:network-interface/*"}, ""),
			})},
	)
}
// createResource resolves the resource list for generated statements: the
// builder's explicit ARN when one is set, otherwise the "*" wildcard.
func createResource(b *PolicyBuilder) stringorslice.StringOrSlice {
	if b.ResourceARN != nil {
		return stringorslice.Slice([]string{*b.ResourceARN})
	}
	return stringorslice.Slice([]string{"*"})
}
|
package proxy
import (
"errors"
"sort"
"time"
"github.com/fagongzi/gateway/pkg/pb/metapb"
"github.com/fagongzi/gateway/pkg/util"
"github.com/fagongzi/log"
)
// Sentinel errors returned by dispatcher mutation methods, plus the page size
// used when scanning the backing store.
var (
	errServerExists = errors.New("Server already exist")
	errClusterExists = errors.New("Cluster already exist")
	errBindExists = errors.New("Bind already exist")
	errAPIExists = errors.New("API already exist")
	errProxyExists = errors.New("Proxy already exist")
	errRoutingExists = errors.New("Routing already exist")
	errServerNotFound = errors.New("Server not found")
	errClusterNotFound = errors.New("Cluster not found")
	errBindNotFound = errors.New("Bind not found")
	errProxyNotFound = errors.New("Proxy not found")
	errAPINotFound = errors.New("API not found")
	errRoutingNotFound = errors.New("Routing not found")
	// limit is the batch size for store scans (GetProxies, GetAPIs, ...).
	limit = int64(32)
)
// load starts the store watcher and then loads all metadata. Order matters:
// clusters and servers must exist before binds can be resolved.
func (r *dispatcher) load() {
	go r.watch()
	r.loadProxies()
	r.loadClusters()
	r.loadServers()
	r.loadBinds()
	r.loadAPIs()
	r.loadRoutings()
}
// loadProxies reads every proxy record from the backing store and registers
// it with the dispatcher; a store error is logged and loading stops.
func (r *dispatcher) loadProxies() {
	log.Infof("load proxies")
	if err := r.store.GetProxies(limit, func(value *metapb.Proxy) error {
		return r.addProxy(value)
	}); err != nil {
		log.Errorf("load proxies failed, errors:\n%+v", err)
	}
}
// loadClusters reads every cluster record from the backing store and
// registers it with the dispatcher; a store error is logged and loading stops.
func (r *dispatcher) loadClusters() {
	log.Infof("load clusters")
	if err := r.store.GetClusters(limit, func(value interface{}) error {
		return r.addCluster(value.(*metapb.Cluster))
	}); err != nil {
		log.Errorf("load clusters failed, errors:\n%+v", err)
	}
}
// loadServers reads every server record from the backing store and registers
// it with the dispatcher; a store error is logged and loading stops.
func (r *dispatcher) loadServers() {
	log.Infof("load servers")
	if err := r.store.GetServers(limit, func(value interface{}) error {
		return r.addServer(value.(*metapb.Server))
	}); err != nil {
		log.Errorf("load servers failed, errors:\n%+v", err)
	}
}
// loadRoutings reads every routing record from the backing store and
// registers it with the dispatcher; a store error is logged and loading stops.
func (r *dispatcher) loadRoutings() {
	log.Infof("load routings")
	err := r.store.GetRoutings(limit, func(value interface{}) error {
		return r.addRouting(value.(*metapb.Routing))
	})
	if nil != err {
		// Fixed copy/paste bug: this message previously said "load servers failed".
		log.Errorf("load routings failed, errors:\n%+v",
			err)
		return
	}
}
// loadBinds resolves the server bindings for every known cluster. Clusters
// and servers must already be loaded. A failed addBind is fatal: log.Fatalf
// terminates the process.
func (r *dispatcher) loadBinds() {
	log.Infof("load binds")
	for clusterID := range r.clusters {
		servers, err := r.store.GetBindServers(clusterID)
		if nil != err {
			log.Errorf("load binds from store failed, errors:\n%+v",
				err)
			return
		}
		for _, serverID := range servers {
			b := &metapb.Bind{
				ClusterID: clusterID,
				ServerID: serverID,
			}
			err = r.addBind(b)
			if nil != err {
				// Fatalf exits the process: a broken bind at startup is unrecoverable.
				log.Fatalf("bind <%s> add failed, errors:\n%+v",
					b.String(),
					err)
			}
		}
	}
}
// loadAPIs reads every API record from the backing store and registers it
// with the dispatcher; a store error is logged and loading stops.
func (r *dispatcher) loadAPIs() {
	log.Infof("load apis")
	if err := r.store.GetAPIs(limit, func(value interface{}) error {
		return r.addAPI(value.(*metapb.API))
	}); err != nil {
		log.Errorf("load apis failed, errors:\n%+v", err)
	}
}
// addRouting registers a new routing. The routings map is replaced
// copy-on-write (copyRoutings) so concurrent readers see a consistent map.
func (r *dispatcher) addRouting(meta *metapb.Routing) error {
	if _, ok := r.routings[meta.ID]; ok {
		return errRoutingExists
	}
	newValues := r.copyRoutings(0)
	newValues[meta.ID] = newRoutingRuntime(meta)
	r.routings = newValues
	log.Infof("routing <%d> added, data <%s>",
		meta.ID,
		meta.String())
	return nil
}
// updateRouting replaces the metadata of an existing routing, copy-on-write
// so concurrent readers of r.routings are unaffected.
func (r *dispatcher) updateRouting(meta *metapb.Routing) error {
	// Existence check only; the runtime we mutate comes from the copy below.
	// (Previously the value from the live map was bound and immediately
	// shadowed — now consistent with updateAPI/updateCluster.)
	if _, ok := r.routings[meta.ID]; !ok {
		return errRoutingNotFound
	}
	newValues := r.copyRoutings(0)
	rt := newValues[meta.ID]
	rt.updateMeta(meta)
	r.routings = newValues
	log.Infof("routing <%d> updated, data <%s>",
		meta.ID,
		meta.String())
	return nil
}
// removeRouting drops the routing with the given id, copy-on-write.
func (r *dispatcher) removeRouting(id uint64) error {
	_, ok := r.routings[id]
	if !ok {
		return errRoutingNotFound
	}
	r.routings = r.copyRoutings(id)
	log.Infof("routing <%d> deleted", id)
	return nil
}
// addProxy registers a proxy keyed by its formatted address and recomputes
// per-proxy QPS quotas, since the quota is MaxQPS split across proxies.
func (r *dispatcher) addProxy(meta *metapb.Proxy) error {
	key := util.GetAddrFormat(meta.Addr)
	if _, ok := r.proxies[key]; ok {
		return errProxyExists
	}
	r.proxies[key] = meta
	r.refreshAllQPS()
	log.Infof("proxy <%s> added", key)
	return nil
}
// removeProxy deregisters a proxy and recomputes per-proxy QPS quotas.
//
// NOTE(review): addProxy keys the map by util.GetAddrFormat(meta.Addr), but
// this deletes by the raw addr argument — callers must pass the formatted
// address or the delete silently misses; confirm against call sites.
func (r *dispatcher) removeProxy(addr string) error {
	if _, ok := r.proxies[addr]; !ok {
		return errProxyNotFound
	}
	delete(r.proxies, addr)
	r.refreshAllQPS()
	log.Infof("proxy <%s> deleted", addr)
	return nil
}
// addAPI registers a new API runtime (copy-on-write) and rebuilds the
// position-sorted key list used for request matching. If the API carries a
// circuit breaker it is also registered with the analysiser.
func (r *dispatcher) addAPI(api *metapb.API) error {
	if _, ok := r.apis[api.ID]; ok {
		return errAPIExists
	}
	a := newAPIRuntime(api, r.tw, r.refreshQPS(api.MaxQPS))
	newValues := r.copyAPIs(0)
	newValues[api.ID] = a
	newKeys := sortAPIs(newValues)
	if a.cb != nil {
		r.addAnalysis(api.ID, a.cb)
	}
	r.apis = newValues
	r.apiSortedKeys = newKeys
	log.Infof("api <%d> added, data <%s>",
		api.ID,
		api.String())
	return nil
}
// updateAPI replaces the metadata of an existing API (copy-on-write),
// refreshes its per-proxy QPS quota, rebuilds the sorted key list and
// re-registers its circuit breaker with the analysiser if present.
func (r *dispatcher) updateAPI(api *metapb.API) error {
	_, ok := r.apis[api.ID]
	if !ok {
		return errAPINotFound
	}
	newValues := r.copyAPIs(0)
	rt := newValues[api.ID]
	rt.activeQPS = r.refreshQPS(api.MaxQPS)
	rt.updateMeta(api)
	newKeys := sortAPIs(newValues)
	// rt.meta now reflects the incoming api (set by updateMeta above).
	if rt.cb != nil {
		r.addAnalysis(rt.meta.ID, rt.meta.CircuitBreaker)
	}
	r.apis = newValues
	r.apiSortedKeys = newKeys
	log.Infof("api <%d> updated, data <%s>",
		api.ID,
		api.String())
	return nil
}
// removeAPI drops the API with the given id (copy-on-write) and rebuilds the
// position-sorted key list.
func (r *dispatcher) removeAPI(id uint64) error {
	_, ok := r.apis[id]
	if !ok {
		return errAPINotFound
	}
	remaining := r.copyAPIs(id)
	r.apiSortedKeys = sortAPIs(remaining)
	r.apis = remaining
	log.Infof("api <%d> removed", id)
	return nil
}
// refreshAllQPS recomputes the per-proxy QPS quota of every server and API,
// called whenever proxy membership changes.
func (r *dispatcher) refreshAllQPS() {
	for _, svr := range r.servers {
		svr.activeQPS = r.refreshQPS(svr.meta.MaxQPS)
		svr.updateMeta(svr.meta)
		r.addToCheck(svr)
	}
	for _, api := range r.apis {
		api.activeQPS = r.refreshQPS(api.meta.MaxQPS)
		api.updateMeta(api.meta)
	}
}
// refreshQPS returns the per-proxy active QPS quota for the given max QPS:
// the max split evenly among known proxies, clamped to at least 1 so integer
// division can never produce a quota of 0 (which would effectively disable
// the server/API whenever value < len(r.proxies)).
func (r *dispatcher) refreshQPS(value int64) int64 {
	activeQPS := value
	if len(r.proxies) > 0 {
		activeQPS = value / int64(len(r.proxies))
	}
	if activeQPS <= 0 {
		activeQPS = 1
	}
	return activeQPS
}
// addServer registers a new server runtime (copy-on-write), wires it into
// the analysiser and the health-check loop.
func (r *dispatcher) addServer(svr *metapb.Server) error {
	if _, ok := r.servers[svr.ID]; ok {
		return errServerExists
	}
	newValues := r.copyServers(0)
	rt := newServerRuntime(svr, r.tw, r.refreshQPS(svr.MaxQPS))
	newValues[svr.ID] = rt
	r.addAnalysis(rt.meta.ID, rt.meta.CircuitBreaker)
	r.addToCheck(rt)
	r.servers = newValues
	log.Infof("server <%d> added, data <%s>",
		svr.ID,
		svr.String())
	return nil
}
// updateServer replaces the metadata of an existing server: the old
// health-check timer is stopped, the copied runtime is updated in place,
// and analysis plus health checking are re-registered.
func (r *dispatcher) updateServer(meta *metapb.Server) error {
	rt, ok := r.servers[meta.ID]
	if !ok {
		return errServerNotFound
	}
	// stop old health check before swapping in the updated runtime
	rt.heathTimeout.Stop()
	newValues := r.copyServers(0)
	rt = newValues[meta.ID]
	rt.activeQPS = r.refreshQPS(meta.MaxQPS)
	rt.updateMeta(meta)
	r.addAnalysis(rt.meta.ID, rt.meta.CircuitBreaker)
	r.addToCheck(rt)
	r.servers = newValues
	log.Infof("server <%d> updated, data <%s>",
		meta.ID,
		meta.String())
	return nil
}
// removeServer drops a server: stops its health check and removes it from
// both the server map and every cluster's bind list (copy-on-write).
func (r *dispatcher) removeServer(id uint64) error {
	rt, ok := r.servers[id]
	if !ok {
		return errServerNotFound
	}
	// stop old health check; the runtime is discarded below
	rt.heathTimeout.Stop()
	newValues := r.copyServers(id)
	// copyBinds with a ServerID filter drops this server's bindings too.
	newBinds := r.copyBinds(metapb.Bind{
		ServerID: id,
	})
	r.servers = newValues
	r.binds = newBinds
	log.Infof("server <%d> removed",
		rt.meta.ID)
	return nil
}
// addAnalysis (re)registers analysis targets for the given id: a default
// 1-second window, plus the circuit breaker's rate-check window when set.
func (r *dispatcher) addAnalysis(id uint64, cb *metapb.CircuitBreaker) {
	r.analysiser.RemoveTarget(id)
	r.analysiser.AddTarget(id, time.Second)
	if cb != nil {
		r.analysiser.AddTarget(id, time.Duration(cb.RateCheckPeriod))
	}
}
// addCluster registers a new cluster runtime (copy-on-write).
func (r *dispatcher) addCluster(cluster *metapb.Cluster) error {
	if _, ok := r.clusters[cluster.ID]; ok {
		return errClusterExists
	}
	newValues := r.copyClusters(0)
	newValues[cluster.ID] = newClusterRuntime(cluster)
	r.clusters = newValues
	log.Infof("cluster <%d> added, data <%s>",
		cluster.ID,
		cluster.String())
	return nil
}
// updateCluster replaces the metadata of an existing cluster (copy-on-write).
func (r *dispatcher) updateCluster(meta *metapb.Cluster) error {
	_, ok := r.clusters[meta.ID]
	if !ok {
		return errClusterNotFound
	}
	newValues := r.copyClusters(0)
	rt := newValues[meta.ID]
	rt.updateMeta(meta)
	r.clusters = newValues
	log.Infof("cluster <%d> updated, data <%s>",
		meta.ID,
		meta.String())
	return nil
}
// removeCluster drops a cluster and all of its bind entries (copy-on-write).
func (r *dispatcher) removeCluster(id uint64) error {
	_, ok := r.clusters[id]
	if !ok {
		return errClusterNotFound
	}
	newValues := r.copyClusters(id)
	// copyBinds with a ClusterID filter drops this cluster's bindings too.
	newBinds := r.copyBinds(metapb.Bind{
		ClusterID: id,
	})
	r.binds = newBinds
	r.clusters = newValues
	log.Infof("cluster <%d> removed",
		id)
	return nil
}
// addBind attaches a server to a cluster. A server without a health check is
// considered Up immediately and added to the cluster's active list; otherwise
// its status starts as Unknown until a check succeeds.
//
// NOTE(review): duplicate binds are not rejected here (errBindExists is
// declared but never returned) — presumably the store prevents duplicates;
// confirm.
func (r *dispatcher) addBind(bind *metapb.Bind) error {
	server, ok := r.servers[bind.ServerID]
	if !ok {
		log.Warnf("bind failed, server <%d> not found",
			bind.ServerID)
		return errServerNotFound
	}
	if _, ok := r.clusters[bind.ClusterID]; !ok {
		log.Warnf("add bind failed, cluster <%d> not found",
			bind.ClusterID)
		return errClusterNotFound
	}
	status := metapb.Unknown
	if server.meta.HeathCheck == nil {
		// no health check configured: treat the server as immediately usable
		status = metapb.Up
	}
	newValues := r.copyBinds(metapb.Bind{})
	if _, ok := newValues[bind.ClusterID]; !ok {
		newValues[bind.ClusterID] = &binds{}
	}
	bindInfos := newValues[bind.ClusterID]
	bindInfos.servers = append(bindInfos.servers, &bindInfo{
		svrID: bind.ServerID,
		status: status,
	})
	if status == metapb.Up {
		bindInfos.actives = append(bindInfos.actives, *server.meta)
	}
	newValues[bind.ClusterID] = bindInfos
	r.binds = newValues
	log.Infof("bind <%d,%d> created", bind.ClusterID, bind.ServerID)
	return nil
}
// removeBind detaches a server from a cluster by rebuilding the bind map
// with this exact (cluster, server) pair filtered out.
func (r *dispatcher) removeBind(bind *metapb.Bind) error {
	if _, ok := r.servers[bind.ServerID]; !ok {
		log.Errorf("remove bind failed: server <%d> not found",
			bind.ServerID)
		return errServerNotFound
	}
	if _, ok := r.clusters[bind.ClusterID]; !ok {
		log.Errorf("remove bind failed: cluster <%d> not found",
			bind.ClusterID)
		return errClusterNotFound
	}
	newValues := r.copyBinds(*bind)
	r.binds = newValues
	log.Infof("bind <%d,%d> removed", bind.ClusterID, bind.ServerID)
	return nil
}
// getServerStatus reports the bind status of server id, or metapb.Unknown
// when the server appears in no cluster's bind list. It scans a snapshot of
// r.binds so a concurrent map swap cannot affect the walk.
func (r *dispatcher) getServerStatus(id uint64) metapb.Status {
	snapshot := r.binds
	for _, clusterBinds := range snapshot {
		for _, serverBind := range clusterBinds.servers {
			if serverBind.svrID == id {
				return serverBind.status
			}
		}
	}
	return metapb.Unknown
}
// sortAPIs returns the API ids ordered by ascending position. The sort is
// stable, so APIs sharing a position keep their relative (map-iteration)
// order. Returns nil for an empty map.
func sortAPIs(apis map[uint64]*apiRuntime) []uint64 {
	if len(apis) == 0 {
		return nil
	}
	// Snapshot each API's position once, then sort the ids by it.
	positions := make(map[uint64]uint32, len(apis))
	keys := make([]uint64, 0, len(apis))
	for id, a := range apis {
		keys = append(keys, id)
		positions[id] = a.position()
	}
	sort.SliceStable(keys, func(i, j int) bool {
		return positions[keys[i]] < positions[keys[j]]
	})
	return keys
}
Fix: prevent the computed activeQPS from being 0 (解决activeQPS=0)
package proxy
import (
"errors"
"sort"
"time"
"github.com/fagongzi/gateway/pkg/pb/metapb"
"github.com/fagongzi/gateway/pkg/util"
"github.com/fagongzi/log"
)
// Sentinel errors returned by dispatcher mutation methods, plus the page size
// used when scanning the backing store.
var (
	errServerExists = errors.New("Server already exist")
	errClusterExists = errors.New("Cluster already exist")
	errBindExists = errors.New("Bind already exist")
	errAPIExists = errors.New("API already exist")
	errProxyExists = errors.New("Proxy already exist")
	errRoutingExists = errors.New("Routing already exist")
	errServerNotFound = errors.New("Server not found")
	errClusterNotFound = errors.New("Cluster not found")
	errBindNotFound = errors.New("Bind not found")
	errProxyNotFound = errors.New("Proxy not found")
	errAPINotFound = errors.New("API not found")
	errRoutingNotFound = errors.New("Routing not found")
	// limit is the batch size for store scans (GetProxies, GetAPIs, ...).
	limit = int64(32)
)
// load starts the store watcher and then loads all metadata. Order matters:
// clusters and servers must exist before binds can be resolved.
func (r *dispatcher) load() {
	go r.watch()
	r.loadProxies()
	r.loadClusters()
	r.loadServers()
	r.loadBinds()
	r.loadAPIs()
	r.loadRoutings()
}
// loadProxies reads every proxy record from the backing store and registers
// it; a store error is logged and loading stops.
func (r *dispatcher) loadProxies() {
	log.Infof("load proxies")
	callback := func(value *metapb.Proxy) error {
		return r.addProxy(value)
	}
	if err := r.store.GetProxies(limit, callback); err != nil {
		log.Errorf("load proxies failed, errors:\n%+v", err)
	}
}
// loadClusters reads every cluster record from the backing store and
// registers it; a store error is logged and loading stops.
func (r *dispatcher) loadClusters() {
	log.Infof("load clusters")
	callback := func(value interface{}) error {
		return r.addCluster(value.(*metapb.Cluster))
	}
	if err := r.store.GetClusters(limit, callback); err != nil {
		log.Errorf("load clusters failed, errors:\n%+v", err)
	}
}
// loadServers reads every server record from the backing store and registers
// it; a store error is logged and loading stops.
func (r *dispatcher) loadServers() {
	log.Infof("load servers")
	callback := func(value interface{}) error {
		return r.addServer(value.(*metapb.Server))
	}
	if err := r.store.GetServers(limit, callback); err != nil {
		log.Errorf("load servers failed, errors:\n%+v", err)
	}
}
// loadRoutings reads every routing record from the backing store and
// registers it; a store error is logged and loading stops.
func (r *dispatcher) loadRoutings() {
	log.Infof("load routings")
	err := r.store.GetRoutings(limit, func(value interface{}) error {
		return r.addRouting(value.(*metapb.Routing))
	})
	if nil != err {
		// Fixed copy/paste bug: this message previously said "load servers failed".
		log.Errorf("load routings failed, errors:\n%+v",
			err)
		return
	}
}
// loadBinds resolves the server bindings for every known cluster. Clusters
// and servers must already be loaded. A failed addBind is fatal: log.Fatalf
// terminates the process.
func (r *dispatcher) loadBinds() {
	log.Infof("load binds")
	for clusterID := range r.clusters {
		servers, err := r.store.GetBindServers(clusterID)
		if nil != err {
			log.Errorf("load binds from store failed, errors:\n%+v",
				err)
			return
		}
		for _, serverID := range servers {
			b := &metapb.Bind{
				ClusterID: clusterID,
				ServerID: serverID,
			}
			err = r.addBind(b)
			if nil != err {
				// Fatalf exits the process: a broken bind at startup is unrecoverable.
				log.Fatalf("bind <%s> add failed, errors:\n%+v",
					b.String(),
					err)
			}
		}
	}
}
// loadAPIs reads every API record from the backing store and registers it;
// a store error is logged and loading stops.
func (r *dispatcher) loadAPIs() {
	log.Infof("load apis")
	callback := func(value interface{}) error {
		return r.addAPI(value.(*metapb.API))
	}
	if err := r.store.GetAPIs(limit, callback); err != nil {
		log.Errorf("load apis failed, errors:\n%+v", err)
	}
}
// addRouting registers a new routing. The routings map is replaced
// copy-on-write (copyRoutings) so concurrent readers see a consistent map.
func (r *dispatcher) addRouting(meta *metapb.Routing) error {
	if _, ok := r.routings[meta.ID]; ok {
		return errRoutingExists
	}
	newValues := r.copyRoutings(0)
	newValues[meta.ID] = newRoutingRuntime(meta)
	r.routings = newValues
	log.Infof("routing <%d> added, data <%s>",
		meta.ID,
		meta.String())
	return nil
}
// updateRouting replaces the metadata of an existing routing, copy-on-write
// so concurrent readers of r.routings are unaffected.
func (r *dispatcher) updateRouting(meta *metapb.Routing) error {
	// Existence check only; the runtime we mutate comes from the copy below.
	// (Previously the value from the live map was bound and immediately
	// shadowed — now consistent with updateAPI/updateCluster.)
	if _, ok := r.routings[meta.ID]; !ok {
		return errRoutingNotFound
	}
	newValues := r.copyRoutings(0)
	rt := newValues[meta.ID]
	rt.updateMeta(meta)
	r.routings = newValues
	log.Infof("routing <%d> updated, data <%s>",
		meta.ID,
		meta.String())
	return nil
}
// removeRouting drops the routing with the given id, copy-on-write.
func (r *dispatcher) removeRouting(id uint64) error {
	_, ok := r.routings[id]
	if !ok {
		return errRoutingNotFound
	}
	r.routings = r.copyRoutings(id)
	log.Infof("routing <%d> deleted", id)
	return nil
}
// addProxy registers a proxy keyed by its formatted address and recomputes
// per-proxy QPS quotas, since the quota is MaxQPS split across proxies.
func (r *dispatcher) addProxy(meta *metapb.Proxy) error {
	key := util.GetAddrFormat(meta.Addr)
	if _, ok := r.proxies[key]; ok {
		return errProxyExists
	}
	r.proxies[key] = meta
	r.refreshAllQPS()
	log.Infof("proxy <%s> added", key)
	return nil
}
// removeProxy deregisters a proxy and recomputes per-proxy QPS quotas.
//
// NOTE(review): addProxy keys the map by util.GetAddrFormat(meta.Addr), but
// this deletes by the raw addr argument — callers must pass the formatted
// address or the delete silently misses; confirm against call sites.
func (r *dispatcher) removeProxy(addr string) error {
	if _, ok := r.proxies[addr]; !ok {
		return errProxyNotFound
	}
	delete(r.proxies, addr)
	r.refreshAllQPS()
	log.Infof("proxy <%s> deleted", addr)
	return nil
}
// addAPI registers a new API runtime (copy-on-write) and rebuilds the
// position-sorted key list used for request matching. If the API carries a
// circuit breaker it is also registered with the analysiser.
func (r *dispatcher) addAPI(api *metapb.API) error {
	if _, ok := r.apis[api.ID]; ok {
		return errAPIExists
	}
	a := newAPIRuntime(api, r.tw, r.refreshQPS(api.MaxQPS))
	newValues := r.copyAPIs(0)
	newValues[api.ID] = a
	newKeys := sortAPIs(newValues)
	if a.cb != nil {
		r.addAnalysis(api.ID, a.cb)
	}
	r.apis = newValues
	r.apiSortedKeys = newKeys
	log.Infof("api <%d> added, data <%s>",
		api.ID,
		api.String())
	return nil
}
// updateAPI replaces the metadata of an existing API (copy-on-write),
// refreshes its per-proxy QPS quota, rebuilds the sorted key list and
// re-registers its circuit breaker with the analysiser if present.
func (r *dispatcher) updateAPI(api *metapb.API) error {
	_, ok := r.apis[api.ID]
	if !ok {
		return errAPINotFound
	}
	newValues := r.copyAPIs(0)
	rt := newValues[api.ID]
	rt.activeQPS = r.refreshQPS(api.MaxQPS)
	rt.updateMeta(api)
	newKeys := sortAPIs(newValues)
	// rt.meta now reflects the incoming api (set by updateMeta above).
	if rt.cb != nil {
		r.addAnalysis(rt.meta.ID, rt.meta.CircuitBreaker)
	}
	r.apis = newValues
	r.apiSortedKeys = newKeys
	log.Infof("api <%d> updated, data <%s>",
		api.ID,
		api.String())
	return nil
}
// removeAPI drops the API with the given id (copy-on-write) and rebuilds the
// position-sorted key list.
func (r *dispatcher) removeAPI(id uint64) error {
	_, ok := r.apis[id]
	if !ok {
		return errAPINotFound
	}
	remaining := r.copyAPIs(id)
	r.apiSortedKeys = sortAPIs(remaining)
	r.apis = remaining
	log.Infof("api <%d> removed", id)
	return nil
}
// refreshAllQPS recomputes the per-proxy QPS quota of every server and API,
// called whenever proxy membership changes.
func (r *dispatcher) refreshAllQPS() {
	for _, svr := range r.servers {
		svr.activeQPS = r.refreshQPS(svr.meta.MaxQPS)
		svr.updateMeta(svr.meta)
		r.addToCheck(svr)
	}
	for _, api := range r.apis {
		api.activeQPS = r.refreshQPS(api.meta.MaxQPS)
		api.updateMeta(api.meta)
	}
}
// refreshQPS returns the per-proxy active QPS quota for the given max QPS:
// the max split evenly among known proxies, clamped to a minimum of 1 so
// integer division can never produce a zero quota.
func (r *dispatcher) refreshQPS(value int64) int64 {
	proxyCount := int64(len(r.proxies))
	quota := value
	if proxyCount > 0 {
		quota = value / proxyCount
	}
	if quota <= 0 {
		return 1
	}
	return quota
}
// addServer registers a new server runtime (copy-on-write), wires it into
// the analysiser and the health-check loop.
func (r *dispatcher) addServer(svr *metapb.Server) error {
	if _, ok := r.servers[svr.ID]; ok {
		return errServerExists
	}
	newValues := r.copyServers(0)
	rt := newServerRuntime(svr, r.tw, r.refreshQPS(svr.MaxQPS))
	newValues[svr.ID] = rt
	r.addAnalysis(rt.meta.ID, rt.meta.CircuitBreaker)
	r.addToCheck(rt)
	r.servers = newValues
	log.Infof("server <%d> added, data <%s>",
		svr.ID,
		svr.String())
	return nil
}
// updateServer replaces the metadata of an existing server: the old
// health-check timer is stopped, the copied runtime is updated in place,
// and analysis plus health checking are re-registered.
func (r *dispatcher) updateServer(meta *metapb.Server) error {
	rt, ok := r.servers[meta.ID]
	if !ok {
		return errServerNotFound
	}
	// stop old health check before swapping in the updated runtime
	rt.heathTimeout.Stop()
	newValues := r.copyServers(0)
	rt = newValues[meta.ID]
	rt.activeQPS = r.refreshQPS(meta.MaxQPS)
	rt.updateMeta(meta)
	r.addAnalysis(rt.meta.ID, rt.meta.CircuitBreaker)
	r.addToCheck(rt)
	r.servers = newValues
	log.Infof("server <%d> updated, data <%s>",
		meta.ID,
		meta.String())
	return nil
}
// removeServer drops a server: stops its health check and removes it from
// both the server map and every cluster's bind list (copy-on-write).
func (r *dispatcher) removeServer(id uint64) error {
	rt, ok := r.servers[id]
	if !ok {
		return errServerNotFound
	}
	// stop old health check; the runtime is discarded below
	rt.heathTimeout.Stop()
	newValues := r.copyServers(id)
	// copyBinds with a ServerID filter drops this server's bindings too.
	newBinds := r.copyBinds(metapb.Bind{
		ServerID: id,
	})
	r.servers = newValues
	r.binds = newBinds
	log.Infof("server <%d> removed",
		rt.meta.ID)
	return nil
}
// addAnalysis (re)registers analysis targets for the given id: a default
// 1-second window, plus the circuit breaker's rate-check window when set.
func (r *dispatcher) addAnalysis(id uint64, cb *metapb.CircuitBreaker) {
	r.analysiser.RemoveTarget(id)
	r.analysiser.AddTarget(id, time.Second)
	if cb != nil {
		r.analysiser.AddTarget(id, time.Duration(cb.RateCheckPeriod))
	}
}
// addCluster registers a new cluster runtime (copy-on-write).
func (r *dispatcher) addCluster(cluster *metapb.Cluster) error {
	if _, ok := r.clusters[cluster.ID]; ok {
		return errClusterExists
	}
	newValues := r.copyClusters(0)
	newValues[cluster.ID] = newClusterRuntime(cluster)
	r.clusters = newValues
	log.Infof("cluster <%d> added, data <%s>",
		cluster.ID,
		cluster.String())
	return nil
}
// updateCluster replaces the metadata of an existing cluster (copy-on-write).
func (r *dispatcher) updateCluster(meta *metapb.Cluster) error {
	_, ok := r.clusters[meta.ID]
	if !ok {
		return errClusterNotFound
	}
	newValues := r.copyClusters(0)
	rt := newValues[meta.ID]
	rt.updateMeta(meta)
	r.clusters = newValues
	log.Infof("cluster <%d> updated, data <%s>",
		meta.ID,
		meta.String())
	return nil
}
// removeCluster drops a cluster and all of its bind entries (copy-on-write).
func (r *dispatcher) removeCluster(id uint64) error {
	_, ok := r.clusters[id]
	if !ok {
		return errClusterNotFound
	}
	newValues := r.copyClusters(id)
	// copyBinds with a ClusterID filter drops this cluster's bindings too.
	newBinds := r.copyBinds(metapb.Bind{
		ClusterID: id,
	})
	r.binds = newBinds
	r.clusters = newValues
	log.Infof("cluster <%d> removed",
		id)
	return nil
}
// addBind attaches a server to a cluster. A server without a health check is
// considered Up immediately and added to the cluster's active list; otherwise
// its status starts as Unknown until a check succeeds.
//
// NOTE(review): duplicate binds are not rejected here (errBindExists is
// declared but never returned) — presumably the store prevents duplicates;
// confirm.
func (r *dispatcher) addBind(bind *metapb.Bind) error {
	server, ok := r.servers[bind.ServerID]
	if !ok {
		log.Warnf("bind failed, server <%d> not found",
			bind.ServerID)
		return errServerNotFound
	}
	if _, ok := r.clusters[bind.ClusterID]; !ok {
		log.Warnf("add bind failed, cluster <%d> not found",
			bind.ClusterID)
		return errClusterNotFound
	}
	status := metapb.Unknown
	if server.meta.HeathCheck == nil {
		// no health check configured: treat the server as immediately usable
		status = metapb.Up
	}
	newValues := r.copyBinds(metapb.Bind{})
	if _, ok := newValues[bind.ClusterID]; !ok {
		newValues[bind.ClusterID] = &binds{}
	}
	bindInfos := newValues[bind.ClusterID]
	bindInfos.servers = append(bindInfos.servers, &bindInfo{
		svrID: bind.ServerID,
		status: status,
	})
	if status == metapb.Up {
		bindInfos.actives = append(bindInfos.actives, *server.meta)
	}
	newValues[bind.ClusterID] = bindInfos
	r.binds = newValues
	log.Infof("bind <%d,%d> created", bind.ClusterID, bind.ServerID)
	return nil
}
// removeBind detaches a server from a cluster by rebuilding the bind map
// with this exact (cluster, server) pair filtered out.
func (r *dispatcher) removeBind(bind *metapb.Bind) error {
	if _, ok := r.servers[bind.ServerID]; !ok {
		log.Errorf("remove bind failed: server <%d> not found",
			bind.ServerID)
		return errServerNotFound
	}
	if _, ok := r.clusters[bind.ClusterID]; !ok {
		log.Errorf("remove bind failed: cluster <%d> not found",
			bind.ClusterID)
		return errClusterNotFound
	}
	newValues := r.copyBinds(*bind)
	r.binds = newValues
	log.Infof("bind <%d,%d> removed", bind.ClusterID, bind.ServerID)
	return nil
}
// getServerStatus reports the bind status of server id, or metapb.Unknown
// when the server appears in no cluster's bind list. It scans a snapshot of
// r.binds so a concurrent map swap cannot affect the walk.
func (r *dispatcher) getServerStatus(id uint64) metapb.Status {
	snapshot := r.binds
	for _, clusterBinds := range snapshot {
		for _, serverBind := range clusterBinds.servers {
			if serverBind.svrID == id {
				return serverBind.status
			}
		}
	}
	return metapb.Unknown
}
// sortAPIs returns the API ids ordered by ascending position. The sort is
// stable, so APIs sharing a position keep their relative (map-iteration)
// order. Returns nil for an empty map.
func sortAPIs(apis map[uint64]*apiRuntime) []uint64 {
	if len(apis) == 0 {
		return nil
	}
	// Snapshot each API's position once, then sort the ids by it.
	positions := make(map[uint64]uint32, len(apis))
	keys := make([]uint64, 0, len(apis))
	for id, a := range apis {
		keys = append(keys, id)
		positions[id] = a.position()
	}
	sort.SliceStable(keys, func(i, j int) bool {
		return positions[keys[i]] < positions[keys[j]]
	})
	return keys
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd3
import (
"strconv"
"sync"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/golang/glog"
"golang.org/x/net/context"
)
const (
compactInterval = 10 * time.Minute
compactRevKey = "compact_rev_key"
)
var (
	// endpointsMapMu guards endpointsMap.
	endpointsMapMu sync.Mutex
	// endpointsMap records etcd endpoints that already have a compactor
	// running, making StartCompactor idempotent per cluster. Initialized at
	// declaration instead of in an init() func — same effect, idiomatic Go.
	endpointsMap = make(map[string]struct{})
)
// StartCompactor starts a compactor in the background to compact old version of keys that's not needed.
// By default, we save the most recent 10 minutes data and compact versions > 10minutes ago.
// It should be enough for slow watchers and to tolerate burst.
// TODO: We might keep a longer history (12h) in the future once storage API can take advantage of past version of keys.
func StartCompactor(ctx context.Context, client *clientv3.Client) {
	endpointsMapMu.Lock()
	defer endpointsMapMu.Unlock()
	// In one process, we can have only one compactor for one cluster.
	// Currently we rely on endpoints to differentiate clusters.
	// If ANY endpoint of this client is already registered, assume a
	// compactor is running for the cluster and do nothing.
	for _, ep := range client.Endpoints() {
		if _, ok := endpointsMap[ep]; ok {
			glog.V(4).Infof("compactor already exists for endpoints %v", client.Endpoints())
			return
		}
	}
	// Register every endpoint before spawning, so a later caller with an
	// overlapping endpoint set is rejected above.
	for _, ep := range client.Endpoints() {
		endpointsMap[ep] = struct{}{}
	}
	go compactor(ctx, client, compactInterval)
}
// compactor periodically compacts historical versions of keys in etcd.
// It will compact keys with versions older than given interval.
// In other words, after compaction, it will only contain keys set during last interval.
// Any API call for the older versions of keys will return error.
// Interval is the time interval between each compaction. The first compaction happens after "interval".
func compactor(ctx context.Context, client *clientv3.Client, interval time.Duration) {
	// Technical definitions:
	// We have a special key in etcd defined as *compactRevKey*.
	// compactRevKey's value will be set to the string of last compacted revision.
	// compactRevKey's version will be used as logical time for comparison. The version is referred as compact time.
	// Initially, because the key doesn't exist, the compact time (version) is 0.
	//
	// Algorithm:
	// - Compare to see if (local compact_time) = (remote compact_time).
	// - If yes, increment both local and remote compact_time, and do a compaction.
	// - If not, set local to remote compact_time.
	//
	// Technical details/insights:
	//
	// The protocol here is lease based. If one compactor CAS successfully, the others would know it when they fail in
	// CAS later and would try again in 10 minutes. If an APIServer crashed, another one would "take over" the lease.
	//
	// For example, in the following diagram, we have a compactor C1 doing compaction in t1, t2. Another compactor C2
	// at t1' (t1 < t1' < t2) would CAS fail, set its known oldRev to rev at t1', and try again in t2' (t2' > t2).
	// If C1 crashed and wouldn't compact at t2, C2 would CAS successfully at t2'.
	//
	//                 oldRev(t2)     curRev(t2)
	//                                  +
	//   oldRev        curRev           |
	//     +             +              |
	//     |             |              |
	//     |             |    t1'       |     t2'
	// +---v-------------v----^---------v------^---->
	//     t0           t1             t2
	//
	// We have the guarantees:
	// - in normal cases, the interval is 10 minutes.
	// - in failover, the interval is >10m and <20m
	//
	// FAQ:
	// - What if time is not accurate? We don't care as long as someone did the compaction. Atomicity is ensured using
	//   etcd API.
	// - What happened under heavy load scenarios? Initially, each apiserver will do only one compaction
	//   every 10 minutes. This is very unlikely affecting or affected w.r.t. server load.
	var compactTime int64
	var rev int64
	var err error
	for {
		select {
		case <-time.After(interval):
		case <-ctx.Done():
			return
		}
		// Errors are logged and the loop continues; the next tick retries.
		compactTime, rev, err = compact(ctx, client, compactTime, rev)
		if err != nil {
			glog.Errorf("etcd: endpoint (%v) compact failed: %v", client.Endpoints(), err)
			continue
		}
	}
}
// compact compacts etcd store and returns current rev.
// It will return the current compact time and global revision if no error occurred.
// Note that CAS fail will not incur any error.
func compact(ctx context.Context, client *clientv3.Client, t, rev int64) (int64, int64, error) {
	// CAS on compactRevKey's version: only the caller whose local compact
	// time t matches wins the right to compact.
	resp, err := client.KV.Txn(ctx).If(
		clientv3.Compare(clientv3.Version(compactRevKey), "=", t),
	).Then(
		clientv3.OpPut(compactRevKey, strconv.FormatInt(rev, 10)), // Expect side effect: increment Version
	).Else(
		clientv3.OpGet(compactRevKey),
	).Commit()
	if err != nil {
		return t, rev, err
	}
	curRev := resp.Header.Revision
	if !resp.Succeeded {
		// CAS lost: adopt the remote compact time and retry next period.
		// NOTE(review): assumes the Else Get always finds compactRevKey
		// (Kvs[0] would panic otherwise) — holds as long as the key is only
		// ever Put, never deleted; confirm.
		curTime := resp.Responses[0].GetResponseRange().Kvs[0].Version
		return curTime, curRev, nil
	}
	curTime := t + 1
	if rev == 0 {
		// We don't compact on bootstrap.
		return curTime, curRev, nil
	}
	if _, err = client.Compact(ctx, rev); err != nil {
		return curTime, curRev, err
	}
	glog.Infof("etcd: compacted rev (%d), endpoints (%v)", rev, client.Endpoints())
	return curTime, curRev, nil
}
etcd3 compactor: update docs
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd3
import (
"strconv"
"sync"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/golang/glog"
"golang.org/x/net/context"
)
const (
compactInterval = 10 * time.Minute
compactRevKey = "compact_rev_key"
)
var (
	// endpointsMapMu guards endpointsMap.
	endpointsMapMu sync.Mutex
	// endpointsMap records etcd endpoints that already have a compactor
	// running, making StartCompactor idempotent per cluster. Initialized at
	// declaration instead of in an init() func — same effect, idiomatic Go.
	endpointsMap = make(map[string]struct{})
)
// StartCompactor starts a compactor in the background to compact old version of keys that's not needed.
// By default, we save the most recent 10 minutes data and compact versions > 10minutes ago.
// It should be enough for slow watchers and to tolerate burst.
// TODO: We might keep a longer history (12h) in the future once storage API can take advantage of past version of keys.
func StartCompactor(ctx context.Context, client *clientv3.Client) {
	endpointsMapMu.Lock()
	defer endpointsMapMu.Unlock()
	// In one process, we can have only one compactor for one cluster.
	// Currently we rely on endpoints to differentiate clusters.
	// If ANY endpoint of this client is already registered, assume a
	// compactor is running for the cluster and do nothing.
	for _, ep := range client.Endpoints() {
		if _, ok := endpointsMap[ep]; ok {
			glog.V(4).Infof("compactor already exists for endpoints %v", client.Endpoints())
			return
		}
	}
	// Register every endpoint before spawning, so a later caller with an
	// overlapping endpoint set is rejected above.
	for _, ep := range client.Endpoints() {
		endpointsMap[ep] = struct{}{}
	}
	go compactor(ctx, client, compactInterval)
}
// compactor periodically compacts historical versions of keys in etcd.
// It will compact keys with versions older than given interval.
// In other words, after compaction, it will only contain keys set during last interval.
// Any API call for the older versions of keys will return error.
// Interval is the time interval between each compaction. The first compaction happens after "interval".
func compactor(ctx context.Context, client *clientv3.Client, interval time.Duration) {
	// Technical definitions:
	// We have a special key in etcd defined as *compactRevKey*.
	// compactRevKey's value will be set to the string of last compacted revision.
	// compactRevKey's version will be used as logical time for comparison. The version is referred as compact time.
	// Initially, because the key doesn't exist, the compact time (version) is 0.
	//
	// Algorithm:
	// - Compare to see if (local compact_time) = (remote compact_time).
	// - If yes, increment both local and remote compact_time, and do a compaction.
	// - If not, set local to remote compact_time.
	//
	// Technical details/insights:
	//
	// The protocol here is lease based. If one compactor CAS successfully, the others would know it when they fail in
	// CAS later and would try again in 10 minutes. If an APIServer crashed, another one would "take over" the lease.
	//
	// For example, in the following diagram, we have a compactor C1 doing compaction in t1, t2. Another compactor C2
	// at t1' (t1 < t1' < t2) would CAS fail, set its known oldRev to rev at t1', and try again in t2' (t2' > t2).
	// If C1 crashed and wouldn't compact at t2, C2 would CAS successfully at t2'.
	//
	//                 oldRev(t2)     curRev(t2)
	//                                  +
	//   oldRev        curRev           |
	//     +             +              |
	//     |             |              |
	//     |             |    t1'       |     t2'
	// +---v-------------v----^---------v------^---->
	//     t0           t1             t2
	//
	// We have the guarantees:
	// - in normal cases, the interval is 10 minutes.
	// - in failover, the interval is >10m and <20m
	//
	// FAQ:
	// - What if time is not accurate? We don't care as long as someone did the compaction. Atomicity is ensured using
	//   etcd API.
	// - What happened under heavy load scenarios? Initially, each apiserver will do only one compaction
	//   every 10 minutes. This is very unlikely affecting or affected w.r.t. server load.
	var compactTime int64
	var rev int64
	var err error
	for {
		select {
		case <-time.After(interval):
		case <-ctx.Done():
			return
		}
		// Errors are logged and the loop continues; the next tick retries.
		compactTime, rev, err = compact(ctx, client, compactTime, rev)
		if err != nil {
			glog.Errorf("etcd: endpoint (%v) compact failed: %v", client.Endpoints(), err)
			continue
		}
	}
}
// compact compacts etcd store and returns current rev.
// It will return the current compact time and global revision if no error occurred.
// Note that CAS fail will not incur any error.
func compact(ctx context.Context, client *clientv3.Client, t, rev int64) (int64, int64, error) {
	// One atomic transaction: if nobody else bumped compactRevKey since our last
	// observation (its Version still equals t), record the revision we are about
	// to compact; otherwise read the key so we can adopt the winner's state.
	resp, err := client.KV.Txn(ctx).If(
		clientv3.Compare(clientv3.Version(compactRevKey), "=", t),
	).Then(
		clientv3.OpPut(compactRevKey, strconv.FormatInt(rev, 10)), // Expect side effect: increment Version
	).Else(
		clientv3.OpGet(compactRevKey),
	).Commit()
	if err != nil {
		return t, rev, err
	}

	// Global revision at the time the transaction was committed.
	curRev := resp.Header.Revision

	if !resp.Succeeded {
		// CAS failed: another compactor won this round. Adopt its compact time
		// (the key's Version) and the current global revision. Not an error.
		curTime := resp.Responses[0].GetResponseRange().Kvs[0].Version
		return curTime, curRev, nil
	}
	// CAS succeeded: our Put incremented the key's version, so the new compact
	// time is t+1.
	curTime := t + 1

	if rev == 0 {
		// We don't compact on bootstrap.
		return curTime, curRev, nil
	}
	if _, err = client.Compact(ctx, rev); err != nil {
		return curTime, curRev, err
	}
	glog.Infof("etcd: compacted rev (%d), endpoints (%v)", rev, client.Endpoints())
	return curTime, curRev, nil
}
|
package test
import (
"bytes"
"context"
"fmt"
"io/ioutil"
smtpclient "net/smtp"
"os"
"path/filepath"
"testing"
"time"
"github.com/jhillyerd/goldiff"
"github.com/jhillyerd/inbucket/pkg/config"
"github.com/jhillyerd/inbucket/pkg/message"
"github.com/jhillyerd/inbucket/pkg/msghub"
"github.com/jhillyerd/inbucket/pkg/policy"
"github.com/jhillyerd/inbucket/pkg/rest"
"github.com/jhillyerd/inbucket/pkg/rest/client"
"github.com/jhillyerd/inbucket/pkg/server/smtp"
"github.com/jhillyerd/inbucket/pkg/server/web"
"github.com/jhillyerd/inbucket/pkg/storage"
"github.com/jhillyerd/inbucket/pkg/storage/mem"
"github.com/jhillyerd/inbucket/pkg/webui"
)
const (
restBaseURL = "http://127.0.0.1:9000/"
smtpHost = "127.0.0.1:2500"
)
// TestSuite boots one shared Inbucket server and runs each integration
// case against it as a subtest.
func TestSuite(t *testing.T) {
	stop, err := startServer()
	if err != nil {
		t.Fatal(err)
	}
	defer stop()
	for _, tc := range []struct {
		name string
		test func(*testing.T)
	}{
		{"basic", testBasic},
		{"fullname", testFullname},
		{"encodedHeader", testEncodedHeader},
	} {
		t.Run(tc.name, tc.test)
	}
}
// testBasic delivers testdata/basic.txt over SMTP and verifies the REST
// API returns a message matching the basic.golden file.
func testBasic(t *testing.T) {
	c, err := client.New(restBaseURL)
	if err != nil {
		t.Fatal(err)
	}
	from := "fromuser@inbucket.org"
	to := []string{"recipient@inbucket.org"}
	input := readTestData("basic.txt")
	// Send mail.
	err = smtpclient.SendMail(smtpHost, nil, from, to, input)
	if err != nil {
		t.Fatal(err)
	}
	// Confirm receipt.
	msg, err := c.GetMessage("recipient", "latest")
	if err != nil {
		t.Fatal(err)
	}
	if msg == nil {
		// Fatalf, not Errorf: Errorf would continue and formatMessage below
		// would dereference the nil message and panic.
		t.Fatalf("Got nil message, wanted non-nil message.")
	}
	// Compare to golden.
	got := formatMessage(msg)
	goldiff.File(t, got, "testdata", "basic.golden")
}
// testFullname delivers testdata/fullname.txt (sender with a display
// name) over SMTP and compares the stored message to fullname.golden.
func testFullname(t *testing.T) {
	c, err := client.New(restBaseURL)
	if err != nil {
		t.Fatal(err)
	}
	from := "fromuser@inbucket.org"
	to := []string{"recipient@inbucket.org"}
	input := readTestData("fullname.txt")
	// Send mail.
	err = smtpclient.SendMail(smtpHost, nil, from, to, input)
	if err != nil {
		t.Fatal(err)
	}
	// Confirm receipt.
	msg, err := c.GetMessage("recipient", "latest")
	if err != nil {
		t.Fatal(err)
	}
	if msg == nil {
		// Fatalf, not Errorf: Errorf would continue and formatMessage below
		// would dereference the nil message and panic.
		t.Fatalf("Got nil message, wanted non-nil message.")
	}
	// Compare to golden.
	got := formatMessage(msg)
	goldiff.File(t, got, "testdata", "fullname.golden")
}
// testEncodedHeader delivers testdata/encodedheader.txt (MIME
// encoded-word headers) over SMTP and compares the stored message to
// encodedheader.golden.
func testEncodedHeader(t *testing.T) {
	c, err := client.New(restBaseURL)
	if err != nil {
		t.Fatal(err)
	}
	from := "fromuser@inbucket.org"
	to := []string{"recipient@inbucket.org"}
	input := readTestData("encodedheader.txt")
	// Send mail.
	err = smtpclient.SendMail(smtpHost, nil, from, to, input)
	if err != nil {
		t.Fatal(err)
	}
	// Confirm receipt.
	msg, err := c.GetMessage("recipient", "latest")
	if err != nil {
		t.Fatal(err)
	}
	if msg == nil {
		// Fatalf, not Errorf: Errorf would continue and formatMessage below
		// would dereference the nil message and panic.
		t.Fatalf("Got nil message, wanted non-nil message.")
	}
	// Compare to golden.
	got := formatMessage(msg)
	goldiff.File(t, got, "testdata", "encodedheader.golden")
}
// formatMessage renders the interesting fields of a message as text for
// comparison against golden files.
func formatMessage(m *client.Message) []byte {
	var buf bytes.Buffer
	fmt.Fprintf(&buf,
		"Mailbox: %v\nFrom: %v\nTo: %v\nSubject: %v\nSize: %v\n",
		m.Mailbox, m.From, m.To, m.Subject, m.Size)
	fmt.Fprintf(&buf, "\nBODY TEXT:\n%v\n", m.Body.Text)
	fmt.Fprintf(&buf, "\nBODY HTML:\n%v\n", m.Body.HTML)
	return buf.Bytes()
}
// startServer configures and boots in-memory Inbucket web and SMTP
// servers for the integration tests, returning a function that shuts
// them down.
func startServer() (func(), error) {
	// TODO Refactor inbucket/main.go so we don't need to repeat all this here.
	storage.Constructors["memory"] = mem.New
	// Clear the environment so config.Process() sees only defaults.
	os.Clearenv()
	conf, err := config.Process()
	if err != nil {
		return nil, err
	}
	rootCtx, rootCancel := context.WithCancel(context.Background())
	shutdownChan := make(chan bool)
	store, err := storage.FromConfig(conf.Storage)
	if err != nil {
		rootCancel()
		return nil, err
	}
	msgHub := msghub.New(rootCtx, conf.Web.MonitorHistory)
	addrPolicy := &policy.Addressing{Config: conf}
	mmanager := &message.StoreManager{AddrPolicy: addrPolicy, Store: store, Hub: msgHub}
	// Start HTTP server.  FIX: register all routes (web UI under /serve/,
	// REST under /api/) BEFORE web.Initialize so the server starts with a
	// fully configured router; the original registered the UI routes on the
	// root router after initialization.
	webui.SetupRoutes(web.Router.PathPrefix("/serve/").Subrouter())
	rest.SetupRoutes(web.Router.PathPrefix("/api/").Subrouter())
	web.Initialize(conf, shutdownChan, mmanager, msgHub)
	go web.Start(rootCtx)
	// Start SMTP server.
	smtpServer := smtp.NewServer(conf.SMTP, shutdownChan, mmanager, addrPolicy)
	go smtpServer.Start(rootCtx)
	// TODO Implement an elegant way to determine server readiness.
	time.Sleep(500 * time.Millisecond)
	return func() {
		// Shut everything down.
		close(shutdownChan)
		rootCancel()
		smtpServer.Drain()
	}, nil
}
// readTestData returns the contents of the named file under the
// testdata directory, panicking on any error.
func readTestData(path ...string) []byte {
	// Prefix path with testdata.
	p := append([]string{"testdata"}, path...)
	// FIX: ioutil.ReadFile opens, reads and closes the file; the original
	// os.Open + ioutil.ReadAll pair never closed it, leaking a file handle
	// per call.
	data, err := ioutil.ReadFile(filepath.Join(p...))
	if err != nil {
		panic(err)
	}
	return data
}
test: Fix integration test server startup
package test
import (
"bytes"
"context"
"fmt"
"io/ioutil"
smtpclient "net/smtp"
"os"
"path/filepath"
"testing"
"time"
"github.com/jhillyerd/goldiff"
"github.com/jhillyerd/inbucket/pkg/config"
"github.com/jhillyerd/inbucket/pkg/message"
"github.com/jhillyerd/inbucket/pkg/msghub"
"github.com/jhillyerd/inbucket/pkg/policy"
"github.com/jhillyerd/inbucket/pkg/rest"
"github.com/jhillyerd/inbucket/pkg/rest/client"
"github.com/jhillyerd/inbucket/pkg/server/smtp"
"github.com/jhillyerd/inbucket/pkg/server/web"
"github.com/jhillyerd/inbucket/pkg/storage"
"github.com/jhillyerd/inbucket/pkg/storage/mem"
"github.com/jhillyerd/inbucket/pkg/webui"
)
const (
restBaseURL = "http://127.0.0.1:9000/"
smtpHost = "127.0.0.1:2500"
)
// TestSuite boots one shared Inbucket server and runs each integration
// case against it as a subtest.
func TestSuite(t *testing.T) {
	stop, err := startServer()
	if err != nil {
		t.Fatal(err)
	}
	defer stop()
	for _, tc := range []struct {
		name string
		test func(*testing.T)
	}{
		{"basic", testBasic},
		{"fullname", testFullname},
		{"encodedHeader", testEncodedHeader},
	} {
		t.Run(tc.name, tc.test)
	}
}
// testBasic delivers testdata/basic.txt over SMTP and verifies the REST
// API returns a message matching the basic.golden file.
func testBasic(t *testing.T) {
	c, err := client.New(restBaseURL)
	if err != nil {
		t.Fatal(err)
	}
	from := "fromuser@inbucket.org"
	to := []string{"recipient@inbucket.org"}
	input := readTestData("basic.txt")
	// Send mail.
	err = smtpclient.SendMail(smtpHost, nil, from, to, input)
	if err != nil {
		t.Fatal(err)
	}
	// Confirm receipt.
	msg, err := c.GetMessage("recipient", "latest")
	if err != nil {
		t.Fatal(err)
	}
	if msg == nil {
		// Fatalf, not Errorf: Errorf would continue and formatMessage below
		// would dereference the nil message and panic.
		t.Fatalf("Got nil message, wanted non-nil message.")
	}
	// Compare to golden.
	got := formatMessage(msg)
	goldiff.File(t, got, "testdata", "basic.golden")
}
// testFullname delivers testdata/fullname.txt (sender with a display
// name) over SMTP and compares the stored message to fullname.golden.
func testFullname(t *testing.T) {
	c, err := client.New(restBaseURL)
	if err != nil {
		t.Fatal(err)
	}
	from := "fromuser@inbucket.org"
	to := []string{"recipient@inbucket.org"}
	input := readTestData("fullname.txt")
	// Send mail.
	err = smtpclient.SendMail(smtpHost, nil, from, to, input)
	if err != nil {
		t.Fatal(err)
	}
	// Confirm receipt.
	msg, err := c.GetMessage("recipient", "latest")
	if err != nil {
		t.Fatal(err)
	}
	if msg == nil {
		// Fatalf, not Errorf: Errorf would continue and formatMessage below
		// would dereference the nil message and panic.
		t.Fatalf("Got nil message, wanted non-nil message.")
	}
	// Compare to golden.
	got := formatMessage(msg)
	goldiff.File(t, got, "testdata", "fullname.golden")
}
// testEncodedHeader delivers testdata/encodedheader.txt (MIME
// encoded-word headers) over SMTP and compares the stored message to
// encodedheader.golden.
func testEncodedHeader(t *testing.T) {
	c, err := client.New(restBaseURL)
	if err != nil {
		t.Fatal(err)
	}
	from := "fromuser@inbucket.org"
	to := []string{"recipient@inbucket.org"}
	input := readTestData("encodedheader.txt")
	// Send mail.
	err = smtpclient.SendMail(smtpHost, nil, from, to, input)
	if err != nil {
		t.Fatal(err)
	}
	// Confirm receipt.
	msg, err := c.GetMessage("recipient", "latest")
	if err != nil {
		t.Fatal(err)
	}
	if msg == nil {
		// Fatalf, not Errorf: Errorf would continue and formatMessage below
		// would dereference the nil message and panic.
		t.Fatalf("Got nil message, wanted non-nil message.")
	}
	// Compare to golden.
	got := formatMessage(msg)
	goldiff.File(t, got, "testdata", "encodedheader.golden")
}
// formatMessage renders the interesting fields of a message as text for
// comparison against golden files.
func formatMessage(m *client.Message) []byte {
	var buf bytes.Buffer
	fmt.Fprintf(&buf,
		"Mailbox: %v\nFrom: %v\nTo: %v\nSubject: %v\nSize: %v\n",
		m.Mailbox, m.From, m.To, m.Subject, m.Size)
	fmt.Fprintf(&buf, "\nBODY TEXT:\n%v\n", m.Body.Text)
	fmt.Fprintf(&buf, "\nBODY HTML:\n%v\n", m.Body.HTML)
	return buf.Bytes()
}
// startServer configures and boots in-memory Inbucket web and SMTP
// servers for the integration tests, returning a function that shuts
// them down.
func startServer() (func(), error) {
	// TODO Refactor inbucket/main.go so we don't need to repeat all this here.
	storage.Constructors["memory"] = mem.New
	// Clear the environment so config.Process() sees only defaults.
	os.Clearenv()
	conf, err := config.Process()
	if err != nil {
		return nil, err
	}
	rootCtx, rootCancel := context.WithCancel(context.Background())
	shutdownChan := make(chan bool)
	store, err := storage.FromConfig(conf.Storage)
	if err != nil {
		rootCancel()
		return nil, err
	}
	msgHub := msghub.New(rootCtx, conf.Web.MonitorHistory)
	addrPolicy := &policy.Addressing{Config: conf}
	mmanager := &message.StoreManager{AddrPolicy: addrPolicy, Store: store, Hub: msgHub}
	// Start HTTP server.  Routes (web UI under /serve/, REST under /api/)
	// are registered before web.Initialize is called; this ordering was the
	// server-startup fix, so do not rearrange it.
	webui.SetupRoutes(web.Router.PathPrefix("/serve/").Subrouter())
	rest.SetupRoutes(web.Router.PathPrefix("/api/").Subrouter())
	web.Initialize(conf, shutdownChan, mmanager, msgHub)
	go web.Start(rootCtx)
	// Start SMTP server.
	smtpServer := smtp.NewServer(conf.SMTP, shutdownChan, mmanager, addrPolicy)
	go smtpServer.Start(rootCtx)
	// TODO Implmement an elegant way to determine server readiness.
	time.Sleep(500 * time.Millisecond)
	return func() {
		// Shut everything down.
		close(shutdownChan)
		rootCancel()
		smtpServer.Drain()
	}, nil
}
// readTestData returns the contents of the named file under the
// testdata directory, panicking on any error.
func readTestData(path ...string) []byte {
	// Prefix path with testdata.
	p := append([]string{"testdata"}, path...)
	// FIX: ioutil.ReadFile opens, reads and closes the file; the original
	// os.Open + ioutil.ReadAll pair never closed it, leaking a file handle
	// per call.
	data, err := ioutil.ReadFile(filepath.Join(p...))
	if err != nil {
		panic(err)
	}
	return data
}
|
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tiller
import (
"bytes"
"errors"
"fmt"
"log"
"path"
"regexp"
"strings"
"github.com/technosophos/moniker"
ctx "golang.org/x/net/context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/discovery"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/helm/pkg/chartutil"
"k8s.io/helm/pkg/hooks"
"k8s.io/helm/pkg/kube"
"k8s.io/helm/pkg/proto/hapi/chart"
"k8s.io/helm/pkg/proto/hapi/release"
"k8s.io/helm/pkg/proto/hapi/services"
reltesting "k8s.io/helm/pkg/releasetesting"
relutil "k8s.io/helm/pkg/releaseutil"
"k8s.io/helm/pkg/tiller/environment"
"k8s.io/helm/pkg/timeconv"
"k8s.io/helm/pkg/version"
)
// releaseNameMaxLen is the maximum length of a release name.
//
// As of Kubernetes 1.4, the max limit on a name is 63 chars. We reserve 10 for
// charts to add data. Effectively, that gives us 53 chars.
// See https://github.com/kubernetes/helm/issues/1528
const releaseNameMaxLen = 53

// NOTESFILE_SUFFIX that we want to treat special. It goes through the templating engine
// but it's not a yaml file (resource) hence can't have hooks, etc. And the user actually
// wants to see this file after rendering in the status command. However, it must be a suffix
// since there can be filepath in front of it.
const notesFileSuffix = "NOTES.txt"

// Sentinel errors returned by the release server.
var (
	// errMissingChart indicates that a chart was not provided.
	errMissingChart = errors.New("no chart provided")
	// errMissingRelease indicates that a release (name) was not provided.
	errMissingRelease = errors.New("no release provided")
	// errInvalidRevision indicates that an invalid release revision number was provided.
	errInvalidRevision = errors.New("invalid release revision")
)

// ListDefaultLimit is the default limit for number of items returned in a list.
// It applies when a ListReleasesRequest does not specify a limit.
var ListDefaultLimit int64 = 512

// ValidName is a regular expression for names.
//
// According to the Kubernetes help text, the regular expression it uses is:
//
//	(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?
//
// We modified that. First, we added start and end delimiters. Second, we changed
// the final ? to + to require that the pattern match at least once. This modification
// prevents an empty string from matching.
var ValidName = regexp.MustCompile("^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])+$")
// ReleaseServer implements the server-side gRPC endpoint for the HAPI services.
type ReleaseServer struct {
	// env holds Tiller's storage backend (Releases), rendering engines
	// (EngineYard) and Kubernetes client (KubeClient).
	env *environment.Environment
	// clientset provides access to the Kubernetes API; used here for
	// discovery (server version and API groups).
	clientset internalclientset.Interface
}

// NewReleaseServer creates a new release server.
func NewReleaseServer(env *environment.Environment, clientset internalclientset.Interface) *ReleaseServer {
	return &ReleaseServer{
		env:       env,
		clientset: clientset,
	}
}
// ListReleases lists the releases found by the server.
// Results are filtered (status codes, namespace, name regexp), sorted, and
// paginated (Offset names the first release of the page, Limit caps the page
// size) before being sent on the stream.
func (s *ReleaseServer) ListReleases(req *services.ListReleasesRequest, stream services.ReleaseService_ListReleasesServer) error {
	// Default to listing only deployed releases.
	if len(req.StatusCodes) == 0 {
		req.StatusCodes = []release.Status_Code{release.Status_DEPLOYED}
	}

	//rels, err := s.env.Releases.ListDeployed()
	// Keep a release when its status matches any of the requested codes.
	rels, err := s.env.Releases.ListFilterAll(func(r *release.Release) bool {
		for _, sc := range req.StatusCodes {
			if sc == r.Info.Status.Code {
				return true
			}
		}
		return false
	})
	if err != nil {
		return err
	}

	if req.Namespace != "" {
		rels, err = filterByNamespace(req.Namespace, rels)
		if err != nil {
			return err
		}
	}

	if len(req.Filter) != 0 {
		rels, err = filterReleases(req.Filter, rels)
		if err != nil {
			return err
		}
	}

	// Total counts matches after filtering but before pagination.
	total := int64(len(rels))

	switch req.SortBy {
	case services.ListSort_NAME:
		relutil.SortByName(rels)
	case services.ListSort_LAST_RELEASED:
		relutil.SortByDate(rels)
	}

	// Descending order: reverse the sorted slice.
	if req.SortOrder == services.ListSort_DESC {
		ll := len(rels)
		rr := make([]*release.Release, ll)
		for i, item := range rels {
			rr[ll-i-1] = item
		}
		rels = rr
	}

	l := int64(len(rels))
	if req.Offset != "" {

		// Find the release named by Offset (the loop keeps the LAST match).
		i := -1
		for ii, cur := range rels {
			if cur.Name == req.Offset {
				i = ii
			}
		}
		if i == -1 {
			return fmt.Errorf("offset %q not found", req.Offset)
		}

		// NOTE(review): i was assigned from a valid index above, so i is
		// always < len(rels) here and this guard can never fire.
		if len(rels) < i {
			return fmt.Errorf("no items after %q", req.Offset)
		}

		rels = rels[i:]
		l = int64(len(rels))
	}

	if req.Limit == 0 {
		req.Limit = ListDefaultLimit
	}
	// Trim to the page size; Next names the first release of the next page.
	next := ""
	if l > req.Limit {
		next = rels[req.Limit].Name
		rels = rels[0:req.Limit]
		l = int64(len(rels))
	}

	res := &services.ListReleasesResponse{
		Next:     next,
		Count:    l,
		Total:    total,
		Releases: rels,
	}
	return stream.Send(res)
}
// filterByNamespace returns only the releases that live in the given
// namespace.  The error result is always nil; it exists to mirror the
// signatures of the other release filters.
func filterByNamespace(namespace string, rels []*release.Release) ([]*release.Release, error) {
	out := make([]*release.Release, 0, len(rels))
	for _, rel := range rels {
		if rel.Namespace == namespace {
			out = append(out, rel)
		}
	}
	return out, nil
}
// filterReleases returns the releases whose names match the given
// regular expression.  On a bad pattern the original list is returned
// together with the compile error.
func filterReleases(filter string, rels []*release.Release) ([]*release.Release, error) {
	re, err := regexp.Compile(filter)
	if err != nil {
		return rels, err
	}
	matches := make([]*release.Release, 0, len(rels))
	for _, rel := range rels {
		if re.MatchString(rel.Name) {
			matches = append(matches, rel)
		}
	}
	return matches, nil
}
// GetVersion sends the server version.
func (s *ReleaseServer) GetVersion(c ctx.Context, req *services.GetVersionRequest) (*services.GetVersionResponse, error) {
	return &services.GetVersionResponse{Version: version.GetVersionProto()}, nil
}
// GetReleaseStatus gets the status information for a named release.
// A non-positive Version requests the latest revision; otherwise the exact
// revision is fetched.  The stored manifest is additionally checked against
// the cluster, except for DELETED/FAILED releases.
func (s *ReleaseServer) GetReleaseStatus(c ctx.Context, req *services.GetReleaseStatusRequest) (*services.GetReleaseStatusResponse, error) {
	if !ValidName.MatchString(req.Name) {
		return nil, errMissingRelease
	}

	var rel *release.Release

	if req.Version <= 0 {
		var err error
		rel, err = s.env.Releases.Last(req.Name)
		if err != nil {
			return nil, fmt.Errorf("getting deployed release %q: %s", req.Name, err)
		}
	} else {
		var err error
		if rel, err = s.env.Releases.Get(req.Name, req.Version); err != nil {
			return nil, fmt.Errorf("getting release '%s' (v%d): %s", req.Name, req.Version, err)
		}
	}

	// A stored release without Info or Chart is corrupt; refuse to report on it.
	if rel.Info == nil {
		return nil, errors.New("release info is missing")
	}
	if rel.Chart == nil {
		return nil, errors.New("release chart is missing")
	}

	sc := rel.Info.Status.Code
	statusResp := &services.GetReleaseStatusResponse{
		Name:      rel.Name,
		Namespace: rel.Namespace,
		Info:      rel.Info,
	}

	// Ok, we got the status of the release as we had jotted down, now we need to match the
	// manifest we stashed away with reality from the cluster.
	kubeCli := s.env.KubeClient
	resp, err := kubeCli.Get(rel.Namespace, bytes.NewBufferString(rel.Manifest))
	if sc == release.Status_DELETED || sc == release.Status_FAILED {
		// Skip errors if this is already deleted or failed.
		return statusResp, nil
	} else if err != nil {
		log.Printf("warning: Get for %s failed: %v", rel.Name, err)
		return nil, err
	}
	rel.Info.Status.Resources = resp
	return statusResp, nil
}
// GetReleaseContent gets all of the stored information for the given release.
// A non-positive version requests the currently deployed revision.
func (s *ReleaseServer) GetReleaseContent(c ctx.Context, req *services.GetReleaseContentRequest) (*services.GetReleaseContentResponse, error) {
	if !ValidName.MatchString(req.Name) {
		return nil, errMissingRelease
	}
	var (
		rel *release.Release
		err error
	)
	if req.Version <= 0 {
		rel, err = s.env.Releases.Deployed(req.Name)
	} else {
		rel, err = s.env.Releases.Get(req.Name, req.Version)
	}
	return &services.GetReleaseContentResponse{Release: rel}, err
}
// UpdateRelease takes an existing release and new information, and upgrades the release.
func (s *ReleaseServer) UpdateRelease(c ctx.Context, req *services.UpdateReleaseRequest) (*services.UpdateReleaseResponse, error) {
	current, updated, err := s.prepareUpdate(req)
	if err != nil {
		return nil, err
	}
	res, err := s.performUpdate(current, updated, req)
	if err != nil {
		return res, err
	}
	// Persist the new revision unless this was only a dry run.
	if req.DryRun {
		return res, nil
	}
	if err := s.env.Releases.Create(updated); err != nil {
		return res, err
	}
	return res, nil
}
// performUpdate runs the pre/post-upgrade hooks (unless disabled) around the
// Kubernetes manifest update, and records the outcome on both releases.  On a
// dry run only the description is set; nothing touches the cluster.
func (s *ReleaseServer) performUpdate(originalRelease, updatedRelease *release.Release, req *services.UpdateReleaseRequest) (*services.UpdateReleaseResponse, error) {
	res := &services.UpdateReleaseResponse{Release: updatedRelease}

	if req.DryRun {
		log.Printf("Dry run for %s", updatedRelease.Name)
		res.Release.Info.Description = "Dry run complete"
		return res, nil
	}

	// pre-upgrade hooks
	if !req.DisableHooks {
		if err := s.execHook(updatedRelease.Hooks, updatedRelease.Name, updatedRelease.Namespace, hooks.PreUpgrade, req.Timeout); err != nil {
			return res, err
		}
	}

	if err := s.performKubeUpdate(originalRelease, updatedRelease, req.Recreate, req.Timeout, req.Wait); err != nil {
		// Mark the old release superseded and the new one failed, and record
		// both so history reflects the failed attempt.
		msg := fmt.Sprintf("Upgrade %q failed: %s", updatedRelease.Name, err)
		log.Printf("warning: %s", msg)
		originalRelease.Info.Status.Code = release.Status_SUPERSEDED
		updatedRelease.Info.Status.Code = release.Status_FAILED
		updatedRelease.Info.Description = msg
		s.recordRelease(originalRelease, true)
		s.recordRelease(updatedRelease, false)
		return res, err
	}

	// post-upgrade hooks
	if !req.DisableHooks {
		if err := s.execHook(updatedRelease.Hooks, updatedRelease.Name, updatedRelease.Namespace, hooks.PostUpgrade, req.Timeout); err != nil {
			return res, err
		}
	}

	// Success: the old revision is superseded, the new one is deployed.
	originalRelease.Info.Status.Code = release.Status_SUPERSEDED
	s.recordRelease(originalRelease, true)

	updatedRelease.Info.Status.Code = release.Status_DEPLOYED
	updatedRelease.Info.Description = "Upgrade complete"

	return res, nil
}
// reuseValues copies values from the current release to a new release if the
// new release does not have any values.
//
// If the request already has values, or if there are no values in the current
// release, this does nothing.
//
// This is skipped if the req.ResetValues flag is set, in which case the
// request values are not altered.
func (s *ReleaseServer) reuseValues(req *services.UpdateReleaseRequest, current *release.Release) error {
	if req.ResetValues {
		// If ResetValues is set, we completely ignore current.Config.
		log.Print("Reset values to the chart's original version.")
		return nil
	}

	// If the ReuseValues flag is set, we always copy the old values over the new config's values.
	if req.ReuseValues {
		log.Print("Reusing the old release's values")

		// We have to regenerate the old coalesced values:
		oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config)
		if err != nil {
			err := fmt.Errorf("failed to rebuild old values: %s", err)
			log.Print(err)
			return err
		}
		nv, err := oldVals.YAML()
		if err != nil {
			return err
		}
		req.Chart.Values = &chart.Config{Raw: nv}
		return nil
	}

	// If req.Values is empty, but current.Config is not, copy current into the
	// request.  "{}\n" (an empty YAML map) is treated the same as no values.
	if (req.Values == nil || req.Values.Raw == "" || req.Values.Raw == "{}\n") &&
		current.Config != nil &&
		current.Config.Raw != "" &&
		current.Config.Raw != "{}\n" {
		log.Printf("Copying values from %s (v%d) to new release.", current.Name, current.Version)
		req.Values = current.Config
	}
	return nil
}
// prepareUpdate builds an updated release for an update operation.
// It returns the current release, the candidate updated release (with freshly
// rendered manifest and hooks), and any error encountered along the way.
func (s *ReleaseServer) prepareUpdate(req *services.UpdateReleaseRequest) (*release.Release, *release.Release, error) {
	if !ValidName.MatchString(req.Name) {
		return nil, nil, errMissingRelease
	}

	if req.Chart == nil {
		return nil, nil, errMissingChart
	}

	// finds the non-deleted release with the given name
	currentRelease, err := s.env.Releases.Last(req.Name)
	if err != nil {
		return nil, nil, err
	}

	// If new values were not supplied in the upgrade, re-use the existing values.
	if err := s.reuseValues(req, currentRelease); err != nil {
		return nil, nil, err
	}

	// Increment revision count. This is passed to templates, and also stored on
	// the release object.
	revision := currentRelease.Version + 1

	ts := timeconv.Now()
	options := chartutil.ReleaseOptions{
		Name:      req.Name,
		Time:      ts,
		Namespace: currentRelease.Namespace,
		IsUpgrade: true,
		Revision:  int(revision),
	}

	// Gather cluster capabilities and render the chart with them.
	caps, err := capabilities(s.clientset.Discovery())
	if err != nil {
		return nil, nil, err
	}
	valuesToRender, err := chartutil.ToRenderValuesCaps(req.Chart, req.Values, options, caps)
	if err != nil {
		return nil, nil, err
	}

	hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions)
	if err != nil {
		return nil, nil, err
	}

	// Store an updated release.
	updatedRelease := &release.Release{
		Name:      req.Name,
		Namespace: currentRelease.Namespace,
		Chart:     req.Chart,
		Config:    req.Values,
		Info: &release.Info{
			FirstDeployed: currentRelease.Info.FirstDeployed,
			LastDeployed:  ts,
			Status:        &release.Status{Code: release.Status_UNKNOWN},
			Description:   "Preparing upgrade", // This should be overwritten later.
		},
		Version:  revision,
		Manifest: manifestDoc.String(),
		Hooks:    hooks,
	}

	if len(notesTxt) > 0 {
		updatedRelease.Info.Status.Notes = notesTxt
	}

	// Validate the rendered manifest; any validation error is returned
	// alongside the prepared releases.
	err = validateManifest(s.env.KubeClient, currentRelease.Namespace, manifestDoc.Bytes())
	return currentRelease, updatedRelease, err
}
// RollbackRelease rolls back to a previous version of the given release.
func (s *ReleaseServer) RollbackRelease(c ctx.Context, req *services.RollbackReleaseRequest) (*services.RollbackReleaseResponse, error) {
	current, target, err := s.prepareRollback(req)
	if err != nil {
		return nil, err
	}
	res, err := s.performRollback(current, target, req)
	if err != nil {
		return res, err
	}
	// Persist the rollback revision unless this was only a dry run.
	if req.DryRun {
		return res, nil
	}
	if err := s.env.Releases.Create(target); err != nil {
		return res, err
	}
	return res, nil
}
// performRollback runs the pre/post-rollback hooks (unless disabled) around
// the Kubernetes manifest update, and records the outcome on both releases.
// On a dry run nothing touches the cluster.
func (s *ReleaseServer) performRollback(currentRelease, targetRelease *release.Release, req *services.RollbackReleaseRequest) (*services.RollbackReleaseResponse, error) {
	res := &services.RollbackReleaseResponse{Release: targetRelease}

	if req.DryRun {
		log.Printf("Dry run for %s", targetRelease.Name)
		return res, nil
	}

	// pre-rollback hooks
	if !req.DisableHooks {
		if err := s.execHook(targetRelease.Hooks, targetRelease.Name, targetRelease.Namespace, hooks.PreRollback, req.Timeout); err != nil {
			return res, err
		}
	}

	if err := s.performKubeUpdate(currentRelease, targetRelease, req.Recreate, req.Timeout, req.Wait); err != nil {
		// Mark the current release superseded and the target failed, and
		// record both so history reflects the failed attempt.
		msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err)
		log.Printf("warning: %s", msg)
		currentRelease.Info.Status.Code = release.Status_SUPERSEDED
		targetRelease.Info.Status.Code = release.Status_FAILED
		targetRelease.Info.Description = msg
		s.recordRelease(currentRelease, true)
		s.recordRelease(targetRelease, false)
		return res, err
	}

	// post-rollback hooks
	if !req.DisableHooks {
		if err := s.execHook(targetRelease.Hooks, targetRelease.Name, targetRelease.Namespace, hooks.PostRollback, req.Timeout); err != nil {
			return res, err
		}
	}

	// Success: current revision is superseded, the rollback target is deployed.
	currentRelease.Info.Status.Code = release.Status_SUPERSEDED
	s.recordRelease(currentRelease, true)

	targetRelease.Info.Status.Code = release.Status_DEPLOYED

	return res, nil
}
// performKubeUpdate hands the current and target manifests to the
// Kubernetes client, which applies the difference in the target's namespace.
func (s *ReleaseServer) performKubeUpdate(currentRelease, targetRelease *release.Release, recreate bool, timeout int64, shouldWait bool) error {
	return s.env.KubeClient.Update(
		targetRelease.Namespace,
		bytes.NewBufferString(currentRelease.Manifest),
		bytes.NewBufferString(targetRelease.Manifest),
		recreate, timeout, shouldWait)
}
// prepareRollback finds the previous release and prepares a new release object with
// the previous release's configuration
func (s *ReleaseServer) prepareRollback(req *services.RollbackReleaseRequest) (*release.Release, *release.Release, error) {
	switch {
	case !ValidName.MatchString(req.Name):
		return nil, nil, errMissingRelease
	case req.Version < 0:
		return nil, nil, errInvalidRevision
	}

	crls, err := s.env.Releases.Last(req.Name)
	if err != nil {
		return nil, nil, err
	}

	// Version 0 means "the revision just before the current one".
	rbv := req.Version
	if req.Version == 0 {
		rbv = crls.Version - 1
	}

	log.Printf("rolling back %s (current: v%d, target: v%d)", req.Name, crls.Version, rbv)

	prls, err := s.env.Releases.Get(req.Name, rbv)
	if err != nil {
		return nil, nil, err
	}

	// Store a new release object with previous release's configuration.
	// Note the new revision number: a rollback creates a fresh revision
	// (crls.Version+1) rather than reactivating the old record.
	target := &release.Release{
		Name:      req.Name,
		Namespace: crls.Namespace,
		Chart:     prls.Chart,
		Config:    prls.Config,
		Info: &release.Info{
			FirstDeployed: crls.Info.FirstDeployed,
			LastDeployed:  timeconv.Now(),
			Status: &release.Status{
				Code:  release.Status_UNKNOWN,
				Notes: prls.Info.Status.Notes,
			},
			// Because we lose the reference to rbv elsewhere, we set the
			// message here, and only override it later if we experience failure.
			Description: fmt.Sprintf("Rollback to %d", rbv),
		},
		Version:  crls.Version + 1,
		Manifest: prls.Manifest,
		Hooks:    prls.Hooks,
	}

	return crls, target, nil
}
// uniqName returns a unique release name.
//
// If a name is supplied, we check to see if that name is taken. If not, it
// is granted. If reuse is true and a deleted release with that name exists,
// we re-grant it. Otherwise, an error is returned.  With no supplied name,
// up to five random names are generated and checked for availability.
func (s *ReleaseServer) uniqName(start string, reuse bool) (string, error) {
	if start != "" {
		if len(start) > releaseNameMaxLen {
			return "", fmt.Errorf("release name %q exceeds max length of %d", start, releaseNameMaxLen)
		}

		h, err := s.env.Releases.History(start)
		if err != nil || len(h) < 1 {
			return start, nil
		}
		relutil.Reverse(h, relutil.SortByRevision)
		rel := h[0]

		if st := rel.Info.Status.Code; reuse && (st == release.Status_DELETED || st == release.Status_FAILED) {
			// Allow re-use of names if the previous release is marked deleted.
			log.Printf("reusing name %q", start)
			return start, nil
		} else if reuse {
			return "", errors.New("cannot re-use a name that is still in use")
		}

		return "", fmt.Errorf("a release named %q already exists.\nPlease run: helm ls --all %q; helm del --help", start, start)
	}

	maxTries := 5
	for i := 0; i < maxTries; i++ {
		namer := moniker.New()
		name := namer.NameSep("-")
		if len(name) > releaseNameMaxLen {
			name = name[:releaseNameMaxLen]
		}
		// BUG FIX: the original called err.Error() unconditionally, which
		// panics with a nil-pointer dereference whenever Get succeeds
		// (err == nil), i.e. exactly when a generated name is taken.
		if _, err := s.env.Releases.Get(name, 1); err != nil && strings.Contains(err.Error(), "not found") {
			return name, nil
		}
		log.Printf("info: Name %q is taken. Searching again.", name)
	}
	log.Printf("warning: No available release names found after %d tries", maxTries)
	return "ERROR", errors.New("no available release name found")
}
// engine returns the rendering engine named in the chart's metadata,
// falling back to the default engine when none is named or the named
// engine is not registered.
func (s *ReleaseServer) engine(ch *chart.Chart) environment.Engine {
	fallback := s.env.EngineYard.Default()
	if ch.Metadata.Engine == "" {
		return fallback
	}
	if r, ok := s.env.EngineYard.Get(ch.Metadata.Engine); ok {
		return r
	}
	log.Printf("warning: %s requested non-existent template engine %s", ch.Metadata.Name, ch.Metadata.Engine)
	return fallback
}
// InstallRelease installs a release and stores the release record.
func (s *ReleaseServer) InstallRelease(c ctx.Context, req *services.InstallReleaseRequest) (*services.InstallReleaseResponse, error) {
	rel, err := s.prepareRelease(req)
	if err != nil {
		log.Printf("Failed install prepare step: %s", err)
		res := &services.InstallReleaseResponse{Release: rel}

		// On dry run, append the manifest contents to a failed release. This is
		// a stop-gap until we can revisit an error backchannel post-2.0.
		// (prepareRelease returns a partially populated release on render
		// errors, which is where YAML parse errors originate.)
		if req.DryRun && strings.HasPrefix(err.Error(), "YAML parse error") {
			err = fmt.Errorf("%s\n%s", err, rel.Manifest)
		}
		return res, err
	}

	// Errors from the perform step are logged but still returned with the
	// response so the client sees both.
	res, err := s.performRelease(rel, req)
	if err != nil {
		log.Printf("Failed install perform step: %s", err)
	}
	return res, err
}
// capabilities builds a Capabilities from discovery information.
func capabilities(disc discovery.DiscoveryInterface) (*chartutil.Capabilities, error) {
	kubeVer, err := disc.ServerVersion()
	if err != nil {
		return nil, err
	}
	apiVers, err := getVersionSet(disc)
	if err != nil {
		return nil, fmt.Errorf("Could not get apiVersions from Kubernetes: %s", err)
	}
	return &chartutil.Capabilities{
		APIVersions:   apiVers,
		KubeVersion:   kubeVer,
		TillerVersion: version.GetVersionProto(),
	}, nil
}
// prepareRelease builds a release for an install operation.
// It resolves a unique release name, gathers cluster capabilities, renders
// the chart, and returns the assembled (not yet persisted) release record.
func (s *ReleaseServer) prepareRelease(req *services.InstallReleaseRequest) (*release.Release, error) {
	if req.Chart == nil {
		return nil, errMissingChart
	}

	name, err := s.uniqName(req.Name, req.ReuseName)
	if err != nil {
		return nil, err
	}

	caps, err := capabilities(s.clientset.Discovery())
	if err != nil {
		return nil, err
	}

	// A fresh install always starts at revision 1.
	revision := 1
	ts := timeconv.Now()
	options := chartutil.ReleaseOptions{
		Name:      name,
		Time:      ts,
		Namespace: req.Namespace,
		Revision:  revision,
		IsInstall: true,
	}
	valuesToRender, err := chartutil.ToRenderValuesCaps(req.Chart, req.Values, options, caps)
	if err != nil {
		return nil, err
	}

	hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions)
	if err != nil {
		// Return a release with partial data so that client can show debugging
		// information.
		rel := &release.Release{
			Name:      name,
			Namespace: req.Namespace,
			Chart:     req.Chart,
			Config:    req.Values,
			Info: &release.Info{
				FirstDeployed: ts,
				LastDeployed:  ts,
				Status:        &release.Status{Code: release.Status_UNKNOWN},
				Description:   fmt.Sprintf("Install failed: %s", err),
			},
			Version: 0,
		}
		if manifestDoc != nil {
			rel.Manifest = manifestDoc.String()
		}
		return rel, err
	}

	// Store a release.
	rel := &release.Release{
		Name:      name,
		Namespace: req.Namespace,
		Chart:     req.Chart,
		Config:    req.Values,
		Info: &release.Info{
			FirstDeployed: ts,
			LastDeployed:  ts,
			Status:        &release.Status{Code: release.Status_UNKNOWN},
			Description:   "Initial install underway", // Will be overwritten.
		},
		Manifest: manifestDoc.String(),
		Hooks:    hooks,
		Version:  int32(revision),
	}
	if len(notesTxt) > 0 {
		rel.Info.Status.Notes = notesTxt
	}

	// Validate the rendered manifest; any validation error is returned
	// alongside the assembled release.
	err = validateManifest(s.env.KubeClient, req.Namespace, manifestDoc.Bytes())
	return rel, err
}
// getVersionSet queries the server for its API groups and converts them into
// a chartutil.VersionSet, falling back to the default set when discovery
// fails or yields nothing.
func getVersionSet(client discovery.ServerGroupsInterface) (chartutil.VersionSet, error) {
	groups, err := client.ServerGroups()
	if err != nil {
		return chartutil.DefaultVersionSet, err
	}
	// FIXME: The Kubernetes test fixture for cli appears to always return nil
	// for calls to Discovery().ServerGroups(). So in this case, we return
	// the default API list. This is also a safe value to return in any other
	// odd-ball case.
	if groups == nil {
		return chartutil.DefaultVersionSet, nil
	}
	return chartutil.NewVersionSet(metav1.ExtractGroupVersions(groups)...), nil
}
// renderResources renders the chart's templates and partitions the output
// into hooks, one aggregated manifest document, and the parent chart's
// NOTES.txt text. On a manifest parse error it returns the full rendered
// blob (instead of the sorted manifests) so the user can debug.
func (s *ReleaseServer) renderResources(ch *chart.Chart, values chartutil.Values, vs chartutil.VersionSet) ([]*release.Hook, *bytes.Buffer, string, error) {
	// Guard to make sure Tiller is at the right version to handle this chart.
	sver := version.GetVersion()
	if ch.Metadata.TillerVersion != "" &&
		!version.IsCompatibleRange(ch.Metadata.TillerVersion, sver) {
		return nil, nil, "", fmt.Errorf("Chart incompatible with Tiller %s", sver)
	}
	renderer := s.engine(ch)
	files, err := renderer.Render(ch, values)
	if err != nil {
		return nil, nil, "", err
	}
	// NOTES.txt gets rendered like all the other files, but because it's not a hook nor a resource,
	// pull it out of here into a separate file so that we can actually use the output of the rendered
	// text file. We have to spin through this map because the file contains path information, so we
	// look for terminating NOTES.txt. We also remove it from the files so that we don't have to skip
	// it in the sortHooks.
	notes := ""
	for k, v := range files {
		if strings.HasSuffix(k, notesFileSuffix) {
			// Only apply the notes if it belongs to the parent chart
			// Note: Do not use filePath.Join since it creates a path with \ which is not expected
			if k == path.Join(ch.Metadata.Name, "templates", notesFileSuffix) {
				notes = v
			}
			// Subchart NOTES.txt files are discarded entirely.
			delete(files, k)
		}
	}
	// Sort hooks, manifests, and partials. Only hooks and manifests are returned,
	// as partials are not used after renderer.Render. Empty manifests are also
	// removed here.
	hooks, manifests, err := sortManifests(files, vs, InstallOrder)
	if err != nil {
		// By catching parse errors here, we can prevent bogus releases from going
		// to Kubernetes.
		//
		// We return the files as a big blob of data to help the user debug parser
		// errors.
		b := bytes.NewBuffer(nil)
		for name, content := range files {
			// Skip whitespace-only files in the debug blob.
			if len(strings.TrimSpace(content)) == 0 {
				continue
			}
			b.WriteString("\n---\n# Source: " + name + "\n")
			b.WriteString(content)
		}
		return nil, b, "", err
	}
	// Aggregate all valid manifests into one big doc.
	b := bytes.NewBuffer(nil)
	for _, m := range manifests {
		b.WriteString("\n---\n# Source: " + m.name + "\n")
		b.WriteString(m.content)
	}
	return hooks, b, notes, nil
}
// recordRelease persists r to release storage: an update of an existing
// record when reuse is true, otherwise a new record. Storage failures are
// logged rather than returned.
func (s *ReleaseServer) recordRelease(r *release.Release, reuse bool) {
	if !reuse {
		if err := s.env.Releases.Create(r); err != nil {
			log.Printf("warning: Failed to record release %q: %s", r.Name, err)
		}
		return
	}
	if err := s.env.Releases.Update(r); err != nil {
		log.Printf("warning: Failed to update release %q: %s", r.Name, err)
	}
}
// performRelease runs a release.
//
// For a dry run it returns immediately without touching the cluster.
// Otherwise it runs pre-install hooks, creates the resources in Kubernetes
// (or performs an update when replacing a previously deleted release of the
// same name), runs post-install hooks, and records the release as DEPLOYED.
func (s *ReleaseServer) performRelease(r *release.Release, req *services.InstallReleaseRequest) (*services.InstallReleaseResponse, error) {
	res := &services.InstallReleaseResponse{Release: r}
	if req.DryRun {
		log.Printf("Dry run for %s", r.Name)
		res.Release.Info.Description = "Dry run complete"
		return res, nil
	}
	// pre-install hooks
	if !req.DisableHooks {
		if err := s.execHook(r.Hooks, r.Name, r.Namespace, hooks.PreInstall, req.Timeout); err != nil {
			return res, err
		}
	}
	switch h, err := s.env.Releases.History(req.Name); {
	// if this is a replace operation, append to the release history
	case req.ReuseName && err == nil && len(h) >= 1:
		// get latest release revision
		relutil.Reverse(h, relutil.SortByRevision)
		// old release
		old := h[0]
		// update old release status
		old.Info.Status.Code = release.Status_SUPERSEDED
		s.recordRelease(old, true)
		// update new release with next revision number
		// so as to append to the old release's history
		r.Version = old.Version + 1
		if err := s.performKubeUpdate(old, r, false, req.Timeout, req.Wait); err != nil {
			msg := fmt.Sprintf("Release replace %q failed: %s", r.Name, err)
			log.Printf("warning: %s", msg)
			old.Info.Status.Code = release.Status_SUPERSEDED
			r.Info.Status.Code = release.Status_FAILED
			r.Info.Description = msg
			// Record both the superseded and the failed release so the
			// failure shows up in history.
			s.recordRelease(old, true)
			s.recordRelease(r, false)
			return res, err
		}
	default:
		// nothing to replace, create as normal
		// regular manifests
		b := bytes.NewBufferString(r.Manifest)
		if err := s.env.KubeClient.Create(r.Namespace, b, req.Timeout, req.Wait); err != nil {
			msg := fmt.Sprintf("Release %q failed: %s", r.Name, err)
			log.Printf("warning: %s", msg)
			r.Info.Status.Code = release.Status_FAILED
			r.Info.Description = msg
			s.recordRelease(r, false)
			return res, fmt.Errorf("release %s failed: %s", r.Name, err)
		}
	}
	// post-install hooks
	if !req.DisableHooks {
		if err := s.execHook(r.Hooks, r.Name, r.Namespace, hooks.PostInstall, req.Timeout); err != nil {
			msg := fmt.Sprintf("Release %q failed post-install: %s", r.Name, err)
			log.Printf("warning: %s", msg)
			r.Info.Status.Code = release.Status_FAILED
			r.Info.Description = msg
			s.recordRelease(r, false)
			return res, err
		}
	}
	r.Info.Status.Code = release.Status_DEPLOYED
	r.Info.Description = "Install complete"
	// This is a tricky case. The release has been created, but the result
	// cannot be recorded. The truest thing to tell the user is that the
	// release was created. However, the user will not be able to do anything
	// further with this release.
	//
	// One possible strategy would be to do a timed retry to see if we can get
	// this stored in the future.
	// (recordRelease logs rather than returns storage errors, so a failed
	// record still results in a successful response below.)
	s.recordRelease(r, false)
	return res, nil
}
// execHook runs all hooks from hs that are registered for the given
// lifecycle event name (hook), in hook-weight order. Each hook manifest is
// created in the cluster and then watched until it completes; the first
// failure aborts execution and is returned.
func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook string, timeout int64) error {
	kubeCli := s.env.KubeClient
	code, ok := events[hook]
	if !ok {
		return fmt.Errorf("unknown hook %q", hook)
	}
	log.Printf("Executing %s hooks for %s", hook, name)
	// Select only the hooks subscribed to this event.
	executingHooks := []*release.Hook{}
	for _, h := range hs {
		for _, e := range h.Events {
			if e == code {
				executingHooks = append(executingHooks, h)
			}
		}
	}
	executingHooks = sortByHookWeight(executingHooks)
	for _, h := range executingHooks {
		b := bytes.NewBufferString(h.Manifest)
		if err := kubeCli.Create(namespace, b, timeout, false); err != nil {
			log.Printf("warning: Release %q %s %s failed: %s", name, hook, h.Path, err)
			return err
		}
		// No way to rewind a bytes.Buffer()?
		// Create consumed the buffer, so refill it before watching.
		b.Reset()
		b.WriteString(h.Manifest)
		if err := kubeCli.WatchUntilReady(namespace, b, timeout, false); err != nil {
			log.Printf("warning: Release %q %s %s could not complete: %s", name, hook, h.Path, err)
			return err
		}
		h.LastRun = timeconv.Now()
	}
	log.Printf("Hooks complete for %s %s", hook, name)
	return nil
}
// purgeReleases removes every given release record from storage, stopping at
// the first deletion error.
func (s *ReleaseServer) purgeReleases(rels ...*release.Release) error {
	for _, r := range rels {
		_, err := s.env.Releases.Delete(r.Name, r.Version)
		if err != nil {
			return err
		}
	}
	return nil
}
// UninstallRelease deletes all of the resources associated with this release, and marks the release DELETED.
//
// Flow: validate the name, load the latest revision, run pre-delete hooks,
// delete the release's resources in UninstallOrder, run post-delete hooks,
// then either purge the whole release history (req.Purge) or persist the
// release in DELETED state. Individual deletion errors are collected and
// reported together at the end.
func (s *ReleaseServer) UninstallRelease(c ctx.Context, req *services.UninstallReleaseRequest) (*services.UninstallReleaseResponse, error) {
	if !ValidName.MatchString(req.Name) {
		log.Printf("uninstall: Release not found: %s", req.Name)
		return nil, errMissingRelease
	}
	if len(req.Name) > releaseNameMaxLen {
		return nil, fmt.Errorf("release name %q exceeds max length of %d", req.Name, releaseNameMaxLen)
	}
	rels, err := s.env.Releases.History(req.Name)
	if err != nil {
		log.Printf("uninstall: Release not loaded: %s", req.Name)
		return nil, err
	}
	if len(rels) < 1 {
		return nil, errMissingRelease
	}
	// Operate on the most recent revision.
	relutil.SortByRevision(rels)
	rel := rels[len(rels)-1]
	// TODO: Are there any cases where we want to force a delete even if it's
	// already marked deleted?
	if rel.Info.Status.Code == release.Status_DELETED {
		if req.Purge {
			if err := s.purgeReleases(rels...); err != nil {
				log.Printf("uninstall: Failed to purge the release: %s", err)
				return nil, err
			}
			return &services.UninstallReleaseResponse{Release: rel}, nil
		}
		return nil, fmt.Errorf("the release named %q is already deleted", req.Name)
	}
	log.Printf("uninstall: Deleting %s", req.Name)
	rel.Info.Status.Code = release.Status_DELETING
	rel.Info.Deleted = timeconv.Now()
	rel.Info.Description = "Deletion in progress (or silently failed)"
	res := &services.UninstallReleaseResponse{Release: rel}
	if !req.DisableHooks {
		if err := s.execHook(rel.Hooks, rel.Name, rel.Namespace, hooks.PreDelete, req.Timeout); err != nil {
			return res, err
		}
	}
	vs, err := getVersionSet(s.clientset.Discovery())
	if err != nil {
		return nil, fmt.Errorf("Could not get apiVersions from Kubernetes: %s", err)
	}
	// From here on out, the release is currently considered to be in Status_DELETING
	// state.
	if err := s.env.Releases.Update(rel); err != nil {
		log.Printf("uninstall: Failed to store updated release: %s", err)
	}
	// Sort the stored manifests into uninstall order before deleting.
	manifests := relutil.SplitManifests(rel.Manifest)
	_, files, err := sortManifests(manifests, vs, UninstallOrder)
	if err != nil {
		// We could instead just delete everything in no particular order.
		// FIXME: One way to delete at this point would be to try a label-based
		// deletion. The problem with this is that we could get a false positive
		// and delete something that was not legitimately part of this release.
		return nil, fmt.Errorf("corrupted release record. You must manually delete the resources: %s", err)
	}
	// Some manifests are kept rather than deleted; those are only summarized
	// in the response.
	filesToKeep, filesToDelete := filterManifestsToKeep(files)
	if len(filesToKeep) > 0 {
		res.Info = summarizeKeptManifests(filesToKeep)
	}
	// Collect the errors, and return them later.
	es := []string{}
	for _, file := range filesToDelete {
		b := bytes.NewBufferString(file.content)
		if err := s.env.KubeClient.Delete(rel.Namespace, b); err != nil {
			log.Printf("uninstall: Failed deletion of %q: %s", req.Name, err)
			if err == kube.ErrNoObjectsVisited {
				// Rewrite the message from "no objects visited"
				err = errors.New("object not found, skipping delete")
			}
			es = append(es, err.Error())
		}
	}
	if !req.DisableHooks {
		if err := s.execHook(rel.Hooks, rel.Name, rel.Namespace, hooks.PostDelete, req.Timeout); err != nil {
			es = append(es, err.Error())
		}
	}
	rel.Info.Status.Code = release.Status_DELETED
	rel.Info.Description = "Deletion complete"
	if req.Purge {
		// Purge removes every stored revision of the release.
		err := s.purgeReleases(rels...)
		if err != nil {
			log.Printf("uninstall: Failed to purge the release: %s", err)
		}
		return res, err
	}
	if err := s.env.Releases.Update(rel); err != nil {
		log.Printf("uninstall: Failed to store updated release: %s", err)
	}
	if len(es) > 0 {
		return res, fmt.Errorf("deletion completed with %d error(s): %s", len(es), strings.Join(es, "; "))
	}
	return res, nil
}
// validateManifest checks that the rendered manifest bytes can be built into
// unstructured Kubernetes objects for the given namespace.
func validateManifest(c environment.KubeClient, ns string, manifest []byte) error {
	_, err := c.BuildUnstructured(ns, bytes.NewReader(manifest))
	return err
}
// RunReleaseTest runs pre-defined tests stored as hooks on a given release
// and streams results back to the client. The suite's outcome is stored on
// the release record; when req.Cleanup is set, the test pods are deleted
// afterwards.
func (s *ReleaseServer) RunReleaseTest(req *services.TestReleaseRequest, stream services.ReleaseService_RunReleaseTestServer) error {
	if !ValidName.MatchString(req.Name) {
		return errMissingRelease
	}
	// finds the non-deleted release with the given name
	rel, err := s.env.Releases.Last(req.Name)
	if err != nil {
		return err
	}
	testEnv := &reltesting.Environment{
		Namespace:  rel.Namespace,
		KubeClient: s.env.KubeClient,
		Timeout:    req.Timeout,
		Stream:     stream,
	}
	tSuite, err := reltesting.NewTestSuite(rel)
	if err != nil {
		log.Printf("Error creating test suite for %s", rel.Name)
		return err
	}
	if err := tSuite.Run(testEnv); err != nil {
		log.Printf("Error running test suite for %s", rel.Name)
		return err
	}
	// Persist the results on the release's status.
	rel.Info.Status.LastTestSuiteRun = &release.TestSuite{
		StartedAt:   tSuite.StartedAt,
		CompletedAt: tSuite.CompletedAt,
		Results:     tSuite.Results,
	}
	if req.Cleanup {
		// Best effort: the cleanup result is not checked.
		testEnv.DeleteTestPods(tSuite.TestManifests)
	}
	return s.env.Releases.Update(rel)
}
// fix(tiller): ignore empty YAML documents during delete
// Closes #2256
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tiller
import (
"bytes"
"errors"
"fmt"
"log"
"path"
"regexp"
"strings"
"github.com/technosophos/moniker"
ctx "golang.org/x/net/context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/discovery"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/helm/pkg/chartutil"
"k8s.io/helm/pkg/hooks"
"k8s.io/helm/pkg/kube"
"k8s.io/helm/pkg/proto/hapi/chart"
"k8s.io/helm/pkg/proto/hapi/release"
"k8s.io/helm/pkg/proto/hapi/services"
reltesting "k8s.io/helm/pkg/releasetesting"
relutil "k8s.io/helm/pkg/releaseutil"
"k8s.io/helm/pkg/tiller/environment"
"k8s.io/helm/pkg/timeconv"
"k8s.io/helm/pkg/version"
)
// releaseNameMaxLen is the maximum length of a release name.
//
// As of Kubernetes 1.4, the max limit on a name is 63 chars. We reserve 10 for
// charts to add data. Effectively, that gives us 53 chars.
// See https://github.com/kubernetes/helm/issues/1528
const releaseNameMaxLen = 53

// NOTESFILE_SUFFIX that we want to treat special. It goes through the templating engine
// but it's not a yaml file (resource) hence can't have hooks, etc. And the user actually
// wants to see this file after rendering in the status command. However, it must be a suffix
// since there can be filepath in front of it.
const notesFileSuffix = "NOTES.txt"

var (
	// errMissingChart indicates that a chart was not provided.
	errMissingChart = errors.New("no chart provided")
	// errMissingRelease indicates that a release (name) was not provided.
	errMissingRelease = errors.New("no release provided")
	// errInvalidRevision indicates that an invalid release revision number was provided.
	errInvalidRevision = errors.New("invalid release revision")
)

// ListDefaultLimit is the default limit for number of items returned in a list.
// It is applied by ListReleases when the request does not specify a limit.
var ListDefaultLimit int64 = 512

// ValidName is a regular expression for names.
//
// According to the Kubernetes help text, the regular expression it uses is:
//
//	(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?
//
// We modified that. First, we added start and end delimiters. Second, we changed
// the final ? to + to require that the pattern match at least once. This modification
// prevents an empty string from matching.
var ValidName = regexp.MustCompile("^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])+$")
// ReleaseServer implements the server-side gRPC endpoint for the HAPI services.
type ReleaseServer struct {
	// env provides Tiller's runtime dependencies: release storage
	// (env.Releases), the Kubernetes client (env.KubeClient), and the
	// template engines (env.EngineYard).
	env *environment.Environment
	// clientset is used for API discovery (server version, API groups).
	clientset internalclientset.Interface
}
// NewReleaseServer creates a new release server backed by the given
// environment and Kubernetes clientset.
func NewReleaseServer(env *environment.Environment, clientset internalclientset.Interface) *ReleaseServer {
	srv := &ReleaseServer{env: env, clientset: clientset}
	return srv
}
// ListReleases lists the releases found by the server.
//
// Pipeline: filter by status codes (default: DEPLOYED only), then by
// namespace and name regex, sort, apply offset and limit, and stream one
// response containing the resulting page.
func (s *ReleaseServer) ListReleases(req *services.ListReleasesRequest, stream services.ReleaseService_ListReleasesServer) error {
	if len(req.StatusCodes) == 0 {
		req.StatusCodes = []release.Status_Code{release.Status_DEPLOYED}
	}
	//rels, err := s.env.Releases.ListDeployed()
	rels, err := s.env.Releases.ListFilterAll(func(r *release.Release) bool {
		for _, sc := range req.StatusCodes {
			if sc == r.Info.Status.Code {
				return true
			}
		}
		return false
	})
	if err != nil {
		return err
	}
	if req.Namespace != "" {
		rels, err = filterByNamespace(req.Namespace, rels)
		if err != nil {
			return err
		}
	}
	if len(req.Filter) != 0 {
		rels, err = filterReleases(req.Filter, rels)
		if err != nil {
			return err
		}
	}
	// total counts matches before pagination.
	total := int64(len(rels))
	switch req.SortBy {
	case services.ListSort_NAME:
		relutil.SortByName(rels)
	case services.ListSort_LAST_RELEASED:
		relutil.SortByDate(rels)
	}
	if req.SortOrder == services.ListSort_DESC {
		// Reverse in place by copying into a new slice back-to-front.
		ll := len(rels)
		rr := make([]*release.Release, ll)
		for i, item := range rels {
			rr[ll-i-1] = item
		}
		rels = rr
	}
	l := int64(len(rels))
	if req.Offset != "" {
		// The offset is a release name; the returned window starts AT that
		// release (inclusive).
		i := -1
		for ii, cur := range rels {
			if cur.Name == req.Offset {
				i = ii
			}
		}
		if i == -1 {
			return fmt.Errorf("offset %q not found", req.Offset)
		}
		if len(rels) < i {
			return fmt.Errorf("no items after %q", req.Offset)
		}
		rels = rels[i:]
		l = int64(len(rels))
	}
	if req.Limit == 0 {
		req.Limit = ListDefaultLimit
	}
	// Next is the name of the first item of the following page, if any.
	next := ""
	if l > req.Limit {
		next = rels[req.Limit].Name
		rels = rels[0:req.Limit]
		l = int64(len(rels))
	}
	res := &services.ListReleasesResponse{
		Next:     next,
		Count:    l,
		Total:    total,
		Releases: rels,
	}
	return stream.Send(res)
}
// filterByNamespace returns only the releases that live in the given
// namespace. The error result is always nil; it exists to mirror the shape
// of filterReleases.
func filterByNamespace(namespace string, rels []*release.Release) ([]*release.Release, error) {
	out := []*release.Release{}
	for _, rel := range rels {
		if rel.Namespace == namespace {
			out = append(out, rel)
		}
	}
	return out, nil
}
// filterReleases returns the releases whose names match the given regular
// expression. On a bad pattern the unfiltered input is returned with the
// compile error.
func filterReleases(filter string, rels []*release.Release) ([]*release.Release, error) {
	re, err := regexp.Compile(filter)
	if err != nil {
		return rels, err
	}
	out := []*release.Release{}
	for _, rel := range rels {
		if re.MatchString(rel.Name) {
			out = append(out, rel)
		}
	}
	return out, nil
}
// GetVersion sends the server version.
func (s *ReleaseServer) GetVersion(c ctx.Context, req *services.GetVersionRequest) (*services.GetVersionResponse, error) {
	return &services.GetVersionResponse{Version: version.GetVersionProto()}, nil
}
// GetReleaseStatus gets the status information for a named release.
//
// req.Version <= 0 selects the latest revision; otherwise the exact revision
// is loaded. Live resource state is fetched from the cluster, except for
// DELETED/FAILED releases, where lookup errors are deliberately ignored.
func (s *ReleaseServer) GetReleaseStatus(c ctx.Context, req *services.GetReleaseStatusRequest) (*services.GetReleaseStatusResponse, error) {
	if !ValidName.MatchString(req.Name) {
		return nil, errMissingRelease
	}
	var rel *release.Release
	if req.Version <= 0 {
		var err error
		rel, err = s.env.Releases.Last(req.Name)
		if err != nil {
			return nil, fmt.Errorf("getting deployed release %q: %s", req.Name, err)
		}
	} else {
		var err error
		if rel, err = s.env.Releases.Get(req.Name, req.Version); err != nil {
			return nil, fmt.Errorf("getting release '%s' (v%d): %s", req.Name, req.Version, err)
		}
	}
	if rel.Info == nil {
		return nil, errors.New("release info is missing")
	}
	if rel.Chart == nil {
		return nil, errors.New("release chart is missing")
	}
	sc := rel.Info.Status.Code
	// statusResp shares rel.Info, so the Resources assignment below is
	// visible in the response.
	statusResp := &services.GetReleaseStatusResponse{
		Name:      rel.Name,
		Namespace: rel.Namespace,
		Info:      rel.Info,
	}
	// Ok, we got the status of the release as we had jotted down, now we need to match the
	// manifest we stashed away with reality from the cluster.
	kubeCli := s.env.KubeClient
	resp, err := kubeCli.Get(rel.Namespace, bytes.NewBufferString(rel.Manifest))
	if sc == release.Status_DELETED || sc == release.Status_FAILED {
		// Skip errors if this is already deleted or failed.
		return statusResp, nil
	} else if err != nil {
		log.Printf("warning: Get for %s failed: %v", rel.Name, err)
		return nil, err
	}
	rel.Info.Status.Resources = resp
	return statusResp, nil
}
// GetReleaseContent gets all of the stored information for the given release.
// A non-positive version selects the currently deployed revision.
func (s *ReleaseServer) GetReleaseContent(c ctx.Context, req *services.GetReleaseContentRequest) (*services.GetReleaseContentResponse, error) {
	if !ValidName.MatchString(req.Name) {
		return nil, errMissingRelease
	}
	var rel *release.Release
	var err error
	if req.Version > 0 {
		rel, err = s.env.Releases.Get(req.Name, req.Version)
	} else {
		rel, err = s.env.Releases.Deployed(req.Name)
	}
	return &services.GetReleaseContentResponse{Release: rel}, err
}
// UpdateRelease takes an existing release and new information, and upgrades the release.
// The upgraded release is persisted as a new revision unless this is a dry run.
func (s *ReleaseServer) UpdateRelease(c ctx.Context, req *services.UpdateReleaseRequest) (*services.UpdateReleaseResponse, error) {
	current, updated, err := s.prepareUpdate(req)
	if err != nil {
		return nil, err
	}
	res, err := s.performUpdate(current, updated, req)
	if err != nil {
		return res, err
	}
	if req.DryRun {
		return res, nil
	}
	if err := s.env.Releases.Create(updated); err != nil {
		return res, err
	}
	return res, nil
}
// performUpdate applies an upgrade: pre-upgrade hooks, the Kubernetes diff
// update, then post-upgrade hooks. On success the original release is marked
// SUPERSEDED and the updated release DEPLOYED; the updated release itself is
// persisted by the caller (UpdateRelease). On update failure both releases
// are recorded with their failure statuses and the error is returned.
func (s *ReleaseServer) performUpdate(originalRelease, updatedRelease *release.Release, req *services.UpdateReleaseRequest) (*services.UpdateReleaseResponse, error) {
	res := &services.UpdateReleaseResponse{Release: updatedRelease}
	if req.DryRun {
		log.Printf("Dry run for %s", updatedRelease.Name)
		res.Release.Info.Description = "Dry run complete"
		return res, nil
	}
	// pre-upgrade hooks
	if !req.DisableHooks {
		if err := s.execHook(updatedRelease.Hooks, updatedRelease.Name, updatedRelease.Namespace, hooks.PreUpgrade, req.Timeout); err != nil {
			return res, err
		}
	}
	if err := s.performKubeUpdate(originalRelease, updatedRelease, req.Recreate, req.Timeout, req.Wait); err != nil {
		msg := fmt.Sprintf("Upgrade %q failed: %s", updatedRelease.Name, err)
		log.Printf("warning: %s", msg)
		originalRelease.Info.Status.Code = release.Status_SUPERSEDED
		updatedRelease.Info.Status.Code = release.Status_FAILED
		updatedRelease.Info.Description = msg
		// Record both releases so the failed upgrade is visible in history.
		s.recordRelease(originalRelease, true)
		s.recordRelease(updatedRelease, false)
		return res, err
	}
	// post-upgrade hooks
	if !req.DisableHooks {
		if err := s.execHook(updatedRelease.Hooks, updatedRelease.Name, updatedRelease.Namespace, hooks.PostUpgrade, req.Timeout); err != nil {
			return res, err
		}
	}
	originalRelease.Info.Status.Code = release.Status_SUPERSEDED
	s.recordRelease(originalRelease, true)
	updatedRelease.Info.Status.Code = release.Status_DEPLOYED
	updatedRelease.Info.Description = "Upgrade complete"
	return res, nil
}
// reuseValues copies values from the current release to a new release if the
// new release does not have any values.
//
// If the request already has values, or if there are no values in the current
// release, this does nothing.
//
// This is skipped if the req.ResetValues flag is set, in which case the
// request values are not altered.
func (s *ReleaseServer) reuseValues(req *services.UpdateReleaseRequest, current *release.Release) error {
	if req.ResetValues {
		// If ResetValues is set, we completely ignore current.Config.
		log.Print("Reset values to the chart's original version.")
		return nil
	}
	// If the ReuseValues flag is set, we always copy the old values over the new config's values.
	if req.ReuseValues {
		log.Print("Reusing the old release's values")
		// We have to regenerate the old coalesced values:
		oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config)
		if err != nil {
			err := fmt.Errorf("failed to rebuild old values: %s", err)
			log.Print(err)
			return err
		}
		// Re-serialize the coalesced values as the chart's values so they
		// take effect in the new render.
		nv, err := oldVals.YAML()
		if err != nil {
			return err
		}
		req.Chart.Values = &chart.Config{Raw: nv}
		return nil
	}
	// If req.Values is empty, but current.Config is not, copy current into the
	// request. ("{}\n" is what an empty YAML map serializes to.)
	if (req.Values == nil || req.Values.Raw == "" || req.Values.Raw == "{}\n") &&
		current.Config != nil &&
		current.Config.Raw != "" &&
		current.Config.Raw != "{}\n" {
		log.Printf("Copying values from %s (v%d) to new release.", current.Name, current.Version)
		req.Values = current.Config
	}
	return nil
}
// prepareUpdate builds an updated release for an update operation.
//
// It loads the latest non-deleted revision, optionally reuses its values,
// re-renders the chart at the next revision number, and returns both the
// current and the new (unrecorded) release objects.
func (s *ReleaseServer) prepareUpdate(req *services.UpdateReleaseRequest) (*release.Release, *release.Release, error) {
	if !ValidName.MatchString(req.Name) {
		return nil, nil, errMissingRelease
	}
	if req.Chart == nil {
		return nil, nil, errMissingChart
	}
	// finds the non-deleted release with the given name
	currentRelease, err := s.env.Releases.Last(req.Name)
	if err != nil {
		return nil, nil, err
	}
	// If new values were not supplied in the upgrade, re-use the existing values.
	if err := s.reuseValues(req, currentRelease); err != nil {
		return nil, nil, err
	}
	// Increment revision count. This is passed to templates, and also stored on
	// the release object.
	revision := currentRelease.Version + 1
	ts := timeconv.Now()
	options := chartutil.ReleaseOptions{
		Name:      req.Name,
		Time:      ts,
		Namespace: currentRelease.Namespace,
		IsUpgrade: true,
		Revision:  int(revision),
	}
	caps, err := capabilities(s.clientset.Discovery())
	if err != nil {
		return nil, nil, err
	}
	valuesToRender, err := chartutil.ToRenderValuesCaps(req.Chart, req.Values, options, caps)
	if err != nil {
		return nil, nil, err
	}
	hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions)
	if err != nil {
		return nil, nil, err
	}
	// Store an updated release.
	updatedRelease := &release.Release{
		Name:      req.Name,
		Namespace: currentRelease.Namespace,
		Chart:     req.Chart,
		Config:    req.Values,
		Info: &release.Info{
			FirstDeployed: currentRelease.Info.FirstDeployed,
			LastDeployed:  ts,
			Status:        &release.Status{Code: release.Status_UNKNOWN},
			Description:   "Preparing upgrade", // This should be overwritten later.
		},
		Version:  revision,
		Manifest: manifestDoc.String(),
		Hooks:    hooks,
	}
	if len(notesTxt) > 0 {
		updatedRelease.Info.Status.Notes = notesTxt
	}
	// Validation failures are returned alongside both built releases.
	err = validateManifest(s.env.KubeClient, currentRelease.Namespace, manifestDoc.Bytes())
	return currentRelease, updatedRelease, err
}
// RollbackRelease rolls back to a previous version of the given release.
// The rollback target is persisted as a new revision unless this is a dry run.
func (s *ReleaseServer) RollbackRelease(c ctx.Context, req *services.RollbackReleaseRequest) (*services.RollbackReleaseResponse, error) {
	current, target, err := s.prepareRollback(req)
	if err != nil {
		return nil, err
	}
	res, err := s.performRollback(current, target, req)
	if err != nil {
		return res, err
	}
	if req.DryRun {
		return res, nil
	}
	if err := s.env.Releases.Create(target); err != nil {
		return res, err
	}
	return res, nil
}
// performRollback applies a rollback: pre-rollback hooks, the Kubernetes
// diff update from current to target, then post-rollback hooks. On success
// the current release is marked SUPERSEDED and the target DEPLOYED; the
// target is persisted by the caller (RollbackRelease). On failure both are
// recorded with their failure statuses.
func (s *ReleaseServer) performRollback(currentRelease, targetRelease *release.Release, req *services.RollbackReleaseRequest) (*services.RollbackReleaseResponse, error) {
	res := &services.RollbackReleaseResponse{Release: targetRelease}
	if req.DryRun {
		log.Printf("Dry run for %s", targetRelease.Name)
		return res, nil
	}
	// pre-rollback hooks
	if !req.DisableHooks {
		if err := s.execHook(targetRelease.Hooks, targetRelease.Name, targetRelease.Namespace, hooks.PreRollback, req.Timeout); err != nil {
			return res, err
		}
	}
	if err := s.performKubeUpdate(currentRelease, targetRelease, req.Recreate, req.Timeout, req.Wait); err != nil {
		msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err)
		log.Printf("warning: %s", msg)
		currentRelease.Info.Status.Code = release.Status_SUPERSEDED
		targetRelease.Info.Status.Code = release.Status_FAILED
		targetRelease.Info.Description = msg
		// Record both releases so the failed rollback is visible in history.
		s.recordRelease(currentRelease, true)
		s.recordRelease(targetRelease, false)
		return res, err
	}
	// post-rollback hooks
	if !req.DisableHooks {
		if err := s.execHook(targetRelease.Hooks, targetRelease.Name, targetRelease.Namespace, hooks.PostRollback, req.Timeout); err != nil {
			return res, err
		}
	}
	currentRelease.Info.Status.Code = release.Status_SUPERSEDED
	s.recordRelease(currentRelease, true)
	targetRelease.Info.Status.Code = release.Status_DEPLOYED
	return res, nil
}
// performKubeUpdate pushes the difference between the current and target
// release manifests to Kubernetes.
func (s *ReleaseServer) performKubeUpdate(currentRelease, targetRelease *release.Release, recreate bool, timeout int64, shouldWait bool) error {
	return s.env.KubeClient.Update(
		targetRelease.Namespace,
		bytes.NewBufferString(currentRelease.Manifest),
		bytes.NewBufferString(targetRelease.Manifest),
		recreate, timeout, shouldWait)
}
// prepareRollback finds the previous release and prepares a new release object with
// the previous release's configuration.
//
// req.Version == 0 rolls back to the immediately previous revision; any
// positive value targets that exact revision. The returned target release
// carries the next revision number and is not yet recorded.
func (s *ReleaseServer) prepareRollback(req *services.RollbackReleaseRequest) (*release.Release, *release.Release, error) {
	switch {
	case !ValidName.MatchString(req.Name):
		return nil, nil, errMissingRelease
	case req.Version < 0:
		return nil, nil, errInvalidRevision
	}
	crls, err := s.env.Releases.Last(req.Name)
	if err != nil {
		return nil, nil, err
	}
	// rbv is the revision being rolled back to.
	rbv := req.Version
	if req.Version == 0 {
		rbv = crls.Version - 1
	}
	log.Printf("rolling back %s (current: v%d, target: v%d)", req.Name, crls.Version, rbv)
	prls, err := s.env.Releases.Get(req.Name, rbv)
	if err != nil {
		return nil, nil, err
	}
	// Store a new release object with previous release's configuration
	target := &release.Release{
		Name:      req.Name,
		Namespace: crls.Namespace,
		Chart:     prls.Chart,
		Config:    prls.Config,
		Info: &release.Info{
			FirstDeployed: crls.Info.FirstDeployed,
			LastDeployed:  timeconv.Now(),
			Status: &release.Status{
				Code:  release.Status_UNKNOWN,
				Notes: prls.Info.Status.Notes,
			},
			// Because we lose the reference to rbv elsewhere, we set the
			// message here, and only override it later if we experience failure.
			Description: fmt.Sprintf("Rollback to %d", rbv),
		},
		Version:  crls.Version + 1,
		Manifest: prls.Manifest,
		Hooks:    prls.Hooks,
	}
	return crls, target, nil
}
// uniqName returns a unique release name.
//
// If start is non-empty, that name is granted when it is unused, or when
// reuse is true and the latest release of that name is DELETED or FAILED.
// Otherwise a random name is generated, retrying a few times on collision.
func (s *ReleaseServer) uniqName(start string, reuse bool) (string, error) {
	// If a name is supplied, we check to see if that name is taken. If not, it
	// is granted. If reuse is true and a deleted release with that name exists,
	// we re-grant it. Otherwise, an error is returned.
	if start != "" {
		if len(start) > releaseNameMaxLen {
			return "", fmt.Errorf("release name %q exceeds max length of %d", start, releaseNameMaxLen)
		}
		h, err := s.env.Releases.History(start)
		if err != nil || len(h) < 1 {
			return start, nil
		}
		relutil.Reverse(h, relutil.SortByRevision)
		rel := h[0]
		if st := rel.Info.Status.Code; reuse && (st == release.Status_DELETED || st == release.Status_FAILED) {
			// Allow re-use of names if the previous release is marked deleted.
			log.Printf("reusing name %q", start)
			return start, nil
		} else if reuse {
			return "", errors.New("cannot re-use a name that is still in use")
		}
		return "", fmt.Errorf("a release named %q already exists.\nPlease run: helm ls --all %q; helm del --help", start, start)
	}
	maxTries := 5
	for i := 0; i < maxTries; i++ {
		namer := moniker.New()
		name := namer.NameSep("-")
		if len(name) > releaseNameMaxLen {
			name = name[:releaseNameMaxLen]
		}
		// BUG FIX: the original called err.Error() unconditionally, which
		// panics with a nil-pointer dereference when Get succeeds (i.e. the
		// generated name is already taken and err is nil). Guard with
		// err != nil so a taken name falls through to the retry log below.
		if _, err := s.env.Releases.Get(name, 1); err != nil && strings.Contains(err.Error(), "not found") {
			return name, nil
		}
		log.Printf("info: Name %q is taken. Searching again.", name)
	}
	log.Printf("warning: No available release names found after %d tries", maxTries)
	return "ERROR", errors.New("no available release name found")
}
// engine returns the rendering engine named in the chart's metadata, falling
// back to the default engine (with a warning) when none is named or the
// named engine is unknown.
func (s *ReleaseServer) engine(ch *chart.Chart) environment.Engine {
	if ch.Metadata.Engine == "" {
		return s.env.EngineYard.Default()
	}
	if r, ok := s.env.EngineYard.Get(ch.Metadata.Engine); ok {
		return r
	}
	log.Printf("warning: %s requested non-existent template engine %s", ch.Metadata.Name, ch.Metadata.Engine)
	return s.env.EngineYard.Default()
}
// InstallRelease installs a release and stores the release record.
//
// The response always carries whatever release object prepareRelease
// produced (possibly nil or partial) so clients can display debug data.
func (s *ReleaseServer) InstallRelease(c ctx.Context, req *services.InstallReleaseRequest) (*services.InstallReleaseResponse, error) {
	rel, err := s.prepareRelease(req)
	if err != nil {
		log.Printf("Failed install prepare step: %s", err)
		res := &services.InstallReleaseResponse{Release: rel}
		// On dry run, append the manifest contents to a failed release. This is
		// a stop-gap until we can revisit an error backchannel post-2.0.
		// BUG FIX: prepareRelease returns a nil release on several early
		// failure paths, so guard the rel.Manifest dereference.
		if req.DryRun && rel != nil && strings.HasPrefix(err.Error(), "YAML parse error") {
			err = fmt.Errorf("%s\n%s", err, rel.Manifest)
		}
		return res, err
	}
	res, err := s.performRelease(rel, req)
	if err != nil {
		log.Printf("Failed install perform step: %s", err)
	}
	return res, err
}
// capabilities builds a Capabilities from discovery information: the
// Kubernetes server version, the supported API versions, and Tiller's
// own version.
func capabilities(disc discovery.DiscoveryInterface) (*chartutil.Capabilities, error) {
	kubeVer, err := disc.ServerVersion()
	if err != nil {
		return nil, err
	}
	apiVers, err := getVersionSet(disc)
	if err != nil {
		return nil, fmt.Errorf("Could not get apiVersions from Kubernetes: %s", err)
	}
	return &chartutil.Capabilities{
		APIVersions:   apiVers,
		KubeVersion:   kubeVer,
		TillerVersion: version.GetVersionProto(),
	}, nil
}
// prepareRelease builds a release for an install operation.
func (s *ReleaseServer) prepareRelease(req *services.InstallReleaseRequest) (*release.Release, error) {
	if req.Chart == nil {
		return nil, errMissingChart
	}

	// Resolve (or validate) the release name; re-use rules live in uniqName.
	name, err := s.uniqName(req.Name, req.ReuseName)
	if err != nil {
		return nil, err
	}

	caps, err := capabilities(s.clientset.Discovery())
	if err != nil {
		return nil, err
	}

	// A fresh install always starts at revision 1.
	revision := 1
	ts := timeconv.Now()
	options := chartutil.ReleaseOptions{
		Name:      name,
		Time:      ts,
		Namespace: req.Namespace,
		Revision:  revision,
		IsInstall: true,
	}
	valuesToRender, err := chartutil.ToRenderValuesCaps(req.Chart, req.Values, options, caps)
	if err != nil {
		return nil, err
	}

	hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions)
	if err != nil {
		// Return a release with partial data so that client can show debugging
		// information.
		rel := &release.Release{
			Name:      name,
			Namespace: req.Namespace,
			Chart:     req.Chart,
			Config:    req.Values,
			Info: &release.Info{
				FirstDeployed: ts,
				LastDeployed:  ts,
				Status:        &release.Status{Code: release.Status_UNKNOWN},
				Description:   fmt.Sprintf("Install failed: %s", err),
			},
			Version: 0,
		}
		if manifestDoc != nil {
			rel.Manifest = manifestDoc.String()
		}
		return rel, err
	}

	// Store a release.
	rel := &release.Release{
		Name:      name,
		Namespace: req.Namespace,
		Chart:     req.Chart,
		Config:    req.Values,
		Info: &release.Info{
			FirstDeployed: ts,
			LastDeployed:  ts,
			Status:        &release.Status{Code: release.Status_UNKNOWN},
			Description:   "Initial install underway", // Will be overwritten.
		},
		Manifest: manifestDoc.String(),
		Hooks:    hooks,
		Version:  int32(revision),
	}
	if len(notesTxt) > 0 {
		rel.Info.Status.Notes = notesTxt
	}

	// Validate before handing back, so a manifest that cannot even be parsed
	// into Kubernetes objects surfaces here rather than at apply time.
	err = validateManifest(s.env.KubeClient, req.Namespace, manifestDoc.Bytes())
	return rel, err
}
// getVersionSet queries the server for its supported API group versions,
// falling back to the default version set on error or a nil response.
func getVersionSet(client discovery.ServerGroupsInterface) (chartutil.VersionSet, error) {
	groups, err := client.ServerGroups()
	if err != nil {
		return chartutil.DefaultVersionSet, err
	}

	// FIXME: The Kubernetes test fixture for cli appears to always return nil
	// for calls to Discovery().ServerGroups(). So in this case, we return
	// the default API list. This is also a safe value to return in any other
	// odd-ball case.
	if groups == nil {
		return chartutil.DefaultVersionSet, nil
	}

	return chartutil.NewVersionSet(metav1.ExtractGroupVersions(groups)...), nil
}
// renderResources renders the chart's templates and splits the result into
// hooks, one aggregated manifest document, and the chart's NOTES.txt text.
func (s *ReleaseServer) renderResources(ch *chart.Chart, values chartutil.Values, vs chartutil.VersionSet) ([]*release.Hook, *bytes.Buffer, string, error) {
	// Guard to make sure Tiller is at the right version to handle this chart.
	sver := version.GetVersion()
	if ch.Metadata.TillerVersion != "" &&
		!version.IsCompatibleRange(ch.Metadata.TillerVersion, sver) {
		return nil, nil, "", fmt.Errorf("Chart incompatible with Tiller %s", sver)
	}

	renderer := s.engine(ch)
	files, err := renderer.Render(ch, values)
	if err != nil {
		return nil, nil, "", err
	}

	// NOTES.txt gets rendered like all the other files, but because it's not a hook nor a resource,
	// pull it out of here into a separate file so that we can actually use the output of the rendered
	// text file. We have to spin through this map because the file contains path information, so we
	// look for terminating NOTES.txt. We also remove it from the files so that we don't have to skip
	// it in the sortHooks.
	notes := ""
	for k, v := range files {
		if strings.HasSuffix(k, notesFileSuffix) {
			// Only apply the notes if it belongs to the parent chart
			// Note: Do not use filePath.Join since it creates a path with \ which is not expected
			if k == path.Join(ch.Metadata.Name, "templates", notesFileSuffix) {
				notes = v
			}
			delete(files, k)
		}
	}

	// Sort hooks, manifests, and partials. Only hooks and manifests are returned,
	// as partials are not used after renderer.Render. Empty manifests are also
	// removed here.
	hooks, manifests, err := sortManifests(files, vs, InstallOrder)
	if err != nil {
		// By catching parse errors here, we can prevent bogus releases from going
		// to Kubernetes.
		//
		// We return the files as a big blob of data to help the user debug parser
		// errors.
		b := bytes.NewBuffer(nil)
		for name, content := range files {
			if len(strings.TrimSpace(content)) == 0 {
				continue
			}
			b.WriteString("\n---\n# Source: " + name + "\n")
			b.WriteString(content)
		}
		return nil, b, "", err
	}

	// Aggregate all valid manifests into one big doc.
	b := bytes.NewBuffer(nil)
	for _, m := range manifests {
		b.WriteString("\n---\n# Source: " + m.name + "\n")
		b.WriteString(m.content)
	}

	return hooks, b, notes, nil
}
// recordRelease persists r: an update of the existing record when reuse is
// set, otherwise a new record. Storage failures are logged, not returned.
func (s *ReleaseServer) recordRelease(r *release.Release, reuse bool) {
	if reuse {
		if err := s.env.Releases.Update(r); err != nil {
			log.Printf("warning: Failed to update release %q: %s", r.Name, err)
		}
		return
	}
	if err := s.env.Releases.Create(r); err != nil {
		log.Printf("warning: Failed to record release %q: %s", r.Name, err)
	}
}
// performRelease runs a release.
func (s *ReleaseServer) performRelease(r *release.Release, req *services.InstallReleaseRequest) (*services.InstallReleaseResponse, error) {
	res := &services.InstallReleaseResponse{Release: r}

	// A dry run stops here: nothing is sent to the cluster or the store.
	if req.DryRun {
		log.Printf("Dry run for %s", r.Name)
		res.Release.Info.Description = "Dry run complete"
		return res, nil
	}

	// pre-install hooks
	if !req.DisableHooks {
		if err := s.execHook(r.Hooks, r.Name, r.Namespace, hooks.PreInstall, req.Timeout); err != nil {
			return res, err
		}
	}

	switch h, err := s.env.Releases.History(req.Name); {
	// if this is a replace operation, append to the release history
	case req.ReuseName && err == nil && len(h) >= 1:
		// get latest release revision
		relutil.Reverse(h, relutil.SortByRevision)

		// old release
		old := h[0]

		// update old release status
		old.Info.Status.Code = release.Status_SUPERSEDED
		s.recordRelease(old, true)

		// update new release with next revision number
		// so as to append to the old release's history
		r.Version = old.Version + 1

		if err := s.performKubeUpdate(old, r, false, req.Timeout, req.Wait); err != nil {
			msg := fmt.Sprintf("Release replace %q failed: %s", r.Name, err)
			log.Printf("warning: %s", msg)
			// Record both the superseded old release and the failed new one.
			old.Info.Status.Code = release.Status_SUPERSEDED
			r.Info.Status.Code = release.Status_FAILED
			r.Info.Description = msg
			s.recordRelease(old, true)
			s.recordRelease(r, false)
			return res, err
		}

	default:
		// nothing to replace, create as normal
		// regular manifests
		b := bytes.NewBufferString(r.Manifest)
		if err := s.env.KubeClient.Create(r.Namespace, b, req.Timeout, req.Wait); err != nil {
			msg := fmt.Sprintf("Release %q failed: %s", r.Name, err)
			log.Printf("warning: %s", msg)
			r.Info.Status.Code = release.Status_FAILED
			r.Info.Description = msg
			s.recordRelease(r, false)
			return res, fmt.Errorf("release %s failed: %s", r.Name, err)
		}
	}

	// post-install hooks
	if !req.DisableHooks {
		if err := s.execHook(r.Hooks, r.Name, r.Namespace, hooks.PostInstall, req.Timeout); err != nil {
			msg := fmt.Sprintf("Release %q failed post-install: %s", r.Name, err)
			log.Printf("warning: %s", msg)
			r.Info.Status.Code = release.Status_FAILED
			r.Info.Description = msg
			s.recordRelease(r, false)
			return res, err
		}
	}

	r.Info.Status.Code = release.Status_DEPLOYED
	r.Info.Description = "Install complete"

	// This is a tricky case. The release has been created, but the result
	// cannot be recorded. The truest thing to tell the user is that the
	// release was created. However, the user will not be able to do anything
	// further with this release.
	//
	// One possible strategy would be to do a timed retry to see if we can get
	// this stored in the future.
	s.recordRelease(r, false)

	return res, nil
}
// execHook runs the hooks of the named release that subscribe to the given
// lifecycle event (e.g. pre-install) in hook-weight order: each hook's
// manifest is created and then watched via WatchUntilReady before moving on.
func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook string, timeout int64) error {
	kubeCli := s.env.KubeClient
	code, ok := events[hook]
	if !ok {
		return fmt.Errorf("unknown hook %q", hook)
	}

	log.Printf("Executing %s hooks for %s", hook, name)
	// Select only the hooks subscribed to this event.
	executingHooks := []*release.Hook{}
	for _, h := range hs {
		for _, e := range h.Events {
			if e == code {
				executingHooks = append(executingHooks, h)
			}
		}
	}

	executingHooks = sortByHookWeight(executingHooks)

	for _, h := range executingHooks {
		b := bytes.NewBufferString(h.Manifest)
		if err := kubeCli.Create(namespace, b, timeout, false); err != nil {
			log.Printf("warning: Release %q %s %s failed: %s", name, hook, h.Path, err)
			return err
		}
		// No way to rewind a bytes.Buffer()?
		// Create consumed the buffer, so refill it for the watch call.
		b.Reset()
		b.WriteString(h.Manifest)
		if err := kubeCli.WatchUntilReady(namespace, b, timeout, false); err != nil {
			log.Printf("warning: Release %q %s %s could not complete: %s", name, hook, h.Path, err)
			return err
		}
		h.LastRun = timeconv.Now()
	}

	log.Printf("Hooks complete for %s %s", hook, name)
	return nil
}
// purgeReleases removes every given release record from storage, stopping
// at the first deletion error.
func (s *ReleaseServer) purgeReleases(rels ...*release.Release) error {
	for _, rel := range rels {
		_, err := s.env.Releases.Delete(rel.Name, rel.Version)
		if err != nil {
			return err
		}
	}
	return nil
}
// UninstallRelease deletes all of the resources associated with this release, and marks the release DELETED.
func (s *ReleaseServer) UninstallRelease(c ctx.Context, req *services.UninstallReleaseRequest) (*services.UninstallReleaseResponse, error) {
	if !ValidName.MatchString(req.Name) {
		log.Printf("uninstall: Release not found: %s", req.Name)
		return nil, errMissingRelease
	}
	if len(req.Name) > releaseNameMaxLen {
		return nil, fmt.Errorf("release name %q exceeds max length of %d", req.Name, releaseNameMaxLen)
	}

	rels, err := s.env.Releases.History(req.Name)
	if err != nil {
		log.Printf("uninstall: Release not loaded: %s", req.Name)
		return nil, err
	}
	if len(rels) < 1 {
		return nil, errMissingRelease
	}

	// Operate on the latest revision of the release.
	relutil.SortByRevision(rels)
	rel := rels[len(rels)-1]

	// TODO: Are there any cases where we want to force a delete even if it's
	// already marked deleted?
	if rel.Info.Status.Code == release.Status_DELETED {
		if req.Purge {
			if err := s.purgeReleases(rels...); err != nil {
				log.Printf("uninstall: Failed to purge the release: %s", err)
				return nil, err
			}
			return &services.UninstallReleaseResponse{Release: rel}, nil
		}
		return nil, fmt.Errorf("the release named %q is already deleted", req.Name)
	}

	log.Printf("uninstall: Deleting %s", req.Name)
	rel.Info.Status.Code = release.Status_DELETING
	rel.Info.Deleted = timeconv.Now()
	rel.Info.Description = "Deletion in progress (or silently failed)"
	res := &services.UninstallReleaseResponse{Release: rel}

	if !req.DisableHooks {
		if err := s.execHook(rel.Hooks, rel.Name, rel.Namespace, hooks.PreDelete, req.Timeout); err != nil {
			return res, err
		}
	}

	vs, err := getVersionSet(s.clientset.Discovery())
	if err != nil {
		return nil, fmt.Errorf("Could not get apiVersions from Kubernetes: %s", err)
	}

	// From here on out, the release is currently considered to be in Status_DELETING
	// state.
	if err := s.env.Releases.Update(rel); err != nil {
		log.Printf("uninstall: Failed to store updated release: %s", err)
	}

	// Split the recorded manifest and order the pieces for uninstall.
	manifests := relutil.SplitManifests(rel.Manifest)
	_, files, err := sortManifests(manifests, vs, UninstallOrder)
	if err != nil {
		// We could instead just delete everything in no particular order.
		// FIXME: One way to delete at this point would be to try a label-based
		// deletion. The problem with this is that we could get a false positive
		// and delete something that was not legitimately part of this release.
		return nil, fmt.Errorf("corrupted release record. You must manually delete the resources: %s", err)
	}

	filesToKeep, filesToDelete := filterManifestsToKeep(files)
	if len(filesToKeep) > 0 {
		res.Info = summarizeKeptManifests(filesToKeep)
	}

	// Collect the errors, and return them later.
	es := []string{}
	for _, file := range filesToDelete {
		b := bytes.NewBufferString(strings.TrimSpace(file.content))
		if b.Len() == 0 {
			continue
		}
		if err := s.env.KubeClient.Delete(rel.Namespace, b); err != nil {
			log.Printf("uninstall: Failed deletion of %q: %s", req.Name, err)
			if err == kube.ErrNoObjectsVisited {
				// Rewrite the message from "no objects visited"
				err = errors.New("object not found, skipping delete")
			}
			es = append(es, err.Error())
		}
	}

	if !req.DisableHooks {
		if err := s.execHook(rel.Hooks, rel.Name, rel.Namespace, hooks.PostDelete, req.Timeout); err != nil {
			es = append(es, err.Error())
		}
	}

	rel.Info.Status.Code = release.Status_DELETED
	rel.Info.Description = "Deletion complete"

	if req.Purge {
		err := s.purgeReleases(rels...)
		if err != nil {
			log.Printf("uninstall: Failed to purge the release: %s", err)
		}
		return res, err
	}

	if err := s.env.Releases.Update(rel); err != nil {
		log.Printf("uninstall: Failed to store updated release: %s", err)
	}

	if len(es) > 0 {
		return res, fmt.Errorf("deletion completed with %d error(s): %s", len(es), strings.Join(es, "; "))
	}
	return res, nil
}
// validateManifest checks that manifest can be built into unstructured
// Kubernetes objects; a parse failure is returned as the error.
func validateManifest(c environment.KubeClient, ns string, manifest []byte) error {
	_, err := c.BuildUnstructured(ns, bytes.NewReader(manifest))
	return err
}
// RunReleaseTest runs pre-defined tests stored as hooks on a given release
func (s *ReleaseServer) RunReleaseTest(req *services.TestReleaseRequest, stream services.ReleaseService_RunReleaseTestServer) error {
	if !ValidName.MatchString(req.Name) {
		return errMissingRelease
	}

	// finds the non-deleted release with the given name
	rel, err := s.env.Releases.Last(req.Name)
	if err != nil {
		return err
	}

	// Stream test progress/results back to the caller over gRPC.
	testEnv := &reltesting.Environment{
		Namespace:  rel.Namespace,
		KubeClient: s.env.KubeClient,
		Timeout:    req.Timeout,
		Stream:     stream,
	}

	tSuite, err := reltesting.NewTestSuite(rel)
	if err != nil {
		log.Printf("Error creating test suite for %s", rel.Name)
		return err
	}

	if err := tSuite.Run(testEnv); err != nil {
		log.Printf("Error running test suite for %s", rel.Name)
		return err
	}

	// Record the suite outcome on the release.
	rel.Info.Status.LastTestSuiteRun = &release.TestSuite{
		StartedAt:   tSuite.StartedAt,
		CompletedAt: tSuite.CompletedAt,
		Results:     tSuite.Results,
	}

	if req.Cleanup {
		testEnv.DeleteTestPods(tSuite.TestManifests)
	}

	return s.env.Releases.Update(rel)
}
|
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sets
import (
"fmt"
"testing"
"istio.io/istio/pkg/test/util/assert"
)
// TestNewSet verifies that New builds a set containing exactly the given elements.
func TestNewSet(t *testing.T) {
	elements := []string{"a", "b", "c"}
	set := New(elements...)
	if got, want := len(set), len(elements); got != want {
		t.Errorf("Expected length %d != %d", got, want)
	}
	for _, e := range elements {
		if _, ok := set[e]; !ok {
			t.Errorf("%s is not in set %v", e, set)
		}
	}
}
// TestUnion checks Union in both argument orders.
func TestUnion(t *testing.T) {
	elements := []string{"a", "b", "c", "d"}
	elements2 := []string{"a", "b", "e"}
	want := New("a", "b", "c", "d", "e")
	pairs := [][]Set{
		{New(elements...), New(elements2...)},
		{New(elements2...), New(elements...)},
	}
	for _, pair := range pairs {
		first, second := pair[0], pair[1]
		if got := first.Union(second); !got.Equals(want) {
			t.Errorf("expected %v; got %v", want, got)
		}
	}
}
// TestDifference checks that Difference keeps only elements absent from the
// second set.
func TestDifference(t *testing.T) {
	s1 := New("a", "b", "c", "d")
	s2 := New("a", "b", "e")
	d := s1.Difference(s2)
	if len(d) != 2 {
		t.Errorf("Expected len=2: %d", len(d))
	}
	if _, ok := d["c"]; !ok {
		t.Errorf("c is not in %v", d)
	}
	if _, ok := d["d"]; !ok {
		t.Errorf("d is not in %v", d)
	}
}
// TestIntersection checks that Intersection keeps only shared elements.
func TestIntersection(t *testing.T) {
	s1 := New("a", "b", "d")
	s2 := New("a", "b", "c")
	d := s1.Intersection(s2)
	if len(d) != 2 {
		t.Errorf("Expected len=2: %d", len(d))
	}
	if _, ok := d["a"]; !ok {
		t.Errorf("a is not in %v", d)
	}
	if _, ok := d["b"]; !ok {
		t.Errorf("b is not in %v", d)
	}
}
// TestSupersetOf exercises SupersetOf for a proper superset and for the
// empty-set / empty-set case.
func TestSupersetOf(t *testing.T) {
	elements := []string{"a", "b", "c", "d"}
	s1 := New(elements...)
	elements2 := []string{"a", "b"}
	s2 := New(elements2...)
	if !s1.SupersetOf(s2) {
		t.Errorf("%v should be superset of %v", s1.SortedList(), s2.SortedList())
	}
	s3 := New()
	if !New().SupersetOf(s3) {
		// Print the whole (empty) sorted list. The previous code indexed
		// SortedList()[0] of an empty set, which panicked with "index out of
		// range" inside this branch and masked the real test failure.
		fmt.Printf("%q\n", s3.SortedList())
		t.Errorf("%v should be superset of empty set", s1.SortedList())
	}
}
// TestEquals is a table test covering nil, unequal-length, equal and
// unequal sets.
func TestEquals(t *testing.T) {
	cases := []struct {
		name   string
		first  Set
		second Set
		want   bool
	}{
		{"both nil", nil, nil, true},
		{"unequal length", New("test"), New("test", "test1"), false},
		{"equal sets", New("test", "test1"), New("test", "test1"), true},
		{"unequal sets", New("test", "test1"), New("test", "test2"), false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := tc.first.Equals(tc.second)
			if got != tc.want {
				t.Errorf("Unexpected Equal. got %v, want %v", got, tc.want)
			}
		})
	}
}
// TestMerge checks Merge for identical, overlapping and disjoint sets.
func TestMerge(t *testing.T) {
	cases := []struct {
		s1, s2   Set
		expected []string
	}{
		{New("a1", "a2"), New("a1", "a2"), []string{"a1", "a2"}},
		{New("a1", "a2", "a3"), New("a1", "a2"), []string{"a1", "a2", "a3"}},
		{New("a1", "a2"), New("a3", "a4"), []string{"a1", "a2", "a3", "a4"}},
	}
	for _, tc := range cases {
		merged := tc.s1.Merge(tc.s2)
		assert.Equal(t, tc.expected, merged.SortedList())
	}
}
improve test coverage for string (#39064)
Signed-off-by: AllenZMC <95e483674d6aaddca68d52cac2554f8971532662@daocloud.io>
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sets
import (
"fmt"
"reflect"
"testing"
"istio.io/istio/pkg/test/util/assert"
)
// TestNewSet verifies that New builds a set containing exactly the given elements.
func TestNewSet(t *testing.T) {
	elements := []string{"a", "b", "c"}
	set := New(elements...)
	if len(set) != len(elements) {
		t.Errorf("Expected length %d != %d", len(set), len(elements))
	}
	for _, e := range elements {
		if _, exist := set[e]; !exist {
			t.Errorf("%s is not in set %v", e, set)
		}
	}
}
// TestUnion checks Union in both argument orders.
func TestUnion(t *testing.T) {
	elements := []string{"a", "b", "c", "d"}
	elements2 := []string{"a", "b", "e"}
	want := New("a", "b", "c", "d", "e")
	for _, sets := range [][]Set{
		{New(elements...), New(elements2...)},
		{New(elements2...), New(elements...)},
	} {
		s1, s2 := sets[0], sets[1]
		if got := s1.Union(s2); !got.Equals(want) {
			t.Errorf("expected %v; got %v", want, got)
		}
	}
}
// TestDifference checks that Difference keeps only elements absent from the
// second set.
func TestDifference(t *testing.T) {
	elements := []string{"a", "b", "c", "d"}
	s1 := New(elements...)
	elements2 := []string{"a", "b", "e"}
	s2 := New(elements2...)
	d := s1.Difference(s2)
	if len(d) != 2 {
		t.Errorf("Expected len=2: %d", len(d))
	}
	if _, exist := d["c"]; !exist {
		t.Errorf("c is not in %v", d)
	}
	if _, exist := d["d"]; !exist {
		t.Errorf("d is not in %v", d)
	}
}
// TestIntersection checks that Intersection keeps only shared elements.
func TestIntersection(t *testing.T) {
	elements := []string{"a", "b", "d"}
	s1 := New(elements...)
	elements2 := []string{"a", "b", "c"}
	s2 := New(elements2...)
	d := s1.Intersection(s2)
	if len(d) != 2 {
		t.Errorf("Expected len=2: %d", len(d))
	}
	if _, exist := d["a"]; !exist {
		t.Errorf("a is not in %v", d)
	}
	if _, exist := d["b"]; !exist {
		t.Errorf("b is not in %v", d)
	}
}
// TestSupersetOf exercises SupersetOf for a proper superset and for the
// empty-set / empty-set case.
func TestSupersetOf(t *testing.T) {
	elements := []string{"a", "b", "c", "d"}
	s1 := New(elements...)
	elements2 := []string{"a", "b"}
	s2 := New(elements2...)
	if !s1.SupersetOf(s2) {
		t.Errorf("%v should be superset of %v", s1.SortedList(), s2.SortedList())
	}
	s3 := New()
	if !New().SupersetOf(s3) {
		// Print the whole (empty) sorted list. The previous code indexed
		// SortedList()[0] of an empty set, which panicked with "index out of
		// range" inside this branch and masked the real test failure.
		fmt.Printf("%q\n", s3.SortedList())
		t.Errorf("%v should be superset of empty set", s1.SortedList())
	}
}
// TestEquals is a table test covering nil, unequal-length, equal and
// unequal sets.
func TestEquals(t *testing.T) {
	tests := []struct {
		name   string
		first  Set
		second Set
		want   bool
	}{
		{
			"both nil",
			nil,
			nil,
			true,
		},
		{
			"unequal length",
			New("test"),
			New("test", "test1"),
			false,
		},
		{
			"equal sets",
			New("test", "test1"),
			New("test", "test1"),
			true,
		},
		{
			"unequal sets",
			New("test", "test1"),
			New("test", "test2"),
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.first.Equals(tt.second); got != tt.want {
				t.Errorf("Unexpected Equal. got %v, want %v", got, tt.want)
			}
		})
	}
}
// TestMerge checks Merge for identical, overlapping and disjoint sets.
func TestMerge(t *testing.T) {
	cases := []struct {
		s1, s2   Set
		expected []string
	}{
		{
			s1:       New("a1", "a2"),
			s2:       New("a1", "a2"),
			expected: []string{"a1", "a2"},
		},
		{
			s1:       New("a1", "a2", "a3"),
			s2:       New("a1", "a2"),
			expected: []string{"a1", "a2", "a3"},
		},
		{
			s1:       New("a1", "a2"),
			s2:       New("a3", "a4"),
			expected: []string{"a1", "a2", "a3", "a4"},
		},
	}
	for _, tc := range cases {
		got := tc.s1.Merge(tc.s2)
		assert.Equal(t, tc.expected, got.SortedList())
	}
}
// TestInsertAll checks inserting a new item and a duplicate item.
func TestInsertAll(t *testing.T) {
	cases := []struct {
		name  string
		s     Set
		items []string
		want  Set
	}{
		{name: "insert new item", s: New("a1", "a2"), items: []string{"a3"}, want: New("a1", "a2", "a3")},
		{name: "inserted item already exists", s: New("a1", "a2"), items: []string{"a1"}, want: New("a1", "a2")},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := tc.s.InsertAll(tc.items...)
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("InsertAll() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
package pq
import (
"bytes"
"encoding/binary"
"github.com/lib/pq/oid"
)
// readBuf is a big-endian protocol buffer that is consumed from the front
// as fields are decoded; each accessor advances the slice past what it read.
type readBuf []byte

// int32 decodes a big-endian signed 32-bit integer and advances the buffer.
func (b *readBuf) int32() (n int) {
	n = int(int32(binary.BigEndian.Uint32(*b)))
	*b = (*b)[4:]
	return
}

// oid decodes a big-endian 32-bit object ID and advances the buffer.
func (b *readBuf) oid() (n oid.Oid) {
	n = oid.Oid(binary.BigEndian.Uint32(*b))
	*b = (*b)[4:]
	return
}

// int16 decodes a big-endian unsigned 16-bit integer and advances the buffer.
func (b *readBuf) int16() (n int) {
	n = int(binary.BigEndian.Uint16(*b))
	*b = (*b)[2:]
	return
}

// string decodes a NUL-terminated string and advances the buffer past the
// terminator. bytes.IndexByte replaces the previous bytes.Index call against
// a package-level one-byte slice (stringTerm), which is now unnecessary.
func (b *readBuf) string() string {
	i := bytes.IndexByte(*b, 0)
	if i < 0 {
		errorf("invalid message format; expected string terminator")
	}
	s := (*b)[:i]
	*b = (*b)[i+1:]
	return string(s)
}

// next returns the next n bytes and advances the buffer past them.
func (b *readBuf) next(n int) (v []byte) {
	v = (*b)[:n]
	*b = (*b)[n:]
	return
}

// byte returns the next single byte and advances the buffer.
func (b *readBuf) byte() byte {
	return b.next(1)[0]
}
// writeBuf accumulates an outgoing protocol message. Byte 0 holds the
// message-type byte; bytes 1-4 are zeroed by newWriteBuf — presumably a
// placeholder for a length word filled in at send time (confirm at use site).
type writeBuf []byte

// newWriteBuf starts a message of type c with four reserved bytes.
func newWriteBuf(c byte) *writeBuf {
	b := writeBuf{c, 0, 0, 0, 0}
	return &b
}

// int32 appends n as a big-endian 32-bit value.
func (b *writeBuf) int32(n int) {
	var x [4]byte
	binary.BigEndian.PutUint32(x[:], uint32(n))
	*b = append(*b, x[:]...)
}

// int16 appends n as a big-endian 16-bit value.
func (b *writeBuf) int16(n int) {
	var x [2]byte
	binary.BigEndian.PutUint16(x[:], uint16(n))
	*b = append(*b, x[:]...)
}

// string appends s followed by a NUL terminator.
func (b *writeBuf) string(s string) {
	*b = append(*b, s...)
	*b = append(*b, 0)
}

// byte appends the single byte c.
func (b *writeBuf) byte(c byte) {
	*b = append(*b, c)
}

// bytes appends the raw slice v.
func (b *writeBuf) bytes(v []byte) {
	*b = append(*b, v...)
}
Use IndexByte to find string terminator
No performance gain since bytes.Index special cases this anyway, but it means
stringTerm can go away.
package pq
import (
"bytes"
"encoding/binary"
"github.com/lib/pq/oid"
)
// readBuf is a big-endian protocol buffer that is consumed from the front
// as fields are decoded; each accessor advances the slice past what it read.
type readBuf []byte

// int32 decodes a big-endian signed 32-bit integer and advances the buffer.
func (b *readBuf) int32() (n int) {
	n = int(int32(binary.BigEndian.Uint32(*b)))
	*b = (*b)[4:]
	return
}

// oid decodes a big-endian 32-bit object ID and advances the buffer.
func (b *readBuf) oid() (n oid.Oid) {
	n = oid.Oid(binary.BigEndian.Uint32(*b))
	*b = (*b)[4:]
	return
}

// int16 decodes a big-endian unsigned 16-bit integer and advances the buffer.
func (b *readBuf) int16() (n int) {
	n = int(binary.BigEndian.Uint16(*b))
	*b = (*b)[2:]
	return
}

// string decodes a NUL-terminated string and advances the buffer past the
// terminator; a missing terminator is reported via errorf.
func (b *readBuf) string() string {
	i := bytes.IndexByte(*b, 0)
	if i < 0 {
		errorf("invalid message format; expected string terminator")
	}
	s := (*b)[:i]
	*b = (*b)[i+1:]
	return string(s)
}

// next returns the next n bytes and advances the buffer past them.
func (b *readBuf) next(n int) (v []byte) {
	v = (*b)[:n]
	*b = (*b)[n:]
	return
}

// byte returns the next single byte and advances the buffer.
func (b *readBuf) byte() byte {
	return b.next(1)[0]
}
// writeBuf accumulates an outgoing protocol message. Byte 0 holds the
// message-type byte; bytes 1-4 are zeroed by newWriteBuf — presumably a
// placeholder for a length word filled in at send time (confirm at use site).
type writeBuf []byte

// newWriteBuf starts a message of type c with four reserved bytes.
func newWriteBuf(c byte) *writeBuf {
	b := make(writeBuf, 5)
	b[0] = c
	return &b
}

// int32 appends n as a big-endian 32-bit value.
func (b *writeBuf) int32(n int) {
	x := make([]byte, 4)
	binary.BigEndian.PutUint32(x, uint32(n))
	*b = append(*b, x...)
}

// int16 appends n as a big-endian 16-bit value.
func (b *writeBuf) int16(n int) {
	x := make([]byte, 2)
	binary.BigEndian.PutUint16(x, uint16(n))
	*b = append(*b, x...)
}

// string appends s followed by a NUL terminator.
func (b *writeBuf) string(s string) {
	*b = append(*b, (s + "\000")...)
}

// byte appends the single byte c.
func (b *writeBuf) byte(c byte) {
	*b = append(*b, c)
}

// bytes appends the raw slice v.
func (b *writeBuf) bytes(v []byte) {
	*b = append(*b, v...)
}
|
package main
import (
"fmt"
)
// buy is a stub: it always returns an error reporting that it was invoked.
func buy(args []string) error {
	return fmt.Errorf("buy called")
}
updated buy.go
package main
import (
"flag"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
)
// buy asks the factomclient to convert factoids into entry credits.
// args mirrors os.Args: args[0] is the command name, the first non-flag
// argument is the amount to transfer, and -s selects the factomclient
// host:port. The server's response body is printed to stdout.
func buy(args []string) error {
	os.Args = args
	var (
		serv = flag.String("s", "localhost:8088", "path to the factomclient")
	)
	flag.Parse()
	// Use the flag package's view of positional arguments so that the
	// amount is found even when flags precede it (the old len(args)<2 /
	// args[1] logic treated "-s" itself as the amount).
	if flag.NArg() < 1 {
		return fmt.Errorf("the amount of factoids to be transferred must be specified")
	}
	amt := flag.Arg(0)

	server := "http://" + *serv + "/v1/buycredit"
	data := url.Values{
		// NOTE: "ammount" (sic) is the key the server-side API expects;
		// do not correct the spelling here without changing the server.
		"to":      {"wallet"},
		"ammount": {amt},
	}

	resp, err := http.PostForm(server, data)
	if err != nil {
		return err
	}
	p, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	// Previously a read error was silently discarded and a partial (or
	// empty) body printed; propagate it instead.
	if err != nil {
		return err
	}
	fmt.Println(string(p))
	return nil
}
|
package cec
import(
"log"
"time"
"strings"
)
// Device describes a CEC device discovered on the bus (see List).
type Device struct {
	OSDName         string // on-screen-display name reported by the device
	Vendor          string // vendor name resolved from the device's vendor ID
	LogicalAddress  int    // CEC logical address (0-15)
	ActiveSource    bool   // whether the device is currently the active source
	PowerStatus     string // power status string reported by the device
	PhysicalAddress string // CEC physical address of the device
}
// logicalNames maps a CEC logical address (0-15) to a human-readable name.
var logicalNames = []string{"TV", "Recording", "Recording2", "Tuner",
	"Playback", "Audio", "Tuner2", "Tuner3",
	"Playback2", "Recording3", "Tuner4", "Playback3",
	"Reserved", "Reserved2", "Free", "Broadcast"}

// vendorList maps IEEE OUI vendor IDs to vendor names.
var vendorList = map[uint64]string{0x000039: "Toshiba", 0x0000F0: "Samsung",
	0x0005CD: "Denon", 0x000678: "Marantz", 0x000982: "Loewe", 0x0009B0: "Onkyo",
	0x000CB8: "Medion", 0x000CE7: "Toshiba", 0x001582: "Pulse Eight",
	0x0020C7: "Akai", 0x002467: "Aoc", 0x008045: "Panasonic", 0x00903E: "Philips",
	0x009053: "Daewoo", 0x00A0DE: "Yamaha", 0x00D0D5: "Grundig",
	0x00E036: "Pioneer", 0x00E091: "LG", 0x08001F: "Sharp", 0x080046: "Sony",
	0x18C086: "Broadcom", 0x6B746D: "Vizio", 0x8065E9: "Benq",
	0x9C645E: "Harman Kardon"}

// keyList maps CEC user-control key codes to key names; GetKeyCodeByName
// performs the reverse (case- and separator-insensitive) lookup.
var keyList = map[int]string{0x00: "Select", 0x01: "Up", 0x02: "Down", 0x03: "Left",
	0x04: "Right", 0x05: "RightUp", 0x06: "RightDown", 0x07: "LeftUp",
	0x08: "LeftDown", 0x09: "RootMenu", 0x0A: "SetupMenu", 0x0B: "ContentsMenu",
	0x0C: "FavoriteMenu", 0x0D: "Exit", 0x20: "0", 0x21: "1", 0x22: "2", 0x23: "3",
	0x24: "4", 0x25: "5", 0x26: "6", 0x27: "7", 0x28: "8", 0x29: "9", 0x2A: "Dot",
	0x2B: "Enter", 0x2C: "Clear", 0x2F: "NextFavorite", 0x30: "ChannelUp",
	0x31: "ChannelDown", 0x32: "PreviousChannel", 0x33: "SoundSelect",
	0x34: "InputSelect", 0x35: "DisplayInformation", 0x36: "Help",
	0x37: "PageUp", 0x38: "PageDown", 0x40: "Power", 0x41: "VolumeUp",
	0x42: "VolumeDown", 0x43: "Mute", 0x44: "Play", 0x45: "Stop", 0x46: "Pause",
	0x47: "Record", 0x48: "Rewind", 0x49: "FastForward", 0x4A: "Eject",
	0x4B: "Forward", 0x4C: "Backward", 0x4D: "StopRecord", 0x4E: "PauseRecord",
	0x50: "Angle", 0x51: "SubPicture", 0x52: "VideoOnDemand",
	0x53: "ElectronicProgramGuide", 0x54: "TimerProgramming",
	0x55: "InitialConfiguration", 0x60: "PlayFunction", 0x61: "PausePlay",
	0x62: "RecordFunction", 0x63: "PauseRecordFunction",
	0x64: "StopFunction", 0x65: "Mute",
	0x66: "RestoreVolume", 0x67: "Tune", 0x68: "SelectMedia",
	0x69: "SelectAvInput", 0x6A: "SelectAudioInput", 0x6B: "PowerToggle",
	0x6C: "PowerOff", 0x6D: "PowerOn", 0x71: "Blue", 0X72: "Red", 0x73: "Green",
	0x74: "Yellow", 0x75: "F5", 0x76: "Data", 0x91: "AnReturn",
	0x96: "Max"}
// Open initialises libCEC with the given OSD device name and opens the
// adapter matching name. Errors are logged and swallowed, not returned.
func Open(name string, deviceName string) {
	var config CECConfiguration
	config.DeviceName = deviceName

	if er := cecInit(config); er != nil {
		log.Println(er)
		return
	}

	adapter, er := getAdapter(name)
	if er != nil {
		log.Println(er)
		return
	}

	er = openAdapter(adapter)
	if er != nil {
		log.Println(er)
		return
	}
}
// Key sends a key press followed (after a short pause) by a key release to
// the device at the given logical address. Errors are logged, not returned.
func Key(address int, key string) {
	code := GetKeyCodeByName(key)
	if err := KeyPress(address, code); err != nil {
		log.Println(err)
		return
	}
	time.Sleep(10 * time.Millisecond)
	if err := KeyRelease(address); err != nil {
		log.Println(err)
	}
}
// List returns the active devices on the CEC bus, keyed by their logical
// device name (e.g. "TV", "Playback").
func List() map[string]Device {
	devices := make(map[string]Device)
	for address, active := range GetActiveDevices() {
		if !active {
			continue
		}
		devices[logicalNames[address]] = Device{
			LogicalAddress:  address,
			PhysicalAddress: GetDevicePhysicalAddress(address),
			OSDName:         GetDeviceOSDName(address),
			PowerStatus:     GetDevicePowerStatus(address),
			ActiveSource:    IsActiveSource(address),
			Vendor:          GetVendorById(GetDeviceVendorId(address)),
		}
	}
	return devices
}
// removeSeparators strips the separator characters ":", "-", " " and "_"
// from in, so lookups can ignore formatting differences.
func removeSeparators(in string) string {
	drop := func(r rune) rune {
		switch r {
		case ':', '-', '_', ' ':
			return -1
		}
		return r
	}
	return strings.Map(drop, in)
}
// GetKeyCodeByName resolves a key name (case- and separator-insensitive)
// to its CEC key code, or -1 when the name is unknown.
func GetKeyCodeByName(name string) int {
	target := strings.ToLower(removeSeparators(name))
	for code, keyName := range keyList {
		if strings.ToLower(keyName) == target {
			return code
		}
	}
	return -1
}
// GetLogicalAddressByName resolves a logical-device name ("TV",
// "Playback2", ...) to its logical address, or -1 when unknown. A trailing
// '1' is dropped so e.g. "Tuner1" matches "Tuner". "unregistered" maps to 15.
func GetLogicalAddressByName(name string) int {
	name = removeSeparators(name)
	// Guard: the previous code indexed name[len(name)-1] unconditionally
	// and panicked when the (separator-stripped) name was empty.
	if name == "" {
		return -1
	}
	if l := len(name); name[l-1] == '1' {
		name = name[:l-1]
	}
	name = strings.ToLower(name)
	for i := 0; i < 16; i++ {
		if strings.ToLower(logicalNames[i]) == name {
			return i
		}
	}
	if name == "unregistered" {
		return 15
	}
	return -1
}
// GetLogicalNameByAddress returns the logical-device name for addr.
// Out-of-range addresses return "" instead of panicking on the index.
func GetLogicalNameByAddress(addr int) string {
	if addr < 0 || addr >= len(logicalNames) {
		return ""
	}
	return logicalNames[addr]
}
// GetVendorById maps an IEEE OUI vendor ID to a vendor name; unknown IDs
// yield "" (the map zero value).
func GetVendorById(id uint64) string {
	return vendorList[id]
}
make Key function more flexible
package cec
import(
"log"
"encoding/hex"
"time"
"strings"
)
// Device describes a CEC device discovered on the bus (see List).
type Device struct {
	OSDName         string // on-screen-display name reported by the device
	Vendor          string // vendor name resolved from the device's vendor ID
	LogicalAddress  int    // CEC logical address (0-15)
	ActiveSource    bool   // whether the device is currently the active source
	PowerStatus     string // power status string reported by the device
	PhysicalAddress string // CEC physical address of the device
}
// logicalNames maps a CEC logical address (0-15) to a human-readable name.
var logicalNames = []string{"TV", "Recording", "Recording2", "Tuner",
	"Playback", "Audio", "Tuner2", "Tuner3",
	"Playback2", "Recording3", "Tuner4", "Playback3",
	"Reserved", "Reserved2", "Free", "Broadcast"}

// vendorList maps IEEE OUI vendor IDs to vendor names.
var vendorList = map[uint64]string{0x000039: "Toshiba", 0x0000F0: "Samsung",
	0x0005CD: "Denon", 0x000678: "Marantz", 0x000982: "Loewe", 0x0009B0: "Onkyo",
	0x000CB8: "Medion", 0x000CE7: "Toshiba", 0x001582: "Pulse Eight",
	0x0020C7: "Akai", 0x002467: "Aoc", 0x008045: "Panasonic", 0x00903E: "Philips",
	0x009053: "Daewoo", 0x00A0DE: "Yamaha", 0x00D0D5: "Grundig",
	0x00E036: "Pioneer", 0x00E091: "LG", 0x08001F: "Sharp", 0x080046: "Sony",
	0x18C086: "Broadcom", 0x6B746D: "Vizio", 0x8065E9: "Benq",
	0x9C645E: "Harman Kardon"}

// keyList maps CEC user-control key codes to key names; GetKeyCodeByName
// performs the reverse (case- and separator-insensitive) lookup.
var keyList = map[int]string{0x00: "Select", 0x01: "Up", 0x02: "Down", 0x03: "Left",
	0x04: "Right", 0x05: "RightUp", 0x06: "RightDown", 0x07: "LeftUp",
	0x08: "LeftDown", 0x09: "RootMenu", 0x0A: "SetupMenu", 0x0B: "ContentsMenu",
	0x0C: "FavoriteMenu", 0x0D: "Exit", 0x20: "0", 0x21: "1", 0x22: "2", 0x23: "3",
	0x24: "4", 0x25: "5", 0x26: "6", 0x27: "7", 0x28: "8", 0x29: "9", 0x2A: "Dot",
	0x2B: "Enter", 0x2C: "Clear", 0x2F: "NextFavorite", 0x30: "ChannelUp",
	0x31: "ChannelDown", 0x32: "PreviousChannel", 0x33: "SoundSelect",
	0x34: "InputSelect", 0x35: "DisplayInformation", 0x36: "Help",
	0x37: "PageUp", 0x38: "PageDown", 0x40: "Power", 0x41: "VolumeUp",
	0x42: "VolumeDown", 0x43: "Mute", 0x44: "Play", 0x45: "Stop", 0x46: "Pause",
	0x47: "Record", 0x48: "Rewind", 0x49: "FastForward", 0x4A: "Eject",
	0x4B: "Forward", 0x4C: "Backward", 0x4D: "StopRecord", 0x4E: "PauseRecord",
	0x50: "Angle", 0x51: "SubPicture", 0x52: "VideoOnDemand",
	0x53: "ElectronicProgramGuide", 0x54: "TimerProgramming",
	0x55: "InitialConfiguration", 0x60: "PlayFunction", 0x61: "PausePlay",
	0x62: "RecordFunction", 0x63: "PauseRecordFunction",
	0x64: "StopFunction", 0x65: "Mute",
	0x66: "RestoreVolume", 0x67: "Tune", 0x68: "SelectMedia",
	0x69: "SelectAvInput", 0x6A: "SelectAudioInput", 0x6B: "PowerToggle",
	0x6C: "PowerOff", 0x6D: "PowerOn", 0x71: "Blue", 0X72: "Red", 0x73: "Green",
	0x74: "Yellow", 0x75: "F5", 0x76: "Data", 0x91: "AnReturn",
	0x96: "Max"}
// Open initialises libCEC with the given OSD device name and opens the
// adapter matching name. Errors are logged and swallowed, not returned.
func Open(name string, deviceName string) {
	var config CECConfiguration
	config.DeviceName = deviceName

	if er := cecInit(config); er != nil {
		log.Println(er)
		return
	}

	adapter, er := getAdapter(name)
	if er != nil {
		log.Println(er)
		return
	}

	er = openAdapter(adapter)
	if er != nil {
		log.Println(er)
		return
	}
}
// Key sends a key press followed (after a short pause) by a key release to
// the device at the given logical address. key may be an int key code, a
// key name (see keyList), or a hex string of the form "0xXX". Errors are
// logged, not returned.
func Key(address int, key interface{}) {
	var keycode int
	switch key := key.(type) {
	case string:
		// Check the length BEFORE slicing: the previous key[:2] slice
		// panicked on strings shorter than two bytes.
		if len(key) == 4 && strings.HasPrefix(key, "0x") {
			keybytes, err := hex.DecodeString(key[2:])
			if err != nil {
				log.Println(err)
				return
			}
			keycode = int(keybytes[0])
		} else {
			keycode = GetKeyCodeByName(key)
		}
	case int:
		keycode = key
	default:
		log.Println("Invalid key type")
		return
	}
	er := KeyPress(address, keycode)
	if er != nil {
		log.Println(er)
		return
	}
	time.Sleep(10 * time.Millisecond)
	er = KeyRelease(address)
	if er != nil {
		log.Println(er)
		return
	}
}
// List returns the currently active CEC devices on the bus, keyed by
// the name of their logical address (from logicalNames). Each entry is
// populated by querying the device for its addresses, OSD name, power
// status, active-source flag and vendor.
func List() map[string]Device {
	devices := make(map[string]Device)
	active_devices := GetActiveDevices()
	for address, active := range active_devices {
		if (active) {
			var dev Device
			dev.LogicalAddress = address
			dev.PhysicalAddress = GetDevicePhysicalAddress(address)
			dev.OSDName = GetDeviceOSDName(address)
			dev.PowerStatus = GetDevicePowerStatus(address)
			dev.ActiveSource = IsActiveSource(address)
			dev.Vendor = GetVendorById(GetDeviceVendorId(address))
			devices[logicalNames[address]] = dev
		}
	}
	return devices
}
// removeSeparators strips the separator characters ":", "-", "_" and
// space from in and returns the result.
func removeSeparators(in string) string {
	drop := func(r rune) rune {
		switch r {
		case ':', '-', '_', ' ':
			return -1 // negative return tells strings.Map to drop the rune
		}
		return r
	}
	return strings.Map(drop, in)
}
// GetKeyCodeByName returns the CEC user-control code whose name matches
// name, comparing case-insensitively and ignoring ":", "-", "_" and
// spaces. It returns -1 when no key matches.
func GetKeyCodeByName(name string) int {
	want := strings.ToLower(removeSeparators(name))
	for code, keyName := range keyList {
		if want == strings.ToLower(keyName) {
			return code
		}
	}
	return -1
}
// GetLogicalAddressByName returns the logical address whose name
// matches name (case-insensitive, separators ignored, an optional
// trailing "1" stripped so e.g. "Tuner1" matches "Tuner"). It returns
// -1 when nothing matches.
func GetLogicalAddressByName(name string) int {
	name = removeSeparators(name)
	// BUG FIX: guard the empty string — indexing name[l-1] below
	// panicked when the input was empty or all separators.
	if name == "" {
		return -1
	}
	l := len(name)
	if name[l-1] == '1' {
		name = name[:l-1]
	}
	name = strings.ToLower(name)
	for i := 0; i < 16; i++ {
		if strings.ToLower(logicalNames[i]) == name {
			return i
		}
	}
	if name == "unregistered" {
		return 15
	}
	return -1
}
// GetLogicalNameByAddress returns the display name of the given
// logical address.
// NOTE(review): addr is not bounds-checked; an out-of-range value
// panics if logicalNames is an array/slice — confirm callers only pass
// 0-15.
func GetLogicalNameByAddress(addr int) string {
	return logicalNames[addr]
}
// GetVendorById returns the vendor name for the given vendor id.
// Presumably vendorList is a map keyed by IEEE OUI, so unknown ids
// yield the empty string — confirm against its declaration.
func GetVendorById(id uint64) string {
	return vendorList[id]
}
|
package dnstapio
import (
"log"
"net"
"sync/atomic"
"time"
tap "github.com/dnstap/golang-dnstap"
fs "github.com/farsightsec/golang-framestream"
)
// Tuning knobs for the dnstap I/O loop.
const (
	tcpWriteBufSize = 1024 * 1024     // TCP socket write-buffer size
	tcpTimeout      = 4 * time.Second // dial timeout for TCP endpoints
	flushTimeout    = 1 * time.Second // max delay before buffered frames are flushed
	queueSize       = 10000           // buffered payloads before drops start
)
// dnstapIO ships dnstap payloads to a remote endpoint over TCP or a
// unix socket, buffering them through a bounded queue drained by a
// background goroutine (see serve).
type dnstapIO struct {
	endpoint string          // address: TCP host:port or unix socket path
	socket   bool            // true when endpoint is a unix socket path
	conn     net.Conn        // current connection; nil while disconnected
	enc      *dnstapEncoder  // framestream encoder writing to conn
	queue    chan tap.Dnstap // bounded queue of pending payloads
	dropped  uint32          // count of dropped payloads (atomic access)
}
// New returns a new and initialized DnstapIO.
func New(endpoint string, socket bool) DnstapIO {
	opts := &fs.EncoderOptions{
		ContentType:   []byte("protobuf:dnstap.Dnstap"),
		Bidirectional: true,
	}
	dio := &dnstapIO{
		endpoint: endpoint,
		socket:   socket,
		enc:      newDnstapEncoder(opts),
		queue:    make(chan tap.Dnstap, queueSize),
	}
	return dio
}
// DnstapIO is a dnstap sink: Connect establishes the connection and
// starts the I/O loop, Dnstap enqueues one payload, and Close shuts
// the loop down.
type DnstapIO interface {
	Connect()
	Dnstap(payload tap.Dnstap)
	Close()
}
// newConnect (re)establishes the connection to the endpoint and points
// the encoder at it.
func (dio *dnstapIO) newConnect() error {
	var err error
	if dio.socket {
		dio.conn, err = net.Dial("unix", dio.endpoint)
	} else {
		dio.conn, err = net.DialTimeout("tcp", dio.endpoint, tcpTimeout)
	}
	if err != nil {
		return err
	}
	if tcpConn, ok := dio.conn.(*net.TCPConn); ok {
		// Best-effort socket tuning; errors are deliberately ignored.
		tcpConn.SetWriteBuffer(tcpWriteBufSize)
		tcpConn.SetNoDelay(false)
	}
	return dio.enc.resetWriter(dio.conn)
}
// Connect connects to the dnstap endpoint and starts the background
// I/O loop. A failed initial connection is only logged; flushBuffer
// keeps retrying from inside the loop.
func (dio *dnstapIO) Connect() {
	if err := dio.newConnect(); err != nil {
		log.Printf("[ERROR] No connection to dnstap endpoint")
	}
	go dio.serve()
}
// Dnstap enqueues the payload for log. When the queue is full the
// payload is dropped and the drop counter incremented.
// NOTE(review): Close() closes dio.queue in this revision, so a
// concurrent Dnstap call after Close panics with "send on closed
// channel".
func (dio *dnstapIO) Dnstap(payload tap.Dnstap) {
	select {
	case dio.queue <- payload:
	default:
		atomic.AddUint32(&dio.dropped, 1)
	}
}
// closeConnection tears down the encoder and the underlying
// connection, leaving conn nil so flushBuffer knows to reconnect.
func (dio *dnstapIO) closeConnection() {
	dio.enc.close()
	if dio.conn == nil {
		return
	}
	dio.conn.Close()
	dio.conn = nil
}
// Close signals the I/O routine to flush and stop by closing the queue
// channel. It does not actually wait for the routine to finish.
// NOTE(review): once the queue is closed, any later Dnstap() send
// panics — this is the defect fixed by the quit-channel rework.
func (dio *dnstapIO) Close() {
	close(dio.queue)
}
// flushBuffer pushes buffered frames to the endpoint, reconnecting
// when the connection is missing or turns out to be dead.
func (dio *dnstapIO) flushBuffer() {
	if dio.conn == nil {
		err := dio.newConnect()
		if err != nil {
			return
		}
		log.Printf("[INFO] Reconnected to dnstap")
	}
	err := dio.enc.flushBuffer()
	if err == nil {
		return
	}
	log.Printf("[WARN] Connection lost: %s", err)
	dio.closeConnection()
	if err = dio.newConnect(); err != nil {
		log.Printf("[ERROR] Cannot connect to dnstap: %s", err)
		return
	}
	log.Printf("[INFO] Reconnected to dnstap")
}
// write encodes one payload; on failure the message is counted as
// dropped rather than retried.
func (dio *dnstapIO) write(payload *tap.Dnstap) {
	if err := dio.enc.writeMsg(payload); err != nil {
		atomic.AddUint32(&dio.dropped, 1)
	}
}
// serve is the I/O loop: it drains the queue, periodically flushes the
// encoder, and reports accumulated drop counts. It exits after a final
// flush when the queue channel is closed by Close().
func (dio *dnstapIO) serve() {
	timeout := time.After(flushTimeout)
	for {
		select {
		case payload, ok := <-dio.queue:
			if !ok {
				// Queue closed: final flush, then shut down.
				dio.flushBuffer()
				dio.closeConnection()
				return
			}
			dio.write(&payload)
		case <-timeout:
			if dropped := atomic.SwapUint32(&dio.dropped, 0); dropped > 0 {
				log.Printf("[WARN] Dropped dnstap messages: %d", dropped)
			}
			dio.flushBuffer()
			timeout = time.After(flushTimeout)
		}
	}
}
Fixed dnstap panic after graceful restart (send on closed channel) (#1479)
package dnstapio
import (
"log"
"net"
"sync/atomic"
"time"
tap "github.com/dnstap/golang-dnstap"
fs "github.com/farsightsec/golang-framestream"
)
// Tuning knobs for the dnstap I/O loop.
const (
	tcpWriteBufSize = 1024 * 1024     // TCP socket write-buffer size
	tcpTimeout      = 4 * time.Second // dial timeout for TCP endpoints
	flushTimeout    = 1 * time.Second // max delay before buffered frames are flushed
	queueSize       = 10000           // buffered payloads before drops start
)
// dnstapIO ships dnstap payloads to a remote endpoint over TCP or a
// unix socket, buffering them through a bounded queue drained by a
// background goroutine. The quit channel (closed by Close) signals
// that goroutine to stop without closing the queue, avoiding the
// send-on-closed-channel panic of the earlier design.
type dnstapIO struct {
	endpoint string          // address: TCP host:port or unix socket path
	socket   bool            // true when endpoint is a unix socket path
	conn     net.Conn        // current connection; nil while disconnected
	enc      *dnstapEncoder  // framestream encoder writing to conn
	queue    chan tap.Dnstap // bounded queue of pending payloads
	dropped  uint32          // count of dropped payloads (atomic access)
	quit     chan struct{}   // closed to stop the serve goroutine
}
// New returns a new and initialized DnstapIO. The queue holds up to
// queueSize payloads; quit is closed by Close to stop the I/O loop.
func New(endpoint string, socket bool) DnstapIO {
	return &dnstapIO{
		endpoint: endpoint,
		socket:   socket,
		enc: newDnstapEncoder(&fs.EncoderOptions{
			ContentType:   []byte("protobuf:dnstap.Dnstap"),
			Bidirectional: true,
		}),
		queue: make(chan tap.Dnstap, queueSize),
		quit:  make(chan struct{}),
	}
}
// DnstapIO is a dnstap sink: Connect establishes the connection and
// starts the I/O loop, Dnstap enqueues one payload, and Close shuts
// the loop down.
type DnstapIO interface {
	Connect()
	Dnstap(payload tap.Dnstap)
	Close()
}
// newConnect (re)establishes the connection to the endpoint and points
// the encoder at it.
func (dio *dnstapIO) newConnect() error {
	var err error
	if dio.socket {
		if dio.conn, err = net.Dial("unix", dio.endpoint); err != nil {
			return err
		}
	} else {
		if dio.conn, err = net.DialTimeout("tcp", dio.endpoint, tcpTimeout); err != nil {
			return err
		}
		if tcpConn, ok := dio.conn.(*net.TCPConn); ok {
			// Best-effort socket tuning; errors are deliberately ignored.
			tcpConn.SetWriteBuffer(tcpWriteBufSize)
			tcpConn.SetNoDelay(false)
		}
	}
	return dio.enc.resetWriter(dio.conn)
}
// Connect connects to the dnstap endpoint and starts the background
// I/O loop. A failed initial connection is only logged; flushBuffer
// keeps retrying from inside the loop.
func (dio *dnstapIO) Connect() {
	if err := dio.newConnect(); err != nil {
		log.Printf("[ERROR] No connection to dnstap endpoint")
	}
	go dio.serve()
}
// Dnstap enqueues the payload for log. When the queue is full the
// payload is dropped and the drop counter incremented. The queue is
// never closed, so calling this after Close cannot panic.
func (dio *dnstapIO) Dnstap(payload tap.Dnstap) {
	select {
	case dio.queue <- payload:
	default:
		atomic.AddUint32(&dio.dropped, 1)
	}
}
// closeConnection tears down the encoder and the underlying
// connection, leaving conn nil so flushBuffer knows to reconnect.
func (dio *dnstapIO) closeConnection() {
	dio.enc.close()
	if dio.conn != nil {
		dio.conn.Close()
		dio.conn = nil
	}
}
// Close signals the I/O routine to flush and stop by closing the quit
// channel. It returns immediately without waiting for the final flush
// to complete.
func (dio *dnstapIO) Close() {
	close(dio.quit)
}
// flushBuffer pushes buffered frames to the endpoint, reconnecting
// when the connection is missing or turns out to be dead.
func (dio *dnstapIO) flushBuffer() {
	if dio.conn == nil {
		if err := dio.newConnect(); err != nil {
			return
		}
		log.Printf("[INFO] Reconnected to dnstap")
	}
	if err := dio.enc.flushBuffer(); err != nil {
		log.Printf("[WARN] Connection lost: %s", err)
		dio.closeConnection()
		if err := dio.newConnect(); err != nil {
			log.Printf("[ERROR] Cannot connect to dnstap: %s", err)
		} else {
			log.Printf("[INFO] Reconnected to dnstap")
		}
	}
}
// write encodes one payload; on failure the message is counted as
// dropped rather than retried.
func (dio *dnstapIO) write(payload *tap.Dnstap) {
	if err := dio.enc.writeMsg(payload); err != nil {
		atomic.AddUint32(&dio.dropped, 1)
	}
}
// serve is the I/O loop: it drains the queue, periodically flushes the
// encoder, and reports accumulated drop counts. Closing quit triggers
// a final flush and shutdown.
// NOTE(review): payloads still buffered in queue when quit fires are
// discarded — confirm that is acceptable on shutdown.
func (dio *dnstapIO) serve() {
	timeout := time.After(flushTimeout)
	for {
		select {
		case <-dio.quit:
			dio.flushBuffer()
			dio.closeConnection()
			return
		case payload := <-dio.queue:
			dio.write(&payload)
		case <-timeout:
			if dropped := atomic.SwapUint32(&dio.dropped, 0); dropped > 0 {
				log.Printf("[WARN] Dropped dnstap messages: %d", dropped)
			}
			dio.flushBuffer()
			timeout = time.After(flushTimeout)
		}
	}
}
|
package ghch
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"strings"
"text/template"
"time"
"github.com/jessevdk/go-flags"
"github.com/octokit/go-octokit/octokit"
)
// ghOpts holds the command-line options parsed by go-flags.
type ghOpts struct {
	RepoPath string `short:"r" long:"repo" default:"." description:"git repository path"`
	GitPath string `short:"g" long:"git" default:"git" description:"git path"`
	From string `short:"f" long:"from" description:"git commit revision range start from"`
	To string `short:"t" long:"to" description:"git commit revision range end to"`
	Token string ` long:"token" description:"github token"`
	Verbose bool `short:"v" long:"verbose"`
	Remote string ` long:"remote" default:"origin"`
	Format string `short:"F" long:"format" default:"json" description:"json or markdown"`
	All bool `short:"A" long:"all" `
	// Tmpl string
}
// Exit codes returned by CLI.Run.
const (
	exitCodeOK = iota
	exitCodeParseFlagError
	exitCodeErr
)
// CLI bundles the output and error streams the command writes to.
type CLI struct {
	OutStream, ErrStream io.Writer
}
// Run executes the ghch command with the given arguments and returns
// an exit code. With --all it emits a section for every version range;
// otherwise a single section (defaulting From to the latest semver
// tag).
func (cli *CLI) Run(argv []string) int {
	log.SetOutput(cli.ErrStream)
	opts, err := parseArgs(argv)
	if err != nil {
		return exitCodeParseFlagError
	}
	gh := (&ghch{
		remote:   opts.Remote,
		repoPath: opts.RepoPath,
		verbose:  opts.Verbose,
		token:    opts.Token,
	}).initialize()
	if opts.All {
		chlog := changelog{}
		prevRev := ""
		for _, rev := range gh.versions() {
			r := gh.getSection(rev, prevRev)
			chlog.Sections = append(chlog.Sections, r)
			prevRev = rev
		}
		r := gh.getSection("", prevRev)
		chlog.Sections = append(chlog.Sections, r)
		if opts.Format == "markdown" {
			results := make([]string, len(chlog.Sections))
			for i, v := range chlog.Sections {
				results[i], _ = v.toMkdn()
			}
			// BUG FIX: was fmt.Println(cli.OutStream, ...), which printed
			// the writer value to stdout instead of writing to OutStream.
			fmt.Fprintln(cli.OutStream, strings.Join(results, "\n"))
		} else {
			// BUG FIX: marshal the whole changelog, not just the last
			// section r.
			jsn, _ := json.MarshalIndent(chlog, "", " ")
			fmt.Fprintln(cli.OutStream, string(jsn))
		}
	} else {
		if opts.From == "" && opts.To == "" {
			opts.From = gh.getLatestSemverTag()
		}
		r := gh.getSection(opts.From, opts.To)
		if opts.Format == "markdown" {
			str, err := r.toMkdn()
			if err != nil {
				log.Print(err)
			} else {
				fmt.Fprintln(cli.OutStream, str)
			}
		} else {
			jsn, _ := json.MarshalIndent(r, "", " ")
			fmt.Fprintln(cli.OutStream, string(jsn))
		}
	}
	return exitCodeOK
}
// parseArgs parses args into a fresh ghOpts via go-flags.
func parseArgs(args []string) (*ghOpts, error) {
	opts := &ghOpts{}
	_, err := flags.ParseArgs(opts, args)
	return opts, err
}
// getSection builds one changelog section covering the merged pull
// requests between revisions from and to.
func (gh *ghch) getSection(from, to string) section {
	r := gh.mergedPRs(from, to)
	t, err := gh.getChangedAt(to)
	if err != nil {
		// Non-fatal: ChangedAt stays zero-valued.
		log.Print(err)
	}
	owner, repo := gh.ownerAndRepo()
	return section{
		PullRequests: r,
		FromRevision: from,
		ToRevision:   to,
		ChangedAt:    t,
		Owner:        owner,
		Repo:         repo,
	}
}
// changelog is the full release history: one section per version range.
type changelog struct {
	Sections []section `json:"sections"`
}

// section describes the merged pull requests between two revisions.
type section struct {
	PullRequests []*octokit.PullRequest `json:"pull_requests"`
	FromRevision string `json:"from_revision"`
	ToRevision string `json:"to_revision"`
	ChangedAt time.Time `json:"changed_at"`
	Owner string `json:"owner"`
	Repo string `json:"repo"`
}
// tmplStr is the markdown template for one changelog section; $ret
// keeps the section in scope inside the range over PullRequests.
var tmplStr = `{{ $ret := . }}
## [{{.ToRevision}}](https://github.com/{{.Owner}}/{{.Repo}}/releases/tag/{{.ToRevision}}) ({{.ChangedAt.Format "2006-01-02"}})
{{range .PullRequests}}
* {{.Title}} [#{{.Number}}](https://github.com/{{$ret.Owner}}/{{$ret.Repo}}/pull/{{.Number}}) [{{.User.Login}}](https://github.com/{{.User.Login}}){{end}}
`
// mdTmpl is the parsed markdown template, initialized once at startup.
var mdTmpl *template.Template

func init() {
	var err error
	mdTmpl, err = template.New("md-changelog").Parse(tmplStr)
	if err != nil {
		// A parse failure is a programming error in tmplStr.
		log.Fatal(err)
	}
}
// toMkdn renders the section as a markdown changelog fragment using
// mdTmpl.
func (rs section) toMkdn() (string, error) {
	var buf bytes.Buffer
	if err := mdTmpl.Execute(&buf, rs); err != nil {
		return "", err
	}
	return buf.String(), nil
}
adjust markdown output
package ghch
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"strings"
"text/template"
"time"
"github.com/jessevdk/go-flags"
"github.com/octokit/go-octokit/octokit"
)
// ghOpts holds the command-line options parsed by go-flags.
type ghOpts struct {
	RepoPath string `short:"r" long:"repo" default:"." description:"git repository path"`
	GitPath string `short:"g" long:"git" default:"git" description:"git path"`
	From string `short:"f" long:"from" description:"git commit revision range start from"`
	To string `short:"t" long:"to" description:"git commit revision range end to"`
	Token string ` long:"token" description:"github token"`
	Verbose bool `short:"v" long:"verbose"`
	Remote string ` long:"remote" default:"origin"`
	Format string `short:"F" long:"format" default:"json" description:"json or markdown"`
	All bool `short:"A" long:"all" `
	// Tmpl string
}
// Exit codes returned by CLI.Run.
const (
	exitCodeOK = iota
	exitCodeParseFlagError
	exitCodeErr
)
// CLI bundles the output and error streams the command writes to.
type CLI struct {
	OutStream, ErrStream io.Writer
}
// Run executes the ghch command with the given arguments and returns
// an exit code. With --all it emits a section for every version range;
// otherwise a single section (defaulting From to the latest semver
// tag).
func (cli *CLI) Run(argv []string) int {
	log.SetOutput(cli.ErrStream)
	opts, err := parseArgs(argv)
	if err != nil {
		return exitCodeParseFlagError
	}
	gh := (&ghch{
		remote:   opts.Remote,
		repoPath: opts.RepoPath,
		verbose:  opts.Verbose,
		token:    opts.Token,
	}).initialize()
	if opts.All {
		chlog := changelog{}
		prevRev := ""
		for _, rev := range gh.versions() {
			r := gh.getSection(rev, prevRev)
			chlog.Sections = append(chlog.Sections, r)
			prevRev = rev
		}
		r := gh.getSection("", prevRev)
		chlog.Sections = append(chlog.Sections, r)
		if opts.Format == "markdown" {
			results := make([]string, len(chlog.Sections))
			for i, v := range chlog.Sections {
				results[i], _ = v.toMkdn()
			}
			// BUG FIX: was fmt.Println(cli.OutStream, ...), which printed
			// the writer value to stdout instead of writing to OutStream.
			fmt.Fprintln(cli.OutStream, strings.Join(results, "\n"))
		} else {
			// BUG FIX: marshal the whole changelog, not just the last
			// section r.
			jsn, _ := json.MarshalIndent(chlog, "", " ")
			fmt.Fprintln(cli.OutStream, string(jsn))
		}
	} else {
		if opts.From == "" && opts.To == "" {
			opts.From = gh.getLatestSemverTag()
		}
		r := gh.getSection(opts.From, opts.To)
		if opts.Format == "markdown" {
			str, err := r.toMkdn()
			if err != nil {
				log.Print(err)
			} else {
				fmt.Fprintln(cli.OutStream, str)
			}
		} else {
			jsn, _ := json.MarshalIndent(r, "", " ")
			fmt.Fprintln(cli.OutStream, string(jsn))
		}
	}
	return exitCodeOK
}
// parseArgs parses args into a fresh ghOpts via go-flags.
func parseArgs(args []string) (*ghOpts, error) {
	opts := &ghOpts{}
	_, err := flags.ParseArgs(opts, args)
	return opts, err
}
// getSection builds one changelog section covering the merged pull
// requests between revisions from and to.
func (gh *ghch) getSection(from, to string) section {
	r := gh.mergedPRs(from, to)
	t, err := gh.getChangedAt(to)
	if err != nil {
		// Non-fatal: ChangedAt stays zero-valued.
		log.Print(err)
	}
	owner, repo := gh.ownerAndRepo()
	return section{
		PullRequests: r,
		FromRevision: from,
		ToRevision:   to,
		ChangedAt:    t,
		Owner:        owner,
		Repo:         repo,
	}
}
// changelog is the full release history: one section per version range.
type changelog struct {
	Sections []section `json:"sections"`
}

// section describes the merged pull requests between two revisions.
type section struct {
	PullRequests []*octokit.PullRequest `json:"pull_requests"`
	FromRevision string `json:"from_revision"`
	ToRevision string `json:"to_revision"`
	ChangedAt time.Time `json:"changed_at"`
	Owner string `json:"owner"`
	Repo string `json:"repo"`
}
// tmplStr is the markdown template for one changelog section; $ret
// keeps the section in scope inside the range over PullRequests. This
// revision parenthesizes the author link and drops the trailing
// newline.
var tmplStr = `{{ $ret := . }}
## [{{.ToRevision}}](https://github.com/{{.Owner}}/{{.Repo}}/releases/tag/{{.ToRevision}}) ({{.ChangedAt.Format "2006-01-02"}})
{{range .PullRequests}}
* {{.Title}} [#{{.Number}}](https://github.com/{{$ret.Owner}}/{{$ret.Repo}}/pull/{{.Number}}) ([{{.User.Login}}](https://github.com/{{.User.Login}})){{end}}`
// mdTmpl is the parsed markdown template, initialized once at startup.
var mdTmpl *template.Template

func init() {
	var err error
	mdTmpl, err = template.New("md-changelog").Parse(tmplStr)
	if err != nil {
		// A parse failure is a programming error in tmplStr.
		log.Fatal(err)
	}
}
// toMkdn renders the section as a markdown changelog fragment using
// mdTmpl.
func (rs section) toMkdn() (string, error) {
	var b bytes.Buffer
	err := mdTmpl.Execute(&b, rs)
	if err != nil {
		return "", err
	}
	return b.String(), nil
}
|
// Photobomb conducts workflow tests triggered by requests to its web server.
package main
import (
"encoding/json"
"fmt"
"os"
"time"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
)
// main wires up the photobomb CLI: global flags, the config/example/
// gauge subcommands, and the default action of starting the web
// server via serve().
func main() {
	app := cli.NewApp()
	app.Name = NAME
	app.Version = VERSION
	app.Usage = "test workflows for the Getty Images ESP API"
	app.Author = "Jordan Peterson"
	app.Email = "dysolution@gmail.com"
	// Global flags; most can also come from the environment.
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "debug, D",
			Usage: "enable debug output",
		},
		cli.StringFlag{
			Name:   "key, k",
			Usage:  "your ESP API key",
			EnvVar: "ESP_API_KEY",
		},
		cli.StringFlag{
			Name:   "secret",
			Usage:  "your ESP API secret",
			EnvVar: "ESP_API_SECRET",
		},
		cli.StringFlag{
			Name:   "username, u",
			Usage:  "your ESP username",
			EnvVar: "ESP_USERNAME",
		},
		cli.StringFlag{
			Name:   "password, p",
			Usage:  "your ESP password",
			EnvVar: "ESP_PASSWORD",
		},
		cli.StringFlag{
			Name:   "token, t",
			Usage:  "use an existing OAuth2 token",
			EnvVar: "ESP_TOKEN",
		},
		cli.StringFlag{
			Name:   "s3-bucket, b",
			Value:  "oregon",
			Usage:  "nearest S3 bucket = [germany|ireland|oregon|singapore|tokyo|virginia]",
			EnvVar: "S3_BUCKET",
		},
		cli.StringFlag{
			Name:  "format, f",
			Value: "json",
			Usage: "[json|ascii]",
			// EnvVar: "PHOTOBOMB_OUTPUT_FORMAT",
		},
		cli.DurationFlag{
			Name:   "attack-interval, i",
			Value:  time.Duration(5000 * time.Millisecond),
			Usage:  "wait n ms between attacks",
			EnvVar: "PHOTOBOMB_INTERVAL",
		},
		cli.DurationFlag{
			Name:   "warning-threshold, w",
			Usage:  "log WARNINGs for long response times, e.g.: [0.2s|200ms|200000μs|200000000ns]",
			EnvVar: "PHOTOBOMB_WARNING_THRESHOLD",
			// Written directly into the package-level warningThreshold.
			Destination: &warningThreshold,
		},
		cli.BoolFlag{
			Name:   "quiet, q",
			Usage:  "suppress log output",
			EnvVar: "PHOTOBOMB_QUIET",
		},
	}
	app.Before = appBefore
	app.Commands = []cli.Command{
		{
			Name:  "config",
			Usage: "print a JSON representation of the config",
			Action: func(c *cli.Context) {
				out, err := json.MarshalIndent(config, "", "  ")
				tableFlip(err)
				fmt.Printf("%s\n", out)
			},
		},
		{
			Name:  "example",
			Usage: "print an example JSON configuration",
			Action: func(c *cli.Context) {
				out, err := json.MarshalIndent(ExampleConfig(), "", "  ")
				tableFlip(err)
				fmt.Printf("%s\n", out)
			},
		},
		{
			Name:  "gauge",
			Usage: "display a horizontal bar gauge of response time",
			Flags: []cli.Flag{
				cli.IntFlag{
					Name:  "max-width, m",
					Usage: "console width",
					Value: 80,
				},
				cli.StringFlag{
					Name:  "glyph, g",
					Usage: "character to use to build graph bars",
					Value: "=",
				},
			},
			Action: func(c *cli.Context) {
				reporter.Gauge = true
				reporter.MaxColumns = c.Int("max-width")
				// NOTE(review): indexing [0] panics if -g is given an
				// empty string — confirm acceptable.
				reporter.Glyph = c.String("glyph")[0]
				// Gauge mode keeps the console clean: errors only.
				log.Level = logrus.ErrorLevel
				serve()
			},
		},
	}
	// Default action (no subcommand): run the web server.
	app.Action = func(c *cli.Context) {
		serve()
	}
	app.Run(os.Args)
}
fix: adapt to changed name in airstrike API
// Photobomb conducts workflow tests triggered by requests to its web server.
package main
import (
"encoding/json"
"fmt"
"os"
"time"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
)
// main wires up the photobomb CLI: global flags, the config/example/
// gauge subcommands, and the default action of starting the web
// server via serve(). This revision uses reporter.GaugeWidth, matching
// the renamed field in the airstrike API.
func main() {
	app := cli.NewApp()
	app.Name = NAME
	app.Version = VERSION
	app.Usage = "test workflows for the Getty Images ESP API"
	app.Author = "Jordan Peterson"
	app.Email = "dysolution@gmail.com"
	// Global flags; most can also come from the environment.
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "debug, D",
			Usage: "enable debug output",
		},
		cli.StringFlag{
			Name:   "key, k",
			Usage:  "your ESP API key",
			EnvVar: "ESP_API_KEY",
		},
		cli.StringFlag{
			Name:   "secret",
			Usage:  "your ESP API secret",
			EnvVar: "ESP_API_SECRET",
		},
		cli.StringFlag{
			Name:   "username, u",
			Usage:  "your ESP username",
			EnvVar: "ESP_USERNAME",
		},
		cli.StringFlag{
			Name:   "password, p",
			Usage:  "your ESP password",
			EnvVar: "ESP_PASSWORD",
		},
		cli.StringFlag{
			Name:   "token, t",
			Usage:  "use an existing OAuth2 token",
			EnvVar: "ESP_TOKEN",
		},
		cli.StringFlag{
			Name:   "s3-bucket, b",
			Value:  "oregon",
			Usage:  "nearest S3 bucket = [germany|ireland|oregon|singapore|tokyo|virginia]",
			EnvVar: "S3_BUCKET",
		},
		cli.StringFlag{
			Name:  "format, f",
			Value: "json",
			Usage: "[json|ascii]",
			// EnvVar: "PHOTOBOMB_OUTPUT_FORMAT",
		},
		cli.DurationFlag{
			Name:   "attack-interval, i",
			Value:  time.Duration(5000 * time.Millisecond),
			Usage:  "wait n ms between attacks",
			EnvVar: "PHOTOBOMB_INTERVAL",
		},
		cli.DurationFlag{
			Name:   "warning-threshold, w",
			Usage:  "log WARNINGs for long response times, e.g.: [0.2s|200ms|200000μs|200000000ns]",
			EnvVar: "PHOTOBOMB_WARNING_THRESHOLD",
			// Written directly into the package-level warningThreshold.
			Destination: &warningThreshold,
		},
		cli.BoolFlag{
			Name:   "quiet, q",
			Usage:  "suppress log output",
			EnvVar: "PHOTOBOMB_QUIET",
		},
	}
	app.Before = appBefore
	app.Commands = []cli.Command{
		{
			Name:  "config",
			Usage: "print a JSON representation of the config",
			Action: func(c *cli.Context) {
				out, err := json.MarshalIndent(config, "", "  ")
				tableFlip(err)
				fmt.Printf("%s\n", out)
			},
		},
		{
			Name:  "example",
			Usage: "print an example JSON configuration",
			Action: func(c *cli.Context) {
				out, err := json.MarshalIndent(ExampleConfig(), "", "  ")
				tableFlip(err)
				fmt.Printf("%s\n", out)
			},
		},
		{
			Name:  "gauge",
			Usage: "display a horizontal bar gauge of response time",
			Flags: []cli.Flag{
				cli.IntFlag{
					Name:  "max-width, m",
					Usage: "console width",
					Value: 80,
				},
				cli.StringFlag{
					Name:  "glyph, g",
					Usage: "character to use to build graph bars",
					Value: "=",
				},
			},
			Action: func(c *cli.Context) {
				reporter.Gauge = true
				reporter.GaugeWidth = c.Int("max-width")
				// NOTE(review): indexing [0] panics if -g is given an
				// empty string — confirm acceptable.
				reporter.Glyph = c.String("glyph")[0]
				// Gauge mode keeps the console clean: errors only.
				log.Level = logrus.ErrorLevel
				serve()
			},
		},
	}
	// Default action (no subcommand): run the web server.
	app.Action = func(c *cli.Context) {
		serve()
	}
	app.Run(os.Args)
}
|
package worker
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"os/signal"
"strings"
"syscall"
"time"
// include for conditional pprof HTTP server
_ "net/http/pprof"
"github.com/Sirupsen/logrus"
"github.com/cenk/backoff"
"github.com/getsentry/raven-go"
"github.com/mihasya/go-metrics-librato"
"github.com/rcrowley/go-metrics"
"github.com/streadway/amqp"
"github.com/travis-ci/worker/backend"
"github.com/travis-ci/worker/config"
"github.com/travis-ci/worker/context"
travismetrics "github.com/travis-ci/worker/metrics"
gocontext "golang.org/x/net/context"
"gopkg.in/urfave/cli.v1"
)
// CLI is the top level of execution for the whole shebang
type CLI struct {
	c        *cli.Context
	bootTime time.Time

	ctx    gocontext.Context  // top-level context; cancelled to shut down
	cancel gocontext.CancelFunc
	logger *logrus.Entry

	Config               *config.Config
	BuildScriptGenerator BuildScriptGenerator
	BackendProvider      backend.Provider
	ProcessorPool        *ProcessorPool
	Canceller            Canceller
	JobQueue             JobQueue

	heartbeatErrSleep time.Duration // backoff after a failed heartbeat
	heartbeatSleep    time.Duration // interval between heartbeats
}
// NewCLI creates a new *CLI from a *cli.Context, recording boot time
// and the default heartbeat cadence.
func NewCLI(c *cli.Context) *CLI {
	return &CLI{
		c:                 c,
		bootTime:          time.Now().UTC(),
		heartbeatSleep:    5 * time.Minute,
		heartbeatErrSleep: 30 * time.Second,
	}
}
// Setup runs one-time preparatory actions and returns a boolean success value
// that is used to determine if it is safe to invoke the Run func
func (i *CLI) Setup() (bool, error) {
	if i.c.String("pprof-port") != "" {
		// Start net/http/pprof server; its error is intentionally ignored.
		go func() {
			http.ListenAndServe(fmt.Sprintf("localhost:%s", i.c.String("pprof-port")), nil)
		}()
	}

	if i.c.Bool("debug") {
		logrus.SetLevel(logrus.DebugLevel)
	}

	ctx, cancel := gocontext.WithCancel(gocontext.Background())
	logger := context.LoggerFromContext(ctx)

	i.ctx = ctx
	i.cancel = cancel
	i.logger = logger

	logrus.SetFormatter(&logrus.TextFormatter{DisableColors: true})

	cfg := config.FromCLIContext(i.c)
	i.Config = cfg

	// Informational modes: print and bail out with ok=false, err=nil.
	if i.c.Bool("echo-config") {
		config.WriteEnvConfig(cfg, os.Stdout)
		return false, nil
	}

	if i.c.Bool("list-backend-providers") {
		backend.EachBackend(func(b *backend.Backend) {
			fmt.Println(b.Alias)
		})
		return false, nil
	}

	logger.WithFields(logrus.Fields{
		"cfg": fmt.Sprintf("%#v", cfg),
	}).Debug("read config")

	i.setupSentry()
	i.setupMetrics()

	err := i.setupJobQueueAndCanceller()
	if err != nil {
		logger.WithField("err", err).Error("couldn't create job queue and canceller")
		return false, err
	}

	generator := NewBuildScriptGenerator(cfg)
	logger.WithFields(logrus.Fields{
		"build_script_generator": fmt.Sprintf("%#v", generator),
	}).Debug("built")

	i.BuildScriptGenerator = generator

	provider, err := backend.NewBackendProvider(cfg.ProviderName, cfg.ProviderConfig)
	if err != nil {
		logger.WithField("err", err).Error("couldn't create backend provider")
		return false, err
	}

	err = provider.Setup(ctx)
	if err != nil {
		logger.WithField("err", err).Error("couldn't setup backend provider")
		return false, err
	}

	logger.WithFields(logrus.Fields{
		"provider": fmt.Sprintf("%#v", provider),
	}).Debug("built")

	i.BackendProvider = provider

	ppc := &ProcessorPoolConfig{
		Hostname:            cfg.Hostname,
		Context:             ctx,
		HardTimeout:         cfg.HardTimeout,
		LogTimeout:          cfg.LogTimeout,
		ScriptUploadTimeout: cfg.ScriptUploadTimeout,
		StartupTimeout:      cfg.StartupTimeout,
	}

	pool := NewProcessorPool(ppc, i.BackendProvider, i.BuildScriptGenerator, i.Canceller)

	pool.SkipShutdownOnLogTimeout = cfg.SkipShutdownOnLogTimeout
	logger.WithFields(logrus.Fields{
		"pool": pool,
	}).Debug("built")

	i.ProcessorPool = pool

	return true, nil
}
// Run starts all long-running processes and blocks until the processor pool
// returns from its Run func
func (i *CLI) Run() {
	i.logger.Info("starting")

	i.handleStartHook()
	defer i.handleStopHook()

	i.logger.Info("worker started")
	defer i.logger.Info("worker finished")

	i.logger.Info("setting up heartbeat")
	i.setupHeartbeat()

	i.logger.Info("starting signal handler loop")
	go i.signalHandler()

	i.logger.WithFields(logrus.Fields{
		"pool_size": i.Config.PoolSize,
		"queue":     i.JobQueue,
	}).Debug("running pool")

	// Blocks until the pool shuts down, then cleans up the queue.
	i.ProcessorPool.Run(i.Config.PoolSize, i.JobQueue)

	err := i.JobQueue.Cleanup()
	if err != nil {
		i.logger.WithField("err", err).Error("couldn't clean up job queue")
	}
}
// setupHeartbeat starts the heartbeat loop when a heartbeat URL is
// configured. The auth token may be given inline or as a "file://"
// path, in which case the file's contents are used.
func (i *CLI) setupHeartbeat() {
	hbURL := i.c.String("heartbeat-url")
	if hbURL == "" {
		return
	}

	hbTok := i.c.String("heartbeat-url-auth-token")
	if strings.HasPrefix(hbTok, "file://") {
		hbTokBytes, err := ioutil.ReadFile(strings.Split(hbTok, "://")[1])
		if err != nil {
			// Fall back to the literal value on read failure.
			i.logger.WithField("err", err).Error("failed to read auth token from file")
		} else {
			hbTok = string(hbTokBytes)
		}
	}

	i.logger.WithField("heartbeat_url", hbURL).Info("starting heartbeat loop")
	go i.heartbeatHandler(hbURL, strings.TrimSpace(hbTok))
}
// handleStartHook runs the configured start-hook command, if any,
// logging the combined output on failure.
func (i *CLI) handleStartHook() {
	hookValue := i.c.String("start-hook")
	if hookValue == "" {
		return
	}

	i.logger.WithField("start_hook", hookValue).Info("running start hook")

	parts := stringSplitSpace(hookValue)
	outErr, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
	if err == nil {
		return
	}

	i.logger.WithFields(logrus.Fields{
		"err":        err,
		"output":     string(outErr),
		"start_hook": hookValue,
	}).Error("start hook failed")
}
// handleStopHook runs the configured stop-hook command, if any,
// logging the combined output on failure.
func (i *CLI) handleStopHook() {
	hookValue := i.c.String("stop-hook")
	if hookValue == "" {
		return
	}

	i.logger.WithField("stop_hook", hookValue).Info("running stop hook")

	parts := stringSplitSpace(hookValue)
	outErr, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
	if err == nil {
		return
	}

	i.logger.WithFields(logrus.Fields{
		"err":       err,
		"output":    string(outErr),
		"stop_hook": hookValue,
		// BUG FIX: the message previously said "start hook failed",
		// copy-pasted from handleStartHook.
	}).Error("stop hook failed")
}
// setupSentry registers a logrus hook that forwards panic/fatal (and
// optionally error) entries to Sentry, and configures the raven
// client's DSN. A blank DSN disables Sentry entirely.
func (i *CLI) setupSentry() {
	if i.Config.SentryDSN == "" {
		return
	}

	levels := []logrus.Level{
		logrus.PanicLevel,
		logrus.FatalLevel,
	}

	if i.Config.SentryHookErrors {
		levels = append(levels, logrus.ErrorLevel)
	}

	sentryHook, err := NewSentryHook(i.Config.SentryDSN, levels)
	if err != nil {
		i.logger.WithField("err", err).Error("couldn't create sentry hook")
		// BUG FIX: previously fell through and registered a
		// possibly-nil hook with logrus; bail out instead.
		return
	}
	logrus.AddHook(sentryHook)

	if err := raven.SetDSN(i.Config.SentryDSN); err != nil {
		i.logger.WithField("err", err).Error("couldn't set DSN in raven")
	}
}
// setupMetrics starts the memstats reporter and either the Librato
// exporter (when fully configured) or a stderr logger, unless metrics
// are silenced.
func (i *CLI) setupMetrics() {
	go travismetrics.ReportMemstatsMetrics()

	if i.Config.LibratoEmail != "" && i.Config.LibratoToken != "" && i.Config.LibratoSource != "" {
		i.logger.Info("starting librato metrics reporter")

		go librato.Librato(metrics.DefaultRegistry, time.Minute,
			i.Config.LibratoEmail, i.Config.LibratoToken, i.Config.LibratoSource,
			[]float64{0.50, 0.75, 0.90, 0.95, 0.99, 0.999, 1.0}, time.Millisecond)
	} else if !i.c.Bool("silence-metrics") {
		i.logger.Info("starting logger metrics reporter")

		go metrics.Log(metrics.DefaultRegistry, time.Minute,
			log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
	}
}
// heartbeatHandler polls the heartbeat endpoint forever, retrying each
// check with exponential backoff (capped at one minute). Between
// successful checks it sleeps heartbeatSleep; after exhausted retries
// it sleeps heartbeatErrSleep. It exits when the top-level context is
// cancelled.
func (i *CLI) heartbeatHandler(heartbeatURL, heartbeatAuthToken string) {
	b := backoff.NewExponentialBackOff()
	b.MaxInterval = 10 * time.Second
	b.MaxElapsedTime = time.Minute

	for {
		err := backoff.Retry(func() error {
			return i.heartbeatCheck(heartbeatURL, heartbeatAuthToken)
		}, b)

		if err != nil {
			i.logger.WithFields(logrus.Fields{
				"heartbeat_url": heartbeatURL,
				"err":           err,
			}).Warn("failed to get heartbeat")
			time.Sleep(i.heartbeatErrSleep)
			continue
		}

		select {
		case <-i.ctx.Done():
			return
		default:
			time.Sleep(i.heartbeatSleep)
		}
	}
}
// heartbeatCheck performs one GET against the heartbeat endpoint,
// optionally with a token Authorization header. A JSON body of
// {"state": "down"} triggers a graceful pool shutdown. Non-2xx status
// codes and transport/decoding failures are returned as errors.
func (i *CLI) heartbeatCheck(heartbeatURL, heartbeatAuthToken string) error {
	req, err := http.NewRequest("GET", heartbeatURL, nil)
	if err != nil {
		return err
	}
	if heartbeatAuthToken != "" {
		req.Header.Set("Authorization", fmt.Sprintf("token %s", heartbeatAuthToken))
	}

	res, err := (&http.Client{}).Do(req)
	if err != nil {
		return err
	}
	// BUG FIX: the response body was never closed, leaking a
	// connection/file descriptor on every heartbeat.
	defer res.Body.Close()

	if res.StatusCode > 299 {
		return fmt.Errorf("unhappy status code %d", res.StatusCode)
	}

	body := map[string]string{}
	err = json.NewDecoder(res.Body).Decode(&body)
	if err != nil {
		return err
	}

	if state, ok := body["state"]; ok && state == "down" {
		i.logger.WithField("heartbeat_state", state).Info("starting graceful shutdown")
		i.ProcessorPool.GracefulShutdown(false)
	}
	return nil
}
// signalHandler reacts to process signals: SIGINT → graceful shutdown,
// SIGTERM → immediate cancel, SIGTTIN/SIGTTOU → grow/shrink the pool,
// SIGWINCH → toggle graceful shutdown+pause, SIGUSR1 → dump status.
func (i *CLI) signalHandler() {
	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan,
		syscall.SIGTERM, syscall.SIGINT, syscall.SIGUSR1,
		syscall.SIGTTIN, syscall.SIGTTOU,
		syscall.SIGWINCH)
	// IMPROVEMENT: block on the channel instead of the previous
	// select+default+time.Sleep busy-poll; behavior is otherwise
	// unchanged (the loop still never terminates on its own).
	for sig := range signalChan {
		switch sig {
		case syscall.SIGINT:
			i.logger.Info("SIGINT received, starting graceful shutdown")
			i.ProcessorPool.GracefulShutdown(false)
		case syscall.SIGTERM:
			i.logger.Info("SIGTERM received, shutting down immediately")
			i.cancel()
		case syscall.SIGTTIN:
			i.logger.Info("SIGTTIN received, adding processor to pool")
			i.ProcessorPool.Incr()
		case syscall.SIGTTOU:
			i.logger.Info("SIGTTOU received, removing processor from pool")
			i.ProcessorPool.Decr()
		case syscall.SIGWINCH:
			i.logger.Info("SIGWINCH received, toggling graceful shutdown and pause")
			i.ProcessorPool.GracefulShutdown(true)
		case syscall.SIGUSR1:
			i.logger.WithFields(logrus.Fields{
				"version":   VersionString,
				"revision":  RevisionString,
				"generated": GeneratedString,
				"boot_time": i.bootTime.String(),
				"uptime":    time.Since(i.bootTime),
				"pool_size": i.ProcessorPool.Size(),
			}).Info("SIGUSR1 received, dumping info")
			i.ProcessorPool.Each(func(n int, proc *Processor) {
				i.logger.WithFields(logrus.Fields{
					"n":           n,
					"id":          proc.ID,
					"processed":   proc.ProcessedCount,
					"status":      proc.CurrentStatus,
					"last_job_id": proc.LastJobID,
				}).Info("processor info")
			})
		default:
			i.logger.WithField("signal", sig).Info("ignoring unknown signal")
		}
	}
}
// setupJobQueueAndCanceller builds the JobQueue and Canceller for the
// configured queue type: "amqp" (with optional TLS, custom CA certs,
// or insecure TLS) or "file". Any other type is an error.
func (i *CLI) setupJobQueueAndCanceller() error {
	switch i.Config.QueueType {
	case "amqp":
		var amqpConn *amqp.Connection
		var err error

		if i.Config.AmqpTlsCert != "" || i.Config.AmqpTlsCertPath != "" {
			// TLS with an explicit CA pool built from inline PEM
			// and/or a PEM file.
			cfg := new(tls.Config)
			cfg.RootCAs = x509.NewCertPool()
			if i.Config.AmqpTlsCert != "" {
				cfg.RootCAs.AppendCertsFromPEM([]byte(i.Config.AmqpTlsCert))
			}
			if i.Config.AmqpTlsCertPath != "" {
				cert, err := ioutil.ReadFile(i.Config.AmqpTlsCertPath)
				if err != nil {
					return err
				}
				cfg.RootCAs.AppendCertsFromPEM(cert)
			}
			amqpConn, err = amqp.DialTLS(i.Config.AmqpURI, cfg)
		} else if i.Config.AmqpInsecure {
			amqpConn, err = amqp.DialTLS(
				i.Config.AmqpURI,
				&tls.Config{InsecureSkipVerify: true},
			)
		} else {
			amqpConn, err = amqp.Dial(i.Config.AmqpURI)
		}
		if err != nil {
			i.logger.WithField("err", err).Error("couldn't connect to AMQP")
			return err
		}

		// Watch for connection-level errors in the background.
		go i.amqpErrorWatcher(amqpConn)

		i.logger.Debug("connected to AMQP")

		canceller := NewAMQPCanceller(i.ctx, amqpConn)
		i.logger.WithFields(logrus.Fields{
			"canceller": fmt.Sprintf("%#v", canceller),
		}).Debug("built")

		i.Canceller = canceller

		go canceller.Run()

		jobQueue, err := NewAMQPJobQueue(amqpConn, i.Config.QueueName)
		if err != nil {
			return err
		}

		jobQueue.DefaultLanguage = i.Config.DefaultLanguage
		jobQueue.DefaultDist = i.Config.DefaultDist
		jobQueue.DefaultGroup = i.Config.DefaultGroup
		jobQueue.DefaultOS = i.Config.DefaultOS

		i.JobQueue = jobQueue
		return nil
	case "file":
		canceller := NewFileCanceller(i.ctx, i.Config.BaseDir)
		go canceller.Run()

		i.Canceller = canceller

		jobQueue, err := NewFileJobQueue(i.Config.BaseDir, i.Config.QueueName, i.Config.FilePollingInterval)
		if err != nil {
			return err
		}

		jobQueue.DefaultLanguage = i.Config.DefaultLanguage
		jobQueue.DefaultDist = i.Config.DefaultDist
		jobQueue.DefaultGroup = i.Config.DefaultGroup
		jobQueue.DefaultOS = i.Config.DefaultOS

		i.JobQueue = jobQueue
		return nil
	}

	return fmt.Errorf("unknown queue type %q", i.Config.QueueType)
}
// amqpErrorWatcher blocks until the AMQP connection reports a close
// error, then cancels the top-level context to shut the worker down.
// A clean close (channel closed without an error value) is ignored.
func (i *CLI) amqpErrorWatcher(amqpConn *amqp.Connection) {
	errChan := make(chan *amqp.Error)
	errChan = amqpConn.NotifyClose(errChan)

	err, ok := <-errChan
	if ok {
		i.logger.WithField("err", err).Error("amqp connection errored, terminating")
		i.cancel()
	}
}
Add a timeout for shutdown after AMQP error (#192)
* Add a timeout for shutdown after AMQP error
After an AMQP error we should be able to shut down quite fast, we just
need to wait for any running instances to shut down. If we don't shut
down quickly, that probably means that something has gotten stuck, and
we should force shut down.
* Use logger.Fatal instead of os.Exit
This forwards the log message to Sentry.
* Reduce AMQP error shutdown to one minute
All that needs to happen is instances terminating, and we have other
cleanup processes that will handle it if the worker didn't get to it.
* cli: make amqp connection error timeout panic
This causes a goroutine stack dump, making it easier to see what got stuck, as
this is likely something we want to debug if/when it happens.
package worker
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"os/signal"
"strings"
"syscall"
"time"
// include for conditional pprof HTTP server
_ "net/http/pprof"
"github.com/Sirupsen/logrus"
"github.com/cenk/backoff"
"github.com/getsentry/raven-go"
"github.com/mihasya/go-metrics-librato"
"github.com/rcrowley/go-metrics"
"github.com/streadway/amqp"
"github.com/travis-ci/worker/backend"
"github.com/travis-ci/worker/config"
"github.com/travis-ci/worker/context"
travismetrics "github.com/travis-ci/worker/metrics"
gocontext "golang.org/x/net/context"
"gopkg.in/urfave/cli.v1"
)
// CLI is the top level of execution for the whole shebang
type CLI struct {
	c        *cli.Context // parsed command-line context
	bootTime time.Time    // process start time (UTC), reported on SIGUSR1

	// lifecycle plumbing shared by the long-running goroutines
	ctx    gocontext.Context
	cancel gocontext.CancelFunc
	logger *logrus.Entry

	Config               *config.Config
	BuildScriptGenerator BuildScriptGenerator
	BackendProvider      backend.Provider
	ProcessorPool        *ProcessorPool
	Canceller            Canceller
	JobQueue             JobQueue

	// heartbeat loop pacing: retry delay after a failed check, and the
	// normal poll interval
	heartbeatErrSleep time.Duration
	heartbeatSleep    time.Duration
}
// NewCLI creates a new *CLI from a *cli.Context, recording the boot time
// and installing the default heartbeat pacing intervals.
func NewCLI(c *cli.Context) *CLI {
	instance := &CLI{
		c:                 c,
		bootTime:          time.Now().UTC(),
		heartbeatSleep:    5 * time.Minute,
		heartbeatErrSleep: 30 * time.Second,
	}
	return instance
}
// Setup runs one-time preparatory actions and returns a boolean success value
// that is used to determine if it is safe to invoke the Run func
func (i *CLI) Setup() (bool, error) {
	if i.c.String("pprof-port") != "" {
		// Start net/http/pprof server
		go func() {
			http.ListenAndServe(fmt.Sprintf("localhost:%s", i.c.String("pprof-port")), nil)
		}()
	}
	if i.c.Bool("debug") {
		logrus.SetLevel(logrus.DebugLevel)
	}
	// Root context/cancel pair shared by all long-running goroutines.
	ctx, cancel := gocontext.WithCancel(gocontext.Background())
	logger := context.LoggerFromContext(ctx)
	i.ctx = ctx
	i.cancel = cancel
	i.logger = logger
	logrus.SetFormatter(&logrus.TextFormatter{DisableColors: true})
	cfg := config.FromCLIContext(i.c)
	i.Config = cfg
	// Informational modes: print the requested data and report "do not run".
	if i.c.Bool("echo-config") {
		config.WriteEnvConfig(cfg, os.Stdout)
		return false, nil
	}
	if i.c.Bool("list-backend-providers") {
		backend.EachBackend(func(b *backend.Backend) {
			fmt.Println(b.Alias)
		})
		return false, nil
	}
	logger.WithFields(logrus.Fields{
		"cfg": fmt.Sprintf("%#v", cfg),
	}).Debug("read config")
	i.setupSentry()
	i.setupMetrics()
	// Queue/canceller must exist before the processor pool is built below.
	err := i.setupJobQueueAndCanceller()
	if err != nil {
		logger.WithField("err", err).Error("couldn't create job queue and canceller")
		return false, err
	}
	generator := NewBuildScriptGenerator(cfg)
	logger.WithFields(logrus.Fields{
		"build_script_generator": fmt.Sprintf("%#v", generator),
	}).Debug("built")
	i.BuildScriptGenerator = generator
	provider, err := backend.NewBackendProvider(cfg.ProviderName, cfg.ProviderConfig)
	if err != nil {
		logger.WithField("err", err).Error("couldn't create backend provider")
		return false, err
	}
	err = provider.Setup(ctx)
	if err != nil {
		logger.WithField("err", err).Error("couldn't setup backend provider")
		return false, err
	}
	logger.WithFields(logrus.Fields{
		"provider": fmt.Sprintf("%#v", provider),
	}).Debug("built")
	i.BackendProvider = provider
	ppc := &ProcessorPoolConfig{
		Hostname:            cfg.Hostname,
		Context:             ctx,
		HardTimeout:         cfg.HardTimeout,
		LogTimeout:          cfg.LogTimeout,
		ScriptUploadTimeout: cfg.ScriptUploadTimeout,
		StartupTimeout:      cfg.StartupTimeout,
	}
	pool := NewProcessorPool(ppc, i.BackendProvider, i.BuildScriptGenerator, i.Canceller)
	pool.SkipShutdownOnLogTimeout = cfg.SkipShutdownOnLogTimeout
	logger.WithFields(logrus.Fields{
		"pool": pool,
	}).Debug("built")
	i.ProcessorPool = pool
	return true, nil
}
// Run starts all long-running processes and blocks until the processor pool
// returns from its Run func
func (i *CLI) Run() {
	i.logger.Info("starting")
	i.handleStartHook()
	// Stop hook is deferred so it also runs on early return.
	defer i.handleStopHook()
	i.logger.Info("worker started")
	defer i.logger.Info("worker finished")
	i.logger.Info("setting up heartbeat")
	i.setupHeartbeat()
	i.logger.Info("starting signal handler loop")
	go i.signalHandler()
	i.logger.WithFields(logrus.Fields{
		"pool_size": i.Config.PoolSize,
		"queue":     i.JobQueue,
	}).Debug("running pool")
	// Blocks until the pool finishes (graceful shutdown or cancellation).
	i.ProcessorPool.Run(i.Config.PoolSize, i.JobQueue)
	err := i.JobQueue.Cleanup()
	if err != nil {
		i.logger.WithField("err", err).Error("couldn't clean up job queue")
	}
}
// setupHeartbeat starts the heartbeat polling loop when a heartbeat URL is
// configured. The auth token may be given inline or as a "file://" path, in
// which case the token is read from that file; read failures are logged and
// the raw flag value is used as-is.
func (i *CLI) setupHeartbeat() {
	hbURL := i.c.String("heartbeat-url")
	if hbURL == "" {
		return
	}
	hbTok := i.c.String("heartbeat-url-auth-token")
	if strings.HasPrefix(hbTok, "file://") {
		// TrimPrefix rather than strings.Split(hbTok, "://")[1]: splitting
		// truncated any path that itself contained "://".
		hbTokBytes, err := ioutil.ReadFile(strings.TrimPrefix(hbTok, "file://"))
		if err != nil {
			i.logger.WithField("err", err).Error("failed to read auth token from file")
		} else {
			hbTok = string(hbTokBytes)
		}
	}
	i.logger.WithField("heartbeat_url", hbURL).Info("starting heartbeat loop")
	go i.heartbeatHandler(hbURL, strings.TrimSpace(hbTok))
}
// handleStartHook runs the configured start-hook command, if any, and logs
// the command's combined output when it fails.
func (i *CLI) handleStartHook() {
	hook := i.c.String("start-hook")
	if hook == "" {
		return
	}

	i.logger.WithField("start_hook", hook).Info("running start hook")

	argv := stringSplitSpace(hook)
	output, err := exec.Command(argv[0], argv[1:]...).CombinedOutput()
	if err != nil {
		i.logger.WithFields(logrus.Fields{
			"err":        err,
			"output":     string(output),
			"start_hook": hook,
		}).Error("start hook failed")
	}
}
// handleStopHook runs the configured stop-hook command, if any, and logs the
// command's combined output when it fails.
func (i *CLI) handleStopHook() {
	hookValue := i.c.String("stop-hook")
	if hookValue == "" {
		return
	}
	i.logger.WithField("stop_hook", hookValue).Info("running stop hook")
	parts := stringSplitSpace(hookValue)
	outErr, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
	if err == nil {
		return
	}
	i.logger.WithFields(logrus.Fields{
		"err":       err,
		"output":    string(outErr),
		"stop_hook": hookValue,
	}).Error("stop hook failed") // was "start hook failed": copy/paste error
}
// setupSentry wires a Sentry logrus hook (panic/fatal, plus error level when
// configured) and sets the raven DSN. A no-op when no DSN is configured.
func (i *CLI) setupSentry() {
	if i.Config.SentryDSN == "" {
		return
	}
	levels := []logrus.Level{
		logrus.PanicLevel,
		logrus.FatalLevel,
	}
	if i.Config.SentryHookErrors {
		levels = append(levels, logrus.ErrorLevel)
	}
	sentryHook, err := NewSentryHook(i.Config.SentryDSN, levels)
	if err != nil {
		i.logger.WithField("err", err).Error("couldn't create sentry hook")
		// Bail out: the previous code fell through and registered the
		// (likely nil) hook with logrus anyway.
		return
	}
	logrus.AddHook(sentryHook)
	err = raven.SetDSN(i.Config.SentryDSN)
	if err != nil {
		i.logger.WithField("err", err).Error("couldn't set DSN in raven")
	}
}
// setupMetrics starts the memstats reporter, plus either the Librato
// reporter (when email, token, and source are all configured) or a stderr
// log reporter (unless silenced by the silence-metrics flag).
func (i *CLI) setupMetrics() {
	go travismetrics.ReportMemstatsMetrics()

	libratoConfigured := i.Config.LibratoEmail != "" &&
		i.Config.LibratoToken != "" &&
		i.Config.LibratoSource != ""

	switch {
	case libratoConfigured:
		i.logger.Info("starting librato metrics reporter")
		go librato.Librato(metrics.DefaultRegistry, time.Minute,
			i.Config.LibratoEmail, i.Config.LibratoToken, i.Config.LibratoSource,
			[]float64{0.50, 0.75, 0.90, 0.95, 0.99, 0.999, 1.0}, time.Millisecond)
	case !i.c.Bool("silence-metrics"):
		i.logger.Info("starting logger metrics reporter")
		go metrics.Log(metrics.DefaultRegistry, time.Minute,
			log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
	}
}
// heartbeatHandler polls the heartbeat URL until the CLI context is
// cancelled. Each poll is retried with exponential backoff; a poll that
// still fails after backoff only logs a warning and the loop continues.
func (i *CLI) heartbeatHandler(heartbeatURL, heartbeatAuthToken string) {
	b := backoff.NewExponentialBackOff()
	b.MaxInterval = 10 * time.Second
	b.MaxElapsedTime = time.Minute
	for {
		err := backoff.Retry(func() error {
			return i.heartbeatCheck(heartbeatURL, heartbeatAuthToken)
		}, b)
		if err != nil {
			i.logger.WithFields(logrus.Fields{
				"heartbeat_url": heartbeatURL,
				"err":           err,
			}).Warn("failed to get heartbeat")
			// Shorter sleep after a failure so recovery is noticed sooner.
			time.Sleep(i.heartbeatErrSleep)
			continue
		}
		select {
		case <-i.ctx.Done():
			return
		default:
			time.Sleep(i.heartbeatSleep)
		}
	}
}
// heartbeatCheck performs one GET against the heartbeat URL (with optional
// token auth), decodes the JSON body, and starts a graceful shutdown when
// the service reports {"state": "down"}. Returns an error for transport
// failures, status codes above 299, or undecodable bodies.
func (i *CLI) heartbeatCheck(heartbeatURL, heartbeatAuthToken string) error {
	req, err := http.NewRequest("GET", heartbeatURL, nil)
	if err != nil {
		return err
	}
	if heartbeatAuthToken != "" {
		req.Header.Set("Authorization", fmt.Sprintf("token %s", heartbeatAuthToken))
	}
	res, err := (&http.Client{}).Do(req)
	if err != nil {
		return err
	}
	// Close the body on every path; the previous version leaked the
	// response body (and its connection) on each poll.
	defer res.Body.Close()
	if res.StatusCode > 299 {
		return fmt.Errorf("unhappy status code %d", res.StatusCode)
	}
	body := map[string]string{}
	err = json.NewDecoder(res.Body).Decode(&body)
	if err != nil {
		return err
	}
	if state, ok := body["state"]; ok && state == "down" {
		i.logger.WithField("heartbeat_state", state).Info("starting graceful shutdown")
		i.ProcessorPool.GracefulShutdown(false)
	}
	return nil
}
// signalHandler polls for OS signals once per second and maps each onto a
// worker action: SIGINT/SIGWINCH graceful shutdown, SIGTERM immediate
// cancellation, SIGTTIN/SIGTTOU pool resize, SIGUSR1 status dump.
func (i *CLI) signalHandler() {
	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan,
		syscall.SIGTERM, syscall.SIGINT, syscall.SIGUSR1,
		syscall.SIGTTIN, syscall.SIGTTOU,
		syscall.SIGWINCH)
	for {
		select {
		case sig := <-signalChan:
			switch sig {
			case syscall.SIGINT:
				i.logger.Info("SIGINT received, starting graceful shutdown")
				i.ProcessorPool.GracefulShutdown(false)
			case syscall.SIGTERM:
				i.logger.Info("SIGTERM received, shutting down immediately")
				i.cancel()
			case syscall.SIGTTIN:
				i.logger.Info("SIGTTIN received, adding processor to pool")
				i.ProcessorPool.Incr()
			case syscall.SIGTTOU:
				i.logger.Info("SIGTTOU received, removing processor from pool")
				i.ProcessorPool.Decr()
			case syscall.SIGWINCH:
				i.logger.Info("SIGWINCH received, toggling graceful shutdown and pause")
				i.ProcessorPool.GracefulShutdown(true)
			case syscall.SIGUSR1:
				// Dump worker build/uptime info plus a line per processor.
				i.logger.WithFields(logrus.Fields{
					"version":   VersionString,
					"revision":  RevisionString,
					"generated": GeneratedString,
					"boot_time": i.bootTime.String(),
					"uptime":    time.Since(i.bootTime),
					"pool_size": i.ProcessorPool.Size(),
				}).Info("SIGUSR1 received, dumping info")
				i.ProcessorPool.Each(func(n int, proc *Processor) {
					i.logger.WithFields(logrus.Fields{
						"n":           n,
						"id":          proc.ID,
						"processed":   proc.ProcessedCount,
						"status":      proc.CurrentStatus,
						"last_job_id": proc.LastJobID,
					}).Info("processor info")
				})
			default:
				i.logger.WithField("signal", sig).Info("ignoring unknown signal")
			}
		default:
			// No signal pending: sleep so the loop doesn't spin.
			time.Sleep(time.Second)
		}
	}
}
// setupJobQueueAndCanceller constructs the job queue and canceller matching
// the configured queue type ("amqp" or "file") and stores them on the CLI.
// An unrecognized queue type is an error.
func (i *CLI) setupJobQueueAndCanceller() error {
	switch i.Config.QueueType {
	case "amqp":
		var amqpConn *amqp.Connection
		var err error
		if i.Config.AmqpTlsCert != "" || i.Config.AmqpTlsCertPath != "" {
			// Trust explicitly-configured CA material: inline PEM and/or a
			// PEM file may each contribute to the root pool.
			cfg := new(tls.Config)
			cfg.RootCAs = x509.NewCertPool()
			if i.Config.AmqpTlsCert != "" {
				cfg.RootCAs.AppendCertsFromPEM([]byte(i.Config.AmqpTlsCert))
			}
			if i.Config.AmqpTlsCertPath != "" {
				cert, err := ioutil.ReadFile(i.Config.AmqpTlsCertPath)
				if err != nil {
					return err
				}
				cfg.RootCAs.AppendCertsFromPEM(cert)
			}
			amqpConn, err = amqp.DialTLS(i.Config.AmqpURI, cfg)
		} else if i.Config.AmqpInsecure {
			// Explicitly requested insecure mode: skip certificate checks.
			amqpConn, err = amqp.DialTLS(
				i.Config.AmqpURI,
				&tls.Config{InsecureSkipVerify: true},
			)
		} else {
			amqpConn, err = amqp.Dial(i.Config.AmqpURI)
		}
		if err != nil {
			i.logger.WithField("err", err).Error("couldn't connect to AMQP")
			return err
		}
		// Shut the worker down if the AMQP connection drops.
		go i.amqpErrorWatcher(amqpConn)
		i.logger.Debug("connected to AMQP")
		canceller := NewAMQPCanceller(i.ctx, amqpConn)
		i.logger.WithFields(logrus.Fields{
			"canceller": fmt.Sprintf("%#v", canceller),
		}).Debug("built")
		i.Canceller = canceller
		go canceller.Run()
		jobQueue, err := NewAMQPJobQueue(amqpConn, i.Config.QueueName)
		if err != nil {
			return err
		}
		jobQueue.DefaultLanguage = i.Config.DefaultLanguage
		jobQueue.DefaultDist = i.Config.DefaultDist
		jobQueue.DefaultGroup = i.Config.DefaultGroup
		jobQueue.DefaultOS = i.Config.DefaultOS
		i.JobQueue = jobQueue
		return nil
	case "file":
		canceller := NewFileCanceller(i.ctx, i.Config.BaseDir)
		go canceller.Run()
		i.Canceller = canceller
		jobQueue, err := NewFileJobQueue(i.Config.BaseDir, i.Config.QueueName, i.Config.FilePollingInterval)
		if err != nil {
			return err
		}
		jobQueue.DefaultLanguage = i.Config.DefaultLanguage
		jobQueue.DefaultDist = i.Config.DefaultDist
		jobQueue.DefaultGroup = i.Config.DefaultGroup
		jobQueue.DefaultOS = i.Config.DefaultOS
		i.JobQueue = jobQueue
		return nil
	}
	return fmt.Errorf("unknown queue type %q", i.Config.QueueType)
}
// amqpErrorWatcher blocks on the connection's close notification. On an
// unexpected connection error it cancels the worker context and, if the
// process has not exited within a minute, panics so a goroutine dump shows
// what got stuck.
func (i *CLI) amqpErrorWatcher(amqpConn *amqp.Connection) {
	errChan := amqpConn.NotifyClose(make(chan *amqp.Error))
	err, ok := <-errChan
	if !ok {
		// Channel closed without a value: normal shutdown.
		return
	}
	i.logger.WithField("err", err).Error("amqp connection errored, terminating")
	i.cancel()
	time.Sleep(time.Minute)
	i.logger.Panic("timed out waiting for shutdown after amqp connection error")
}
|
package cli
// App is a minimal command-line application: a named set of commands that
// Run dispatches by name.
type App struct {
	Name     string
	Usage    string
	Action   Action
	Commands []Command
}

// Command describes one named sub-command and the action it triggers.
type Command struct {
	Name        string
	ShortName   string
	Usage       string
	Description string
	Action      Action
}

// Action is the callback invoked with the matched command's name.
type Action func(name string)

// Run invokes the Action of every registered command whose Name equals the
// given command string.
func (a App) Run(command string) {
	for idx := range a.Commands {
		if cmd := a.Commands[idx]; cmd.Name == command {
			cmd.Action(command)
		}
	}
}
Accept an args slice ([]string) in App.Run
package cli
// App is a minimal command-line application: a named set of commands that
// Run dispatches by name.
type App struct {
	Name     string
	Usage    string
	Action   Action
	Commands []Command
}

// Command describes one named sub-command and the action it triggers.
type Command struct {
	Name        string
	ShortName   string
	Usage       string
	Description string
	Action      Action
}

// Action is the callback invoked with the matched command's name.
type Action func(name string)

// Run dispatches to the command named by args[1] (args[0] is the program
// name). It invokes the Action of every command whose Name matches.
func (a App) Run(args []string) {
	// Guard against a missing command argument: the previous code indexed
	// args[1] unconditionally and panicked when only the program name was
	// supplied.
	if len(args) < 2 {
		return
	}
	command := args[1]
	for _, c := range a.Commands {
		if c.Name == command {
			c.Action(command)
		}
	}
}
|
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"os/signal"
"runtime"
"strings"
"sync"
"text/template"
"time"
"github.com/hashicorp/logutils"
"github.com/rakutentech/go-nozzle"
"golang.org/x/net/context"
)
//go:generate ./bin/kafka-firehose-nozzle -gen-godoc
// Exit codes are int values that represent an exit code for a particular error.
const (
	// ExitCodeOK indicates successful completion.
	ExitCodeOK int = 0
	// ExitCodeError indicates any failure path.
	ExitCodeError int = 1 + iota
)
const (
	// DefaultCfgPath is default config file path
	DefaultCfgPath = "example/kafka-firehose-nozzle.toml"
	// DefaultUAATimeout is default timeout for requesting
	// auth token to UAA server.
	DefaultUAATimeout = 20 * time.Second
	// DefaultSubscriptionID is default subscription ID for
	// loggregator firehose
	DefaultSubscriptionID = "debug-kafka-firehose-nozzle"
)
// EnvUAAPassword is the environment variable consulted for the UAA password.
const (
	EnvUAAPassword = "UAA_PASSWORD"
)
// godocFile is file name for godoc
const (
	godocFile = "doc.go"
)
// CLI is the command line object
type CLI struct {
	// outStream and errStream are the destinations for normal and error
	// output written by the CLI.
	outStream, errStream io.Writer
}
// Run invokes the CLI with the given arguments (args[0] is the program
// name) and returns an exit code. It parses flags, optionally generates
// godoc or prints the version, then wires the nozzle consumer to a set of
// producer workers and blocks until they finish.
func (cli *CLI) Run(args []string) int {
	var (
		cfgPath        string
		subscriptionID string
		logLevel       string
		worker         int
		varz           bool
		debug          bool
		version        bool
		genGodoc       bool
	)

	// Define option flag parsing
	flags := flag.NewFlagSet(Name, flag.ContinueOnError)
	flags.SetOutput(cli.errStream)
	flags.Usage = func() {
		fmt.Fprintf(cli.errStream, helpText)
	}
	flags.StringVar(&cfgPath, "config", DefaultCfgPath, "")
	flags.StringVar(&subscriptionID, "subscription", DefaultSubscriptionID, "")
	flags.StringVar(&logLevel, "log-level", "INFO", "")
	flags.IntVar(&worker, "worker", runtime.NumCPU(), "")
	flags.BoolVar(&varz, "varz-server", false, "")
	flags.BoolVar(&debug, "debug", false, "")
	flags.BoolVar(&version, "version", false, "")

	// -gen-godoc flag is only for developers of this nozzle.
	// It generates godoc.
	flags.BoolVar(&genGodoc, "gen-godoc", false, "")

	// Parse commandline flag
	if err := flags.Parse(args[1:]); err != nil {
		return ExitCodeError
	}
	if genGodoc {
		if err := godoc(); err != nil {
			fmt.Fprintf(cli.errStream, "Failed to generate godoc %s\n", err)
			return ExitCodeError
		}
		fmt.Fprintf(cli.outStream, "Successfully generated godoc\n")
		return ExitCodeOK
	}

	// Show version
	if version {
		fmt.Fprintf(cli.errStream, "%s version %s\n", Name, Version)
		return ExitCodeOK
	}

	// Setup logger with level Filtering
	logger := log.New(&logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"DEBUG", "INFO", "ERROR"},
		MinLevel: (logutils.LogLevel)(strings.ToUpper(logLevel)),
		Writer:   cli.outStream,
	}, "", log.LstdFlags)
	logger.Printf("[INFO] LogLevel: %s", logLevel)

	// Load configuration
	config, err := LoadConfig(cfgPath)
	if err != nil {
		logger.Printf("[ERROR] Failed to load configuration file: %s", err)
		return ExitCodeError
	}
	logger.Printf("[DEBUG] %#v", config)
	if config.SubscriptionID == "" {
		config.SubscriptionID = subscriptionID
	}

	// Start varz server.
	// This is for running this app as PaaS application (need to accept http request)
	if varz {
		varzServer := &VarzServer{Logger: logger}
		go varzServer.Start()
	}

	// Setup option struct for nozzle consumer.
	nozzleConfig := &nozzle.Config{
		DopplerAddr:    config.CF.DopplerAddr,
		Token:          config.CF.Token,
		UaaAddr:        config.CF.UAAAddr,
		Username:       config.CF.Username,
		Password:       config.CF.Password,
		SubscriptionID: config.SubscriptionID,
		Logger:         logger,
	}

	// Setup default nozzle consumer.
	nozzleConsumer, err := nozzle.NewDefaultConsumer(nozzleConfig)
	if err != nil {
		logger.Printf("[ERROR] Failed to construct nozzle consumer: %s", err)
		return ExitCodeError
	}

	// Setup nozzle producer
	var producer NozzleProducer
	if debug {
		logger.Printf("[INFO] Use LogProducer")
		producer = NewLogProducer(logger)
	} else {
		logger.Printf("[INFO] Use KafkaProducer")
		var err error
		producer, err = NewKafkaProducer(logger, config)
		if err != nil {
			logger.Printf("[ERROR] Failed to construct kafka producer: %s", err)
			return ExitCodeError
		}
	}

	// Create a ctx for cancelation signal across the goroutined producers.
	ctx, cancel := context.WithCancel(context.Background())

	// Handle nozzle consumer error and slow consumer alerts
	go func() {
		for {
			select {
			case err := <-nozzleConsumer.Errors():
				if err == nil {
					continue
				}
				// Connection retry is done on noaa side (5 times)
				logger.Printf("[ERROR] Received error from nozzle consumer: %s", err)
			case err := <-nozzleConsumer.Detects():
				logger.Printf("[ERROR] Detect slowConsumerAlert: %s", err)
			}
		}
	}()

	// Handle producer error
	go func() {
		// cancel all other producer goroutine
		defer cancel()
		for err := range producer.Errors() {
			if err == nil {
				continue
			}
			logger.Printf("[ERROR] Failed to produce logs: %s", err)
			return
		}
	}()

	// Handle signal of interrupting to stop process safely. The channel is
	// buffered: signal.Notify does not block sending, so an unbuffered
	// channel could drop the signal if we were not yet receiving.
	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, os.Interrupt, os.Kill)
	go func() {
		<-signalCh
		logger.Println("[INFO] Interrupt Received: cancel all producers")
		cancel()
	}()

	// Start multiple produce worker processes.
	// nozzle consumer events will be distributed to each producer.
	// And each producer produces message to kafka.
	//
	// Process will be blocked until all producer process finishes each jobs.
	var wg sync.WaitGroup
	logger.Printf("[INFO] Start %d producer process", worker)
	for i := 0; i < worker; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			producer.Produce(ctx, nozzleConsumer.Events())
		}()
	}

	// Wait until all producer process is done.
	wg.Wait()

	// Attempt to close all the things. Not returns soon even if
	// error is happend while closing.
	isError := false

	// Close nozzle consumer
	logger.Printf("[INFO] Closing nozzle cosumer")
	if err := nozzleConsumer.Close(); err != nil {
		logger.Printf("[ERROR] Failed to close nozzle consumer process: %s", err)
		isError = true
	}
	logger.Printf("[INFO] Closing producer")
	if err := producer.Close(); err != nil {
		logger.Printf("[ERROR] Failed to close producer: %s", err)
		isError = true
	}
	logger.Printf("[INFO] Finished kafka firehose nozzle")
	if isError {
		return ExitCodeError
	}
	return ExitCodeOK
}
// godoc writes the package documentation file by rendering godocTmpl with
// the current helpText as its data.
func godoc() error {
	out, err := os.Create(godocFile)
	if err != nil {
		return err
	}
	defer out.Close()

	t, err := template.New("godoc").Parse(godocTmpl)
	if err != nil {
		return err
	}
	return t.Execute(out, helpText)
}
// godocTmpl is the text/template rendered by godoc; the sole template datum
// ({{ . }}) is helpText.
var godocTmpl = `// THIS FILE IS GENERATED BY GO GENERATE.
// DO NOT EDIT THIS FILE BY HAND.
/*
{{ . }}
*/
package main
`
// helpText is used for flag usage messages.
var helpText = `kafka-firehose-nozzle is Cloud Foundry nozzle which forwards logs from
the loggregator firehose to Apache kafka (http://kafka.apache.org/).
Usage:
    kafka-firehose-nozzle [options]
Available options:
    -config PATH         Path to configuration file
    -worker NUM          Number of producer worker. Default is number of CPU core
    -subscription ID     Subscription ID for firehose. Default is 'kafka-firehose-nozzle'
    -debug               Output event to stdout instead of producing message to kafka
    -log-level LEVEL     Log level. Default level is INFO (DEBUG|INFO|ERROR)
`
Allow setting the username/password via command-line options
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"os/signal"
"runtime"
"strings"
"sync"
"text/template"
"time"
"github.com/hashicorp/logutils"
"github.com/rakutentech/go-nozzle"
"golang.org/x/net/context"
)
//go:generate ./bin/kafka-firehose-nozzle -gen-godoc
// Exit codes are int values that represent an exit code for a particular error.
const (
	// ExitCodeOK indicates successful completion.
	ExitCodeOK int = 0
	// ExitCodeError indicates any failure path.
	ExitCodeError int = 1 + iota
)
const (
	// DefaultCfgPath is default config file path
	DefaultCfgPath = "example/kafka-firehose-nozzle.toml"
	// DefaultUsername to grant access token for firehose
	DefaultUsername = "admin"
	// DefaultUAATimeout is default timeout for requesting
	// auth token to UAA server.
	DefaultUAATimeout = 20 * time.Second
	// DefaultSubscriptionID is default subscription ID for
	// loggregator firehose.
	DefaultSubscriptionID = "debug-kafka-firehose-nozzle"
)
// EnvPassword is the environment variable read for the -password flag's
// default value.
const (
	EnvPassword = "UAA_PASSWORD"
)
// godocFile is file name for godoc
const (
	godocFile = "doc.go"
)
// CLI is the command line object
type CLI struct {
	// outStream and errStream are the destinations for normal and error
	// output written by the CLI.
	outStream, errStream io.Writer
}
// Run invokes the CLI with the given arguments (args[0] is the program
// name) and returns an exit code. Flag values override the config file;
// built-in defaults apply only when neither the flag nor the config file
// provides a value.
func (cli *CLI) Run(args []string) int {
	var (
		cfgPath        string
		username       string
		password       string
		subscriptionID string
		logLevel       string
		worker         int
		varz           bool
		debug          bool
		version        bool
		genGodoc       bool
	)

	// Define option flag parsing
	flags := flag.NewFlagSet(Name, flag.ContinueOnError)
	flags.SetOutput(cli.errStream)
	flags.Usage = func() {
		fmt.Fprintf(cli.errStream, helpText)
	}
	flags.StringVar(&cfgPath, "config", DefaultCfgPath, "")
	flags.StringVar(&subscriptionID, "subscription", "", "")
	flags.StringVar(&username, "username", "", "")
	flags.StringVar(&password, "password", os.Getenv(EnvPassword), "")
	flags.StringVar(&logLevel, "log-level", "INFO", "")
	flags.IntVar(&worker, "worker", runtime.NumCPU(), "")
	flags.BoolVar(&varz, "varz-server", false, "")
	flags.BoolVar(&debug, "debug", false, "")
	flags.BoolVar(&version, "version", false, "")

	// -gen-godoc flag is only for developers of this nozzle.
	// It generates godoc.
	flags.BoolVar(&genGodoc, "gen-godoc", false, "")

	// Parse commandline flag
	if err := flags.Parse(args[1:]); err != nil {
		return ExitCodeError
	}

	// Generate godoc
	if genGodoc {
		if err := godoc(); err != nil {
			fmt.Fprintf(cli.errStream, "Failed to generate godoc %s\n", err)
			return ExitCodeError
		}
		fmt.Fprintf(cli.outStream, "Successfully generated godoc\n")
		return ExitCodeOK
	}

	// Show version
	if version {
		fmt.Fprintf(cli.errStream, "%s version %s\n", Name, Version)
		return ExitCodeOK
	}

	// Setup logger with level Filtering
	logger := log.New(&logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"DEBUG", "INFO", "ERROR"},
		MinLevel: (logutils.LogLevel)(strings.ToUpper(logLevel)),
		Writer:   cli.outStream,
	}, "", log.LstdFlags)
	logger.Printf("[INFO] LogLevel: %s", logLevel)

	// Load configuration
	config, err := LoadConfig(cfgPath)
	if err != nil {
		logger.Printf("[ERROR] Failed to load configuration file: %s", err)
		return ExitCodeError
	}
	logger.Printf("[DEBUG] %#v", config)

	// Flags win over the config file; the defaults apply only when the
	// config value is still empty. (The previous code had these fallback
	// conditions inverted, clobbering configured values with the defaults
	// and leaving empty ones empty.)
	if subscriptionID != "" {
		config.SubscriptionID = subscriptionID
	} else if config.SubscriptionID == "" {
		config.SubscriptionID = DefaultSubscriptionID
	}
	if username != "" {
		config.CF.Username = username
	} else if config.CF.Username == "" {
		config.CF.Username = DefaultUsername
	}
	if password != "" {
		config.CF.Password = password
	}

	// Start varz server.
	// This is for running this app as PaaS application (need to accept http request)
	if varz {
		varzServer := &VarzServer{Logger: logger}
		go varzServer.Start()
	}

	// Setup option struct for nozzle consumer.
	nozzleConfig := &nozzle.Config{
		DopplerAddr:    config.CF.DopplerAddr,
		Token:          config.CF.Token,
		UaaAddr:        config.CF.UAAAddr,
		Username:       config.CF.Username,
		Password:       config.CF.Password,
		SubscriptionID: config.SubscriptionID,
		Logger:         logger,
	}

	// Setup default nozzle consumer.
	nozzleConsumer, err := nozzle.NewDefaultConsumer(nozzleConfig)
	if err != nil {
		logger.Printf("[ERROR] Failed to construct nozzle consumer: %s", err)
		return ExitCodeError
	}

	// Setup nozzle producer
	var producer NozzleProducer
	if debug {
		logger.Printf("[INFO] Use LogProducer")
		producer = NewLogProducer(logger)
	} else {
		logger.Printf("[INFO] Use KafkaProducer")
		var err error
		producer, err = NewKafkaProducer(logger, config)
		if err != nil {
			logger.Printf("[ERROR] Failed to construct kafka producer: %s", err)
			return ExitCodeError
		}
	}

	// Create a ctx for cancelation signal across the goroutined producers.
	ctx, cancel := context.WithCancel(context.Background())

	// Handle nozzle consumer error and slow consumer alerts
	go func() {
		for {
			select {
			case err := <-nozzleConsumer.Errors():
				if err == nil {
					continue
				}
				// Connection retry is done on noaa side (5 times)
				logger.Printf("[ERROR] Received error from nozzle consumer: %s", err)
			case err := <-nozzleConsumer.Detects():
				logger.Printf("[ERROR] Detect slowConsumerAlert: %s", err)
			}
		}
	}()

	// Handle producer error
	go func() {
		// cancel all other producer goroutine
		defer cancel()
		for err := range producer.Errors() {
			if err == nil {
				continue
			}
			logger.Printf("[ERROR] Failed to produce logs: %s", err)
			return
		}
	}()

	// Handle signal of interrupting to stop process safely. The channel is
	// buffered: signal.Notify does not block sending, so an unbuffered
	// channel could drop the signal if we were not yet receiving.
	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, os.Interrupt, os.Kill)
	go func() {
		<-signalCh
		logger.Println("[INFO] Interrupt Received: cancel all producers")
		cancel()
	}()

	// Start multiple produce worker processes.
	// nozzle consumer events will be distributed to each producer.
	// And each producer produces message to kafka.
	//
	// Process will be blocked until all producer process finishes each jobs.
	var wg sync.WaitGroup
	logger.Printf("[INFO] Start %d producer process", worker)
	for i := 0; i < worker; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			producer.Produce(ctx, nozzleConsumer.Events())
		}()
	}

	// Wait until all producer process is done.
	wg.Wait()

	// Attempt to close all the things. Not returns soon even if
	// error is happend while closing.
	isError := false

	// Close nozzle consumer
	logger.Printf("[INFO] Closing nozzle cosumer")
	if err := nozzleConsumer.Close(); err != nil {
		logger.Printf("[ERROR] Failed to close nozzle consumer process: %s", err)
		isError = true
	}
	logger.Printf("[INFO] Closing producer")
	if err := producer.Close(); err != nil {
		logger.Printf("[ERROR] Failed to close producer: %s", err)
		isError = true
	}
	logger.Printf("[INFO] Finished kafka firehose nozzle")
	if isError {
		return ExitCodeError
	}
	return ExitCodeOK
}
// godoc regenerates the doc file from godocTmpl, embedding helpText as the
// package documentation.
func godoc() error {
	dst, err := os.Create(godocFile)
	if err != nil {
		return err
	}
	defer dst.Close()

	tpl, err := template.New("godoc").Parse(godocTmpl)
	if err != nil {
		return err
	}
	return tpl.Execute(dst, helpText)
}
// godocTmpl is the text/template rendered by godoc; the sole template datum
// ({{ . }}) is helpText.
var godocTmpl = `// THIS FILE IS GENERATED BY GO GENERATE.
// DO NOT EDIT THIS FILE BY HAND.
/*
{{ . }}
*/
package main
`
// helpText is used for flag usage messages.
var helpText = `kafka-firehose-nozzle is Cloud Foundry nozzle which forwards logs from
the loggregator firehose to Apache kafka (http://kafka.apache.org/).
Usage:
    kafka-firehose-nozzle [options]
Available options:
    -config PATH         Path to configuration file
    -username NAME       username to grant access token to connect firehose
    -password PASS       password to grant access token to connect firehose
    -worker NUM          Number of producer worker. Default is number of CPU core
    -subscription ID     Subscription ID for firehose. Default is 'kafka-firehose-nozzle'
    -debug               Output event to stdout instead of producing message to kafka
    -log-level LEVEL     Log level. Default level is INFO (DEBUG|INFO|ERROR)
`
|
package buildah
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/Sirupsen/logrus"
"github.com/containers/image/docker/reference"
"github.com/containers/image/image"
is "github.com/containers/image/storage"
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/projectatomic/buildah/docker"
)
// Manifest MIME types callers may request when committing a container.
const (
	// OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,
	// suitable for specifying as a value of the PreferredManifestType
	// member of a CommitOptions structure. It is also the default.
	OCIv1ImageManifest = v1.MediaTypeImageManifest
	// Dockerv2ImageManifest is the MIME type of a Docker v2s2 image
	// manifest, suitable for specifying as a value of the
	// PreferredManifestType member of a CommitOptions structure.
	Dockerv2ImageManifest = docker.V2S2MediaTypeManifest
)
// containerImageRef is an image reference backed by a working container: it
// carries the container's layers plus the config/manifest inputs needed to
// assemble an image from it.
type containerImageRef struct {
	store       storage.Store
	container   *storage.Container
	compression archive.Compression // compression applied to layer blobs
	name        reference.Named
	oconfig     []byte // OCIv1 image configuration, as JSON
	dconfig     []byte // Docker v2s2 image configuration, as JSON
	createdBy   string // recorded in the image history entry
	annotations map[string]string
	// preferredManifestType is chosen when the caller's accepted list is
	// empty or already contains it.
	preferredManifestType string
}
// containerImageSource is the image source produced by
// containerImageRef.NewImageSource: it serves the assembled manifest,
// config, and the layer blobs stored under a temporary directory.
type containerImageSource struct {
	path         string // temporary directory holding the layer blob files
	ref          *containerImageRef
	store        storage.Store
	container    *storage.Container
	compression  archive.Compression
	config       []byte        // serialized image configuration
	configDigest digest.Digest // digest of config
	manifest     []byte        // serialized manifest in manifestType format
	manifestType string
}
// NewImage builds a types.Image on top of an image source created from this
// reference.
func (i *containerImageRef) NewImage(sc *types.SystemContext) (types.Image, error) {
	source, err := i.NewImageSource(sc, nil)
	if err == nil {
		return image.FromSource(source)
	}
	return nil, err
}
// NewImageSource assembles an image from the container's layer chain: it
// negotiates a manifest type, exports each layer (optionally compressed)
// into a temporary directory, and builds matching OCIv1 and Docker v2s2
// config/manifest documents, returning a source that serves the chosen pair.
// On failure the temporary directory is cleaned up.
func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestTypes []string) (src types.ImageSource, err error) {
	manifestType := ""
	// If we have a preferred format, and it's in the acceptable list, select that one.
	for _, mt := range manifestTypes {
		if mt == i.preferredManifestType {
			manifestType = mt
			break
		}
	}
	// Look for a supported format in the acceptable list.
	if manifestType == "" {
		for _, mt := range manifestTypes {
			if mt == v1.MediaTypeImageManifest || mt == docker.V2S2MediaTypeManifest {
				manifestType = mt
				break
			}
		}
	}
	// If we don't support any of the passed-in formats, try to select our preferred one.
	if manifestType == "" {
		manifestType = i.preferredManifestType
	}
	// If it's not a format we support, return an error.
	if manifestType != v1.MediaTypeImageManifest && manifestType != docker.V2S2MediaTypeManifest {
		return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
			manifestType, v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest)
	}
	// Walk the layer chain from the container's layer down to the base,
	// producing a bottom-up list of layer IDs.
	layers := []string{}
	layerID := i.container.LayerID
	layer, err := i.store.Layer(layerID)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
	}
	for layer != nil {
		layers = append(append([]string{}, layerID), layers...)
		layerID = layer.Parent
		if layerID == "" {
			err = nil
			break
		}
		layer, err = i.store.Layer(layerID)
		if err != nil {
			return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
		}
	}
	logrus.Debugf("layer list: %q", layers)
	created := time.Now().UTC()
	// Scratch directory that will back the image source's blobs; removed by
	// the deferred cleanup if we fail before src is set.
	path, err := ioutil.TempDir(os.TempDir(), Package)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("using %q to hold temporary data", path)
	defer func() {
		if src == nil {
			err2 := os.RemoveAll(path)
			if err2 != nil {
				// Log the removal error (err2), not the outer named return
				// err, which the previous code logged by mistake.
				logrus.Errorf("error removing %q: %v", path, err2)
			}
		}
	}()
	oimage := v1.Image{}
	err = json.Unmarshal(i.oconfig, &oimage)
	if err != nil {
		return nil, err
	}
	dimage := docker.V2Image{}
	err = json.Unmarshal(i.dconfig, &dimage)
	if err != nil {
		return nil, err
	}
	omanifest := v1.Manifest{
		Versioned: specs.Versioned{
			SchemaVersion: 2,
		},
		Config: v1.Descriptor{
			MediaType: v1.MediaTypeImageConfig,
		},
		Layers:      []v1.Descriptor{},
		Annotations: i.annotations,
	}
	dmanifest := docker.V2S2Manifest{
		V2Versioned: docker.V2Versioned{
			SchemaVersion: 2,
			MediaType:     docker.V2S2MediaTypeManifest,
		},
		Config: docker.V2S2Descriptor{
			MediaType: docker.V2S2MediaTypeImageConfig,
		},
		Layers: []docker.V2S2Descriptor{},
	}
	oimage.RootFS.Type = docker.TypeLayers
	oimage.RootFS.DiffIDs = []string{}
	dimage.RootFS = &docker.V2S2RootFS{}
	dimage.RootFS.Type = docker.TypeLayers
	dimage.RootFS.DiffIDs = []digest.Digest{}
	for _, layerID := range layers {
		rc, err := i.store.Diff("", layerID)
		if err != nil {
			return nil, errors.Wrapf(err, "error extracting layer %q", layerID)
		}
		// NOTE(review): these defers accumulate until the whole function
		// returns, so every layer's reader stays open until then.
		defer rc.Close()
		uncompressed, err := archive.DecompressStream(rc)
		if err != nil {
			return nil, errors.Wrapf(err, "error decompressing layer %q", layerID)
		}
		defer uncompressed.Close()
		// Tee the uncompressed stream through a hasher to get the diff ID.
		srcHasher := digest.Canonical.Digester()
		reader := io.TeeReader(uncompressed, srcHasher.Hash())
		layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
		if err != nil {
			return nil, errors.Wrapf(err, "error opening file for layer %q", layerID)
		}
		// Hash and count the (possibly recompressed) bytes actually written.
		destHasher := digest.Canonical.Digester()
		counter := ioutils.NewWriteCounter(layerFile)
		multiWriter := io.MultiWriter(counter, destHasher.Hash())
		omediaType := v1.MediaTypeImageLayer
		dmediaType := docker.V2S2MediaTypeUncompressedLayer
		if i.compression != archive.Uncompressed {
			switch i.compression {
			case archive.Gzip:
				omediaType = v1.MediaTypeImageLayerGzip
				dmediaType = docker.V2S2MediaTypeLayer
				logrus.Debugf("compressing layer %q with gzip", layerID)
			case archive.Bzip2:
				logrus.Debugf("compressing layer %q with bzip2", layerID)
			default:
				logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID)
			}
		}
		compressor, err := archive.CompressStream(multiWriter, i.compression)
		if err != nil {
			return nil, errors.Wrapf(err, "error compressing layer %q", layerID)
		}
		size, err := io.Copy(compressor, reader)
		if err != nil {
			return nil, errors.Wrapf(err, "error storing layer %q to file", layerID)
		}
		compressor.Close()
		layerFile.Close()
		if i.compression == archive.Uncompressed {
			if size != counter.Count {
				return nil, errors.Errorf("error storing layer %q to file: inconsistent layer size (copied %d, wrote %d)", layerID, size, counter.Count)
			}
		} else {
			// Record the on-disk (compressed) size, not the input size.
			size = counter.Count
		}
		logrus.Debugf("layer %q size is %d bytes", layerID, size)
		// Name the blob file after its digest.
		err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String()))
		if err != nil {
			return nil, errors.Wrapf(err, "error storing layer %q to file", layerID)
		}
		olayerDescriptor := v1.Descriptor{
			MediaType: omediaType,
			Digest:    destHasher.Digest(),
			Size:      size,
		}
		omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
		dlayerDescriptor := docker.V2S2Descriptor{
			MediaType: dmediaType,
			Digest:    destHasher.Digest(),
			Size:      size,
		}
		dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
		oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest().String())
		dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest())
	}
	// Append a history entry for this commit to both image configs.
	onews := v1.History{
		Created:    created,
		CreatedBy:  i.createdBy,
		Author:     oimage.Author,
		EmptyLayer: false,
	}
	oimage.History = append(oimage.History, onews)
	dnews := docker.V2S2History{
		Created:    created,
		CreatedBy:  i.createdBy,
		Author:     dimage.Author,
		EmptyLayer: false,
	}
	dimage.History = append(dimage.History, dnews)
	oconfig, err := json.Marshal(&oimage)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("OCIv1 config = %s", oconfig)
	i.oconfig = oconfig
	omanifest.Config.Digest = digest.FromBytes(oconfig)
	omanifest.Config.Size = int64(len(oconfig))
	omanifest.Config.MediaType = v1.MediaTypeImageConfig
	omanifestbytes, err := json.Marshal(&omanifest)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)
	dconfig, err := json.Marshal(&dimage)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("Docker v2s2 config = %s", dconfig)
	i.dconfig = dconfig
	dmanifest.Config.Digest = digest.FromBytes(dconfig)
	dmanifest.Config.Size = int64(len(dconfig))
	dmanifest.Config.MediaType = docker.V2S2MediaTypeImageConfig
	dmanifestbytes, err := json.Marshal(&dmanifest)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)
	// Pick the serialized pair matching the negotiated manifest type.
	var config []byte
	var manifest []byte
	switch manifestType {
	case v1.MediaTypeImageManifest:
		manifest = omanifestbytes
		config = i.oconfig
	case docker.V2S2MediaTypeManifest:
		manifest = dmanifestbytes
		config = i.dconfig
	default:
		panic("unreachable code: unsupported manifest type")
	}
	src = &containerImageSource{
		path:         path,
		ref:          i,
		store:        i.store,
		container:    i.container,
		compression:  i.compression,
		manifest:     manifest,
		manifestType: manifestType,
		config:       config,
		configDigest: digest.FromBytes(config),
	}
	return src, nil
}
// NewImageDestination is unsupported: a container reference is read-only.
func (i *containerImageRef) NewImageDestination(sc *types.SystemContext) (types.ImageDestination, error) {
	return nil, errors.Errorf("can't write to a container")
}
// DockerReference returns the container's parsed name, or nil when the
// container's name did not parse as an image reference.
func (i *containerImageRef) DockerReference() reference.Named {
	return i.name
}
// StringWithinTransport returns the container's first name, or an empty
// string when the container has no names.
func (i *containerImageRef) StringWithinTransport() string {
	if len(i.container.Names) == 0 {
		return ""
	}
	return i.container.Names[0]
}
// DeleteImage is a no-op: a container reference has no stored image to
// delete.
func (i *containerImageRef) DeleteImage(*types.SystemContext) error {
	// we were never here
	return nil
}
// PolicyConfigurationIdentity returns an empty identity; no signing policy
// applies to this synthesized reference.
func (i *containerImageRef) PolicyConfigurationIdentity() string {
	return ""
}
// PolicyConfigurationNamespaces returns no namespaces; see
// PolicyConfigurationIdentity.
func (i *containerImageRef) PolicyConfigurationNamespaces() []string {
	return nil
}
// Transport returns the containers/storage image transport.
func (i *containerImageRef) Transport() types.ImageTransport {
	return is.Transport
}
// Close removes the temporary directory that holds the source's blobs,
// logging and returning any removal error.
func (i *containerImageSource) Close() error {
	if err := os.RemoveAll(i.path); err != nil {
		logrus.Errorf("error removing %q: %v", i.path, err)
		return err
	}
	return nil
}
// Reference returns the containerImageRef this source was built from.
func (i *containerImageSource) Reference() types.ImageReference {
	return i.ref
}
// GetSignatures returns no signatures; synthesized images are unsigned.
func (i *containerImageSource) GetSignatures() ([][]byte, error) {
	return nil, nil
}
// GetTargetManifest is not implemented for this source.
func (i *containerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
	return []byte{}, "", errors.Errorf("TODO")
}
// GetManifest returns the manifest bytes and MIME type computed when the
// source was created.
func (i *containerImageSource) GetManifest() ([]byte, string, error) {
	return i.manifest, i.manifestType, nil
}
// GetBlob serves either the image configuration (matched by digest) or a
// layer blob stored under its digest in the temporary directory.  The
// returned size is -1 when the layer's size cannot be determined.
func (i *containerImageSource) GetBlob(blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) {
	if blob.Digest == i.configDigest {
		logrus.Debugf("start reading config")
		reader := bytes.NewReader(i.config)
		closer := func() error {
			logrus.Debugf("finished reading config")
			return nil
		}
		return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil
	}
	// Layer blobs were written out under their (possibly compressed) digests.
	layerFile, err := os.OpenFile(filepath.Join(i.path, blob.Digest.String()), os.O_RDONLY, 0600)
	if err != nil {
		logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
		return nil, -1, err
	}
	size = -1
	st, err := layerFile.Stat()
	if err != nil {
		logrus.Warnf("error reading size of layer %q: %v", blob.Digest.String(), err)
	} else {
		size = st.Size()
	}
	logrus.Debugf("reading layer %q", blob.Digest.String())
	closer := func() error {
		// NOTE(review): the error from Close is silently discarded here.
		layerFile.Close()
		logrus.Debugf("finished reading layer %q", blob.Digest.String())
		return nil
	}
	return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
}
// makeContainerImageRef creates an ImageReference for the builder's
// container, capturing serialized copies of the OCI and Docker
// configurations so the reference is self-contained.  An empty
// manifestType selects the OCI format.
func (b *Builder) makeContainerImageRef(manifestType string, compress archive.Compression) (types.ImageReference, error) {
	var name reference.Named
	if manifestType == "" {
		manifestType = OCIv1ImageManifest
	}
	container, err := b.store.Container(b.ContainerID)
	if err != nil {
		return nil, err
	}
	if len(container.Names) > 0 {
		// Use the container's first name if it parses as an image name;
		// otherwise leave the reference unnamed.
		name, err = reference.ParseNamed(container.Names[0])
		if err != nil {
			name = nil
		}
	}
	oconfig, err := json.Marshal(&b.OCIv1)
	if err != nil {
		return nil, err
	}
	dconfig, err := json.Marshal(&b.Docker)
	if err != nil {
		return nil, err
	}
	ref := &containerImageRef{
		store:                 b.store,
		container:             container,
		compression:           compress,
		name:                  name,
		oconfig:               oconfig,
		dconfig:               dconfig,
		createdBy:             b.CreatedBy(),
		annotations:           b.Annotations(),
		preferredManifestType: manifestType,
	}
	return ref, nil
}
Make histories consistent when synthesizing images
Ensure that "created" timestamps in Images and ImageSources derived from
an image reference that we synthesize don't vary, since we want to
compute the exact same image configuration and manifest every time we
read the image using that reference.
Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
Closes: #141
Approved by: rhatdan
package buildah
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/Sirupsen/logrus"
"github.com/containers/image/docker/reference"
"github.com/containers/image/image"
is "github.com/containers/image/storage"
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/projectatomic/buildah/docker"
)
// Manifest MIME types accepted by the PreferredManifestType member of a
// CommitOptions structure.
const (
	// OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,
	// suitable for specifying as a value of the PreferredManifestType
	// member of a CommitOptions structure. It is also the default.
	OCIv1ImageManifest = v1.MediaTypeImageManifest
	// Dockerv2ImageManifest is the MIME type of a Docker v2s2 image
	// manifest, suitable for specifying as a value of the
	// PreferredManifestType member of a CommitOptions structure.
	Dockerv2ImageManifest = docker.V2S2MediaTypeManifest
)
// containerImageRef is a types.ImageReference that synthesizes an image
// from a container's layers plus serialized image configurations.
type containerImageRef struct {
	store                 storage.Store
	container             *storage.Container
	compression           archive.Compression // compression to apply to layer blobs
	name                  reference.Named     // parsed container name, may be nil
	oconfig               []byte              // serialized OCIv1 image configuration
	dconfig               []byte              // serialized Docker v2s2 image configuration
	created               time.Time           // fixed timestamp so repeated reads yield identical images
	createdBy             string
	annotations           map[string]string
	preferredManifestType string
}
// containerImageSource serves the manifest, configuration, and layer blobs
// prepared by containerImageRef.NewImageSource from a temporary directory.
type containerImageSource struct {
	path         string // temporary directory holding layer blobs
	ref          *containerImageRef
	store        storage.Store
	container    *storage.Container
	compression  archive.Compression
	config       []byte        // selected image configuration blob
	configDigest digest.Digest // digest of config, used to match blob requests
	manifest     []byte
	manifestType string
}
// NewImage returns a types.Image backed by a freshly created image source.
func (i *containerImageRef) NewImage(sc *types.SystemContext) (types.Image, error) {
	src, err := i.NewImageSource(sc, nil)
	if err == nil {
		return image.FromSource(src)
	}
	return nil, err
}
// NewImageSource extracts the container's layers into a temporary
// directory, recomputing digests (and optionally recompressing), builds
// matching OCI and Docker manifests and configurations, and returns an
// ImageSource that serves whichever format was negotiated from
// manifestTypes.  The temporary directory is removed on failure.
func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestTypes []string) (src types.ImageSource, err error) {
	manifestType := ""
	// If we have a preferred format, and it's in the acceptable list, select that one.
	for _, mt := range manifestTypes {
		if mt == i.preferredManifestType {
			manifestType = mt
			break
		}
	}
	// Look for a supported format in the acceptable list.
	if manifestType == "" {
		for _, mt := range manifestTypes {
			if mt == v1.MediaTypeImageManifest || mt == docker.V2S2MediaTypeManifest {
				manifestType = mt
				break
			}
		}
	}
	// If we don't support any of the passed-in formats, try to select our preferred one.
	if manifestType == "" {
		manifestType = i.preferredManifestType
	}
	// If it's not a format we support, return an error.
	if manifestType != v1.MediaTypeImageManifest && manifestType != docker.V2S2MediaTypeManifest {
		return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
			manifestType, v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest)
	}
	// Start building the list of layers using the read-write layer.
	layers := []string{}
	layerID := i.container.LayerID
	layer, err := i.store.Layer(layerID)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
	}
	// Walk the list of parent layers, prepending each as we go.
	for layer != nil {
		layers = append(append([]string{}, layerID), layers...)
		layerID = layer.Parent
		if layerID == "" {
			err = nil
			break
		}
		layer, err = i.store.Layer(layerID)
		if err != nil {
			return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
		}
	}
	logrus.Debugf("layer list: %q", layers)
	// Make a temporary directory to hold blobs.
	path, err := ioutil.TempDir(os.TempDir(), Package)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("using %q to hold temporary data", path)
	defer func() {
		// Clean up the temporary directory unless a source was
		// successfully created (it takes ownership of the directory).
		if src == nil {
			err2 := os.RemoveAll(path)
			if err2 != nil {
				// Fixed: previously logged the outer err instead of err2.
				logrus.Errorf("error removing %q: %v", path, err2)
			}
		}
	}()
	// Build fresh copies of the configurations so that we don't mess with the values in the Builder
	// object itself.
	oimage := v1.Image{}
	err = json.Unmarshal(i.oconfig, &oimage)
	if err != nil {
		return nil, err
	}
	dimage := docker.V2Image{}
	err = json.Unmarshal(i.dconfig, &dimage)
	if err != nil {
		return nil, err
	}
	// Start building manifests.
	omanifest := v1.Manifest{
		Versioned: specs.Versioned{
			SchemaVersion: 2,
		},
		Config: v1.Descriptor{
			MediaType: v1.MediaTypeImageConfig,
		},
		Layers:      []v1.Descriptor{},
		Annotations: i.annotations,
	}
	dmanifest := docker.V2S2Manifest{
		V2Versioned: docker.V2Versioned{
			SchemaVersion: 2,
			MediaType:     docker.V2S2MediaTypeManifest,
		},
		Config: docker.V2S2Descriptor{
			MediaType: docker.V2S2MediaTypeImageConfig,
		},
		Layers: []docker.V2S2Descriptor{},
	}
	oimage.RootFS.Type = docker.TypeLayers
	oimage.RootFS.DiffIDs = []string{}
	dimage.RootFS = &docker.V2S2RootFS{}
	dimage.RootFS.Type = docker.TypeLayers
	dimage.RootFS.DiffIDs = []digest.Digest{}
	// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
	for _, layerID := range layers {
		// Start reading the layer.
		rc, err := i.store.Diff("", layerID)
		if err != nil {
			return nil, errors.Wrapf(err, "error extracting layer %q", layerID)
		}
		defer rc.Close()
		// Set up to decompress the layer, in case it's coming out compressed. Due to implementation
		// differences, the result may not match the digest the blob had when it was originally imported,
		// so we have to recompute all of this anyway if we want to be sure the digests we use will be
		// correct.
		uncompressed, err := archive.DecompressStream(rc)
		if err != nil {
			return nil, errors.Wrapf(err, "error decompressing layer %q", layerID)
		}
		defer uncompressed.Close()
		srcHasher := digest.Canonical.Digester()
		reader := io.TeeReader(uncompressed, srcHasher.Hash())
		// Set up to write the possibly-recompressed blob.
		layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
		if err != nil {
			return nil, errors.Wrapf(err, "error opening file for layer %q", layerID)
		}
		destHasher := digest.Canonical.Digester()
		counter := ioutils.NewWriteCounter(layerFile)
		multiWriter := io.MultiWriter(counter, destHasher.Hash())
		// Figure out which media type we want to call this. Assume no compression.
		omediaType := v1.MediaTypeImageLayer
		dmediaType := docker.V2S2MediaTypeUncompressedLayer
		if i.compression != archive.Uncompressed {
			switch i.compression {
			case archive.Gzip:
				omediaType = v1.MediaTypeImageLayerGzip
				dmediaType = docker.V2S2MediaTypeLayer
				logrus.Debugf("compressing layer %q with gzip", layerID)
			case archive.Bzip2:
				// Until the image specs define a media type for bzip2-compressed layers, even if we know
				// how to decompress them, we can't try to compress layers with bzip2.
				return nil, errors.New("media type for bzip2-compressed layers is not defined")
			default:
				logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID)
			}
		}
		writer, err := archive.CompressStream(multiWriter, i.compression)
		if err != nil {
			return nil, errors.Wrapf(err, "error compressing layer %q", layerID)
		}
		size, err := io.Copy(writer, reader)
		if err != nil {
			return nil, errors.Wrapf(err, "error storing layer %q to file", layerID)
		}
		writer.Close()
		layerFile.Close()
		if i.compression == archive.Uncompressed {
			if size != counter.Count {
				return nil, errors.Errorf("error storing layer %q to file: inconsistent layer size (copied %d, wrote %d)", layerID, size, counter.Count)
			}
		} else {
			size = counter.Count
		}
		logrus.Debugf("layer %q size is %d bytes", layerID, size)
		// Rename the layer so that we can more easily find it by digest later.
		err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String()))
		if err != nil {
			return nil, errors.Wrapf(err, "error storing layer %q to file", layerID)
		}
		// Add a note in the manifest about the layer. The blobs are identified by their possibly-
		// compressed blob digests.
		olayerDescriptor := v1.Descriptor{
			MediaType: omediaType,
			Digest:    destHasher.Digest(),
			Size:      size,
		}
		omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
		dlayerDescriptor := docker.V2S2Descriptor{
			MediaType: dmediaType,
			Digest:    destHasher.Digest(),
			Size:      size,
		}
		dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
		// Add a note about the diffID, which is always an uncompressed value.
		oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest().String())
		dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest())
	}
	// Build history notes in the image configurations.  Use the fixed
	// i.created timestamp so repeated reads produce identical images.
	onews := v1.History{
		Created:    i.created,
		CreatedBy:  i.createdBy,
		Author:     oimage.Author,
		EmptyLayer: false,
	}
	oimage.History = append(oimage.History, onews)
	dnews := docker.V2S2History{
		Created:    i.created,
		CreatedBy:  i.createdBy,
		Author:     dimage.Author,
		EmptyLayer: false,
	}
	dimage.History = append(dimage.History, dnews)
	// Encode the image configuration blob.
	oconfig, err := json.Marshal(&oimage)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("OCIv1 config = %s", oconfig)
	// Add the configuration blob to the manifest.
	omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig)
	omanifest.Config.Size = int64(len(oconfig))
	omanifest.Config.MediaType = v1.MediaTypeImageConfig
	// Encode the manifest.
	omanifestbytes, err := json.Marshal(&omanifest)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)
	// Encode the image configuration blob.
	dconfig, err := json.Marshal(&dimage)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("Docker v2s2 config = %s", dconfig)
	// Add the configuration blob to the manifest.
	dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig)
	dmanifest.Config.Size = int64(len(dconfig))
	dmanifest.Config.MediaType = docker.V2S2MediaTypeImageConfig
	// Encode the manifest.
	dmanifestbytes, err := json.Marshal(&dmanifest)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)
	// Decide which manifest and configuration blobs we'll actually output.
	var config []byte
	var manifest []byte
	switch manifestType {
	case v1.MediaTypeImageManifest:
		manifest = omanifestbytes
		config = oconfig
	case docker.V2S2MediaTypeManifest:
		manifest = dmanifestbytes
		config = dconfig
	default:
		panic("unreachable code: unsupported manifest type")
	}
	src = &containerImageSource{
		path:         path,
		ref:          i,
		store:        i.store,
		container:    i.container,
		compression:  i.compression,
		manifest:     manifest,
		manifestType: manifestType,
		config:       config,
		configDigest: digest.Canonical.FromBytes(config),
	}
	return src, nil
}
// NewImageDestination is unsupported: a container reference is read-only.
func (i *containerImageRef) NewImageDestination(sc *types.SystemContext) (types.ImageDestination, error) {
	return nil, errors.Errorf("can't write to a container")
}
// DockerReference returns the container's parsed name, or nil when the
// container's name did not parse as an image reference.
func (i *containerImageRef) DockerReference() reference.Named {
	return i.name
}
// StringWithinTransport returns the container's first name, falling back
// to an empty string for a nameless container.
func (i *containerImageRef) StringWithinTransport() string {
	names := i.container.Names
	if len(names) == 0 {
		return ""
	}
	return names[0]
}
// DeleteImage is a no-op: a container reference has no stored image to
// delete.
func (i *containerImageRef) DeleteImage(*types.SystemContext) error {
	// we were never here
	return nil
}
// PolicyConfigurationIdentity returns an empty identity; no signing policy
// applies to this synthesized reference.
func (i *containerImageRef) PolicyConfigurationIdentity() string {
	return ""
}
// PolicyConfigurationNamespaces returns no namespaces; see
// PolicyConfigurationIdentity.
func (i *containerImageRef) PolicyConfigurationNamespaces() []string {
	return nil
}
// Transport returns the containers/storage image transport.
func (i *containerImageRef) Transport() types.ImageTransport {
	return is.Transport
}
// Close removes the temporary blob directory, logging and returning any
// removal error.
func (i *containerImageSource) Close() error {
	if err := os.RemoveAll(i.path); err != nil {
		logrus.Errorf("error removing %q: %v", i.path, err)
		return err
	}
	return nil
}
// Reference returns the containerImageRef this source was built from.
func (i *containerImageSource) Reference() types.ImageReference {
	return i.ref
}
// GetSignatures returns no signatures; synthesized images are unsigned.
func (i *containerImageSource) GetSignatures() ([][]byte, error) {
	return nil, nil
}
// GetTargetManifest is not implemented for this source.
func (i *containerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
	return []byte{}, "", errors.Errorf("TODO")
}
// GetManifest returns the manifest bytes and MIME type computed when the
// source was created.
func (i *containerImageSource) GetManifest() ([]byte, string, error) {
	return i.manifest, i.manifestType, nil
}
// GetBlob serves either the image configuration (matched by digest) or a
// layer blob stored under its digest in the temporary directory.  The
// returned size is -1 when the layer's size cannot be determined.
func (i *containerImageSource) GetBlob(blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) {
	if blob.Digest == i.configDigest {
		logrus.Debugf("start reading config")
		reader := bytes.NewReader(i.config)
		closer := func() error {
			logrus.Debugf("finished reading config")
			return nil
		}
		return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil
	}
	// Layer blobs were written out under their (possibly compressed) digests.
	layerFile, err := os.OpenFile(filepath.Join(i.path, blob.Digest.String()), os.O_RDONLY, 0600)
	if err != nil {
		logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
		return nil, -1, err
	}
	size = -1
	st, err := layerFile.Stat()
	if err != nil {
		logrus.Warnf("error reading size of layer %q: %v", blob.Digest.String(), err)
	} else {
		size = st.Size()
	}
	logrus.Debugf("reading layer %q", blob.Digest.String())
	closer := func() error {
		// Propagate the Close error instead of silently discarding it.
		closeErr := layerFile.Close()
		logrus.Debugf("finished reading layer %q", blob.Digest.String())
		return closeErr
	}
	return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
}
// makeContainerImageRef creates an ImageReference for the builder's
// container, capturing serialized copies of the OCI and Docker
// configurations and a fixed creation timestamp so that every read of the
// reference yields an identical image.  An empty manifestType selects the
// OCI format.
func (b *Builder) makeContainerImageRef(manifestType string, compress archive.Compression) (types.ImageReference, error) {
	var name reference.Named
	if manifestType == "" {
		manifestType = OCIv1ImageManifest
	}
	container, err := b.store.Container(b.ContainerID)
	if err != nil {
		return nil, err
	}
	if len(container.Names) > 0 {
		// Use the container's first name if it parses as an image name;
		// otherwise leave the reference unnamed.
		name, err = reference.ParseNamed(container.Names[0])
		if err != nil {
			name = nil
		}
	}
	oconfig, err := json.Marshal(&b.OCIv1)
	if err != nil {
		return nil, err
	}
	dconfig, err := json.Marshal(&b.Docker)
	if err != nil {
		return nil, err
	}
	ref := &containerImageRef{
		store:       b.store,
		container:   container,
		compression: compress,
		name:        name,
		oconfig:     oconfig,
		dconfig:     dconfig,
		// Fix the timestamp now so repeated reads are byte-identical.
		created:               time.Now().UTC(),
		createdBy:             b.CreatedBy(),
		annotations:           b.Annotations(),
		preferredManifestType: manifestType,
	}
	return ref, nil
}
|
package main
import (
"fmt"
"strings"
"github.com/fsouza/go-dockerclient"
"github.com/jawher/bateau/query"
)
var (
	// imgFields maps each queryable image field name to the set of
	// comparison operators that are valid for it; "label.*" stands for
	// any "label.<name>" field.
	imgFields = map[string][]query.Operator{
		"id":             {query.EQ, query.LIKE},
		"cmd":            {query.EQ, query.LIKE},
		"entrypoint":     {query.EQ, query.LIKE},
		"comment":        {query.EQ, query.LIKE},
		"author":         {query.EQ, query.LIKE},
		"arch":           {query.EQ, query.LIKE},
		"docker_version": {query.EQ, query.LIKE},
		"label.*":        {query.IS, query.EQ, query.LIKE},
		"size":           {query.EQ, query.GT},
		"created":        {query.EQ, query.GT},
	}
)
// DockerImage wraps an image from the API listing together with a lazily
// fetched full inspection result.
type DockerImage struct {
	client    *docker.Client
	apiImage  docker.APIImages // cheap data from the list endpoint
	fullImage *docker.Image    // cached InspectImage result; nil until first use
}
// wrapImage builds a DockerImage around an API listing entry, deferring
// the full inspect until it is actually needed.
func wrapImage(client *docker.Client, apiImage docker.APIImages) *DockerImage {
	wrapped := DockerImage{client: client, apiImage: apiImage}
	return &wrapped
}
// Compile-time check that DockerImage implements query.Queryable.
var _ query.Queryable = &DockerImage{}
// Is reports whether the image satisfies the comparison described by
// field, operator, and value.  Fields available from the API listing are
// answered directly; the rest trigger a lazy full inspect via full().
// An unknown field is a programming error and panics.
func (c *DockerImage) Is(field string, operator query.Operator, value string) bool {
	switch {
	case strings.HasPrefix(field, "label."):
		label := strings.TrimPrefix(field, "label.")
		labelValue, found := c.full().Config.Labels[label]
		if operator == query.IS {
			// IS only checks for the label's presence.
			return found
		}
		return strCompare(labelValue, operator, value)
	case field == "id":
		return strCompare(c.apiImage.ID, operator, value)
	case field == "docker_version":
		return strCompare(c.full().DockerVersion, operator, value)
	case field == "comment":
		return strCompare(c.full().Comment, operator, value)
	case field == "author":
		return strCompare(c.full().Author, operator, value)
	case field == "arch":
		return strCompare(c.full().Architecture, operator, value)
	case field == "cmd":
		return sliceCompare(c.full().Config.Cmd, operator, value)
	case field == "entrypoint":
		return sliceCompare(c.full().Config.Entrypoint, operator, value)
	case field == "size":
		return sizeCompare(c.apiImage.VirtualSize, operator, value)
	case field == "created":
		return durationCompare(c.full().Created, operator, value)
	default:
		panic(fmt.Sprintf("Invalid field %s", field))
	}
}
// full lazily fetches and caches the complete image description via
// InspectImage; it aborts the program through fail() on inspection errors.
func (c *DockerImage) full() *docker.Image {
	if c.fullImage != nil {
		return c.fullImage
	}
	daRealImage, err := c.client.InspectImage(c.apiImage.ID)
	if err != nil {
		// Fixed typo in the user-facing message: "retreiving" -> "retrieving".
		fail("Error while retrieving image %s: %v", c.apiImage.ID, err)
	}
	c.fullImage = daRealImage
	return c.fullImage
}
Add tag field to images
package main
import (
"fmt"
"strings"
"github.com/fsouza/go-dockerclient"
"github.com/jawher/bateau/query"
)
var (
	// imgFields maps each queryable image field name to the set of
	// comparison operators that are valid for it; "label.*" stands for
	// any "label.<name>" field.
	imgFields = map[string][]query.Operator{
		"id":             {query.EQ, query.LIKE},
		"tag":            {query.EQ, query.LIKE},
		"cmd":            {query.EQ, query.LIKE},
		"entrypoint":     {query.EQ, query.LIKE},
		"comment":        {query.EQ, query.LIKE},
		"author":         {query.EQ, query.LIKE},
		"arch":           {query.EQ, query.LIKE},
		"docker_version": {query.EQ, query.LIKE},
		"label.*":        {query.IS, query.EQ, query.LIKE},
		"size":           {query.EQ, query.GT},
		"created":        {query.EQ, query.GT},
	}
)
// DockerImage wraps an image from the API listing together with a lazily
// fetched full inspection result.
type DockerImage struct {
	client    *docker.Client
	apiImage  docker.APIImages // cheap data from the list endpoint
	fullImage *docker.Image    // cached InspectImage result; nil until first use
}
// wrapImage builds a DockerImage around an API listing entry; the full
// inspection is fetched lazily on demand.
func wrapImage(client *docker.Client, apiImage docker.APIImages) *DockerImage {
	img := new(DockerImage)
	img.client = client
	img.apiImage = apiImage
	return img
}
// Compile-time check that DockerImage implements query.Queryable.
var _ query.Queryable = &DockerImage{}
// Is reports whether the image satisfies the comparison described by
// field, operator, and value.  Fields available from the API listing
// ("id", "tag", "size") are answered directly; the rest trigger a lazy
// full inspect via full().  An unknown field is a programming error and
// panics.
func (c *DockerImage) Is(field string, operator query.Operator, value string) bool {
	switch {
	case strings.HasPrefix(field, "label."):
		label := strings.TrimPrefix(field, "label.")
		labelValue, found := c.full().Config.Labels[label]
		if operator == query.IS {
			// IS only checks for the label's presence.
			return found
		}
		return strCompare(labelValue, operator, value)
	case field == "id":
		return strCompare(c.apiImage.ID, operator, value)
	case field == "tag":
		return sliceCompare(c.apiImage.RepoTags, operator, value)
	case field == "docker_version":
		return strCompare(c.full().DockerVersion, operator, value)
	case field == "comment":
		return strCompare(c.full().Comment, operator, value)
	case field == "author":
		return strCompare(c.full().Author, operator, value)
	case field == "arch":
		return strCompare(c.full().Architecture, operator, value)
	case field == "cmd":
		return sliceCompare(c.full().Config.Cmd, operator, value)
	case field == "entrypoint":
		return sliceCompare(c.full().Config.Entrypoint, operator, value)
	case field == "size":
		return sizeCompare(c.apiImage.VirtualSize, operator, value)
	case field == "created":
		return durationCompare(c.full().Created, operator, value)
	default:
		panic(fmt.Sprintf("Invalid field %s", field))
	}
}
// full lazily fetches and caches the complete image description via
// InspectImage; it aborts the program through fail() on inspection errors.
func (c *DockerImage) full() *docker.Image {
	if c.fullImage != nil {
		return c.fullImage
	}
	daRealImage, err := c.client.InspectImage(c.apiImage.ID)
	if err != nil {
		// Fixed typo in the user-facing message: "retreiving" -> "retrieving".
		fail("Error while retrieving image %s: %v", c.apiImage.ID, err)
	}
	c.fullImage = daRealImage
	return c.fullImage
}
|
// Copyright 2013 Francisco Souza. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package docker
import (
"encoding/json"
"errors"
"github.com/dotcloud/docker"
"io"
"net/http"
)
// ErrNoSuchImage is the error returned when the requested image does not
// exist on the server.
var ErrNoSuchImage = errors.New("No such image")
// ListImages returns the list of available images in the server.  When
// all is true, intermediate images are included as well.
//
// See http://goo.gl/5ZfHk for more details.
func (c *Client) ListImages(all bool) ([]docker.APIImages, error) {
	flag := "0"
	if all {
		flag = "1"
	}
	body, _, err := c.do("GET", "/images/json?all="+flag, nil)
	if err != nil {
		return nil, err
	}
	var images []docker.APIImages
	if err := json.Unmarshal(body, &images); err != nil {
		return nil, err
	}
	return images, nil
}
// RemoveImage removes an image by its name or ID, returning
// ErrNoSuchImage when the server reports it does not exist.
//
// See http://goo.gl/J2FNF for more details.
func (c *Client) RemoveImage(name string) error {
	_, status, err := c.do("DELETE", "/images/"+name, nil)
	if status == http.StatusNotFound {
		return ErrNoSuchImage
	}
	return err
}
// InspectImage returns an image by its name or ID, or ErrNoSuchImage when
// the server does not know it.
//
// See http://goo.gl/dqGQO for more details.
func (c *Client) InspectImage(name string) (*docker.Image, error) {
	body, status, err := c.do("GET", "/images/"+name+"/json", nil)
	if status == http.StatusNotFound {
		return nil, ErrNoSuchImage
	}
	if err != nil {
		return nil, err
	}
	image := &docker.Image{}
	if err := json.Unmarshal(body, image); err != nil {
		return nil, err
	}
	return image, nil
}
// PushImageOptions holds the options to use in the PushImage method.
type PushImageOptions struct {
	// Name or ID of the image
	Name string
	// Registry server to push the image
	Registry string
}
// PushImage pushes an image to a remote registry, logging progress to w.
// An empty Name yields ErrNoSuchImage without contacting the server.
//
// See http://goo.gl/Hx3CB for more details.
func (c *Client) PushImage(opts PushImageOptions, w io.Writer) error {
	if opts.Name == "" {
		return ErrNoSuchImage
	}
	name := opts.Name
	// Clear Name so it is not repeated in the query string.
	opts.Name = ""
	path := "/images/" + name + "/push?" + queryString(&opts)
	return c.stream("POST", path, nil, w)
}
// PullImageOptions presents the set of options available for pulling an
// image from a registry.
//
// See http://goo.gl/JSltN for more details.
type PullImageOptions struct {
	Repository string `qs:"fromImage"`
	Registry   string
}
// PullImage pulls an image from a remote registry, logging progress to w.
// An empty Repository yields ErrNoSuchImage without contacting the server.
//
// See http://goo.gl/JSltN for more details.
func (c *Client) PullImage(opts PullImageOptions, w io.Writer) error {
	if opts.Repository == "" {
		return ErrNoSuchImage
	}
	path := "/images/create?" + queryString(&opts)
	return c.stream("POST", path, nil, w)
}
all: s/a image/an image/
// Copyright 2013 Francisco Souza. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package docker
import (
"encoding/json"
"errors"
"github.com/dotcloud/docker"
"io"
"net/http"
)
// ErrNoSuchImage is the error returned when the requested image does not
// exist on the server.
var ErrNoSuchImage = errors.New("No such image")
// ListImages returns the list of available images in the server.  When
// all is true, intermediate images are included as well.
//
// See http://goo.gl/5ZfHk for more details.
func (c *Client) ListImages(all bool) ([]docker.APIImages, error) {
	suffix := "0"
	if all {
		suffix = "1"
	}
	body, _, err := c.do("GET", "/images/json?all="+suffix, nil)
	if err != nil {
		return nil, err
	}
	var images []docker.APIImages
	if err := json.Unmarshal(body, &images); err != nil {
		return nil, err
	}
	return images, nil
}
// RemoveImage removes an image by its name or ID, returning
// ErrNoSuchImage when the server reports it does not exist.
//
// See http://goo.gl/J2FNF for more details.
func (c *Client) RemoveImage(name string) error {
	_, status, err := c.do("DELETE", "/images/"+name, nil)
	if status != http.StatusNotFound {
		return err
	}
	return ErrNoSuchImage
}
// InspectImage returns an image by its name or ID, or ErrNoSuchImage when
// the server does not know it.
//
// See http://goo.gl/dqGQO for more details.
func (c *Client) InspectImage(name string) (*docker.Image, error) {
	body, status, err := c.do("GET", "/images/"+name+"/json", nil)
	switch {
	case status == http.StatusNotFound:
		return nil, ErrNoSuchImage
	case err != nil:
		return nil, err
	}
	image := &docker.Image{}
	if err := json.Unmarshal(body, image); err != nil {
		return nil, err
	}
	return image, nil
}
// PushImageOptions holds the options to use in the PushImage method.
type PushImageOptions struct {
	// Name or ID of the image
	Name string
	// Registry server to push the image
	Registry string
}
// PushImage pushes an image to a remote registry, logging progress to w.
// An empty Name yields ErrNoSuchImage without contacting the server.
//
// See http://goo.gl/Hx3CB for more details.
func (c *Client) PushImage(opts PushImageOptions, w io.Writer) error {
	name := opts.Name
	if name == "" {
		return ErrNoSuchImage
	}
	// Clear Name so it is not repeated in the query string.
	opts.Name = ""
	return c.stream("POST", "/images/"+name+"/push?"+queryString(&opts), nil, w)
}
// PullImageOptions presents the set of options available for pulling an
// image from a registry.
//
// See http://goo.gl/JSltN for more details.
type PullImageOptions struct {
	Repository string `qs:"fromImage"`
	Registry   string
}
// PullImage pulls an image from a remote registry, logging progress to w.
// An empty Repository yields ErrNoSuchImage without contacting the server.
//
// See http://goo.gl/JSltN for more details.
func (c *Client) PullImage(opts PullImageOptions, w io.Writer) error {
	if opts.Repository == "" {
		return ErrNoSuchImage
	}
	return c.stream("POST", "/images/create?"+queryString(&opts), nil, w)
}
|
package common
import (
	"bufio"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"os/user"
	"path"
	"reflect"
	"sort"
	"strconv"
	"strings"
)
// CommandPropertySet is a generic function that will set a property for a
// given plugin/app combination.  An empty value unsets the property.
// Invalid app names, empty property names, and properties not present in
// the supplied whitelist abort the program via LogFail.
func CommandPropertySet(pluginName, appName, property, value string, properties map[string]string) {
	if err := VerifyAppName(appName); err != nil {
		LogFail(err.Error())
	}
	if property == "" {
		LogFail("No property specified")
	}
	if _, ok := properties[property]; !ok {
		// List the valid property names directly from the map (no need
		// for reflection), sorted for a deterministic message.
		validPropertyList := make([]string, 0, len(properties))
		for validProperty := range properties {
			validPropertyList = append(validPropertyList, validProperty)
		}
		sort.Strings(validPropertyList)
		LogFail(fmt.Sprintf("Invalid property specified, valid properties include: %s", strings.Join(validPropertyList, ", ")))
	}
	if value != "" {
		LogInfo2Quiet(fmt.Sprintf("Setting %s to %s", property, value))
		PropertyWrite(pluginName, appName, property, value)
	} else {
		LogInfo2Quiet(fmt.Sprintf("Unsetting %s", property))
		if err := PropertyDelete(pluginName, appName, property); err != nil {
			LogFail(err.Error())
		}
	}
}
// PropertyDelete deletes a property from the plugin properties for an app.
func PropertyDelete(pluginName string, appName string, property string) error {
	propertyPath := getPropertyPath(pluginName, appName, property)
	if err := os.Remove(propertyPath); err != nil {
		// Include the underlying error instead of discarding it, matching
		// the error style used elsewhere in this file.
		return fmt.Errorf("Unable to remove %s property %s.%s: %s", pluginName, appName, property, err.Error())
	}
	return nil
}
// PropertyDestroy destroys the plugin properties for an app
func PropertyDestroy(pluginName string, appName string) error {
if appName == "_all_" {
pluginConfigPath := getPluginConfigPath(pluginName)
return os.RemoveAll(pluginConfigPath)
}
pluginAppConfigRoot := getPluginAppPropertyPath(pluginName, appName)
return os.RemoveAll(pluginAppConfigRoot)
}
// PropertyExists returns whether a property exists or not.
func PropertyExists(pluginName string, appName string, property string) bool {
	_, err := os.Stat(getPropertyPath(pluginName, appName, property))
	return !os.IsNotExist(err)
}
// PropertyGet returns the value for a given property, or an empty string
// when the property is missing or unreadable.
func PropertyGet(pluginName string, appName string, property string) string {
	return PropertyGetDefault(pluginName, appName, property, "")
}
// PropertyGetDefault returns the value for a given property, falling back
// to defaultValue when the property does not exist or cannot be read.
// (Previously defaultValue was never used and "" was returned instead.)
func PropertyGetDefault(pluginName, appName, property, defaultValue string) (val string) {
	val = defaultValue
	if !PropertyExists(pluginName, appName, property) {
		return
	}
	propertyPath := getPropertyPath(pluginName, appName, property)
	b, err := ioutil.ReadFile(propertyPath)
	if err != nil {
		LogWarn(fmt.Sprintf("Unable to read %s property %s.%s", pluginName, appName, property))
		return
	}
	val = string(b)
	return
}
// PropertyListAdd inserts a value into a property list at the given
// 1-based index; index 0 appends the value to the end of the list.
// NOTE(review): an index beyond the current list length silently drops the
// value — confirm whether callers rely on this.
func PropertyListAdd(pluginName string, appName string, property string, value string, index int) error {
	if err := PropertyTouch(pluginName, appName, property); err != nil {
		return err
	}
	scannedLines, err := PropertyListGet(pluginName, appName, property)
	if err != nil {
		return err
	}
	value = strings.TrimSpace(value)
	var lines []string
	for i, line := range scannedLines {
		if index != 0 && i == (index-1) {
			lines = append(lines, value)
		}
		lines = append(lines, line)
	}
	if index == 0 {
		lines = append(lines, value)
	}
	propertyPath := getPropertyPath(pluginName, appName, property)
	file, err := os.OpenFile(propertyPath, os.O_RDWR|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	// Close the handle when done; the original version leaked it.
	defer file.Close()
	w := bufio.NewWriter(file)
	for _, line := range lines {
		fmt.Fprintln(w, line)
	}
	if err = w.Flush(); err != nil {
		return fmt.Errorf("Unable to write %s config value %s.%s: %s", pluginName, appName, property, err.Error())
	}
	file.Chmod(0600)
	setPermissions(propertyPath, 0600)
	return nil
}
// PropertyListGet returns the entries of a property list, one per line.
// A missing property yields an empty list and no error.
func PropertyListGet(pluginName string, appName string, property string) ([]string, error) {
	var lines []string
	if !PropertyExists(pluginName, appName, property) {
		return lines, nil
	}
	file, err := os.Open(getPropertyPath(pluginName, appName, property))
	if err != nil {
		return lines, err
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		lines = append(lines, scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		return lines, fmt.Errorf("Unable to read %s config value for %s.%s: %s", pluginName, appName, property, err.Error())
	}
	return lines, nil
}
// PropertyListGetByIndex returns the entry at the given 0-based index of a
// property list, or an "Index not found" error when out of range.
func PropertyListGetByIndex(pluginName string, appName string, property string, index int) (propertyValue string, err error) {
	lines, err := PropertyListGet(pluginName, appName, property)
	if err != nil {
		return
	}
	if index < 0 || index >= len(lines) {
		err = errors.New("Index not found")
		return
	}
	propertyValue = lines[index]
	return
}
// PropertyListGetByValue returns the matching property-list entry, or an
// error when no entry equals the given value.
func PropertyListGetByValue(pluginName string, appName string, property string, value string) (propertyValue string, err error) {
	lines, err := PropertyListGet(pluginName, appName, property)
	if err != nil {
		return
	}
	for _, line := range lines {
		if line == value {
			// A match is by definition identical to value itself.
			return line, nil
		}
	}
	err = errors.New("Value not found")
	return
}
// PropertyListRemove removes every occurrence of value from a property
// list, returning an error when the value was not present.
func PropertyListRemove(pluginName string, appName string, property string, value string) error {
	lines, err := PropertyListGet(pluginName, appName, property)
	if err != nil {
		return err
	}
	propertyPath := getPropertyPath(pluginName, appName, property)
	file, err := os.OpenFile(propertyPath, os.O_RDWR|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	// Close the descriptor on every exit path; previously it leaked.
	defer file.Close()
	found := false
	w := bufio.NewWriter(file)
	for _, line := range lines {
		if line == value {
			found = true
			continue
		}
		fmt.Fprintln(w, line)
	}
	if err = w.Flush(); err != nil {
		return fmt.Errorf("Unable to write %s config value %s.%s: %s", pluginName, appName, property, err.Error())
	}
	file.Chmod(0600)
	setPermissions(propertyPath, 0600)
	if !found {
		return errors.New("Property not found, nothing was removed")
	}
	return nil
}
// PropertyListSet replaces the property-list entry at the given zero-based
// index with value; an index at or past the end appends instead.
func PropertyListSet(pluginName string, appName string, property string, value string, index int) error {
	if err := PropertyTouch(pluginName, appName, property); err != nil {
		return err
	}
	scannedLines, err := PropertyListGet(pluginName, appName, property)
	if err != nil {
		return err
	}
	value = strings.TrimSpace(value)
	var lines []string
	if index >= len(scannedLines) {
		for _, line := range scannedLines {
			lines = append(lines, line)
		}
		lines = append(lines, value)
	} else {
		for i, line := range scannedLines {
			if i == index {
				lines = append(lines, value)
			} else {
				lines = append(lines, line)
			}
		}
	}
	propertyPath := getPropertyPath(pluginName, appName, property)
	file, err := os.OpenFile(propertyPath, os.O_RDWR|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	// Close the descriptor on every exit path; previously it leaked.
	defer file.Close()
	w := bufio.NewWriter(file)
	for _, line := range lines {
		fmt.Fprintln(w, line)
	}
	if err = w.Flush(); err != nil {
		return fmt.Errorf("Unable to write %s config value %s.%s: %s", pluginName, appName, property, err.Error())
	}
	file.Chmod(0600)
	setPermissions(propertyPath, 0600)
	return nil
}
// PropertyTouch ensures a given application property file exists
func PropertyTouch(pluginName string, appName string, property string) error {
	if err := makePluginAppPropertyPath(pluginName, appName); err != nil {
		return fmt.Errorf("Unable to create %s config directory for %s: %s", pluginName, appName, err.Error())
	}
	propertyPath := getPropertyPath(pluginName, appName, property)
	// Nothing to do when the property file is already present.
	if PropertyExists(pluginName, appName, property) {
		return nil
	}
	file, err := os.Create(propertyPath)
	if err != nil {
		return fmt.Errorf("Unable to write %s config value %s.%s: %s", pluginName, appName, property, err.Error())
	}
	defer file.Close()
	return nil
}
// PropertyWrite writes a value for a given application property
func PropertyWrite(pluginName string, appName string, property string, value string) error {
	if err := PropertyTouch(pluginName, appName, property); err != nil {
		return err
	}
	propertyPath := getPropertyPath(pluginName, appName, property)
	file, err := os.Create(propertyPath)
	if err != nil {
		return fmt.Errorf("Unable to write %s config value %s.%s: %s", pluginName, appName, property, err.Error())
	}
	defer file.Close()
	// Fprint, not Fprintf: value is data, not a format string, so '%'
	// characters must be written literally (go vet flags the old call).
	fmt.Fprint(file, value)
	file.Chmod(0600)
	setPermissions(propertyPath, 0600)
	return nil
}
// PropertySetup creates the plugin config root
func PropertySetup(pluginName string) (err error) {
	pluginConfigRoot := getPluginConfigPath(pluginName)
	if err = os.MkdirAll(pluginConfigRoot, 0755); err != nil {
		return
	}
	// Hand ownership of the new directory to the configured system user/group.
	return setPermissions(pluginConfigRoot, 0755)
}
// getPropertyPath returns the on-disk path of a single property file.
func getPropertyPath(pluginName string, appName string, property string) string {
	pluginAppConfigRoot := getPluginAppPropertyPath(pluginName, appName)
	return path.Join(pluginAppConfigRoot, property)
}
// getPluginAppPropertyPath returns the plugin property path for a given plugin/app combination
func getPluginAppPropertyPath(pluginName string, appName string) string {
return path.Join(getPluginConfigPath(pluginName), appName)
}
// getPluginConfigPath returns the plugin property path for a given plugin
func getPluginConfigPath(pluginName string) string {
return path.Join(MustGetEnv("DOKKU_LIB_ROOT"), "config", pluginName)
}
// makePluginAppPropertyPath ensures that a property path exists
func makePluginAppPropertyPath(pluginName string, appName string) (err error) {
pluginAppConfigRoot := getPluginAppPropertyPath(pluginName, appName)
if err = os.MkdirAll(pluginAppConfigRoot, 0755); err != nil {
return
}
return setPermissions(pluginAppConfigRoot, 0755)
}
// setPermissions sets the proper owner and filemode for a given file.
// Owner and group come from DOKKU_SYSTEM_USER / DOKKU_SYSTEM_GROUP,
// falling back to "dokku" for either when unset.
func setPermissions(path string, fileMode os.FileMode) (err error) {
	if err = os.Chmod(path, fileMode); err != nil {
		return err
	}
	systemGroup := os.Getenv("DOKKU_SYSTEM_GROUP")
	systemUser := os.Getenv("DOKKU_SYSTEM_USER")
	if systemGroup == "" {
		systemGroup = "dokku"
	}
	if systemUser == "" {
		systemUser = "dokku"
	}
	group, err := user.LookupGroup(systemGroup)
	if err != nil {
		return
	}
	// Named u (not user) so the os/user package is not shadowed.
	u, err := user.Lookup(systemUser)
	if err != nil {
		return
	}
	uid, err := strconv.Atoi(u.Uid)
	if err != nil {
		return
	}
	gid, err := strconv.Atoi(group.Gid)
	if err != nil {
		return
	}
	return os.Chown(path, uid, gid)
}
fix: add missing comments to exported functions
package common
import (
"bufio"
"errors"
"fmt"
"io/ioutil"
"os"
"os/user"
"path"
"reflect"
"strconv"
"strings"
)
// CommandPropertySet is a generic function that will set a property for a
// given plugin/app combination, validating the property name against the
// supplied map and deleting the property when value is empty.
func CommandPropertySet(pluginName, appName, property, value string, properties map[string]string) {
	if err := VerifyAppName(appName); err != nil {
		LogFail(err.Error())
	}
	if property == "" {
		LogFail("No property specified")
	}
	if _, ok := properties[property]; !ok {
		// Collect the valid property names for the error message. A
		// distinct name avoids shadowing the properties parameter.
		keys := reflect.ValueOf(properties).MapKeys()
		validPropertyList := make([]string, len(keys))
		for i := 0; i < len(keys); i++ {
			validPropertyList[i] = keys[i].String()
		}
		LogFail(fmt.Sprintf("Invalid property specified, valid properties include: %s", strings.Join(validPropertyList, ", ")))
	}
	if value != "" {
		LogInfo2Quiet(fmt.Sprintf("Setting %s to %s", property, value))
		PropertyWrite(pluginName, appName, property, value)
	} else {
		LogInfo2Quiet(fmt.Sprintf("Unsetting %s", property))
		err := PropertyDelete(pluginName, appName, property)
		if err != nil {
			LogFail(err.Error())
		}
	}
}
// PropertyDelete deletes a property from the plugin properties for an app
func PropertyDelete(pluginName string, appName string, property string) error {
	err := os.Remove(getPropertyPath(pluginName, appName, property))
	if err != nil {
		return fmt.Errorf("Unable to remove %s property %s.%s", pluginName, appName, property)
	}
	return nil
}
// PropertyDestroy destroys the plugin properties for an app
func PropertyDestroy(pluginName string, appName string) error {
if appName == "_all_" {
pluginConfigPath := getPluginConfigPath(pluginName)
return os.RemoveAll(pluginConfigPath)
}
pluginAppConfigRoot := getPluginAppPropertyPath(pluginName, appName)
return os.RemoveAll(pluginAppConfigRoot)
}
// PropertyExists returns whether a property exists or not
func PropertyExists(pluginName string, appName string, property string) bool {
	propertyPath := getPropertyPath(pluginName, appName, property)
	_, err := os.Stat(propertyPath)
	// NOTE(review): a Stat error other than "not exist" (e.g. permission
	// denied) also reports the property as existing — confirm intended.
	return !os.IsNotExist(err)
}
// PropertyGet returns the value for a given property, using the empty
// string as the fallback value.
func PropertyGet(pluginName string, appName string, property string) string {
	return PropertyGetDefault(pluginName, appName, property, "")
}
// PropertyGetDefault returns the value for a given property, falling back
// to defaultValue when the property is missing or unreadable.
func PropertyGetDefault(pluginName, appName, property, defaultValue string) (val string) {
	if !PropertyExists(pluginName, appName, property) {
		// Previously "" was returned here, leaving the defaultValue
		// parameter dead; honor the caller's default instead.
		return defaultValue
	}
	propertyPath := getPropertyPath(pluginName, appName, property)
	b, err := ioutil.ReadFile(propertyPath)
	if err != nil {
		LogWarn(fmt.Sprintf("Unable to read %s property %s.%s", pluginName, appName, property))
		return defaultValue
	}
	val = string(b)
	return
}
// PropertyListAdd adds a property to a list at an optionally specified
// index: 0 appends, a non-zero index inserts before the (1-based)
// index-th existing line.
func PropertyListAdd(pluginName string, appName string, property string, value string, index int) error {
	if err := PropertyTouch(pluginName, appName, property); err != nil {
		return err
	}
	scannedLines, err := PropertyListGet(pluginName, appName, property)
	if err != nil {
		return err
	}
	value = strings.TrimSpace(value)
	var lines []string
	for i, line := range scannedLines {
		if index != 0 && i == (index-1) {
			lines = append(lines, value)
		}
		lines = append(lines, line)
	}
	if index == 0 {
		lines = append(lines, value)
	}
	propertyPath := getPropertyPath(pluginName, appName, property)
	file, err := os.OpenFile(propertyPath, os.O_RDWR|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	// Close the descriptor on every exit path; previously it leaked.
	defer file.Close()
	w := bufio.NewWriter(file)
	for _, line := range lines {
		fmt.Fprintln(w, line)
	}
	if err = w.Flush(); err != nil {
		return fmt.Errorf("Unable to write %s config value %s.%s: %s", pluginName, appName, property, err.Error())
	}
	file.Chmod(0600)
	setPermissions(propertyPath, 0600)
	return nil
}
// PropertyListGet returns a property list
func PropertyListGet(pluginName string, appName string, property string) (lines []string, err error) {
if !PropertyExists(pluginName, appName, property) {
return lines, nil
}
propertyPath := getPropertyPath(pluginName, appName, property)
file, err := os.Open(propertyPath)
if err != nil {
return lines, err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
if err = scanner.Err(); err != nil {
return lines, fmt.Errorf("Unable to read %s config value for %s.%s: %s", pluginName, appName, property, err.Error())
}
return lines, nil
}
// PropertyListGetByIndex returns an entry within property list by index
func PropertyListGetByIndex(pluginName string, appName string, property string, index int) (propertyValue string, err error) {
lines, err := PropertyListGet(pluginName, appName, property)
if err != nil {
return
}
found := false
for i, line := range lines {
if i == index {
propertyValue = line
found = true
}
}
if !found {
err = errors.New("Index not found")
}
return
}
// PropertyListGetByValue returns an entry within property list by value
func PropertyListGetByValue(pluginName string, appName string, property string, value string) (propertyValue string, err error) {
lines, err := PropertyListGet(pluginName, appName, property)
if err != nil {
return
}
found := false
for _, line := range lines {
if line == value {
propertyValue = line
found = true
}
}
if !found {
err = errors.New("Value not found")
}
return
}
// PropertyListRemove removes a value from a property list, returning an
// error when the value was not present.
func PropertyListRemove(pluginName string, appName string, property string, value string) error {
	lines, err := PropertyListGet(pluginName, appName, property)
	if err != nil {
		return err
	}
	propertyPath := getPropertyPath(pluginName, appName, property)
	file, err := os.OpenFile(propertyPath, os.O_RDWR|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	// Close the descriptor on every exit path; previously it leaked.
	defer file.Close()
	found := false
	w := bufio.NewWriter(file)
	for _, line := range lines {
		if line == value {
			found = true
			continue
		}
		fmt.Fprintln(w, line)
	}
	if err = w.Flush(); err != nil {
		return fmt.Errorf("Unable to write %s config value %s.%s: %s", pluginName, appName, property, err.Error())
	}
	file.Chmod(0600)
	setPermissions(propertyPath, 0600)
	if !found {
		return errors.New("Property not found, nothing was removed")
	}
	return nil
}
// PropertyListSet sets a value within a property list at a specified
// zero-based index; an index at or past the end appends instead.
func PropertyListSet(pluginName string, appName string, property string, value string, index int) error {
	if err := PropertyTouch(pluginName, appName, property); err != nil {
		return err
	}
	scannedLines, err := PropertyListGet(pluginName, appName, property)
	if err != nil {
		return err
	}
	value = strings.TrimSpace(value)
	var lines []string
	if index >= len(scannedLines) {
		for _, line := range scannedLines {
			lines = append(lines, line)
		}
		lines = append(lines, value)
	} else {
		for i, line := range scannedLines {
			if i == index {
				lines = append(lines, value)
			} else {
				lines = append(lines, line)
			}
		}
	}
	propertyPath := getPropertyPath(pluginName, appName, property)
	file, err := os.OpenFile(propertyPath, os.O_RDWR|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	// Close the descriptor on every exit path; previously it leaked.
	defer file.Close()
	w := bufio.NewWriter(file)
	for _, line := range lines {
		fmt.Fprintln(w, line)
	}
	if err = w.Flush(); err != nil {
		return fmt.Errorf("Unable to write %s config value %s.%s: %s", pluginName, appName, property, err.Error())
	}
	file.Chmod(0600)
	setPermissions(propertyPath, 0600)
	return nil
}
// PropertyTouch ensures a given application property file exists
func PropertyTouch(pluginName string, appName string, property string) error {
if err := makePluginAppPropertyPath(pluginName, appName); err != nil {
return fmt.Errorf("Unable to create %s config directory for %s: %s", pluginName, appName, err.Error())
}
propertyPath := getPropertyPath(pluginName, appName, property)
if PropertyExists(pluginName, appName, property) {
return nil
}
file, err := os.Create(propertyPath)
if err != nil {
return fmt.Errorf("Unable to write %s config value %s.%s: %s", pluginName, appName, property, err.Error())
}
defer file.Close()
return nil
}
// PropertyWrite writes a value for a given application property
func PropertyWrite(pluginName string, appName string, property string, value string) error {
	if err := PropertyTouch(pluginName, appName, property); err != nil {
		return err
	}
	propertyPath := getPropertyPath(pluginName, appName, property)
	file, err := os.Create(propertyPath)
	if err != nil {
		return fmt.Errorf("Unable to write %s config value %s.%s: %s", pluginName, appName, property, err.Error())
	}
	defer file.Close()
	// Fprint, not Fprintf: value is data, not a format string, so '%'
	// characters must be written literally (go vet flags the old call).
	fmt.Fprint(file, value)
	file.Chmod(0600)
	setPermissions(propertyPath, 0600)
	return nil
}
// PropertySetup creates the plugin config root
func PropertySetup(pluginName string) (err error) {
pluginConfigRoot := getPluginConfigPath(pluginName)
if err = os.MkdirAll(pluginConfigRoot, 0755); err != nil {
return
}
return setPermissions(pluginConfigRoot, 0755)
}
func getPropertyPath(pluginName string, appName string, property string) string {
pluginAppConfigRoot := getPluginAppPropertyPath(pluginName, appName)
return path.Join(pluginAppConfigRoot, property)
}
// getPluginAppPropertyPath returns the plugin property path for a given plugin/app combination
func getPluginAppPropertyPath(pluginName string, appName string) string {
return path.Join(getPluginConfigPath(pluginName), appName)
}
// getPluginConfigPath returns the plugin property path for a given plugin
func getPluginConfigPath(pluginName string) string {
return path.Join(MustGetEnv("DOKKU_LIB_ROOT"), "config", pluginName)
}
// makePluginAppPropertyPath ensures that a property path exists
func makePluginAppPropertyPath(pluginName string, appName string) (err error) {
pluginAppConfigRoot := getPluginAppPropertyPath(pluginName, appName)
if err = os.MkdirAll(pluginAppConfigRoot, 0755); err != nil {
return
}
return setPermissions(pluginAppConfigRoot, 0755)
}
// setPermissions sets the proper owner and filemode for a given file.
// Owner and group come from DOKKU_SYSTEM_USER / DOKKU_SYSTEM_GROUP,
// falling back to "dokku" for either when unset.
func setPermissions(path string, fileMode os.FileMode) (err error) {
	if err = os.Chmod(path, fileMode); err != nil {
		return err
	}
	systemGroup := os.Getenv("DOKKU_SYSTEM_GROUP")
	systemUser := os.Getenv("DOKKU_SYSTEM_USER")
	if systemGroup == "" {
		systemGroup = "dokku"
	}
	if systemUser == "" {
		systemUser = "dokku"
	}
	group, err := user.LookupGroup(systemGroup)
	if err != nil {
		return
	}
	// Named u (not user) so the os/user package is not shadowed.
	u, err := user.Lookup(systemUser)
	if err != nil {
		return
	}
	uid, err := strconv.Atoi(u.Uid)
	if err != nil {
		return
	}
	gid, err := strconv.Atoi(group.Gid)
	if err != nil {
		return
	}
	return os.Chown(path, uid, gid)
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"log"
"net/http"
"net/url"
"os"
"strings"
"time"
)
type indexHandler struct {
dir http.Dir
}
// yanked out of golang's library
var htmlReplacer = strings.NewReplacer(
"&", "&",
"<", "<",
">", ">",
// """ is shorter than """.
`"`, """,
// "'" is shorter than "'" and apos was not in HTML until HTML5.
"'", "'",
)
const indexPage = "/index.html"
// Open serves name from the backing directory, except that when no
// directory was configured and the index page is requested, the page is
// synthesized from lines read on standard input.
func (fs *indexHandler) Open(name string) (http.File, error) {
	if args.directory != "" || name != indexPage {
		return fs.dir.Open(name)
	}
	var b bytes.Buffer
	// build the index page from the standard input instead
	fmt.Fprintf(&b, "<pre>\n")
	// loop through the standard input line by line and add it as
	// an entry
	s := bufio.NewScanner(os.Stdin)
	for s.Scan() {
		name = s.Text()
		log.Println(name)
		// name may contain '?' or '#', which must be escaped to remain
		// part of the URL path, and not indicate the start of a query
		// string or fragment. Named u so the net/url package is not shadowed.
		u := url.URL{Path: name}
		fmt.Fprintf(&b, "<a href=\"%s\">%s</a>\n", u.String(), htmlReplacer.Replace(name))
	}
	fmt.Fprintf(&b, "</pre>\n")
	return &memFile{bytes.NewReader(b.Bytes()), time.Now()}, nil
}
// memFile is an in-memory http.File backed by a bytes.Reader.
type memFile struct {
	*bytes.Reader
	modTime time.Time
}

// Stat describes the synthesized index page — the only file this type is
// ever used for.
func (f *memFile) Stat() (os.FileInfo, error) {
	info := &memOSFile{
		name:    indexPage,
		size:    f.Size(),
		mode:    0,
		modTime: f.modTime,
		isDir:   false,
	}
	return info, nil
}

// Readdir exists only to complete the http.File interface; it is never
// called in practice.
func (f *memFile) Readdir(count int) ([]os.FileInfo, error) {
	return nil, nil
}

// Close is a no-op: there is no underlying resource to release.
func (f *memFile) Close() error {
	return nil
}
// memOSFile is an in-memory os.FileInfo implementation describing the
// synthesized index page.
type memOSFile struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
	isDir   bool
}

// Name returns the file's name.
func (f *memOSFile) Name() string { return f.name }

// Size returns the length in bytes.
func (f *memOSFile) Size() int64 { return f.size }

// Mode returns the file mode bits.
func (f *memOSFile) Mode() os.FileMode { return f.mode }

// ModTime returns the modification time.
func (f *memOSFile) ModTime() time.Time { return f.modTime }

// IsDir reports whether the entry describes a directory.
func (f *memOSFile) IsDir() bool { return f.isDir }

// Sys returns the underlying data source (always nil here).
func (f *memOSFile) Sys() interface{} { return nil }
minor cleanup
package main
import (
"bufio"
"bytes"
"fmt"
"log"
"net/http"
"net/url"
"os"
"strings"
"time"
)
type indexHandler struct {
dir http.Dir
}
// yanked out of golang's library
var htmlReplacer = strings.NewReplacer(
"&", "&",
"<", "<",
">", ">",
// """ is shorter than """.
`"`, """,
// "'" is shorter than "'" and apos was not in HTML until HTML5.
"'", "'",
)
const indexPage = "/index.html"
// Open serves name from the backing directory, except that when no
// directory was configured and the index page is requested, the page is
// synthesized from lines read on standard input.
func (fs *indexHandler) Open(name string) (http.File, error) {
	if args.directory != "" || name != indexPage {
		return fs.dir.Open(name)
	}
	var b bytes.Buffer
	// build the index page from the standard input instead
	fmt.Fprintf(&b, "<pre>\n")
	// loop through the standard input line by line and add it as
	// an entry
	s := bufio.NewScanner(os.Stdin)
	for s.Scan() {
		name = s.Text()
		log.Println(name)
		// name may contain '?' or '#', which must be escaped to remain
		// part of the URL path, and not indicate the start of a query
		// string or fragment.
		url := url.URL{Path: name}
		fmt.Fprintf(&b, "<a href=\"%s\">%s</a>\n", url.String(), htmlReplacer.Replace(name))
	}
	fmt.Fprintf(&b, "</pre>\n")
	return &memFile{bytes.NewReader(b.Bytes()), time.Now()}, nil
}
// implements http.File
type memFile struct {
*bytes.Reader
modTime time.Time
}
// only need for index.html
func (f *memFile) Stat() (os.FileInfo, error) {
return &memOSFile{indexPage,
f.Size(),
0,
f.modTime,
false}, nil
}
func (f *memFile) Readdir(count int) ([]os.FileInfo, error) {
// will not be called, here to complete the http.FileInterace
return nil, nil
}
func (f *memFile) Close() error {
return nil
}
// implements os.FileInfo
type memOSFile struct {
name string
size int64
mode os.FileMode
modTime time.Time
isDir bool
}
func (f *memOSFile) Name() string {
return f.name
}
func (f *memOSFile) Size() int64 {
return f.size
}
func (f *memOSFile) Mode() os.FileMode {
return f.mode
}
func (f *memOSFile) ModTime() time.Time {
return f.modTime
}
func (f *memOSFile) IsDir() bool {
return f.isDir
}
func (f *memOSFile) Sys() interface{} {
return nil
}
|
// Copyright (c) Ilia Kravets, 2015. All rights reserved. PROVIDED "AS IS"
// WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED. See LICENSE file for details.
package efhsim
import (
"fmt"
"io"
"log"
"my/itto/verify/packet/itto"
"my/itto/verify/sim"
)
const (
EFHM_DEFINITION = 0
EFHM_TRADE = 1
EFHM_QUOTE = 2
EFHM_ORDER = 3
EFHM_REFRESHED = 100
EFHM_STOPPED = 101
)
const (
EFH_ORDER_BID = 1
EFH_ORDER_ASK = -1
)
type efhm_header struct {
Type uint8
TickCondition uint8
QueuePosition uint16
UnderlyingId uint32
SecurityId uint32
SequenceNumber uint32
TimeStamp uint64
}
type efhm_order struct {
efhm_header
TradeStatus uint8
OrderType uint8
OrderSide int8
_pad byte
Price uint32
Size uint32
AoNSize uint32
CustomerSize uint32
CustomerAoNSize uint32
BDSize uint32
BDAoNSize uint32
}
type efhm_quote struct {
efhm_header
TradeStatus uint8
_pad [3]byte
BidPrice uint32
BidSize uint32
BidOrderSize uint32
BidAoNSize uint32
BidCustomerSize uint32
BidCustomerAoNSize uint32
BidBDSize uint32
BidBDAoNSize uint32
AskPrice uint32
AskSize uint32
AskOrderSize uint32
AskAoNSize uint32
AskCustomerSize uint32
AskCustomerAoNSize uint32
AskBDSize uint32
AskBDAoNSize uint32
}
func (m efhm_header) String() string {
switch m.Type {
case EFHM_QUOTE, EFHM_ORDER:
return fmt.Sprintf("HDR{T:%d, TC:%d, QP:%d, UId:%08x, SId:%08x, SN:%d, TS:%08x}",
m.Type,
m.TickCondition,
m.QueuePosition,
m.UnderlyingId,
m.SecurityId,
m.SequenceNumber,
m.TimeStamp,
)
default:
return fmt.Sprintf("HDR{T:%d}", m.Type)
}
}
func (m efhm_order) String() string {
return fmt.Sprintf("%s ORD{TS:%d, OT:%d, OS:%+d, P:%10d, S:%d, AS:%d, CS:%d, CAS:%d, BS:%d, BAS:%d}",
m.efhm_header,
m.TradeStatus,
m.OrderType,
m.OrderSide,
m.Price,
m.Size,
m.AoNSize,
m.CustomerSize,
m.CustomerAoNSize,
m.BDSize,
m.BDAoNSize,
)
}
func (m efhm_quote) String() string {
return fmt.Sprintf("%s QUO{TS:%d, "+
"Bid{P:%10d, S:%d, OS:%d, AS:%d, CS:%d, CAS:%d, BS:%d, BAS:%d}, "+
"Ask{P:%10d, S:%d, OS:%d, AS:%d, CS:%d, CAS:%d, BS:%d, BAS:%d}"+
"}",
m.efhm_header,
m.TradeStatus,
m.BidPrice,
m.BidSize,
m.BidOrderSize,
m.BidAoNSize,
m.BidCustomerSize,
m.BidCustomerAoNSize,
m.BidBDSize,
m.BidBDAoNSize,
m.AskPrice,
m.AskSize,
m.AskOrderSize,
m.AskAoNSize,
m.AskCustomerSize,
m.AskCustomerAoNSize,
m.AskBDSize,
m.AskBDAoNSize,
)
}
var _ sim.Observer = &EfhLogger{}
type EfhLogger struct {
w io.Writer
lastMessage *sim.IttoDbMessage
lastOptionId itto.OptionId
consumeOps int
curOps int
ittoSeconds uint32
mode EfhLoggerOutputMode
bid tob
ask tob
}
type tob struct {
Check bool
Side itto.MarketSide
Old sim.PriceLevel
New sim.PriceLevel
}
func NewEfhLogger(w io.Writer) *EfhLogger {
l := &EfhLogger{w: w,
bid: tob{Side: itto.MarketSideBid},
ask: tob{Side: itto.MarketSideAsk},
}
return l
}
type EfhLoggerOutputMode byte
const (
EfhLoggerOutputOrders EfhLoggerOutputMode = iota
EfhLoggerOutputQuotes
)
func (l *EfhLogger) SetOutputMode(mode EfhLoggerOutputMode) {
l.mode = mode
}
func (l *EfhLogger) MessageArrived(idm *sim.IttoDbMessage) {
l.lastMessage = idm
l.bid.Check, l.ask.Check = false, false
switch m := l.lastMessage.Pam.Layer().(type) {
case
*itto.IttoMessageAddOrder,
*itto.IttoMessageSingleSideExecuted,
*itto.IttoMessageSingleSideExecutedWithPrice,
*itto.IttoMessageOrderCancel,
*itto.IttoMessageSingleSideDelete,
*itto.IttoMessageBlockSingleSideDelete:
l.consumeOps = 1
case
*itto.IttoMessageSingleSideReplace,
*itto.IttoMessageSingleSideUpdate:
l.consumeOps = 2
case
*itto.IttoMessageAddQuote,
*itto.IttoMessageQuoteDelete:
l.consumeOps = 2
l.bid.Check, l.ask.Check = true, true
case
*itto.IttoMessageQuoteReplace:
l.consumeOps = 4
l.bid.Check, l.ask.Check = true, true
case *itto.IttoMessageSeconds:
l.ittoSeconds = m.Second
default:
log.Println("wrong message type ", l.lastMessage.Pam.Layer())
return
}
l.curOps = 0
}
func (*EfhLogger) OperationAppliedToOrders(sim.IttoOperation) {}
func (l *EfhLogger) BeforeBookUpdate(book sim.Book, operation sim.IttoOperation) {
if l.curOps > 0 {
return
}
l.lastOptionId = operation.GetOptionId()
if l.lastOptionId.Invalid() {
return
}
switch operation.GetSide() {
case itto.MarketSideBid:
l.bid.Check = true
case itto.MarketSideAsk:
l.ask.Check = true
default:
log.Fatalln("wrong operation side")
}
l.bid.update(book, l.lastOptionId, TobUpdateOld)
l.ask.update(book, l.lastOptionId, TobUpdateOld)
}
func (l *EfhLogger) AfterBookUpdate(book sim.Book, operation sim.IttoOperation) {
l.curOps++
if l.curOps < l.consumeOps {
return
}
l.curOps = 0
if l.lastOptionId.Invalid() {
return
}
l.bid.update(book, l.lastOptionId, TobUpdateNew)
l.ask.update(book, l.lastOptionId, TobUpdateNew)
if l.mode == EfhLoggerOutputOrders {
l.genUpdateOrders(l.bid)
l.genUpdateOrders(l.ask)
} else {
l.genUpdateQuotes()
}
}
func (l *EfhLogger) genUpdateOrders(tob tob) {
if !tob.updated() {
return
}
eo := efhm_order{
efhm_header: l.genUpdateHeader(EFHM_ORDER),
Price: uint32(tob.New.Price),
Size: uint32(tob.New.Size),
OrderType: 1,
}
switch tob.Side {
case itto.MarketSideBid:
eo.OrderSide = EFH_ORDER_BID
case itto.MarketSideAsk:
eo.OrderSide = EFH_ORDER_ASK
}
fmt.Fprintln(l.w, eo)
}
func (l *EfhLogger) genUpdateQuotes() {
if !l.bid.updated() && !l.ask.updated() {
return
}
eq := efhm_quote{
efhm_header: l.genUpdateHeader(EFHM_QUOTE),
BidPrice: uint32(l.bid.New.Price),
BidSize: uint32(l.bid.New.Size),
AskPrice: uint32(l.ask.New.Price),
AskSize: uint32(l.ask.New.Size),
}
fmt.Fprintln(l.w, eq)
}
func (l *EfhLogger) genUpdateHeader(messageType uint8) efhm_header {
return efhm_header{
Type: messageType,
SecurityId: uint32(l.lastOptionId),
SequenceNumber: uint32(l.lastMessage.Pam.SequenceNumber()), // FIXME MoldUDP64 seqNum is 64 bit
TimeStamp: uint64(l.ittoSeconds)*1e9 + uint64(l.lastMessage.Pam.Layer().(itto.IttoMessageCommon).Base().Timestamp),
}
}
type TobUpdate byte
const (
TobUpdateOld TobUpdate = iota
TobUpdateNew
)
func (tob *tob) update(book sim.Book, oid itto.OptionId, u TobUpdate) {
pl := &tob.New
if u == TobUpdateOld {
pl = &tob.Old
}
*pl = sim.PriceLevel{}
if tob.Check {
if pls := book.GetTop(oid, tob.Side, 1); len(pls) > 0 {
*pl = pls[0]
}
}
}
func (tob *tob) updated() bool {
return tob.Check && tob.Old != tob.New
}
EfhLogger: fix quotes output
// Copyright (c) Ilia Kravets, 2015. All rights reserved. PROVIDED "AS IS"
// WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED. See LICENSE file for details.
package efhsim
import (
"fmt"
"io"
"log"
"my/itto/verify/packet/itto"
"my/itto/verify/sim"
)
const (
EFHM_DEFINITION = 0
EFHM_TRADE = 1
EFHM_QUOTE = 2
EFHM_ORDER = 3
EFHM_REFRESHED = 100
EFHM_STOPPED = 101
)
const (
EFH_ORDER_BID = 1
EFH_ORDER_ASK = -1
)
type efhm_header struct {
Type uint8
TickCondition uint8
QueuePosition uint16
UnderlyingId uint32
SecurityId uint32
SequenceNumber uint32
TimeStamp uint64
}
type efhm_order struct {
efhm_header
TradeStatus uint8
OrderType uint8
OrderSide int8
_pad byte
Price uint32
Size uint32
AoNSize uint32
CustomerSize uint32
CustomerAoNSize uint32
BDSize uint32
BDAoNSize uint32
}
type efhm_quote struct {
efhm_header
TradeStatus uint8
_pad [3]byte
BidPrice uint32
BidSize uint32
BidOrderSize uint32
BidAoNSize uint32
BidCustomerSize uint32
BidCustomerAoNSize uint32
BidBDSize uint32
BidBDAoNSize uint32
AskPrice uint32
AskSize uint32
AskOrderSize uint32
AskAoNSize uint32
AskCustomerSize uint32
AskCustomerAoNSize uint32
AskBDSize uint32
AskBDAoNSize uint32
}
// String renders the header: quote and order messages get the full field
// dump, every other message type prints only its type tag.
func (m efhm_header) String() string {
	switch m.Type {
	case EFHM_QUOTE, EFHM_ORDER:
		return fmt.Sprintf("HDR{T:%d, TC:%d, QP:%d, UId:%08x, SId:%08x, SN:%d, TS:%08x}",
			m.Type,
			m.TickCondition,
			m.QueuePosition,
			m.UnderlyingId,
			m.SecurityId,
			m.SequenceNumber,
			m.TimeStamp,
		)
	default:
		return fmt.Sprintf("HDR{T:%d}", m.Type)
	}
}
func (m efhm_order) String() string {
return fmt.Sprintf("%s ORD{TS:%d, OT:%d, OS:%+d, P:%10d, S:%d, AS:%d, CS:%d, CAS:%d, BS:%d, BAS:%d}",
m.efhm_header,
m.TradeStatus,
m.OrderType,
m.OrderSide,
m.Price,
m.Size,
m.AoNSize,
m.CustomerSize,
m.CustomerAoNSize,
m.BDSize,
m.BDAoNSize,
)
}
func (m efhm_quote) String() string {
return fmt.Sprintf("%s QUO{TS:%d, "+
"Bid{P:%10d, S:%d, OS:%d, AS:%d, CS:%d, CAS:%d, BS:%d, BAS:%d}, "+
"Ask{P:%10d, S:%d, OS:%d, AS:%d, CS:%d, CAS:%d, BS:%d, BAS:%d}"+
"}",
m.efhm_header,
m.TradeStatus,
m.BidPrice,
m.BidSize,
m.BidOrderSize,
m.BidAoNSize,
m.BidCustomerSize,
m.BidCustomerAoNSize,
m.BidBDSize,
m.BidBDAoNSize,
m.AskPrice,
m.AskSize,
m.AskOrderSize,
m.AskAoNSize,
m.AskCustomerSize,
m.AskCustomerAoNSize,
m.AskBDSize,
m.AskBDAoNSize,
)
}
var _ sim.Observer = &EfhLogger{}
type EfhLogger struct {
w io.Writer
lastMessage *sim.IttoDbMessage
lastOptionId itto.OptionId
consumeOps int
curOps int
ittoSeconds uint32
mode EfhLoggerOutputMode
bid tob
ask tob
}
type tob struct {
Check bool
Side itto.MarketSide
Old sim.PriceLevel
New sim.PriceLevel
}
func NewEfhLogger(w io.Writer) *EfhLogger {
l := &EfhLogger{w: w,
bid: tob{Side: itto.MarketSideBid},
ask: tob{Side: itto.MarketSideAsk},
}
return l
}
type EfhLoggerOutputMode byte
const (
EfhLoggerOutputOrders EfhLoggerOutputMode = iota
EfhLoggerOutputQuotes
)
func (l *EfhLogger) SetOutputMode(mode EfhLoggerOutputMode) {
l.mode = mode
}
func (l *EfhLogger) MessageArrived(idm *sim.IttoDbMessage) {
l.lastMessage = idm
l.bid.Check, l.ask.Check = false, false
switch m := l.lastMessage.Pam.Layer().(type) {
case
*itto.IttoMessageAddOrder,
*itto.IttoMessageSingleSideExecuted,
*itto.IttoMessageSingleSideExecutedWithPrice,
*itto.IttoMessageOrderCancel,
*itto.IttoMessageSingleSideDelete,
*itto.IttoMessageBlockSingleSideDelete:
l.consumeOps = 1
case
*itto.IttoMessageSingleSideReplace,
*itto.IttoMessageSingleSideUpdate:
l.consumeOps = 2
case
*itto.IttoMessageAddQuote,
*itto.IttoMessageQuoteDelete:
l.consumeOps = 2
l.bid.Check, l.ask.Check = true, true
case
*itto.IttoMessageQuoteReplace:
l.consumeOps = 4
l.bid.Check, l.ask.Check = true, true
case *itto.IttoMessageSeconds:
l.ittoSeconds = m.Second
default:
log.Println("wrong message type ", l.lastMessage.Pam.Layer())
return
}
l.curOps = 0
}
func (*EfhLogger) OperationAppliedToOrders(sim.IttoOperation) {}
func (l *EfhLogger) BeforeBookUpdate(book sim.Book, operation sim.IttoOperation) {
if l.curOps > 0 {
return
}
l.lastOptionId = operation.GetOptionId()
if l.lastOptionId.Invalid() {
return
}
switch operation.GetSide() {
case itto.MarketSideBid:
l.bid.Check = true
case itto.MarketSideAsk:
l.ask.Check = true
default:
log.Fatalln("wrong operation side")
}
l.bid.update(book, l.lastOptionId, TobUpdateOld)
l.ask.update(book, l.lastOptionId, TobUpdateOld)
}
// AfterBookUpdate fires after each applied operation, but only emits
// output once all operations expected for the current message (consumeOps,
// set in MessageArrived) have been applied. It then refreshes both
// top-of-book sides and writes order or quote records per the mode.
func (l *EfhLogger) AfterBookUpdate(book sim.Book, operation sim.IttoOperation) {
	l.curOps++
	if l.curOps < l.consumeOps {
		// Still in the middle of a multi-operation message.
		return
	}
	l.curOps = 0
	if l.lastOptionId.Invalid() {
		return
	}
	// In quotes mode both sides are re-read unconditionally (force) so the
	// emitted quote always carries a complete bid/ask snapshot.
	u := TobUpdateNew
	if l.mode == EfhLoggerOutputQuotes {
		u = TobUpdateNewForce
	}
	l.bid.update(book, l.lastOptionId, u)
	l.ask.update(book, l.lastOptionId, u)
	if l.mode == EfhLoggerOutputOrders {
		l.genUpdateOrders(l.bid)
		l.genUpdateOrders(l.ask)
	} else {
		l.genUpdateQuotes()
	}
}
func (l *EfhLogger) genUpdateOrders(tob tob) {
if !tob.updated() {
return
}
eo := efhm_order{
efhm_header: l.genUpdateHeader(EFHM_ORDER),
Price: uint32(tob.New.Price),
Size: uint32(tob.New.Size),
OrderType: 1,
}
switch tob.Side {
case itto.MarketSideBid:
eo.OrderSide = EFH_ORDER_BID
case itto.MarketSideAsk:
eo.OrderSide = EFH_ORDER_ASK
}
fmt.Fprintln(l.w, eo)
}
func (l *EfhLogger) genUpdateQuotes() {
if !l.bid.updated() && !l.ask.updated() {
return
}
eq := efhm_quote{
efhm_header: l.genUpdateHeader(EFHM_QUOTE),
BidPrice: uint32(l.bid.New.Price),
BidSize: uint32(l.bid.New.Size),
AskPrice: uint32(l.ask.New.Price),
AskSize: uint32(l.ask.New.Size),
}
fmt.Fprintln(l.w, eq)
}
func (l *EfhLogger) genUpdateHeader(messageType uint8) efhm_header {
return efhm_header{
Type: messageType,
SecurityId: uint32(l.lastOptionId),
SequenceNumber: uint32(l.lastMessage.Pam.SequenceNumber()), // FIXME MoldUDP64 seqNum is 64 bit
TimeStamp: uint64(l.ittoSeconds)*1e9 + uint64(l.lastMessage.Pam.Layer().(itto.IttoMessageCommon).Base().Timestamp),
}
}
type TobUpdate byte
const (
TobUpdateOld TobUpdate = iota
TobUpdateNew
TobUpdateNewForce
)
// update refreshes one side's captured top-of-book level: TobUpdateOld
// writes into tob.Old, the other kinds into tob.New. The slot is zeroed
// first and only re-read from the book when this side is flagged for
// checking or the caller forces the read with TobUpdateNewForce.
func (tob *tob) update(book sim.Book, oid itto.OptionId, u TobUpdate) {
	pl := &tob.New
	if u == TobUpdateOld {
		pl = &tob.Old
	}
	// Start from the zero PriceLevel so an emptied side compares as empty.
	*pl = sim.PriceLevel{}
	if tob.Check || u == TobUpdateNewForce {
		if pls := book.GetTop(oid, tob.Side, 1); len(pls) > 0 {
			*pl = pls[0]
		}
	}
}
// updated reports whether this tracked side's top-of-book changed
// between the old and new snapshots. Untracked sides never report change.
func (tob *tob) updated() bool {
	return tob.Check && tob.Old != tob.New
}
|
package bot
import (
"fmt"
"log"
"sync"
)
// Cmd holds the parsed user's input for easier handling of commands
type Cmd struct {
	Raw     string   // Raw is full string passed to the command
	Channel string   // Channel where the command was called
	User    *User    // User who sent the message
	Message string   // Full string without the prefix
	Command string   // Command is the first argument passed to the bot
	RawArgs string   // Raw arguments after the command
	Args    []string // Arguments as array
}

// PassiveCmd holds the information which will be passed to passive commands when receiving a message
type PassiveCmd struct {
	Raw     string // Raw message sent to the channel
	Channel string // Channel which the message was sent to
	User    *User  // User who sent this message
}

// PeriodicConfig holds a cron specification for periodically notifying the configured channels
type PeriodicConfig struct {
	CronSpec string                               // CronSpec that schedules some function
	Channels []string                             // A list of channels to notify
	CmdFunc  func(channel string) (string, error) // func to be executed at the period specified on CronSpec
}

// User holds user id (nick) and real name
type User struct {
	Nick     string
	RealName string
}

// customCommand is the internal registry entry for an active command;
// exactly one of CmdFuncV1/CmdFuncV2 is set, selected by Version.
type customCommand struct {
	Version     int
	Cmd         string
	CmdFuncV1   activeCmdFuncV1
	CmdFuncV2   activeCmdFuncV2
	Description string
	ExampleArgs string
}

// incomingMessage bundles a received message with the bot's current nick.
type incomingMessage struct {
	Channel        string
	Text           string
	User           *User
	BotCurrentNick string
}

// CmdResult is the result message of V2 commands
type CmdResult struct {
	Channel string // The channel where the bot should send the message
	Message string // The message to be sent
}

// Command handler versions (see customCommand.Version).
const (
	v1 = iota
	v2
)

// User-facing and log message format strings.
const (
	commandNotAvailable   = "Command %v not available."
	noCommandsAvailable   = "No commands available."
	errorExecutingCommand = "Error executing %s: %s"
)

type passiveCmdFunc func(cmd *PassiveCmd) (string, error)
type activeCmdFuncV1 func(cmd *Cmd) (string, error)
type activeCmdFuncV2 func(cmd *Cmd) (CmdResult, error)

// Package-level command registries, populated via the Register* funcs.
var (
	commands         = make(map[string]*customCommand)
	passiveCommands  = make(map[string]passiveCmdFunc)
	periodicCommands = make(map[string]PeriodicConfig)
)
// RegisterCommand adds a new command to the bot.
// The command(s) should be registered in the Init() func of your package
// command: String which the user will use to execute the command, example: reverse
// description: Description of the command to use in !help, example: Reverses a string
// exampleArgs: Example args to be displayed in !help <command>, example: string to be reversed
// cmdFunc: Function which will be executed. It will receive a parsed command as a Cmd value
func RegisterCommand(command, description, exampleArgs string, cmdFunc activeCmdFuncV1) {
	commands[command] = &customCommand{
		Version:     v1,
		Cmd:         command,
		CmdFuncV1:   cmdFunc,
		Description: description,
		ExampleArgs: exampleArgs,
	}
}
// RegisterCommandV2 adds a new command to the bot.
// It is the same as RegisterCommand but the command can specify the channel to reply to
// (see CmdResult.Channel).
func RegisterCommandV2(command, description, exampleArgs string, cmdFunc activeCmdFuncV2) {
	commands[command] = &customCommand{
		Version:     v2,
		Cmd:         command,
		CmdFuncV2:   cmdFunc,
		Description: description,
		ExampleArgs: exampleArgs,
	}
}
// RegisterPassiveCommand adds a new passive command to the bot.
// The command should be registered in the Init() func of your package
// Passive commands receives all the text posted to a channel without any parsing
// command: String used to identify the command, for internal use only (ex: logs)
// cmdFunc: Function which will be executed. It will received the raw message, channel and nick
func RegisterPassiveCommand(command string, cmdFunc func(cmd *PassiveCmd) (string, error)) {
passiveCommands[command] = cmdFunc
}
// RegisterPeriodicCommand adds a command that is run periodically.
// The command should be registered in the Init() func of your package
// command: String used to identify the command, for internal use only (ex: logs)
// config: PeriodicConfig which specifies the CronSpec, the channel list and
// the func (config.CmdFunc) that gets triggered on that schedule
func RegisterPeriodicCommand(command string, config PeriodicConfig) {
	periodicCommands[command] = config
}
// Disable allows disabling commands that were registered.
// It is useful when running multiple bot instances to disable some plugins like url which
// is already present on some protocols.
func (b *Bot) Disable(cmds []string) {
	b.disabledCmds = append(b.disabledCmds, cmds...)
}
// executePassiveCommands fans the message out to every registered,
// non-disabled passive command concurrently and blocks until all finish.
// The mutex serializes calls into the protocol handler.
func (b *Bot) executePassiveCommands(cmd *PassiveCmd) {
	var wg sync.WaitGroup
	mutex := &sync.Mutex{}
	for k, v := range passiveCommands {
		if b.isDisabled(k) {
			continue
		}
		cmdFunc := v // per-iteration copy so each goroutine sees its own func value
		wg.Add(1)
		go func() {
			defer wg.Done()
			result, err := cmdFunc(cmd)
			if err != nil {
				log.Println(err)
			} else {
				mutex.Lock()
				b.handlers.Response(cmd.Channel, result, cmd.User)
				mutex.Unlock()
			}
		}()
	}
	wg.Wait()
}
// isDisabled reports whether cmd was turned off via Disable.
func (b *Bot) isDisabled(cmd string) bool {
	for i := range b.disabledCmds {
		if b.disabledCmds[i] == cmd {
			return true
		}
	}
	return false
}
// handleCmd looks up and executes the active command parsed in c,
// sending any resulting message back through the protocol handlers.
// Errors from the command func are reported via checkCmdError.
func (b *Bot) handleCmd(c *Cmd) {
	cmd := commands[c.Command]
	if cmd == nil {
		log.Printf("Command not found %v", c.Command)
		return
	}
	if b.isDisabled(c.Command) {
		return
	}
	switch cmd.Version {
	case v1:
		message, err := cmd.CmdFuncV1(c)
		b.checkCmdError(err, c)
		if message != "" {
			b.handlers.Response(c.Channel, message, c.User)
		}
	case v2:
		result, err := cmd.CmdFuncV2(c)
		b.checkCmdError(err, c)
		// V2 commands may leave Channel empty to reply where invoked.
		if result.Channel == "" {
			result.Channel = c.Channel
		}
		if result.Message != "" {
			b.handlers.Response(result.Channel, result.Message, c.User)
		}
	}
}
// checkCmdError logs any error returned by a command and also reports
// it back to the channel the command came from.
func (b *Bot) checkCmdError(err error, c *Cmd) {
	if err != nil {
		errorMsg := fmt.Sprintf(errorExecutingCommand, c.Command, err.Error())
		// Use Print, not Printf: errorMsg contains user/command text, so a
		// literal '%' in it would be misread as a format directive (go vet).
		log.Print(errorMsg)
		b.handlers.Response(c.Channel, errorMsg, c.User)
	}
}
Tabs instead of spaces
package bot
import (
"fmt"
"log"
"sync"
)
// Cmd holds the parsed user's input for easier handling of commands
type Cmd struct {
	Raw     string   // Raw is full string passed to the command
	Channel string   // Channel where the command was called
	User    *User    // User who sent the message
	Message string   // Full string without the prefix
	Command string   // Command is the first argument passed to the bot
	RawArgs string   // Raw arguments after the command
	Args    []string // Arguments as array
}

// PassiveCmd holds the information which will be passed to passive commands when receiving a message
type PassiveCmd struct {
	Raw     string // Raw message sent to the channel
	Channel string // Channel which the message was sent to
	User    *User  // User who sent this message
}

// PeriodicConfig holds a cron specification for periodically notifying the configured channels
type PeriodicConfig struct {
	CronSpec string                               // CronSpec that schedules some function
	Channels []string                             // A list of channels to notify
	CmdFunc  func(channel string) (string, error) // func to be executed at the period specified on CronSpec
}

// User holds user id (nick) and real name
type User struct {
	Nick     string
	RealName string
}

// customCommand is the internal registry entry for an active command;
// exactly one of CmdFuncV1/CmdFuncV2 is set, selected by Version.
type customCommand struct {
	Version     int
	Cmd         string
	CmdFuncV1   activeCmdFuncV1
	CmdFuncV2   activeCmdFuncV2
	Description string
	ExampleArgs string
}

// incomingMessage bundles a received message with the bot's current nick.
type incomingMessage struct {
	Channel        string
	Text           string
	User           *User
	BotCurrentNick string
}

// CmdResult is the result message of V2 commands
type CmdResult struct {
	Channel string // The channel where the bot should send the message
	Message string // The message to be sent
}

// Command handler versions (see customCommand.Version).
const (
	v1 = iota
	v2
)

// User-facing and log message format strings.
const (
	commandNotAvailable   = "Command %v not available."
	noCommandsAvailable   = "No commands available."
	errorExecutingCommand = "Error executing %s: %s"
)

type passiveCmdFunc func(cmd *PassiveCmd) (string, error)
type activeCmdFuncV1 func(cmd *Cmd) (string, error)
type activeCmdFuncV2 func(cmd *Cmd) (CmdResult, error)

// Package-level command registries, populated via the Register* funcs.
var (
	commands         = make(map[string]*customCommand)
	passiveCommands  = make(map[string]passiveCmdFunc)
	periodicCommands = make(map[string]PeriodicConfig)
)
// RegisterCommand adds a new command to the bot.
// The command(s) should be registered in the Init() func of your package
// command: String which the user will use to execute the command, example: reverse
// description: Description of the command to use in !help, example: Reverses a string
// exampleArgs: Example args to be displayed in !help <command>, example: string to be reversed
// cmdFunc: Function which will be executed. It will receive a parsed command as a Cmd value
func RegisterCommand(command, description, exampleArgs string, cmdFunc activeCmdFuncV1) {
	commands[command] = &customCommand{
		Version:     v1,
		Cmd:         command,
		CmdFuncV1:   cmdFunc,
		Description: description,
		ExampleArgs: exampleArgs,
	}
}
// RegisterCommandV2 adds a new command to the bot.
// It is the same as RegisterCommand but the command can specify the channel to reply to
// (see CmdResult.Channel).
func RegisterCommandV2(command, description, exampleArgs string, cmdFunc activeCmdFuncV2) {
	commands[command] = &customCommand{
		Version:     v2,
		Cmd:         command,
		CmdFuncV2:   cmdFunc,
		Description: description,
		ExampleArgs: exampleArgs,
	}
}
// RegisterPassiveCommand adds a new passive command to the bot.
// The command should be registered in the Init() func of your package
// Passive commands receives all the text posted to a channel without any parsing
// command: String used to identify the command, for internal use only (ex: logs)
// cmdFunc: Function which will be executed. It will received the raw message, channel and nick
func RegisterPassiveCommand(command string, cmdFunc func(cmd *PassiveCmd) (string, error)) {
passiveCommands[command] = cmdFunc
}
// RegisterPeriodicCommand adds a command that is run periodically.
// The command should be registered in the Init() func of your package
// command: String used to identify the command, for internal use only (ex: logs)
// config: PeriodicConfig which specifies the CronSpec, the channel list and
// the func (config.CmdFunc) that gets triggered on that schedule
func RegisterPeriodicCommand(command string, config PeriodicConfig) {
	periodicCommands[command] = config
}
// Disable allows disabling commands that were registered.
// It is useful when running multiple bot instances to disable some plugins like url which
// is already present on some protocols.
func (b *Bot) Disable(cmds []string) {
	b.disabledCmds = append(b.disabledCmds, cmds...)
}
// executePassiveCommands fans the message out to every registered,
// non-disabled passive command concurrently and blocks until all finish.
// The mutex serializes calls into the protocol handler.
func (b *Bot) executePassiveCommands(cmd *PassiveCmd) {
	var wg sync.WaitGroup
	mutex := &sync.Mutex{}
	for k, v := range passiveCommands {
		if b.isDisabled(k) {
			continue
		}
		cmdFunc := v // per-iteration copy so each goroutine sees its own func value
		wg.Add(1)
		go func() {
			defer wg.Done()
			result, err := cmdFunc(cmd)
			if err != nil {
				log.Println(err)
			} else {
				mutex.Lock()
				b.handlers.Response(cmd.Channel, result, cmd.User)
				mutex.Unlock()
			}
		}()
	}
	wg.Wait()
}
// isDisabled reports whether cmd was turned off via Disable.
func (b *Bot) isDisabled(cmd string) bool {
	for i := range b.disabledCmds {
		if b.disabledCmds[i] == cmd {
			return true
		}
	}
	return false
}
// handleCmd looks up and executes the active command parsed in c,
// sending any resulting message back through the protocol handlers.
// Errors from the command func are reported via checkCmdError.
func (b *Bot) handleCmd(c *Cmd) {
	cmd := commands[c.Command]
	if cmd == nil {
		log.Printf("Command not found %v", c.Command)
		return
	}
	if b.isDisabled(c.Command) {
		return
	}
	switch cmd.Version {
	case v1:
		message, err := cmd.CmdFuncV1(c)
		b.checkCmdError(err, c)
		if message != "" {
			b.handlers.Response(c.Channel, message, c.User)
		}
	case v2:
		result, err := cmd.CmdFuncV2(c)
		b.checkCmdError(err, c)
		// V2 commands may leave Channel empty to reply where invoked.
		if result.Channel == "" {
			result.Channel = c.Channel
		}
		if result.Message != "" {
			b.handlers.Response(result.Channel, result.Message, c.User)
		}
	}
}
// checkCmdError logs any error returned by a command and also reports
// it back to the channel the command came from.
func (b *Bot) checkCmdError(err error, c *Cmd) {
	if err != nil {
		errorMsg := fmt.Sprintf(errorExecutingCommand, c.Command, err.Error())
		// Use Print, not Printf: errorMsg contains user/command text, so a
		// literal '%' in it would be misread as a format directive (go vet).
		log.Print(errorMsg)
		b.handlers.Response(c.Channel, errorMsg, c.User)
	}
}
|
package ole
import (
"syscall"
"unicode/utf16"
"unsafe"
)
// Win32/COM procedure entry points, resolved once at package init.
// FindProc lookup errors are deliberately discarded with the blank
// identifier; a failed lookup leaves a nil proc.
var (
	procCoInitialize, _       = modole32.FindProc("CoInitialize")
	procCoInitializeEx, _     = modole32.FindProc("CoInitializeEx")
	procCoUninitialize, _     = modole32.FindProc("CoUninitialize")
	procCoCreateInstance, _   = modole32.FindProc("CoCreateInstance")
	procCLSIDFromProgID, _    = modole32.FindProc("CLSIDFromProgID")
	procCLSIDFromString, _    = modole32.FindProc("CLSIDFromString")
	procStringFromCLSID, _    = modole32.FindProc("StringFromCLSID")
	procStringFromIID, _      = modole32.FindProc("StringFromIID")
	procIIDFromString, _      = modole32.FindProc("IIDFromString")
	procGetUserDefaultLCID, _ = modkernel32.FindProc("GetUserDefaultLCID")
	procCopyMemory, _         = modkernel32.FindProc("RtlMoveMemory")
	procVariantInit, _        = modoleaut32.FindProc("VariantInit")
	procVariantClear, _       = modoleaut32.FindProc("VariantClear")
	procSysAllocString, _     = modoleaut32.FindProc("SysAllocString")
	procSysAllocStringLen, _  = modoleaut32.FindProc("SysAllocStringLen")
	procSysFreeString, _      = modoleaut32.FindProc("SysFreeString")
	procSysStringLen, _       = modoleaut32.FindProc("SysStringLen")
	procCreateDispTypeInfo, _ = modoleaut32.FindProc("CreateDispTypeInfo")
	procCreateStdDispatch, _  = modoleaut32.FindProc("CreateStdDispatch")
	procGetActiveObject, _    = modoleaut32.FindProc("GetActiveObject")
	procGetMessageW, _        = moduser32.FindProc("GetMessageW")
	procDispatchMessageW, _   = moduser32.FindProc("DispatchMessageW")
)
// coInitialize initializes the COM library on the current thread.
// A non-zero HRESULT is converted into a non-nil error.
func coInitialize() (err error) {
	// http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx
	// Suggests that no value should be passed to CoInitialized.
	// Could just be Call() since the parameter is optional. <-- Needs testing to be sure.
	hr, _, _ := procCoInitialize.Call(uintptr(0))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// coInitializeEx initializes COM on the current thread with the given
// concurrency model flags (coinit).
func coInitializeEx(coinit uint32) (err error) {
	// http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx
	// Suggests that the first parameter is not only optional but should always be NULL.
	hr, _, _ := procCoInitializeEx.Call(uintptr(0), uintptr(coinit))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// CoInitialize is the exported wrapper; p is accepted for API
// compatibility but always ignored (the API wants NULL here).
func CoInitialize(p uintptr) (err error) {
	// p is ignored and won't be used.
	// Avoid any variable not used errors.
	p = uintptr(0)
	return coInitialize()
}

// CoInitializeEx is the exported wrapper; p is ignored, coinit selects
// the COM concurrency model.
func CoInitializeEx(p uintptr, coinit uint32) (err error) {
	// Avoid any variable not used errors.
	p = uintptr(0)
	return coInitializeEx(coinit)
}

// CoUninitialize closes the COM library on the current thread.
func CoUninitialize() {
	procCoUninitialize.Call()
}
// CLSIDFromProgID looks up the CLSID registered for a ProgID
// (e.g. "Excel.Application"). On failure err is non-nil and the
// returned GUID is the zero value.
func CLSIDFromProgID(progId string) (clsid *GUID, err error) {
	var guid GUID
	lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
	hr, _, _ := procCLSIDFromProgID.Call(lpszProgID, uintptr(unsafe.Pointer(&guid)))
	if hr != 0 {
		err = NewError(hr)
	}
	clsid = &guid
	return
}

// CLSIDFromString parses a CLSID from its "{...}" string form.
func CLSIDFromString(str string) (clsid *GUID, err error) {
	var guid GUID
	lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str)))
	hr, _, _ := procCLSIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
	if hr != 0 {
		err = NewError(hr)
	}
	clsid = &guid
	return
}

// StringFromCLSID formats a CLSID as its "{...}" string form.
// NOTE(review): the API-allocated buffer p does not appear to be freed
// after conversion — confirm ownership/free rules (CoTaskMemFree).
func StringFromCLSID(clsid *GUID) (str string, err error) {
	var p *uint16
	hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p)))
	if hr != 0 {
		err = NewError(hr)
	}
	str = LpOleStrToString(p)
	return
}

// IIDFromString parses an interface ID from its "{...}" string form.
func IIDFromString(progId string) (clsid *GUID, err error) {
	var guid GUID
	lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
	hr, _, _ := procIIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
	if hr != 0 {
		err = NewError(hr)
	}
	clsid = &guid
	return
}

// StringFromIID formats an interface ID as its "{...}" string form.
// NOTE(review): same potential unfreed buffer as StringFromCLSID.
func StringFromIID(iid *GUID) (str string, err error) {
	var p *uint16
	hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p)))
	if hr != 0 {
		err = NewError(hr)
	}
	str = LpOleStrToString(p)
	return
}
// CreateInstance creates a COM object for clsid and returns the
// requested interface (IUnknown when iid is nil).
func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
	if iid == nil {
		iid = IID_IUnknown
	}
	hr, _, _ := procCoCreateInstance.Call(
		uintptr(unsafe.Pointer(clsid)),
		0, // no aggregation (pUnkOuter = NULL)
		CLSCTX_SERVER,
		uintptr(unsafe.Pointer(iid)),
		uintptr(unsafe.Pointer(&unk)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// GetActiveObject retrieves an already-running instance of the object
// identified by clsid, as the requested interface (IUnknown when iid is nil).
func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
	if iid == nil {
		iid = IID_IUnknown
	}
	hr, _, _ := procGetActiveObject.Call(
		uintptr(unsafe.Pointer(clsid)),
		uintptr(unsafe.Pointer(iid)),
		uintptr(unsafe.Pointer(&unk)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}
// VariantInit initializes v via the OLE VariantInit API.
func VariantInit(v *VARIANT) (err error) {
	hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// VariantClear clears v via the OLE VariantClear API.
func VariantClear(v *VARIANT) (err error) {
	hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// SysAllocString allocates a BSTR copy of v via the OLE SysAllocString
// API; the result is nil when allocation fails.
func SysAllocString(v string) (ss *int16) {
	pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v))))
	ss = (*int16)(unsafe.Pointer(pss))
	return
}
// SysAllocStringLen allocates a BSTR copy of v via the OLE
// SysAllocStringLen API; the result is nil when allocation fails.
func SysAllocStringLen(v string) (ss *int16) {
	// Append a NUL before encoding: this guarantees the slice is never
	// empty, so taking &utf16[0] no longer panics for v == "". The NUL
	// is excluded from the length passed to the API (len(utf16)-1),
	// matching the original semantics for non-empty strings.
	utf16 := utf16.Encode([]rune(v + "\x00"))
	ptr := &utf16[0]
	pss, _, _ := procSysAllocStringLen.Call(uintptr(unsafe.Pointer(ptr)), uintptr(len(utf16)-1))
	ss = (*int16)(unsafe.Pointer(pss))
	return
}
// SysFreeString releases a BSTR previously allocated by
// SysAllocString/SysAllocStringLen.
func SysFreeString(v *int16) (err error) {
	hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// SysStringLen returns the length of a BSTR as reported by the OLE
// SysStringLen API.
func SysStringLen(v *int16) uint32 {
	l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v)))
	return uint32(l)
}
// CreateStdDispatch builds a standard IDispatch implementation around
// unk/v using the supplied type information.
func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) {
	hr, _, _ := procCreateStdDispatch.Call(
		uintptr(unsafe.Pointer(unk)),
		v,
		uintptr(unsafe.Pointer(ptinfo)),
		uintptr(unsafe.Pointer(&disp)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// CreateDispTypeInfo builds type information from idata (for use with
// CreateStdDispatch), using the user's default locale.
func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) {
	hr, _, _ := procCreateDispTypeInfo.Call(
		uintptr(unsafe.Pointer(idata)),
		uintptr(GetUserDefaultLCID()),
		uintptr(unsafe.Pointer(&pptinfo)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// copyMemory copies length bytes from src to dest via RtlMoveMemory.
func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {
	procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length))
}

// GetUserDefaultLCID returns the calling user's default locale ID.
func GetUserDefaultLCID() (lcid uint32) {
	ret, _, _ := procGetUserDefaultLCID.Call()
	lcid = uint32(ret)
	return
}
// GetMessage retrieves the next message for hwnd from the thread's
// message queue via GetMessageW.
// NOTE(review): syscall's Proc.Call returns a non-nil error value even
// on success (errno 0), so callers likely need to inspect ret rather
// than err — confirm the intended contract.
func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) {
	r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax))
	ret = int32(r0)
	return
}

// DispatchMessage forwards a retrieved message to its window procedure
// via DispatchMessageW.
func DispatchMessage(msg *Msg) (ret int32) {
	r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg)))
	ret = int32(r0)
	return
}
Fixes SysAllocStringLen to handle empty strings without causing panic.
package ole
import (
"syscall"
"unicode/utf16"
"unsafe"
)
// Win32/COM procedure entry points, resolved once at package init.
// FindProc lookup errors are deliberately discarded with the blank
// identifier; a failed lookup leaves a nil proc.
var (
	procCoInitialize, _       = modole32.FindProc("CoInitialize")
	procCoInitializeEx, _     = modole32.FindProc("CoInitializeEx")
	procCoUninitialize, _     = modole32.FindProc("CoUninitialize")
	procCoCreateInstance, _   = modole32.FindProc("CoCreateInstance")
	procCLSIDFromProgID, _    = modole32.FindProc("CLSIDFromProgID")
	procCLSIDFromString, _    = modole32.FindProc("CLSIDFromString")
	procStringFromCLSID, _    = modole32.FindProc("StringFromCLSID")
	procStringFromIID, _      = modole32.FindProc("StringFromIID")
	procIIDFromString, _      = modole32.FindProc("IIDFromString")
	procGetUserDefaultLCID, _ = modkernel32.FindProc("GetUserDefaultLCID")
	procCopyMemory, _         = modkernel32.FindProc("RtlMoveMemory")
	procVariantInit, _        = modoleaut32.FindProc("VariantInit")
	procVariantClear, _       = modoleaut32.FindProc("VariantClear")
	procSysAllocString, _     = modoleaut32.FindProc("SysAllocString")
	procSysAllocStringLen, _  = modoleaut32.FindProc("SysAllocStringLen")
	procSysFreeString, _      = modoleaut32.FindProc("SysFreeString")
	procSysStringLen, _       = modoleaut32.FindProc("SysStringLen")
	procCreateDispTypeInfo, _ = modoleaut32.FindProc("CreateDispTypeInfo")
	procCreateStdDispatch, _  = modoleaut32.FindProc("CreateStdDispatch")
	procGetActiveObject, _    = modoleaut32.FindProc("GetActiveObject")
	procGetMessageW, _        = moduser32.FindProc("GetMessageW")
	procDispatchMessageW, _   = moduser32.FindProc("DispatchMessageW")
)
// coInitialize initializes the COM library on the current thread.
// A non-zero HRESULT is converted into a non-nil error.
func coInitialize() (err error) {
	// http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx
	// Suggests that no value should be passed to CoInitialized.
	// Could just be Call() since the parameter is optional. <-- Needs testing to be sure.
	hr, _, _ := procCoInitialize.Call(uintptr(0))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// coInitializeEx initializes COM on the current thread with the given
// concurrency model flags (coinit).
func coInitializeEx(coinit uint32) (err error) {
	// http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx
	// Suggests that the first parameter is not only optional but should always be NULL.
	hr, _, _ := procCoInitializeEx.Call(uintptr(0), uintptr(coinit))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// CoInitialize is the exported wrapper; p is accepted for API
// compatibility but always ignored (the API wants NULL here).
func CoInitialize(p uintptr) (err error) {
	// p is ignored and won't be used.
	// Avoid any variable not used errors.
	p = uintptr(0)
	return coInitialize()
}

// CoInitializeEx is the exported wrapper; p is ignored, coinit selects
// the COM concurrency model.
func CoInitializeEx(p uintptr, coinit uint32) (err error) {
	// Avoid any variable not used errors.
	p = uintptr(0)
	return coInitializeEx(coinit)
}

// CoUninitialize closes the COM library on the current thread.
func CoUninitialize() {
	procCoUninitialize.Call()
}
// CLSIDFromProgID looks up the CLSID registered for a ProgID
// (e.g. "Excel.Application"). On failure err is non-nil and the
// returned GUID is the zero value.
func CLSIDFromProgID(progId string) (clsid *GUID, err error) {
	var guid GUID
	lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
	hr, _, _ := procCLSIDFromProgID.Call(lpszProgID, uintptr(unsafe.Pointer(&guid)))
	if hr != 0 {
		err = NewError(hr)
	}
	clsid = &guid
	return
}

// CLSIDFromString parses a CLSID from its "{...}" string form.
func CLSIDFromString(str string) (clsid *GUID, err error) {
	var guid GUID
	lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str)))
	hr, _, _ := procCLSIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
	if hr != 0 {
		err = NewError(hr)
	}
	clsid = &guid
	return
}

// StringFromCLSID formats a CLSID as its "{...}" string form.
// NOTE(review): the API-allocated buffer p does not appear to be freed
// after conversion — confirm ownership/free rules (CoTaskMemFree).
func StringFromCLSID(clsid *GUID) (str string, err error) {
	var p *uint16
	hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p)))
	if hr != 0 {
		err = NewError(hr)
	}
	str = LpOleStrToString(p)
	return
}

// IIDFromString parses an interface ID from its "{...}" string form.
func IIDFromString(progId string) (clsid *GUID, err error) {
	var guid GUID
	lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
	hr, _, _ := procIIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
	if hr != 0 {
		err = NewError(hr)
	}
	clsid = &guid
	return
}

// StringFromIID formats an interface ID as its "{...}" string form.
// NOTE(review): same potential unfreed buffer as StringFromCLSID.
func StringFromIID(iid *GUID) (str string, err error) {
	var p *uint16
	hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p)))
	if hr != 0 {
		err = NewError(hr)
	}
	str = LpOleStrToString(p)
	return
}
// CreateInstance creates a COM object for clsid and returns the
// requested interface (IUnknown when iid is nil).
func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
	if iid == nil {
		iid = IID_IUnknown
	}
	hr, _, _ := procCoCreateInstance.Call(
		uintptr(unsafe.Pointer(clsid)),
		0, // no aggregation (pUnkOuter = NULL)
		CLSCTX_SERVER,
		uintptr(unsafe.Pointer(iid)),
		uintptr(unsafe.Pointer(&unk)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// GetActiveObject retrieves an already-running instance of the object
// identified by clsid, as the requested interface (IUnknown when iid is nil).
func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
	if iid == nil {
		iid = IID_IUnknown
	}
	hr, _, _ := procGetActiveObject.Call(
		uintptr(unsafe.Pointer(clsid)),
		uintptr(unsafe.Pointer(iid)),
		uintptr(unsafe.Pointer(&unk)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}
// VariantInit initializes v via the OLE VariantInit API.
func VariantInit(v *VARIANT) (err error) {
	hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// VariantClear clears v via the OLE VariantClear API.
func VariantClear(v *VARIANT) (err error) {
	hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// SysAllocString allocates a BSTR copy of v via the OLE SysAllocString
// API; the result is nil when allocation fails.
func SysAllocString(v string) (ss *int16) {
	pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v))))
	ss = (*int16)(unsafe.Pointer(pss))
	return
}
// SysAllocStringLen allocates a BSTR copy of v via the OLE
// SysAllocStringLen API; the result is nil when allocation fails.
func SysAllocStringLen(v string) (ss *int16) {
	// The appended NUL guarantees the slice is non-empty, so &utf16[0]
	// is safe even for v == ""; it is excluded from the length passed
	// to the API (len(utf16)-1).
	utf16 := utf16.Encode([]rune(v + "\x00"))
	ptr := &utf16[0]
	pss, _, _ := procSysAllocStringLen.Call(uintptr(unsafe.Pointer(ptr)), uintptr(len(utf16)-1))
	ss = (*int16)(unsafe.Pointer(pss))
	return
}
// SysFreeString releases a BSTR previously allocated by
// SysAllocString/SysAllocStringLen.
func SysFreeString(v *int16) (err error) {
	hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// SysStringLen returns the length of a BSTR as reported by the OLE
// SysStringLen API.
func SysStringLen(v *int16) uint32 {
	l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v)))
	return uint32(l)
}
// CreateStdDispatch builds a standard IDispatch implementation around
// unk/v using the supplied type information.
func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) {
	hr, _, _ := procCreateStdDispatch.Call(
		uintptr(unsafe.Pointer(unk)),
		v,
		uintptr(unsafe.Pointer(ptinfo)),
		uintptr(unsafe.Pointer(&disp)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// CreateDispTypeInfo builds type information from idata (for use with
// CreateStdDispatch), using the user's default locale.
func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) {
	hr, _, _ := procCreateDispTypeInfo.Call(
		uintptr(unsafe.Pointer(idata)),
		uintptr(GetUserDefaultLCID()),
		uintptr(unsafe.Pointer(&pptinfo)))
	if hr != 0 {
		err = NewError(hr)
	}
	return
}

// copyMemory copies length bytes from src to dest via RtlMoveMemory.
func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {
	procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length))
}

// GetUserDefaultLCID returns the calling user's default locale ID.
func GetUserDefaultLCID() (lcid uint32) {
	ret, _, _ := procGetUserDefaultLCID.Call()
	lcid = uint32(ret)
	return
}
// GetMessage retrieves the next message for hwnd from the thread's
// message queue via GetMessageW.
// NOTE(review): syscall's Proc.Call returns a non-nil error value even
// on success (errno 0), so callers likely need to inspect ret rather
// than err — confirm the intended contract.
func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) {
	r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax))
	ret = int32(r0)
	return
}

// DispatchMessage forwards a retrieved message to its window procedure
// via DispatchMessageW.
func DispatchMessage(msg *Msg) (ret int32) {
	r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg)))
	ret = int32(r0)
	return
}
|
package gotana
import (
"encoding/json"
"github.com/go-redis/redis"
)
// genericStruct is a schemaless decoded JSON record.
type genericStruct map[string]interface{}

// DAO abstracts persistence of scraped items.
type DAO interface {
	Write(name string, data []byte) error
	GetLatestItem(name string) error
	GetItems(name string) []string
	CountItems(name string) int64
	ProcessItems(items []string) []genericStruct
}
// SaveItem validates, serializes and persists one scraped item through
// the given DAO. A nil DAO is a no-op. Serialization and persistence
// failures are now logged instead of being silently dropped.
func SaveItem(item SaveableItem, dao DAO) {
	if dao == nil {
		return
	}
	if !item.Validate() {
		Logger().Warning("Item is not valid. Skipping...")
		return
	}
	scraper := item.Scraper().Name
	data, err := item.RecordData()
	if err != nil {
		Logger().Warning("Cannot serialize item. Skipping...")
		return
	}
	if err := dao.Write(scraper, data); err != nil {
		Logger().Warning("Cannot persist item. Skipping...")
	}
}
// RedisDAO is a DAO implementation that stores each scraper's records
// in a Redis set.
type RedisDAO struct {
	client *redis.Client
}

// KeyPrefixed namespaces a key so gotana data does not clash with other
// users of the same Redis instance.
func (r RedisDAO) KeyPrefixed(key string) string {
	return "gotana-" + key
}
// Write appends a raw record to the Redis set for the given scraper
// name. The Redis error is now propagated to the caller instead of
// being discarded (the previous version always returned nil).
func (r RedisDAO) Write(name string, data []byte) error {
	key := r.KeyPrefixed(name)
	// string(data) copies the bytes; the data[:] reslice was redundant.
	return r.client.SAdd(key, string(data)).Err()
}
// GetLatestItem is not implemented yet; it always reports success.
func (r RedisDAO) GetLatestItem(name string) error {
	return nil
}

// GetItems returns all raw records stored for the given scraper name.
func (r RedisDAO) GetItems(name string) []string {
	key := r.KeyPrefixed(name)
	return r.client.SMembers(key).Val()
}

// CountItems returns the number of records stored for the scraper name.
func (r RedisDAO) CountItems(name string) int64 {
	key := r.KeyPrefixed(name)
	return r.client.SCard(key).Val()
}
// ProcessItem decodes a single raw JSON record into a genericStruct.
// Invalid JSON yields an empty map (the decode error is deliberately
// ignored, preserving previous behavior).
func (r RedisDAO) ProcessItem(item string) genericStruct {
	var data = genericStruct{}
	json.Unmarshal([]byte(item), &data)
	return data
}

// ProcessItems decodes every stored raw JSON record into a generic map,
// delegating the per-item work to ProcessItem.
func (r RedisDAO) ProcessItems(items []string) []genericStruct {
	result := make([]genericStruct, len(items))
	for index, item := range items {
		result[index] = r.ProcessItem(item)
	}
	return result
}
// String identifies this DAO implementation in logs.
func (r RedisDAO) String() string {
	return "RedisDAO"
}

// NewRedisDao connects to Redis at address (default DB, no password)
// and returns a ready-to-use DAO.
func NewRedisDao(address string) (dao RedisDAO) {
	client := redis.NewClient(&redis.Options{
		Addr:     address,
		Password: "",
		DB:       0,
	})
	dao = RedisDAO{
		client: client,
	}
	return
}
Created new helper func
package gotana
import (
"encoding/json"
"github.com/go-redis/redis"
)
// genericStruct is a schemaless decoded JSON record.
type genericStruct map[string]interface{}

// DAO abstracts persistence of scraped items.
type DAO interface {
	Write(name string, data []byte) error
	GetLatestItem(name string) error
	GetItems(name string) []string
	CountItems(name string) int64
	ProcessItems(items []string) []genericStruct
}
// SaveItem validates, serializes and persists one scraped item through
// the given DAO. A nil DAO is a no-op. Serialization and persistence
// failures are now logged instead of being silently dropped.
func SaveItem(item SaveableItem, dao DAO) {
	if dao == nil {
		return
	}
	if !item.Validate() {
		Logger().Warning("Item is not valid. Skipping...")
		return
	}
	scraper := item.Scraper().Name
	data, err := item.RecordData()
	if err != nil {
		Logger().Warning("Cannot serialize item. Skipping...")
		return
	}
	if err := dao.Write(scraper, data); err != nil {
		Logger().Warning("Cannot persist item. Skipping...")
	}
}
// RedisDAO is a DAO implementation that stores each scraper's records
// in a Redis set.
type RedisDAO struct {
	client *redis.Client
}

// KeyPrefixed namespaces a key so gotana data does not clash with other
// users of the same Redis instance.
func (r RedisDAO) KeyPrefixed(key string) string {
	return "gotana-" + key
}
// Write appends a raw record to the Redis set for the given scraper
// name. The Redis error is now propagated to the caller instead of
// being discarded (the previous version always returned nil).
func (r RedisDAO) Write(name string, data []byte) error {
	key := r.KeyPrefixed(name)
	// string(data) copies the bytes; the data[:] reslice was redundant.
	return r.client.SAdd(key, string(data)).Err()
}
// GetLatestItem is not implemented yet; it always reports success.
func (r RedisDAO) GetLatestItem(name string) error {
	return nil
}

// GetItems returns all raw records stored for the given scraper name.
func (r RedisDAO) GetItems(name string) []string {
	key := r.KeyPrefixed(name)
	return r.client.SMembers(key).Val()
}

// CountItems returns the number of records stored for the scraper name.
func (r RedisDAO) CountItems(name string) int64 {
	key := r.KeyPrefixed(name)
	return r.client.SCard(key).Val()
}
// ProcessItem decodes a single raw JSON record into a genericStruct.
// Invalid JSON yields an empty map (the decode error is deliberately
// ignored).
func (r RedisDAO) ProcessItem(item string) genericStruct {
	var data = genericStruct{}
	json.Unmarshal([]byte(item), &data)
	return data
}

// ProcessItems decodes every stored raw JSON record into a generic map,
// delegating the per-item work to ProcessItem.
func (r RedisDAO) ProcessItems(items []string) []genericStruct {
	result := make([]genericStruct, len(items))
	for index, item := range items {
		result[index] = r.ProcessItem(item)
	}
	return result
}
// String identifies this DAO implementation in logs.
func (r RedisDAO) String() string {
	return "RedisDAO"
}

// NewRedisDao connects to Redis at address (default DB, no password)
// and returns a ready-to-use DAO.
func NewRedisDao(address string) (dao RedisDAO) {
	client := redis.NewClient(&redis.Options{
		Addr:     address,
		Password: "",
		DB:       0,
	})
	dao = RedisDAO{
		client: client,
	}
	return
}
|
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package platformvm
import (
"errors"
"fmt"
"github.com/ava-labs/gecko/chains/atomic"
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/database/versiondb"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/crypto"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/utils/math"
"github.com/ava-labs/gecko/vms/components/ava"
"github.com/ava-labs/gecko/vms/components/verify"
"github.com/ava-labs/gecko/vms/secp256k1fx"
)
// Errors returned while verifying an ImportTx.
var (
	errAssetIDMismatch            = errors.New("asset IDs in the input don't match the utxo")
	errWrongNumberOfCredentials   = errors.New("should have the same number of credentials as inputs")
	errNoImportInputs             = errors.New("no import inputs")
	errInputsNotSortedUnique      = errors.New("inputs not sorted and unique")
	errPublicKeySignatureMismatch = errors.New("signature doesn't match public key")
	errUnknownAsset               = errors.New("unknown asset ID")
)
// UnsignedImportTx is an unsigned ImportTx
type UnsignedImportTx struct {
	// ID of the network this blockchain exists on
	NetworkID uint32 `serialize:"true"`

	// Next unused nonce of account paying the transaction fee and receiving the
	// inputs of this transaction.
	Nonce uint64 `serialize:"true"`

	// Account that this transaction is being sent by. This is needed to ensure the Credentials are replay safe.
	Account ids.ShortID `serialize:"true"`

	Ins []*ava.TransferableInput `serialize:"true"` // The inputs to this transaction
}
// ImportTx imports funds from the AVM
type ImportTx struct {
	UnsignedImportTx `serialize:"true"`

	Sig   [crypto.SECP256K1RSigLen]byte `serialize:"true"`
	Creds []verify.Verifiable           `serialize:"true"` // The credentials of this transaction

	// Unserialized fields, populated by initialize / SyntacticVerify.
	vm            *VM
	id            ids.ID
	key           crypto.PublicKey // public key of transaction signer
	unsignedBytes []byte
	bytes         []byte
}
// initialize attaches the tx to its VM, caches the signed byte
// representation, and derives the tx ID from its hash.
func (tx *ImportTx) initialize(vm *VM) error {
	tx.vm = vm
	txBytes, err := Codec.Marshal(tx) // byte repr. of the signed tx
	tx.bytes = txBytes
	tx.id = ids.NewID(hashing.ComputeHash256Array(txBytes))
	return err
}
// ID of this transaction
func (tx *ImportTx) ID() ids.ID { return tx.id }

// Key returns the public key of the signer of this transaction
// Precondition: tx.Verify() has been called and returned nil
func (tx *ImportTx) Key() crypto.PublicKey { return tx.key }

// UnsignedBytes returns the unsigned byte representation of an ImportTx
func (tx *ImportTx) UnsignedBytes() []byte { return tx.unsignedBytes }

// Bytes returns the byte representation of an ImportTx
func (tx *ImportTx) Bytes() []byte { return tx.bytes }
// InputUTXOs returns the set of UTXO IDs this transaction consumes.
// (The previous comment claimed an empty set is returned, but the code
// adds every input's ID.)
func (tx *ImportTx) InputUTXOs() ids.Set {
	set := ids.Set{}
	for _, in := range tx.Ins {
		set.Add(in.InputID())
	}
	return set
}
// SyntacticVerify this transaction is well-formed
// Also populates [tx.Key] with the public key that signed this transaction
func (tx *ImportTx) SyntacticVerify() error {
	switch {
	case tx == nil:
		return errNilTx
	case tx.key != nil:
		return nil // Only verify the transaction once
	case tx.NetworkID != tx.vm.Ctx.NetworkID: // verify the transaction is on this network
		return errWrongNetworkID
	case tx.id.IsZero():
		return errInvalidID
	case len(tx.Ins) == 0:
		return errNoImportInputs
	case len(tx.Ins) != len(tx.Creds):
		return errWrongNumberOfCredentials
	}

	// Every input must be well-formed and denominated in the AVA asset.
	for _, in := range tx.Ins {
		if err := in.Verify(); err != nil {
			return err
		}
		if !in.AssetID().Equals(tx.vm.ava) {
			return errUnknownAsset
		}
	}
	if !ava.IsSortedAndUniqueTransferableInputs(tx.Ins) {
		return errInputsNotSortedUnique
	}
	for _, cred := range tx.Creds {
		if err := cred.Verify(); err != nil {
			return err
		}
	}

	// Recover the signer's public key from the signature over the
	// unsigned tx bytes and check it matches the declared account.
	unsignedIntf := interface{}(&tx.UnsignedImportTx)
	unsignedBytes, err := Codec.Marshal(&unsignedIntf) // byte repr of unsigned tx
	if err != nil {
		return err
	}
	key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.Sig[:])
	if err != nil {
		return err
	}
	if !tx.Account.Equals(key.Address()) {
		return errPublicKeySignatureMismatch
	}

	// Cache the results so verification only happens once.
	tx.key = key
	tx.unsignedBytes = unsignedBytes
	return nil
}
// SemanticVerify this transaction is valid.
func (tx *ImportTx) SemanticVerify(db database.Database) error {
	if err := tx.SyntacticVerify(); err != nil {
		return err
	}

	// Sum the imported input amounts with overflow checking.
	amount := uint64(0)
	for _, in := range tx.Ins {
		newAmount, err := math.Add64(in.In.Amount(), amount)
		if err != nil {
			return err
		}
		amount = newAmount
	}

	// Deduct tx fee from payer's account
	account, err := tx.vm.getAccount(db, tx.Key().Address())
	if err != nil {
		return err
	}
	account, err = account.Remove(0, tx.Nonce)
	if err != nil {
		return err
	}
	// Credit the imported amount to the same account.
	account, err = account.Add(amount)
	if err != nil {
		return err
	}
	if err := tx.vm.putAccount(db, account); err != nil {
		return err
	}

	// Check each input against the corresponding UTXO in the AVM's
	// shared-memory state: the asset must match and the credential must
	// authorize spending the UTXO's output.
	smDB := tx.vm.Ctx.SharedMemory.GetDatabase(tx.vm.avm)
	defer tx.vm.Ctx.SharedMemory.ReleaseDatabase(tx.vm.avm)

	state := ava.NewPrefixedState(smDB, Codec)
	for i, in := range tx.Ins {
		cred := tx.Creds[i]

		utxoID := in.UTXOID.InputID()
		utxo, err := state.AVMUTXO(utxoID)
		if err != nil {
			return err
		}
		utxoAssetID := utxo.AssetID()
		inAssetID := in.AssetID()
		if !utxoAssetID.Equals(inAssetID) {
			return errAssetIDMismatch
		}

		if err := tx.vm.fx.VerifyTransfer(tx, in.In, cred, utxo.Out); err != nil {
			return err
		}
	}
	return nil
}
// Accept this transaction.
// Marks the consumed AVM UTXOs as spent in the shared-memory database,
// atomically with writing [batch].
func (tx *ImportTx) Accept(batch database.Batch) error {
	smDB := tx.vm.Ctx.SharedMemory.GetDatabase(tx.vm.avm)
	defer tx.vm.Ctx.SharedMemory.ReleaseDatabase(tx.vm.avm)
	// Stage the spends in a version layer so they commit as a single batch.
	vsmDB := versiondb.New(smDB)
	state := ava.NewPrefixedState(vsmDB, Codec)
	for _, in := range tx.Ins {
		utxoID := in.UTXOID.InputID()
		if err := state.SpendAVMUTXO(utxoID); err != nil {
			return err
		}
	}
	sharedBatch, err := vsmDB.CommitBatch()
	if err != nil {
		return err
	}
	// Commit this chain's batch and the shared-memory batch atomically.
	return atomic.WriteAll(batch, sharedBatch)
}
// newImportTx returns a signed ImportTx that imports [ins] (spendable with
// the key lists in [from], one list per input) into the account controlled
// by [to], consuming nonce [nonce].
func (vm *VM) newImportTx(nonce uint64, networkID uint32, ins []*ava.TransferableInput, from [][]*crypto.PrivateKeySECP256K1R, to *crypto.PrivateKeySECP256K1R) (*ImportTx, error) {
	// Sort inputs (keeping signers aligned) into the canonical order that
	// SyntacticVerify requires.
	ava.SortTransferableInputsWithSigners(ins, from)
	tx := &ImportTx{UnsignedImportTx: UnsignedImportTx{
		NetworkID: networkID,
		Nonce: nonce,
		Account: to.PublicKey().Address(),
		Ins: ins,
	}}
	unsignedIntf := interface{}(&tx.UnsignedImportTx)
	unsignedBytes, err := Codec.Marshal(&unsignedIntf) // Byte repr. of unsigned transaction
	if err != nil {
		return nil, err
	}
	hash := hashing.ComputeHash256(unsignedBytes)
	// Build one credential per input, holding a signature from each of that
	// input's keys over the unsigned tx hash.
	for _, credKeys := range from {
		cred := &secp256k1fx.Credential{}
		for _, key := range credKeys {
			sig, err := key.SignHash(hash)
			if err != nil {
				return nil, fmt.Errorf("problem creating transaction: %w", err)
			}
			fixedSig := [crypto.SECP256K1RSigLen]byte{}
			copy(fixedSig[:], sig)
			cred.Sigs = append(cred.Sigs, fixedSig)
		}
		tx.Creds = append(tx.Creds, cred)
	}
	// The recipient signs too, proving control of the destination account.
	sig, err := to.SignHash(hash)
	if err != nil {
		return nil, err
	}
	copy(tx.Sig[:], sig)
	return tx, tx.initialize(vm)
}
Allow fees to be paid with the imported balance in the ImportTx
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package platformvm
import (
"errors"
"fmt"
"github.com/ava-labs/gecko/chains/atomic"
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/database/versiondb"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/crypto"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/utils/math"
"github.com/ava-labs/gecko/vms/components/ava"
"github.com/ava-labs/gecko/vms/components/verify"
"github.com/ava-labs/gecko/vms/secp256k1fx"
)
// Errors returned while validating an ImportTx.
var (
	errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo")
	errWrongNumberOfCredentials = errors.New("should have the same number of credentials as inputs")
	errNoImportInputs = errors.New("no import inputs")
	errInputsNotSortedUnique = errors.New("inputs not sorted and unique")
	errPublicKeySignatureMismatch = errors.New("signature doesn't match public key")
	errUnknownAsset = errors.New("unknown asset ID")
)
// UnsignedImportTx is an unsigned ImportTx
type UnsignedImportTx struct {
	// ID of the network this blockchain exists on
	NetworkID uint32 `serialize:"true"`
	// Next unused nonce of account paying the transaction fee and receiving the
	// inputs of this transaction.
	Nonce uint64 `serialize:"true"`
	// Account that this transaction is being sent by. This is needed to ensure the Credentials are replay safe.
	Account ids.ShortID `serialize:"true"`
	// The inputs to this transaction; must be sorted and unique (see SyntacticVerify).
	Ins []*ava.TransferableInput `serialize:"true"` // The inputs to this transaction
}
// ImportTx imports funds from the AVM
type ImportTx struct {
	UnsignedImportTx `serialize:"true"`
	// Signature by the account over the unsigned tx bytes (checked in SyntacticVerify).
	Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"`
	Creds []verify.Verifiable `serialize:"true"` // The credentials of this transaction
	// Unserialized fields below are populated by initialize and SyntacticVerify.
	vm *VM
	id ids.ID
	key crypto.PublicKey // public key of transaction signer
	unsignedBytes []byte
	bytes []byte
}
// initialize attaches the VM to the transaction and caches its signed byte
// representation and the ID derived from hashing those bytes.
func (tx *ImportTx) initialize(vm *VM) error {
	tx.vm = vm
	txBytes, err := Codec.Marshal(tx) // byte repr. of the signed tx
	tx.bytes = txBytes
	tx.id = ids.NewID(hashing.ComputeHash256Array(txBytes))
	return err
}
// ID returns the unique identifier of this transaction (set by initialize).
func (tx *ImportTx) ID() ids.ID { return tx.id }
// Key returns the public key of the signer of this transaction.
// Precondition: tx.Verify() has been called and returned nil
// (tx.key is populated by SyntacticVerify).
func (tx *ImportTx) Key() crypto.PublicKey { return tx.key }
// UnsignedBytes returns the unsigned byte representation of an ImportTx
// (cached by SyntacticVerify).
func (tx *ImportTx) UnsignedBytes() []byte { return tx.unsignedBytes }
// Bytes returns the byte representation of an ImportTx (cached by initialize).
func (tx *ImportTx) Bytes() []byte { return tx.bytes }
// InputUTXOs returns the set of UTXO IDs consumed by this transaction.
func (tx *ImportTx) InputUTXOs() ids.Set {
	set := ids.Set{}
	for _, in := range tx.Ins {
		set.Add(in.InputID())
	}
	return set
}
// SyntacticVerify this transaction is well-formed
// Also populates [tx.Key] with the public key that signed this transaction
func (tx *ImportTx) SyntacticVerify() error {
	switch {
	case tx == nil:
		return errNilTx
	case tx.key != nil:
		return nil // Only verify the transaction once
	case tx.NetworkID != tx.vm.Ctx.NetworkID: // verify the transaction is on this network
		return errWrongNetworkID
	case tx.id.IsZero():
		return errInvalidID
	case len(tx.Ins) == 0:
		return errNoImportInputs
	case len(tx.Ins) != len(tx.Creds):
		// SemanticVerify pairs tx.Creds[i] with tx.Ins[i], so the counts must match.
		return errWrongNumberOfCredentials
	}
	// Every input must be well-formed and denominated in the AVA asset.
	for _, in := range tx.Ins {
		if err := in.Verify(); err != nil {
			return err
		}
		if !in.AssetID().Equals(tx.vm.ava) {
			return errUnknownAsset
		}
	}
	// Canonical input ordering makes the serialized form unique.
	if !ava.IsSortedAndUniqueTransferableInputs(tx.Ins) {
		return errInputsNotSortedUnique
	}
	for _, cred := range tx.Creds {
		if err := cred.Verify(); err != nil {
			return err
		}
	}
	unsignedIntf := interface{}(&tx.UnsignedImportTx)
	unsignedBytes, err := Codec.Marshal(&unsignedIntf) // byte repr of unsigned tx
	if err != nil {
		return err
	}
	// Recover the signer of tx.Sig and require it to match the declared account.
	key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.Sig[:])
	if err != nil {
		return err
	}
	if !tx.Account.Equals(key.Address()) {
		return errPublicKeySignatureMismatch
	}
	// Cache results; the tx.key != nil case above short-circuits re-verification.
	tx.key = key
	tx.unsignedBytes = unsignedBytes
	return nil
}
// SemanticVerify this transaction is valid.
func (tx *ImportTx) SemanticVerify(db database.Database) error {
	if err := tx.SyntacticVerify(); err != nil {
		return err
	}
	// Sum the imported input amounts with overflow checking.
	amount := uint64(0)
	for _, in := range tx.Ins {
		newAmount, err := math.Add64(in.In.Amount(), amount)
		if err != nil {
			return err
		}
		amount = newAmount
	}
	// Deduct tx fee from payer's account
	account, err := tx.vm.getAccount(db, tx.Key().Address())
	if err != nil {
		return err
	}
	// Credit the imported funds before removing the fee so the fee can be
	// paid out of the imported balance.
	account, err = account.Add(amount)
	if err != nil {
		return err
	}
	account, err = account.Remove(0, tx.Nonce)
	if err != nil {
		return err
	}
	if err := tx.vm.putAccount(db, account); err != nil {
		return err
	}
	smDB := tx.vm.Ctx.SharedMemory.GetDatabase(tx.vm.avm)
	defer tx.vm.Ctx.SharedMemory.ReleaseDatabase(tx.vm.avm)
	state := ava.NewPrefixedState(smDB, Codec)
	// Each input must reference an existing AVM UTXO of the same asset, and
	// its credential must authorize the spend.
	for i, in := range tx.Ins {
		cred := tx.Creds[i]
		utxoID := in.UTXOID.InputID()
		utxo, err := state.AVMUTXO(utxoID)
		if err != nil {
			return err
		}
		utxoAssetID := utxo.AssetID()
		inAssetID := in.AssetID()
		if !utxoAssetID.Equals(inAssetID) {
			return errAssetIDMismatch
		}
		if err := tx.vm.fx.VerifyTransfer(tx, in.In, cred, utxo.Out); err != nil {
			return err
		}
	}
	return nil
}
// Accept this transaction.
// Marks the consumed AVM UTXOs as spent in the shared-memory database,
// atomically with writing [batch].
func (tx *ImportTx) Accept(batch database.Batch) error {
	smDB := tx.vm.Ctx.SharedMemory.GetDatabase(tx.vm.avm)
	defer tx.vm.Ctx.SharedMemory.ReleaseDatabase(tx.vm.avm)
	// Stage the spends in a version layer so they commit as a single batch.
	vsmDB := versiondb.New(smDB)
	state := ava.NewPrefixedState(vsmDB, Codec)
	for _, in := range tx.Ins {
		utxoID := in.UTXOID.InputID()
		if err := state.SpendAVMUTXO(utxoID); err != nil {
			return err
		}
	}
	sharedBatch, err := vsmDB.CommitBatch()
	if err != nil {
		return err
	}
	// Commit this chain's batch and the shared-memory batch atomically.
	return atomic.WriteAll(batch, sharedBatch)
}
// newImportTx returns a signed ImportTx that imports [ins] (spendable with
// the key lists in [from], one list per input) into the account controlled
// by [to], consuming nonce [nonce].
func (vm *VM) newImportTx(nonce uint64, networkID uint32, ins []*ava.TransferableInput, from [][]*crypto.PrivateKeySECP256K1R, to *crypto.PrivateKeySECP256K1R) (*ImportTx, error) {
	// Sort inputs (keeping signers aligned) into the canonical order that
	// SyntacticVerify requires.
	ava.SortTransferableInputsWithSigners(ins, from)
	tx := &ImportTx{UnsignedImportTx: UnsignedImportTx{
		NetworkID: networkID,
		Nonce: nonce,
		Account: to.PublicKey().Address(),
		Ins: ins,
	}}
	unsignedIntf := interface{}(&tx.UnsignedImportTx)
	unsignedBytes, err := Codec.Marshal(&unsignedIntf) // Byte repr. of unsigned transaction
	if err != nil {
		return nil, err
	}
	hash := hashing.ComputeHash256(unsignedBytes)
	// Build one credential per input, holding a signature from each of that
	// input's keys over the unsigned tx hash.
	for _, credKeys := range from {
		cred := &secp256k1fx.Credential{}
		for _, key := range credKeys {
			sig, err := key.SignHash(hash)
			if err != nil {
				return nil, fmt.Errorf("problem creating transaction: %w", err)
			}
			fixedSig := [crypto.SECP256K1RSigLen]byte{}
			copy(fixedSig[:], sig)
			cred.Sigs = append(cred.Sigs, fixedSig)
		}
		tx.Creds = append(tx.Creds, cred)
	}
	// The recipient signs too, proving control of the destination account.
	sig, err := to.SignHash(hash)
	if err != nil {
		return nil, err
	}
	copy(tx.Sig[:], sig)
	return tx, tx.initialize(vm)
}
|
// Package inf (type inf.Dec) implements "infinite-precision" decimal
// arithmetic.
// "Infinite precision" describes two characteristics: practically unlimited
// precision for decimal number representation and no support for calculating
// with any specific fixed precision.
// (Although there is no practical limit on precision, inf.Dec can only
// represent finite decimals.)
//
// This package is currently in experimental stage and the API may change.
//
// This package does NOT support:
// - rounding to specific precisions (as opposed to specific decimal positions)
// - the notion of context (each rounding must be explicit)
// - NaN and Inf values, and distinguishing between positive and negative zero
// - conversions to and from float32/64 types
//
// Features considered for possible addition:
// + formatting options
// + Exp method
// + combined operations such as AddRound/MulAdd etc
// + exchanging data in decimal32/64/128 formats
//
package inf
// TODO:
// - avoid excessive deep copying (quo and rounders)
import (
"fmt"
"io"
"math/big"
"strings"
)
// A Dec represents a signed arbitrary-precision decimal.
// It is a combination of a sign, an arbitrary-precision integer coefficient
// value, and a signed fixed-precision exponent value.
// The sign and the coefficient value are handled together as a signed value
// and referred to as the unscaled value.
// (Positive and negative zero values are not distinguished.)
// Since the exponent is most commonly non-positive, it is handled in negated
// form and referred to as scale.
//
// The mathematical value of a Dec equals:
//
// unscaled * 10**(-scale)
//
// Note that different Dec representations may have equal mathematical values.
//
// unscaled scale String()
// -------------------------
// 0 0 "0"
// 0 2 "0.00"
// 0 -2 "0"
// 1 0 "1"
// 100 2 "1.00"
// 10 0 "10"
// 1 -1 "10"
//
// The zero value for a Dec represents the value 0 with scale 0.
//
// Operations are typically performed through the *Dec type.
// The semantics of the assignment operation "=" for "bare" Dec values is
// undefined and should not be relied on.
//
// Methods are typically of the form:
//
// func (z *Dec) Op(x, y *Dec) *Dec
//
// and implement operations z = x Op y with the result as receiver; if it
// is one of the operands it may be overwritten (and its memory reused).
// To enable chaining of operations, the result is also returned. Methods
// returning a result other than *Dec take one of the operands as the receiver.
//
// A "bare" Quo method (quotient / division operation) is not provided, as the
// result is not always a finite decimal and thus in general cannot be
// represented as a Dec.
// Instead, in the common case when rounding is (potentially) necessary,
// QuoRound should be used with a Scale and a Rounder.
// QuoExact or QuoRound with RoundExact can be used in the special cases when it
// is known that the result is always a finite decimal.
//
type Dec struct {
	unscaled big.Int // signed coefficient (sign and magnitude together)
	scale    Scale   // negated exponent: value = unscaled * 10**(-scale)
}
// Scale represents the type used for the scale of a Dec.
type Scale int32
// scaleSize is the number of bytes used to encode a Scale in GobEncode.
const scaleSize = 4 // bytes in a Scale value
// Scaler represents a method for obtaining the scale to use for the result of
// an operation on x and y.
// Implemented by sclr (fixed scale) and scaleQuoExact (exact-quotient scale).
type scaler interface {
	Scale(x *Dec, y *Dec) Scale
}
// bigInt caches the small constants 0..10 as *big.Int to avoid reallocation.
var bigInt = [...]*big.Int{
	big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4),
	big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9),
	big.NewInt(10),
}
// exp10cache caches the first 64 powers of ten so exp10 can serve common
// scales without allocating.
var exp10cache [64]big.Int = func() [64]big.Int {
	e10, e10i := [64]big.Int{}, bigInt[1]
	for i := range e10 { // "i, _ :=" simplified per gofmt -s (S1005)
		e10[i].Set(e10i)
		e10i = new(big.Int).Mul(e10i, bigInt[10])
	}
	return e10
}()
// NewDec allocates and returns a new Dec set to the given int64 unscaled value
// and scale. The result has the mathematical value unscaled * 10**(-scale).
func NewDec(unscaled int64, scale Scale) *Dec {
	return new(Dec).SetUnscaled(unscaled).SetScale(scale)
}
// NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled
// value and scale. The unscaled value is copied, not aliased.
func NewDecBig(unscaled *big.Int, scale Scale) *Dec {
	return new(Dec).SetUnscaledBig(unscaled).SetScale(scale)
}
// Scale returns the scale of x (the negated decimal exponent).
func (x *Dec) Scale() Scale {
	return x.scale
}
// Unscaled returns the unscaled value of x for u and true for ok when the
// unscaled value can be represented as int64; otherwise it returns an undefined
// int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid
// checking the validity of the value when the check is known to be redundant.
func (x *Dec) Unscaled() (u int64, ok bool) {
	u = x.unscaled.Int64()
	// Round-trip check: u is only valid if it converts back to the same big.Int.
	var i big.Int
	ok = i.SetInt64(u).Cmp(&x.unscaled) == 0
	return
}
// UnscaledBig returns the unscaled value of x as *big.Int.
// The returned pointer aliases x's storage: mutating it mutates x.
func (x *Dec) UnscaledBig() *big.Int {
	return &x.unscaled
}
// SetScale sets the scale of z, with the unscaled value unchanged, and returns
// z.
// The mathematical value of the Dec changes as if it was multiplied by
// 10**(oldscale-scale). (No digits are shifted; only the exponent changes.)
func (z *Dec) SetScale(scale Scale) *Dec {
	z.scale = scale
	return z
}
// SetUnscaled sets the unscaled value of z, with the scale unchanged, and
// returns z.
func (z *Dec) SetUnscaled(unscaled int64) *Dec {
	z.unscaled.SetInt64(unscaled)
	return z
}
// SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and
// returns z. The value is copied; z does not alias unscaled.
func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec {
	z.unscaled.Set(unscaled)
	return z
}
// Set sets z to the value of x and returns z.
// It does nothing if z == x.
func (z *Dec) Set(x *Dec) *Dec {
	if z == x {
		return z
	}
	z.SetScale(x.Scale())
	return z.SetUnscaledBig(x.UnscaledBig())
}
// Sign returns:
//
//	-1 if x <  0
//	 0 if x == 0
//	+1 if x >  0
//
// The sign is carried entirely by the unscaled coefficient.
func (x *Dec) Sign() int {
	return x.UnscaledBig().Sign()
}
// Neg sets z to -x and returns z.
func (z *Dec) Neg(x *Dec) *Dec {
	z.UnscaledBig().Neg(x.UnscaledBig())
	return z.SetScale(x.Scale())
}
// Cmp compares x and y and returns:
//
//	-1 if x <  y
//	 0 if x == y
//	+1 if x >  y
//
// Operands are compared at a common scale, so different representations of
// the same value compare equal.
func (x *Dec) Cmp(y *Dec) int {
	xx, yy := upscale(x, y)
	return xx.UnscaledBig().Cmp(yy.UnscaledBig())
}
// Abs sets z to |x| (the absolute value of x) and returns z.
func (z *Dec) Abs(x *Dec) *Dec {
	z.UnscaledBig().Abs(x.UnscaledBig())
	return z.SetScale(x.Scale())
}
// Add sets z to the sum x+y and returns z.
// The scale of z is the greater of the scales of x and y.
func (z *Dec) Add(x, y *Dec) *Dec {
	// Bring both operands to the common (larger) scale, then add coefficients.
	xx, yy := upscale(x, y)
	z.SetScale(xx.Scale())
	z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig())
	return z
}
// Sub sets z to the difference x-y and returns z.
// The scale of z is the greater of the scales of x and y.
func (z *Dec) Sub(x, y *Dec) *Dec {
	// Bring both operands to the common (larger) scale, then subtract.
	xx, yy := upscale(x, y)
	z.SetScale(xx.Scale())
	z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig())
	return z
}
// Mul sets z to the product x*y and returns z.
// The scale of z is the sum of the scales of x and y.
func (z *Dec) Mul(x, y *Dec) *Dec {
	resultScale := x.Scale() + y.Scale()
	z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig())
	return z.SetScale(resultScale)
}
// Round sets z to the value of x rounded to Scale s using Rounder r, and
// returns z. (Implemented as division by one at the target scale.)
func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec {
	return z.QuoRound(x, NewDec(1, 0), s, r)
}
// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the
// specified scale.
//
// If the rounder is RoundExact but the result can not be expressed exactly at
// the specified scale, QuoRound returns nil, and the value of z is undefined.
//
// There is no corresponding Div method; the equivalent can be achieved through
// the choice of Rounder used.
//
// Delegates to quo with a fixed-scale scaler.
func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec {
	return z.quo(x, y, sclr{s}, r)
}
// quo sets z to x/y at the scale chosen by s, rounded by r, and returns z.
// Returns nil (leaving z unmodified) when r cannot represent the result,
// e.g. RoundExact with a non-terminating quotient.
func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec {
	scl := s.Scale(x, y)
	var zzz *Dec
	if r.UseRemainder() {
		// This rounder inspects the remainder to decide the final digit.
		zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int))
		zzz = r.Round(new(Dec), zz, rA, rB)
	} else {
		zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil)
		zzz = r.Round(new(Dec), zz, nil, nil)
	}
	if zzz == nil {
		return nil
	}
	return z.Set(zzz)
}
// QuoExact sets z to the quotient x/y and returns z when x/y is a finite
// decimal. Otherwise it returns nil and the value of z is undefined.
//
// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is
// calculated so that the remainder will be zero whenever x/y is a finite
// decimal.
//
// Delegates to quo with the exact-quotient scaler and RoundExact.
func (z *Dec) QuoExact(x, y *Dec) *Dec {
	return z.quo(x, y, scaleQuoExact{}, RoundExact)
}
// quoRem sets z to the quotient x/y with the scale s, and if useRem is true,
// it sets remNum and remDen to the numerator and denominator of the remainder.
// It returns z, remNum and remDen.
//
// The remainder is normalized to the range -1 < r < 1 to simplify rounding;
// that is, the results satisfy the following equation:
//
//	x / y = z + (remNum/remDen) * 10**(-z.Scale())
//
// See Rounder for more details about rounding.
//
func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool,
	remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) {
	// difference (required adjustment) compared to "canonical" result scale
	shift := s - (x.Scale() - y.Scale())
	// pointers to adjusted unscaled dividend and divisor
	var ix, iy *big.Int
	switch {
	case shift > 0:
		// increased scale: decimal-shift dividend left
		ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift))
		iy = y.UnscaledBig()
	case shift < 0:
		// decreased scale: decimal-shift divisor left
		ix = x.UnscaledBig()
		iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift))
	default:
		ix = x.UnscaledBig()
		iy = y.UnscaledBig()
	}
	// save a copy of iy in case it is about to be overwritten with the result
	// (happens when z aliases y)
	iy2 := iy
	if iy == z.UnscaledBig() {
		iy2 = new(big.Int).Set(iy)
	}
	// set scale
	z.SetScale(s)
	// set unscaled
	if useRem {
		// Int division; QuoRem truncates toward zero, remainder keeps x's sign
		_, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int))
		// set remainder
		remNum.Set(intr)
		remDen.Set(iy2)
	} else {
		z.UnscaledBig().Quo(ix, iy)
	}
	return z, remNum, remDen
}
// sclr is a scaler that always yields a fixed, caller-supplied scale.
type sclr struct{ s Scale }
func (s sclr) Scale(x, y *Dec) Scale {
	return s.s
}
// scaleQuoExact is a scaler yielding the scale at which x/y is exact: the
// canonical scale x.Scale()-y.Scale() plus enough digits to absorb the
// factors of 2 and 5 in the reduced denominator.
type scaleQuoExact struct{}
func (sqe scaleQuoExact) Scale(x, y *Dec) Scale {
	// big.Rat reduces the fraction, leaving only denominator factors not
	// shared with the numerator.
	rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig())
	f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5])
	// One extra decimal digit is needed per factor of max(f2, f5).
	var f10 Scale
	if f2 > f5 {
		f10 = Scale(f2)
	} else {
		f10 = Scale(f5)
	}
	return x.Scale() - y.Scale() + f10
}
func factor(n *big.Int, p *big.Int) int {
// could be improved for large factors
d, f := n, 0
for {
dd, dm := new(big.Int).DivMod(d, p, new(big.Int))
if dm.Sign() == 0 {
f++
d = dd
} else {
break
}
}
return f
}
func factor2(n *big.Int) int {
// could be improved for large factors
f := 0
for ; n.Bit(f) == 0; f++ {
}
return f
}
// upscale returns a and b adjusted to a common scale (the larger of the two);
// the operand that already has the larger scale is returned unchanged.
func upscale(a, b *Dec) (*Dec, *Dec) {
	switch {
	case a.Scale() > b.Scale():
		return a, b.rescale(a.Scale())
	case b.Scale() > a.Scale():
		return a.rescale(b.Scale()), b
	default:
		return a, b
	}
}
// exp10 returns 10**x, served from exp10cache for x < 64.
// Callers must not mutate a cached result. Assumes x >= 0 (a negative x
// would index the cache out of range).
func exp10(x Scale) *big.Int {
	if int(x) < len(exp10cache) {
		return &exp10cache[int(x)]
	}
	return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil)
}
// rescale returns a Dec with the given scale and (up to truncation) the same
// value as x. Decreasing the scale truncates toward zero via Quo; increasing
// it multiplies by a power of ten. Returns x itself when the scale matches.
func (x *Dec) rescale(newScale Scale) *Dec {
	shift := newScale - x.Scale()
	switch {
	case shift < 0:
		e := exp10(-shift)
		return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale)
	case shift > 0:
		e := exp10(shift)
		return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale)
	}
	return x
}
// zeros is a reusable template of ASCII '0' bytes used for zero-padding;
// lzeros is its length.
var zeros = []byte("00000000000000000000000000000000" +
	"00000000000000000000000000000000")
var lzeros = Scale(len(zeros))
// appendZeros appends n ASCII '0' bytes to s, copying from the zeros
// template in chunks of at most lzeros, and returns the extended slice.
func appendZeros(s []byte, n Scale) []byte {
	for remaining := n; remaining > 0; remaining -= lzeros {
		chunk := remaining
		if chunk > lzeros {
			chunk = lzeros
		}
		s = append(s, zeros[:chunk]...)
	}
	return s
}
// String returns the decimal representation of x, placing the decimal point
// and leading/trailing zeros as dictated by the scale (see the Dec type
// documentation for examples).
func (x *Dec) String() string {
	if x == nil {
		return "<nil>"
	}
	scale := x.Scale()
	s := []byte(x.UnscaledBig().String())
	if scale <= 0 {
		// Non-positive scale multiplies by a power of ten: pad with zeros
		// (a zero value stays "0").
		if scale != 0 && x.unscaled.Sign() != 0 {
			s = appendZeros(s, -scale)
		}
		return string(s)
	}
	// negbit is 1 when x < 0 (the leading '-' must be skipped), else 0.
	negbit := Scale(-((x.Sign() - 1) / 2))
	// scale > 0
	lens := Scale(len(s))
	if lens-negbit <= scale {
		// All digits are fractional: "0." plus zero padding plus the digits.
		ss := make([]byte, 0, scale+2)
		if negbit == 1 {
			ss = append(ss, '-')
		}
		ss = append(ss, '0', '.')
		ss = appendZeros(ss, scale-lens+negbit)
		ss = append(ss, s[negbit:]...)
		return string(ss)
	}
	// lens > scale
	// Insert the decimal point scale digits from the right.
	ss := make([]byte, 0, lens+1)
	ss = append(ss, s[:lens-scale]...)
	ss = append(ss, '.')
	ss = append(ss, s[lens-scale:]...)
	return string(ss)
}
// Format is a support routine for fmt.Formatter. It accepts the decimal
// formats 'd' and 'f', and handles both equivalently.
// Width, precision, flags and bases 2, 8, 16 are not supported.
func (x *Dec) Format(s fmt.State, ch rune) {
	if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' {
		fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String())
		return
	}
	// Write the rendered value directly. Passing it to Fprintf as the format
	// string would misinterpret any '%' and is flagged by go vet
	// (non-constant format string).
	fmt.Fprint(s, x.String())
}
// scan reads a decimal from r into z: an optional leading sign, digits, and
// at most one decimal point. It stops at (and unreads) the first character
// that cannot extend the number; at least one digit is required.
func (z *Dec) scan(r io.RuneScanner) (*Dec, error) {
	unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes
	dp, dg := -1, -1 // indexes of decimal point, first digit
loop:
	for {
		ch, _, err := r.ReadRune()
		if err == io.EOF {
			break loop
		}
		if err != nil {
			return nil, err
		}
		switch {
		case ch == '+' || ch == '-':
			if len(unscaled) > 0 || dp >= 0 { // must be first character
				r.UnreadRune()
				break loop
			}
		case ch == '.':
			if dp >= 0 { // at most one decimal point
				r.UnreadRune()
				break loop
			}
			dp = len(unscaled)
			continue // don't add to unscaled
		case ch >= '0' && ch <= '9':
			if dg == -1 {
				dg = len(unscaled)
			}
		default:
			r.UnreadRune()
			break loop
		}
		unscaled = append(unscaled, byte(ch))
	}
	if dg == -1 {
		return nil, fmt.Errorf("no digits read")
	}
	// The scale is the number of digits after the decimal point.
	if dp >= 0 {
		z.SetScale(Scale(len(unscaled) - dp))
	} else {
		z.SetScale(0)
	}
	_, ok := z.UnscaledBig().SetString(string(unscaled), 10)
	if !ok {
		return nil, fmt.Errorf("invalid decimal: %s", string(unscaled))
	}
	return z, nil
}
// SetString sets z to the value of s, interpreted as a decimal (base 10),
// and returns z and a boolean indicating success. The scale of z is the
// number of digits after the decimal point (including any trailing 0s),
// or 0 if there is no decimal point. If SetString fails, the value of z
// is undefined but the returned value is nil.
func (z *Dec) SetString(s string) (*Dec, bool) {
	r := strings.NewReader(s)
	_, err := z.scan(r)
	if err != nil {
		return nil, false
	}
	// Reject trailing garbage: scan must have consumed the whole string.
	_, _, err = r.ReadRune()
	if err != io.EOF {
		return nil, false
	}
	// err == io.EOF => scan consumed all of s
	return z, true
}
// Scan is a support routine for fmt.Scanner; it sets z to the value of
// the scanned number. It accepts the decimal formats 'd' and 'f', and
// handles both equivalently. Bases 2, 8, 16 are not supported.
// The scale of z is the number of digits after the decimal point
// (including any trailing 0s), or 0 if there is no decimal point.
func (z *Dec) Scan(s fmt.ScanState, ch rune) error {
	if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' {
		return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch)
	}
	// Leading whitespace is skipped; trailing input is left for the caller.
	s.SkipSpace()
	_, err := z.scan(s)
	return err
}
// decGobVersion is the trailing version byte appended by GobEncode and
// verified by GobDecode.
const decGobVersion byte = 1
// scaleBytes returns the big-endian encoding of s as scaleSize bytes.
func scaleBytes(s Scale) []byte {
	buf := make([]byte, scaleSize)
	for i := scaleSize - 1; i >= 0; i-- {
		buf[i] = byte(s)
		s >>= 8
	}
	return buf
}
// scale decodes a big-endian Scale from the first scaleSize bytes of b.
func scale(b []byte) (s Scale) {
	for _, c := range b[:scaleSize] {
		s = s<<8 | Scale(c)
	}
	return
}
// GobEncode implements the gob.GobEncoder interface.
// Layout: gob-encoded unscaled big.Int, then scaleSize big-endian scale
// bytes, then the version byte.
func (x *Dec) GobEncode() ([]byte, error) {
	buf, err := x.UnscaledBig().GobEncode()
	if err != nil {
		return nil, err
	}
	buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion)
	return buf, nil
}
// GobDecode implements the gob.GobDecoder interface.
// Expects the layout produced by GobEncode: gob-encoded unscaled value,
// scaleSize big-endian scale bytes, then the version byte.
func (z *Dec) GobDecode(buf []byte) error {
	if len(buf) == 0 {
		return fmt.Errorf("Dec.GobDecode: no data")
	}
	b := buf[len(buf)-1]
	if b != decGobVersion {
		return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b)
	}
	// Reject buffers too short to hold the scale + version trailer; without
	// this check the slice expressions below panic on malformed input.
	if len(buf) < scaleSize+1 {
		return fmt.Errorf("Dec.GobDecode: buffer too short")
	}
	l := len(buf) - scaleSize - 1
	err := z.UnscaledBig().GobDecode(buf[:l])
	if err != nil {
		return err
	}
	z.SetScale(scale(buf[l : l+scaleSize]))
	return nil
}
// MarshalText implements the encoding.TextMarshaler interface.
// The output is the same decimal form produced by String.
func (x *Dec) MarshalText() ([]byte, error) {
	return []byte(x.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
// Accepts exactly the forms SetString accepts; the whole input must parse.
func (z *Dec) UnmarshalText(data []byte) error {
	_, ok := z.SetString(string(data))
	if !ok {
		return fmt.Errorf("invalid inf.Dec")
	}
	return nil
}
Dec now implements Un/MarshalJSON interfaces
// Package inf (type inf.Dec) implements "infinite-precision" decimal
// arithmetic.
// "Infinite precision" describes two characteristics: practically unlimited
// precision for decimal number representation and no support for calculating
// with any specific fixed precision.
// (Although there is no practical limit on precision, inf.Dec can only
// represent finite decimals.)
//
// This package is currently in experimental stage and the API may change.
//
// This package does NOT support:
// - rounding to specific precisions (as opposed to specific decimal positions)
// - the notion of context (each rounding must be explicit)
// - NaN and Inf values, and distinguishing between positive and negative zero
// - conversions to and from float32/64 types
//
// Features considered for possible addition:
// + formatting options
// + Exp method
// + combined operations such as AddRound/MulAdd etc
// + exchanging data in decimal32/64/128 formats
//
package inf
// TODO:
// - avoid excessive deep copying (quo and rounders)
import (
"fmt"
"io"
"math/big"
"strings"
)
// A Dec represents a signed arbitrary-precision decimal.
// It is a combination of a sign, an arbitrary-precision integer coefficient
// value, and a signed fixed-precision exponent value.
// The sign and the coefficient value are handled together as a signed value
// and referred to as the unscaled value.
// (Positive and negative zero values are not distinguished.)
// Since the exponent is most commonly non-positive, it is handled in negated
// form and referred to as scale.
//
// The mathematical value of a Dec equals:
//
// unscaled * 10**(-scale)
//
// Note that different Dec representations may have equal mathematical values.
//
// unscaled scale String()
// -------------------------
// 0 0 "0"
// 0 2 "0.00"
// 0 -2 "0"
// 1 0 "1"
// 100 2 "1.00"
// 10 0 "10"
// 1 -1 "10"
//
// The zero value for a Dec represents the value 0 with scale 0.
//
// Operations are typically performed through the *Dec type.
// The semantics of the assignment operation "=" for "bare" Dec values is
// undefined and should not be relied on.
//
// Methods are typically of the form:
//
// func (z *Dec) Op(x, y *Dec) *Dec
//
// and implement operations z = x Op y with the result as receiver; if it
// is one of the operands it may be overwritten (and its memory reused).
// To enable chaining of operations, the result is also returned. Methods
// returning a result other than *Dec take one of the operands as the receiver.
//
// A "bare" Quo method (quotient / division operation) is not provided, as the
// result is not always a finite decimal and thus in general cannot be
// represented as a Dec.
// Instead, in the common case when rounding is (potentially) necessary,
// QuoRound should be used with a Scale and a Rounder.
// QuoExact or QuoRound with RoundExact can be used in the special cases when it
// is known that the result is always a finite decimal.
//
type Dec struct {
	unscaled big.Int // signed coefficient (sign and magnitude together)
	scale    Scale   // negated exponent: value = unscaled * 10**(-scale)
}
// Scale represents the type used for the scale of a Dec.
type Scale int32
// scaleSize is the number of bytes used to encode a Scale in GobEncode.
const scaleSize = 4 // bytes in a Scale value
// Scaler represents a method for obtaining the scale to use for the result of
// an operation on x and y.
// Implemented by sclr (fixed scale) and scaleQuoExact (exact-quotient scale).
type scaler interface {
	Scale(x *Dec, y *Dec) Scale
}
// bigInt caches the small constants 0..10 as *big.Int to avoid reallocation.
var bigInt = [...]*big.Int{
	big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4),
	big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9),
	big.NewInt(10),
}
// exp10cache caches the first 64 powers of ten so exp10 can serve common
// scales without allocating.
var exp10cache [64]big.Int = func() [64]big.Int {
	e10, e10i := [64]big.Int{}, bigInt[1]
	for i := range e10 { // "i, _ :=" simplified per gofmt -s (S1005)
		e10[i].Set(e10i)
		e10i = new(big.Int).Mul(e10i, bigInt[10])
	}
	return e10
}()
// NewDec allocates and returns a new Dec set to the given int64 unscaled value
// and scale. The result has the mathematical value unscaled * 10**(-scale).
func NewDec(unscaled int64, scale Scale) *Dec {
	return new(Dec).SetUnscaled(unscaled).SetScale(scale)
}
// NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled
// value and scale. The unscaled value is copied, not aliased.
func NewDecBig(unscaled *big.Int, scale Scale) *Dec {
	return new(Dec).SetUnscaledBig(unscaled).SetScale(scale)
}
// Scale returns the scale of x (the negated decimal exponent).
func (x *Dec) Scale() Scale {
	return x.scale
}
// Unscaled returns the unscaled value of x for u and true for ok when the
// unscaled value can be represented as int64; otherwise it returns an undefined
// int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid
// checking the validity of the value when the check is known to be redundant.
func (x *Dec) Unscaled() (u int64, ok bool) {
	u = x.unscaled.Int64()
	// Round-trip check: u is only valid if it converts back to the same big.Int.
	var i big.Int
	ok = i.SetInt64(u).Cmp(&x.unscaled) == 0
	return
}
// UnscaledBig returns the unscaled value of x as *big.Int.
// The returned pointer aliases x's storage: mutating it mutates x.
func (x *Dec) UnscaledBig() *big.Int {
	return &x.unscaled
}
// SetScale sets the scale of z, with the unscaled value unchanged, and returns
// z.
// The mathematical value of the Dec changes as if it was multiplied by
// 10**(oldscale-scale). (No digits are shifted; only the exponent changes.)
func (z *Dec) SetScale(scale Scale) *Dec {
	z.scale = scale
	return z
}
// SetUnscaled sets the unscaled value of z, with the scale unchanged, and
// returns z.
func (z *Dec) SetUnscaled(unscaled int64) *Dec {
	z.unscaled.SetInt64(unscaled)
	return z
}
// SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and
// returns z. The value is copied; z does not alias unscaled.
func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec {
	z.unscaled.Set(unscaled)
	return z
}
// Set sets z to the value of x and returns z.
// It does nothing if z == x.
func (z *Dec) Set(x *Dec) *Dec {
	if z == x {
		return z
	}
	z.SetScale(x.Scale())
	return z.SetUnscaledBig(x.UnscaledBig())
}
// Sign returns:
//
//	-1 if x <  0
//	 0 if x == 0
//	+1 if x >  0
//
// The sign is carried entirely by the unscaled coefficient.
func (x *Dec) Sign() int {
	return x.UnscaledBig().Sign()
}
// Neg sets z to -x and returns z.
func (z *Dec) Neg(x *Dec) *Dec {
	z.UnscaledBig().Neg(x.UnscaledBig())
	return z.SetScale(x.Scale())
}
// Cmp compares x and y and returns:
//
//	-1 if x <  y
//	 0 if x == y
//	+1 if x >  y
//
// Operands are compared at a common scale, so different representations of
// the same value compare equal.
func (x *Dec) Cmp(y *Dec) int {
	xx, yy := upscale(x, y)
	return xx.UnscaledBig().Cmp(yy.UnscaledBig())
}
// Abs sets z to |x| (the absolute value of x) and returns z.
func (z *Dec) Abs(x *Dec) *Dec {
	z.UnscaledBig().Abs(x.UnscaledBig())
	return z.SetScale(x.Scale())
}
// Add sets z to the sum x+y and returns z.
// The scale of z is the greater of the scales of x and y.
func (z *Dec) Add(x, y *Dec) *Dec {
	// Bring both operands to the common (larger) scale, then add coefficients.
	xx, yy := upscale(x, y)
	z.SetScale(xx.Scale())
	z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig())
	return z
}
// Sub sets z to the difference x-y and returns z.
// The scale of z is the greater of the scales of x and y.
func (z *Dec) Sub(x, y *Dec) *Dec {
	// Bring both operands to the common (larger) scale, then subtract.
	xx, yy := upscale(x, y)
	z.SetScale(xx.Scale())
	z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig())
	return z
}
// Mul sets z to the product x*y and returns z.
// The scale of z is the sum of the scales of x and y.
func (z *Dec) Mul(x, y *Dec) *Dec {
z.SetScale(x.Scale() + y.Scale())
z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig())
return z
}
// Round sets z to the value of x rounded to Scale s using Rounder r, and
// returns z.
func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec {
	// Rounding is implemented as division by 1 at the requested scale.
	return z.QuoRound(x, NewDec(1, 0), s, r)
}

// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the
// specified scale.
//
// If the rounder is RoundExact but the result can not be expressed exactly at
// the specified scale, QuoRound returns nil, and the value of z is undefined.
//
// There is no corresponding Div method; the equivalent can be achieved through
// the choice of Rounder used.
//
func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec {
	return z.quo(x, y, sclr{s}, r)
}

// quo is the shared division core: the scaler s chooses the result scale and
// the Rounder r resolves the final digit. It returns nil when r reports that
// the result cannot be represented (e.g. RoundExact on a repeating quotient).
func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec {
	scl := s.Scale(x, y)
	var zzz *Dec
	if r.UseRemainder() {
		// This rounder inspects the remainder to decide the final digit.
		zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int))
		zzz = r.Round(new(Dec), zz, rA, rB)
	} else {
		zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil)
		zzz = r.Round(new(Dec), zz, nil, nil)
	}
	if zzz == nil {
		return nil
	}
	// Work happened in a scratch Dec so z is only written on success.
	return z.Set(zzz)
}

// QuoExact sets z to the quotient x/y and returns z when x/y is a finite
// decimal. Otherwise it returns nil and the value of z is undefined.
//
// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is
// calculated so that the remainder will be zero whenever x/y is a finite
// decimal.
func (z *Dec) QuoExact(x, y *Dec) *Dec {
	return z.quo(x, y, scaleQuoExact{}, RoundExact)
}
// quoRem sets z to the quotient x/y with the scale s, and if useRem is true,
// it sets remNum and remDen to the numerator and denominator of the remainder.
// It returns z, remNum and remDen.
//
// The remainder is normalized to the range -1 < r < 1 to simplify rounding;
// that is, the results satisfy the following equation:
//
//	x / y = z + (remNum/remDen) * 10**(-z.Scale())
//
// See Rounder for more details about rounding.
//
func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool,
	remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) {
	// difference (required adjustment) compared to "canonical" result scale
	shift := s - (x.Scale() - y.Scale())
	// pointers to adjusted unscaled dividend and divisor
	var ix, iy *big.Int
	switch {
	case shift > 0:
		// increased scale: decimal-shift dividend left
		ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift))
		iy = y.UnscaledBig()
	case shift < 0:
		// decreased scale: decimal-shift divisor left
		ix = x.UnscaledBig()
		iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift))
	default:
		// canonical scale: divide the unscaled values as-is
		ix = x.UnscaledBig()
		iy = y.UnscaledBig()
	}
	// save a copy of iy in case it to be overwritten with the result
	// (QuoRem below writes its quotient into z's unscaled big.Int, which
	// may alias iy when z == y and no shift was applied)
	iy2 := iy
	if iy == z.UnscaledBig() {
		iy2 = new(big.Int).Set(iy)
	}
	// set scale
	z.SetScale(s)
	// set unscaled
	if useRem {
		// Int division; QuoRem truncates toward zero, so the remainder
		// carries the sign information the Rounder needs
		_, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int))
		// set remainder
		remNum.Set(intr)
		remDen.Set(iy2)
	} else {
		z.UnscaledBig().Quo(ix, iy)
	}
	return z, remNum, remDen
}
// sclr is a scaler that always yields a fixed, caller-chosen scale.
type sclr struct{ s Scale }

func (s sclr) Scale(x, y *Dec) Scale {
	return s.s
}

// scaleQuoExact computes the minimal result scale at which x/y has a zero
// remainder, when such a scale exists.
type scaleQuoExact struct{}

func (sqe scaleQuoExact) Scale(x, y *Dec) Scale {
	// Reduce x/y to lowest terms; the reduced denominator's factors of 2
	// and 5 determine how many extra decimal digits are needed, since
	// 10**k = 2**k * 5**k.
	rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig())
	f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5])
	var f10 Scale
	if f2 > f5 {
		f10 = Scale(f2)
	} else {
		f10 = Scale(f5)
	}
	return x.Scale() - y.Scale() + f10
}
func factor(n *big.Int, p *big.Int) int {
// could be improved for large factors
d, f := n, 0
for {
dd, dm := new(big.Int).DivMod(d, p, new(big.Int))
if dm.Sign() == 0 {
f++
d = dd
} else {
break
}
}
return f
}
func factor2(n *big.Int) int {
// could be improved for large factors
f := 0
for ; n.Bit(f) == 0; f++ {
}
return f
}
// upscale returns a and b adjusted to a common scale (the larger of the
// two), rescaling whichever operand has the smaller scale. The returned
// values may alias the inputs.
func upscale(a, b *Dec) (*Dec, *Dec) {
	if a.Scale() == b.Scale() {
		return a, b
	}
	if a.Scale() > b.Scale() {
		bb := b.rescale(a.Scale())
		return a, bb
	}
	aa := a.rescale(b.Scale())
	return aa, b
}

// exp10 returns 10**x, serving small exponents from the precomputed
// exp10cache. The cached value is returned by pointer; callers must not
// mutate it.
// NOTE(review): assumes x >= 0 at all call sites — confirm; a negative x
// would be an invalid exponent here.
func exp10(x Scale) *big.Int {
	if int(x) < len(exp10cache) {
		return &exp10cache[int(x)]
	}
	return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil)
}

// rescale returns x expressed at newScale: decreasing the scale truncates
// low-order digits (Quo drops the remainder), increasing it multiplies by
// a power of ten. When the scale is unchanged, x itself is returned.
func (x *Dec) rescale(newScale Scale) *Dec {
	shift := newScale - x.Scale()
	switch {
	case shift < 0:
		e := exp10(-shift)
		return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale)
	case shift > 0:
		e := exp10(shift)
		return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale)
	}
	return x
}
// zeros is a reusable run of '0' bytes used for padding decimal strings;
// lzeros is its length as a Scale.
var zeros = []byte("00000000000000000000000000000000" +
	"00000000000000000000000000000000")
var lzeros = Scale(len(zeros))

// appendZeros appends n '0' bytes to s, copying from zeros in chunks of at
// most lzeros bytes, and returns the extended slice.
func appendZeros(s []byte, n Scale) []byte {
	for i := Scale(0); i < n; i += lzeros {
		if n > i+lzeros {
			s = append(s, zeros...)
		} else {
			// final (possibly partial) chunk
			s = append(s, zeros[0:n-i]...)
		}
	}
	return s
}
// String returns x in plain decimal notation, e.g. unscaled -35 at scale 3
// formats as "-0.035". A nil receiver formats as "<nil>".
func (x *Dec) String() string {
	if x == nil {
		return "<nil>"
	}
	scale := x.Scale()
	s := []byte(x.UnscaledBig().String())
	if scale <= 0 {
		// Non-positive scale: the value is an integer; a negative scale
		// appends trailing zeros (unscaled 5 at scale -2 is "500").
		if scale != 0 && x.unscaled.Sign() != 0 {
			s = appendZeros(s, -scale)
		}
		return string(s)
	}
	// negbit is 1 when x < 0 and 0 otherwise: truncated integer division
	// maps Sign() values -1/0/+1 to 1/0/0. It accounts for the '-' sign
	// occupying the first byte of s.
	negbit := Scale(-((x.Sign() - 1) / 2))
	// scale > 0
	lens := Scale(len(s))
	if lens-negbit <= scale {
		// Every digit is fractional: "[-]0." + zero padding + digits.
		ss := make([]byte, 0, scale+2)
		if negbit == 1 {
			ss = append(ss, '-')
		}
		ss = append(ss, '0', '.')
		ss = appendZeros(ss, scale-lens+negbit)
		ss = append(ss, s[negbit:]...)
		return string(ss)
	}
	// lens > scale: insert the decimal point `scale` digits from the right.
	ss := make([]byte, 0, lens+1)
	ss = append(ss, s[:lens-scale]...)
	ss = append(ss, '.')
	ss = append(ss, s[lens-scale:]...)
	return string(ss)
}
// Format is a support routine for fmt.Formatter. It accepts the decimal
// formats 'd' and 'f', and handles both equivalently.
// Width, precision, flags and bases 2, 8, 16 are not supported.
func (x *Dec) Format(s fmt.State, ch rune) {
	// %v and %s are also accepted so the default fmt verbs work.
	if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' {
		fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String())
		return
	}
	// Write the string form directly. The previous code passed x.String()
	// as the *format* argument of Fprintf, which go vet flags and which
	// would misinterpret any '%' appearing in the output.
	fmt.Fprint(s, x.String())
}
// scan reads a signed decimal number of the form "[+-]digits[.digits]"
// from r into z, stopping at (and unreading) the first rune that cannot
// extend the number. The scale of z becomes the count of digits after the
// decimal point, or 0 when there is none.
func (z *Dec) scan(r io.RuneScanner) (*Dec, error) {
	unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes
	dp, dg := -1, -1                 // indexes of decimal point, first digit
loop:
	for {
		ch, _, err := r.ReadRune()
		if err == io.EOF {
			break loop
		}
		if err != nil {
			return nil, err
		}
		switch {
		case ch == '+' || ch == '-':
			if len(unscaled) > 0 || dp >= 0 { // must be first character
				r.UnreadRune()
				break loop
			}
		case ch == '.':
			if dp >= 0 { // a second '.' terminates the number
				r.UnreadRune()
				break loop
			}
			dp = len(unscaled)
			continue // don't add to unscaled
		case ch >= '0' && ch <= '9':
			if dg == -1 {
				dg = len(unscaled)
			}
		default:
			// any other rune terminates the number and is pushed back
			r.UnreadRune()
			break loop
		}
		unscaled = append(unscaled, byte(ch))
	}
	if dg == -1 {
		return nil, fmt.Errorf("no digits read")
	}
	// Digits collected after dp are fractional; they determine the scale.
	if dp >= 0 {
		z.SetScale(Scale(len(unscaled) - dp))
	} else {
		z.SetScale(0)
	}
	_, ok := z.UnscaledBig().SetString(string(unscaled), 10)
	if !ok {
		return nil, fmt.Errorf("invalid decimal: %s", string(unscaled))
	}
	return z, nil
}
// SetString sets z to the value of s, interpreted as a decimal (base 10),
// and returns z and a boolean indicating success. The scale of z is the
// number of digits after the decimal point (including any trailing 0s),
// or 0 if there is no decimal point. If SetString fails, the value of z
// is undefined but the returned value is nil.
func (z *Dec) SetString(s string) (*Dec, bool) {
	r := strings.NewReader(s)
	_, err := z.scan(r)
	if err != nil {
		return nil, false
	}
	// Reject trailing garbage: a successful scan must have consumed all
	// of s, so the next read must hit EOF.
	_, _, err = r.ReadRune()
	if err != io.EOF {
		return nil, false
	}
	// err == io.EOF => scan consumed all of s
	return z, true
}

// Scan is a support routine for fmt.Scanner; it sets z to the value of
// the scanned number. It accepts the decimal formats 'd' and 'f', and
// handles both equivalently. Bases 2, 8, 16 are not supported.
// The scale of z is the number of digits after the decimal point
// (including any trailing 0s), or 0 if there is no decimal point.
func (z *Dec) Scan(s fmt.ScanState, ch rune) error {
	if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' {
		return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch)
	}
	s.SkipSpace() // leading whitespace is not part of the number
	_, err := z.scan(s)
	return err
}
// Gob encoding version
const decGobVersion byte = 1

// scaleBytes encodes s as scaleSize big-endian bytes.
func scaleBytes(s Scale) []byte {
	buf := make([]byte, scaleSize)
	i := scaleSize
	// Fill from the end so the most significant byte lands first.
	for j := 0; j < scaleSize; j++ {
		i--
		buf[i] = byte(s)
		s >>= 8
	}
	return buf
}

// scale decodes a big-endian Scale from the first scaleSize bytes of b,
// inverting scaleBytes.
func scale(b []byte) (s Scale) {
	for j := 0; j < scaleSize; j++ {
		s <<= 8
		s |= Scale(b[j])
	}
	return
}
// GobEncode implements the gob.GobEncoder interface.
// The payload is the gob encoding of the unscaled big.Int, followed by
// scaleSize bytes of big-endian scale and a trailing version byte.
func (x *Dec) GobEncode() ([]byte, error) {
	buf, err := x.UnscaledBig().GobEncode()
	if err != nil {
		return nil, err
	}
	buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion)
	return buf, nil
}
// GobDecode implements the gob.GobDecoder interface.
// It expects the layout produced by GobEncode: big.Int gob bytes, then
// scaleSize bytes of big-endian scale, then one version byte.
func (z *Dec) GobDecode(buf []byte) error {
	if len(buf) == 0 {
		return fmt.Errorf("Dec.GobDecode: no data")
	}
	b := buf[len(buf)-1]
	if b != decGobVersion {
		return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b)
	}
	l := len(buf) - scaleSize - 1
	if l < 0 {
		// Buffer too short to hold the scale and version trailer; report
		// an error instead of panicking on the out-of-range slice below.
		return fmt.Errorf("Dec.GobDecode: buffer too small")
	}
	err := z.UnscaledBig().GobDecode(buf[:l])
	if err != nil {
		return err
	}
	z.SetScale(scale(buf[l : l+scaleSize]))
	return nil
}
// MarshalText implements the encoding.TextMarshaler interface.
func (x *Dec) MarshalText() ([]byte, error) {
	return []byte(x.String()), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (z *Dec) UnmarshalText(data []byte) error {
	_, ok := z.SetString(string(data))
	if !ok {
		return fmt.Errorf("invalid inf.Dec")
	}
	return nil
}

// MarshalJSON implements the json.Marshaler interface.
// The value is emitted as a bare JSON number, not a quoted string.
func (x *Dec) MarshalJSON() ([]byte, error) {
	return []byte(x.String()), nil
}

// UnmarshalJSON implements the json.Unmarshaler interface.
// NOTE(review): a quoted JSON string fails here since SetString does not
// strip quotes — confirm callers only encode numbers.
func (z *Dec) UnmarshalJSON(data []byte) error {
	_, ok := z.SetString(string(data))
	if !ok {
		return fmt.Errorf("invalid inf.Dec")
	}
	return nil
}
|
package main
import (
"bytes"
"code.google.com/p/go.tools/go/vcs"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"sort"
)
// Godeps describes what a package needs to be rebuilt reproducibly.
// It's the same information stored in file Godeps.
type Godeps struct {
	ImportPath string
	GoVersion  string
	Packages   []string `json:",omitempty"` // Arguments to save, if any.
	Deps       []Dependency

	outerRoot string // root of the outer GOPATH; filled in by loadGoList
}

// A Dependency is a specific revision of a package.
type Dependency struct {
	ImportPath string
	Comment    string `json:",omitempty"` // Description of commit, if present.
	Rev        string // VCS-specific commit ID.

	// used by command save
	pkg *Package

	// used by command go
	outerRoot string // dir, if present, in outer GOPATH
	repoRoot  *vcs.RepoRoot
	vcs       *VCS
}
// Load computes the dependency list for the given packages and appends it
// to g.Deps. pkgs is the list of packages to read dependencies for;
// standard-library packages are skipped. Per-package problems are logged
// and folded into a single returned error so one run reports everything.
func (g *Godeps) Load(pkgs []*Package) error {
	var err1 error
	var path, seen []string
	for _, p := range pkgs {
		if p.Standard {
			log.Println("ignoring stdlib package:", p.ImportPath)
			continue
		}
		if p.Error.Err != "" {
			log.Println(p.Error.Err)
			err1 = errors.New("error loading packages")
			continue
		}
		_, rr, err := VCSForImportPath(p.ImportPath)
		if err != nil {
			log.Println(err)
			err1 = errors.New("error loading packages")
			continue
		}
		// The roots of the argument packages themselves are never deps.
		seen = append(seen, rr.Root)
		path = append(path, p.Deps...)
	}
	sort.Strings(path) // prefer parent directories to children
	// Drop adjacent duplicates so each dependency is listed and loaded
	// only once; path is sorted, so all duplicates are adjacent.
	n := 0
	prev := ""
	for _, s := range path {
		if s != prev {
			path[n] = s
			n++
			prev = s
		}
	}
	path = path[:n]
	for _, pkg := range MustLoadPackages(path...) {
		if pkg.Error.Err != "" {
			log.Println(pkg.Error.Err)
			err1 = errors.New("error loading dependencies")
			continue
		}
		if pkg.Standard {
			continue
		}
		vcs, rr, err := VCSForImportPath(pkg.ImportPath)
		if err != nil {
			log.Println(err)
			err1 = errors.New("error loading dependencies")
			continue
		}
		// Record each repository root at most once.
		if contains(seen, rr.Root) {
			continue
		}
		seen = append(seen, rr.Root)
		id, err := vcs.identify(pkg.Dir)
		if err != nil {
			log.Println(err)
			err1 = errors.New("error loading dependencies")
			continue
		}
		if vcs.isDirty(pkg.Dir) {
			log.Println("dirty working tree:", pkg.Dir)
			err1 = errors.New("error loading dependencies")
			continue
		}
		comment := vcs.describe(pkg.Dir, id)
		g.Deps = append(g.Deps, Dependency{
			ImportPath: pkg.ImportPath,
			Rev:        id,
			Comment:    comment,
			pkg:        pkg,
		})
	}
	return err1
}
// ReadGodeps reads and decodes the Godeps file at path, resolves the VCS
// and repository root of every dependency, and returns the populated
// Godeps value.
func ReadGodeps(path string) (*Godeps, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	// The opened file was previously leaked on every path through this
	// function; release it when done.
	defer f.Close()
	g := new(Godeps)
	err = json.NewDecoder(f).Decode(g)
	if err != nil {
		return nil, err
	}
	err = g.loadGoList()
	if err != nil {
		return nil, err
	}
	for i := range g.Deps {
		d := &g.Deps[i]
		d.vcs, d.repoRoot, err = VCSForImportPath(d.ImportPath)
		if err != nil {
			return nil, err
		}
	}
	return g, nil
}
// loadGoList resolves the outer-GOPATH root of g's own import path and of
// every dependency by passing them all through LoadPackages in one call.
func (g *Godeps) loadGoList() error {
	a := []string{g.ImportPath}
	for _, d := range g.Deps {
		a = append(a, d.ImportPath)
	}
	ps, err := LoadPackages(a...)
	if err != nil {
		return err
	}
	g.outerRoot = ps[0].Root
	// NOTE(review): assumes LoadPackages returns exactly one entry per
	// argument, in argument order — confirm against its implementation.
	for i, p := range ps[1:] {
		g.Deps[i].outerRoot = p.Root
	}
	return nil
}

// WriteTo encodes g as tab-indented JSON followed by a newline to w and
// returns the number of bytes written.
func (g *Godeps) WriteTo(w io.Writer) (int, error) {
	b, err := json.MarshalIndent(g, "", "\t")
	if err != nil {
		return 0, err
	}
	return w.Write(append(b, '\n'))
}
// Returns a path to the local copy of d's repository.
// E.g.
//
//	ImportPath             RepoPath
//	github.com/kr/s3       $spool/github.com/kr/s3
//	github.com/lib/pq/oid  $spool/github.com/lib/pq
func (d Dependency) RepoPath() string {
	return filepath.Join(spool, "repo", d.repoRoot.Root)
}

// Returns a URL for the remote copy of the repository.
func (d Dependency) RemoteURL() string {
	return d.repoRoot.Repo
}

// Returns the url of a local disk clone of the repo, if any;
// empty string when d has no root in the outer GOPATH.
func (d Dependency) FastRemotePath() string {
	if d.outerRoot != "" {
		return d.outerRoot + "/src/" + d.repoRoot.Root
	}
	return ""
}

// Returns a path to the checked-out copy of d's commit.
func (d Dependency) Workdir() string {
	return filepath.Join(d.Gopath(), "src", d.ImportPath)
}

// Returns a path to the checked-out copy of d's repo root.
func (d Dependency) WorkdirRoot() string {
	return filepath.Join(d.Gopath(), "src", d.repoRoot.Root)
}

// Returns a path to a parent of Workdir such that using
// Gopath in GOPATH makes d available to the go tool.
// NOTE(review): d.Rev[:2] panics on revisions shorter than two
// characters — confirm all supported VCSs emit longer IDs.
func (d Dependency) Gopath() string {
	return filepath.Join(spool, "rev", d.Rev[:2], d.Rev[2:])
}
// Creates an empty repo in d.RepoPath(), then registers both the fast
// local remote (when available) and the main upstream remote.
func (d Dependency) CreateRepo(fastRemote, mainRemote string) error {
	if err := os.MkdirAll(d.RepoPath(), 0777); err != nil {
		return err
	}
	if err := d.vcs.create(d.RepoPath()); err != nil {
		return err
	}
	if err := d.link(fastRemote, d.FastRemotePath()); err != nil {
		return err
	}
	return d.link(mainRemote, d.RemoteURL())
}

// link registers url under the given remote name in d's local repository.
func (d Dependency) link(remote, url string) error {
	return d.vcs.link(d.RepoPath(), remote, url)
}

// fetchAndCheckout updates d's local repository from remote and then
// materializes d.Rev in the work directory.
func (d Dependency) fetchAndCheckout(remote string) error {
	if err := d.fetch(remote); err != nil {
		return fmt.Errorf("fetch: %s", err)
	}
	if err := d.checkout(); err != nil {
		return fmt.Errorf("checkout: %s", err)
	}
	return nil
}

// fetch pulls new objects from remote into d's local repository.
func (d Dependency) fetch(remote string) error {
	return d.vcs.fetch(d.RepoPath(), remote)
}

// checkout places d.Rev into d.WorkdirRoot(); it is a no-op when the
// work directory already exists.
func (d Dependency) checkout() error {
	dir := d.WorkdirRoot()
	if exists(dir) {
		return nil
	}
	if !d.vcs.exists(d.RepoPath(), d.Rev) {
		return fmt.Errorf("unknown rev %s for %s", d.Rev, d.ImportPath)
	}
	if err := os.MkdirAll(dir, 0777); err != nil {
		return err
	}
	return d.vcs.checkout(dir, d.Rev, d.RepoPath())
}
// contains reports whether s occurs in a.
func contains(a []string, s string) bool {
	for i := range a {
		if a[i] == s {
			return true
		}
	}
	return false
}
// mustGoVersion returns the trimmed output of `go version`, terminating
// the process via log.Fatal when the command cannot be run.
func mustGoVersion() string {
	cmd := exec.Command("go", "version")
	cmd.Stderr = os.Stderr
	out, err := cmd.Output()
	if err != nil {
		log.Fatal(err)
	}
	return string(bytes.TrimSpace(out))
}
List each dependency only once.
package main
import (
"bytes"
"code.google.com/p/go.tools/go/vcs"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"sort"
)
// Godeps describes what a package needs to be rebuilt reproducibly.
// It's the same information stored in file Godeps.
type Godeps struct {
	ImportPath string
	GoVersion  string
	Packages   []string `json:",omitempty"` // Arguments to save, if any.
	Deps       []Dependency

	outerRoot string // root of the outer GOPATH; filled in by loadGoList
}

// A Dependency is a specific revision of a package.
type Dependency struct {
	ImportPath string
	Comment    string `json:",omitempty"` // Description of commit, if present.
	Rev        string // VCS-specific commit ID.

	// used by command save
	pkg *Package

	// used by command go
	outerRoot string // dir, if present, in outer GOPATH
	repoRoot  *vcs.RepoRoot
	vcs       *VCS
}
// pkgs is the list of packages to read dependencies for.
// Load computes the dependency list and appends it to g.Deps; stdlib
// packages are skipped, and per-package problems are logged and folded
// into a single returned error.
func (g *Godeps) Load(pkgs []*Package) error {
	var err1 error
	var path, seen []string
	for _, p := range pkgs {
		if p.Standard {
			log.Println("ignoring stdlib package:", p.ImportPath)
			continue
		}
		if p.Error.Err != "" {
			log.Println(p.Error.Err)
			err1 = errors.New("error loading packages")
			continue
		}
		_, rr, err := VCSForImportPath(p.ImportPath)
		if err != nil {
			log.Println(err)
			err1 = errors.New("error loading packages")
			continue
		}
		// The argument packages' own roots are never recorded as deps.
		seen = append(seen, rr.Root)
		path = append(path, p.Deps...)
	}
	// Sort (parents before children) and drop duplicates so each
	// dependency is loaded only once.
	sort.Strings(path)
	path = uniq(path)
	for _, pkg := range MustLoadPackages(path...) {
		if pkg.Error.Err != "" {
			log.Println(pkg.Error.Err)
			err1 = errors.New("error loading dependencies")
			continue
		}
		if pkg.Standard {
			continue
		}
		vcs, rr, err := VCSForImportPath(pkg.ImportPath)
		if err != nil {
			log.Println(err)
			err1 = errors.New("error loading dependencies")
			continue
		}
		// Record each repository root at most once.
		if contains(seen, rr.Root) {
			continue
		}
		seen = append(seen, rr.Root)
		id, err := vcs.identify(pkg.Dir)
		if err != nil {
			log.Println(err)
			err1 = errors.New("error loading dependencies")
			continue
		}
		if vcs.isDirty(pkg.Dir) {
			log.Println("dirty working tree:", pkg.Dir)
			err1 = errors.New("error loading dependencies")
			continue
		}
		comment := vcs.describe(pkg.Dir, id)
		g.Deps = append(g.Deps, Dependency{
			ImportPath: pkg.ImportPath,
			Rev:        id,
			Comment:    comment,
			pkg:        pkg,
		})
	}
	return err1
}
// ReadGodeps reads and decodes the Godeps file at path, resolves the VCS
// and repository root of every dependency, and returns the populated
// Godeps value.
func ReadGodeps(path string) (*Godeps, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	// The opened file was previously leaked on every path through this
	// function; release it when done.
	defer f.Close()
	g := new(Godeps)
	err = json.NewDecoder(f).Decode(g)
	if err != nil {
		return nil, err
	}
	err = g.loadGoList()
	if err != nil {
		return nil, err
	}
	for i := range g.Deps {
		d := &g.Deps[i]
		d.vcs, d.repoRoot, err = VCSForImportPath(d.ImportPath)
		if err != nil {
			return nil, err
		}
	}
	return g, nil
}
// loadGoList resolves the outer-GOPATH root of g's own import path and of
// every dependency by passing them all through LoadPackages in one call.
func (g *Godeps) loadGoList() error {
	a := []string{g.ImportPath}
	for _, d := range g.Deps {
		a = append(a, d.ImportPath)
	}
	ps, err := LoadPackages(a...)
	if err != nil {
		return err
	}
	g.outerRoot = ps[0].Root
	// NOTE(review): assumes LoadPackages returns exactly one entry per
	// argument, in argument order — confirm against its implementation.
	for i, p := range ps[1:] {
		g.Deps[i].outerRoot = p.Root
	}
	return nil
}

// WriteTo encodes g as tab-indented JSON followed by a newline to w and
// returns the number of bytes written.
func (g *Godeps) WriteTo(w io.Writer) (int, error) {
	b, err := json.MarshalIndent(g, "", "\t")
	if err != nil {
		return 0, err
	}
	return w.Write(append(b, '\n'))
}
// Returns a path to the local copy of d's repository.
// E.g.
//
//	ImportPath             RepoPath
//	github.com/kr/s3       $spool/github.com/kr/s3
//	github.com/lib/pq/oid  $spool/github.com/lib/pq
func (d Dependency) RepoPath() string {
	return filepath.Join(spool, "repo", d.repoRoot.Root)
}

// Returns a URL for the remote copy of the repository.
func (d Dependency) RemoteURL() string {
	return d.repoRoot.Repo
}

// Returns the url of a local disk clone of the repo, if any;
// empty string when d has no root in the outer GOPATH.
func (d Dependency) FastRemotePath() string {
	if d.outerRoot != "" {
		return d.outerRoot + "/src/" + d.repoRoot.Root
	}
	return ""
}

// Returns a path to the checked-out copy of d's commit.
func (d Dependency) Workdir() string {
	return filepath.Join(d.Gopath(), "src", d.ImportPath)
}

// Returns a path to the checked-out copy of d's repo root.
func (d Dependency) WorkdirRoot() string {
	return filepath.Join(d.Gopath(), "src", d.repoRoot.Root)
}

// Returns a path to a parent of Workdir such that using
// Gopath in GOPATH makes d available to the go tool.
// NOTE(review): d.Rev[:2] panics on revisions shorter than two
// characters — confirm all supported VCSs emit longer IDs.
func (d Dependency) Gopath() string {
	return filepath.Join(spool, "rev", d.Rev[:2], d.Rev[2:])
}
// Creates an empty repo in d.RepoPath(), then registers both the fast
// local remote (when available) and the main upstream remote.
func (d Dependency) CreateRepo(fastRemote, mainRemote string) error {
	if err := os.MkdirAll(d.RepoPath(), 0777); err != nil {
		return err
	}
	if err := d.vcs.create(d.RepoPath()); err != nil {
		return err
	}
	if err := d.link(fastRemote, d.FastRemotePath()); err != nil {
		return err
	}
	return d.link(mainRemote, d.RemoteURL())
}

// link registers url under the given remote name in d's local repository.
func (d Dependency) link(remote, url string) error {
	return d.vcs.link(d.RepoPath(), remote, url)
}

// fetchAndCheckout updates d's local repository from remote and then
// materializes d.Rev in the work directory.
func (d Dependency) fetchAndCheckout(remote string) error {
	if err := d.fetch(remote); err != nil {
		return fmt.Errorf("fetch: %s", err)
	}
	if err := d.checkout(); err != nil {
		return fmt.Errorf("checkout: %s", err)
	}
	return nil
}

// fetch pulls new objects from remote into d's local repository.
func (d Dependency) fetch(remote string) error {
	return d.vcs.fetch(d.RepoPath(), remote)
}

// checkout places d.Rev into d.WorkdirRoot(); it is a no-op when the
// work directory already exists.
func (d Dependency) checkout() error {
	dir := d.WorkdirRoot()
	if exists(dir) {
		return nil
	}
	if !d.vcs.exists(d.RepoPath(), d.Rev) {
		return fmt.Errorf("unknown rev %s for %s", d.Rev, d.ImportPath)
	}
	if err := os.MkdirAll(dir, 0777); err != nil {
		return err
	}
	return d.vcs.checkout(dir, d.Rev, d.RepoPath())
}
// contains reports whether s occurs in a.
func contains(a []string, s string) bool {
	for i := range a {
		if a[i] == s {
			return true
		}
	}
	return false
}
// uniq removes runs of equal adjacent strings from a in place and returns
// the shortened prefix of a. The input should be sorted (or grouped) for
// global deduplication. Note that, like the original, leading empty
// strings are dropped because the comparison starts against "".
func uniq(a []string) []string {
	n := 0
	prev := ""
	for _, v := range a {
		if v == prev {
			continue
		}
		a[n] = v
		n++
		prev = v
	}
	return a[:n]
}
// mustGoVersion returns the trimmed output of `go version`, terminating
// the process via log.Fatal when the command cannot be run.
func mustGoVersion() string {
	cmd := exec.Command("go", "version")
	cmd.Stderr = os.Stderr
	out, err := cmd.Output()
	if err != nil {
		log.Fatal(err)
	}
	return string(bytes.TrimSpace(out))
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Extended and bugfixes by Miek Gieben. Copyright 2010-2012.
// DOMAIN NAME SYSTEM
//
// Package dns implements a full featured interface to the Domain Name System.
// Server- and client-side programming is supported.
// The package allows complete control over what is send out to the DNS. The package
// API follows the less-is-more principle, by presenting a small, clean interface.
//
// The package dns supports (asynchronous) querying/replying, incoming/outgoing AXFR/IXFR,
// TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
// Note that domain names MUST be fully qualified, before sending them, unqualified
// names in a message will result in a packing failure.
//
// Resource records are native types. They are not stored in wire format.
// Basic usage pattern for creating a new resource record:
//
// r := new(dns.RR_MX)
// r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
// r.Pref = 10
// r.Mx = "mx.miek.nl."
//
// Or directly from a string:
//
// mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
//
// Or when the default TTL (3600) and class (IN) suit you:
//
// mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
//
// Or even:
//
// mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
//
// In the DNS messages are exchanged, these messages contain resource
// records (sets). Use pattern for creating a message:
//
// m := new(dns.Msg)
// m.SetQuestion("miek.nl.", dns.TypeMX)
//
// Or when not certain if the domain name is fully qualified:
//
// m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
//
// The message m is now a message with the question section set to ask
// the MX records for the miek.nl. zone.
//
// The following is slightly more verbose, but more flexible:
//
// m1 := new(dns.Msg)
// m1.MsgHdr.Id = Id()
// m1.MsgHdr.RecursionDesired = true
// m1.Question = make([]Question, 1)
// m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
//
// After creating a message it can be send.
// Basic use pattern for synchronous querying the DNS at a
// server configured on 127.0.0.1 and port 53:
//
// c := new(Client)
// in, err := c.Exchange(m1, "127.0.0.1:53")
//
// An asynchronous query is also possible, see client.Do and client.DoRtt.
//
// From a birds eye view a dns message consists out of four sections.
// The question section: in.Question, the answer section: in.Answer,
// the authority section: in.Ns and the additional section: in.Extra.
//
// Each of these sections (except the Question section) contain a []RR. Basic
// use pattern for accessing the rdata of a TXT RR as the first RR in
// the Answer section:
//
// if t, ok := in.Answer[0].(*RR_TXT); ok {
// // do something with t.Txt
// }
package dns
import (
"net"
"strconv"
)
const (
	year68         = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
	DefaultMsgSize = 4096    // Standard default for larger than 512 packets.
	udpMsgSize     = 512     // Default buffer size for servers receiving UDP packets.
	MaxMsgSize     = 65536   // Largest possible DNS packet.
	defaultTtl     = 3600    // Default TTL.
)

// Error represents a DNS error
type Error struct {
	Err     string
	Name    string
	Server  net.Addr
	Timeout bool
}

// Error implements the error interface, prefixing the message with the
// offending name when one is set. A nil receiver is tolerated.
func (e *Error) Error() string {
	if e == nil {
		return "dns: <nil>"
	}
	if e.Name == "" {
		return e.Err
	}
	return e.Name + ": " + e.Err
}
// An RR represents a resource record.
type RR interface {
	// Header returns the header of a resource record. The header contains
	// everything up to the rdata.
	Header() *RR_Header
	// String returns the text representation of the resource record.
	String() string
	// Len returns the length (in octets) of the uncompressed RR in wire format.
	Len() int
	// Copy returns a copy of the RR
	Copy() RR
}

// DNS resource records.
// There are many types of RRs,
// but they all share the same header.
type RR_Header struct {
	Name     string `dns:"cdomain-name"`
	Rrtype   uint16
	Class    uint16
	Ttl      uint32
	Rdlength uint16 // length of data after header
}
// Header returns h itself, satisfying the RR interface.
func (h *RR_Header) Header() *RR_Header {
	return h
}

// CopyHeader returns a field-by-field copy of h.
func (h *RR_Header) CopyHeader() *RR_Header {
	r := new(RR_Header)
	r.Name = h.Name
	r.Rrtype = h.Rrtype
	r.Class = h.Class
	r.Ttl = h.Ttl
	r.Rdlength = h.Rdlength
	return r
}

// Just to implement the RR interface; a bare header has no rdata to copy,
// so nil is returned.
func (h *RR_Header) Copy() RR {
	return nil
}
// String returns the presentation-format prefix of an RR: owner name, TTL,
// class and type, each followed by a tab. OPT pseudo-RRs get a ';' prefix.
func (h *RR_Header) String() string {
	var s string
	if h.Rrtype == TypeOPT {
		s = ";"
		// and maybe other things
	}
	// An empty owner name prints as the root ".".
	if len(h.Name) == 0 {
		s += ".\t"
	} else {
		s += h.Name + "\t"
	}
	s = s + strconv.FormatInt(int64(h.Ttl), 10) + "\t"
	// Unknown classes and types fall back to the generic CLASSnnn/TYPEnnn
	// notation (RFC 3597 style).
	if _, ok := Class_str[h.Class]; ok {
		s += Class_str[h.Class] + "\t"
	} else {
		s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t"
	}
	if _, ok := Rr_str[h.Rrtype]; ok {
		s += Rr_str[h.Rrtype] + "\t"
	} else {
		s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t"
	}
	return s
}

// Len returns the wire-format length of the header: the owner name
// (uncompressed, plus terminating root label) and the fixed fields.
func (h *RR_Header) Len() int {
	l := len(h.Name) + 1
	l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
	return l
}
// find best matching pattern for zone
// zoneMatch reports whether pattern and zone share a common suffix,
// comparing byte-by-byte from the right until either string is exhausted
// or a mismatch is found. An empty pattern never matches; an empty zone
// is treated as the root ".".
func zoneMatch(pattern, zone string) (ok bool) {
	if len(pattern) == 0 {
		return
	}
	if len(zone) == 0 {
		zone = "."
	}
	// pattern = Fqdn(pattern) // should already be a fqdn
	zone = Fqdn(zone)
	i := 0
	for {
		ok = pattern[len(pattern)-1-i] == zone[len(zone)-1-i]
		i++
		if !ok { // mismatch: give up
			break
		}
		// Stop before running off either string; ok stays true when one
		// string is a suffix of the other.
		if len(pattern)-1-i < 0 || len(zone)-1-i < 0 {
			break
		}
	}
	return
}
Small correction: the example should construct RR_MX, since it sets MX fields.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Extended and bugfixes by Miek Gieben. Copyright 2010-2012.
// DOMAIN NAME SYSTEM
//
// Package dns implements a full featured interface to the Domain Name System.
// Server- and client-side programming is supported.
// The package allows complete control over what is send out to the DNS. The package
// API follows the less-is-more principle, by presenting a small, clean interface.
//
// The package dns supports (asynchronous) querying/replying, incoming/outgoing AXFR/IXFR,
// TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
// Note that domain names MUST be fully qualified, before sending them, unqualified
// names in a message will result in a packing failure.
//
// Resource records are native types. They are not stored in wire format.
// Basic usage pattern for creating a new resource record:
//
// r := new(dns.RR_MX)
// r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
// r.Pref = 10
// r.Mx = "mx.miek.nl."
//
// Or directly from a string:
//
// mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
//
// Or when the default TTL (3600) and class (IN) suit you:
//
// mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
//
// Or even:
//
// mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
//
// In the DNS messages are exchanged, these messages contain resource
// records (sets). Use pattern for creating a message:
//
// m := new(dns.Msg)
// m.SetQuestion("miek.nl.", dns.TypeMX)
//
// Or when not certain if the domain name is fully qualified:
//
// m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
//
// The message m is now a message with the question section set to ask
// the MX records for the miek.nl. zone.
//
// The following is slightly more verbose, but more flexible:
//
// m1 := new(dns.Msg)
// m1.MsgHdr.Id = Id()
// m1.MsgHdr.RecursionDesired = true
// m1.Question = make([]Question, 1)
// m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
//
// After creating a message it can be send.
// Basic use pattern for synchronous querying the DNS at a
// server configured on 127.0.0.1 and port 53:
//
// c := new(Client)
// in, err := c.Exchange(m1, "127.0.0.1:53")
//
// An asynchronous query is also possible, see client.Do and client.DoRtt.
//
// From a birds eye view a dns message consists out of four sections.
// The question section: in.Question, the answer section: in.Answer,
// the authority section: in.Ns and the additional section: in.Extra.
//
// Each of these sections (except the Question section) contain a []RR. Basic
// use pattern for accessing the rdata of a TXT RR as the first RR in
// the Answer section:
//
// if t, ok := in.Answer[0].(*RR_TXT); ok {
// // do something with t.Txt
// }
package dns
import (
"net"
"strconv"
)
const (
	year68         = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
	DefaultMsgSize = 4096    // Standard default for larger than 512 packets.
	udpMsgSize     = 512     // Default buffer size for servers receiving UDP packets.
	MaxMsgSize     = 65536   // Largest possible DNS packet.
	defaultTtl     = 3600    // Default TTL.
)

// Error represents a DNS error
type Error struct {
	Err     string
	Name    string
	Server  net.Addr
	Timeout bool
}

// Error implements the error interface, prefixing the message with the
// offending name when one is set. A nil receiver is tolerated.
func (e *Error) Error() string {
	if e == nil {
		return "dns: <nil>"
	}
	if e.Name == "" {
		return e.Err
	}
	return e.Name + ": " + e.Err
}
// An RR represents a resource record.
type RR interface {
	// Header returns the header of a resource record. The header contains
	// everything up to the rdata.
	Header() *RR_Header
	// String returns the text representation of the resource record.
	String() string
	// Len returns the length (in octets) of the uncompressed RR in wire format.
	Len() int
	// Copy returns a copy of the RR
	Copy() RR
}

// DNS resource records.
// There are many types of RRs,
// but they all share the same header.
type RR_Header struct {
	Name     string `dns:"cdomain-name"`
	Rrtype   uint16
	Class    uint16
	Ttl      uint32
	Rdlength uint16 // length of data after header
}
// Header returns h itself, satisfying the RR interface.
func (h *RR_Header) Header() *RR_Header {
	return h
}
// CopyHeader returns a fresh copy of the header h.
func (h *RR_Header) CopyHeader() *RR_Header {
	// Every field of RR_Header is a value type, so a struct copy
	// duplicates all of them, matching the field-by-field original.
	c := *h
	return &c
}
// Just to implement the RR interface; a bare header has no rdata to copy,
// so nil is returned.
func (h *RR_Header) Copy() RR {
	return nil
}
// String returns the presentation-format prefix of an RR: owner name, TTL,
// class and type, each followed by a tab. OPT pseudo-RRs get a ';' prefix.
func (h *RR_Header) String() string {
	var s string
	if h.Rrtype == TypeOPT {
		s = ";"
		// and maybe other things
	}
	// An empty owner name prints as the root ".".
	if len(h.Name) == 0 {
		s += ".\t"
	} else {
		s += h.Name + "\t"
	}
	s = s + strconv.FormatInt(int64(h.Ttl), 10) + "\t"
	// Unknown classes and types fall back to the generic CLASSnnn/TYPEnnn
	// notation (RFC 3597 style).
	if _, ok := Class_str[h.Class]; ok {
		s += Class_str[h.Class] + "\t"
	} else {
		s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t"
	}
	if _, ok := Rr_str[h.Rrtype]; ok {
		s += Rr_str[h.Rrtype] + "\t"
	} else {
		s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t"
	}
	return s
}

// Len returns the wire-format length of the header: the owner name
// (uncompressed, plus terminating root label) and the fixed fields.
func (h *RR_Header) Len() int {
	l := len(h.Name) + 1
	l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
	return l
}
// find best matching pattern for zone
// zoneMatch reports whether pattern and zone share a common suffix,
// comparing byte-by-byte from the right until either string is exhausted
// or a mismatch is found. An empty pattern never matches; an empty zone
// is treated as the root ".".
func zoneMatch(pattern, zone string) (ok bool) {
	if len(pattern) == 0 {
		return
	}
	if len(zone) == 0 {
		zone = "."
	}
	// pattern = Fqdn(pattern) // should already be a fqdn
	zone = Fqdn(zone)
	i := 0
	for {
		ok = pattern[len(pattern)-1-i] == zone[len(zone)-1-i]
		i++
		if !ok { // mismatch: give up
			break
		}
		// Stop before running off either string; ok stays true when one
		// string is a suffix of the other.
		if len(pattern)-1-i < 0 || len(zone)-1-i < 0 {
			break
		}
	}
	return
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Extensions of the original work are copyright (c) 2011 Miek Gieben
// Package dns implements a full featured interface to the Domain Name System.
// Server- and client-side programming is supported.
// The package allows complete control over what is sent out to the DNS. The package
// API follows the less-is-more principle, by presenting a small, clean interface.
//
// The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
// TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
// Note that domain names MUST be fully qualified, before sending them, unqualified
// names in a message will result in a packing failure.
//
// Resource records are native types. They are not stored in wire format.
// Basic usage pattern for creating a new resource record:
//
// r := new(dns.MX)
// r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
// r.Preference = 10
// r.Mx = "mx.miek.nl."
//
// Or directly from a string:
//
// mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
//
// Or when the default TTL (3600) and class (IN) suit you:
//
// mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
//
// Or even:
//
// mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
//
// In the DNS, messages are exchanged; these messages contain resource
// records (sets). Use pattern for creating a message:
//
// m := new(dns.Msg)
// m.SetQuestion("miek.nl.", dns.TypeMX)
//
// Or when not certain if the domain name is fully qualified:
//
// m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
//
// The message m is now a message with the question section set to ask
// the MX records for the miek.nl. zone.
//
// The following is slightly more verbose, but more flexible:
//
// m1 := new(dns.Msg)
// m1.Id = dns.Id()
// m1.RecursionDesired = true
// m1.Question = make([]Question, 1)
// m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
//
// After creating a message it can be sent.
// Basic use pattern for synchronous querying the DNS at a
// server configured on 127.0.0.1 and port 53:
//
// c := new(dns.Client)
// in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
//
// Suppressing
// multiple outstanding queries (with the same question, type and class) is as easy as setting:
//
// c.SingleInflight = true
//
// If these "advanced" features are not needed, a simple UDP query can be sent,
// with:
//
// in, err := dns.Exchange(m1, "127.0.0.1:53")
//
// When this function returns you will get a dns message. A dns message consists
// out of four sections.
// The question section: in.Question, the answer section: in.Answer,
// the authority section: in.Ns and the additional section: in.Extra.
//
// Each of these sections (except the Question section) contain a []RR. Basic
// use pattern for accessing the rdata of a TXT RR as the first RR in
// the Answer section:
//
// if t, ok := in.Answer[0].(*dns.TXT); ok {
// // do something with t.Txt
// }
//
// Domain Name and TXT Character String Representations
//
// Both domain names and TXT character strings are converted to presentation
// form both when unpacked and when converted to strings.
//
// For TXT character strings, tabs, carriage returns and line feeds will be
// converted to \t, \r and \n respectively. Back slashes and quotations marks
// will be escaped. Bytes below 32 and above 127 will be converted to \DDD
// form.
//
// For domain names, in addition to the above rules brackets, periods,
// spaces, semicolons and the at symbol are escaped.
package dns
import (
"strconv"
)
// Package-wide size and time constants.
const (
	year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
	DefaultMsgSize = 4096 // Standard default for larger than 512 bytes.
	MinMsgSize = 512 // Minimal size of a DNS packet.
	MaxMsgSize = 65536 // Largest possible DNS packet.
	defaultTtl = 3600 // Default TTL.
)
// Error represents a DNS error.
type Error struct{ err string }

// Error implements the error interface. All messages carry a "dns: "
// prefix; a nil receiver yields "dns: <nil>" instead of panicking.
func (e *Error) Error() string {
	if e != nil {
		return "dns: " + e.err
	}
	return "dns: <nil>"
}
// An RR represents a resource record.
type RR interface {
	// Header returns the header of a resource record. The header contains
	// everything up to the rdata.
	Header() *RR_Header
	// String returns the text representation of the resource record.
	String() string
	// copy returns a copy of the RR.
	copy() RR
	// len returns the length (in octets) of the uncompressed RR in wire format.
	len() int
}
// DNS resource records.
// There are many types of RRs,
// but they all share the same header.
type RR_Header struct {
	Name     string `dns:"cdomain-name"`
	Rrtype   uint16
	Class    uint16
	Ttl      uint32
	Rdlength uint16 // length of data after header
}

// Header makes *RR_Header satisfy the header part of the RR interface.
func (h *RR_Header) Header() *RR_Header { return h }

// copy exists just to implement the RR interface; a bare header has no
// rdata to duplicate, so it returns nil.
func (h *RR_Header) copy() RR { return nil }

// copyHeader returns a shallow copy of the header.
func (h *RR_Header) copyHeader() *RR_Header {
	r := *h
	return &r
}

// String returns the presentation-format prefix "name\tttl\tclass\ttype\t".
// OPT pseudo-records get a leading ";" so they never parse as zone data.
func (h *RR_Header) String() string {
	prefix := ""
	if h.Rrtype == TypeOPT {
		prefix = ";"
		// and maybe other things
	}
	return prefix +
		sprintName(h.Name) + "\t" +
		strconv.FormatInt(int64(h.Ttl), 10) + "\t" +
		Class(h.Class).String() + "\t" +
		Type(h.Rrtype).String() + "\t"
}

// len returns the uncompressed wire-format length of the header.
func (h *RR_Header) len() int {
	// name + terminating zero octet, then the four fixed fields.
	return len(h.Name) + 1 + 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
}
// zoneMatch reports whether zone matches pattern, comparing both
// strings byte for byte from the right-hand (root) end. pattern is
// assumed to already be fully qualified; zone is qualified here.
func zoneMatch(pattern, zone string) (ok bool) {
	if len(pattern) == 0 {
		return
	}
	if len(zone) == 0 {
		zone = "."
	}
	// pattern = Fqdn(pattern) // should already be a fqdn
	zone = Fqdn(zone)
	for i := 0; ; i++ {
		ok = pattern[len(pattern)-1-i] == zone[len(zone)-1-i]
		if !ok {
			return
		}
		// Stop once the shorter of the two strings is exhausted.
		if len(pattern)-2-i < 0 || len(zone)-2-i < 0 {
			return
		}
	}
}
// ToRFC3597 converts a known RR to the unknown RR representation
// from RFC 3597.
func (rr *RFC3597) ToRFC3597(r RR) error {
	// r.len()*2 deliberately oversizes the scratch buffer.
	buf := make([]byte, r.len()*2)
	off, err := PackStruct(r, buf, 0)
	if err != nil {
		return err
	}
	buf = buf[:off]
	// Patch the rdlength in the raw bytes, then re-read as RFC3597.
	rawSetRdlength(buf, 0, off)
	_, err = UnpackStruct(rr, buf, 0)
	return err
}
zoneMatch is not used
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Extensions of the original work are copyright (c) 2011 Miek Gieben
// Package dns implements a full featured interface to the Domain Name System.
// Server- and client-side programming is supported.
// The package allows complete control over what is sent out to the DNS. The package
// API follows the less-is-more principle, by presenting a small, clean interface.
//
// The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
// TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
// Note that domain names MUST be fully qualified, before sending them, unqualified
// names in a message will result in a packing failure.
//
// Resource records are native types. They are not stored in wire format.
// Basic usage pattern for creating a new resource record:
//
// r := new(dns.MX)
// r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
// r.Preference = 10
// r.Mx = "mx.miek.nl."
//
// Or directly from a string:
//
// mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
//
// Or when the default TTL (3600) and class (IN) suit you:
//
// mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
//
// Or even:
//
// mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
//
// In the DNS, messages are exchanged; these messages contain resource
// records (sets). Use pattern for creating a message:
//
// m := new(dns.Msg)
// m.SetQuestion("miek.nl.", dns.TypeMX)
//
// Or when not certain if the domain name is fully qualified:
//
// m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
//
// The message m is now a message with the question section set to ask
// the MX records for the miek.nl. zone.
//
// The following is slightly more verbose, but more flexible:
//
// m1 := new(dns.Msg)
// m1.Id = dns.Id()
// m1.RecursionDesired = true
// m1.Question = make([]Question, 1)
// m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
//
// After creating a message it can be sent.
// Basic use pattern for synchronous querying the DNS at a
// server configured on 127.0.0.1 and port 53:
//
// c := new(dns.Client)
// in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
//
// Suppressing
// multiple outstanding queries (with the same question, type and class) is as easy as setting:
//
// c.SingleInflight = true
//
// If these "advanced" features are not needed, a simple UDP query can be sent,
// with:
//
// in, err := dns.Exchange(m1, "127.0.0.1:53")
//
// When this function returns you will get a dns message. A dns message consists
// out of four sections.
// The question section: in.Question, the answer section: in.Answer,
// the authority section: in.Ns and the additional section: in.Extra.
//
// Each of these sections (except the Question section) contain a []RR. Basic
// use pattern for accessing the rdata of a TXT RR as the first RR in
// the Answer section:
//
// if t, ok := in.Answer[0].(*dns.TXT); ok {
// // do something with t.Txt
// }
//
// Domain Name and TXT Character String Representations
//
// Both domain names and TXT character strings are converted to presentation
// form both when unpacked and when converted to strings.
//
// For TXT character strings, tabs, carriage returns and line feeds will be
// converted to \t, \r and \n respectively. Back slashes and quotations marks
// will be escaped. Bytes below 32 and above 127 will be converted to \DDD
// form.
//
// For domain names, in addition to the above rules brackets, periods,
// spaces, semicolons and the at symbol are escaped.
package dns
import (
"strconv"
)
// Package-wide size and time constants.
const (
	year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
	DefaultMsgSize = 4096 // Standard default for larger than 512 bytes.
	MinMsgSize = 512 // Minimal size of a DNS packet.
	MaxMsgSize = 65536 // Largest possible DNS packet.
	defaultTtl = 3600 // Default TTL.
)
// Error represents a DNS error.
type Error struct{ err string }

// Error implements the error interface. All messages carry a "dns: "
// prefix; a nil receiver yields "dns: <nil>" instead of panicking.
func (e *Error) Error() string {
	if e != nil {
		return "dns: " + e.err
	}
	return "dns: <nil>"
}
// An RR represents a resource record.
type RR interface {
	// Header returns the header of a resource record. The header contains
	// everything up to the rdata.
	Header() *RR_Header
	// String returns the text representation of the resource record.
	String() string
	// copy returns a copy of the RR.
	copy() RR
	// len returns the length (in octets) of the uncompressed RR in wire format.
	len() int
}
// DNS resource records.
// There are many types of RRs,
// but they all share the same header.
type RR_Header struct {
	Name     string `dns:"cdomain-name"`
	Rrtype   uint16
	Class    uint16
	Ttl      uint32
	Rdlength uint16 // length of data after header
}

// Header makes *RR_Header satisfy the header part of the RR interface.
func (h *RR_Header) Header() *RR_Header { return h }

// copy exists just to implement the RR interface; a bare header has no
// rdata to duplicate, so it returns nil.
func (h *RR_Header) copy() RR { return nil }

// copyHeader returns a shallow copy of the header.
func (h *RR_Header) copyHeader() *RR_Header {
	r := *h
	return &r
}

// String returns the presentation-format prefix "name\tttl\tclass\ttype\t".
// OPT pseudo-records get a leading ";" so they never parse as zone data.
func (h *RR_Header) String() string {
	prefix := ""
	if h.Rrtype == TypeOPT {
		prefix = ";"
		// and maybe other things
	}
	return prefix +
		sprintName(h.Name) + "\t" +
		strconv.FormatInt(int64(h.Ttl), 10) + "\t" +
		Class(h.Class).String() + "\t" +
		Type(h.Rrtype).String() + "\t"
}

// len returns the uncompressed wire-format length of the header.
func (h *RR_Header) len() int {
	// name + terminating zero octet, then the four fixed fields.
	return len(h.Name) + 1 + 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
}
// ToRFC3597 converts a known RR to the unknown RR representation
// from RFC 3597.
func (rr *RFC3597) ToRFC3597(r RR) error {
	// r.len()*2 deliberately oversizes the scratch buffer.
	buf := make([]byte, r.len()*2)
	off, err := PackStruct(r, buf, 0)
	if err != nil {
		return err
	}
	buf = buf[:off]
	// Patch the rdlength in the raw bytes, then re-read as RFC3597.
	rawSetRdlength(buf, 0, off)
	_, err = UnpackStruct(rr, buf, 0)
	return err
}
|
// Copyright 2017 MSolution.IO
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package users
import (
"database/sql"
"errors"
"fmt"
"strings"
"time"
"net/http"
"github.com/trackit/jsonlog"
"github.com/trackit/trackit-server/config"
"github.com/trackit/trackit-server/db"
"github.com/trackit/trackit-server/models"
"github.com/trackit/trackit-server/routes"
"github.com/trackit/trackit-server/mail"
"github.com/satori/go.uuid"
)
const (
	// Minimum accepted password length.
	// NOTE(review): the name says "Max" but the message implies a minimum —
	// confirm intent before renaming, other files in the package may use it.
	passwordMaxLength = 12
)

var (
	// ErrPasswordTooShort is returned when a password is shorter than
	// passwordMaxLength. The original used the invalid fmt verb %u, which
	// Go renders as "%!u(int=12)"; %d prints the intended number.
	ErrPasswordTooShort = fmt.Errorf("Password must be at least %d characters.", passwordMaxLength)
)
// init registers the /user and /user/viewer routes with the shared muxer.
func init() {
	// /user: POST registers, PATCH edits, GET returns the current user.
	// All three run inside a per-request DB transaction.
	routes.MethodMuxer{
		http.MethodPost: routes.H(createUser).With(
			routes.RequestContentType{"application/json"},
			routes.RequestBody{createUserRequestBody{"example@example.com", "pa55w0rd"}},
			routes.Documentation{
				Summary: "register a new user",
				Description: "Registers a new user using an e-mail and password, and responds with the user's data.",
			},
		),
		http.MethodPatch: routes.H(patchUser).With(
			RequireAuthenticatedUser{ViewerAsSelf},
			routes.RequestContentType{"application/json"},
			routes.RequestBody{createUserRequestBody{"example@example.com", "pa55w0rd"}},
			routes.Documentation{
				Summary: "edit the current user",
				Description: "Edit the current user, and responds with the user's data.",
			},
		),
		http.MethodGet: routes.H(me).With(
			RequireAuthenticatedUser{ViewerAsSelf},
			routes.Documentation{
				Summary: "get the current user",
				Description: "Responds with the currently authenticated user's data.",
			},
		),
	}.H().With(
		db.RequestTransaction{db.Db},
		routes.Documentation{
			Summary: "register or get the user",
		},
	).Register("/user")
	// /user/viewer: POST creates a read-only viewer account, GET lists them.
	routes.MethodMuxer{
		http.MethodPost: routes.H(createViewerUser).With(
			routes.RequestContentType{"application/json"},
			RequireAuthenticatedUser{ViewerCannot},
			routes.RequestBody{createViewerUserRequestBody{"example@example.com"}},
			routes.Documentation{
				Summary: "register a new viewer user",
				Description: "Registers a new viewer user linked to the current user, which will only be able to view its parent user's data.",
			},
		),
		http.MethodGet: routes.H(getViewerUsers).With(
			RequireAuthenticatedUser{ViewerAsParent},
			routes.Documentation{
				Summary: "list viewer users",
				Description: "Lists the viewer users registered for the current account.",
			},
		),
	}.H().With(
		db.RequestTransaction{db.Db},
	).Register("/user/viewer")
}
// createUserRequestBody is the JSON payload for POST and PATCH /user.
// Both fields are required (req:"nonzero").
type createUserRequestBody struct {
	Email string `json:"email" req:"nonzero"`
	Password string `json:"password" req:"nonzero"`
}
// createUser handles POST /user: it decodes the request body, creates the
// user inside the request transaction and, when a default role is fully
// configured, attaches that role to the new account.
func createUser(request *http.Request, a routes.Arguments) (int, interface{}) {
	var body createUserRequestBody
	routes.MustRequestBody(a, &body)
	tx := a[db.Transaction].(*sql.Tx)
	status, response := createUserWithValidBody(request, body, tx)
	// Add the default role to the new account. No error is returned in
	// case of failure; the billing repository is not processed instantly.
	defaultRoleConfigured := config.DefaultRole != "" &&
		config.DefaultRoleName != "" &&
		config.DefaultRoleExternal != "" &&
		config.DefaultRoleBucket != ""
	if status == 200 && defaultRoleConfigured {
		addDefaultRole(request, response.(User), tx)
	}
	return status, response
}
// createUserWithValidBody creates a user from an already-decoded request
// body. It returns 200 with the user on success, 409 when the e-mail is
// already registered (MySQL duplicate-key error 1062) and 500 otherwise.
func createUserWithValidBody(request *http.Request, body createUserRequestBody, tx *sql.Tx) (int, interface{}) {
	ctx := request.Context()
	logger := jsonlog.LoggerFromContextOrDefault(ctx)
	user, err := CreateUserWithPassword(ctx, tx, body.Email, body.Password)
	if err == nil {
		logger.Info("User created.", user)
		return 200, user
	}
	logger.Error(err.Error(), nil)
	// strings.Split always yields at least one element, so the previous
	// len(...) >= 1 guard was redundant; SplitN avoids splitting the rest.
	if strings.SplitN(err.Error(), ":", 2)[0] == "Error 1062" {
		return 409, errors.New("Account already exists.")
	}
	return 500, errors.New("Failed to create user.")
}
// createViewerUserRequestBody is the JSON payload for POST /user/viewer.
type createViewerUserRequestBody struct {
	Email string `json:"email" req:"nonzero"`
}
// createViewerUserResponseBody is the response for POST /user/viewer:
// the created viewer user plus its generated password.
type createViewerUserResponseBody struct {
	User
	Password string `json:"password" req:"nonzero"`
}
// createViewerUser handles POST /user/viewer: it creates a viewer user
// attached to the current user, stores a hashed one-time token and mails
// the viewer a password-reset link.
func createViewerUser(request *http.Request, a routes.Arguments) (int, interface{}) {
	var body createViewerUserRequestBody
	routes.MustRequestBody(a, &body)
	currentUser := a[AuthenticatedUser].(User)
	tx := a[db.Transaction].(*sql.Tx)
	ctx := request.Context()
	token := uuid.NewV1().String()
	logger := jsonlog.LoggerFromContextOrDefault(ctx)
	tokenHash, err := getPasswordHash(token)
	if err != nil {
		logger.Error("Failed to create token hash.", err.Error())
		return 500, errors.New("Failed to create token hash")
	}
	viewerUser, viewerUserPassword, err := CreateUserWithParent(ctx, tx, body.Email, currentUser)
	if err != nil {
		if strings.SplitN(err.Error(), ":", 2)[0] == "Error 1062" {
			return 409, errors.New("Email already taken.")
		}
		return 500, errors.New("Failed to create viewer user.")
	}
	response := createViewerUserResponseBody{
		User:     viewerUser,
		Password: viewerUserPassword,
	}
	dbForgottenPassword := models.ForgottenPassword{
		UserID:  viewerUser.Id,
		Token:   tokenHash,
		Created: time.Now(),
	}
	err = dbForgottenPassword.Insert(tx)
	if err != nil {
		logger.Error("Failed to insert viewer password token in database.", err.Error())
		return 500, errors.New("Failed to create viewer password token")
	}
	mailSubject := "Your TrackIt viewer password"
	// BUG FIX: the reset link must carry the forgotten-password row ID
	// (the counterpart of the token), not the viewer user's ID.
	mailBody := fmt.Sprintf("Please follow this link to create your password: https://re.trackit.io/reset/%d/%s.", dbForgottenPassword.ID, token)
	err = mail.SendMail(viewerUser.Email, mailSubject, mailBody, request.Context())
	if err != nil {
		logger.Error("Failed to send viewer password email.", err.Error())
		return 500, errors.New("Failed to send viewer password email")
	}
	return http.StatusOK, response
}
// getViewerUsers handles GET /user/viewer and lists the viewer users
// attached to the authenticated (parent) user.
func getViewerUsers(request *http.Request, a routes.Arguments) (int, interface{}) {
	tx := a[db.Transaction].(*sql.Tx)
	parent := a[AuthenticatedUser].(User)
	users, err := GetUsersByParent(request.Context(), tx, parent)
	if err != nil {
		return http.StatusInternalServerError, errors.New("Failed to get viewer users.")
	}
	return http.StatusOK, users
}
// addDefaultRole attaches the configured default AWS role (and its bill
// repository) to a freshly created user. Failures are only logged; the
// caller treats this as best-effort.
func addDefaultRole(request *http.Request, user User, tx *sql.Tx) {
	logger := jsonlog.LoggerFromContextOrDefault(request.Context())
	account := models.AwsAccount{
		UserID:   user.Id,
		Pretty:   config.DefaultRoleName,
		RoleArn:  config.DefaultRole,
		External: config.DefaultRoleExternal,
	}
	if err := account.Insert(tx); err != nil {
		logger.Error("Failed to add default role", err)
		return
	}
	billRepository := models.AwsBillRepository{
		AwsAccountID: account.ID,
		Bucket:       config.DefaultRoleBucket,
		Prefix:       config.DefaultRoleBucketPrefix,
	}
	if err := billRepository.Insert(tx); err != nil {
		logger.Error("Failed to add default bill repository", err)
	}
}
correct id is now sent to user
// Copyright 2017 MSolution.IO
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package users
import (
"database/sql"
"errors"
"fmt"
"strings"
"time"
"net/http"
"github.com/trackit/jsonlog"
"github.com/trackit/trackit-server/config"
"github.com/trackit/trackit-server/db"
"github.com/trackit/trackit-server/models"
"github.com/trackit/trackit-server/routes"
"github.com/trackit/trackit-server/mail"
"github.com/satori/go.uuid"
)
const (
	// Minimum accepted password length.
	// NOTE(review): the name says "Max" but the message implies a minimum —
	// confirm intent before renaming, other files in the package may use it.
	passwordMaxLength = 12
)

var (
	// ErrPasswordTooShort is returned when a password is shorter than
	// passwordMaxLength. The original used the invalid fmt verb %u, which
	// Go renders as "%!u(int=12)"; %d prints the intended number.
	ErrPasswordTooShort = fmt.Errorf("Password must be at least %d characters.", passwordMaxLength)
)
// init registers the /user and /user/viewer routes with the shared muxer.
func init() {
	// /user: POST registers, PATCH edits, GET returns the current user.
	// All three run inside a per-request DB transaction.
	routes.MethodMuxer{
		http.MethodPost: routes.H(createUser).With(
			routes.RequestContentType{"application/json"},
			routes.RequestBody{createUserRequestBody{"example@example.com", "pa55w0rd"}},
			routes.Documentation{
				Summary: "register a new user",
				Description: "Registers a new user using an e-mail and password, and responds with the user's data.",
			},
		),
		http.MethodPatch: routes.H(patchUser).With(
			RequireAuthenticatedUser{ViewerAsSelf},
			routes.RequestContentType{"application/json"},
			routes.RequestBody{createUserRequestBody{"example@example.com", "pa55w0rd"}},
			routes.Documentation{
				Summary: "edit the current user",
				Description: "Edit the current user, and responds with the user's data.",
			},
		),
		http.MethodGet: routes.H(me).With(
			RequireAuthenticatedUser{ViewerAsSelf},
			routes.Documentation{
				Summary: "get the current user",
				Description: "Responds with the currently authenticated user's data.",
			},
		),
	}.H().With(
		db.RequestTransaction{db.Db},
		routes.Documentation{
			Summary: "register or get the user",
		},
	).Register("/user")
	// /user/viewer: POST creates a read-only viewer account, GET lists them.
	routes.MethodMuxer{
		http.MethodPost: routes.H(createViewerUser).With(
			routes.RequestContentType{"application/json"},
			RequireAuthenticatedUser{ViewerCannot},
			routes.RequestBody{createViewerUserRequestBody{"example@example.com"}},
			routes.Documentation{
				Summary: "register a new viewer user",
				Description: "Registers a new viewer user linked to the current user, which will only be able to view its parent user's data.",
			},
		),
		http.MethodGet: routes.H(getViewerUsers).With(
			RequireAuthenticatedUser{ViewerAsParent},
			routes.Documentation{
				Summary: "list viewer users",
				Description: "Lists the viewer users registered for the current account.",
			},
		),
	}.H().With(
		db.RequestTransaction{db.Db},
	).Register("/user/viewer")
}
// createUserRequestBody is the JSON payload for POST and PATCH /user.
// Both fields are required (req:"nonzero").
type createUserRequestBody struct {
	Email string `json:"email" req:"nonzero"`
	Password string `json:"password" req:"nonzero"`
}
// createUser handles POST /user: it decodes the request body, creates the
// user inside the request transaction and, when a default role is fully
// configured, attaches that role to the new account.
func createUser(request *http.Request, a routes.Arguments) (int, interface{}) {
	var body createUserRequestBody
	routes.MustRequestBody(a, &body)
	tx := a[db.Transaction].(*sql.Tx)
	status, response := createUserWithValidBody(request, body, tx)
	// Add the default role to the new account. No error is returned in
	// case of failure; the billing repository is not processed instantly.
	defaultRoleConfigured := config.DefaultRole != "" &&
		config.DefaultRoleName != "" &&
		config.DefaultRoleExternal != "" &&
		config.DefaultRoleBucket != ""
	if status == 200 && defaultRoleConfigured {
		addDefaultRole(request, response.(User), tx)
	}
	return status, response
}
// createUserWithValidBody creates a user from an already-decoded request
// body. It returns 200 with the user on success, 409 when the e-mail is
// already registered (MySQL duplicate-key error 1062) and 500 otherwise.
func createUserWithValidBody(request *http.Request, body createUserRequestBody, tx *sql.Tx) (int, interface{}) {
	ctx := request.Context()
	logger := jsonlog.LoggerFromContextOrDefault(ctx)
	user, err := CreateUserWithPassword(ctx, tx, body.Email, body.Password)
	if err == nil {
		logger.Info("User created.", user)
		return 200, user
	}
	logger.Error(err.Error(), nil)
	// strings.Split always yields at least one element, so the previous
	// len(...) >= 1 guard was redundant; SplitN avoids splitting the rest.
	if strings.SplitN(err.Error(), ":", 2)[0] == "Error 1062" {
		return 409, errors.New("Account already exists.")
	}
	return 500, errors.New("Failed to create user.")
}
// createViewerUserRequestBody is the JSON payload for POST /user/viewer.
type createViewerUserRequestBody struct {
	Email string `json:"email" req:"nonzero"`
}
// createViewerUserResponseBody is the response for POST /user/viewer:
// the created viewer user plus its generated password.
type createViewerUserResponseBody struct {
	User
	Password string `json:"password" req:"nonzero"`
}
// createViewerUser handles POST /user/viewer: it creates a viewer user
// attached to the current user, stores a hashed one-time token and mails
// the viewer a password-reset link keyed by the forgotten-password row ID.
func createViewerUser(request *http.Request, a routes.Arguments) (int, interface{}) {
	var body createViewerUserRequestBody
	routes.MustRequestBody(a, &body)
	currentUser := a[AuthenticatedUser].(User)
	tx := a[db.Transaction].(*sql.Tx)
	ctx := request.Context()
	token := uuid.NewV1().String()
	logger := jsonlog.LoggerFromContextOrDefault(ctx)
	tokenHash, err := getPasswordHash(token)
	if err != nil {
		logger.Error("Failed to create token hash.", err.Error())
		return 500, errors.New("Failed to create token hash")
	}
	viewerUser, viewerUserPassword, err := CreateUserWithParent(ctx, tx, body.Email, currentUser)
	if err != nil {
		// strings.Split always yields at least one element, so the previous
		// len(...) >= 1 guard was redundant.
		if strings.SplitN(err.Error(), ":", 2)[0] == "Error 1062" {
			return 409, errors.New("Email already taken.")
		}
		return 500, errors.New("Failed to create viewer user.")
	}
	response := createViewerUserResponseBody{
		User:     viewerUser,
		Password: viewerUserPassword,
	}
	dbForgottenPassword := models.ForgottenPassword{
		UserID:  viewerUser.Id,
		Token:   tokenHash,
		Created: time.Now(),
	}
	err = dbForgottenPassword.Insert(tx)
	if err != nil {
		logger.Error("Failed to insert viewer password token in database.", err.Error())
		return 500, errors.New("Failed to create viewer password token")
	}
	mailSubject := "Your TrackIt viewer password"
	mailBody := fmt.Sprintf("Please follow this link to create your password: https://re.trackit.io/reset/%d/%s.", dbForgottenPassword.ID, token)
	err = mail.SendMail(viewerUser.Email, mailSubject, mailBody, request.Context())
	if err != nil {
		logger.Error("Failed to send viewer password email.", err.Error())
		return 500, errors.New("Failed to send viewer password email")
	}
	return http.StatusOK, response
}
// getViewerUsers handles GET /user/viewer and lists the viewer users
// attached to the authenticated (parent) user.
func getViewerUsers(request *http.Request, a routes.Arguments) (int, interface{}) {
	tx := a[db.Transaction].(*sql.Tx)
	parent := a[AuthenticatedUser].(User)
	users, err := GetUsersByParent(request.Context(), tx, parent)
	if err != nil {
		return http.StatusInternalServerError, errors.New("Failed to get viewer users.")
	}
	return http.StatusOK, users
}
// addDefaultRole attaches the configured default AWS role (and its bill
// repository) to a freshly created user. Failures are only logged; the
// caller treats this as best-effort.
func addDefaultRole(request *http.Request, user User, tx *sql.Tx) {
	logger := jsonlog.LoggerFromContextOrDefault(request.Context())
	account := models.AwsAccount{
		UserID:   user.Id,
		Pretty:   config.DefaultRoleName,
		RoleArn:  config.DefaultRole,
		External: config.DefaultRoleExternal,
	}
	if err := account.Insert(tx); err != nil {
		logger.Error("Failed to add default role", err)
		return
	}
	billRepository := models.AwsBillRepository{
		AwsAccountID: account.ID,
		Bucket:       config.DefaultRoleBucket,
		Prefix:       config.DefaultRoleBucketPrefix,
	}
	if err := billRepository.Insert(tx); err != nil {
		logger.Error("Failed to add default bill repository", err)
	}
}
|
// Copyright 2017 MSolution.IO
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package users
import (
"database/sql"
"errors"
"fmt"
"net/http"
"github.com/trackit/jsonlog"
"github.com/trackit/trackit2/config"
"github.com/trackit/trackit2/db"
"github.com/trackit/trackit2/models"
"github.com/trackit/trackit2/routes"
"strings"
)
const (
	// Minimum accepted password length.
	// NOTE(review): the name says "Max" but the message implies a minimum —
	// confirm intent before renaming, other files in the package may use it.
	passwordMaxLength = 12
)

var (
	// ErrPasswordTooShort is returned when a password is shorter than
	// passwordMaxLength. The original used the invalid fmt verb %u, which
	// Go renders as "%!u(int=12)"; %d prints the intended number.
	ErrPasswordTooShort = fmt.Errorf("Password must be at least %d characters.", passwordMaxLength)
)
// init registers the /user and /user/viewer routes with the shared muxer.
func init() {
	// /user: POST registers, PATCH edits, GET returns the current user.
	// All three run inside a per-request DB transaction.
	routes.MethodMuxer{
		http.MethodPost: routes.H(createUser).With(
			routes.RequestContentType{"application/json"},
			routes.RequestBody{createUserRequestBody{"example@example.com", "pa55w0rd"}},
			routes.Documentation{
				Summary: "register a new user",
				Description: "Registers a new user using an e-mail and password, and responds with the user's data.",
			},
		),
		http.MethodPatch: routes.H(patchUser).With(
			RequireAuthenticatedUser{ViewerAsSelf},
			routes.RequestContentType{"application/json"},
			routes.RequestBody{createUserRequestBody{"example@example.com", "pa55w0rd"}},
			routes.Documentation{
				Summary: "edit the current user",
				Description: "Edit the current user, and responds with the user's data.",
			},
		),
		http.MethodGet: routes.H(me).With(
			RequireAuthenticatedUser{ViewerAsSelf},
			routes.Documentation{
				Summary: "get the current user",
				Description: "Responds with the currently authenticated user's data.",
			},
		),
	}.H().With(
		db.RequestTransaction{db.Db},
		routes.Documentation{
			Summary: "register or get the user",
		},
	).Register("/user")
	// /user/viewer: POST creates a read-only viewer account, GET lists them.
	routes.MethodMuxer{
		http.MethodPost: routes.H(createViewerUser).With(
			routes.RequestContentType{"application/json"},
			RequireAuthenticatedUser{ViewerCannot},
			routes.RequestBody{createViewerUserRequestBody{"example@example.com"}},
			routes.Documentation{
				Summary: "register a new viewer user",
				Description: "Registers a new viewer user linked to the current user, which will only be able to view its parent user's data.",
			},
		),
		http.MethodGet: routes.H(getViewerUsers).With(
			RequireAuthenticatedUser{ViewerAsParent},
			routes.Documentation{
				Summary: "list viewer users",
				Description: "Lists the viewer users registered for the current account.",
			},
		),
	}.H().With(
		db.RequestTransaction{db.Db},
	).Register("/user/viewer")
}
// createUserRequestBody is the JSON payload for POST and PATCH /user.
// Both fields are required (req:"nonzero").
type createUserRequestBody struct {
	Email string `json:"email" req:"nonzero"`
	Password string `json:"password" req:"nonzero"`
}
// createUser handles POST /user: it decodes the request body, creates the
// user inside the request transaction and, when a default role is fully
// configured, attaches that role to the new account.
func createUser(request *http.Request, a routes.Arguments) (int, interface{}) {
	var body createUserRequestBody
	routes.MustRequestBody(a, &body)
	tx := a[db.Transaction].(*sql.Tx)
	status, response := createUserWithValidBody(request, body, tx)
	// Add the default role to the new account. No error is returned in
	// case of failure; the billing repository is not processed instantly.
	defaultRoleConfigured := config.DefaultRole != "" &&
		config.DefaultRoleName != "" &&
		config.DefaultRoleExternal != "" &&
		config.DefaultRoleBucket != ""
	if status == 200 && defaultRoleConfigured {
		addDefaultRole(request, response.(User), tx)
	}
	return status, response
}
// createUserWithValidBody creates a user from an already-decoded request
// body. It returns 200 with the user on success, 409 when the e-mail is
// already registered (MySQL duplicate-key error 1062) and 500 otherwise.
func createUserWithValidBody(request *http.Request, body createUserRequestBody, tx *sql.Tx) (int, interface{}) {
	ctx := request.Context()
	logger := jsonlog.LoggerFromContextOrDefault(ctx)
	user, err := CreateUserWithPassword(ctx, tx, body.Email, body.Password)
	if err == nil {
		logger.Info("User created.", user)
		return 200, user
	}
	logger.Error(err.Error(), nil)
	// strings.Split always yields at least one element, so the previous
	// len(...) >= 1 guard was redundant; SplitN avoids splitting the rest.
	if strings.SplitN(err.Error(), ":", 2)[0] == "Error 1062" {
		return 409, errors.New("Account already exists.")
	}
	return 500, errors.New("Failed to create user.")
}
// createViewerUserRequestBody is the JSON payload for POST /user/viewer.
type createViewerUserRequestBody struct {
	Email string `json:"email" req:"nonzero"`
}
// createViewerUserResponseBody is the response for POST /user/viewer:
// the created viewer user plus its generated password.
type createViewerUserResponseBody struct {
	User
	Password string `json:"password" req:"nonzero"`
}
// createViewerUser handles POST /user/viewer: it registers a viewer user
// under the authenticated user and returns it with its generated password.
func createViewerUser(request *http.Request, a routes.Arguments) (int, interface{}) {
	var body createViewerUserRequestBody
	routes.MustRequestBody(a, &body)
	tx := a[db.Transaction].(*sql.Tx)
	parent := a[AuthenticatedUser].(User)
	viewer, password, err := CreateUserWithParent(request.Context(), tx, body.Email, parent)
	if err != nil {
		return http.StatusInternalServerError, errors.New("Failed to create viewer user.")
	}
	return http.StatusOK, createViewerUserResponseBody{
		User:     viewer,
		Password: password,
	}
}
// getViewerUsers handles GET /user/viewer and lists the viewer users
// attached to the authenticated (parent) user.
func getViewerUsers(request *http.Request, a routes.Arguments) (int, interface{}) {
	tx := a[db.Transaction].(*sql.Tx)
	parent := a[AuthenticatedUser].(User)
	users, err := GetUsersByParent(request.Context(), tx, parent)
	if err != nil {
		return http.StatusInternalServerError, errors.New("Failed to get viewer users.")
	}
	return http.StatusOK, users
}
// addDefaultRole attaches the configured default AWS role (and its bill
// repository) to a freshly created user. Failures are only logged; the
// caller treats this as best-effort.
func addDefaultRole(request *http.Request, user User, tx *sql.Tx) {
	logger := jsonlog.LoggerFromContextOrDefault(request.Context())
	account := models.AwsAccount{
		UserID:   user.Id,
		Pretty:   config.DefaultRoleName,
		RoleArn:  config.DefaultRole,
		External: config.DefaultRoleExternal,
	}
	if err := account.Insert(tx); err != nil {
		logger.Error("Failed to add default role", err)
		return
	}
	billRepository := models.AwsBillRepository{
		AwsAccountID: account.ID,
		Bucket:       config.DefaultRoleBucket,
		Prefix:       config.DefaultRoleBucketPrefix,
	}
	if err := billRepository.Insert(tx); err != nil {
		logger.Error("Failed to add default bill repository", err)
	}
}
better import organisation
// Copyright 2017 MSolution.IO
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package users
import (
"database/sql"
"errors"
"fmt"
"strings"
"net/http"
"github.com/trackit/jsonlog"
"github.com/trackit/trackit2/config"
"github.com/trackit/trackit2/db"
"github.com/trackit/trackit2/models"
"github.com/trackit/trackit2/routes"
)
const (
	// passwordMaxLength is the minimum number of characters required in
	// a password. NOTE(review): the name says "Max" but the only visible
	// use treats it as a minimum — consider renaming to passwordMinLength.
	passwordMaxLength = 12
)

var (
	// ErrPasswordTooShort is returned when a password has fewer than
	// passwordMaxLength characters. The original used the invalid fmt
	// verb %u, which rendered the message as "%!u(int=12)"; %d is the
	// correct verb for an int.
	ErrPasswordTooShort = fmt.Errorf("Password must be at least %d characters.", passwordMaxLength)
)
// init registers the user-management HTTP routes: /user (register, edit
// and fetch the current user) and /user/viewer (create and list viewer
// users). Both route groups run inside a database transaction.
func init() {
	routes.MethodMuxer{
		http.MethodPost: routes.H(createUser).With(
			routes.RequestContentType{"application/json"},
			routes.RequestBody{createUserRequestBody{"example@example.com", "pa55w0rd"}},
			routes.Documentation{
				Summary:     "register a new user",
				Description: "Registers a new user using an e-mail and password, and responds with the user's data.",
			},
		),
		http.MethodPatch: routes.H(patchUser).With(
			RequireAuthenticatedUser{ViewerAsSelf},
			routes.RequestContentType{"application/json"},
			routes.RequestBody{createUserRequestBody{"example@example.com", "pa55w0rd"}},
			routes.Documentation{
				Summary:     "edit the current user",
				Description: "Edit the current user, and responds with the user's data.",
			},
		),
		http.MethodGet: routes.H(me).With(
			RequireAuthenticatedUser{ViewerAsSelf},
			routes.Documentation{
				Summary:     "get the current user",
				Description: "Responds with the currently authenticated user's data.",
			},
		),
	}.H().With(
		db.RequestTransaction{db.Db},
		routes.Documentation{
			Summary: "register or get the user",
		},
	).Register("/user")
	routes.MethodMuxer{
		http.MethodPost: routes.H(createViewerUser).With(
			routes.RequestContentType{"application/json"},
			// NOTE(review): ViewerCannot presumably blocks viewer
			// accounts from creating further viewers — confirm in the
			// RequireAuthenticatedUser implementation.
			RequireAuthenticatedUser{ViewerCannot},
			routes.RequestBody{createViewerUserRequestBody{"example@example.com"}},
			routes.Documentation{
				Summary:     "register a new viewer user",
				Description: "Registers a new viewer user linked to the current user, which will only be able to view its parent user's data.",
			},
		),
		http.MethodGet: routes.H(getViewerUsers).With(
			RequireAuthenticatedUser{ViewerAsParent},
			routes.Documentation{
				Summary:     "list viewer users",
				Description: "Lists the viewer users registered for the current account.",
			},
		),
	}.H().With(
		db.RequestTransaction{db.Db},
	).Register("/user/viewer")
}
// createUserRequestBody is the JSON payload for registering or editing
// a user; both fields are mandatory.
type createUserRequestBody struct {
	Email    string `json:"email" req:"nonzero"`
	Password string `json:"password" req:"nonzero"`
}
// createUser registers a new user from the JSON request body and
// responds with the created user's data. On success it additionally
// attaches the default role when one is fully configured.
func createUser(request *http.Request, a routes.Arguments) (int, interface{}) {
	var body createUserRequestBody
	routes.MustRequestBody(a, &body)
	tx := a[db.Transaction].(*sql.Tx)
	code, resp := createUserWithValidBody(request, body, tx)
	// Add the default role to the new account. No error is returned in
	// case of failure. The billing repository is not processed instantly.
	// http.StatusOK replaces the magic literal 200 for consistency with
	// the other handlers in this file.
	if code == http.StatusOK && config.DefaultRole != "" && config.DefaultRoleName != "" &&
		config.DefaultRoleExternal != "" && config.DefaultRoleBucket != "" {
		addDefaultRole(request, resp.(User), tx)
	}
	return code, resp
}
// createUserWithValidBody creates the user described by body in the
// database. It responds with the created user on success, with
// http.StatusConflict when the e-mail is already registered, and with
// http.StatusInternalServerError for any other failure.
func createUserWithValidBody(request *http.Request, body createUserRequestBody, tx *sql.Tx) (int, interface{}) {
	ctx := request.Context()
	logger := jsonlog.LoggerFromContextOrDefault(ctx)
	user, err := CreateUserWithPassword(ctx, tx, body.Email, body.Password)
	if err == nil {
		logger.Info("User created.", user)
		return http.StatusOK, user
	}
	logger.Error(err.Error(), nil)
	// MySQL error 1062 (ER_DUP_ENTRY) means the unique constraint on the
	// user's e-mail was violated: the account already exists. SplitN
	// always yields at least one element, so the original len check was
	// redundant.
	errSplit := strings.SplitN(err.Error(), ":", 2)
	if errSplit[0] == "Error 1062" {
		return http.StatusConflict, errors.New("Account already exists.")
	}
	return http.StatusInternalServerError, errors.New("Failed to create user.")
}
// createViewerUserRequestBody is the JSON payload expected when
// registering a viewer user; only an e-mail address is required.
type createViewerUserRequestBody struct {
	Email string `json:"email" req:"nonzero"`
}

// createViewerUserResponseBody is the JSON response for a newly created
// viewer user: the user's data plus the generated password.
type createViewerUserResponseBody struct {
	User
	Password string `json:"password" req:"nonzero"`
}
// createViewerUser registers a viewer user linked to the authenticated
// user and responds with the new user's data together with its
// generated password.
func createViewerUser(request *http.Request, a routes.Arguments) (int, interface{}) {
	var body createViewerUserRequestBody
	routes.MustRequestBody(a, &body)
	parent := a[AuthenticatedUser].(User)
	transaction := a[db.Transaction].(*sql.Tx)
	viewer, generatedPassword, err := CreateUserWithParent(request.Context(), transaction, body.Email, parent)
	if err != nil {
		return http.StatusInternalServerError, errors.New("Failed to create viewer user.")
	}
	return http.StatusOK, createViewerUserResponseBody{
		User:     viewer,
		Password: generatedPassword,
	}
}
// getViewerUsers responds with the list of viewer users whose parent is
// the authenticated user.
func getViewerUsers(request *http.Request, a routes.Arguments) (int, interface{}) {
	parent := a[AuthenticatedUser].(User)
	transaction := a[db.Transaction].(*sql.Tx)
	viewers, err := GetUsersByParent(request.Context(), transaction, parent)
	if err != nil {
		return http.StatusInternalServerError, errors.New("Failed to get viewer users.")
	}
	return http.StatusOK, viewers
}
// addDefaultRole attaches the configured default AWS role, and its bill
// repository, to a freshly created user. Failures are only logged, never
// reported to the caller: the default role is best-effort.
func addDefaultRole(request *http.Request, user User, tx *sql.Tx) {
	ctx := request.Context()
	logger := jsonlog.LoggerFromContextOrDefault(ctx)
	accountDB := models.AwsAccount{
		UserID:   user.Id,
		Pretty:   config.DefaultRoleName,
		RoleArn:  config.DefaultRole,
		External: config.DefaultRoleExternal,
	}
	if err := accountDB.Insert(tx); err != nil {
		logger.Error("Failed to add default role", err)
		return
	}
	// Only register the bill repository once the account row exists,
	// since it references accountDB.ID.
	brDB := models.AwsBillRepository{
		AwsAccountID: accountDB.ID,
		Bucket:       config.DefaultRoleBucket,
		Prefix:       config.DefaultRoleBucketPrefix,
	}
	if err := brDB.Insert(tx); err != nil {
		logger.Error("Failed to add default bill repository", err)
	}
}
|
package log
import (
"fmt"
"github.com/ark-lang/ark/util"
"os"
"strings"
)
// LogLevel is the severity of a log message; higher values are more severe.
type LogLevel int

const (
	LevelDebug LogLevel = iota
	LevelVerbose
	LevelInfo
	LevelWarning
	LevelError
)

// LevelMap maps level names (as given on the command line) to their
// LogLevel values.
var LevelMap = map[string]LogLevel{
	"debug":   LevelDebug,
	"verbose": LevelVerbose,
	"info":    LevelInfo,
	"warning": LevelWarning,
	"error":   LevelError,
}

// currentLevel is the minimum severity that will be printed.
var currentLevel LogLevel

// enabledTags is the set of tags whose messages are printed.
var enabledTags map[string]bool

// enableAll, when true, bypasses the tag filter entirely.
var enableAll bool
// init establishes the defaults: info level, no tags enabled.
func init() {
	currentLevel = LevelInfo
	enabledTags = make(map[string]bool)
	enableAll = false
}
// SetLevel sets the global log level by name ("debug", "verbose",
// "info", "warning" or "error"). An unknown name is a setup failure:
// the process reports the error and exits.
func SetLevel(level string) {
	lvl, ok := LevelMap[level]
	if !ok {
		// Report on stderr with a trailing newline; the original wrote
		// to stdout without one, which garbles interleaved output and
		// hides the offending value.
		fmt.Fprintf(os.Stderr, "Invalid log level %q\n", level)
		os.Exit(util.EXIT_FAILURE_SETUP)
	}
	currentLevel = lvl
}
// SetTags replaces the set of enabled log tags with the comma-separated
// list in tags. The special tag "all" enables every tag.
func SetTags(tags string) {
	enabledTags = make(map[string]bool)
	enableAll = false
	for _, tag := range strings.Split(tags, ",") {
		switch tag {
		case "all":
			enableAll = true
		default:
			enabledTags[tag] = true
		}
	}
}
// AtLevel reports whether messages at level meet the current threshold.
func AtLevel(level LogLevel) bool {
	return currentLevel <= level
}
// Log prints msg, prefixed with its tag, provided the tag is enabled
// and level passes the current threshold.
func Log(level LogLevel, tag string, msg string, args ...interface{}) {
	if !enableAll && !enabledTags[tag] {
		return
	}
	if !AtLevel(level) {
		return
	}
	fmt.Printf("["+tag+"] "+msg, args...)
}
// Logln behaves like Log but appends a newline to the format string.
func Logln(level LogLevel, tag string, msg string, args ...interface{}) {
	if !enableAll && !enabledTags[tag] {
		return
	}
	if !AtLevel(level) {
		return
	}
	fmt.Printf("["+tag+"] "+msg+"\n", args...)
}
// Debug logs a tagged message at the debug level.
func Debug(tag string, msg string, args ...interface{}) {
	Log(LevelDebug, tag, msg, args...)
}

// Debugln logs a tagged message at the debug level, with a trailing newline.
func Debugln(tag string, msg string, args ...interface{}) {
	Logln(LevelDebug, tag, msg, args...)
}

// Verbose logs a tagged message at the verbose level.
func Verbose(tag string, msg string, args ...interface{}) {
	Log(LevelVerbose, tag, msg, args...)
}

// Verboseln logs a tagged message at the verbose level, with a trailing newline.
func Verboseln(tag string, msg string, args ...interface{}) {
	Logln(LevelVerbose, tag, msg, args...)
}

// Info logs a tagged message at the info level.
func Info(tag string, msg string, args ...interface{}) {
	Log(LevelInfo, tag, msg, args...)
}

// Infoln logs a tagged message at the info level, with a trailing newline.
func Infoln(tag string, msg string, args ...interface{}) {
	Logln(LevelInfo, tag, msg, args...)
}

// Warning logs a tagged message at the warning level.
func Warning(tag string, msg string, args ...interface{}) {
	Log(LevelWarning, tag, msg, args...)
}

// Warningln logs a tagged message at the warning level, with a trailing newline.
func Warningln(tag string, msg string, args ...interface{}) {
	Logln(LevelWarning, tag, msg, args...)
}

// Error logs a tagged message at the error level.
func Error(tag string, msg string, args ...interface{}) {
	Log(LevelError, tag, msg, args...)
}

// Errorln logs a tagged message at the error level, with a trailing newline.
func Errorln(tag string, msg string, args ...interface{}) {
	Logln(LevelError, tag, msg, args...)
}
Remove log tags from messages
package log
import (
"fmt"
"github.com/ark-lang/ark/util"
"os"
"strings"
)
// LogLevel is the severity of a log message; higher values are more severe.
type LogLevel int

const (
	LevelDebug LogLevel = iota
	LevelVerbose
	LevelInfo
	LevelWarning
	LevelError
)

// LevelMap maps level names (as given on the command line) to their
// LogLevel values.
var LevelMap = map[string]LogLevel{
	"debug":   LevelDebug,
	"verbose": LevelVerbose,
	"info":    LevelInfo,
	"warning": LevelWarning,
	"error":   LevelError,
}

// currentLevel is the minimum severity that will be printed.
var currentLevel LogLevel

// enabledTags is the set of tags whose messages are printed.
var enabledTags map[string]bool

// enableAll, when true, bypasses the tag filter entirely.
var enableAll bool
// init establishes the defaults: info level, no tags enabled.
func init() {
	currentLevel = LevelInfo
	enabledTags = make(map[string]bool)
	enableAll = false
}
// SetLevel sets the global log level by name ("debug", "verbose",
// "info", "warning" or "error"). An unknown name is a setup failure:
// the process reports the error and exits.
func SetLevel(level string) {
	lvl, ok := LevelMap[level]
	if !ok {
		// Report on stderr with a trailing newline; the original wrote
		// to stdout without one, which garbles interleaved output and
		// hides the offending value.
		fmt.Fprintf(os.Stderr, "Invalid log level %q\n", level)
		os.Exit(util.EXIT_FAILURE_SETUP)
	}
	currentLevel = lvl
}
// SetTags replaces the set of enabled log tags with the comma-separated
// list in tags. The special tag "all" enables every tag.
func SetTags(tags string) {
	enabledTags = make(map[string]bool)
	enableAll = false
	for _, tag := range strings.Split(tags, ",") {
		if tag == "all" {
			enableAll = true
		} else {
			enabledTags[tag] = true
		}
	}
}
// AtLevel reports whether messages at level meet the current threshold.
func AtLevel(level LogLevel) bool {
	return level >= currentLevel
}
// Log prints msg, provided its tag is enabled and level passes the
// current threshold. The tag is used only for filtering; it is not
// included in the output.
func Log(level LogLevel, tag string, msg string, args ...interface{}) {
	if !enableAll && !enabledTags[tag] {
		return
	}
	if !AtLevel(level) {
		return
	}
	fmt.Printf(msg, args...)
}
// Logln behaves like Log but appends a newline to the format string.
func Logln(level LogLevel, tag string, msg string, args ...interface{}) {
	if !enableAll && !enabledTags[tag] {
		return
	}
	if !AtLevel(level) {
		return
	}
	fmt.Printf(msg+"\n", args...)
}
// Debug logs a message at the debug level under the given filter tag.
func Debug(tag string, msg string, args ...interface{}) {
	Log(LevelDebug, tag, msg, args...)
}

// Debugln logs a message at the debug level, with a trailing newline.
func Debugln(tag string, msg string, args ...interface{}) {
	Logln(LevelDebug, tag, msg, args...)
}

// Verbose logs a message at the verbose level under the given filter tag.
func Verbose(tag string, msg string, args ...interface{}) {
	Log(LevelVerbose, tag, msg, args...)
}

// Verboseln logs a message at the verbose level, with a trailing newline.
func Verboseln(tag string, msg string, args ...interface{}) {
	Logln(LevelVerbose, tag, msg, args...)
}

// Info logs a message at the info level under the given filter tag.
func Info(tag string, msg string, args ...interface{}) {
	Log(LevelInfo, tag, msg, args...)
}

// Infoln logs a message at the info level, with a trailing newline.
func Infoln(tag string, msg string, args ...interface{}) {
	Logln(LevelInfo, tag, msg, args...)
}

// Warning logs a message at the warning level under the given filter tag.
func Warning(tag string, msg string, args ...interface{}) {
	Log(LevelWarning, tag, msg, args...)
}

// Warningln logs a message at the warning level, with a trailing newline.
func Warningln(tag string, msg string, args ...interface{}) {
	Logln(LevelWarning, tag, msg, args...)
}

// Error logs a message at the error level under the given filter tag.
func Error(tag string, msg string, args ...interface{}) {
	Log(LevelError, tag, msg, args...)
}

// Errorln logs a message at the error level, with a trailing newline.
func Errorln(tag string, msg string, args ...interface{}) {
	Logln(LevelError, tag, msg, args...)
}
|
// Package girc provides a high level, yet flexible IRC library for use with
// interacting with IRC servers. girc has support for user/channel tracking,
// as well as a few other neat features (like auto-reconnect).
//
// Much of what girc can do, can also be disabled. The goal is to provide a
// solid API that you don't necessarily have to work with out of the box if
// you don't want to.
//
// See https://github.com/lrstanley/girc#examples for a few brief and useful
// examples taking advantage of girc, which should give you a general idea
// of how the API works.
package girc
update doc
// Package girc provides a high level, yet flexible IRC library for use with
// interacting with IRC servers. girc has support for user/channel tracking,
// as well as a few other neat features (like auto-reconnect).
//
// Much of what girc can do, can also be disabled. The goal is to provide a
// solid API that you don't necessarily have to work with out of the box if
// you don't want to.
//
// See the examples below for a few brief and useful snippets taking
// advantage of girc, which should give you a general idea of how the API
// works.
package girc
|
package client
import (
"bytes"
"crypto/rand"
"encoding/base64"
"errors"
"io"
"github.com/flynn/go-discoverd"
"github.com/flynn/rpcplus"
"github.com/flynn/sampi/types"
)
// New locates a "flynn-sampi" server via discoverd and returns a Client
// connected to the first advertised instance.
func New() (*Client, error) {
	disc, err := discoverd.NewClient()
	if err != nil {
		return nil, err
	}
	services, err := disc.Services("flynn-sampi")
	if err != nil {
		return nil, err
	}
	if len(services) == 0 {
		return nil, errors.New("sampi: no servers found")
	}
	// Only the first instance is used; no load balancing is attempted.
	c, err := rpcplus.DialHTTP("tcp", services[0].Addr)
	return &Client{c}, err
}
// Client is an RPC client for the sampi scheduler.
type Client struct {
	c *rpcplus.Client
}

// State returns the scheduler's current host state, keyed by host ID.
func (c *Client) State() (map[string]sampi.Host, error) {
	var state map[string]sampi.Host
	err := c.c.Call("Scheduler.State", struct{}{}, &state)
	return state, err
}

// Schedule submits a scheduling request and returns the scheduler's response.
func (c *Client) Schedule(req *sampi.ScheduleReq) (*sampi.ScheduleRes, error) {
	var res sampi.ScheduleRes
	err := c.c.Call("Scheduler.Schedule", req, &res)
	return &res, err
}

// RegisterHost registers host with the scheduler and streams its jobs
// into stream. The returned pointer reports the stream's terminal error.
func (c *Client) RegisterHost(host *sampi.Host, stream chan *sampi.Job) *error {
	return &c.c.StreamGo("Scheduler.RegisterHost", host, stream).Error
}

// RemoveJobs asks the scheduler to forget the given job IDs.
func (c *Client) RemoveJobs(jobIDs []string) error {
	return c.c.Call("Scheduler.RemoveJobs", jobIDs, &struct{}{})
}
// RandomJobID returns prefix followed by a fresh random ID.
func RandomJobID(prefix string) string { return prefix + randomID() }

// randomID produces 16 bytes of cryptographic randomness encoded as
// URL-safe base64 with the trailing "=" padding stripped (22 characters).
func randomID() string {
	raw := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, raw); err != nil {
		panic(err) // crypto/rand is not expected to fail
	}
	encoded := make([]byte, base64.URLEncoding.EncodedLen(len(raw)))
	base64.URLEncoding.Encode(encoded, raw)
	return string(bytes.TrimRight(encoded, "="))
}
pkg/sampi: Update discoverd usage
package client
import (
"bytes"
"crypto/rand"
"encoding/base64"
"errors"
"io"
"github.com/flynn/go-discoverd"
"github.com/flynn/rpcplus"
"github.com/flynn/sampi/types"
)
// New locates a "flynn-sampi" server via discoverd and returns a Client
// connected to the first advertised instance.
func New() (*Client, error) {
	services, err := discoverd.Services("flynn-sampi", discoverd.DefaultTimeout)
	if err != nil {
		return nil, err
	}
	if len(services) == 0 {
		return nil, errors.New("sampi: no servers found")
	}
	// Only the first instance is used; no load balancing is attempted.
	c, err := rpcplus.DialHTTP("tcp", services[0].Addr)
	return &Client{c}, err
}

// Client is an RPC client for the sampi scheduler.
type Client struct {
	c *rpcplus.Client
}

// State returns the scheduler's current host state, keyed by host ID.
func (c *Client) State() (map[string]sampi.Host, error) {
	var state map[string]sampi.Host
	err := c.c.Call("Scheduler.State", struct{}{}, &state)
	return state, err
}

// Schedule submits a scheduling request and returns the scheduler's response.
func (c *Client) Schedule(req *sampi.ScheduleReq) (*sampi.ScheduleRes, error) {
	var res sampi.ScheduleRes
	err := c.c.Call("Scheduler.Schedule", req, &res)
	return &res, err
}

// RegisterHost registers host with the scheduler and streams its jobs
// into stream. The returned pointer reports the stream's terminal error.
func (c *Client) RegisterHost(host *sampi.Host, stream chan *sampi.Job) *error {
	return &c.c.StreamGo("Scheduler.RegisterHost", host, stream).Error
}

// RemoveJobs asks the scheduler to forget the given job IDs.
func (c *Client) RemoveJobs(jobIDs []string) error {
	return c.c.Call("Scheduler.RemoveJobs", jobIDs, &struct{}{})
}
// RandomJobID returns prefix followed by a fresh random ID.
func RandomJobID(prefix string) string { return prefix + randomID() }

// randomID returns 16 cryptographically random bytes encoded as
// URL-safe base64 with the trailing "=" padding stripped (22 characters).
func randomID() string {
	b := make([]byte, 16)
	enc := make([]byte, 24)
	_, err := io.ReadFull(rand.Reader, b)
	if err != nil {
		panic(err) // This shouldn't ever happen, right?
	}
	base64.URLEncoding.Encode(enc, b)
	return string(bytes.TrimRight(enc, "="))
}
|
// Copyright 2014 Hajime Hoshi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ebiten provides graphics and input API to develop a 2D game.
//
// You can start the game by calling the function Run.
//
// func update(screen *ebiten.Screen) {
// // Define your game.
// }
//
// func main() {
// ebiten.Run(update, 320, 240, 2, "Your game's title")
// }
package ebiten
Update example in doc.go
The function argument to ebiten.Run takes an *ebiten.Image,
and must return an error.
// Copyright 2014 Hajime Hoshi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ebiten provides graphics and input API to develop a 2D game.
//
// You can start the game by calling the function Run.
//
// func update(screen *ebiten.Image) error {
// // Define your game.
// }
//
// func main() {
// ebiten.Run(update, 320, 240, 2, "Your game's title")
// }
package ebiten
|
/*
Package junos provides automation for Junos (Juniper Networks) devices.
Establishing A Session
To connect to a Junos device, the process is fairly straightforward.
jnpr := junos.NewSession(host, user, password)
defer jnpr.Close()
Compare Rollback Configurations
If you want to view the difference between the current configuration and a rollback
one, then you can use the ConfigDiff() function.
diff, err := jnpr.ConfigDiff(3)
if err != nil {
fmt.Println(err)
}
fmt.Println(diff)
This will output exactly how it does on the CLI when you "| compare."
Rolling Back to a Previous State
You can also rollback to a previous state, or the "rescue" configuration by using
the RollbackConfig() function:
err := jnpr.RollbackConfig(3)
if err != nil {
fmt.Println(err)
}
// Create a rescue config from the active configuration.
jnpr.Rescue("save")
// You can also delete a rescue config.
jnpr.Rescue("delete")
// Rollback to the "rescue" configuration.
err := jnpr.RollbackConfig("rescue")
if err != nil {
fmt.Println(err)
}
Device Configuration
When configuring a device, it is good practice to lock the configuration database,
load the config, commit the configuration, and then unlock the configuration database.
You can do this with the following functions:
Lock(), Commit(), Unlock()
There are multiple ways to commit a configuration as well:
// Commit the configuration as normal
Commit()
// Check the configuration for any syntax errors (NOTE: you must still issue a Commit())
CommitCheck()
// Commit at a later time, i.e. 4:30 PM
CommitAt("16:30:00")
// Rollback configuration if a Commit() is not issued within the given <minutes>.
CommitConfirm(15)
You can configure the Junos device by uploading a local file, or pulling from an
FTP/HTTP server. The LoadConfig() function takes three arguments:
filename or URL, format, and commit-on-load
If you specify a URL, it must be in the following format:
ftp://<username>:<password>@hostname/pathname/file-name
http://<username>:<password>@hostname/pathname/file-name
The format of the commands within the file must be one of the following types:
set
// system name-server 1.1.1.1
text
// system {
// name-server 1.1.1.1;
// }
xml
// <system>
// <name-server>
// <name>1.1.1.1</name>
// </name-server>
// </system>
If the third option is "true" then after the configuration is loaded, a commit
will be issued. If set to "false," you will have to commit the configuration
using the Commit() function.
jnpr.Lock()
err := jnpr.LoadConfig("path-to-file.txt", "set", true)
if err != nil {
fmt.Println(err)
}
jnpr.Unlock()
You don't have to use Lock() and Unlock() if you wish, but if by chance someone
else tries to edit the device configuration at the same time, there can be conflicts
and most likely an error will be returned.
*/
package junos
Updated documentation
/*
Package junos provides automation for Junos (Juniper Networks) devices.
Establishing A Session
To connect to a Junos device, the process is fairly straightforward.
jnpr := junos.NewSession(host, user, password)
defer jnpr.Close()
Compare Rollback Configurations
If you want to view the difference between the current configuration and a rollback
one, then you can use the ConfigDiff() function.
diff, err := jnpr.ConfigDiff(3)
if err != nil {
fmt.Println(err)
}
fmt.Println(diff)
This will output exactly how it does on the CLI when you "| compare."
Rolling Back to a Previous State
You can also rollback to a previous state, or the "rescue" configuration by using
the RollbackConfig() function:
err := jnpr.RollbackConfig(3)
if err != nil {
fmt.Println(err)
}
// Create a rescue config from the active configuration.
jnpr.Rescue("save")
// You can also delete a rescue config.
jnpr.Rescue("delete")
// Rollback to the "rescue" configuration.
err := jnpr.RollbackConfig("rescue")
if err != nil {
fmt.Println(err)
}
Device Configuration
When configuring a device, it is good practice to lock the configuration database,
load the config, commit the configuration, and then unlock the configuration database.
You can do this with the following functions:
Lock(), Commit(), Unlock()
There are multiple ways to commit a configuration as well:
// Commit the configuration as normal
Commit()
// Check the configuration for any syntax errors (NOTE: you must still issue a Commit())
CommitCheck()
// Commit at a later time, i.e. 4:30 PM
CommitAt("16:30:00")
// Rollback configuration if a Commit() is not issued within the given <minutes>.
CommitConfirm(15)
You can configure the Junos device by uploading a local file, or pulling from an
FTP/HTTP server. The LoadConfig() function takes three arguments:
filename or URL, format, and commit-on-load
If you specify a URL, it must be in the following format:
ftp://<username>:<password>@hostname/pathname/file-name
http://<username>:<password>@hostname/pathname/file-name
Note: The default value for the FTP path variable is the user’s home directory. Thus,
by default the file path to the configuration file is relative to the user directory.
To specify an absolute path when using FTP, start the path with the characters %2F;
for example: ftp://username:password@hostname/%2Fpath/filename.
The format of the commands within the file must be one of the following types:
set
// system name-server 1.1.1.1
text
// system {
// name-server 1.1.1.1;
// }
xml
// <system>
// <name-server>
// <name>1.1.1.1</name>
// </name-server>
// </system>
If the third option is "true" then after the configuration is loaded, a commit
will be issued. If set to "false," you will have to commit the configuration
using the Commit() function.
jnpr.Lock()
err := jnpr.LoadConfig("path-to-file.txt", "set", true)
if err != nil {
fmt.Println(err)
}
jnpr.Unlock()
You don't have to use Lock() and Unlock() if you wish, but if by chance someone
else tries to edit the device configuration at the same time, there can be conflicts
and most likely an error will be returned.
*/
package junos
|
// xslx is a package designed to help with reading data from
// spreadsheets stored in the XLSX format used in recent versions of
// Microsoft's Excel spreadsheet.
package xlsx
Made reference to xslx2csv (https://github.com/tealeg/xlsx2csv) as an example of how to use XLSX.
// xslx is a package designed to help with reading data from
// spreadsheets stored in the XLSX format used in recent versions of
// Microsoft's Excel spreadsheet.
//
// For a concise example of how to use this library why not check out
// the source for xlsx2csv here: https://github.com/tealeg/xlsx2csv
package xlsx
|
/*
Package templates is a thin wrapper around html/template.
The templates package main function is to create a collection
of templates found in a templates directory
The templates directory structure only requires that a 'views' directory exist and it contains at least one html template.
An html template will be created for each template found in the views directory.
All other views that are not in the 'views' directory will be made available to each view template
Example directory structure
templates/
base.html
views/
index.html
about.html
partials/
css.html
nav.html
scripts.html
Usage
// templates collection
var tmpls *templates.Templates
// path to template directory
var templatesPath = "templates/"
func init() {
var err error
templs, err = templates.New().Parse(templatesPath)
if err != nil {
log.Fatal(err)
}
}
func main() {
// the first method call specifies the 'index.html' view and the Render call
// specifies that the 'base.html' template should be rendered to os.Stdout
err := tmpls.Template("index").Render(os.Stdout, "base", nil)
if err != nil {
// handle error
}
}
Example Site
cd example
go run main.go -tmpl-dir=`pwd`
View site at http://localhost:8083
*/
package templates
fixed typo in doc.go
/*
Package templates is a thin wrapper around html/template.
The templates package main function is to create a collection
of templates found in a templates directory
The templates directory structure only requires that a 'views' directory exist and it contains at least one html template.
An html template will be created for each template found in the views directory.
All other views that are not in the 'views' directory will be made available to each view template
Example directory structure
templates/
base.html
views/
index.html
about.html
partials/
css.html
nav.html
scripts.html
Usage
// templates collection
var tmpls *templates.Templates
// path to template directory
var templatesPath = "templates/"
func init() {
var err error
templs, err = templates.New().Parse(templatesPath)
if err != nil {
log.Fatal(err)
}
}
func main() {
// the first method call specifies the 'index.html' view and the Render call
// specifies that the 'base.html' template should be rendered to os.Stdout
err := tmpls.Template("index").Render(os.Stdout, "base", nil)
if err != nil {
// handle error
}
}
Example Site
cd example
go run main.go -tmpl-dir=`pwd`
View site at http://localhost:8083
*/
package templates
|
/*
* This file is part of the libvirt-go-xml project
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
* Copyright (C) 2017 Red Hat, Inc.
*
*/
// Package libvirt-go-xml defines structs for parsing libvirt XML schemas
//
// The libvirt API uses XML schemas/documents to describe the configuration
// of many of its managed objects. Thus when using the libvirt-go package,
// it is often necessary to either parse or format XML documents. This
// package defines a set of Go structs which have been annotated for use
// with the encoding/xml API to manage libvirt XML documents.
//
// Example creating a domain XML document from configuration:
//
// import (
// "github.com/libvirt/libvirt-go-xml"
// )
//
// domcfg := &libvirtxml.Domain{Type: "kvm", Name: "demo",
// UUID: "8f99e332-06c4-463a-9099-330fb244e1b3",
// ....}
// xmldoc, err := domcfg.Marshal()
//
// Example parsing a domainXML document, in combination with libvirt-go
//
// import (
// "github.com/libvirt/libvirt-go"
// "github.com/libvirt/libvirt-go-xml"
// "fmt"
// )
//
// conn, err := libvirt.NewConnect("qemu:///system")
// dom := conn.LookupDomainByName("demo")
// xmldoc, err := dom.GetXMLDesc(0)
//
// domcfg := &libvirtxml.Domain{}
// err := domcfg.Unmarshal(xmldoc)
//
// fmt.Printf("Virt type %s", domcfg.Type)
//
package libvirtxml
doc: Handle conn.LookupDomainByName() error
Reviewed-by: Daniel P. Berrangé <bb938cf255e055ff3507f2627d214e8e62118fcf@redhat.com>
Signed-off-by: Philipp Hahn <8cb7a2c205789112a9f61bc3102305826b6a8db1@univention.de>
/*
* This file is part of the libvirt-go-xml project
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
* Copyright (C) 2017 Red Hat, Inc.
*
*/
// Package libvirt-go-xml defines structs for parsing libvirt XML schemas
//
// The libvirt API uses XML schemas/documents to describe the configuration
// of many of its managed objects. Thus when using the libvirt-go package,
// it is often necessary to either parse or format XML documents. This
// package defines a set of Go structs which have been annotated for use
// with the encoding/xml API to manage libvirt XML documents.
//
// Example creating a domain XML document from configuration:
//
// import (
// "github.com/libvirt/libvirt-go-xml"
// )
//
// domcfg := &libvirtxml.Domain{Type: "kvm", Name: "demo",
// UUID: "8f99e332-06c4-463a-9099-330fb244e1b3",
// ....}
// xmldoc, err := domcfg.Marshal()
//
// Example parsing a domainXML document, in combination with libvirt-go
//
// import (
// "github.com/libvirt/libvirt-go"
// "github.com/libvirt/libvirt-go-xml"
// "fmt"
// )
//
// conn, err := libvirt.NewConnect("qemu:///system")
// dom, err := conn.LookupDomainByName("demo")
// xmldoc, err := dom.GetXMLDesc(0)
//
// domcfg := &libvirtxml.Domain{}
// err := domcfg.Unmarshal(xmldoc)
//
// fmt.Printf("Virt type %s", domcfg.Type)
//
package libvirtxml
|
// Package swf provides a full implementation of a client api for Amazon Simple Workflow Service
// http://docs.aws.amazon.com/amazonswf/latest/apireference/
//
// In addition it provides a basic facility for modeling swf workflows as FSMs (finite state machines), as well as
// implementations of pollers for both decision and activity tasks.
//
//
package swf
fixdoctypo
// Package swf provides a full implementation of a client api for Amazon Simple Workflow Service
// http://docs.aws.amazon.com/amazonswf/latest/apireference/
//
// In addition it provides a basic facility for modeling swf workflows as FSMs (finite state machines), as well as
// implementations of pollers for both decision and activity tasks.
//
//
package swf
|
// Package cc contains methods for solving Chess Challenge
package cc
Fix misspelled word
// Package cc contains methods for solving Chess Challenge
package cc
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bufio"
"exec"
"fmt"
"go/ast"
"go/parser"
"go/token"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"unicode"
"utf8"
)
// Environment for commands.
var (
	XGC []string // 6g -I _test -o _xtest_.6
	GC []string // 6g -I _test _testmain.go
	GL []string // 6l -L _test _testmain.6
	GOARCH string // target architecture, from runtime.GOARCH
	GOROOT string // Go root directory, from runtime.GOROOT
	GORUN string
	O string // object-file character for GOARCH ("5", "6" or "8"); see theChar
	args []string // arguments passed to gotest; also passed to the binary
	fileNames []string
	env = os.Environ() // environment passed to subcommands; see addEnv
)

// These strings are created by getTestNames.
var (
	insideFileNames []string // list of *.go files inside the package.
	outsideFileNames []string // list of *.go files outside the package (in package foo_test).
)

var (
	files []*File // the parsed test files
	importPath string // the package's import path, from "gomake -s importpath"
)

// Flags for our own purposes. We do our own flag processing.
var (
	cFlag bool // compile and link the test binary but do not run it
	xFlag bool
)
// File represents a file that contains tests.
type File struct {
	name string
	pkg string // NOTE(review): presumably the file's package clause — confirm where it is set.
	file *os.File
	astFile *ast.File // parsed form of the file
	tests []string // The names of the TestXXXs.
	benchmarks []string // The names of the BenchmarkXXXs.
}
// main drives the whole gotest pipeline: gather and parse test files,
// build the package and the generated _testmain.go, link, and (unless
// -c was given) run the resulting test binary.
func main() {
flags()
needMakefile()
setEnvironment()
getTestFileNames()
parseFiles()
getTestNames()
run("gomake", "testpackage-clean")
run("gomake", "testpackage", fmt.Sprintf("GOTESTFILES=%s", strings.Join(insideFileNames, " ")))
if len(outsideFileNames) > 0 {
run(append(XGC, outsideFileNames...)...)
}
importPath = runWithStdout("gomake", "-s", "importpath")
writeTestmainGo()
run(GC...)
run(GL...)
if !cFlag {
runTestWithArgs("./" + O + ".out")
}
}
// needMakefile tests that we have a Makefile in this directory;
// gotest delegates the package build to gomake, so one is required.
func needMakefile() {
if _, err := os.Stat("Makefile"); err != nil {
Fatalf("please create a Makefile for gotest; see http://golang.org/doc/code.html for details")
}
}
// Fatalf formats its arguments, prints the message with a final newline, and exits.
// It never returns; the exit status is 2.
func Fatalf(s string, args ...interface{}) {
fmt.Fprintf(os.Stderr, "gotest: "+s+"\n", args...)
os.Exit(2)
}
// theChar is the map from architecture to object character.
var theChar = map[string]string{
"arm": "5",
"amd64": "6",
"386": "8",
}
// addEnv adds a name=value pair to the environment passed to subcommands.
// If the item is already in the environment, addEnv replaces the value.
func addEnv(name, value string) {
for i := 0; i < len(env); i++ {
if strings.HasPrefix(env[i], name+"=") {
env[i] = name + "=" + value
return
}
}
env = append(env, name+"="+value)
}
// setEnvironment assembles the configuration for gotest and its subcommands:
// GOROOT/GOARCH, the compiler and linker command lines (overridable via the
// GC and GL environment variables), and make-silencing variables.
func setEnvironment() {
// Basic environment.
GOROOT = runtime.GOROOT()
addEnv("GOROOT", GOROOT)
GOARCH = runtime.GOARCH
addEnv("GOARCH", GOARCH)
O = theChar[GOARCH]
if O == "" {
Fatalf("unknown architecture %s", GOARCH)
}
// Commands and their flags.
gc := os.Getenv("GC")
if gc == "" {
gc = O + "g"
}
XGC = []string{gc, "-I", "_test", "-o", "_xtest_." + O}
GC = []string{gc, "-I", "_test", "_testmain.go"}
gl := os.Getenv("GL")
if gl == "" {
gl = O + "l"
}
GL = []string{gl, "-L", "_test", "_testmain." + O}
// Silence make on Linux
addEnv("MAKEFLAGS", "")
addEnv("MAKELEVEL", "")
}
// getTestFileNames gets the set of files we're looking at.
// If gotest has no arguments, it scans the current directory for _test.go files.
func getTestFileNames() {
names := fileNames
if len(names) == 0 {
// NOTE(review): the glob skips dot-files; the error message below
// quotes the simpler "*_test.go" form. Pre-Go1 Glob returned one value.
names = filepath.Glob("[^.]*_test.go")
if len(names) == 0 {
Fatalf(`no test files found: no match for "*_test.go"`)
}
}
for _, n := range names {
// Pre-Go1 os.Open(name, flag, perm). The descriptor is kept in File.file.
fd, err := os.Open(n, os.O_RDONLY, 0)
if err != nil {
Fatalf("%s: %s", n, err)
}
f := &File{name: n, file: fd}
files = append(files, f)
}
}
// parseFiles parses the files and remembers the packages we find.
func parseFiles() {
fileSet := token.NewFileSet()
for _, f := range files {
// Report declaration errors so we can abort if the files are incorrect Go.
file, err := parser.ParseFile(fileSet, f.name, nil, parser.DeclarationErrors)
if err != nil {
Fatalf("parse error: %s", err)
}
f.astFile = file
f.pkg = file.Name.String()
if f.pkg == "" {
Fatalf("cannot happen: no package name in %s", f.name)
}
}
}
// getTestNames extracts the names of tests and benchmarks. They are all
// top-level functions that are not methods. It also partitions the file
// names into insideFileNames / outsideFileNames by package suffix.
func getTestNames() {
for _, f := range files {
for _, d := range f.astFile.Decls {
n, ok := d.(*ast.FuncDecl)
if !ok {
continue
}
if n.Recv != nil { // a method, not a function.
continue
}
name := n.Name.String()
if isTest(name, "Test") {
f.tests = append(f.tests, name)
} else if isTest(name, "Benchmark") {
f.benchmarks = append(f.benchmarks, name)
}
// TODO: worth checking the signature? Probably not.
}
if strings.HasSuffix(f.pkg, "_test") {
outsideFileNames = append(outsideFileNames, f.name)
} else {
insideFileNames = append(insideFileNames, f.name)
}
}
}
// isTest reports whether name looks like a test (or benchmark, depending
// on prefix): it must start with prefix, and the character immediately
// following must not be a lower-case letter. The bare prefix itself is
// accepted ("Test" is ok); "TesticularCancer" is not.
func isTest(name, prefix string) bool {
	if !strings.HasPrefix(name, prefix) {
		return false
	}
	rest := name[len(prefix):]
	if rest == "" { // bare "Test"/"Benchmark" is ok
		return true
	}
	first, _ := utf8.DecodeRuneInString(rest)
	return !unicode.IsLower(first)
}
// run executes the command, discarding its standard output.
func run(args ...string) {
doRun(args, false)
}
// runWithStdout is like run, but returns the text of standard output with the last newline dropped.
// It is a fatal error for the command to produce no output at all.
func runWithStdout(argv ...string) string {
s := doRun(argv, true)
if len(s) == 0 {
Fatalf("no output from command %s", strings.Join(argv, " "))
}
if s[len(s)-1] == '\n' {
s = s[:len(s)-1]
}
return s
}
// runTestWithArgs runs the provided test binary, appending the arguments
// that were passed on the gotest command line.
func runTestWithArgs(binary string) {
doRun(append([]string{binary}, args...), false)
}
// doRun is the general command runner. returnStdout says whether we want
// to retrieve standard output; if so, the child's stdout is redirected
// through a pipe and returned as a string. Any failure is fatal.
func doRun(argv []string, returnStdout bool) string {
	if xFlag {
		fmt.Printf("gotest: %s\n", strings.Join(argv, " "))
	}
	if runtime.GOOS == "windows" && argv[0] == "gomake" {
		// gomake is a shell script and it cannot be executed directly on Windows.
		// Quote each argument individually before handing the whole command
		// to "sh -c", so arguments containing spaces keep their boundaries.
		cmd := ""
		for i, v := range argv {
			if i > 0 {
				cmd += " "
			}
			cmd += `"` + v + `"`
		}
		argv = []string{"cmd", "/c", "sh", "-c", cmd}
	}
	var err os.Error
	argv[0], err = exec.LookPath(argv[0])
	if err != nil {
		Fatalf("can't find %s: %s", argv[0], err)
	}
	procAttr := &os.ProcAttr{
		Env: env,
		Files: []*os.File{
			os.Stdin,
			os.Stdout,
			os.Stderr,
		},
	}
	var r, w *os.File
	if returnStdout {
		r, w, err = os.Pipe()
		if err != nil {
			Fatalf("can't create pipe: %s", err)
		}
		procAttr.Files[1] = w
	}
	proc, err := os.StartProcess(argv[0], argv, procAttr)
	if err != nil {
		Fatalf("%s failed to start: %s", argv[0], err)
	}
	if returnStdout {
		// Close the parent's write end so ReadAll below sees EOF when the
		// child exits.
		defer r.Close()
		w.Close()
	}
	waitMsg, err := proc.Wait(0)
	if err != nil || waitMsg == nil {
		Fatalf("%s failed: %s", argv[0], err)
	}
	if !waitMsg.Exited() || waitMsg.ExitStatus() != 0 {
		Fatalf("%q failed: %s", strings.Join(argv, " "), waitMsg)
	}
	if returnStdout {
		b, err := ioutil.ReadAll(r)
		if err != nil {
			Fatalf("can't read output from command: %s", err)
		}
		return string(b)
	}
	return ""
}
// writeTestmainGo generates the test program to be compiled, "./_testmain.go".
// It emits the package clause, the imports (renaming reserved/conflicting
// names), the tables of tests and benchmarks, and the fixed testBody text.
func writeTestmainGo() {
// Pre-Go1 os.Open with explicit flags; this creates/truncates the file.
f, err := os.Open("_testmain.go", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
Fatalf("can't create _testmain.go: %s", err)
}
defer f.Close()
b := bufio.NewWriter(f)
defer b.Flush()
// Package and imports.
fmt.Fprint(b, "package main\n\n")
// Are there tests from a package other than the one we're testing?
// We can't just use file names because some of the things we compiled
// contain no tests.
outsideTests := false
insideTests := false
for _, f := range files {
//println(f.name, f.pkg)
if len(f.tests) == 0 && len(f.benchmarks) == 0 {
continue
}
if strings.HasSuffix(f.pkg, "_test") {
outsideTests = true
} else {
insideTests = true
}
}
if insideTests {
switch importPath {
case "testing":
case "main":
// Import path main is reserved, so import with
// explicit reference to ./_test/main instead.
// Also, the file we are writing defines a function named main,
// so rename this import to __main__ to avoid name conflict.
fmt.Fprintf(b, "import __main__ %q\n", "./_test/main")
default:
fmt.Fprintf(b, "import %q\n", importPath)
}
}
if outsideTests {
fmt.Fprintf(b, "import %q\n", "./_xtest_")
}
fmt.Fprintf(b, "import %q\n", "testing")
fmt.Fprintf(b, "import __os__ %q\n", "os") // rename in case tested package is called os
fmt.Fprintf(b, "import __regexp__ %q\n", "regexp") // rename in case tested package is called regexp
fmt.Fprintln(b) // for gofmt
// Tests.
fmt.Fprintln(b, "var tests = []testing.InternalTest{")
for _, f := range files {
for _, t := range f.tests {
fmt.Fprintf(b, "\t{\"%s.%s\", %s.%s},\n", f.pkg, t, notMain(f.pkg), t)
}
}
fmt.Fprintln(b, "}")
fmt.Fprintln(b)
// Benchmarks.
fmt.Fprintln(b, "var benchmarks = []testing.InternalBenchmark{")
for _, f := range files {
for _, bm := range f.benchmarks {
fmt.Fprintf(b, "\t{\"%s.%s\", %s.%s},\n", f.pkg, bm, notMain(f.pkg), bm)
}
}
fmt.Fprintln(b, "}")
// Body.
fmt.Fprintln(b, testBody)
}
// notMain maps the package name "main" to the renamed import identifier
// "__main__" used by the generated _testmain.go; every other name is
// returned unchanged.
func notMain(pkg string) string {
	switch pkg {
	case "main":
		return "__main__"
	default:
		return pkg
	}
}
// testBody is just copied to the output. It's the code that runs the tests.
// The text below is Go source emitted verbatim into _testmain.go, not live
// code in this file; __os__ and __regexp__ are the renamed imports written
// by writeTestmainGo.
var testBody = `
var matchPat string
var matchRe *__regexp__.Regexp
func matchString(pat, str string) (result bool, err __os__.Error) {
if matchRe == nil || matchPat != pat {
matchPat = pat
matchRe, err = __regexp__.Compile(matchPat)
if err != nil {
return
}
}
return matchRe.MatchString(str), nil
}
func main() {
testing.Main(matchString, tests, benchmarks)
}`
gotest: another attempt to make it run on Windows
R=golang-dev, rsc1
CC=golang-dev
http://codereview.appspot.com/4347041
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bufio"
"exec"
"fmt"
"go/ast"
"go/parser"
"go/token"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"unicode"
"utf8"
)
// Environment for commands. Populated by setEnvironment and flags().
var (
XGC []string // 6g -I _test -o _xtest_.6
GC []string // 6g -I _test _testmain.go
GL []string // 6l -L _test _testmain.6
GOARCH string
GOROOT string
GORUN string
O string // object-file suffix for GOARCH: "5", "6" or "8"
args []string // arguments passed to gotest; also passed to the binary
fileNames []string
env = os.Environ()
)
// These strings are created by getTestNames.
var (
insideFileNames []string // list of *.go files inside the package.
outsideFileNames []string // list of *.go files outside the package (in package foo_test).
)
// Parsed test files and the package import path (filled in by main).
var (
files []*File
importPath string
)
// Flags for our own purposes. We do our own flag processing.
var (
cFlag bool // -c: compile the test binary but do not run it
xFlag bool // -x: echo each command before running it
)
// File represents a file that contains tests.
type File struct {
name string // file name as given on the command line or found by glob
pkg string // package name declared by the file
file *os.File // open descriptor for the file
astFile *ast.File // parsed representation
tests []string // The names of the TestXXXs.
benchmarks []string // The names of the BenchmarkXXXs.
}
// main drives the whole gotest pipeline: gather and parse test files,
// build the package and the generated _testmain.go, link, and (unless
// -c was given) run the resulting test binary.
func main() {
flags()
needMakefile()
setEnvironment()
getTestFileNames()
parseFiles()
getTestNames()
run("gomake", "testpackage-clean")
run("gomake", "testpackage", fmt.Sprintf("GOTESTFILES=%s", strings.Join(insideFileNames, " ")))
if len(outsideFileNames) > 0 {
run(append(XGC, outsideFileNames...)...)
}
importPath = runWithStdout("gomake", "-s", "importpath")
writeTestmainGo()
run(GC...)
run(GL...)
if !cFlag {
runTestWithArgs("./" + O + ".out")
}
}
// needMakefile tests that we have a Makefile in this directory;
// gotest delegates the package build to gomake, so one is required.
func needMakefile() {
if _, err := os.Stat("Makefile"); err != nil {
Fatalf("please create a Makefile for gotest; see http://golang.org/doc/code.html for details")
}
}
// Fatalf formats its arguments, prints the message with a final newline, and exits.
// It never returns; the exit status is 2.
func Fatalf(s string, args ...interface{}) {
fmt.Fprintf(os.Stderr, "gotest: "+s+"\n", args...)
os.Exit(2)
}
// theChar is the map from architecture to object character.
var theChar = map[string]string{
"arm": "5",
"amd64": "6",
"386": "8",
}
// addEnv adds a name=value pair to the environment passed to subcommands.
// If the item is already in the environment, addEnv replaces the value.
func addEnv(name, value string) {
for i := 0; i < len(env); i++ {
if strings.HasPrefix(env[i], name+"=") {
env[i] = name + "=" + value
return
}
}
env = append(env, name+"="+value)
}
// setEnvironment assembles the configuration for gotest and its subcommands:
// GOROOT/GOARCH, the compiler and linker command lines (overridable via the
// GC and GL environment variables), and make-silencing variables.
func setEnvironment() {
// Basic environment.
GOROOT = runtime.GOROOT()
addEnv("GOROOT", GOROOT)
GOARCH = runtime.GOARCH
addEnv("GOARCH", GOARCH)
O = theChar[GOARCH]
if O == "" {
Fatalf("unknown architecture %s", GOARCH)
}
// Commands and their flags.
gc := os.Getenv("GC")
if gc == "" {
gc = O + "g"
}
XGC = []string{gc, "-I", "_test", "-o", "_xtest_." + O}
GC = []string{gc, "-I", "_test", "_testmain.go"}
gl := os.Getenv("GL")
if gl == "" {
gl = O + "l"
}
GL = []string{gl, "-L", "_test", "_testmain." + O}
// Silence make on Linux
addEnv("MAKEFLAGS", "")
addEnv("MAKELEVEL", "")
}
// getTestFileNames gets the set of files we're looking at.
// If gotest has no arguments, it scans the current directory for _test.go files.
func getTestFileNames() {
names := fileNames
if len(names) == 0 {
// NOTE(review): the glob skips dot-files; the error message below
// quotes the simpler "*_test.go" form. Pre-Go1 Glob returned one value.
names = filepath.Glob("[^.]*_test.go")
if len(names) == 0 {
Fatalf(`no test files found: no match for "*_test.go"`)
}
}
for _, n := range names {
// Pre-Go1 os.Open(name, flag, perm). The descriptor is kept in File.file.
fd, err := os.Open(n, os.O_RDONLY, 0)
if err != nil {
Fatalf("%s: %s", n, err)
}
f := &File{name: n, file: fd}
files = append(files, f)
}
}
// parseFiles parses the files and remembers the packages we find.
func parseFiles() {
fileSet := token.NewFileSet()
for _, f := range files {
// Report declaration errors so we can abort if the files are incorrect Go.
file, err := parser.ParseFile(fileSet, f.name, nil, parser.DeclarationErrors)
if err != nil {
Fatalf("parse error: %s", err)
}
f.astFile = file
f.pkg = file.Name.String()
if f.pkg == "" {
Fatalf("cannot happen: no package name in %s", f.name)
}
}
}
// getTestNames extracts the names of tests and benchmarks. They are all
// top-level functions that are not methods. It also partitions the file
// names into insideFileNames / outsideFileNames by package suffix.
func getTestNames() {
for _, f := range files {
for _, d := range f.astFile.Decls {
n, ok := d.(*ast.FuncDecl)
if !ok {
continue
}
if n.Recv != nil { // a method, not a function.
continue
}
name := n.Name.String()
if isTest(name, "Test") {
f.tests = append(f.tests, name)
} else if isTest(name, "Benchmark") {
f.benchmarks = append(f.benchmarks, name)
}
// TODO: worth checking the signature? Probably not.
}
if strings.HasSuffix(f.pkg, "_test") {
outsideFileNames = append(outsideFileNames, f.name)
} else {
insideFileNames = append(insideFileNames, f.name)
}
}
}
// isTest reports whether name looks like a test (or benchmark, depending
// on prefix): it must start with prefix, and the character immediately
// following must not be a lower-case letter. The bare prefix itself is
// accepted ("Test" is ok); "TesticularCancer" is not.
func isTest(name, prefix string) bool {
	if !strings.HasPrefix(name, prefix) {
		return false
	}
	rest := name[len(prefix):]
	if rest == "" { // bare "Test"/"Benchmark" is ok
		return true
	}
	first, _ := utf8.DecodeRuneInString(rest)
	return !unicode.IsLower(first)
}
// run executes the command, discarding its standard output.
func run(args ...string) {
doRun(args, false)
}
// runWithStdout is like run, but returns the text of standard output with the last newline dropped.
// It is a fatal error for the command to produce no output at all.
func runWithStdout(argv ...string) string {
s := doRun(argv, true)
if len(s) == 0 {
Fatalf("no output from command %s", strings.Join(argv, " "))
}
if s[len(s)-1] == '\n' {
s = s[:len(s)-1]
}
return s
}
// runTestWithArgs runs the provided test binary, appending the arguments
// that were passed on the gotest command line.
func runTestWithArgs(binary string) {
doRun(append([]string{binary}, args...), false)
}
// doRun is the general command runner. The flag says whether we want to
// retrieve standard output; if so, the child's stdout is redirected
// through a pipe and returned as a string. Any failure is fatal.
func doRun(argv []string, returnStdout bool) string {
if xFlag {
fmt.Printf("gotest: %s\n", strings.Join(argv, " "))
}
if runtime.GOOS == "windows" && argv[0] == "gomake" {
// gomake is a shell script and it cannot be executed directly on Windows.
// Quote each argument individually before handing the whole command to
// "sh -c", so arguments containing spaces keep their boundaries.
cmd := ""
for i, v := range argv {
if i > 0 {
cmd += " "
}
cmd += `"` + v + `"`
}
argv = []string{"cmd", "/c", "sh", "-c", cmd}
}
var err os.Error
argv[0], err = exec.LookPath(argv[0])
if err != nil {
Fatalf("can't find %s: %s", argv[0], err)
}
procAttr := &os.ProcAttr{
Env: env,
Files: []*os.File{
os.Stdin,
os.Stdout,
os.Stderr,
},
}
var r, w *os.File
if returnStdout {
r, w, err = os.Pipe()
if err != nil {
Fatalf("can't create pipe: %s", err)
}
procAttr.Files[1] = w
}
proc, err := os.StartProcess(argv[0], argv, procAttr)
if err != nil {
Fatalf("make failed to start: %s", err)
}
if returnStdout {
// Close the parent's write end so ReadAll below sees EOF when the
// child exits.
defer r.Close()
w.Close()
}
waitMsg, err := proc.Wait(0)
if err != nil || waitMsg == nil {
Fatalf("%s failed: %s", argv[0], err)
}
if !waitMsg.Exited() || waitMsg.ExitStatus() != 0 {
Fatalf("%q failed: %s", strings.Join(argv, " "), waitMsg)
}
if returnStdout {
b, err := ioutil.ReadAll(r)
if err != nil {
Fatalf("can't read output from command: %s", err)
}
return string(b)
}
return ""
}
// writeTestmainGo generates the test program to be compiled, "./_testmain.go".
// It emits the package clause, the imports (renaming reserved/conflicting
// names), the tables of tests and benchmarks, and the fixed testBody text.
func writeTestmainGo() {
// Pre-Go1 os.Open with explicit flags; this creates/truncates the file.
f, err := os.Open("_testmain.go", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
Fatalf("can't create _testmain.go: %s", err)
}
defer f.Close()
b := bufio.NewWriter(f)
defer b.Flush()
// Package and imports.
fmt.Fprint(b, "package main\n\n")
// Are there tests from a package other than the one we're testing?
// We can't just use file names because some of the things we compiled
// contain no tests.
outsideTests := false
insideTests := false
for _, f := range files {
//println(f.name, f.pkg)
if len(f.tests) == 0 && len(f.benchmarks) == 0 {
continue
}
if strings.HasSuffix(f.pkg, "_test") {
outsideTests = true
} else {
insideTests = true
}
}
if insideTests {
switch importPath {
case "testing":
case "main":
// Import path main is reserved, so import with
// explicit reference to ./_test/main instead.
// Also, the file we are writing defines a function named main,
// so rename this import to __main__ to avoid name conflict.
fmt.Fprintf(b, "import __main__ %q\n", "./_test/main")
default:
fmt.Fprintf(b, "import %q\n", importPath)
}
}
if outsideTests {
fmt.Fprintf(b, "import %q\n", "./_xtest_")
}
fmt.Fprintf(b, "import %q\n", "testing")
fmt.Fprintf(b, "import __os__ %q\n", "os") // rename in case tested package is called os
fmt.Fprintf(b, "import __regexp__ %q\n", "regexp") // rename in case tested package is called regexp
fmt.Fprintln(b) // for gofmt
// Tests.
fmt.Fprintln(b, "var tests = []testing.InternalTest{")
for _, f := range files {
for _, t := range f.tests {
fmt.Fprintf(b, "\t{\"%s.%s\", %s.%s},\n", f.pkg, t, notMain(f.pkg), t)
}
}
fmt.Fprintln(b, "}")
fmt.Fprintln(b)
// Benchmarks.
fmt.Fprintln(b, "var benchmarks = []testing.InternalBenchmark{")
for _, f := range files {
for _, bm := range f.benchmarks {
fmt.Fprintf(b, "\t{\"%s.%s\", %s.%s},\n", f.pkg, bm, notMain(f.pkg), bm)
}
}
fmt.Fprintln(b, "}")
// Body.
fmt.Fprintln(b, testBody)
}
// notMain maps the package name "main" to the renamed import identifier
// "__main__" used by the generated _testmain.go; every other name is
// returned unchanged.
func notMain(pkg string) string {
	switch pkg {
	case "main":
		return "__main__"
	default:
		return pkg
	}
}
// testBody is just copied to the output. It's the code that runs the tests.
// The text below is Go source emitted verbatim into _testmain.go, not live
// code in this file; __os__ and __regexp__ are the renamed imports written
// by writeTestmainGo.
var testBody = `
var matchPat string
var matchRe *__regexp__.Regexp
func matchString(pat, str string) (result bool, err __os__.Error) {
if matchRe == nil || matchPat != pat {
matchPat = pat
matchRe, err = __regexp__.Compile(matchPat)
if err != nil {
return
}
}
return matchRe.MatchString(str), nil
}
func main() {
testing.Main(matchString, tests, benchmarks)
}`
|
// +build darwin
package scard
// #cgo LDFLAGS: -framework PCSC
// #cgo CFLAGS: -I /usr/include
// #include <stdlib.h>
// #include <PCSC/winscard.h>
// #include <PCSC/wintypes.h>
import "C"
import (
"unsafe"
)
// Error implements the error interface by asking libpcsclite for the
// human-readable message that corresponds to the PC/SC return code.
func (e Error) Error() string {
return "scard: " + C.GoString(C.pcsc_stringify_error(C.int32_t(e)))
}
// Version returns the libpcsclite version string
func Version() string {
return C.PCSCLITE_VERSION_NUMBER
}
// scardEstablishContext wraps SCardEstablishContext, returning the new
// context handle and the PC/SC status code.
func scardEstablishContext(scope Scope, reserved1, reserved2 uintptr) (uintptr, Error) {
var ctx C.SCARDCONTEXT
r := C.SCardEstablishContext(C.uint32_t(scope), unsafe.Pointer(reserved1), unsafe.Pointer(reserved2), &ctx)
return uintptr(ctx), Error(r)
}
// scardIsValidContext wraps SCardIsValidContext.
func scardIsValidContext(ctx uintptr) Error {
r := C.SCardIsValidContext(C.SCARDCONTEXT(ctx))
return Error(r)
}
// scardCancel wraps SCardCancel, aborting a blocking status-change wait.
func scardCancel(ctx uintptr) Error {
r := C.SCardCancel(C.SCARDCONTEXT(ctx))
return Error(r)
}
// scardReleaseContext wraps SCardReleaseContext.
func scardReleaseContext(ctx uintptr) Error {
r := C.SCardReleaseContext(C.SCARDCONTEXT(ctx))
return Error(r)
}
// scardListReaders wraps SCardListReaders; it fills buf (multi-string) and
// returns the number of bytes written/required.
func scardListReaders(ctx uintptr, groups, buf unsafe.Pointer, bufLen uint32) (uint32, Error) {
dwBufLen := C.uint32_t(bufLen)
r := C.SCardListReaders(C.SCARDCONTEXT(ctx), (C.LPCSTR)(groups), (C.LPSTR)(buf), &dwBufLen)
return uint32(dwBufLen), Error(r)
}
// scardListReaderGroups wraps SCardListReaderGroups; same buffer protocol
// as scardListReaders.
func scardListReaderGroups(ctx uintptr, buf unsafe.Pointer, bufLen uint32) (uint32, Error) {
dwBufLen := C.uint32_t(bufLen)
r := C.SCardListReaderGroups(C.SCARDCONTEXT(ctx), (C.LPSTR)(buf), &dwBufLen)
return uint32(dwBufLen), Error(r)
}
// scardGetStatusChange wraps SCardGetStatusChange. Because the darwin
// LPSCARD_READERSTATE_A struct is 1-byte aligned while the Go struct has
// trailing padding, the states are packed into a temporary byte array
// before the call and copied back afterwards.
func scardGetStatusChange(ctx uintptr, timeout uint32, states []scardReaderState) Error {
	// In darwin, the LPSCARD_READERSTATE_A has 1 byte alignment and hence
	// has no trailing padding. Go does add 3 bytes of padding (on both 32
	// and 64 bits), so we pack an array manually instead.
	const size = int(unsafe.Sizeof(states[0])) - 3
	buf := make([]byte, size*len(states))
	for i := range states {
		copy(buf[i*size:(i+1)*size], (*(*[size]byte)(unsafe.Pointer(&states[i])))[:])
	}
	r := C.SCardGetStatusChange(C.SCARDCONTEXT(ctx), C.uint32_t(timeout), (C.LPSCARD_READERSTATE_A)(unsafe.Pointer(&buf[0])), C.uint32_t(len(states)))
	for i := range states {
		copy((*(*[size]byte)(unsafe.Pointer(&states[i])))[:], buf[i*size:(i+1)*size])
	}
	return Error(r)
}
// scardConnect wraps SCardConnect, returning the card handle and the
// protocol actually negotiated.
func scardConnect(ctx uintptr, reader unsafe.Pointer, shareMode ShareMode, proto Protocol) (uintptr, Protocol, Error) {
var handle C.SCARDHANDLE
var activeProto C.uint32_t
r := C.SCardConnect(C.SCARDCONTEXT(ctx), C.LPCSTR(reader), C.uint32_t(shareMode), C.uint32_t(proto), &handle, &activeProto)
return uintptr(handle), Protocol(activeProto), Error(r)
}
// scardDisconnect wraps SCardDisconnect.
func scardDisconnect(card uintptr, d Disposition) Error {
r := C.SCardDisconnect(C.SCARDHANDLE(card), C.uint32_t(d))
return Error(r)
}
// scardReconnect wraps SCardReconnect, returning the renegotiated protocol.
func scardReconnect(card uintptr, mode ShareMode, proto Protocol, disp Disposition) (Protocol, Error) {
var activeProtocol C.uint32_t
r := C.SCardReconnect(C.SCARDHANDLE(card), C.uint32_t(mode), C.uint32_t(proto), C.uint32_t(disp), &activeProtocol)
return Protocol(activeProtocol), Error(r)
}
// scardBeginTransaction wraps SCardBeginTransaction.
func scardBeginTransaction(card uintptr) Error {
r := C.SCardBeginTransaction(C.SCARDHANDLE(card))
return Error(r)
}
// scardEndTransaction wraps SCardEndTransaction.
func scardEndTransaction(card uintptr, disp Disposition) Error {
r := C.SCardEndTransaction(C.SCARDHANDLE(card), C.uint32_t(disp))
return Error(r)
}
// scardCardStatus wraps SCardStatus; the caller supplies the reader-name
// and ATR buffers, and the written lengths are returned.
func scardCardStatus(card uintptr, readerBuf strbuf, atrBuf []byte) (uint32, State, Protocol, uint32, Error) {
var readerLen = C.uint32_t(len(readerBuf))
var state, proto C.uint32_t
var atrLen = C.uint32_t(len(atrBuf))
r := C.SCardStatus(C.SCARDHANDLE(card), (C.LPSTR)(unsafe.Pointer(&readerBuf[0])), &readerLen, &state, &proto, (*C.uchar)(&atrBuf[0]), &atrLen)
return uint32(readerLen), State(state), Protocol(proto), uint32(atrLen), Error(r)
}
// scardTransmit wraps SCardTransmit, sending cmd and filling rsp; only
// T=0 and T=1 protocols are supported (anything else panics).
func scardTransmit(card uintptr, proto Protocol, cmd []byte, rsp []byte) (uint32, Error) {
var sendpci C.SCARD_IO_REQUEST
var recvpci C.SCARD_IO_REQUEST
var rspLen = C.uint32_t(len(rsp))
switch proto {
case ProtocolT0, ProtocolT1:
sendpci.dwProtocol = C.uint32_t(proto)
default:
panic("unknown protocol")
}
sendpci.cbPciLength = C.sizeof_SCARD_IO_REQUEST
r := C.SCardTransmit(C.SCARDHANDLE(card), &sendpci, (*C.uchar)(&cmd[0]), C.uint32_t(len(cmd)), &recvpci, (*C.uchar)(&rsp[0]), &rspLen)
return uint32(rspLen), Error(r)
}
// scardControl wraps SCardControl; in may be empty, out must be non-empty
// (its first element's address is taken unconditionally).
func scardControl(card uintptr, ioctl uint32, in, out []byte) (uint32, Error) {
var ptrIn unsafe.Pointer
var outLen = C.uint32_t(len(out))
if len(in) != 0 {
ptrIn = unsafe.Pointer(&in[0])
}
r := C.SCardControl(C.SCARDHANDLE(card), C.uint32_t(ioctl), ptrIn, C.uint32_t(len(in)), unsafe.Pointer(&out[0]), C.uint32_t(len(out)), &outLen)
return uint32(outLen), Error(r)
}
// scardGetAttrib wraps SCardGetAttrib; with an empty buf it queries the
// required length.
func scardGetAttrib(card uintptr, id Attrib, buf []byte) (uint32, Error) {
var ptr *C.uint8_t
if len(buf) != 0 {
ptr = (*C.uint8_t)(&buf[0])
}
bufLen := C.uint32_t(len(buf))
r := C.SCardGetAttrib(C.SCARDHANDLE(card), C.uint32_t(id), ptr, &bufLen)
return uint32(bufLen), Error(r)
}
// scardSetAttrib wraps SCardSetAttrib; buf must be non-empty.
func scardSetAttrib(card uintptr, id Attrib, buf []byte) Error {
r := C.SCardSetAttrib(C.SCARDHANDLE(card), C.uint32_t(id), ((*C.uint8_t)(&buf[0])), C.uint32_t(len(buf)))
return Error(r)
}
// strbuf holds a NUL-terminated, C-style byte string.
type strbuf []byte

// encodestr converts a Go string into a NUL-terminated strbuf.
// The error result is always nil; it exists to keep a uniform signature.
func encodestr(s string) (strbuf, error) {
	b := make(strbuf, 0, len(s)+1)
	b = append(b, s...)
	b = append(b, 0)
	return b, nil
}

// decodestr converts a (possibly NUL-terminated) strbuf back into a Go
// string, dropping a single trailing NUL if present.
func decodestr(buf strbuf) string {
	if n := len(buf); n > 0 && buf[n-1] == 0 {
		buf = buf[:n-1]
	}
	return string(buf)
}
// scardReaderState mirrors the C SCARD_READERSTATE_A layout (field names
// follow the C struct; see the packing note in scardGetStatusChange).
type scardReaderState struct {
szReader uintptr
pvUserData uintptr
dwCurrentState uint32
dwEventState uint32
cbAtr uint32
rgbAtr [33]byte
}
// pinned keeps the C reader-name buffers reachable so the garbage
// collector does not free memory whose address was handed to C.
var pinned = map[string]*strbuf{}
// toSys converts the public ReaderState into its C-layout counterpart,
// pinning the encoded reader name for the duration of the C call.
func (rs *ReaderState) toSys() (scardReaderState, error) {
var sys scardReaderState
creader, err := encodestr(rs.Reader)
if err != nil {
return scardReaderState{}, err
}
pinned[rs.Reader] = &creader
sys.szReader = uintptr(creader.ptr())
sys.dwCurrentState = uint32(rs.CurrentState)
sys.cbAtr = uint32(len(rs.Atr))
for i, v := range rs.Atr {
sys.rgbAtr[i] = byte(v)
}
return sys, nil
}
// update copies the event state and ATR reported by the C call back into
// the public ReaderState.
func (rs *ReaderState) update(sys *scardReaderState) {
rs.EventState = StateFlag(sys.dwEventState)
if sys.cbAtr > 0 {
rs.Atr = make([]byte, int(sys.cbAtr))
for i := 0; i < int(sys.cbAtr); i++ {
rs.Atr[i] = byte(sys.rgbAtr[i])
}
}
}
clean up cflags for darwin
// +build darwin
package scard
// #cgo LDFLAGS: -framework PCSC
// #include <stdlib.h>
// #include <PCSC/winscard.h>
// #include <PCSC/wintypes.h>
import "C"
import (
"unsafe"
)
// Error implements the error interface by asking libpcsclite for the
// human-readable message that corresponds to the PC/SC return code.
func (e Error) Error() string {
return "scard: " + C.GoString(C.pcsc_stringify_error(C.int32_t(e)))
}
// Version returns the libpcsclite version string
func Version() string {
return C.PCSCLITE_VERSION_NUMBER
}
// scardEstablishContext wraps SCardEstablishContext, returning the new
// context handle and the PC/SC status code.
func scardEstablishContext(scope Scope, reserved1, reserved2 uintptr) (uintptr, Error) {
var ctx C.SCARDCONTEXT
r := C.SCardEstablishContext(C.uint32_t(scope), unsafe.Pointer(reserved1), unsafe.Pointer(reserved2), &ctx)
return uintptr(ctx), Error(r)
}
// scardIsValidContext wraps SCardIsValidContext.
func scardIsValidContext(ctx uintptr) Error {
r := C.SCardIsValidContext(C.SCARDCONTEXT(ctx))
return Error(r)
}
// scardCancel wraps SCardCancel, aborting a blocking status-change wait.
func scardCancel(ctx uintptr) Error {
r := C.SCardCancel(C.SCARDCONTEXT(ctx))
return Error(r)
}
// scardReleaseContext wraps SCardReleaseContext.
func scardReleaseContext(ctx uintptr) Error {
r := C.SCardReleaseContext(C.SCARDCONTEXT(ctx))
return Error(r)
}
// scardListReaders wraps SCardListReaders; it fills buf (multi-string) and
// returns the number of bytes written/required.
func scardListReaders(ctx uintptr, groups, buf unsafe.Pointer, bufLen uint32) (uint32, Error) {
dwBufLen := C.uint32_t(bufLen)
r := C.SCardListReaders(C.SCARDCONTEXT(ctx), (C.LPCSTR)(groups), (C.LPSTR)(buf), &dwBufLen)
return uint32(dwBufLen), Error(r)
}
// scardListReaderGroups wraps SCardListReaderGroups; same buffer protocol
// as scardListReaders.
func scardListReaderGroups(ctx uintptr, buf unsafe.Pointer, bufLen uint32) (uint32, Error) {
dwBufLen := C.uint32_t(bufLen)
r := C.SCardListReaderGroups(C.SCARDCONTEXT(ctx), (C.LPSTR)(buf), &dwBufLen)
return uint32(dwBufLen), Error(r)
}
// scardGetStatusChange wraps SCardGetStatusChange. Because the darwin
// LPSCARD_READERSTATE_A struct is 1-byte aligned while the Go struct has
// trailing padding, the states are packed into a temporary byte array
// before the call and copied back afterwards.
func scardGetStatusChange(ctx uintptr, timeout uint32, states []scardReaderState) Error {
// In darwin, the LPSCARD_READERSTATE_A has 1 byte alignment and hence
// has no trailing padding. Go does add 3 bytes of padding (on both 32
// and 64 bits), so we pack an array manually instead.
const size = int(unsafe.Sizeof(states[0])) - 3
buf := make([]byte, size*len(states))
for i := range states {
copy(buf[i*size:(i+1)*size], (*(*[size]byte)(unsafe.Pointer(&states[i])))[:])
}
r := C.SCardGetStatusChange(C.SCARDCONTEXT(ctx), C.uint32_t(timeout), (C.LPSCARD_READERSTATE_A)(unsafe.Pointer(&buf[0])), C.uint32_t(len(states)))
for i := range states {
copy((*(*[size]byte)(unsafe.Pointer(&states[i])))[:], buf[i*size:(i+1)*size])
}
return Error(r)
}
// scardConnect wraps SCardConnect, returning the card handle and the
// protocol actually negotiated.
func scardConnect(ctx uintptr, reader unsafe.Pointer, shareMode ShareMode, proto Protocol) (uintptr, Protocol, Error) {
var handle C.SCARDHANDLE
var activeProto C.uint32_t
r := C.SCardConnect(C.SCARDCONTEXT(ctx), C.LPCSTR(reader), C.uint32_t(shareMode), C.uint32_t(proto), &handle, &activeProto)
return uintptr(handle), Protocol(activeProto), Error(r)
}
// scardDisconnect wraps SCardDisconnect.
func scardDisconnect(card uintptr, d Disposition) Error {
r := C.SCardDisconnect(C.SCARDHANDLE(card), C.uint32_t(d))
return Error(r)
}
// scardReconnect wraps SCardReconnect, returning the renegotiated protocol.
func scardReconnect(card uintptr, mode ShareMode, proto Protocol, disp Disposition) (Protocol, Error) {
var activeProtocol C.uint32_t
r := C.SCardReconnect(C.SCARDHANDLE(card), C.uint32_t(mode), C.uint32_t(proto), C.uint32_t(disp), &activeProtocol)
return Protocol(activeProtocol), Error(r)
}
// scardBeginTransaction wraps SCardBeginTransaction.
func scardBeginTransaction(card uintptr) Error {
r := C.SCardBeginTransaction(C.SCARDHANDLE(card))
return Error(r)
}
// scardEndTransaction wraps SCardEndTransaction.
func scardEndTransaction(card uintptr, disp Disposition) Error {
r := C.SCardEndTransaction(C.SCARDHANDLE(card), C.uint32_t(disp))
return Error(r)
}
// scardCardStatus wraps SCardStatus; the caller supplies the reader-name
// and ATR buffers, and the written lengths are returned.
func scardCardStatus(card uintptr, readerBuf strbuf, atrBuf []byte) (uint32, State, Protocol, uint32, Error) {
var readerLen = C.uint32_t(len(readerBuf))
var state, proto C.uint32_t
var atrLen = C.uint32_t(len(atrBuf))
r := C.SCardStatus(C.SCARDHANDLE(card), (C.LPSTR)(unsafe.Pointer(&readerBuf[0])), &readerLen, &state, &proto, (*C.uchar)(&atrBuf[0]), &atrLen)
return uint32(readerLen), State(state), Protocol(proto), uint32(atrLen), Error(r)
}
// scardTransmit wraps SCardTransmit, sending cmd and filling rsp; only
// T=0 and T=1 protocols are supported (anything else panics).
func scardTransmit(card uintptr, proto Protocol, cmd []byte, rsp []byte) (uint32, Error) {
var sendpci C.SCARD_IO_REQUEST
var recvpci C.SCARD_IO_REQUEST
var rspLen = C.uint32_t(len(rsp))
switch proto {
case ProtocolT0, ProtocolT1:
sendpci.dwProtocol = C.uint32_t(proto)
default:
panic("unknown protocol")
}
sendpci.cbPciLength = C.sizeof_SCARD_IO_REQUEST
r := C.SCardTransmit(C.SCARDHANDLE(card), &sendpci, (*C.uchar)(&cmd[0]), C.uint32_t(len(cmd)), &recvpci, (*C.uchar)(&rsp[0]), &rspLen)
return uint32(rspLen), Error(r)
}
// scardControl wraps SCardControl; in may be empty, out must be non-empty
// (its first element's address is taken unconditionally).
func scardControl(card uintptr, ioctl uint32, in, out []byte) (uint32, Error) {
var ptrIn unsafe.Pointer
var outLen = C.uint32_t(len(out))
if len(in) != 0 {
ptrIn = unsafe.Pointer(&in[0])
}
r := C.SCardControl(C.SCARDHANDLE(card), C.uint32_t(ioctl), ptrIn, C.uint32_t(len(in)), unsafe.Pointer(&out[0]), C.uint32_t(len(out)), &outLen)
return uint32(outLen), Error(r)
}
// scardGetAttrib wraps SCardGetAttrib; with an empty buf it queries the
// required length.
func scardGetAttrib(card uintptr, id Attrib, buf []byte) (uint32, Error) {
var ptr *C.uint8_t
if len(buf) != 0 {
ptr = (*C.uint8_t)(&buf[0])
}
bufLen := C.uint32_t(len(buf))
r := C.SCardGetAttrib(C.SCARDHANDLE(card), C.uint32_t(id), ptr, &bufLen)
return uint32(bufLen), Error(r)
}
// scardSetAttrib wraps SCardSetAttrib; buf must be non-empty.
func scardSetAttrib(card uintptr, id Attrib, buf []byte) Error {
r := C.SCardSetAttrib(C.SCARDHANDLE(card), C.uint32_t(id), ((*C.uint8_t)(&buf[0])), C.uint32_t(len(buf)))
return Error(r)
}
// strbuf holds a NUL-terminated, C-style byte string.
type strbuf []byte

// encodestr converts a Go string into a NUL-terminated strbuf.
// The error result is always nil; it exists to keep a uniform signature.
func encodestr(s string) (strbuf, error) {
	b := make(strbuf, 0, len(s)+1)
	b = append(b, s...)
	b = append(b, 0)
	return b, nil
}

// decodestr converts a (possibly NUL-terminated) strbuf back into a Go
// string, dropping a single trailing NUL if present.
func decodestr(buf strbuf) string {
	if n := len(buf); n > 0 && buf[n-1] == 0 {
		buf = buf[:n-1]
	}
	return string(buf)
}
// scardReaderState mirrors the C SCARD_READERSTATE_A layout (field names
// follow the C struct; see the packing note in scardGetStatusChange).
type scardReaderState struct {
szReader uintptr
pvUserData uintptr
dwCurrentState uint32
dwEventState uint32
cbAtr uint32
rgbAtr [33]byte
}
// pinned keeps the C reader-name buffers reachable so the garbage
// collector does not free memory whose address was handed to C.
var pinned = map[string]*strbuf{}
// toSys converts the public ReaderState into its C-layout counterpart,
// pinning the encoded reader name for the duration of the C call.
func (rs *ReaderState) toSys() (scardReaderState, error) {
var sys scardReaderState
creader, err := encodestr(rs.Reader)
if err != nil {
return scardReaderState{}, err
}
pinned[rs.Reader] = &creader
sys.szReader = uintptr(creader.ptr())
sys.dwCurrentState = uint32(rs.CurrentState)
sys.cbAtr = uint32(len(rs.Atr))
for i, v := range rs.Atr {
sys.rgbAtr[i] = byte(v)
}
return sys, nil
}
// update copies the event state and ATR reported by the C call back into
// the public ReaderState.
func (rs *ReaderState) update(sys *scardReaderState) {
rs.EventState = StateFlag(sys.dwEventState)
if sys.cbAtr > 0 {
rs.Atr = make([]byte, int(sys.cbAtr))
for i := 0; i < int(sys.cbAtr); i++ {
rs.Atr[i] = byte(sys.rgbAtr[i])
}
}
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package legacyregistry
import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"k8s.io/component-base/metrics"
)
var (
defaultRegistry = metrics.NewKubeRegistry()
// DefaultGatherer exposes the global registry gatherer
DefaultGatherer metrics.Gatherer = defaultRegistry
)
// init pre-registers the standard process and Go runtime collectors on
// the global registry, bypassing the metric stability framework.
func init() {
RawMustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
RawMustRegister(prometheus.NewGoCollector())
}
// Handler returns an HTTP handler for the DefaultGatherer. It is
// already instrumented with InstrumentHandler (using "prometheus" as handler
// name).
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead.
func Handler() http.Handler {
return promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, promhttp.HandlerFor(defaultRegistry, promhttp.HandlerOpts{}))
}
// HandlerWithReset returns an HTTP handler for the DefaultGatherer but invokes
// registry reset if the http method is DELETE.
func HandlerWithReset() http.Handler {
return promhttp.InstrumentMetricHandler(
prometheus.DefaultRegisterer,
metrics.HandlerWithReset(defaultRegistry, metrics.HandlerOpts{}))
}
// Register adds a registerable metric to the global default registry and
// reports any registration error.
func Register(c metrics.Registerable) error {
	return defaultRegistry.Register(c)
}
// MustRegister registers registerable metrics but uses the global registry.
// Per the Must naming convention this variant is expected to panic on
// registration failure instead of returning an error.
func MustRegister(cs ...metrics.Registerable) {
	defaultRegistry.MustRegister(cs...)
}
// RawMustRegister registers raw prometheus collectors with the global
// registry, bypassing the metric stability framework entirely.
//
// Deprecated: prefer the stability-framework registration helpers above.
func RawMustRegister(cs ...prometheus.Collector) {
	defaultRegistry.RawMustRegister(cs...)
}
// CustomRegister registers a custom collector with the global registry and
// mirrors it into the default prometheus registry on a best-effort basis;
// only the primary registration error is reported.
func CustomRegister(c metrics.StableCollector) error {
	primaryErr := defaultRegistry.CustomRegister(c)
	//TODO(RainbowMango): Maybe we can wrap this error by error wrapping.(Golang 1.13)
	// The secondary registration error is deliberately discarded.
	_ = prometheus.Register(c)
	return primaryErr
}
// CustomMustRegister registers custom collectors with the global registry and
// mirrors each one into the default prometheus registry.
func CustomMustRegister(cs ...metrics.StableCollector) {
	defaultRegistry.CustomMustRegister(cs...)
	for i := range cs {
		prometheus.MustRegister(cs[i])
	}
}
// Reset calls Reset on the global default registry.
func Reset() {
	defaultRegistry.Reset()
}
refactor wrapped functions into variables
Change-Id: I1eec309c537920ba520c7a90f9a1f557ec53cf2c
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package legacyregistry
import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"k8s.io/component-base/metrics"
)
var (
	// defaultRegistry is the process-wide KubeRegistry backing every
	// package-level helper and method-value variable in this file.
	defaultRegistry = metrics.NewKubeRegistry()
	// DefaultGatherer exposes the global registry gatherer
	DefaultGatherer metrics.Gatherer = defaultRegistry
	// Reset calls reset on the global registry
	Reset = defaultRegistry.Reset
	// MustRegister registers registerable metrics but uses the global registry.
	MustRegister = defaultRegistry.MustRegister
	// RawMustRegister registers prometheus collectors but uses the global registry, this
	// bypasses the metric stability framework
	//
	// Deprecated
	RawMustRegister = defaultRegistry.RawMustRegister
	// Register registers a collectable metric but uses the global registry
	Register = defaultRegistry.Register
)
// init seeds the global registry with the standard process and Go runtime
// collectors so basic process metrics are always exported.
func init() {
	RawMustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
	RawMustRegister(prometheus.NewGoCollector())
}
// Handler returns an HTTP handler serving the DefaultGatherer, instrumented
// with InstrumentMetricHandler against the default registerer (handler name
// "prometheus").
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead.
func Handler() http.Handler {
	inner := promhttp.HandlerFor(defaultRegistry, promhttp.HandlerOpts{})
	return promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, inner)
}
// HandlerWithReset returns an HTTP handler for the DefaultGatherer but invokes
// registry reset if the http method is DELETE.
func HandlerWithReset() http.Handler {
return promhttp.InstrumentMetricHandler(
prometheus.DefaultRegisterer,
metrics.HandlerWithReset(defaultRegistry, metrics.HandlerOpts{}))
}
// CustomRegister registers a custom collector with the global registry and
// mirrors it into the default prometheus registry on a best-effort basis;
// only the primary registration error is reported.
func CustomRegister(c metrics.StableCollector) error {
	primaryErr := defaultRegistry.CustomRegister(c)
	//TODO(RainbowMango): Maybe we can wrap this error by error wrapping.(Golang 1.13)
	// The secondary registration error is deliberately discarded.
	_ = prometheus.Register(c)
	return primaryErr
}
// CustomMustRegister registers custom collectors with the global registry and
// mirrors each one into the default prometheus registry.
func CustomMustRegister(cs ...metrics.StableCollector) {
	defaultRegistry.CustomMustRegister(cs...)
	for i := range cs {
		prometheus.MustRegister(cs[i])
	}
}
|
// Copyright (c) 2014, The Tor Project, Inc.
// See LICENSE for licensing information
package natpmp
import (
"net"
"syscall"
"unsafe"
)
// iphlpapi lazily loads the Windows IP Helper API library; procGetBestRoute
// resolves the GetBestRoute routine used to query the routing table.
var iphlpapi = syscall.NewLazyDLL("Iphlpapi.dll")
var procGetBestRoute = iphlpapi.NewProc("GetBestRoute")
// mibIPForwardRow is MIBIPFORWARDROW from windows: one row of the IPv4
// routing table. Field names and order mirror the C structure so the struct
// can be passed to the API by pointer.
type mibIPForwardRow struct {
	dwForwardDest      uint32
	dwForwardMask      uint32
	dwForwardPolicy    uint32
	dwForwardNextHop   uint32 // next-hop gateway address, network byte order
	dwForwardIfIndex   uint32
	dwForwardType      uint32
	dwForwardProto     uint32
	dwForwardAge       uint32
	dwForwardNextHopAS uint32
	dwForwardMetric1   uint32
	dwForwardMetric2   uint32
	dwForwardMetric3   uint32
	dwForwardMetric4   uint32
	dwForwardMetric5   uint32
}
// getGateway returns the default gateway address by asking Windows for the
// best route to 0.0.0.0 via the IP Helper GetBestRoute routine.
func getGateway() (net.IP, error) {
	// This routine uses "unsafe". Yolo, swag, 420 blaze it.
	if err := iphlpapi.Load(); err != nil {
		return nil, err
	}
	if err := procGetBestRoute.Find(); err != nil {
		return nil, err
	}
	var dwDestAddr, dwSourceAddr uintptr // 0.0.0.0
	row := mibIPForwardRow{}
	r0, _, _ := syscall.Syscall(procGetBestRoute.Addr(), 3, dwDestAddr, dwSourceAddr, uintptr(unsafe.Pointer(&row)))
	if r0 != 0 { // r0 != NO_ERROR
		// BUG FIX: GetBestRoute reports failure through its return value, not
		// through the syscall errno (which is 0 here since the call itself
		// succeeded). Returning the old e1 produced a useless "success" error;
		// convert r0 itself into a meaningful errno instead.
		return nil, syscall.Errno(r0)
	}
	// Ok, row should have what windows thinks is the best route to "0.0.0.0"
	// now, which will be the default gateway, per the documentation this is in
	// network byte order. Assume host byte order is little endian because
	// this is windows.
	a := row.dwForwardNextHop
	return net.IPv4(byte(a), byte(a>>8), byte(a>>16), byte(a>>24)), nil
}
Display a useful error when getGateway() fails on Windows.
// Copyright (c) 2014, The Tor Project, Inc.
// See LICENSE for licensing information
package natpmp
import (
"net"
"syscall"
"unsafe"
)
// iphlpapi lazily loads the Windows IP Helper API library; procGetBestRoute
// resolves the GetBestRoute routine used to query the routing table.
var iphlpapi = syscall.NewLazyDLL("Iphlpapi.dll")
var procGetBestRoute = iphlpapi.NewProc("GetBestRoute")
// mibIPForwardRow is MIBIPFORWARDROW from windows: one row of the IPv4
// routing table. Field names and order mirror the C structure so the struct
// can be passed to the API by pointer.
type mibIPForwardRow struct {
	dwForwardDest      uint32
	dwForwardMask      uint32
	dwForwardPolicy    uint32
	dwForwardNextHop   uint32 // next-hop gateway address, network byte order
	dwForwardIfIndex   uint32
	dwForwardType      uint32
	dwForwardProto     uint32
	dwForwardAge       uint32
	dwForwardNextHopAS uint32
	dwForwardMetric1   uint32
	dwForwardMetric2   uint32
	dwForwardMetric3   uint32
	dwForwardMetric4   uint32
	dwForwardMetric5   uint32
}
// getGateway returns the default gateway address by asking Windows for the
// best route to 0.0.0.0 via the IP Helper GetBestRoute routine.
func getGateway() (net.IP, error) {
	// This routine uses "unsafe". Yolo, swag, 420 blaze it.
	if err := iphlpapi.Load(); err != nil {
		return nil, err
	}
	if err := procGetBestRoute.Find(); err != nil {
		return nil, err
	}
	var dwDestAddr, dwSourceAddr uintptr // 0.0.0.0
	row := mibIPForwardRow{}
	r0, _, _ := syscall.Syscall(procGetBestRoute.Addr(), 3, dwDestAddr, dwSourceAddr, uintptr(unsafe.Pointer(&row)))
	if r0 != 0 { // r0 != NO_ERROR
		// The API reports failure via its return value; surface it as an errno.
		return nil, syscall.Errno(r0)
	}
	// Ok, row should have what windows thinks is the best route to "0.0.0.0"
	// now, which will be the default gateway, per the documentation this is in
	// network byte order. Assume host byte order is little endian because
	// this is windows.
	a := row.dwForwardNextHop
	return net.IPv4(byte(a), byte(a>>8), byte(a>>16), byte(a>>24)), nil
}
|
package utils
import (
"bytes"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
)
//Client for http requests: wraps *http.Client with a base URL prefix and a
//default Content-Type applied to every outgoing request.
type Client struct {
	*http.Client
	BaseURL     string // prefix prepended to every request path
	ContentType string // value sent in the Content-Type header
}
//Response wrapper pairing the raw *http.Response with the transport error so
//error handling can be deferred to WithResponse/Solve.
type Response struct {
	Resp *http.Response
	Err  error
}
//InfoExec for the MapExec: Interface (when non-nil) receives the JSON-decoded
//response body before F is invoked with the raw response.
type InfoExec struct {
	Interface interface{}
	F         func(*http.Response) error
}
//MapExec associates status code with InfoExec
type MapExec map[int]InfoExec

// emptyJSON is the "{}" payload used as the body of body-less requests.
var emptyJSON, _ = json.Marshal(struct{}{})
//CallRequest issues a request with the given body and no extra headers.
func (c Client) CallRequest(method string, path string, reader io.Reader) *Response {
	return c.CallRequestWithHeaders(method, path, reader, map[string]string{})
}
//CallRequestNoBody issues a request whose body is an empty JSON object.
func (c Client) CallRequestNoBody(method string, path string) *Response {
	return c.CallRequestWithHeaders(method, path, bytes.NewReader(emptyJSON), map[string]string{})
}
//CallRequestNoBodytWithHeaders issues a request with an empty JSON object as
//body plus the given extra headers. (The "Bodyt" typo in the name is kept for
//backward compatibility with existing callers.)
func (c Client) CallRequestNoBodytWithHeaders(method string, path string, headers map[string]string) *Response {
	return c.CallRequestWithHeaders(method, path, bytes.NewReader(emptyJSON), headers)
}
//CallRequestWithHeaders builds and executes a request with the given body and
//extra headers; the client's Content-Type is set first so headers may
//override it. A request-construction error is returned inside the Response
//instead of being ignored (the old code dropped it and then panicked on the
//nil request).
func (c Client) CallRequestWithHeaders(method string, path string, reader io.Reader, headers map[string]string) *Response {
	req, err := http.NewRequest(method, c.BaseURL+path, reader)
	if err != nil {
		// BUG FIX: previously this error was discarded, leaving req nil and
		// panicking on req.Header.Set below.
		return &Response{nil, err}
	}
	req.Header.Set("Content-Type", c.ContentType)
	for key, val := range headers {
		req.Header.Set(key, val)
	}
	resp, err := c.Do(req)
	return &Response{resp, err}
}
//WithResponse runs f against the underlying response and closes the body when
//f returns; a stored transport error is returned immediately instead.
func (r *Response) WithResponse(f func(*http.Response) error) error {
	if err := r.Err; err != nil {
		return err
	}
	defer r.Resp.Body.Close()
	return f(r.Resp)
}
//Solve dispatches on the response status code: when mapExec has an entry for
//it, the entry's Interface (if set) receives the JSON-decoded body and the
//entry's F is executed. Unknown status codes yield an error.
func (r *Response) Solve(mapExec MapExec) error {
	if r.Err != nil {
		return r.Err
	}
	exec, found := mapExec[r.Resp.StatusCode]
	if !found {
		return errors.New("Status key not found")
	}
	if exec.Interface != nil {
		GetBodyJSON(r.Resp, exec.Interface)
	}
	return exec.F(r.Resp)
}
//GetBodyJSON Gets json form body
func GetBodyJSON(resp *http.Response, i interface{}) {
if jsonDataFromHTTP, err := ioutil.ReadAll(resp.Body); err == nil {
if err := json.Unmarshal([]byte(jsonDataFromHTTP), &i); err != nil {
panic(err)
}
} else {
panic(err)
}
}
Client: add a Default (status-code-0) fallback entry honored by Response.Solve
package utils
import (
"bytes"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
)
// Default is presumably the conventional MapExec key (0) that Response.Solve
// uses as a catch-all fallback when the actual status code has no entry —
// TODO confirm callers register their fallback under this key.
var Default = 0
//Client for http requests: wraps *http.Client with a base URL prefix and a
//default Content-Type applied to every outgoing request.
type Client struct {
	*http.Client
	BaseURL     string // prefix prepended to every request path
	ContentType string // value sent in the Content-Type header
}
//Response wrapper pairing the raw *http.Response with the transport error so
//error handling can be deferred to WithResponse/Solve.
type Response struct {
	Resp *http.Response
	Err  error
}
//InfoExec for the MapExec: Interface (when non-nil) receives the JSON-decoded
//response body before F is invoked with the raw response.
type InfoExec struct {
	Interface interface{}
	F         func(*http.Response) error
}
//MapExec associates status code with InfoExec
type MapExec map[int]InfoExec

// emptyJSON is the "{}" payload used as the body of body-less requests.
var emptyJSON, _ = json.Marshal(struct{}{})
//CallRequest issues a request with the given body and no extra headers.
func (c Client) CallRequest(method string, path string, reader io.Reader) *Response {
	return c.CallRequestWithHeaders(method, path, reader, map[string]string{})
}
//CallRequestNoBody issues a request whose body is an empty JSON object.
func (c Client) CallRequestNoBody(method string, path string) *Response {
	return c.CallRequestWithHeaders(method, path, bytes.NewReader(emptyJSON), map[string]string{})
}
//CallRequestNoBodytWithHeaders issues a request with an empty JSON object as
//body plus the given extra headers. (The "Bodyt" typo in the name is kept for
//backward compatibility with existing callers.)
func (c Client) CallRequestNoBodytWithHeaders(method string, path string, headers map[string]string) *Response {
	return c.CallRequestWithHeaders(method, path, bytes.NewReader(emptyJSON), headers)
}
//CallRequestWithHeaders builds and executes a request with the given body and
//extra headers; the client's Content-Type is set first so headers may
//override it. A request-construction error is returned inside the Response
//instead of being ignored (the old code dropped it and then panicked on the
//nil request).
func (c Client) CallRequestWithHeaders(method string, path string, reader io.Reader, headers map[string]string) *Response {
	req, err := http.NewRequest(method, c.BaseURL+path, reader)
	if err != nil {
		// BUG FIX: previously this error was discarded, leaving req nil and
		// panicking on req.Header.Set below.
		return &Response{nil, err}
	}
	req.Header.Set("Content-Type", c.ContentType)
	for key, val := range headers {
		req.Header.Set(key, val)
	}
	resp, err := c.Do(req)
	return &Response{resp, err}
}
//WithResponse runs f against the underlying response and closes the body when
//f returns; a stored transport error is returned immediately instead.
func (r *Response) WithResponse(f func(*http.Response) error) error {
	if err := r.Err; err != nil {
		return err
	}
	defer r.Resp.Body.Close()
	return f(r.Resp)
}
//Solve dispatches on the response status code. A matching MapExec entry has
//its Interface (when set) filled from the JSON body before its F runs. If no
//entry matches, the entry under key 0 (see the Default variable) is used as a
//fallback; note the fallback path only runs F and does not decode the body
//into Interface.
func (r *Response) Solve(mapExec MapExec) error {
	if r.Err != nil {
		return r.Err
	}
	exec, found := mapExec[r.Resp.StatusCode]
	if found {
		if exec.Interface != nil {
			GetBodyJSON(r.Resp, exec.Interface)
		}
		return exec.F(r.Resp)
	}
	if fallback, ok := mapExec[0]; ok {
		return fallback.F(r.Resp)
	}
	return errors.New("Status key not found")
}
//GetBodyJSON Gets json form body
func GetBodyJSON(resp *http.Response, i interface{}) {
if jsonDataFromHTTP, err := ioutil.ReadAll(resp.Body); err == nil {
if err := json.Unmarshal([]byte(jsonDataFromHTTP), &i); err != nil {
panic(err)
}
} else {
panic(err)
}
}
|
// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package utils
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
l4g "github.com/alecthomas/log4go"
"github.com/mattermost/platform/einterfaces"
"github.com/mattermost/platform/model"
)
const (
	MODE_DEV  = "dev"
	MODE_BETA = "beta"
	MODE_PROD = "prod"
	// LOG_ROTATE_SIZE is the line count after which the log file is rotated
	// (see configureLog's SetRotateLines call).
	LOG_ROTATE_SIZE = 10000
)
// Cfg holds the currently loaded server configuration (set by LoadConfig).
var Cfg *model.Config = &model.Config{}

// CfgDiagnosticId is a diagnostics identifier; set elsewhere in the package.
var CfgDiagnosticId = ""

// CfgLastModified records the loaded config file's mtime in unix seconds.
var CfgLastModified int64 = 0

// CfgFileName is the resolved path of the loaded config file.
var CfgFileName string = ""

// ClientCfg is the string-valued subset of Cfg exposed to clients
// (built by getClientConfig).
var ClientCfg map[string]string = map[string]string{}
// FindConfigFile resolves fileName by probing, in order, /tmp/, ./config/,
// ../config/ and finally the name as given; the first existing candidate is
// returned as an absolute path. If nothing exists the name is returned
// unchanged.
func FindConfigFile(fileName string) string {
	for _, prefix := range []string{"/tmp/", "./config/", "../config/", ""} {
		candidate := prefix + fileName
		if _, err := os.Stat(candidate); err == nil {
			abs, _ := filepath.Abs(candidate)
			return abs
		}
	}
	return fileName
}
// FindDir locates dir by probing ./, ../ and /tmp/ in that order, returning
// the first existing candidate as an absolute path with a trailing slash.
// When none exists it falls back to "./".
func FindDir(dir string) string {
	fileName := "."
	for _, candidate := range []string{"./" + dir + "/", "../" + dir + "/", "/tmp/" + dir} {
		if _, err := os.Stat(candidate); err == nil {
			fileName, _ = filepath.Abs(candidate)
			break
		}
	}
	return fileName + "/"
}
// DisableDebugLogForTest raises the stdout log filter to WARNING so tests
// run quietly.
func DisableDebugLogForTest() {
	l4g.Global["stdout"].Level = l4g.WARNING
}
// EnableDebugLogForTest restores the stdout log filter to DEBUG.
func EnableDebugLogForTest() {
	l4g.Global["stdout"].Level = l4g.DEBUG
}
// ConfigureCmdLineLog sets up console-only logging at WARN level, suitable
// for command-line invocations.
func ConfigureCmdLineLog() {
	ls := model.LogSettings{}
	ls.EnableConsole = true
	ls.ConsoleLevel = "WARN"
	configureLog(&ls)
}
// configureLog tears down any existing log4go filters and installs console
// and/or file writers according to the given settings. Level strings other
// than INFO/WARN/ERROR fall back to DEBUG.
func configureLog(s *model.LogSettings) {
	l4g.Close()

	if s.EnableConsole {
		level := l4g.DEBUG
		switch s.ConsoleLevel {
		case "INFO":
			level = l4g.INFO
		case "WARN":
			level = l4g.WARNING
		case "ERROR":
			level = l4g.ERROR
		}
		lw := l4g.NewConsoleLogWriter()
		lw.SetFormat("[%D %T] [%L] %M")
		l4g.AddFilter("stdout", level, lw)
	}

	if s.EnableFile {
		fileFormat := s.FileFormat
		if fileFormat == "" {
			fileFormat = "[%D %T] [%L] %M"
		}
		level := l4g.DEBUG
		switch s.FileLevel {
		case "INFO":
			level = l4g.INFO
		case "WARN":
			level = l4g.WARNING
		case "ERROR":
			level = l4g.ERROR
		}
		flw := l4g.NewFileLogWriter(GetLogFileLocation(s.FileLocation), false)
		flw.SetFormat(fileFormat)
		flw.SetRotate(true)
		flw.SetRotateLines(LOG_ROTATE_SIZE)
		l4g.AddFilter("file", level, flw)
	}
}
// GetLogFileLocation returns fileLocation as-is when set; otherwise it falls
// back to mattermost.log inside the discovered logs directory.
func GetLogFileLocation(fileLocation string) string {
	if fileLocation != "" {
		return fileLocation
	}
	return FindDir("logs") + "mattermost.log"
}
// SaveConfig serializes config as indented JSON and writes it to fileName
// with 0644 permissions. Marshal and write failures are both reported under
// the same localized app-error id.
func SaveConfig(fileName string, config *model.Config) *model.AppError {
	b, err := json.MarshalIndent(config, "", "    ")
	if err != nil {
		return model.NewLocAppError("SaveConfig", "utils.config.save_config.saving.app_error",
			map[string]interface{}{"Filename": fileName}, err.Error())
	}
	err = ioutil.WriteFile(fileName, b, 0644)
	if err != nil {
		return model.NewLocAppError("SaveConfig", "utils.config.save_config.saving.app_error",
			map[string]interface{}{"Filename": fileName}, err.Error())
	}
	return nil
}
// LoadConfig will try to search around for the corresponding config file.
// It will search /tmp/fileName then attempt ./config/fileName,
// then ../config/fileName and last it will look at fileName.
// On any failure (open, decode, stat, validation) it panics with a localized
// message. On success it updates the package globals Cfg, ClientCfg,
// CfgLastModified and CfgFileName, and (re)configures logging.
func LoadConfig(fileName string) {
	fileName = FindConfigFile(fileName)
	file, err := os.Open(fileName)
	if err != nil {
		panic(T("utils.config.load_config.opening.panic",
			map[string]interface{}{"Filename": fileName, "Error": err.Error()}))
	}
	// BUG FIX: the file handle was previously never closed, leaking a
	// descriptor on every (re)load.
	defer file.Close()
	decoder := json.NewDecoder(file)
	config := model.Config{}
	err = decoder.Decode(&config)
	if err != nil {
		panic(T("utils.config.load_config.decoding.panic",
			map[string]interface{}{"Filename": fileName, "Error": err.Error()}))
	}
	// Record the file's mtime so external modifications can be detected.
	if info, err := file.Stat(); err != nil {
		panic(T("utils.config.load_config.getting.panic",
			map[string]interface{}{"Filename": fileName, "Error": err.Error()}))
	} else {
		CfgLastModified = info.ModTime().Unix()
		CfgFileName = fileName
	}
	config.SetDefaults()
	if err := config.IsValid(); err != nil {
		panic(T("utils.config.load_config.validating.panic",
			map[string]interface{}{"Filename": fileName, "Error": err.Message}))
	}
	if err := ValidateLdapFilter(&config); err != nil {
		panic(T("utils.config.load_config.validating.panic",
			map[string]interface{}{"Filename": fileName, "Error": err.Message}))
	}
	configureLog(&config.LogSettings)
	TestConnection(&config)
	// Ensure the local file-storage directory ends with a path separator.
	if config.FileSettings.DriverName == model.IMAGE_DRIVER_LOCAL {
		dir := config.FileSettings.Directory
		if len(dir) > 0 && dir[len(dir)-1:] != "/" {
			config.FileSettings.Directory += "/"
		}
	}
	Cfg = &config
	ClientCfg = getClientConfig(Cfg)
}
// getClientConfig flattens the server config into the string-valued map that
// is shipped to web/mobile clients (see ClientCfg). Only whitelisted,
// non-secret settings are exposed; licensed features are added conditionally.
func getClientConfig(c *model.Config) map[string]string {
	props := make(map[string]string)
	// Build/version identification.
	props["Version"] = model.CurrentVersion
	props["BuildNumber"] = model.BuildNumber
	props["BuildDate"] = model.BuildDate
	props["BuildHash"] = model.BuildHash
	props["BuildEnterpriseReady"] = model.BuildEnterpriseReady
	// Team/service/email/file settings, rendered as strings.
	props["SiteName"] = c.TeamSettings.SiteName
	props["EnableTeamCreation"] = strconv.FormatBool(c.TeamSettings.EnableTeamCreation)
	props["EnableUserCreation"] = strconv.FormatBool(c.TeamSettings.EnableUserCreation)
	props["EnableOpenServer"] = strconv.FormatBool(*c.TeamSettings.EnableOpenServer)
	props["RestrictTeamNames"] = strconv.FormatBool(*c.TeamSettings.RestrictTeamNames)
	props["RestrictDirectMessage"] = *c.TeamSettings.RestrictDirectMessage
	props["EnableOAuthServiceProvider"] = strconv.FormatBool(c.ServiceSettings.EnableOAuthServiceProvider)
	props["SegmentDeveloperKey"] = c.ServiceSettings.SegmentDeveloperKey
	props["GoogleDeveloperKey"] = c.ServiceSettings.GoogleDeveloperKey
	props["EnableIncomingWebhooks"] = strconv.FormatBool(c.ServiceSettings.EnableIncomingWebhooks)
	props["EnableOutgoingWebhooks"] = strconv.FormatBool(c.ServiceSettings.EnableOutgoingWebhooks)
	props["EnableCommands"] = strconv.FormatBool(*c.ServiceSettings.EnableCommands)
	props["EnableOnlyAdminIntegrations"] = strconv.FormatBool(*c.ServiceSettings.EnableOnlyAdminIntegrations)
	props["EnablePostUsernameOverride"] = strconv.FormatBool(c.ServiceSettings.EnablePostUsernameOverride)
	props["EnablePostIconOverride"] = strconv.FormatBool(c.ServiceSettings.EnablePostIconOverride)
	props["EnableDeveloper"] = strconv.FormatBool(*c.ServiceSettings.EnableDeveloper)
	props["SendEmailNotifications"] = strconv.FormatBool(c.EmailSettings.SendEmailNotifications)
	props["EnableSignUpWithEmail"] = strconv.FormatBool(c.EmailSettings.EnableSignUpWithEmail)
	props["EnableSignInWithEmail"] = strconv.FormatBool(*c.EmailSettings.EnableSignInWithEmail)
	props["EnableSignInWithUsername"] = strconv.FormatBool(*c.EmailSettings.EnableSignInWithUsername)
	props["RequireEmailVerification"] = strconv.FormatBool(c.EmailSettings.RequireEmailVerification)
	props["FeedbackEmail"] = c.EmailSettings.FeedbackEmail
	props["EnableSignUpWithGitLab"] = strconv.FormatBool(c.GitLabSettings.Enable)
	props["EnableSignUpWithGoogle"] = strconv.FormatBool(c.GoogleSettings.Enable)
	props["ShowEmailAddress"] = strconv.FormatBool(c.PrivacySettings.ShowEmailAddress)
	props["TermsOfServiceLink"] = *c.SupportSettings.TermsOfServiceLink
	props["PrivacyPolicyLink"] = *c.SupportSettings.PrivacyPolicyLink
	props["AboutLink"] = *c.SupportSettings.AboutLink
	props["HelpLink"] = *c.SupportSettings.HelpLink
	props["ReportAProblemLink"] = *c.SupportSettings.ReportAProblemLink
	props["SupportEmail"] = *c.SupportSettings.SupportEmail
	props["EnablePublicLink"] = strconv.FormatBool(c.FileSettings.EnablePublicLink)
	props["ProfileHeight"] = fmt.Sprintf("%v", c.FileSettings.ProfileHeight)
	props["ProfileWidth"] = fmt.Sprintf("%v", c.FileSettings.ProfileWidth)
	props["WebsocketPort"] = fmt.Sprintf("%v", *c.ServiceSettings.WebsocketPort)
	props["WebsocketSecurePort"] = fmt.Sprintf("%v", *c.ServiceSettings.WebsocketSecurePort)
	props["AllowCorsFrom"] = *c.ServiceSettings.AllowCorsFrom
	// License-gated settings are only exposed when the feature is licensed.
	if IsLicensed {
		if *License.Features.CustomBrand {
			props["EnableCustomBrand"] = strconv.FormatBool(*c.TeamSettings.EnableCustomBrand)
			props["CustomBrandText"] = *c.TeamSettings.CustomBrandText
		}
		if *License.Features.LDAP {
			props["EnableLdap"] = strconv.FormatBool(*c.LdapSettings.Enable)
			props["LdapLoginFieldName"] = *c.LdapSettings.LoginFieldName
			props["NicknameAttributeSet"] = strconv.FormatBool(*c.LdapSettings.NicknameAttribute != "")
		}
		if *License.Features.MFA {
			props["EnableMultifactorAuthentication"] = strconv.FormatBool(*c.ServiceSettings.EnableMultifactorAuthentication)
		}
		if *License.Features.Compliance {
			props["EnableCompliance"] = strconv.FormatBool(*c.ComplianceSettings.Enable)
		}
	}
	return props
}
// ValidateLdapFilter checks the configured LDAP user filter through the LDAP
// enterprise interface; it is a no-op when LDAP is disabled, the interface is
// unavailable, or the filter is empty.
func ValidateLdapFilter(cfg *model.Config) *model.AppError {
	ldapInterface := einterfaces.GetLdapInterface()
	// Same conditions as before, inverted into a guard clause (evaluation
	// order preserved).
	if !*cfg.LdapSettings.Enable || ldapInterface == nil || *cfg.LdapSettings.UserFilter == "" {
		return nil
	}
	return ldapInterface.ValidateFilter(*cfg.LdapSettings.UserFilter)
}
// Desanitize restores the real secret values into cfg wherever a field still
// holds the FAKE_SETTING placeholder, copying the originals from the global
// Cfg.
func Desanitize(cfg *model.Config) {
	// BUG FIX (PLT-2972): BindPassword is a pointer and may be nil; guard
	// before dereferencing to avoid a panic.
	if cfg.LdapSettings.BindPassword != nil && *cfg.LdapSettings.BindPassword == model.FAKE_SETTING {
		*cfg.LdapSettings.BindPassword = *Cfg.LdapSettings.BindPassword
	}
	if cfg.FileSettings.PublicLinkSalt == model.FAKE_SETTING {
		cfg.FileSettings.PublicLinkSalt = Cfg.FileSettings.PublicLinkSalt
	}
	if cfg.FileSettings.AmazonS3SecretAccessKey == model.FAKE_SETTING {
		cfg.FileSettings.AmazonS3SecretAccessKey = Cfg.FileSettings.AmazonS3SecretAccessKey
	}
	if cfg.EmailSettings.InviteSalt == model.FAKE_SETTING {
		cfg.EmailSettings.InviteSalt = Cfg.EmailSettings.InviteSalt
	}
	if cfg.EmailSettings.PasswordResetSalt == model.FAKE_SETTING {
		cfg.EmailSettings.PasswordResetSalt = Cfg.EmailSettings.PasswordResetSalt
	}
	if cfg.EmailSettings.SMTPPassword == model.FAKE_SETTING {
		cfg.EmailSettings.SMTPPassword = Cfg.EmailSettings.SMTPPassword
	}
	if cfg.GitLabSettings.Secret == model.FAKE_SETTING {
		cfg.GitLabSettings.Secret = Cfg.GitLabSettings.Secret
	}
	if cfg.SqlSettings.DataSource == model.FAKE_SETTING {
		cfg.SqlSettings.DataSource = Cfg.SqlSettings.DataSource
	}
	if cfg.SqlSettings.AtRestEncryptKey == model.FAKE_SETTING {
		cfg.SqlSettings.AtRestEncryptKey = Cfg.SqlSettings.AtRestEncryptKey
	}
	// Replicas are restored unconditionally, index by index.
	for i := range cfg.SqlSettings.DataSourceReplicas {
		cfg.SqlSettings.DataSourceReplicas[i] = Cfg.SqlSettings.DataSourceReplicas[i]
	}
}
PLT-2972 Fix config Desanitize (#3018)
// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package utils
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
l4g "github.com/alecthomas/log4go"
"github.com/mattermost/platform/einterfaces"
"github.com/mattermost/platform/model"
)
const (
	MODE_DEV  = "dev"
	MODE_BETA = "beta"
	MODE_PROD = "prod"
	// LOG_ROTATE_SIZE is the line count after which the log file is rotated
	// (see configureLog's SetRotateLines call).
	LOG_ROTATE_SIZE = 10000
)
// Cfg holds the currently loaded server configuration (set by LoadConfig).
var Cfg *model.Config = &model.Config{}

// CfgDiagnosticId is a diagnostics identifier; set elsewhere in the package.
var CfgDiagnosticId = ""

// CfgLastModified records the loaded config file's mtime in unix seconds.
var CfgLastModified int64 = 0

// CfgFileName is the resolved path of the loaded config file.
var CfgFileName string = ""

// ClientCfg is the string-valued subset of Cfg exposed to clients
// (built by getClientConfig).
var ClientCfg map[string]string = map[string]string{}
// FindConfigFile resolves fileName by probing, in order, /tmp/, ./config/,
// ../config/ and finally the name as given; the first existing candidate is
// returned as an absolute path. If nothing exists the name is returned
// unchanged.
func FindConfigFile(fileName string) string {
	if _, err := os.Stat("/tmp/" + fileName); err == nil {
		fileName, _ = filepath.Abs("/tmp/" + fileName)
	} else if _, err := os.Stat("./config/" + fileName); err == nil {
		fileName, _ = filepath.Abs("./config/" + fileName)
	} else if _, err := os.Stat("../config/" + fileName); err == nil {
		fileName, _ = filepath.Abs("../config/" + fileName)
	} else if _, err := os.Stat(fileName); err == nil {
		fileName, _ = filepath.Abs(fileName)
	}
	return fileName
}
// FindDir locates dir by probing ./, ../ and /tmp/ in that order, returning
// the first existing candidate as an absolute path with a trailing slash;
// when none exists it falls back to "./".
func FindDir(dir string) string {
	fileName := "."
	if _, err := os.Stat("./" + dir + "/"); err == nil {
		fileName, _ = filepath.Abs("./" + dir + "/")
	} else if _, err := os.Stat("../" + dir + "/"); err == nil {
		fileName, _ = filepath.Abs("../" + dir + "/")
	} else if _, err := os.Stat("/tmp/" + dir); err == nil {
		fileName, _ = filepath.Abs("/tmp/" + dir)
	}
	return fileName + "/"
}
// DisableDebugLogForTest raises the stdout log filter to WARNING so tests
// run quietly.
func DisableDebugLogForTest() {
	l4g.Global["stdout"].Level = l4g.WARNING
}
// EnableDebugLogForTest restores the stdout log filter to DEBUG.
func EnableDebugLogForTest() {
	l4g.Global["stdout"].Level = l4g.DEBUG
}
// ConfigureCmdLineLog sets up console-only logging at WARN level, suitable
// for command-line invocations.
func ConfigureCmdLineLog() {
	ls := model.LogSettings{}
	ls.EnableConsole = true
	ls.ConsoleLevel = "WARN"
	configureLog(&ls)
}
// configureLog tears down any existing log4go filters and installs console
// and/or file writers according to the given settings. Level strings other
// than INFO/WARN/ERROR fall back to DEBUG.
func configureLog(s *model.LogSettings) {
	l4g.Close()
	if s.EnableConsole {
		level := l4g.DEBUG
		if s.ConsoleLevel == "INFO" {
			level = l4g.INFO
		} else if s.ConsoleLevel == "WARN" {
			level = l4g.WARNING
		} else if s.ConsoleLevel == "ERROR" {
			level = l4g.ERROR
		}
		lw := l4g.NewConsoleLogWriter()
		lw.SetFormat("[%D %T] [%L] %M")
		l4g.AddFilter("stdout", level, lw)
	}
	if s.EnableFile {
		// An empty FileFormat falls back to the console format.
		var fileFormat = s.FileFormat
		if fileFormat == "" {
			fileFormat = "[%D %T] [%L] %M"
		}
		level := l4g.DEBUG
		if s.FileLevel == "INFO" {
			level = l4g.INFO
		} else if s.FileLevel == "WARN" {
			level = l4g.WARNING
		} else if s.FileLevel == "ERROR" {
			level = l4g.ERROR
		}
		flw := l4g.NewFileLogWriter(GetLogFileLocation(s.FileLocation), false)
		flw.SetFormat(fileFormat)
		flw.SetRotate(true)
		flw.SetRotateLines(LOG_ROTATE_SIZE)
		l4g.AddFilter("file", level, flw)
	}
}
// GetLogFileLocation returns fileLocation as-is when set; otherwise it falls
// back to mattermost.log inside the discovered logs directory.
func GetLogFileLocation(fileLocation string) string {
	if fileLocation != "" {
		return fileLocation
	}
	return FindDir("logs") + "mattermost.log"
}
// SaveConfig serializes config as indented JSON and writes it to fileName
// with 0644 permissions. Marshal and write failures are both reported under
// the same localized app-error id.
func SaveConfig(fileName string, config *model.Config) *model.AppError {
	b, err := json.MarshalIndent(config, "", "    ")
	if err != nil {
		return model.NewLocAppError("SaveConfig", "utils.config.save_config.saving.app_error",
			map[string]interface{}{"Filename": fileName}, err.Error())
	}
	err = ioutil.WriteFile(fileName, b, 0644)
	if err != nil {
		return model.NewLocAppError("SaveConfig", "utils.config.save_config.saving.app_error",
			map[string]interface{}{"Filename": fileName}, err.Error())
	}
	return nil
}
// LoadConfig will try to search around for the corresponding config file.
// It will search /tmp/fileName then attempt ./config/fileName,
// then ../config/fileName and last it will look at fileName.
// On any failure (open, decode, stat, validation) it panics with a localized
// message. On success it updates the package globals Cfg, ClientCfg,
// CfgLastModified and CfgFileName, and (re)configures logging.
func LoadConfig(fileName string) {
	fileName = FindConfigFile(fileName)
	file, err := os.Open(fileName)
	if err != nil {
		panic(T("utils.config.load_config.opening.panic",
			map[string]interface{}{"Filename": fileName, "Error": err.Error()}))
	}
	// BUG FIX: the file handle was previously never closed, leaking a
	// descriptor on every (re)load.
	defer file.Close()
	decoder := json.NewDecoder(file)
	config := model.Config{}
	err = decoder.Decode(&config)
	if err != nil {
		panic(T("utils.config.load_config.decoding.panic",
			map[string]interface{}{"Filename": fileName, "Error": err.Error()}))
	}
	// Record the file's mtime so external modifications can be detected.
	if info, err := file.Stat(); err != nil {
		panic(T("utils.config.load_config.getting.panic",
			map[string]interface{}{"Filename": fileName, "Error": err.Error()}))
	} else {
		CfgLastModified = info.ModTime().Unix()
		CfgFileName = fileName
	}
	config.SetDefaults()
	if err := config.IsValid(); err != nil {
		panic(T("utils.config.load_config.validating.panic",
			map[string]interface{}{"Filename": fileName, "Error": err.Message}))
	}
	if err := ValidateLdapFilter(&config); err != nil {
		panic(T("utils.config.load_config.validating.panic",
			map[string]interface{}{"Filename": fileName, "Error": err.Message}))
	}
	configureLog(&config.LogSettings)
	TestConnection(&config)
	// Ensure the local file-storage directory ends with a path separator.
	if config.FileSettings.DriverName == model.IMAGE_DRIVER_LOCAL {
		dir := config.FileSettings.Directory
		if len(dir) > 0 && dir[len(dir)-1:] != "/" {
			config.FileSettings.Directory += "/"
		}
	}
	Cfg = &config
	ClientCfg = getClientConfig(Cfg)
}
// getClientConfig flattens the server config into the string-valued map that
// is shipped to web/mobile clients (see ClientCfg). Only whitelisted,
// non-secret settings are exposed; licensed features are added conditionally.
func getClientConfig(c *model.Config) map[string]string {
	props := make(map[string]string)
	// Build/version identification.
	props["Version"] = model.CurrentVersion
	props["BuildNumber"] = model.BuildNumber
	props["BuildDate"] = model.BuildDate
	props["BuildHash"] = model.BuildHash
	props["BuildEnterpriseReady"] = model.BuildEnterpriseReady
	// Team/service/email/file settings, rendered as strings.
	props["SiteName"] = c.TeamSettings.SiteName
	props["EnableTeamCreation"] = strconv.FormatBool(c.TeamSettings.EnableTeamCreation)
	props["EnableUserCreation"] = strconv.FormatBool(c.TeamSettings.EnableUserCreation)
	props["EnableOpenServer"] = strconv.FormatBool(*c.TeamSettings.EnableOpenServer)
	props["RestrictTeamNames"] = strconv.FormatBool(*c.TeamSettings.RestrictTeamNames)
	props["RestrictDirectMessage"] = *c.TeamSettings.RestrictDirectMessage
	props["EnableOAuthServiceProvider"] = strconv.FormatBool(c.ServiceSettings.EnableOAuthServiceProvider)
	props["SegmentDeveloperKey"] = c.ServiceSettings.SegmentDeveloperKey
	props["GoogleDeveloperKey"] = c.ServiceSettings.GoogleDeveloperKey
	props["EnableIncomingWebhooks"] = strconv.FormatBool(c.ServiceSettings.EnableIncomingWebhooks)
	props["EnableOutgoingWebhooks"] = strconv.FormatBool(c.ServiceSettings.EnableOutgoingWebhooks)
	props["EnableCommands"] = strconv.FormatBool(*c.ServiceSettings.EnableCommands)
	props["EnableOnlyAdminIntegrations"] = strconv.FormatBool(*c.ServiceSettings.EnableOnlyAdminIntegrations)
	props["EnablePostUsernameOverride"] = strconv.FormatBool(c.ServiceSettings.EnablePostUsernameOverride)
	props["EnablePostIconOverride"] = strconv.FormatBool(c.ServiceSettings.EnablePostIconOverride)
	props["EnableDeveloper"] = strconv.FormatBool(*c.ServiceSettings.EnableDeveloper)
	props["SendEmailNotifications"] = strconv.FormatBool(c.EmailSettings.SendEmailNotifications)
	props["EnableSignUpWithEmail"] = strconv.FormatBool(c.EmailSettings.EnableSignUpWithEmail)
	props["EnableSignInWithEmail"] = strconv.FormatBool(*c.EmailSettings.EnableSignInWithEmail)
	props["EnableSignInWithUsername"] = strconv.FormatBool(*c.EmailSettings.EnableSignInWithUsername)
	props["RequireEmailVerification"] = strconv.FormatBool(c.EmailSettings.RequireEmailVerification)
	props["FeedbackEmail"] = c.EmailSettings.FeedbackEmail
	props["EnableSignUpWithGitLab"] = strconv.FormatBool(c.GitLabSettings.Enable)
	props["EnableSignUpWithGoogle"] = strconv.FormatBool(c.GoogleSettings.Enable)
	props["ShowEmailAddress"] = strconv.FormatBool(c.PrivacySettings.ShowEmailAddress)
	props["TermsOfServiceLink"] = *c.SupportSettings.TermsOfServiceLink
	props["PrivacyPolicyLink"] = *c.SupportSettings.PrivacyPolicyLink
	props["AboutLink"] = *c.SupportSettings.AboutLink
	props["HelpLink"] = *c.SupportSettings.HelpLink
	props["ReportAProblemLink"] = *c.SupportSettings.ReportAProblemLink
	props["SupportEmail"] = *c.SupportSettings.SupportEmail
	props["EnablePublicLink"] = strconv.FormatBool(c.FileSettings.EnablePublicLink)
	props["ProfileHeight"] = fmt.Sprintf("%v", c.FileSettings.ProfileHeight)
	props["ProfileWidth"] = fmt.Sprintf("%v", c.FileSettings.ProfileWidth)
	props["WebsocketPort"] = fmt.Sprintf("%v", *c.ServiceSettings.WebsocketPort)
	props["WebsocketSecurePort"] = fmt.Sprintf("%v", *c.ServiceSettings.WebsocketSecurePort)
	props["AllowCorsFrom"] = *c.ServiceSettings.AllowCorsFrom
	// License-gated settings are only exposed when the feature is licensed.
	if IsLicensed {
		if *License.Features.CustomBrand {
			props["EnableCustomBrand"] = strconv.FormatBool(*c.TeamSettings.EnableCustomBrand)
			props["CustomBrandText"] = *c.TeamSettings.CustomBrandText
		}
		if *License.Features.LDAP {
			props["EnableLdap"] = strconv.FormatBool(*c.LdapSettings.Enable)
			props["LdapLoginFieldName"] = *c.LdapSettings.LoginFieldName
			props["NicknameAttributeSet"] = strconv.FormatBool(*c.LdapSettings.NicknameAttribute != "")
		}
		if *License.Features.MFA {
			props["EnableMultifactorAuthentication"] = strconv.FormatBool(*c.ServiceSettings.EnableMultifactorAuthentication)
		}
		if *License.Features.Compliance {
			props["EnableCompliance"] = strconv.FormatBool(*c.ComplianceSettings.Enable)
		}
	}
	return props
}
// ValidateLdapFilter checks the configured LDAP user filter through the LDAP
// enterprise interface; it is a no-op when LDAP is disabled, the interface is
// unavailable, or the filter is empty.
func ValidateLdapFilter(cfg *model.Config) *model.AppError {
	ldapInterface := einterfaces.GetLdapInterface()
	// Same conditions as before, inverted into a guard clause (evaluation
	// order preserved).
	if !*cfg.LdapSettings.Enable || ldapInterface == nil || *cfg.LdapSettings.UserFilter == "" {
		return nil
	}
	return ldapInterface.ValidateFilter(*cfg.LdapSettings.UserFilter)
}
// Desanitize restores secret settings in cfg that were previously masked
// with model.FAKE_SETTING, copying the real values back from the live
// global config Cfg. cfg is modified in place.
func Desanitize(cfg *model.Config) {
	if cfg.LdapSettings.BindPassword != nil && *cfg.LdapSettings.BindPassword == model.FAKE_SETTING {
		*cfg.LdapSettings.BindPassword = *Cfg.LdapSettings.BindPassword
	}
	if cfg.FileSettings.PublicLinkSalt == model.FAKE_SETTING {
		cfg.FileSettings.PublicLinkSalt = Cfg.FileSettings.PublicLinkSalt
	}
	if cfg.FileSettings.AmazonS3SecretAccessKey == model.FAKE_SETTING {
		cfg.FileSettings.AmazonS3SecretAccessKey = Cfg.FileSettings.AmazonS3SecretAccessKey
	}
	if cfg.EmailSettings.InviteSalt == model.FAKE_SETTING {
		cfg.EmailSettings.InviteSalt = Cfg.EmailSettings.InviteSalt
	}
	if cfg.EmailSettings.PasswordResetSalt == model.FAKE_SETTING {
		cfg.EmailSettings.PasswordResetSalt = Cfg.EmailSettings.PasswordResetSalt
	}
	if cfg.EmailSettings.SMTPPassword == model.FAKE_SETTING {
		cfg.EmailSettings.SMTPPassword = Cfg.EmailSettings.SMTPPassword
	}
	if cfg.GitLabSettings.Secret == model.FAKE_SETTING {
		cfg.GitLabSettings.Secret = Cfg.GitLabSettings.Secret
	}
	if cfg.SqlSettings.DataSource == model.FAKE_SETTING {
		cfg.SqlSettings.DataSource = Cfg.SqlSettings.DataSource
	}
	if cfg.SqlSettings.AtRestEncryptKey == model.FAKE_SETTING {
		cfg.SqlSettings.AtRestEncryptKey = Cfg.SqlSettings.AtRestEncryptKey
	}
	// FIX: bound the copy by the live config's replica count — the previous
	// loop indexed Cfg.SqlSettings.DataSourceReplicas[i] unconditionally and
	// panicked when cfg carried more replica entries than the live config.
	for i := range cfg.SqlSettings.DataSourceReplicas {
		if i >= len(Cfg.SqlSettings.DataSourceReplicas) {
			break
		}
		cfg.SqlSettings.DataSourceReplicas[i] = Cfg.SqlSettings.DataSourceReplicas[i]
	}
}
|
package scrape
// The following code was sourced and modified from the
// https://github.com/andrew-d/goscrape package governed by MIT license.
//TODO: add paginator to details
import (
"errors"
"fmt"
"io"
"regexp"
"strings"
"time"
"github.com/slotix/dataflowkit/splash"
"github.com/sirupsen/logrus"
"github.com/PuerkitoBio/goquery"
"github.com/segmentio/ksuid"
"github.com/slotix/dataflowkit/errs"
"github.com/slotix/dataflowkit/extract"
"github.com/slotix/dataflowkit/fetch"
"github.com/slotix/dataflowkit/logger"
"github.com/slotix/dataflowkit/paginate"
"github.com/slotix/dataflowkit/utils"
"github.com/spf13/viper"
"github.com/temoto/robotstxt"
)
// logger is the package-wide structured logger used by all scrape operations.
var logger *logrus.Logger

// init wires the package logger. The boolean argument is forwarded to
// log.NewLogger — presumably it toggles verbose/debug output; confirm in the
// dataflowkit/logger package.
func init() {
	logger = log.NewLogger(true)
}
// NewTask creates new task to parse fetched page following the rules from Payload.
func NewTask(p Payload) *Task {
	// KSUIDs embed a creation timestamp, giving unique, sortable task IDs.
	// https://blog.kowalczyk.info/article/JyRZ/generating-good-random-and-unique-ids-in-go.html
	task := Task{
		ID:      ksuid.New().String(),
		Payload: p,
		Visited: map[string]error{},
		Robots:  map[string]*robotstxt.RobotsData{},
	}
	return &task
}
// Parse processes the task: it builds a scraper from the task payload,
// scrapes the fetched page(s), and returns the encoded results as a stream.
// If scraping with the configured fetcher fails and that fetcher is not
// already "splash", the task retries once with the splash fetcher. The
// output encoding is chosen by Payload.Format (csv/json/xml); any other
// value is an error.
func (task *Task) Parse() (io.ReadCloser, error) {
	scraper, err := task.Payload.newScraper()
	if err != nil {
		return nil, err
	}
	//scrape request and return results.
	results, err := task.scrape(scraper)
	if err != nil {
		// splash was already in use; there is no other fetcher to fall back to.
		if task.Payload.Request.Type() == "splash" {
			return nil, err
		}
		logger.Error(err)
		// Retry once with the splash (JS-rendering) fetcher.
		task.Payload.FetcherType = "splash"
		request, err := task.Payload.initRequest()
		if err != nil {
			return nil, err
		}
		task.Payload.Request = request
		scraper.Request = request
		results, err = task.scrape(scraper)
		if err != nil {
			return nil, err
		}
	}
	//logger.Info(task.Visited)
	var e encoder
	switch strings.ToLower(task.Payload.Format) {
	case "csv":
		e = CSVEncoder{
			comma:     ",",
			partNames: scraper.partNames(),
		}
	case "json":
		e = JSONEncoder{
			paginateResults: *task.Payload.PaginateResults,
		}
	case "xml":
		e = XMLEncoder{}
	default:
		return nil, errors.New("invalid output format specified")
	}
	//logger.Info(results)
	r, err := e.Encode(results)
	if err != nil {
		return nil, err
	}
	return r, err
}
// newScraper assembles a Scraper from the payload: the extraction parts, a
// paginator (a no-op one when the payload defines none), and a page-division
// function derived from the field selectors.
func (p Payload) newScraper() (*Scraper, error) {
	parts, err := p.fields2parts()
	if err != nil {
		return nil, err
	}
	var pag paginate.Paginator = &dummyPaginator{}
	if p.Paginator != nil {
		pag = paginate.BySelector(p.Paginator.Selector, p.Paginator.Attribute)
	}
	selectors, err := p.selectors()
	if err != nil {
		return nil, err
	}
	//TODO: need to test the case when there are no selectors found in payload.
	divide := DividePageBySelector("body")
	if len(selectors) > 0 {
		divide = DividePageByIntersection(selectors)
	}
	// All set!
	return &Scraper{
		Request:    p.Request,
		DividePage: divide,
		Parts:      parts,
		Paginator:  pag,
	}, nil
}
//fields2parts converts payload []field to []scrape.Part, one part per
//extractor type of each field. For href/src fields with Details it also
//builds a nested detail scraper. It validates that the resulting parts have
//unique, non-empty names and non-empty selectors.
func (p Payload) fields2parts() ([]Part, error) {
	parts := []Part{}
	//Payload fields
	for _, f := range p.Fields {
		params := make(map[string]interface{})
		if f.Extractor.Params != nil {
			params = f.Extractor.Params
		}
		var err error
		for _, t := range f.Extractor.Types {
			part := Part{
				Name:     f.Name + "_" + t,
				Selector: f.Selector,
			}
			var e extract.Extractor
			switch strings.ToLower(t) {
			case "text":
				e = &extract.Text{
					Filters: f.Extractor.Filters,
				}
			case "href", "src":
				e = &extract.Attr{
					Attr: t,
					//BaseURL is assigned later, in scrape(), to the current page URL.
				}
				scraper := &Scraper{}
				//******* details
				if f.Details != nil {
					// Build a child scraper for detail pages reached through this
					// link; it inherits the parent payload with the detail-specific
					// fields and paginator swapped in.
					detailsPayload := p
					detailsPayload.Name = f.Name + "Details"
					detailsPayload.Fields = f.Details.Fields
					detailsPayload.Paginator = f.Details.Paginator
					//Request refers to the starting URL here. Requests are changed in scrape() to detail pages afterwards.
					scraper, err = detailsPayload.newScraper()
					if err != nil {
						return nil, err
					}
				} else {
					scraper = nil
				}
				part.Details = scraper
			case "alt":
				e = &extract.Attr{
					Attr:    t,
					Filters: f.Extractor.Filters,
				}
			case "width", "height":
				e = &extract.Attr{Attr: t}
			case "regex":
				r := &extract.Regex{}
				regExp := params["regexp"]
				// NOTE(review): params["regexp"] is assumed to be a string; a
				// missing or non-string value panics here — confirm upstream validation.
				r.Regex = regexp.MustCompile(regExp.(string))
				//it is an obligatory parameter and we don't need to add it again in the later fillStruct() call, so delete it here
				delete(params, "regexp")
				e = r
			case "const":
				// The constant value is injected from params by fillStruct() below.
				e = &extract.Const{}
			case "count":
				e = &extract.Count{}
			case "html":
				e = &extract.Html{}
			// FIX: strings.ToLower(t) can never equal "outerHtml" (capital H),
			// so the OuterHtml extractor was unreachable; match the lowercase form.
			case "outerhtml":
				e = &extract.OuterHtml{}
			default:
				// Unknown types are logged and skipped rather than failing the task.
				logger.Error(errors.New(t + ": Unknown selector type"))
				continue
			}
			part.Extractor = e
			if params != nil {
				// Copy the remaining extractor params onto the extractor struct.
				err := fillStruct(params, e)
				if err != nil {
					logger.Error(err)
				}
			}
			parts = append(parts, part)
		}
	}
	// Validate payload fields
	if len(parts) == 0 {
		return nil, &errs.BadPayload{errs.ErrNoParts}
	}
	seenNames := map[string]struct{}{}
	for i, part := range parts {
		if len(part.Name) == 0 {
			return nil, fmt.Errorf("no name provided for part %d", i)
		}
		if _, seen := seenNames[part.Name]; seen {
			return nil, fmt.Errorf("part %s has a duplicate name", part.Name)
		}
		seenNames[part.Name] = struct{}{}
		if len(part.Selector) == 0 {
			return nil, fmt.Errorf("no selector provided for part %d", i)
		}
	}
	return parts, nil
}
// scrape is the core worker: it follows the rules listed in the task payload,
// walking result pages via the paginator and recursing into detail pages.
// Parsed blocks are grouped per page and returned as Results; visited URLs
// and per-host robots.txt data are recorded on the Task.
func (t *Task) scrape(scraper *Scraper) (*Results, error) {
	output := [][]map[string]interface{}{}
	req := scraper.Request
	url := req.GetURL()
	var numPages int
	// Fetch robots.txt data for this host once and cache it on the task.
	host, err := req.Host()
	if err != nil {
		t.Visited[url] = err
		logger.Error(err)
	}
	if _, ok := t.Robots[host]; !ok {
		robots, err := fetch.RobotstxtData(url)
		if err != nil {
			robotsURL, err1 := fetch.AssembleRobotstxtURL(url)
			if err1 != nil {
				return nil, err1
			}
			// A missing/unreadable robots.txt is logged but not fatal.
			t.Visited[url] = err
			logger.WithFields(
				logrus.Fields{
					"err": err,
				}).Warn("Robots.txt URL: ", robotsURL)
		}
		t.Robots[host] = robots
	}
	for {
		results := []map[string]interface{}{}
		// Record (but do not abort on) URLs forbidden by robots.txt.
		if !fetch.AllowedByRobots(url, t.Robots[host]) {
			t.Visited[url] = &errs.ForbiddenByRobots{url}
		}
		// Repeat until we don't have any more URLs, or until we hit our page limit.
		if len(url) == 0 ||
			(t.Payload.Paginator != nil && (t.Payload.Paginator.MaxPages > 0 && numPages >= t.Payload.Paginator.MaxPages)) {
			break
		}
		//call remote fetcher to download web page
		content, err := fetchContent(req)
		if err != nil {
			return nil, err
		}
		// Create a goquery document.
		doc, err := goquery.NewDocumentFromReader(content)
		if err != nil {
			return nil, err
		}
		t.Visited[url] = nil
		// Divide this page into blocks
		for _, block := range scraper.DividePage(doc.Selection) {
			blockResults := map[string]interface{}{}
			// Process each part of this block
			for _, part := range scraper.Parts {
				sel := block
				if part.Selector != "." {
					sel = sel.Find(part.Selector)
				}
				// For href/src attributes record the current page URL so
				// relative links can be resolved against it.
				switch part.Extractor.(type) {
				case *extract.Attr:
					attr := part.Extractor.(*extract.Attr)
					if attr.Attr == "href" || attr.Attr == "src" {
						attr.BaseURL = url
					}
				}
				extractedPartResults, err := part.Extractor.Extract(sel)
				if err != nil {
					return nil, err
				}
				// A nil response from an extractor means that we don't even include it in
				// the results.
				if extractedPartResults == nil {
					continue
				}
				blockResults[part.Name] = extractedPartResults
				//********* details
				if part.Details != nil {
					// Build one fetch request per extracted link (single string
					// or list of strings) and scrape each detail page recursively.
					var requests []fetch.FetchRequester
					switch extractedPartResults.(type) {
					case string:
						var rq fetch.FetchRequester
						switch t.Payload.Request.Type() {
						case "base":
							rq = &fetch.BaseFetcherRequest{URL: extractedPartResults.(string)}
						case "splash":
							rq = &splash.Request{URL: extractedPartResults.(string)}
						default:
							err := errors.New("invalid fetcher type specified")
							logger.Error(err.Error())
							return nil, err
						}
						requests = append(requests, rq)
					case []string:
						for _, r := range extractedPartResults.([]string) {
							var rq fetch.FetchRequester
							switch t.Payload.Request.Type() {
							case "base":
								rq = &fetch.BaseFetcherRequest{URL: r}
							case "splash":
								rq = &splash.Request{URL: r}
							default:
								err := errors.New("invalid fetcher type specified")
								logger.Error(err.Error())
								return nil, err
							}
							requests = append(requests, rq)
						}
					}
					for _, r := range requests {
						part.Details.Request = r
						// If detail pages live on the same host as the start URL,
						// sleep before fetching to avoid bans and other sanctions.
						detailsHost, err := part.Details.Request.Host()
						if err != nil {
							logger.Error(err)
						}
						if detailsHost == host {
							if !viper.GetBool("IGNORE_FETCH_DELAY") {
								if *t.Payload.RandomizeFetchDelay {
									//Sleep for time equal to FetchDelay * random value between 500 and 1500 msec
									rand := utils.Random(500, 1500)
									delay := *t.Payload.FetchDelay * time.Duration(rand) / 1000
									logger.Infof("%s -> %v", delay, part.Details.Request.GetURL())
									time.Sleep(delay)
								} else {
									time.Sleep(*t.Payload.FetchDelay)
								}
							}
						}
						resDetails, err := t.scrape(part.Details)
						if err != nil {
							return nil, err
						}
						blockResults[part.Name+"_details"] = resDetails.AllBlocks()
					}
				}
				//********* end details
			}
			if len(blockResults) > 0 {
				// Append the results from this block.
				results = append(results, blockResults)
			}
		}
		if len(results) != 0 {
			output = append(output, results)
		}
		numPages++
		// Get the next page. If an empty URL is produced there are no more pages.
		// FIX: the previous code dereferenced t.Payload.Paginator.InfiniteScroll
		// without a nil check and panicked when the payload had no paginator;
		// with no paginator configured we scrape exactly one page.
		if t.Payload.Paginator == nil {
			url = ""
		} else if t.Payload.Paginator.InfiniteScroll {
			// Infinite-scroll pages are rendered in full by the fetcher;
			// there is no separate next-page URL to follow.
			url = ""
		} else {
			url, err = scraper.Paginator.NextPage(url, doc.Selection)
			if err != nil {
				return nil, err
			}
		}
		if url != "" {
			var rq fetch.FetchRequester
			switch req.Type() {
			case "splash":
				rq = &splash.Request{URL: url}
			case "base":
				rq = &fetch.BaseFetcherRequest{URL: url}
			default:
				err := errors.New("invalid fetcher type specified")
				logger.Error(err.Error())
				return nil, err
			}
			req = rq
			if !viper.GetBool("IGNORE_FETCH_DELAY") {
				if *t.Payload.RandomizeFetchDelay {
					//Sleep for time equal to FetchDelay * random value between 500 and 1500 msec
					rand := utils.Random(500, 1500)
					delay := *t.Payload.FetchDelay * time.Duration(rand) / 1000
					logger.Infof("%s -> %v", delay, req.GetURL())
					time.Sleep(delay)
				} else {
					time.Sleep(*t.Payload.FetchDelay)
				}
			}
		}
	}
	if len(output) == 0 {
		return nil, &errs.BadPayload{errs.ErrEmptyResults}
	}
	// All good!
	// FIX: return nil explicitly — the old `return &Results{output}, err`
	// could hand back a stale, non-fatal req.Host() error together with
	// perfectly valid results, making callers treat success as failure.
	return &Results{output}, nil
}
//selectors collects the non-empty CSS selectors of all payload fields; it
//returns an ErrNoSelectors payload error when no field defines one.
func (p Payload) selectors() ([]string, error) {
	out := []string{}
	for _, field := range p.Fields {
		if s := field.Selector; s != "" {
			out = append(out, s)
		}
	}
	if len(out) == 0 {
		return nil, &errs.BadPayload{errs.ErrNoSelectors}
	}
	return out, nil
}
//fetchContent sends the request to the fetch service (address taken from the
//DFK_FETCH setting) and returns the fetched page body as a stream.
func fetchContent(req fetch.FetchRequester) (io.ReadCloser, error) {
	svc, err := fetch.NewHTTPClient(viper.GetString("DFK_FETCH"))
	if err != nil {
		// FIX: the error was previously only logged and the (nil) client was
		// used anyway, causing a nil pointer dereference on svc.Response.
		logger.Error(err)
		return nil, err
	}
	resp, err := svc.Response(req)
	if err != nil {
		logger.Error(err)
		return nil, err
	}
	return resp.GetHTML()
}
//partNames returns the part names, used as the header row of CSV output.
func (s Scraper) partNames() []string {
	names := make([]string, 0, len(s.Parts))
	for i := range s.Parts {
		names = append(names, s.Parts[i].Name)
	}
	return names
}
// First returns the first set of results - i.e. the results from the first
// block on the first page.
// This function can return nil if there were no blocks found on the first page
// of the scrape.
func (r *Results) First() map[string]interface{} {
	// FIX: also guard against a scrape that produced no pages at all —
	// indexing r.Output[0] unconditionally panicked on an empty Output.
	if len(r.Output) == 0 || len(r.Output[0]) == 0 {
		return nil
	}
	return r.Output[0][0]
}
// AllBlocks returns a single flat list of results from every block on all
// pages. It always returns a (possibly empty) list, never nil.
func (r *Results) AllBlocks() []map[string]interface{} {
	ret := []map[string]interface{}{}
	for _, page := range r.Output {
		ret = append(ret, page...)
	}
	return ret
}
//startTime recovers the task creation time from the KSUID-encoded task ID
//(KSUIDs embed a timestamp in their leading bytes).
func (t Task) startTime() (*time.Time, error) {
	parsed, err := ksuid.Parse(t.ID)
	if err != nil {
		return nil, err
	}
	started := parsed.Time()
	return &started, nil
}
fix paginator
package scrape
// The following code was sourced and modified from the
// https://github.com/andrew-d/goscrape package governed by MIT license.
//TODO: add paginator to details
import (
"errors"
"fmt"
"io"
"regexp"
"strings"
"time"
"github.com/slotix/dataflowkit/splash"
"github.com/sirupsen/logrus"
"github.com/PuerkitoBio/goquery"
"github.com/segmentio/ksuid"
"github.com/slotix/dataflowkit/errs"
"github.com/slotix/dataflowkit/extract"
"github.com/slotix/dataflowkit/fetch"
"github.com/slotix/dataflowkit/logger"
"github.com/slotix/dataflowkit/paginate"
"github.com/slotix/dataflowkit/utils"
"github.com/spf13/viper"
"github.com/temoto/robotstxt"
)
// logger is the package-wide structured logger used by all scrape operations.
var logger *logrus.Logger

// init wires the package logger. The boolean argument is forwarded to
// log.NewLogger — presumably it toggles verbose/debug output; confirm in the
// dataflowkit/logger package.
func init() {
	logger = log.NewLogger(true)
}
// NewTask creates new task to parse fetched page following the rules from Payload.
func NewTask(p Payload) *Task {
	// A KSUID embeds a creation timestamp, giving unique, sortable task IDs.
	// https://blog.kowalczyk.info/article/JyRZ/generating-good-random-and-unique-ids-in-go.html
	newID := ksuid.New()
	return &Task{
		ID:      newID.String(),
		Payload: p,
		Visited: map[string]error{},
		Robots:  map[string]*robotstxt.RobotsData{},
	}
}
// Parse processes the task: it builds a scraper from the task payload,
// scrapes the fetched page(s), and returns the encoded results as a stream.
// If scraping with the configured fetcher fails and that fetcher is not
// already "splash", the task retries once with the splash fetcher. The
// output encoding is chosen by Payload.Format (csv/json/xml); any other
// value is an error.
func (task *Task) Parse() (io.ReadCloser, error) {
	scraper, err := task.Payload.newScraper()
	if err != nil {
		return nil, err
	}
	//scrape request and return results.
	results, err := task.scrape(scraper)
	if err != nil {
		// splash was already in use; there is no other fetcher to fall back to.
		if task.Payload.Request.Type() == "splash" {
			return nil, err
		}
		logger.Error(err)
		// Retry once with the splash (JS-rendering) fetcher.
		task.Payload.FetcherType = "splash"
		request, err := task.Payload.initRequest()
		if err != nil {
			return nil, err
		}
		task.Payload.Request = request
		scraper.Request = request
		results, err = task.scrape(scraper)
		if err != nil {
			return nil, err
		}
	}
	//logger.Info(task.Visited)
	var e encoder
	switch strings.ToLower(task.Payload.Format) {
	case "csv":
		e = CSVEncoder{
			comma:     ",",
			partNames: scraper.partNames(),
		}
	case "json":
		e = JSONEncoder{
			paginateResults: *task.Payload.PaginateResults,
		}
	case "xml":
		e = XMLEncoder{}
	default:
		return nil, errors.New("invalid output format specified")
	}
	//logger.Info(results)
	r, err := e.Encode(results)
	if err != nil {
		return nil, err
	}
	return r, err
}
// newScraper assembles a Scraper from the payload: the extraction parts, a
// paginator (a no-op one when the payload defines none), and a page-division
// function derived from the field selectors.
func (p Payload) newScraper() (*Scraper, error) {
	parts, err := p.fields2parts()
	if err != nil {
		return nil, err
	}
	var pag paginate.Paginator = &dummyPaginator{}
	if p.Paginator != nil {
		pag = paginate.BySelector(p.Paginator.Selector, p.Paginator.Attribute)
	}
	selectors, err := p.selectors()
	if err != nil {
		return nil, err
	}
	//TODO: need to test the case when there are no selectors found in payload.
	divide := DividePageBySelector("body")
	if len(selectors) > 0 {
		divide = DividePageByIntersection(selectors)
	}
	// All set!
	return &Scraper{
		Request:    p.Request,
		DividePage: divide,
		Parts:      parts,
		Paginator:  pag,
	}, nil
}
//fields2parts converts payload []field to []scrape.Part, one part per
//extractor type of each field. For href/src fields with Details it also
//builds a nested detail scraper. It validates that the resulting parts have
//unique, non-empty names and non-empty selectors.
func (p Payload) fields2parts() ([]Part, error) {
	parts := []Part{}
	//Payload fields
	for _, f := range p.Fields {
		params := make(map[string]interface{})
		if f.Extractor.Params != nil {
			params = f.Extractor.Params
		}
		var err error
		for _, t := range f.Extractor.Types {
			part := Part{
				Name:     f.Name + "_" + t,
				Selector: f.Selector,
			}
			var e extract.Extractor
			switch strings.ToLower(t) {
			case "text":
				e = &extract.Text{
					Filters: f.Extractor.Filters,
				}
			case "href", "src":
				e = &extract.Attr{
					Attr: t,
					//BaseURL is assigned later, in scrape(), to the current page URL.
				}
				scraper := &Scraper{}
				//******* details
				if f.Details != nil {
					// Build a child scraper for detail pages reached through this
					// link; it inherits the parent payload with the detail-specific
					// fields and paginator swapped in.
					detailsPayload := p
					detailsPayload.Name = f.Name + "Details"
					detailsPayload.Fields = f.Details.Fields
					detailsPayload.Paginator = f.Details.Paginator
					//Request refers to the starting URL here. Requests are changed in scrape() to detail pages afterwards.
					scraper, err = detailsPayload.newScraper()
					if err != nil {
						return nil, err
					}
				} else {
					scraper = nil
				}
				part.Details = scraper
			case "alt":
				e = &extract.Attr{
					Attr:    t,
					Filters: f.Extractor.Filters,
				}
			case "width", "height":
				e = &extract.Attr{Attr: t}
			case "regex":
				r := &extract.Regex{}
				regExp := params["regexp"]
				// NOTE(review): params["regexp"] is assumed to be a string; a
				// missing or non-string value panics here — confirm upstream validation.
				r.Regex = regexp.MustCompile(regExp.(string))
				//it is an obligatory parameter and we don't need to add it again in the later fillStruct() call, so delete it here
				delete(params, "regexp")
				e = r
			case "const":
				// The constant value is injected from params by fillStruct() below.
				e = &extract.Const{}
			case "count":
				e = &extract.Count{}
			case "html":
				e = &extract.Html{}
			// FIX: strings.ToLower(t) can never equal "outerHtml" (capital H),
			// so the OuterHtml extractor was unreachable; match the lowercase form.
			case "outerhtml":
				e = &extract.OuterHtml{}
			default:
				// Unknown types are logged and skipped rather than failing the task.
				logger.Error(errors.New(t + ": Unknown selector type"))
				continue
			}
			part.Extractor = e
			if params != nil {
				// Copy the remaining extractor params onto the extractor struct.
				err := fillStruct(params, e)
				if err != nil {
					logger.Error(err)
				}
			}
			parts = append(parts, part)
		}
	}
	// Validate payload fields
	if len(parts) == 0 {
		return nil, &errs.BadPayload{errs.ErrNoParts}
	}
	seenNames := map[string]struct{}{}
	for i, part := range parts {
		if len(part.Name) == 0 {
			return nil, fmt.Errorf("no name provided for part %d", i)
		}
		if _, seen := seenNames[part.Name]; seen {
			return nil, fmt.Errorf("part %s has a duplicate name", part.Name)
		}
		seenNames[part.Name] = struct{}{}
		if len(part.Selector) == 0 {
			return nil, fmt.Errorf("no selector provided for part %d", i)
		}
	}
	return parts, nil
}
// scrape is a core function which follows the rules listed in the task
// payload, walking result pages via the paginator and recursing into detail
// pages. Parsed blocks are grouped per page and returned as Results; visited
// URLs and per-host robots.txt data are recorded on the Task.
//
// NOTE(review): when t.Payload.Paginator is nil, url is never cleared below,
// so a payload without a paginator can loop refetching the same page forever
// — confirm intended behavior. Also, the final `return ..., err` may carry a
// stale, non-fatal error from req.Host() alongside valid results.
func (t *Task) scrape(scraper *Scraper) (*Results, error) {
	output := [][]map[string]interface{}{}
	req := scraper.Request
	url := req.GetURL()
	var numPages int
	// Fetch robots.txt data for this host once and cache it on the task.
	host, err := req.Host()
	if err != nil {
		t.Visited[url] = err
		logger.Error(err)
	}
	if _, ok := t.Robots[host]; !ok {
		robots, err := fetch.RobotstxtData(url)
		if err != nil {
			robotsURL, err1 := fetch.AssembleRobotstxtURL(url)
			if err1 != nil {
				return nil, err1
			}
			// A missing/unreadable robots.txt is logged but not fatal.
			t.Visited[url] = err
			logger.WithFields(
				logrus.Fields{
					"err": err,
				}).Warn("Robots.txt URL: ", robotsURL)
		}
		t.Robots[host] = robots
	}
	for {
		results := []map[string]interface{}{}
		// Record (but do not abort on) URLs forbidden by robots.txt.
		if !fetch.AllowedByRobots(url, t.Robots[host]) {
			t.Visited[url] = &errs.ForbiddenByRobots{url}
		}
		// Repeat until we don't have any more URLs, or until we hit our page limit.
		if len(url) == 0 ||
			(t.Payload.Paginator != nil && (t.Payload.Paginator.MaxPages > 0 && numPages >= t.Payload.Paginator.MaxPages)) {
			break
		}
		//call remote fetcher to download web page
		content, err := fetchContent(req)
		if err != nil {
			return nil, err
		}
		// Create a goquery document.
		doc, err := goquery.NewDocumentFromReader(content)
		if err != nil {
			return nil, err
		}
		t.Visited[url] = nil
		// Divide this page into blocks
		for _, block := range scraper.DividePage(doc.Selection) {
			blockResults := map[string]interface{}{}
			// Process each part of this block
			for _, part := range scraper.Parts {
				sel := block
				if part.Selector != "." {
					sel = sel.Find(part.Selector)
				}
				// For href/src attributes record the current page URL so
				// relative links can be resolved against it.
				switch part.Extractor.(type) {
				case *extract.Attr:
					attr := part.Extractor.(*extract.Attr)
					if attr.Attr == "href" || attr.Attr == "src" {
						attr.BaseURL = url
					}
				}
				extractedPartResults, err := part.Extractor.Extract(sel)
				if err != nil {
					return nil, err
				}
				// A nil response from an extractor means that we don't even include it in
				// the results.
				if extractedPartResults == nil {
					continue
				}
				blockResults[part.Name] = extractedPartResults
				//********* details
				if part.Details != nil {
					// Build one fetch request per extracted link (single string
					// or list of strings) and scrape each detail page recursively.
					var requests []fetch.FetchRequester
					switch extractedPartResults.(type) {
					case string:
						var rq fetch.FetchRequester
						switch t.Payload.Request.Type() {
						case "base":
							rq = &fetch.BaseFetcherRequest{URL: extractedPartResults.(string)}
						case "splash":
							rq = &splash.Request{URL: extractedPartResults.(string)}
						default:
							err := errors.New("invalid fetcher type specified")
							logger.Error(err.Error())
							return nil, err
						}
						requests = append(requests, rq)
					case []string:
						for _, r := range extractedPartResults.([]string) {
							var rq fetch.FetchRequester
							switch t.Payload.Request.Type() {
							case "base":
								rq = &fetch.BaseFetcherRequest{URL: r}
							case "splash":
								rq = &splash.Request{URL: r}
							default:
								err := errors.New("invalid fetcher type specified")
								logger.Error(err.Error())
								return nil, err
							}
							requests = append(requests, rq)
						}
					}
					for _, r := range requests {
						part.Details.Request = r
						//check if domain is the same for initial URL and details' URLs
						//If original host is the same as details' host sleep for some time before fetching of details page to avoid ban and other sanctions
						detailsHost, err := part.Details.Request.Host()
						if err != nil {
							logger.Error(err)
						}
						if detailsHost == host {
							if !viper.GetBool("IGNORE_FETCH_DELAY") {
								if *t.Payload.RandomizeFetchDelay {
									//Sleep for time equal to FetchDelay * random value between 500 and 1500 msec
									rand := utils.Random(500, 1500)
									delay := *t.Payload.FetchDelay * time.Duration(rand) / 1000
									logger.Infof("%s -> %v", delay, part.Details.Request.GetURL())
									time.Sleep(delay)
								} else {
									time.Sleep(*t.Payload.FetchDelay)
								}
							}
						}
						resDetails, err := t.scrape(part.Details)
						if err != nil {
							return nil, err
						}
						blockResults[part.Name+"_details"] = resDetails.AllBlocks()
					}
				}
				//********* end details
			}
			if len(blockResults) > 0 {
				// Append the results from this block.
				results = append(results, blockResults)
			}
		}
		if len(results) != 0 {
			output = append(output, results)
		}
		numPages++
		// Get the next page. If empty URL is returned there is no Next Pages to proceed.
		if t.Payload.Paginator != nil {
			if t.Payload.Paginator.InfiniteScroll {
				// Infinite-scroll pages are rendered in full by the fetcher;
				// there is no separate next-page URL to follow.
				url = ""
			} else {
				url, err = scraper.Paginator.NextPage(url, doc.Selection)
				if err != nil {
					return nil, err
				}
			}
		}
		if url != "" {
			var rq fetch.FetchRequester
			switch req.Type() {
			case "splash":
				rq = &splash.Request{URL: url}
			case "base":
				rq = &fetch.BaseFetcherRequest{URL: url}
			default:
				err := errors.New("invalid fetcher type specified")
				logger.Error(err.Error())
				return nil, err
			}
			req = rq
			if !viper.GetBool("IGNORE_FETCH_DELAY") {
				if *t.Payload.RandomizeFetchDelay {
					//Sleep for time equal to FetchDelay * random value between 500 and 1500 msec
					rand := utils.Random(500, 1500)
					delay := *t.Payload.FetchDelay * time.Duration(rand) / 1000
					logger.Infof("%s -> %v", delay, req.GetURL())
					time.Sleep(delay)
				} else {
					time.Sleep(*t.Payload.FetchDelay)
				}
			}
		}
	}
	if len(output) == 0 {
		return nil, &errs.BadPayload{errs.ErrEmptyResults}
	}
	// All good!
	return &Results{output}, err
}
//selectors collects the non-empty CSS selectors of all payload fields; it
//returns an ErrNoSelectors payload error when no field defines one.
func (p Payload) selectors() ([]string, error) {
	out := []string{}
	for _, field := range p.Fields {
		if s := field.Selector; s != "" {
			out = append(out, s)
		}
	}
	if len(out) == 0 {
		return nil, &errs.BadPayload{errs.ErrNoSelectors}
	}
	return out, nil
}
//fetchContent sends the request to the fetch service (address taken from the
//DFK_FETCH setting) and returns the fetched page body as a stream.
func fetchContent(req fetch.FetchRequester) (io.ReadCloser, error) {
	svc, err := fetch.NewHTTPClient(viper.GetString("DFK_FETCH"))
	if err != nil {
		// FIX: the error was previously only logged and the (nil) client was
		// used anyway, causing a nil pointer dereference on svc.Response.
		logger.Error(err)
		return nil, err
	}
	resp, err := svc.Response(req)
	if err != nil {
		logger.Error(err)
		return nil, err
	}
	return resp.GetHTML()
}
//partNames returns the part names, used as the header row of CSV output.
func (s Scraper) partNames() []string {
	names := make([]string, 0, len(s.Parts))
	for i := range s.Parts {
		names = append(names, s.Parts[i].Name)
	}
	return names
}
// First returns the first set of results - i.e. the results from the first
// block on the first page.
// This function can return nil if there were no blocks found on the first page
// of the scrape.
func (r *Results) First() map[string]interface{} {
	// FIX: also guard against a scrape that produced no pages at all —
	// indexing r.Output[0] unconditionally panicked on an empty Output.
	if len(r.Output) == 0 || len(r.Output[0]) == 0 {
		return nil
	}
	return r.Output[0][0]
}
// AllBlocks returns a single flat list of results from every block on all
// pages. It always returns a (possibly empty) list, never nil.
func (r *Results) AllBlocks() []map[string]interface{} {
	ret := []map[string]interface{}{}
	for _, page := range r.Output {
		ret = append(ret, page...)
	}
	return ret
}
//startTime recovers the task creation time from the KSUID-encoded task ID
//(KSUIDs embed a timestamp in their leading bytes).
func (t Task) startTime() (*time.Time, error) {
	parsed, err := ksuid.Parse(t.ID)
	if err != nil {
		return nil, err
	}
	started := parsed.Time()
	return &started, nil
}
|
package postgres
import (
"database/sql"
"encoding/json"
"fmt"
"sort"
"strconv"
"strings"
"time"
"regexp"
"github.com/lfq7413/tomato/errs"
"github.com/lfq7413/tomato/types"
"github.com/lfq7413/tomato/utils"
"github.com/lib/pq"
)
// postgresSchemaCollectionName is the metadata table storing one row
// (className, schema JSON, isParseClass flag) per class.
const postgresSchemaCollectionName = "_SCHEMA"

// PostgreSQL SQLSTATE codes checked when classifying *pq.Error values.
const postgresRelationDoesNotExistError = "42P01" // undefined_table
const postgresDuplicateRelationError = "42P07"    // duplicate_table
const postgresDuplicateColumnError = "42701"      // duplicate_column
const postgresDuplicateObjectError = "42710"      // duplicate_object
const postgresUniqueIndexViolationError = "23505" // unique_violation
const postgresTransactionAbortedError = "25P02"   // in_failed_sql_transaction
// PostgresAdapter is the PostgreSQL database adapter.
type PostgresAdapter struct {
	collectionPrefix string   // prefix prepended to collection (table) names
	collectionList   []string // cached list of known collections
	db               *sql.DB  // underlying database handle
}
// NewPostgresAdapter builds an adapter bound to db, prefixing collection
// names with collectionPrefix. The collection cache starts empty.
func NewPostgresAdapter(collectionPrefix string, db *sql.DB) *PostgresAdapter {
	adapter := PostgresAdapter{db: db}
	adapter.collectionPrefix = collectionPrefix
	adapter.collectionList = []string{}
	return &adapter
}
// ensureSchemaCollectionExists makes sure the "_SCHEMA" metadata table
// exists, creating it if necessary. Duplicate-creation races with other
// requests are tolerated; any other database error is returned.
func (p *PostgresAdapter) ensureSchemaCollectionExists() error {
	_, err := p.db.Exec(`CREATE TABLE IF NOT EXISTS "_SCHEMA" ( "className" varChar(120), "schema" jsonb, "isParseClass" bool, PRIMARY KEY ("className") )`)
	if err == nil {
		return nil
	}
	if e, ok := err.(*pq.Error); ok {
		if e.Code == postgresDuplicateRelationError || e.Code == postgresUniqueIndexViolationError || e.Code == postgresDuplicateObjectError {
			// _SCHEMA already exists (created by a concurrent request); ignore.
			return nil
		}
	}
	// FIX: previously any *pq.Error with an unexpected code (permissions,
	// disk, aborted transaction, ...) fell through and nil was returned,
	// silently swallowing the failure.
	return err
}
// ClassExists reports whether a table named name exists in the database.
// Query errors are treated as "does not exist".
func (p *PostgresAdapter) ClassExists(name string) bool {
	const q = `SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = $1)`
	exists := false
	if err := p.db.QueryRow(q, name).Scan(&exists); err != nil {
		return false
	}
	return exists
}
// SetClassLevelPermissions stores CLPs as the "classLevelPermissions" key of
// className's schema JSON in the "_SCHEMA" table. A nil CLPs is treated as
// an empty permission set.
func (p *PostgresAdapter) SetClassLevelPermissions(className string, CLPs types.M) error {
	if err := p.ensureSchemaCollectionExists(); err != nil {
		return err
	}
	if CLPs == nil {
		CLPs = types.M{}
	}
	encoded, err := json.Marshal(CLPs)
	if err != nil {
		return err
	}
	const qs = `UPDATE "_SCHEMA" SET "schema" = json_object_set_key("schema", $1::text, $2::jsonb) WHERE "className"=$3 `
	if _, err = p.db.Exec(qs, "classLevelPermissions", string(encoded), className); err != nil {
		return err
	}
	return nil
}
// CreateClass creates a class: it creates the backing table for className
// and inserts the serialized schema into "_SCHEMA". It returns the schema
// converted to Parse format, or a DuplicateValue error when the class row
// already exists.
func (p *PostgresAdapter) CreateClass(className string, schema types.M) (types.M, error) {
	if schema == nil {
		schema = types.M{}
	}
	schema["className"] = className
	b, err := json.Marshal(schema)
	if err != nil {
		return nil, err
	}
	err = p.createTable(className, schema)
	if err != nil {
		return nil, err
	}
	_, err = p.db.Exec(`INSERT INTO "_SCHEMA" ("className", "schema", "isParseClass") VALUES ($1, $2, $3)`, className, string(b), true)
	if err != nil {
		if e, ok := err.(*pq.Error); ok {
			// Unique violation on "className": the class already exists.
			if e.Code == postgresUniqueIndexViolationError {
				return nil, errs.E(errs.DuplicateValue, "Class "+className+" already exists.")
			}
		}
		return nil, err
	}
	return toParseSchema(schema), nil
}
// createTable creates only the table for className (it does not register the
// schema in "_SCHEMA"), plus one join table per Relation field.
// NOTE(review): className and field names are spliced into the SQL via
// fmt.Sprintf — identifiers cannot be bound as parameters, so callers must
// guarantee these names are validated/trusted before reaching this point.
func (p *PostgresAdapter) createTable(className string, schema types.M) error {
	if schema == nil {
		schema = types.M{}
	}
	valuesArray := types.S{}
	patternsArray := []string{}
	fields := utils.M(schema["fields"])
	if fields == nil {
		fields = types.M{}
	}
	// _User carries extra built-in authentication bookkeeping columns.
	if className == "_User" {
		fields["_email_verify_token_expires_at"] = types.M{"type": "Date"}
		fields["_email_verify_token"] = types.M{"type": "String"}
		fields["_account_lockout_expires_at"] = types.M{"type": "Date"}
		fields["_failed_login_count"] = types.M{"type": "Number"}
		fields["_perishable_token"] = types.M{"type": "String"}
		fields["_perishable_token_expires_at"] = types.M{"type": "Date"}
		fields["_password_changed_at"] = types.M{"type": "Date"}
		fields["_password_history"] = types.M{"type": "Array"}
	}
	relations := []string{}
	for fieldName, t := range fields {
		parseType := utils.M(t)
		if parseType == nil {
			parseType = types.M{}
		}
		// Relation fields get a separate join table instead of a column.
		if utils.S(parseType["type"]) == "Relation" {
			relations = append(relations, fieldName)
			continue
		}
		if fieldName == "_rperm" || fieldName == "_wperm" {
			parseType["contents"] = types.M{"type": "String"}
		}
		// Collect `"name" type` column fragments; patterns are filled with
		// valuesArray via Sprintf below.
		valuesArray = append(valuesArray, fieldName)
		postgresType, err := parseTypeToPostgresType(parseType)
		if err != nil {
			return err
		}
		valuesArray = append(valuesArray, postgresType)
		patternsArray = append(patternsArray, `"%s" %s`)
		if fieldName == "objectId" {
			valuesArray = append(valuesArray, fieldName)
			patternsArray = append(patternsArray, `PRIMARY KEY ("%s")`)
		}
	}
	qs := `CREATE TABLE IF NOT EXISTS "%s" (` + strings.Join(patternsArray, ",") + `)`
	values := append(types.S{className}, valuesArray...)
	qs = fmt.Sprintf(qs, values...)
	err := p.ensureSchemaCollectionExists()
	if err != nil {
		return err
	}
	_, err = p.db.Exec(qs)
	if err != nil {
		if e, ok := err.(*pq.Error); ok {
			if e.Code == postgresDuplicateRelationError {
				// Table already exists (created by a concurrent request); ignore.
			} else {
				return err
			}
		} else {
			return err
		}
	}
	// Create one join table per Relation field.
	for _, fieldName := range relations {
		name := fmt.Sprintf(`_Join:%s:%s`, fieldName, className)
		_, err = p.db.Exec(fmt.Sprintf(`CREATE TABLE IF NOT EXISTS "%s" ("relatedId" varChar(120), "owningId" varChar(120), PRIMARY KEY("relatedId", "owningId") )`, name))
		if err != nil {
			return err
		}
	}
	return nil
}
// AddFieldIfNotExists adds a column (or a relation join table) for fieldName
// to className and records the field in the class's "_SCHEMA" row. Adding a
// field that is already registered is a no-op; if the table itself is
// missing it is created with this single field.
func (p *PostgresAdapter) AddFieldIfNotExists(className, fieldName string, fieldType types.M) error {
	if fieldType == nil {
		fieldType = types.M{}
	}
	if utils.S(fieldType["type"]) != "Relation" {
		tp, err := parseTypeToPostgresType(fieldType)
		if err != nil {
			return err
		}
		qs := fmt.Sprintf(`ALTER TABLE "%s" ADD COLUMN "%s" %s`, className, fieldName, tp)
		_, err = p.db.Exec(qs)
		if err != nil {
			if e, ok := err.(*pq.Error); ok {
				if e.Code == postgresRelationDoesNotExistError {
					// Table missing: create the class with this single field.
					// TODO add default fields as well.
					_, ce := p.CreateClass(className, types.M{"fields": types.M{fieldName: fieldType}})
					if ce != nil {
						return ce
					}
				} else if e.Code == postgresDuplicateColumnError {
					// Column already exists (created by a concurrent request); ignore.
				} else {
					return err
				}
			} else {
				return err
			}
		}
	} else {
		// Relation fields are stored as a join table, not a column.
		name := fmt.Sprintf(`_Join:%s:%s`, fieldName, className)
		qs := fmt.Sprintf(`CREATE TABLE IF NOT EXISTS "%s" ("relatedId" varChar(120), "owningId" varChar(120), PRIMARY KEY("relatedId", "owningId") )`, name)
		_, err := p.db.Exec(qs)
		if err != nil {
			return err
		}
	}
	qs := `SELECT "schema" FROM "_SCHEMA" WHERE "className" = $1`
	rows, err := p.db.Query(qs, className)
	if err != nil {
		return err
	}
	// FIX: close the result set — the previous code leaked rows (and the
	// underlying connection) on every call.
	defer rows.Close()
	if rows.Next() {
		var sch types.M
		var v []byte
		err := rows.Scan(&v)
		if err != nil {
			return err
		}
		err = json.Unmarshal(v, &sch)
		if err != nil {
			return err
		}
		if sch == nil {
			sch = types.M{}
		}
		var fields types.M
		if v := utils.M(sch["fields"]); v != nil {
			fields = v
		} else {
			fields = types.M{}
		}
		if _, ok := fields[fieldName]; ok {
			// Field already registered (e.g. the table was just created above);
			// nothing more to do.
			return nil
		}
		fields[fieldName] = fieldType
		sch["fields"] = fields
		b, err := json.Marshal(sch)
		// FIX: the Marshal error was previously ignored before using b.
		if err != nil {
			return err
		}
		qs := `UPDATE "_SCHEMA" SET "schema"=$1 WHERE "className"=$2`
		_, err = p.db.Exec(qs, b, className)
		if err != nil {
			return err
		}
	}
	return nil
}
// DeleteClass drops the table backing className and removes its row from
// the "_SCHEMA" bookkeeping table.
func (p *PostgresAdapter) DeleteClass(className string) (types.M, error) {
	if _, err := p.db.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS "%s"`, className)); err != nil {
		return nil, err
	}
	if _, err := p.db.Exec(`DELETE FROM "_SCHEMA" WHERE "className"=$1`, className); err != nil {
		return nil, err
	}
	return types.M{}, nil
}
// DeleteAllClasses drops every class table, every relation join table, and
// the internal bookkeeping tables. For test use only.
func (p *PostgresAdapter) DeleteAllClasses() error {
	qs := `SELECT "className","schema" FROM "_SCHEMA"`
	rows, err := p.db.Query(qs)
	if err != nil {
		if e, ok := err.(*pq.Error); ok && e.Code == postgresRelationDoesNotExistError {
			// "_SCHEMA" does not exist: nothing to delete.
			return nil
		}
		return err
	}
	// Previously the result set was never closed.
	defer rows.Close()
	classNames := []string{}
	schemas := []types.M{}
	for rows.Next() {
		var clsName string
		var sch types.M
		var v []byte
		err := rows.Scan(&clsName, &v)
		if err != nil {
			return err
		}
		err = json.Unmarshal(v, &sch)
		if err != nil {
			return err
		}
		classNames = append(classNames, clsName)
		schemas = append(schemas, sch)
	}
	// Surface iteration errors that would otherwise truncate the list silently.
	if err := rows.Err(); err != nil {
		return err
	}
	joins := []string{}
	for _, sch := range schemas {
		joins = append(joins, joinTablesForSchema(sch)...)
	}
	classes := []string{"_SCHEMA", "_PushStatus", "_JobStatus", "_Hooks", "_GlobalConfig"}
	classes = append(classes, classNames...)
	classes = append(classes, joins...)
	for _, name := range classes {
		// Best effort: a failure on one table must not stop the rest.
		qs = fmt.Sprintf(`DROP TABLE IF EXISTS "%s"`, name)
		p.db.Exec(qs)
	}
	return nil
}
// DeleteFields removes fieldNames from className: the fields are removed
// from the stored schema and, except for Relation fields (which have no
// backing column), dropped from the table itself.
func (p *PostgresAdapter) DeleteFields(className string, schema types.M, fieldNames []string) error {
	if schema == nil {
		schema = types.M{}
	}
	fields := utils.M(schema["fields"])
	if fields == nil {
		fields = types.M{}
	}
	fldNames := types.S{}
	for _, fieldName := range fieldNames {
		field := utils.M(fields[fieldName])
		if field != nil && utils.S(field["type"]) == "Relation" {
			// Relation fields have no column to drop.
		} else {
			fldNames = append(fldNames, fieldName)
		}
		delete(fields, fieldName)
	}
	schema["fields"] = fields
	values := append(types.S{className}, fldNames...)
	columnArray := []string{}
	// One "%s" placeholder per dropped column (was `for _ = range`, which
	// go vet flags; `for range` is the idiomatic form).
	for range fldNames {
		columnArray = append(columnArray, `"%s"`)
	}
	columns := strings.Join(columnArray, ", DROP COLUMN ")
	b, err := json.Marshal(schema)
	if err != nil {
		return err
	}
	qs := `UPDATE "_SCHEMA" SET "schema"=$1 WHERE "className"=$2`
	_, err = p.db.Exec(qs, b, className)
	if err != nil {
		return err
	}
	// Only issue ALTER TABLE when at least one non-Relation field remains.
	if len(values) > 1 {
		qs = fmt.Sprintf(`ALTER TABLE "%%s" DROP COLUMN %s`, columns)
		qs = fmt.Sprintf(qs, values...)
		_, err = p.db.Exec(qs)
		if err != nil {
			return err
		}
	}
	return nil
}
// CreateObject inserts object into the table for className.
//
// Column values are serialized according to the field types in schema:
// Date/Pointer/File objects are flattened to their primitive value,
// Array/Object values are JSON-encoded (with _rperm/_wperm rewritten to
// Postgres text-array literal syntax), and GeoPoint fields are collected
// separately so they can be emitted as POINT($lon, $lat) expressions.
// Returns a DuplicateValue error on unique-index violations.
func (p *PostgresAdapter) CreateObject(className string, schema, object types.M) error {
	columnsArray := []string{}
	valuesArray := types.S{}
	geoPoints := types.M{}
	if schema == nil {
		schema = types.M{}
	}
	if len(object) == 0 {
		// Nothing to insert.
		return nil
	}
	schema = toPostgresSchema(schema)
	object = handleDotFields(object)
	err := validateKeys(object)
	if err != nil {
		return err
	}
	// Fold _auth_data_<provider> keys into the authData map first, to avoid
	// the instability of adding entries to a map while ranging over it below.
	for fieldName := range object {
		re := regexp.MustCompile(`^_auth_data_([a-zA-Z0-9_]+)$`)
		authDataMatch := re.FindStringSubmatch(fieldName)
		if authDataMatch != nil && len(authDataMatch) == 2 {
			provider := authDataMatch[1]
			authData := utils.M(object["authData"])
			if authData == nil {
				authData = types.M{}
			}
			authData[provider] = object[fieldName]
			delete(object, fieldName)
			object["authData"] = authData
		}
	}
	for fieldName := range object {
		columnsArray = append(columnsArray, fieldName)
		fields := utils.M(schema["fields"])
		if fields == nil {
			fields = types.M{}
		}
		// _User bookkeeping columns are not part of the declared schema;
		// serialize them by name.
		if fields[fieldName] == nil && className == "_User" {
			if fieldName == "_email_verify_token" ||
				fieldName == "_failed_login_count" ||
				fieldName == "_perishable_token" {
				valuesArray = append(valuesArray, object[fieldName])
			}
			if fieldName == "_password_history" {
				b, err := json.Marshal(object[fieldName])
				if err != nil {
					return err
				}
				valuesArray = append(valuesArray, b)
			}
			if fieldName == "_email_verify_token_expires_at" ||
				fieldName == "_account_lockout_expires_at" ||
				fieldName == "_perishable_token_expires_at" ||
				fieldName == "_password_changed_at" {
				// Stored as a timestamp; nil when there is no valid Date object.
				if v := utils.M(object[fieldName]); v != nil && utils.S(v["iso"]) != "" {
					valuesArray = append(valuesArray, v["iso"])
				} else {
					valuesArray = append(valuesArray, nil)
				}
			}
			continue
		}
		tp := utils.M(fields[fieldName])
		if tp == nil {
			tp = types.M{}
		}
		switch utils.S(tp["type"]) {
		case "Date":
			if v := utils.M(object[fieldName]); v != nil && utils.S(v["iso"]) != "" {
				valuesArray = append(valuesArray, v["iso"])
			} else {
				valuesArray = append(valuesArray, nil)
			}
		case "Pointer":
			if v := utils.M(object[fieldName]); v != nil && utils.S(v["objectId"]) != "" {
				valuesArray = append(valuesArray, v["objectId"])
			} else {
				valuesArray = append(valuesArray, "")
			}
		case "Array":
			b, err := json.Marshal(object[fieldName])
			if err != nil {
				return err
			}
			if fieldName == "_rperm" || fieldName == "_wperm" {
				// ACL columns are text[]: rewrite the JSON brackets into
				// Postgres array-literal braces.
				// '[' => '{'
				if b[0] == 91 {
					b[0] = 123
				}
				// ']' => '}'
				if len(b) > 0 && b[len(b)-1] == 93 {
					b[len(b)-1] = 125
				}
			}
			valuesArray = append(valuesArray, b)
		case "Object":
			b, err := json.Marshal(object[fieldName])
			if err != nil {
				return err
			}
			valuesArray = append(valuesArray, b)
		case "String", "Number", "Boolean":
			valuesArray = append(valuesArray, object[fieldName])
		case "File":
			if v := utils.M(object[fieldName]); v != nil && utils.S(v["name"]) != "" {
				valuesArray = append(valuesArray, v["name"])
			} else {
				valuesArray = append(valuesArray, "")
			}
		case "GeoPoint":
			// GeoPoints are emitted as POINT(...) expressions later; pull the
			// column back out of the regular column list for now.
			geoPoints[fieldName] = object[fieldName]
			columnsArray = columnsArray[:len(columnsArray)-1]
		default:
			return errs.E(errs.OtherCause, "Type "+utils.S(tp["type"])+" not supported yet")
		}
	}
	// Re-append GeoPoint columns after all scalar columns so column order
	// matches the placeholder order built below.
	for key := range geoPoints {
		columnsArray = append(columnsArray, key)
	}
	initialValues := []string{}
	for index := range valuesArray {
		// Choose a cast suffix for each placeholder based on the column type.
		termination := ""
		fieldName := columnsArray[index]
		if fieldName == "_rperm" || fieldName == "_wperm" {
			termination = "::text[]"
		} else {
			fields := utils.M(schema["fields"])
			if fields == nil {
				fields = types.M{}
			}
			tp := utils.M(fields[fieldName])
			if tp == nil {
				tp = types.M{}
			}
			if utils.S(tp["type"]) == "Array" {
				termination = "::jsonb"
			}
		}
		initialValues = append(initialValues, fmt.Sprintf(`$%d%s`, index+1, termination))
	}
	// POINT($i, $j) expressions for the longitude/latitude pairs appended here.
	geoPointsInjects := []string{}
	for _, v := range geoPoints {
		value := utils.M(v)
		if value == nil {
			value = types.M{}
		}
		valuesArray = append(valuesArray, value["longitude"], value["latitude"])
		l := len(valuesArray)
		geoPointsInjects = append(geoPointsInjects, fmt.Sprintf(`POINT($%d, $%d)`, l-1, l))
	}
	columnsPatternArray := []string{}
	for _, key := range columnsArray {
		columnsPatternArray = append(columnsPatternArray, fmt.Sprintf(`"%s"`, key))
	}
	columnsPattern := strings.Join(columnsPatternArray, ",")
	initialValues = append(initialValues, geoPointsInjects...)
	valuesPattern := strings.Join(initialValues, ",")
	qs := fmt.Sprintf(`INSERT INTO "%s" (%s) VALUES (%s)`, className, columnsPattern, valuesPattern)
	_, err = p.db.Exec(qs, valuesArray...)
	if err != nil {
		if e, ok := err.(*pq.Error); ok {
			if e.Code == postgresUniqueIndexViolationError {
				return errs.E(errs.DuplicateValue, "A duplicate value for a field with unique values was provided")
			}
		}
		return err
	}
	return nil
}
// GetAllClasses returns the Parse-format schema of every class recorded in
// the "_SCHEMA" table.
func (p *PostgresAdapter) GetAllClasses() ([]types.M, error) {
	err := p.ensureSchemaCollectionExists()
	if err != nil {
		return nil, err
	}
	qs := `SELECT "className","schema" FROM "_SCHEMA"`
	rows, err := p.db.Query(qs)
	if err != nil {
		return nil, err
	}
	// Previously the result set was never closed.
	defer rows.Close()
	schemas := []types.M{}
	for rows.Next() {
		var clsName string
		var sch types.M
		var v []byte
		err := rows.Scan(&clsName, &v)
		if err != nil {
			return nil, err
		}
		err = json.Unmarshal(v, &sch)
		if err != nil {
			return nil, err
		}
		if sch == nil {
			// A stored JSON "null" leaves sch nil; without this guard the
			// assignment below would panic (assignment to nil map).
			sch = types.M{}
		}
		sch["className"] = clsName
		schemas = append(schemas, toParseSchema(sch))
	}
	return schemas, nil
}
// GetClass returns the Parse-format schema for className, or an empty map
// when the class is not recorded in "_SCHEMA".
func (p *PostgresAdapter) GetClass(className string) (types.M, error) {
	err := p.ensureSchemaCollectionExists()
	if err != nil {
		return nil, err
	}
	qs := `SELECT "schema" FROM "_SCHEMA" WHERE "className"=$1`
	rows, err := p.db.Query(qs, className)
	if err != nil {
		return nil, err
	}
	// Previously the result set was never closed.
	defer rows.Close()
	schema := types.M{}
	if rows.Next() {
		var v []byte
		err = rows.Scan(&v)
		if err != nil {
			return nil, err
		}
		err = json.Unmarshal(v, &schema)
		if err != nil {
			return nil, err
		}
	} else {
		// Unknown class: return the empty schema without conversion.
		return schema, nil
	}
	return toParseSchema(schema), nil
}
// DeleteObjectsByQuery deletes every object of className matching query.
// An empty query deletes everything. Returns an ObjectNotFound error when
// no row was deleted or the table does not exist.
func (p *PostgresAdapter) DeleteObjectsByQuery(className string, schema, query types.M) error {
	where, err := buildWhereClause(schema, query, 1)
	if err != nil {
		return err
	}
	condition := where.pattern
	if len(query) == 0 {
		condition = "TRUE"
	}
	qs := fmt.Sprintf(`WITH deleted AS (DELETE FROM "%s" WHERE %s RETURNING *) SELECT count(*) FROM deleted`, className, condition)
	var deleted int
	if err := p.db.QueryRow(qs, where.values...).Scan(&deleted); err != nil {
		if e, ok := err.(*pq.Error); ok && e.Code == postgresRelationDoesNotExistError {
			// Missing table: nothing could have matched.
			return errs.E(errs.ObjectNotFound, "Object not found.")
		}
		return err
	}
	if deleted == 0 {
		return errs.E(errs.ObjectNotFound, "Object not found.")
	}
	return nil
}
// Find runs query against className and returns the matching objects in
// Parse format. options may carry "limit", "skip", "sort" ([]string, with
// a "-" prefix for descending order) and "keys" ([]string projection).
func (p *PostgresAdapter) Find(className string, schema, query, options types.M) ([]types.M, error) {
	if schema == nil {
		schema = types.M{}
	}
	if options == nil {
		options = types.M{}
	}
	var hasLimit bool
	var hasSkip bool
	if _, ok := options["limit"]; ok {
		hasLimit = true
	}
	if _, ok := options["skip"]; ok {
		hasSkip = true
	}
	values := types.S{}
	where, err := buildWhereClause(schema, query, 1)
	if err != nil {
		return nil, err
	}
	values = append(values, where.values...)
	var wherePattern string
	var limitPattern string
	var skipPattern string
	if where.pattern != "" {
		wherePattern = `WHERE ` + where.pattern
	}
	// LIMIT/OFFSET use the next free placeholder numbers after the where values.
	if hasLimit {
		limitPattern = fmt.Sprintf(`LIMIT $%d`, len(values)+1)
		values = append(values, options["limit"])
	}
	if hasSkip {
		skipPattern = fmt.Sprintf(`OFFSET $%d`, len(values)+1)
		values = append(values, options["skip"])
	}
	var sortPattern string
	if _, ok := options["sort"]; ok {
		if keys, ok := options["sort"].([]string); ok {
			postgresSort := []string{}
			for _, key := range keys {
				var postgresKey string
				if strings.HasPrefix(key, "-") {
					key = key[1:]
					postgresKey = fmt.Sprintf(`"%s" DESC`, key)
				} else {
					postgresKey = fmt.Sprintf(`"%s" ASC`, key)
				}
				postgresSort = append(postgresSort, postgresKey)
			}
			sorting := strings.Join(postgresSort, ",")
			if len(postgresSort) > 0 {
				sortPattern = fmt.Sprintf(`ORDER BY %s`, sorting)
			}
		}
	}
	// Sort terms produced by the where clause override the caller's sort.
	if len(where.sorts) > 0 {
		sortPattern = fmt.Sprintf(`ORDER BY %s`, strings.Join(where.sorts, ","))
	}
	columns := "*"
	if _, ok := options["keys"]; ok {
		if keys, ok := options["keys"].([]string); ok {
			postgresKeys := []string{}
			for _, key := range keys {
				if key != "" {
					postgresKeys = append(postgresKeys, fmt.Sprintf(`"%s"`, key))
				}
			}
			if len(postgresKeys) > 0 {
				columns = strings.Join(postgresKeys, ",")
			}
		}
	}
	qs := fmt.Sprintf(`SELECT %s FROM "%s" %s %s %s %s`, columns, className, wherePattern, sortPattern, limitPattern, skipPattern)
	rows, err := p.db.Query(qs, values...)
	if err != nil {
		if e, ok := err.(*pq.Error); ok {
			// Missing table means no objects: return an empty result.
			if e.Code == postgresRelationDoesNotExistError {
				return []types.M{}, nil
			}
		}
		return nil, err
	}
	fields := utils.M(schema["fields"])
	if fields == nil {
		fields = types.M{}
	}
	results := []types.M{}
	var resultColumns []string
	for rows.Next() {
		// Column names are identical for every row; fetch them once.
		if resultColumns == nil {
			resultColumns, err = rows.Columns()
			if err != nil {
				return nil, err
			}
		}
		// Scan each column into an interface{} so arbitrary column types
		// can be decoded by postgresObjectToParseObject afterwards.
		resultValues := []*interface{}{}
		values := types.S{}
		for i := 0; i < len(resultColumns); i++ {
			var v interface{}
			resultValues = append(resultValues, &v)
			values = append(values, &v)
		}
		err = rows.Scan(values...)
		if err != nil {
			return nil, err
		}
		object := types.M{}
		for i, field := range resultColumns {
			object[field] = *resultValues[i]
		}
		object, err = postgresObjectToParseObject(object, fields)
		if err != nil {
			return nil, err
		}
		results = append(results, object)
	}
	return results, nil
}
// Count returns the number of objects of className matching query. A
// missing class table counts as zero.
func (p *PostgresAdapter) Count(className string, schema, query types.M) (int, error) {
	where, err := buildWhereClause(schema, query, 1)
	if err != nil {
		return 0, err
	}
	wherePattern := ""
	if len(where.pattern) > 0 {
		wherePattern = `WHERE ` + where.pattern
	}
	qs := fmt.Sprintf(`SELECT count(*) FROM "%s" %s`, className, wherePattern)
	rows, err := p.db.Query(qs, where.values...)
	if err != nil {
		if e, ok := err.(*pq.Error); ok {
			// Missing table: count is zero.
			if e.Code == postgresRelationDoesNotExistError {
				return 0, nil
			}
		}
		return 0, err
	}
	// Previously the result set was never closed.
	defer rows.Close()
	var count int
	if rows.Next() {
		err = rows.Scan(&count)
		if err != nil {
			// Was `return 0, nil`, which silently swallowed scan failures.
			return 0, err
		}
	}
	return count, nil
}
// UpdateObjectsByQuery applies update to the objects of className matching
// query, discarding the returned object.
func (p *PostgresAdapter) UpdateObjectsByQuery(className string, schema, query, update types.M) error {
	if _, err := p.FindOneAndUpdate(className, schema, query, update); err != nil {
		return err
	}
	return nil
}
// FindOneAndUpdate applies update to the objects of className matching
// query and returns one updated row in Parse format (an empty map when
// nothing matched).
//
// Each update entry becomes a SET expression: plain values are bound as
// parameters; {"__op": ...} objects map to Increment/Add/Delete/Remove/
// AddUnique SQL helpers; typed objects (Pointer/Date/File/GeoPoint) are
// flattened; Object fields are merged with jsonb operators, honoring
// dotted "obj.key" Increment/Delete entries from the original update.
func (p *PostgresAdapter) FindOneAndUpdate(className string, schema, query, update types.M) (types.M, error) {
	updatePatterns := []string{}
	values := types.S{}
	// index is the next free $n placeholder number; it must stay in sync
	// with appends to values.
	index := 1
	if schema == nil {
		schema = types.M{}
	}
	schema = toPostgresSchema(schema)
	fields := utils.M(schema["fields"])
	if fields == nil {
		fields = types.M{}
	}
	// Keep the raw update around: the dotted keys it still contains are
	// needed below to detect per-key Increment/Delete on Object fields.
	originalUpdate := utils.CopyMapM(update)
	update = handleDotFields(update)
	// Fold _auth_data_<provider> keys into the authData map.
	for fieldName, v := range update {
		re := regexp.MustCompile(`^_auth_data_([a-zA-Z0-9_]+)$`)
		authDataMatch := re.FindStringSubmatch(fieldName)
		if authDataMatch != nil && len(authDataMatch) == 2 {
			provider := authDataMatch[1]
			delete(update, fieldName)
			authData := utils.M(update["authData"])
			if authData == nil {
				authData = types.M{}
			}
			authData[provider] = v
			update["authData"] = authData
		}
	}
	for fieldName, fieldValue := range update {
		if fieldValue == nil {
			updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = NULL`, fieldName))
			continue
		}
		if fieldName == "authData" {
			// Build nested json_object_set_key(...) calls, one per provider.
			generate := func(jsonb, key, value string) string {
				return fmt.Sprintf(`json_object_set_key(COALESCE(%s, '{}'::jsonb), %s, %s)::jsonb`, jsonb, key, value)
			}
			lastKey := fmt.Sprintf(`"%s"`, fieldName)
			authData := utils.M(fieldValue)
			if authData == nil {
				continue
			}
			for key, value := range authData {
				lastKey = generate(lastKey, fmt.Sprintf(`$%d::text`, index), fmt.Sprintf(`$%d::jsonb`, index+1))
				index = index + 2
				if value != nil {
					if v := utils.M(value); v != nil && utils.S(v["__op"]) == "Delete" {
						value = nil
					} else {
						// NOTE(review): this marshals v, which is nil when
						// value is not a map, producing "null" — presumably
						// provider entries are always maps; confirm.
						b, err := json.Marshal(v)
						if err != nil {
							return nil, err
						}
						value = string(b)
					}
				}
				values = append(values, key, value)
			}
			updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = %s`, fieldName, lastKey))
			continue
		}
		if fieldName == "updatedAt" {
			updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, fieldValue)
			index = index + 1
			continue
		}
		// Plain scalar assignment.
		switch fieldValue.(type) {
		case string, bool, float64, int:
			updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, fieldValue)
			index = index + 1
			continue
		case time.Time:
			updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, utils.TimetoString(fieldValue.(time.Time)))
			index = index + 1
			continue
		}
		if object := utils.M(fieldValue); object != nil {
			// Operator objects: {"__op": ...}.
			switch utils.S(object["__op"]) {
			case "Increment":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = COALESCE("%s", 0) + $%d`, fieldName, fieldName, index))
				values = append(values, object["amount"])
				index = index + 1
				continue
			case "Add":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = array_add(COALESCE("%s", '[]'::jsonb), $%d::jsonb)`, fieldName, fieldName, index))
				b, err := json.Marshal(object["objects"])
				if err != nil {
					return nil, err
				}
				values = append(values, string(b))
				index = index + 1
				continue
			case "Delete":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
				values = append(values, nil)
				index = index + 1
				continue
			case "Remove":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = array_remove(COALESCE("%s", '[]'::jsonb), $%d::jsonb)`, fieldName, fieldName, index))
				b, err := json.Marshal(object["objects"])
				if err != nil {
					return nil, err
				}
				values = append(values, string(b))
				index = index + 1
				continue
			case "AddUnique":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = array_add_unique(COALESCE("%s", '[]'::jsonb), $%d::jsonb)`, fieldName, fieldName, index))
				b, err := json.Marshal(object["objects"])
				if err != nil {
					return nil, err
				}
				values = append(values, string(b))
				index = index + 1
				continue
			}
			// Typed objects: {"__type": ...}.
			switch utils.S(object["__type"]) {
			case "Pointer":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
				values = append(values, object["objectId"])
				index = index + 1
				continue
			case "Date", "File":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
				values = append(values, toPostgresValue(object))
				index = index + 1
				continue
			case "GeoPoint":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = POINT($%d, $%d)`, fieldName, index, index+1))
				values = append(values, object["longitude"], object["latitude"])
				index = index + 2
				continue
			case "Relation":
				// Relations have no column to update.
				continue
			}
			if tp := utils.M(fields[fieldName]); tp != nil && utils.S(tp["type"]) == "Object" {
				// Nested-object merge. Dotted Increment entries in the raw
				// update are turned into jsonb concatenation expressions.
				keysToIncrement := []string{}
				for k, v := range originalUpdate {
					if o := utils.M(v); o != nil && utils.S(o["__op"]) == "Increment" {
						if keys := strings.Split(k, "."); len(keys) == 2 && keys[0] == fieldName {
							keysToIncrement = append(keysToIncrement, keys[1])
						}
					}
				}
				incrementPatterns := ""
				if len(keysToIncrement) > 0 {
					for _, key := range keysToIncrement {
						increment := utils.M(object[key])
						if increment == nil {
							continue
						}
						var amount interface{}
						switch increment["amount"].(type) {
						case float64, int:
							amount = increment["amount"]
						}
						if amount == nil {
							continue
						}
						incrementPatterns += " || " + fmt.Sprintf(`CONCAT('{"%s":', COALESCE("%s"->>'%s', '0')::float + %v, '}')::jsonb`, key, fieldName, key, amount)
					}
					// Incremented keys must not also appear in the merged JSON.
					for _, key := range keysToIncrement {
						delete(object, key)
					}
				}
				// Dotted Delete entries become jsonb "- 'key'" removals.
				keysToDelete := []string{}
				for k, v := range originalUpdate {
					if o := utils.M(v); o != nil && utils.S(o["__op"]) == "Delete" {
						if keys := strings.Split(k, "."); len(keys) == 2 && keys[0] == fieldName {
							keysToDelete = append(keysToDelete, keys[1])
						}
					}
				}
				deletePatterns := ""
				for _, k := range keysToDelete {
					deletePatterns = deletePatterns + fmt.Sprintf(` - '%s'`, k)
				}
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = ( COALESCE("%s", '{}'::jsonb) %s %s || $%d::jsonb )`, fieldName, fieldName, deletePatterns, incrementPatterns, index))
				b, err := json.Marshal(object)
				if err != nil {
					return nil, err
				}
				values = append(values, string(b))
				index = index + 1
				continue
			}
		}
		if array := utils.A(fieldValue); array != nil {
			if tp := utils.M(fields[fieldName]); tp != nil && utils.S(tp["type"]) == "Array" {
				expectedType, err := parseTypeToPostgresType(tp)
				if err != nil {
					return nil, err
				}
				b, err := json.Marshal(fieldValue)
				if err != nil {
					return nil, err
				}
				if expectedType == "text[]" {
					// Rewrite the JSON brackets into Postgres array-literal braces.
					// '[' => '{'
					if b[0] == 91 {
						b[0] = 123
					}
					// ']' => '}'
					if len(b) > 0 && b[len(b)-1] == 93 {
						b[len(b)-1] = 125
					}
					updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d::text[]`, fieldName, index))
				} else {
					updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d::jsonb`, fieldName, index))
				}
				values = append(values, string(b))
				index = index + 1
				continue
			}
		}
		b, _ := json.Marshal(fieldValue)
		return nil, errs.E(errs.OperationForbidden, "Postgres doesn't support update "+string(b)+" yet")
	}
	where, err := buildWhereClause(schema, query, index)
	if err != nil {
		return nil, err
	}
	values = append(values, where.values...)
	// TODO: add a LIMIT so only one row is updated here, while
	// UpdateObjectsByQuery updates all matching rows.
	qs := fmt.Sprintf(`UPDATE "%s" SET %s WHERE %s RETURNING *`, className, strings.Join(updatePatterns, ","), where.pattern)
	rows, err := p.db.Query(qs, values...)
	if err != nil {
		if e, ok := err.(*pq.Error); ok {
			// Missing table: nothing could have matched.
			if e.Code == postgresRelationDoesNotExistError {
				return nil, errs.E(errs.ObjectNotFound, "Object not found.")
			}
		}
		return nil, err
	}
	object := types.M{}
	if rows.Next() {
		resultColumns, err := rows.Columns()
		if err != nil {
			return nil, err
		}
		// Scan into interface{} holders, then decode the row.
		resultValues := []*interface{}{}
		values := types.S{}
		for i := 0; i < len(resultColumns); i++ {
			var v interface{}
			resultValues = append(resultValues, &v)
			values = append(values, &v)
		}
		err = rows.Scan(values...)
		if err != nil {
			return nil, err
		}
		for i, field := range resultColumns {
			object[field] = *resultValues[i]
		}
		object, err = postgresObjectToParseObject(object, fields)
		if err != nil {
			return nil, err
		}
	}
	return object, nil
}
// UpsertOneObject updates the object matching query, creating it from the
// merged query+update values when no object matched. Used only for config
// and hooks.
func (p *PostgresAdapter) UpsertOneObject(className string, schema, query, update types.M) error {
	object, err := p.FindOneAndUpdate(className, schema, query, update)
	if err != nil {
		return err
	}
	if len(object) > 0 {
		return nil
	}
	// Nothing matched: build the new object from query plus update.
	createValue := types.M{}
	for _, src := range []types.M{query, update} {
		for k, v := range src {
			createValue[k] = v
		}
	}
	return p.CreateObject(className, schema, createValue)
}
// EnsureUniqueness adds a UNIQUE constraint named "unique_<f1>_<f2>..."
// over fieldNames on className. An already-existing constraint is ignored;
// existing duplicate rows yield a DuplicateValue error.
func (p *PostgresAdapter) EnsureUniqueness(className string, schema types.M, fieldNames []string) error {
	sort.Strings(fieldNames)
	constraintName := `unique_` + strings.Join(fieldNames, "_")
	constraintPatterns := []string{}
	for _, fieldName := range fieldNames {
		constraintPatterns = append(constraintPatterns, `"`+fieldName+`"`)
	}
	qs := fmt.Sprintf(`ALTER TABLE "%s" ADD CONSTRAINT "%s" UNIQUE (%s)`, className, constraintName, strings.Join(constraintPatterns, ","))
	_, err := p.db.Exec(qs)
	if err != nil {
		if e, ok := err.(*pq.Error); ok {
			if e.Code == postgresDuplicateRelationError && strings.Contains(e.Message, constraintName) {
				// The constraint already exists; ignore.
				return nil
			}
			if e.Code == postgresUniqueIndexViolationError && strings.Contains(e.Message, constraintName) {
				return errs.E(errs.DuplicateValue, "A duplicate value for a field with unique values was provided")
			}
		}
		// Previously any other *pq.Error fell through and was silently
		// swallowed (the function returned nil); surface it instead.
		return err
	}
	return nil
}
// PerformInitialization creates the volatile class tables given in
// options["VolatileClassesSchemas"] and installs the SQL helper functions
// used by the array/json update operators.
func (p *PostgresAdapter) PerformInitialization(options types.M) error {
	if options == nil {
		options = types.M{}
	}
	if volatileClassesSchemas, ok := options["VolatileClassesSchemas"].([]types.M); ok {
		for _, schema := range volatileClassesSchemas {
			if err := p.createTable(utils.S(schema["className"]), schema); err != nil {
				// Already-existing tables and invalid class names are fine;
				// anything else aborts initialization.
				switch e := err.(type) {
				case *pq.Error:
					if e.Code != postgresDuplicateRelationError {
						return err
					}
				case *errs.TomatoError:
					if e.Code != errs.InvalidClassName {
						return err
					}
				default:
					return err
				}
			}
		}
	}
	// Install the helper SQL functions, one statement each.
	helperFunctions := []string{
		jsonObjectSetKey,
		arrayAdd,
		arrayAddUnique,
		arrayRemove,
		arrayContainsAll,
		arrayContains,
	}
	for _, fn := range helperFunctions {
		if _, err := p.db.Exec(fn); err != nil {
			return err
		}
	}
	return nil
}
// postgresObjectToParseObject converts a raw row (column name -> driver
// value) into a Parse object, using fields (the class schema) to decode
// each column: Pointer/File/Relation values are re-wrapped as typed
// objects, GeoPoint "(lon,lat)" strings and _rperm/_wperm "{a,b}" array
// literals are parsed, jsonb bytes are unmarshaled, timestamps become
// Date objects, and nil entries are dropped from the result.
func postgresObjectToParseObject(object, fields types.M) (types.M, error) {
	if len(object) == 0 {
		return object, nil
	}
	for fieldName, v := range fields {
		tp := utils.M(v)
		if tp == nil {
			continue
		}
		objectType := utils.S(tp["type"])
		if objectType == "Pointer" && object[fieldName] != nil {
			// The column stores only the objectId; rebuild the pointer.
			if v, ok := object[fieldName].([]byte); ok {
				object[fieldName] = types.M{
					"objectId":  string(v),
					"__type":    "Pointer",
					"className": tp["targetClass"],
				}
			} else {
				object[fieldName] = nil
			}
		} else if objectType == "Relation" {
			// Relations have no column value; synthesize the marker object.
			object[fieldName] = types.M{
				"__type":    "Relation",
				"className": tp["targetClass"],
			}
		} else if objectType == "GeoPoint" && object[fieldName] != nil {
			// The driver returns e.g. (10,20) meaning (longitude, latitude).
			resString := ""
			if v, ok := object[fieldName].([]byte); ok {
				resString = string(v)
			}
			// Shortest valid form is "(a,b)" — 5 characters.
			if len(resString) < 5 {
				object[fieldName] = nil
				continue
			}
			pointString := strings.Split(resString[1:len(resString)-1], ",")
			if len(pointString) != 2 {
				object[fieldName] = nil
				continue
			}
			longitude, err := strconv.ParseFloat(pointString[0], 64)
			if err != nil {
				return nil, err
			}
			latitude, err := strconv.ParseFloat(pointString[1], 64)
			if err != nil {
				return nil, err
			}
			object[fieldName] = types.M{
				"__type":    "GeoPoint",
				"longitude": longitude,
				"latitude":  latitude,
			}
		} else if objectType == "File" && object[fieldName] != nil {
			// The column stores only the file name; rebuild the File object.
			if v, ok := object[fieldName].([]byte); ok {
				object[fieldName] = types.M{
					"__type": "File",
					"name":   string(v),
				}
			} else {
				object[fieldName] = nil
			}
		} else if objectType == "String" && object[fieldName] != nil {
			if v, ok := object[fieldName].([]byte); ok {
				object[fieldName] = string(v)
			} else {
				object[fieldName] = nil
			}
		} else if objectType == "Object" && object[fieldName] != nil {
			// jsonb columns come back as raw JSON bytes.
			if v, ok := object[fieldName].([]byte); ok {
				var r types.M
				err := json.Unmarshal(v, &r)
				if err != nil {
					return nil, err
				}
				object[fieldName] = r
			} else {
				object[fieldName] = nil
			}
		} else if objectType == "Array" && object[fieldName] != nil {
			// _rperm/_wperm are text[] columns, handled separately below.
			if fieldName == "_rperm" || fieldName == "_wperm" {
				continue
			}
			if v, ok := object[fieldName].([]byte); ok {
				var r types.S
				err := json.Unmarshal(v, &r)
				if err != nil {
					return nil, err
				}
				object[fieldName] = r
			} else {
				object[fieldName] = nil
			}
		}
	}
	if object["_rperm"] != nil {
		// e.g. object["_rperm"] = {hello,world}; the writer guarantees the
		// values contain no ',' so a plain split is safe.
		resString := ""
		if v, ok := object["_rperm"].([]byte); ok {
			resString = string(v)
		}
		if len(resString) < 2 {
			object["_rperm"] = nil
		} else {
			keys := strings.Split(resString[1:len(resString)-1], ",")
			rperm := make(types.S, len(keys))
			for i, k := range keys {
				rperm[i] = k
			}
			object["_rperm"] = rperm
		}
	}
	if object["_wperm"] != nil {
		// e.g. object["_wperm"] = {hello,world}; the writer guarantees the
		// values contain no ',' so a plain split is safe.
		resString := ""
		if v, ok := object["_wperm"].([]byte); ok {
			resString = string(v)
		}
		if len(resString) < 2 {
			object["_wperm"] = nil
		} else {
			keys := strings.Split(resString[1:len(resString)-1], ",")
			wperm := make(types.S, len(keys))
			for i, k := range keys {
				wperm[i] = k
			}
			object["_wperm"] = wperm
		}
	}
	// Timestamp columns become ISO strings or Date objects.
	if object["createdAt"] != nil {
		if v, ok := object["createdAt"].(time.Time); ok {
			object["createdAt"] = utils.TimetoString(v)
		} else {
			object["createdAt"] = nil
		}
	}
	if object["updatedAt"] != nil {
		if v, ok := object["updatedAt"].(time.Time); ok {
			object["updatedAt"] = utils.TimetoString(v)
		} else {
			object["updatedAt"] = nil
		}
	}
	if object["expiresAt"] != nil {
		object["expiresAt"] = valueToDate(object["expiresAt"])
	}
	if object["_email_verify_token_expires_at"] != nil {
		object["_email_verify_token_expires_at"] = valueToDate(object["_email_verify_token_expires_at"])
	}
	if object["_account_lockout_expires_at"] != nil {
		object["_account_lockout_expires_at"] = valueToDate(object["_account_lockout_expires_at"])
	}
	if object["_perishable_token_expires_at"] != nil {
		object["_perishable_token_expires_at"] = valueToDate(object["_perishable_token_expires_at"])
	}
	if object["_password_changed_at"] != nil {
		object["_password_changed_at"] = valueToDate(object["_password_changed_at"])
	}
	// Drop nil entries and wrap any remaining time.Time as a Date object.
	for fieldName := range object {
		if object[fieldName] == nil {
			delete(object, fieldName)
		}
		if v, ok := object[fieldName].(time.Time); ok {
			object[fieldName] = types.M{
				"__type": "Date",
				"iso":    utils.TimetoString(v),
			}
		}
	}
	return object, nil
}
// parseToPosgresComparator maps Parse comparison operators to their
// Postgres SQL equivalents.
var parseToPosgresComparator = map[string]string{
	"$gt":  ">",
	"$lt":  "<",
	"$gte": ">=",
	"$lte": "<=",
}
// parseTypeToPostgresType translates a Parse field-type descriptor into
// the matching Postgres column type. A nil descriptor yields an empty
// type with no error; an unknown type yields an IncorrectType error.
func parseTypeToPostgresType(t types.M) (string, error) {
	if t == nil {
		return "", nil
	}
	tp := utils.S(t["type"])
	switch tp {
	case "String", "File":
		return "text", nil
	case "Date":
		return "timestamp with time zone", nil
	case "Object":
		return "jsonb", nil
	case "Boolean":
		return "boolean", nil
	case "Pointer":
		return "char(24)", nil
	case "Number":
		return "double precision", nil
	case "GeoPoint":
		return "point", nil
	case "Array":
		// String arrays get a native text array; everything else is jsonb.
		if contents := utils.M(t["contents"]); contents != nil && utils.S(contents["type"]) == "String" {
			return "text[]", nil
		}
		return "jsonb", nil
	}
	return "", errs.E(errs.IncorrectType, "no type for "+tp+" yet")
}
// toPostgresValue unwraps Date and File objects to their primitive
// representation (ISO string / file name); anything else passes through.
func toPostgresValue(value interface{}) interface{} {
	v := utils.M(value)
	if v == nil {
		return value
	}
	switch utils.S(v["__type"]) {
	case "Date":
		return v["iso"]
	case "File":
		return v["name"]
	}
	return value
}
// transformValue unwraps Pointer objects to their objectId; anything else
// passes through unchanged.
func transformValue(value interface{}) interface{} {
	if v := utils.M(value); v != nil && utils.S(v["__type"]) == "Pointer" {
		return v["objectId"]
	}
	return value
}
// emptyCLPS is a class-level-permissions table with every action present
// but no permissions granted.
var emptyCLPS = types.M{
	"find":     types.M{},
	"get":      types.M{},
	"create":   types.M{},
	"update":   types.M{},
	"delete":   types.M{},
	"addField": types.M{},
}
// defaultCLPS is the default class-level-permissions table: every action
// is publicly allowed ("*").
var defaultCLPS = types.M{
	"find":     types.M{"*": true},
	"get":      types.M{"*": true},
	"create":   types.M{"*": true},
	"update":   types.M{"*": true},
	"delete":   types.M{"*": true},
	"addField": types.M{"*": true},
}
// toParseSchema converts a stored schema row into the Parse schema format,
// hiding internal columns and filling in class-level permissions.
func toParseSchema(schema types.M) types.M {
	if schema == nil {
		return nil
	}
	fields := utils.M(schema["fields"])
	if fields == nil {
		fields = types.M{}
	}
	// Internal columns are never exposed in the Parse schema.
	if utils.S(schema["className"]) == "_User" {
		delete(fields, "_hashed_password")
	}
	delete(fields, "_wperm")
	delete(fields, "_rperm")
	// Actions missing from the stored permissions default to public access.
	clps := utils.CopyMap(defaultCLPS)
	if classLevelPermissions := utils.M(schema["classLevelPermissions"]); classLevelPermissions != nil {
		for k, v := range classLevelPermissions {
			clps[k] = v
		}
	}
	return types.M{
		"className":             schema["className"],
		"fields":                fields,
		"classLevelPermissions": clps,
	}
}
// toPostgresSchema augments a schema with the internal ACL columns
// (_rperm/_wperm) and, for _User, the password bookkeeping fields.
func toPostgresSchema(schema types.M) types.M {
	if schema == nil {
		return nil
	}
	flds := utils.M(schema["fields"])
	if flds == nil {
		flds = types.M{}
	}
	flds["_wperm"] = types.M{"type": "Array", "contents": types.M{"type": "String"}}
	flds["_rperm"] = types.M{"type": "Array", "contents": types.M{"type": "String"}}
	if utils.S(schema["className"]) == "_User" {
		flds["_hashed_password"] = types.M{"type": "String"}
		flds["_password_history"] = types.M{"type": "Array"}
	}
	schema["fields"] = flds
	return schema
}
// handleDotFields expands "a.b.c"-style keys in object into nested maps
// and removes the dotted key. A value of {"__op": "Delete"} clears the
// leaf instead of setting it.
func handleDotFields(object types.M) types.M {
	for fieldName := range object {
		if strings.Index(fieldName, ".") == -1 {
			continue
		}
		components := strings.Split(fieldName, ".")
		value := object[fieldName]
		if v := utils.M(value); v != nil {
			if utils.S(v["__op"]) == "Delete" {
				value = nil
			}
		}
		// Walk/create the nested maps along the dotted path.
		currentObj := object
		for i, next := range components {
			if i == (len(components) - 1) {
				// Last component: set the leaf (or drop it for Delete).
				if value != nil {
					currentObj[next] = value
				}
				break
			}
			obj := currentObj[next]
			if obj == nil {
				obj = types.M{}
				currentObj[next] = obj
			}
			// NOTE(review): if an intermediate value exists but is not a map,
			// utils.M returns nil here and the next iteration would write to
			// a nil map — presumably callers never mix a scalar value with a
			// dotted path through it; confirm.
			currentObj = utils.M(currentObj[next])
		}
		delete(object, fieldName)
	}
	return object
}
// validateKeys rejects objects whose keys — at any nesting depth — contain
// '$' or '.', which are reserved characters in field names.
func validateKeys(object interface{}) error {
	obj := utils.M(object)
	if obj == nil {
		return nil
	}
	for key, value := range obj {
		// Check nested maps first, then the key itself.
		if err := validateKeys(value); err != nil {
			return err
		}
		if strings.ContainsAny(key, "$.") {
			return errs.E(errs.InvalidNestedKey, "Nested keys should not contain the '$' or '.' characters")
		}
	}
	return nil
}
// joinTablesForSchema lists the "_Join:<field>:<className>" relation
// tables implied by the schema's Relation-typed fields. Returns an empty
// slice for a nil schema or one without fields.
func joinTablesForSchema(schema types.M) []string {
	list := []string{}
	if schema == nil {
		return list
	}
	fields := utils.M(schema["fields"])
	if fields == nil {
		return list
	}
	className := utils.S(schema["className"])
	for field, v := range fields {
		tp := utils.M(v)
		if tp == nil {
			continue
		}
		if utils.S(tp["type"]) == "Relation" {
			list = append(list, "_Join:"+field+":"+className)
		}
	}
	return list
}
// whereClause is the result of translating a Parse query into SQL:
// pattern is the WHERE-clause text containing $n placeholders, values
// holds the corresponding parameter values in order, and sorts carries
// ORDER BY expressions produced by geo queries ($nearSphere).
type whereClause struct {
	pattern string
	values types.S
	sorts []string
}
// buildWhereClause translates a Parse query map into a Postgres WHERE
// clause. index is the 1-based number of the first $n placeholder to use,
// which allows recursive calls for $or/$and sub-queries to continue the
// numbering. Returns the clause pattern, its parameter values, and any
// geo-distance ORDER BY expressions.
func buildWhereClause(schema, query types.M, index int) (*whereClause, error) {
	patterns := []string{}
	values := types.S{}
	sorts := []string{}
	schema = toPostgresSchema(schema)
	if schema == nil {
		schema = types.M{}
	}
	fields := utils.M(schema["fields"])
	if fields == nil {
		fields = types.M{}
	}
	for fieldName, fieldValue := range query {
		isArrayField := false
		if fields != nil {
			if tp := utils.M(fields[fieldName]); tp != nil {
				if utils.S(tp["type"]) == "Array" {
					isArrayField = true
				}
			}
		}
		initialPatternsLength := len(patterns)
		// A field missing from the schema with {"$exists": false} matches
		// trivially, so no constraint is emitted for it.
		if fields[fieldName] == nil {
			if v := utils.M(fieldValue); v != nil {
				if b, ok := v["$exists"].(bool); ok && b == false {
					continue
				}
			}
		}
		if strings.Contains(fieldName, ".") {
			// Dotted name: build a JSON path "col"->'a'->'b' and compare the
			// whole value as JSON text. NOTE(review): the loop variable below
			// deliberately shadows the outer placeholder counter `index`;
			// harmless here since this branch appends no parameter values.
			components := strings.Split(fieldName, ".")
			for index, cmpt := range components {
				if index == 0 {
					components[index] = `"` + cmpt + `"`
				} else {
					components[index] = `'` + cmpt + `'`
				}
			}
			name := strings.Join(components, "->")
			b, err := json.Marshal(fieldValue)
			if err != nil {
				return nil, err
			}
			patterns = append(patterns, fmt.Sprintf(`%s = '%v'`, name, string(b)))
		} else if _, ok := fieldValue.(string); ok {
			// Scalar equality: string, bool, float64, int each consume one
			// placeholder.
			patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, fieldValue)
			index = index + 1
		} else if _, ok := fieldValue.(bool); ok {
			patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, fieldValue)
			index = index + 1
		} else if _, ok := fieldValue.(float64); ok {
			patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, fieldValue)
			index = index + 1
		} else if _, ok := fieldValue.(int); ok {
			patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, fieldValue)
			index = index + 1
		} else if fieldName == "$or" || fieldName == "$and" {
			// Compound query: recurse per sub-query, advancing index by the
			// number of values each sub-clause consumed.
			clauses := []string{}
			clauseValues := types.S{}
			if array := utils.A(fieldValue); array != nil {
				for _, v := range array {
					if subQuery := utils.M(v); subQuery != nil {
						clause, err := buildWhereClause(schema, subQuery, index)
						if err != nil {
							return nil, err
						}
						if len(clause.pattern) > 0 {
							clauses = append(clauses, clause.pattern)
							clauseValues = append(clauseValues, clause.values...)
							index = index + len(clause.values)
						}
					}
				}
			}
			var orOrAnd string
			if fieldName == "$or" {
				orOrAnd = " OR "
			} else {
				orOrAnd = " AND "
			}
			patterns = append(patterns, fmt.Sprintf(`(%s)`, strings.Join(clauses, orOrAnd)))
			values = append(values, clauseValues...)
		}
		// Operator object: a map value may carry several operators at once;
		// each one appends its own pattern.
		if value := utils.M(fieldValue); value != nil {
			// $ne — "not equal"; on array columns this uses the custom
			// array_contains helper, and NULL columns count as "not equal".
			if v, ok := value["$ne"]; ok {
				if isArrayField {
					j, _ := json.Marshal(types.S{v})
					value["$ne"] = string(j)
					patterns = append(patterns, fmt.Sprintf(`NOT array_contains("%s", $%d)`, fieldName, index))
					values = append(values, value["$ne"])
					index = index + 1
				} else {
					if v == nil {
						patterns = append(patterns, fmt.Sprintf(`"%s" IS NOT NULL`, fieldName))
					} else {
						patterns = append(patterns, fmt.Sprintf(`("%s" <> $%d OR "%s" IS NULL)`, fieldName, index, fieldName))
						values = append(values, value["$ne"])
						index = index + 1
					}
				}
			}
			// $eq — explicit equality; nil maps to IS NULL.
			if v, ok := value["$eq"]; ok {
				if v == nil {
					patterns = append(patterns, fmt.Sprintf(`"%s" IS NULL`, fieldName))
				} else {
					patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
					values = append(values, v)
					index = index + 1
				}
			}
			// $in / $nin — membership tests. String arrays get the Postgres
			// && (overlap) operator; other cases go through array_contains
			// or a plain IN list.
			inArray := utils.A(value["$in"])
			ninArray := utils.A(value["$nin"])
			isInOrNin := (inArray != nil) || (ninArray != nil)
			isTypeString := false
			if tp := utils.M(fields[fieldName]); tp != nil {
				if contents := utils.M(tp["contents"]); contents != nil {
					if utils.S(contents["type"]) == "String" {
						isTypeString = true
					}
				}
			}
			if inArray != nil && isArrayField && isTypeString {
				inPatterns := []string{}
				allowNull := false
				for listIndex, listElem := range inArray {
					if listElem == nil {
						// A nil element means "or the column IS NULL"; it
						// consumes no placeholder, so later ones shift by 1.
						allowNull = true
					} else {
						values = append(values, listElem)
						i := 0
						if allowNull {
							i = index + listIndex - 1
						} else {
							i = index + listIndex
						}
						inPatterns = append(inPatterns, fmt.Sprintf("$%d", i))
					}
				}
				if allowNull {
					patterns = append(patterns, fmt.Sprintf(`("%s" IS NULL OR "%s" && ARRAY[%s])`, fieldName, fieldName, strings.Join(inPatterns, ",")))
				} else {
					patterns = append(patterns, fmt.Sprintf(`("%s" && ARRAY[%s])`, fieldName, strings.Join(inPatterns, ",")))
				}
				index = index + len(inPatterns)
			} else if isInOrNin {
				// createConstraint closes over patterns/values/index so both
				// $in and $nin share one code path.
				createConstraint := func(baseArray types.S, notIn bool) {
					if len(baseArray) > 0 {
						not := ""
						if notIn {
							not = " NOT "
						}
						if isArrayField {
							patterns = append(patterns, fmt.Sprintf(`%s array_contains("%s", $%d)`, not, fieldName, index))
							j, _ := json.Marshal(baseArray)
							values = append(values, string(j))
							index = index + 1
						} else {
							inPatterns := []string{}
							for listIndex, listElem := range baseArray {
								values = append(values, listElem)
								inPatterns = append(inPatterns, fmt.Sprintf("$%d", index+listIndex))
							}
							patterns = append(patterns, fmt.Sprintf(`"%s" %s IN (%s)`, fieldName, not, strings.Join(inPatterns, ",")))
							index = index + len(inPatterns)
						}
					} else if !notIn {
						// Empty $in can only match NULL.
						patterns = append(patterns, fmt.Sprintf(`"%s" IS NULL`, fieldName))
					}
				}
				if inArray != nil {
					createConstraint(inArray, false)
				}
				if ninArray != nil {
					createConstraint(ninArray, true)
				}
			}
			// $all — array must contain every listed element.
			allArray := utils.A(value["$all"])
			if allArray != nil && isArrayField {
				patterns = append(patterns, fmt.Sprintf(`array_contains_all("%s", $%d::jsonb)`, fieldName, index))
				j, _ := json.Marshal(allArray)
				values = append(values, string(j))
				index = index + 1
			}
			// $exists — presence maps to IS (NOT) NULL.
			if b, ok := value["$exists"].(bool); ok {
				if b {
					patterns = append(patterns, fmt.Sprintf(`"%s" IS NOT NULL`, fieldName))
				} else {
					patterns = append(patterns, fmt.Sprintf(`"%s" IS NULL`, fieldName))
				}
			}
			// $nearSphere — geo filter plus a distance sort. $maxDistance is
			// in radians; 6371km is the Earth radius used for conversion.
			if point := utils.M(value["$nearSphere"]); point != nil {
				var distance float64
				if v, ok := value["$maxDistance"].(float64); ok {
					distance = v
				}
				distanceInKM := distance * 6371 * 1000
				patterns = append(patterns, fmt.Sprintf(`ST_distance_sphere("%s"::geometry, POINT($%d, $%d)::geometry) <= $%d`, fieldName, index, index+1, index+2))
				sorts = append(sorts, fmt.Sprintf(`ST_distance_sphere("%s"::geometry, POINT($%d, $%d)::geometry) ASC`, fieldName, index, index+1))
				values = append(values, point["longitude"], point["latitude"], distanceInKM)
				index = index + 3
			}
			// $within/$box — bounding-box containment via Postgres box type.
			if within := utils.M(value["$within"]); within != nil {
				if box := utils.A(within["$box"]); len(box) == 2 {
					box1 := utils.M(box[0])
					box2 := utils.M(box[1])
					if box1 != nil && box2 != nil {
						left := box1["longitude"]
						bottom := box1["latitude"]
						right := box2["longitude"]
						top := box2["latitude"]
						patterns = append(patterns, fmt.Sprintf(`"%s"::point <@ $%d::box`, fieldName, index))
						values = append(values, fmt.Sprintf("((%v, %v), (%v, %v))", left, bottom, right, top))
						index = index + 1
					}
				}
			}
			// $regex — "i" option switches to the case-insensitive operator,
			// "x" strips comments/whitespace first. NOTE(review): the regex
			// is inlined into the SQL, not parameterized.
			if regex := utils.S(value["$regex"]); regex != "" {
				operator := "~"
				opts := utils.S(value["$options"])
				if opts != "" {
					if strings.Contains(opts, "i") {
						operator = "~*"
					}
					if strings.Contains(opts, "x") {
						regex = removeWhiteSpace(regex)
					}
				}
				regex = processRegexPattern(regex)
				patterns = append(patterns, fmt.Sprintf(`"%s" %s '%s'`, fieldName, operator, regex))
			}
			// Pointer — stored as the bare objectId in scalar columns, or as
			// the full pointer object inside array columns.
			if utils.S(value["__type"]) == "Pointer" {
				if isArrayField {
					patterns = append(patterns, fmt.Sprintf(`array_contains("%s", $%d)`, fieldName, index))
					j, _ := json.Marshal(types.S{value})
					values = append(values, string(j))
					index = index + 1
				} else {
					patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
					values = append(values, value["objectId"])
					index = index + 1
				}
			}
			// Date — compared by its ISO string.
			if utils.S(value["__type"]) == "Date" {
				patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
				values = append(values, value["iso"])
				index = index + 1
			}
			// $lt/$lte/$gt/$gte via the comparator lookup table.
			for cmp, pgComparator := range parseToPosgresComparator {
				if v, ok := value[cmp]; ok {
					patterns = append(patterns, fmt.Sprintf(`"%s" %s $%d`, fieldName, pgComparator, index))
					values = append(values, toPostgresValue(v))
					index = index + 1
				}
			}
		}
		if fieldValue == nil {
			patterns = append(patterns, fmt.Sprintf(`"%s" IS NULL`, fieldName))
		}
		// If no branch produced a pattern, the operator is unsupported.
		if initialPatternsLength == len(patterns) {
			s, _ := json.Marshal(fieldValue)
			return nil, errs.E(errs.OperationForbidden, "Postgres doesn't support this query type yet "+string(s))
		}
	}
	for i, v := range values {
		values[i] = transformValue(v)
	}
	return &whereClause{strings.Join(patterns, " AND "), values, sorts}, nil
}
// removeWhiteSpace implements the "x" (extended) regex option: it strips
// comment lines, trailing comments introduced by an unescaped '#', and
// unescaped whitespace, then trims the result.
func removeWhiteSpace(s string) string {
	if !strings.HasSuffix(s, "\n") {
		s += "\n"
	}
	// Each step is a (pattern, replacement) pair applied in order.
	steps := []struct {
		pattern string
		repl    string
	}{
		{`(?im)^#.*\n`, ""},         // whole-line comments
		{`(?im)([^\\])#.*\n`, "$1"}, // trailing comments after a non-backslash
		{`(?im)([^\\])\s+`, "$1"},   // whitespace not preceded by a backslash
		{`^\s+`, ""},                // leading whitespace
	}
	for _, step := range steps {
		s = regexp.MustCompile(step.pattern).ReplaceAllString(s, step.repl)
	}
	return strings.TrimSpace(s)
}
// processRegexPattern converts a Parse regex into a Postgres-compatible
// one: a leading "^" or trailing "$" anchor is preserved and the rest is
// run through literalizeRegexPart to expand \Q...\E quoting.
func processRegexPattern(s string) string {
	switch {
	case strings.HasPrefix(s, "^"):
		return "^" + literalizeRegexPart(strings.TrimPrefix(s, "^"))
	case strings.HasSuffix(s, "$"):
		return literalizeRegexPart(strings.TrimSuffix(s, "$")) + "$"
	default:
		return literalizeRegexPart(s)
	}
}
// literalAlnum matches an ASCII letter or digit. Compiled once at package
// scope — the original compiled this pattern for every character of every
// call via regexp.MatchString.
var literalAlnum = regexp.MustCompile(`[0-9a-zA-Z]`)

// createLiteralRegex escapes s so that it matches literally inside a
// Postgres regex embedded in a SQL string: alphanumeric characters pass
// through, single quotes are doubled (SQL escaping), and everything else
// is backslash-escaped.
func createLiteralRegex(s string) string {
	var b strings.Builder
	for _, c := range strings.Split(s, "") {
		if literalAlnum.MatchString(c) {
			b.WriteString(c)
			continue
		}
		if c == `'` {
			b.WriteString(`''`)
		} else {
			b.WriteString(`\` + c)
		}
	}
	return b.String()
}
// literalizeRegexPart expands PCRE \Q...\E quoted sections into explicitly
// escaped characters (via createLiteralRegex), recursing on the prefix
// before each quoted section, and finally strips stray \Q/\E markers and
// SQL-escapes single quotes.
func literalizeRegexPart(s string) string {
	// Go's regexp has no (?!) negative lookahead, so the original pattern
	// /\\Q((?!\\E).*)\\E$/ is replaced with an equivalent alternation:
	// /\\Q(\\[^E\n\r].*|[^\\\n\r].*|.??)\\E$/
	matcher1 := regexp.MustCompile(`\\Q(\\[^E\n\r].*|[^\\\n\r].*|.??)\\E$`)
	result1 := matcher1.FindStringSubmatch(s)
	if len(result1) > 1 {
		// \Q...\E at end of string: literalize the quoted part, recurse on
		// whatever precedes it.
		index := strings.Index(s, result1[0])
		prefix := s[:index]
		remaining := result1[1]
		return literalizeRegexPart(prefix) + createLiteralRegex(remaining)
	}
	// Same idea for an unterminated \Q... (no closing \E):
	// /\\Q((?!\\E).*)$/ becomes /\\Q(\\[^E\n\r].*|[^\\\n\r].*|.??)$/
	matcher2 := regexp.MustCompile(`\\Q(\\[^E\n\r].*|[^\\\n\r].*|.??)$`)
	result2 := matcher2.FindStringSubmatch(s)
	if len(result2) > 1 {
		index := strings.Index(s, result2[0])
		prefix := s[:index]
		remaining := result2[1]
		return literalizeRegexPart(prefix) + createLiteralRegex(remaining)
	}
	// Remove any remaining unescaped \E and \Q markers.
	re := regexp.MustCompile(`([^\\])(\\E)`)
	s = re.ReplaceAllString(s, "$1")
	re = regexp.MustCompile(`([^\\])(\\Q)`)
	s = re.ReplaceAllString(s, "$1")
	re = regexp.MustCompile(`^\\E`)
	s = re.ReplaceAllString(s, "")
	re = regexp.MustCompile(`^\\Q`)
	s = re.ReplaceAllString(s, "")
	// Double single quotes for SQL string embedding.
	re = regexp.MustCompile(`([^'])'`)
	s = re.ReplaceAllString(s, "$1''")
	re = regexp.MustCompile(`^'([^'])`)
	s = re.ReplaceAllString(s, "''$1")
	return s
}
// valueToDate wraps a time.Time in a Parse Date object
// ({"__type": "Date", "iso": "..."}); any other input yields nil.
func valueToDate(v interface{}) types.M {
	t, ok := v.(time.Time)
	if !ok {
		return nil
	}
	return types.M{
		"__type": "Date",
		"iso":    utils.TimetoString(t),
	}
}
优化添加字段时的性能
package postgres
import (
"database/sql"
"encoding/json"
"fmt"
"sort"
"strconv"
"strings"
"time"
"regexp"
"github.com/lfq7413/tomato/errs"
"github.com/lfq7413/tomato/types"
"github.com/lfq7413/tomato/utils"
"github.com/lib/pq"
)
// postgresSchemaCollectionName is the metadata table holding one schema
// document per class.
const postgresSchemaCollectionName = "_SCHEMA"

// Postgres error codes (pq.Error.Code) the adapter distinguishes.
const postgresRelationDoesNotExistError = "42P01"  // undefined_table
const postgresDuplicateRelationError = "42P07"     // duplicate_table
const postgresDuplicateColumnError = "42701"       // duplicate_column
const postgresDuplicateObjectError = "42710"       // duplicate_object
const postgresUniqueIndexViolationError = "23505"  // unique_violation
const postgresTransactionAbortedError = "25P02"    // in_failed_sql_transaction
// PostgresAdapter is the Postgres database adapter.
type PostgresAdapter struct {
	collectionPrefix string   // prefix prepended to collection (table) names
	collectionList []string   // cached list of known collections
	db *sql.DB                // open database handle (pooled)
}
// NewPostgresAdapter creates an adapter around an already-open database
// handle, using collectionPrefix for table naming.
func NewPostgresAdapter(collectionPrefix string, db *sql.DB) *PostgresAdapter {
	adapter := &PostgresAdapter{
		collectionPrefix: collectionPrefix,
		collectionList:   []string{},
		db:               db,
	}
	return adapter
}
// ensureSchemaCollectionExists creates the "_SCHEMA" metadata table if it
// does not exist yet. "Already exists" errors from a concurrent creator
// are tolerated; every other error is returned.
func (p *PostgresAdapter) ensureSchemaCollectionExists() error {
	_, err := p.db.Exec(`CREATE TABLE IF NOT EXISTS "_SCHEMA" ( "className" varChar(120), "schema" jsonb, "isParseClass" bool, PRIMARY KEY ("className") )`)
	if err == nil {
		return nil
	}
	if e, ok := err.(*pq.Error); ok {
		if e.Code == postgresDuplicateRelationError || e.Code == postgresUniqueIndexViolationError || e.Code == postgresDuplicateObjectError {
			// _SCHEMA was created concurrently by another request; fine.
			return nil
		}
	}
	// BUG FIX: previously a *pq.Error with any other code fell through and
	// was silently swallowed (the else branch only returned non-pq errors).
	return err
}
// ClassExists reports whether a table named name exists in the database.
// Any query error is treated as "does not exist".
func (p *PostgresAdapter) ClassExists(name string) bool {
	var exists bool
	qs := `SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = $1)`
	if err := p.db.QueryRow(qs, name).Scan(&exists); err != nil {
		return false
	}
	return exists
}
// SetClassLevelPermissions stores CLPs under the "classLevelPermissions"
// key of the class's schema document in _SCHEMA. A nil CLPs is stored as
// an empty object.
func (p *PostgresAdapter) SetClassLevelPermissions(className string, CLPs types.M) error {
	if err := p.ensureSchemaCollectionExists(); err != nil {
		return err
	}
	if CLPs == nil {
		CLPs = types.M{}
	}
	b, err := json.Marshal(CLPs)
	if err != nil {
		return err
	}
	qs := `UPDATE "_SCHEMA" SET "schema" = json_object_set_key("schema", $1::text, $2::jsonb) WHERE "className"=$3 `
	_, err = p.db.Exec(qs, "classLevelPermissions", string(b), className)
	return err
}
// CreateClass creates the class's table and records its schema document in
// _SCHEMA. Returns the schema in Parse format on success, or a
// DuplicateValue error if the class is already registered.
func (p *PostgresAdapter) CreateClass(className string, schema types.M) (types.M, error) {
	if schema == nil {
		schema = types.M{}
	}
	schema["className"] = className
	b, err := json.Marshal(schema)
	if err != nil {
		return nil, err
	}
	if err = p.createTable(className, schema); err != nil {
		return nil, err
	}
	_, err = p.db.Exec(`INSERT INTO "_SCHEMA" ("className", "schema", "isParseClass") VALUES ($1, $2, $3)`, className, string(b), true)
	if err == nil {
		return toParseSchema(schema), nil
	}
	if e, ok := err.(*pq.Error); ok && e.Code == postgresUniqueIndexViolationError {
		return nil, errs.E(errs.DuplicateValue, "Class "+className+" already exists.")
	}
	return nil, err
}
// createTable only creates the class's table (and any relation join
// tables); it does not record the schema in _SCHEMA.
func (p *PostgresAdapter) createTable(className string, schema types.M) error {
	if schema == nil {
		schema = types.M{}
	}
	// valuesArray and patternsArray are kept in lock-step: each column adds
	// a `"%s" %s` pattern and two values (name, type) that fmt.Sprintf
	// substitutes positionally below.
	valuesArray := types.S{}
	patternsArray := []string{}
	fields := utils.M(schema["fields"])
	if fields == nil {
		fields = types.M{}
	}
	// The user class always gets its auth/security bookkeeping columns.
	if className == "_User" {
		fields["_email_verify_token_expires_at"] = types.M{"type": "Date"}
		fields["_email_verify_token"] = types.M{"type": "String"}
		fields["_account_lockout_expires_at"] = types.M{"type": "Date"}
		fields["_failed_login_count"] = types.M{"type": "Number"}
		fields["_perishable_token"] = types.M{"type": "String"}
		fields["_perishable_token_expires_at"] = types.M{"type": "Date"}
		fields["_password_changed_at"] = types.M{"type": "Date"}
		fields["_password_history"] = types.M{"type": "Array"}
	}
	relations := []string{}
	for fieldName, t := range fields {
		parseType := utils.M(t)
		if parseType == nil {
			parseType = types.M{}
		}
		// Relation fields become separate join tables, not columns.
		if utils.S(parseType["type"]) == "Relation" {
			relations = append(relations, fieldName)
			continue
		}
		if fieldName == "_rperm" || fieldName == "_wperm" {
			parseType["contents"] = types.M{"type": "String"}
		}
		valuesArray = append(valuesArray, fieldName)
		postgresType, err := parseTypeToPostgresType(parseType)
		if err != nil {
			return err
		}
		valuesArray = append(valuesArray, postgresType)
		patternsArray = append(patternsArray, `"%s" %s`)
		if fieldName == "objectId" {
			valuesArray = append(valuesArray, fieldName)
			patternsArray = append(patternsArray, `PRIMARY KEY ("%s")`)
		}
	}
	qs := `CREATE TABLE IF NOT EXISTS "%s" (` + strings.Join(patternsArray, ",") + `)`
	values := append(types.S{className}, valuesArray...)
	qs = fmt.Sprintf(qs, values...)
	err := p.ensureSchemaCollectionExists()
	if err != nil {
		return err
	}
	_, err = p.db.Exec(qs)
	if err != nil {
		if e, ok := err.(*pq.Error); ok {
			if e.Code == postgresDuplicateRelationError {
				// Table already exists (created by a concurrent request);
				// ignore the error.
			} else {
				return err
			}
		} else {
			return err
		}
	}
	// Create the "_Join:<field>:<class>" table for each Relation field.
	for _, fieldName := range relations {
		name := fmt.Sprintf(`_Join:%s:%s`, fieldName, className)
		_, err = p.db.Exec(fmt.Sprintf(`CREATE TABLE IF NOT EXISTS "%s" ("relatedId" varChar(120), "owningId" varChar(120), PRIMARY KEY("relatedId", "owningId") )`, name))
		if err != nil {
			return err
		}
	}
	return nil
}
// AddFieldIfNotExists adds a column for fieldName (or, for Relation
// fields, a join table) and records the field's type in the class's
// _SCHEMA document if it is not already present. If the class table does
// not exist yet it is created with just this field.
func (p *PostgresAdapter) AddFieldIfNotExists(className, fieldName string, fieldType types.M) error {
	if fieldType == nil {
		fieldType = types.M{}
	}
	if utils.S(fieldType["type"]) != "Relation" {
		tp, err := parseTypeToPostgresType(fieldType)
		if err != nil {
			return err
		}
		qs := fmt.Sprintf(`ALTER TABLE "%s" ADD COLUMN "%s" %s`, className, fieldName, tp)
		_, err = p.db.Exec(qs)
		if err != nil {
			if e, ok := err.(*pq.Error); ok {
				if e.Code == postgresRelationDoesNotExistError {
					// Table missing: create the class with just this field.
					// TODO add the default fields as well.
					if _, ce := p.CreateClass(className, types.M{"fields": types.M{fieldName: fieldType}}); ce != nil {
						return ce
					}
				} else if e.Code == postgresDuplicateColumnError {
					// Column already created by a concurrent request; ignore.
				} else {
					return err
				}
			} else {
				return err
			}
		}
	} else {
		// Relation fields live in a separate join table instead of a column.
		name := fmt.Sprintf(`_Join:%s:%s`, fieldName, className)
		qs := fmt.Sprintf(`CREATE TABLE IF NOT EXISTS "%s" ("relatedId" varChar(120), "owningId" varChar(120), PRIMARY KEY("relatedId", "owningId") )`, name)
		if _, err := p.db.Exec(qs); err != nil {
			return err
		}
	}
	// Only update the schema document when the field is not recorded yet.
	qs := `SELECT "schema" FROM "_SCHEMA" WHERE "className" = $1 and ("schema"::json->'fields'->$2) is not null`
	rows, err := p.db.Query(qs, className, fieldName)
	if err != nil {
		return err
	}
	// BUG FIX: the result set was previously never closed, leaking a pooled
	// database connection on every call.
	exists := rows.Next()
	rows.Close()
	if exists {
		return nil
	}
	path := fmt.Sprintf(`{fields,%s}`, fieldName)
	qs = `UPDATE "_SCHEMA" SET "schema"=jsonb_set("schema", $1, $2) WHERE "className"=$3`
	b, _ := json.Marshal(fieldType)
	_, err = p.db.Exec(qs, path, string(b), className)
	return err
}
// DeleteClass drops the class's table (if present) and removes its
// _SCHEMA entry. Returns an empty map on success.
func (p *PostgresAdapter) DeleteClass(className string) (types.M, error) {
	if _, err := p.db.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS "%s"`, className)); err != nil {
		return nil, err
	}
	if _, err := p.db.Exec(`DELETE FROM "_SCHEMA" WHERE "className"=$1`, className); err != nil {
		return nil, err
	}
	return types.M{}, nil
}
// DeleteAllClasses drops every table registered in _SCHEMA, their relation
// join tables, and the adapter's bookkeeping tables. Intended for tests
// only; individual DROP failures are ignored.
func (p *PostgresAdapter) DeleteAllClasses() error {
	qs := `SELECT "className","schema" FROM "_SCHEMA"`
	rows, err := p.db.Query(qs)
	if err != nil {
		if e, ok := err.(*pq.Error); ok && e.Code == postgresRelationDoesNotExistError {
			// _SCHEMA itself does not exist: nothing to delete.
			return nil
		}
		return err
	}
	// BUG FIX: close the result set on every path; it previously stayed
	// open (leaking a pooled connection) whenever Scan/Unmarshal failed.
	defer rows.Close()
	classNames := []string{}
	schemas := []types.M{}
	for rows.Next() {
		var clsName string
		var sch types.M
		var v []byte
		if err := rows.Scan(&clsName, &v); err != nil {
			return err
		}
		if err := json.Unmarshal(v, &sch); err != nil {
			return err
		}
		classNames = append(classNames, clsName)
		schemas = append(schemas, sch)
	}
	joins := []string{}
	for _, sch := range schemas {
		joins = append(joins, joinTablesForSchema(sch)...)
	}
	classes := []string{"_SCHEMA", "_PushStatus", "_JobStatus", "_Hooks", "_GlobalConfig"}
	classes = append(classes, classNames...)
	classes = append(classes, joins...)
	for _, name := range classes {
		// Best-effort drop; per-table errors are deliberately ignored.
		p.db.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS "%s"`, name))
	}
	return nil
}
// DeleteFields removes fieldNames from both the stored schema document and
// the class's table. Relation fields have no column, so only their schema
// entry is removed.
func (p *PostgresAdapter) DeleteFields(className string, schema types.M, fieldNames []string) error {
	if schema == nil {
		schema = types.M{}
	}
	fields := utils.M(schema["fields"])
	if fields == nil {
		fields = types.M{}
	}
	// Collect the names that correspond to real columns.
	fldNames := types.S{}
	for _, fieldName := range fieldNames {
		field := utils.M(fields[fieldName])
		isRelation := field != nil && utils.S(field["type"]) == "Relation"
		if !isRelation {
			fldNames = append(fldNames, fieldName)
		}
		delete(fields, fieldName)
	}
	schema["fields"] = fields
	b, err := json.Marshal(schema)
	if err != nil {
		return err
	}
	if _, err = p.db.Exec(`UPDATE "_SCHEMA" SET "schema"=$1 WHERE "className"=$2`, b, className); err != nil {
		return err
	}
	if len(fldNames) == 0 {
		return nil
	}
	// Build `ALTER TABLE "cls" DROP COLUMN "a", DROP COLUMN "b", ...`.
	columnArray := make([]string, 0, len(fldNames))
	for range fldNames {
		columnArray = append(columnArray, `"%s"`)
	}
	columns := strings.Join(columnArray, ", DROP COLUMN ")
	values := append(types.S{className}, fldNames...)
	qs := fmt.Sprintf(`ALTER TABLE "%%s" DROP COLUMN %s`, columns)
	qs = fmt.Sprintf(qs, values...)
	_, err = p.db.Exec(qs)
	return err
}
// CreateObject inserts one object into the class's table, converting each
// field from its Parse representation to the column value Postgres
// expects. columnsArray and valuesArray are built in lock-step: entry i of
// each belongs to the same field (GeoPoint columns are deferred and
// appended at the end).
func (p *PostgresAdapter) CreateObject(className string, schema, object types.M) error {
	columnsArray := []string{}
	valuesArray := types.S{}
	geoPoints := types.M{}
	if schema == nil {
		schema = types.M{}
	}
	if len(object) == 0 {
		return nil
	}
	schema = toPostgresSchema(schema)
	object = handleDotFields(object)
	err := validateKeys(object)
	if err != nil {
		return err
	}
	// Pre-process the _auth_data_<provider> fields into object["authData"]
	// up front, to avoid adding map entries while ranging over the map
	// below (iteration behavior would be unspecified).
	for fieldName := range object {
		re := regexp.MustCompile(`^_auth_data_([a-zA-Z0-9_]+)$`)
		authDataMatch := re.FindStringSubmatch(fieldName)
		if authDataMatch != nil && len(authDataMatch) == 2 {
			provider := authDataMatch[1]
			authData := utils.M(object["authData"])
			if authData == nil {
				authData = types.M{}
			}
			authData[provider] = object[fieldName]
			delete(object, fieldName)
			object["authData"] = authData
		}
	}
	for fieldName := range object {
		columnsArray = append(columnsArray, fieldName)
		fields := utils.M(schema["fields"])
		if fields == nil {
			fields = types.M{}
		}
		// Internal _User columns are not in the schema; handle them by name.
		if fields[fieldName] == nil && className == "_User" {
			if fieldName == "_email_verify_token" ||
				fieldName == "_failed_login_count" ||
				fieldName == "_perishable_token" {
				valuesArray = append(valuesArray, object[fieldName])
			}
			if fieldName == "_password_history" {
				b, err := json.Marshal(object[fieldName])
				if err != nil {
					return err
				}
				valuesArray = append(valuesArray, b)
			}
			if fieldName == "_email_verify_token_expires_at" ||
				fieldName == "_account_lockout_expires_at" ||
				fieldName == "_perishable_token_expires_at" ||
				fieldName == "_password_changed_at" {
				if v := utils.M(object[fieldName]); v != nil && utils.S(v["iso"]) != "" {
					valuesArray = append(valuesArray, v["iso"])
				} else {
					valuesArray = append(valuesArray, nil)
				}
			}
			continue
		}
		tp := utils.M(fields[fieldName])
		if tp == nil {
			tp = types.M{}
		}
		// Convert the value by declared schema type.
		switch utils.S(tp["type"]) {
		case "Date":
			if v := utils.M(object[fieldName]); v != nil && utils.S(v["iso"]) != "" {
				valuesArray = append(valuesArray, v["iso"])
			} else {
				valuesArray = append(valuesArray, nil)
			}
		case "Pointer":
			// Pointers are stored as the bare objectId.
			if v := utils.M(object[fieldName]); v != nil && utils.S(v["objectId"]) != "" {
				valuesArray = append(valuesArray, v["objectId"])
			} else {
				valuesArray = append(valuesArray, "")
			}
		case "Array":
			b, err := json.Marshal(object[fieldName])
			if err != nil {
				return err
			}
			// _rperm/_wperm are Postgres text[] columns, which use {...}
			// literal syntax instead of JSON [...]: rewrite the brackets.
			if fieldName == "_rperm" || fieldName == "_wperm" {
				// '[' => '{'
				if b[0] == 91 {
					b[0] = 123
				}
				// ']' => '}'
				if len(b) > 0 && b[len(b)-1] == 93 {
					b[len(b)-1] = 125
				}
			}
			valuesArray = append(valuesArray, b)
		case "Object":
			b, err := json.Marshal(object[fieldName])
			if err != nil {
				return err
			}
			valuesArray = append(valuesArray, b)
		case "String", "Number", "Boolean":
			valuesArray = append(valuesArray, object[fieldName])
		case "File":
			// Files are stored by name only.
			if v := utils.M(object[fieldName]); v != nil && utils.S(v["name"]) != "" {
				valuesArray = append(valuesArray, v["name"])
			} else {
				valuesArray = append(valuesArray, "")
			}
		case "GeoPoint":
			// Defer geo points: they need a POINT(lon, lat) expression, so
			// remove the column added above and re-append it after the loop.
			geoPoints[fieldName] = object[fieldName]
			columnsArray = columnsArray[:len(columnsArray)-1]
		default:
			return errs.E(errs.OtherCause, "Type "+utils.S(tp["type"])+" not supported yet")
		}
	}
	for key := range geoPoints {
		columnsArray = append(columnsArray, key)
	}
	// Build the $n placeholder list; ACL columns are cast to text[],
	// Array columns to jsonb.
	initialValues := []string{}
	for index := range valuesArray {
		termination := ""
		fieldName := columnsArray[index]
		if fieldName == "_rperm" || fieldName == "_wperm" {
			termination = "::text[]"
		} else {
			fields := utils.M(schema["fields"])
			if fields == nil {
				fields = types.M{}
			}
			tp := utils.M(fields[fieldName])
			if tp == nil {
				tp = types.M{}
			}
			if utils.S(tp["type"]) == "Array" {
				termination = "::jsonb"
			}
		}
		initialValues = append(initialValues, fmt.Sprintf(`$%d%s`, index+1, termination))
	}
	// Each geo point contributes two parameters (lon, lat) wrapped in a
	// POINT($i, $j) expression.
	geoPointsInjects := []string{}
	for _, v := range geoPoints {
		value := utils.M(v)
		if value == nil {
			value = types.M{}
		}
		valuesArray = append(valuesArray, value["longitude"], value["latitude"])
		l := len(valuesArray)
		geoPointsInjects = append(geoPointsInjects, fmt.Sprintf(`POINT($%d, $%d)`, l-1, l))
	}
	columnsPatternArray := []string{}
	for _, key := range columnsArray {
		columnsPatternArray = append(columnsPatternArray, fmt.Sprintf(`"%s"`, key))
	}
	columnsPattern := strings.Join(columnsPatternArray, ",")
	initialValues = append(initialValues, geoPointsInjects...)
	valuesPattern := strings.Join(initialValues, ",")
	qs := fmt.Sprintf(`INSERT INTO "%s" (%s) VALUES (%s)`, className, columnsPattern, valuesPattern)
	_, err = p.db.Exec(qs, valuesArray...)
	if err != nil {
		if e, ok := err.(*pq.Error); ok {
			if e.Code == postgresUniqueIndexViolationError {
				return errs.E(errs.DuplicateValue, "A duplicate value for a field with unique values was provided")
			}
		}
		return err
	}
	return nil
}
// GetAllClasses returns every class schema stored in _SCHEMA, converted to
// Parse format.
func (p *PostgresAdapter) GetAllClasses() ([]types.M, error) {
	if err := p.ensureSchemaCollectionExists(); err != nil {
		return nil, err
	}
	rows, err := p.db.Query(`SELECT "className","schema" FROM "_SCHEMA"`)
	if err != nil {
		return nil, err
	}
	// BUG FIX: the result set was previously never closed, leaking a pooled
	// connection whenever Scan/Unmarshal returned early.
	defer rows.Close()
	schemas := []types.M{}
	for rows.Next() {
		var clsName string
		var sch types.M
		var v []byte
		if err := rows.Scan(&clsName, &v); err != nil {
			return nil, err
		}
		if err := json.Unmarshal(v, &sch); err != nil {
			return nil, err
		}
		// BUG FIX: a stored JSON null unmarshals to a nil map; writing
		// "className" into it would panic.
		if sch == nil {
			sch = types.M{}
		}
		sch["className"] = clsName
		schemas = append(schemas, toParseSchema(sch))
	}
	return schemas, nil
}
// GetClass returns the Parse-format schema for className, or an empty map
// (with nil error) when the class is not registered.
func (p *PostgresAdapter) GetClass(className string) (types.M, error) {
	if err := p.ensureSchemaCollectionExists(); err != nil {
		return nil, err
	}
	rows, err := p.db.Query(`SELECT "schema" FROM "_SCHEMA" WHERE "className"=$1`, className)
	if err != nil {
		return nil, err
	}
	// BUG FIX: the result set was previously never closed, leaking a pooled
	// database connection on every call.
	defer rows.Close()
	schema := types.M{}
	if !rows.Next() {
		// Unknown class: empty schema, no error.
		return schema, nil
	}
	var v []byte
	if err = rows.Scan(&v); err != nil {
		return nil, err
	}
	if err = json.Unmarshal(v, &schema); err != nil {
		return nil, err
	}
	return toParseSchema(schema), nil
}
// DeleteObjectsByQuery deletes every object matching query; an empty query
// deletes all rows. Returns ObjectNotFound when nothing matched or when
// the table does not exist.
func (p *PostgresAdapter) DeleteObjectsByQuery(className string, schema, query types.M) error {
	where, err := buildWhereClause(schema, query, 1)
	if err != nil {
		return err
	}
	pattern := where.pattern
	if len(query) == 0 {
		pattern = "TRUE"
	}
	qs := fmt.Sprintf(`WITH deleted AS (DELETE FROM "%s" WHERE %s RETURNING *) SELECT count(*) FROM deleted`, className, pattern)
	var count int
	err = p.db.QueryRow(qs, where.values...).Scan(&count)
	if err != nil {
		if e, ok := err.(*pq.Error); ok && e.Code == postgresRelationDoesNotExistError {
			// A missing table is reported exactly like "no match".
			return errs.E(errs.ObjectNotFound, "Object not found.")
		}
		return err
	}
	if count == 0 {
		return errs.E(errs.ObjectNotFound, "Object not found.")
	}
	return nil
}
// Find runs query against className and returns the matching objects in
// Parse format. options may carry "limit", "skip", "sort" (a []string,
// leading '-' meaning descending) and "keys" (a []string of columns to
// select). A missing table yields an empty result, not an error.
func (p *PostgresAdapter) Find(className string, schema, query, options types.M) ([]types.M, error) {
	if schema == nil {
		schema = types.M{}
	}
	if options == nil {
		options = types.M{}
	}
	var hasLimit bool
	var hasSkip bool
	if _, ok := options["limit"]; ok {
		hasLimit = true
	}
	if _, ok := options["skip"]; ok {
		hasSkip = true
	}
	values := types.S{}
	where, err := buildWhereClause(schema, query, 1)
	if err != nil {
		return nil, err
	}
	values = append(values, where.values...)
	var wherePattern string
	var limitPattern string
	var skipPattern string
	if where.pattern != "" {
		wherePattern = `WHERE ` + where.pattern
	}
	if hasLimit {
		limitPattern = fmt.Sprintf(`LIMIT $%d`, len(values)+1)
		values = append(values, options["limit"])
	}
	if hasSkip {
		skipPattern = fmt.Sprintf(`OFFSET $%d`, len(values)+1)
		values = append(values, options["skip"])
	}
	var sortPattern string
	if _, ok := options["sort"]; ok {
		if keys, ok := options["sort"].([]string); ok {
			postgresSort := []string{}
			for _, key := range keys {
				var postgresKey string
				if strings.HasPrefix(key, "-") {
					key = key[1:]
					postgresKey = fmt.Sprintf(`"%s" DESC`, key)
				} else {
					postgresKey = fmt.Sprintf(`"%s" ASC`, key)
				}
				postgresSort = append(postgresSort, postgresKey)
			}
			sorting := strings.Join(postgresSort, ",")
			if len(postgresSort) > 0 {
				sortPattern = fmt.Sprintf(`ORDER BY %s`, sorting)
			}
		}
	}
	// Geo queries ($nearSphere) override any requested sort with their
	// distance ordering.
	if len(where.sorts) > 0 {
		sortPattern = fmt.Sprintf(`ORDER BY %s`, strings.Join(where.sorts, ","))
	}
	columns := "*"
	if _, ok := options["keys"]; ok {
		if keys, ok := options["keys"].([]string); ok {
			postgresKeys := []string{}
			for _, key := range keys {
				if key != "" {
					postgresKeys = append(postgresKeys, fmt.Sprintf(`"%s"`, key))
				}
			}
			if len(postgresKeys) > 0 {
				columns = strings.Join(postgresKeys, ",")
			}
		}
	}
	qs := fmt.Sprintf(`SELECT %s FROM "%s" %s %s %s %s`, columns, className, wherePattern, sortPattern, limitPattern, skipPattern)
	rows, err := p.db.Query(qs, values...)
	if err != nil {
		if e, ok := err.(*pq.Error); ok {
			// A missing table yields an empty result set, not an error.
			if e.Code == postgresRelationDoesNotExistError {
				return []types.M{}, nil
			}
		}
		return nil, err
	}
	// BUG FIX: the result set was previously never closed, leaking a pooled
	// database connection whenever a Scan/conversion error returned early.
	defer rows.Close()
	fields := utils.M(schema["fields"])
	if fields == nil {
		fields = types.M{}
	}
	results := []types.M{}
	var resultColumns []string
	for rows.Next() {
		if resultColumns == nil {
			resultColumns, err = rows.Columns()
			if err != nil {
				return nil, err
			}
		}
		// Scan each column into an interface{} so arbitrary schemas work.
		resultValues := []*interface{}{}
		values := types.S{}
		for i := 0; i < len(resultColumns); i++ {
			var v interface{}
			resultValues = append(resultValues, &v)
			values = append(values, &v)
		}
		err = rows.Scan(values...)
		if err != nil {
			return nil, err
		}
		object := types.M{}
		for i, field := range resultColumns {
			object[field] = *resultValues[i]
		}
		object, err = postgresObjectToParseObject(object, fields)
		if err != nil {
			return nil, err
		}
		results = append(results, object)
	}
	return results, nil
}
// Count returns the number of rows matching query. A missing table counts
// as zero.
func (p *PostgresAdapter) Count(className string, schema, query types.M) (int, error) {
	where, err := buildWhereClause(schema, query, 1)
	if err != nil {
		return 0, err
	}
	wherePattern := ""
	if len(where.pattern) > 0 {
		wherePattern = `WHERE ` + where.pattern
	}
	qs := fmt.Sprintf(`SELECT count(*) FROM "%s" %s`, className, wherePattern)
	rows, err := p.db.Query(qs, where.values...)
	if err != nil {
		if e, ok := err.(*pq.Error); ok {
			if e.Code == postgresRelationDoesNotExistError {
				return 0, nil
			}
		}
		return 0, err
	}
	// BUG FIX: the result set was previously never closed, leaking a pooled
	// database connection on every call.
	defer rows.Close()
	var count int
	if rows.Next() {
		if err = rows.Scan(&count); err != nil {
			// BUG FIX: this error was previously swallowed (`return 0, nil`),
			// silently reporting a count of zero on scan failure.
			return 0, err
		}
	}
	return count, nil
}
// UpdateObjectsByQuery applies update to the objects matching query by
// delegating to FindOneAndUpdate and discarding the returned object.
func (p *PostgresAdapter) UpdateObjectsByQuery(className string, schema, query, update types.M) error {
	if _, err := p.FindOneAndUpdate(className, schema, query, update); err != nil {
		return err
	}
	return nil
}
// FindOneAndUpdate applies update to the objects in className that match
// query and returns one updated row converted to Parse format (an empty map
// when nothing matched). It understands the Parse update operators
// (Increment, Add, Delete, Remove, AddUnique), "_auth_data_<provider>" keys,
// dotted field names, and Pointer/Date/File/GeoPoint values.
func (p *PostgresAdapter) FindOneAndUpdate(className string, schema, query, update types.M) (types.M, error) {
	updatePatterns := []string{}
	values := types.S{}
	index := 1
	if schema == nil {
		schema = types.M{}
	}
	schema = toPostgresSchema(schema)
	fields := utils.M(schema["fields"])
	if fields == nil {
		fields = types.M{}
	}
	// Keep the pre-expansion update: dotted Increment/Delete keys are
	// consulted later when patching jsonb Object columns.
	originalUpdate := utils.CopyMapM(update)
	update = handleDotFields(update)
	// Fold "_auth_data_<provider>" keys back into a single authData map.
	// Fix: compile the matcher once instead of once per field.
	authDataRE := regexp.MustCompile(`^_auth_data_([a-zA-Z0-9_]+)$`)
	for fieldName, v := range update {
		authDataMatch := authDataRE.FindStringSubmatch(fieldName)
		if authDataMatch != nil && len(authDataMatch) == 2 {
			provider := authDataMatch[1]
			delete(update, fieldName)
			authData := utils.M(update["authData"])
			if authData == nil {
				authData = types.M{}
			}
			authData[provider] = v
			update["authData"] = authData
		}
	}
	for fieldName, fieldValue := range update {
		// nil value => set the column to NULL.
		if fieldValue == nil {
			updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = NULL`, fieldName))
			continue
		}
		if fieldName == "authData" {
			// Build nested json_object_set_key(...) calls, one per provider,
			// so each provider entry is merged (or deleted) individually.
			generate := func(jsonb, key, value string) string {
				return fmt.Sprintf(`json_object_set_key(COALESCE(%s, '{}'::jsonb), %s, %s)::jsonb`, jsonb, key, value)
			}
			lastKey := fmt.Sprintf(`"%s"`, fieldName)
			authData := utils.M(fieldValue)
			if authData == nil {
				continue
			}
			for key, value := range authData {
				lastKey = generate(lastKey, fmt.Sprintf(`$%d::text`, index), fmt.Sprintf(`$%d::jsonb`, index+1))
				index = index + 2
				if value != nil {
					if v := utils.M(value); v != nil && utils.S(v["__op"]) == "Delete" {
						value = nil
					} else {
						// Bug fix: marshal the original value rather than its
						// map view — utils.M returns nil for non-map values,
						// which would have been serialized as JSON null.
						b, err := json.Marshal(value)
						if err != nil {
							return nil, err
						}
						value = string(b)
					}
				}
				values = append(values, key, value)
			}
			updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = %s`, fieldName, lastKey))
			continue
		}
		if fieldName == "updatedAt" {
			updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, fieldValue)
			index = index + 1
			continue
		}
		// Plain scalar assignments.
		switch fieldValue.(type) {
		case string, bool, float64, int:
			updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, fieldValue)
			index = index + 1
			continue
		case time.Time:
			updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, utils.TimetoString(fieldValue.(time.Time)))
			index = index + 1
			continue
		}
		if object := utils.M(fieldValue); object != nil {
			// Parse update operators encoded as {"__op": ...}.
			switch utils.S(object["__op"]) {
			case "Increment":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = COALESCE("%s", 0) + $%d`, fieldName, fieldName, index))
				values = append(values, object["amount"])
				index = index + 1
				continue
			case "Add":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = array_add(COALESCE("%s", '[]'::jsonb), $%d::jsonb)`, fieldName, fieldName, index))
				b, err := json.Marshal(object["objects"])
				if err != nil {
					return nil, err
				}
				values = append(values, string(b))
				index = index + 1
				continue
			case "Delete":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
				values = append(values, nil)
				index = index + 1
				continue
			case "Remove":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = array_remove(COALESCE("%s", '[]'::jsonb), $%d::jsonb)`, fieldName, fieldName, index))
				b, err := json.Marshal(object["objects"])
				if err != nil {
					return nil, err
				}
				values = append(values, string(b))
				index = index + 1
				continue
			case "AddUnique":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = array_add_unique(COALESCE("%s", '[]'::jsonb), $%d::jsonb)`, fieldName, fieldName, index))
				b, err := json.Marshal(object["objects"])
				if err != nil {
					return nil, err
				}
				values = append(values, string(b))
				index = index + 1
				continue
			}
			// Typed Parse values encoded as {"__type": ...}.
			switch utils.S(object["__type"]) {
			case "Pointer":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
				values = append(values, object["objectId"])
				index = index + 1
				continue
			case "Date", "File":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
				values = append(values, toPostgresValue(object))
				index = index + 1
				continue
			case "GeoPoint":
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = POINT($%d, $%d)`, fieldName, index, index+1))
				values = append(values, object["longitude"], object["latitude"])
				index = index + 2
				continue
			case "Relation":
				// Relations are stored in join tables, nothing to update here.
				continue
			}
			if tp := utils.M(fields[fieldName]); tp != nil && utils.S(tp["type"]) == "Object" {
				// jsonb Object column: dotted "<field>.<key>" Increment ops in
				// the original update become jsonb concat expressions.
				keysToIncrement := []string{}
				for k, v := range originalUpdate {
					if o := utils.M(v); o != nil && utils.S(o["__op"]) == "Increment" {
						if keys := strings.Split(k, "."); len(keys) == 2 && keys[0] == fieldName {
							keysToIncrement = append(keysToIncrement, keys[1])
						}
					}
				}
				incrementPatterns := ""
				if len(keysToIncrement) > 0 {
					for _, key := range keysToIncrement {
						increment := utils.M(object[key])
						if increment == nil {
							continue
						}
						var amount interface{}
						switch increment["amount"].(type) {
						case float64, int:
							amount = increment["amount"]
						}
						if amount == nil {
							continue
						}
						incrementPatterns += " || " + fmt.Sprintf(`CONCAT('{"%s":', COALESCE("%s"->>'%s', '0')::float + %v, '}')::jsonb`, key, fieldName, key, amount)
					}
					// The incremented keys are handled by SQL, drop them from
					// the jsonb document that will be merged in.
					for _, key := range keysToIncrement {
						delete(object, key)
					}
				}
				// Dotted "<field>.<key>" Delete ops become "- 'key'" patterns.
				keysToDelete := []string{}
				for k, v := range originalUpdate {
					if o := utils.M(v); o != nil && utils.S(o["__op"]) == "Delete" {
						if keys := strings.Split(k, "."); len(keys) == 2 && keys[0] == fieldName {
							keysToDelete = append(keysToDelete, keys[1])
						}
					}
				}
				deletePatterns := ""
				for _, k := range keysToDelete {
					deletePatterns = deletePatterns + fmt.Sprintf(` - '%s'`, k)
				}
				updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = ( COALESCE("%s", '{}'::jsonb) %s %s || $%d::jsonb )`, fieldName, fieldName, deletePatterns, incrementPatterns, index))
				b, err := json.Marshal(object)
				if err != nil {
					return nil, err
				}
				values = append(values, string(b))
				index = index + 1
				continue
			}
		}
		if array := utils.A(fieldValue); array != nil {
			if tp := utils.M(fields[fieldName]); tp != nil && utils.S(tp["type"]) == "Array" {
				expectedType, err := parseTypeToPostgresType(tp)
				if err != nil {
					return nil, err
				}
				b, err := json.Marshal(fieldValue)
				if err != nil {
					return nil, err
				}
				if expectedType == "text[]" {
					// Rewrite the JSON array delimiters into Postgres array
					// literal braces: '[' => '{'
					if b[0] == 91 {
						b[0] = 123
					}
					// ']' => '}'
					if len(b) > 0 && b[len(b)-1] == 93 {
						b[len(b)-1] = 125
					}
					updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d::text[]`, fieldName, index))
				} else {
					updatePatterns = append(updatePatterns, fmt.Sprintf(`"%s" = $%d::jsonb`, fieldName, index))
				}
				values = append(values, string(b))
				index = index + 1
				continue
			}
		}
		b, _ := json.Marshal(fieldValue)
		return nil, errs.E(errs.OperationForbidden, "Postgres doesn't support update "+string(b)+" yet")
	}
	where, err := buildWhereClause(schema, query, index)
	if err != nil {
		return nil, err
	}
	values = append(values, where.values...)
	// TODO: restrict the UPDATE to a single row here; UpdateObjectsByQuery
	// relies on the current behavior of updating every match.
	qs := fmt.Sprintf(`UPDATE "%s" SET %s WHERE %s RETURNING *`, className, strings.Join(updatePatterns, ","), where.pattern)
	rows, err := p.db.Query(qs, values...)
	if err != nil {
		if e, ok := err.(*pq.Error); ok {
			// A missing table means there is nothing to update.
			if e.Code == postgresRelationDoesNotExistError {
				return nil, errs.E(errs.ObjectNotFound, "Object not found.")
			}
		}
		return nil, err
	}
	// Bug fix: the result set was never closed, leaking the connection.
	defer rows.Close()
	object := types.M{}
	if rows.Next() {
		resultColumns, err := rows.Columns()
		if err != nil {
			return nil, err
		}
		// Scan every column into an interface{} slot, keyed by column name.
		resultValues := []*interface{}{}
		values := types.S{}
		for i := 0; i < len(resultColumns); i++ {
			var v interface{}
			resultValues = append(resultValues, &v)
			values = append(values, &v)
		}
		err = rows.Scan(values...)
		if err != nil {
			return nil, err
		}
		for i, field := range resultColumns {
			object[field] = *resultValues[i]
		}
		object, err = postgresObjectToParseObject(object, fields)
		if err != nil {
			return nil, err
		}
	}
	return object, nil
}
// UpsertOneObject updates the first object matching query, creating a new
// object from query overlaid with update when nothing matched.
// Used only for config and hooks.
func (p *PostgresAdapter) UpsertOneObject(className string, schema, query, update types.M) error {
	object, err := p.FindOneAndUpdate(className, schema, query, update)
	if err != nil {
		return err
	}
	if len(object) > 0 {
		// An existing object was updated; nothing left to do.
		return nil
	}
	// No match: merge the query keys with the update keys and insert that.
	createValue := types.M{}
	for k, v := range query {
		createValue[k] = v
	}
	for k, v := range update {
		createValue[k] = v
	}
	return p.CreateObject(className, schema, createValue)
}
// EnsureUniqueness adds a UNIQUE constraint covering fieldNames to className.
// The field list is sorted so the constraint name is deterministic. A
// constraint of the same name that already exists is treated as success; a
// uniqueness violation is reported as a DuplicateValue error.
func (p *PostgresAdapter) EnsureUniqueness(className string, schema types.M, fieldNames []string) error {
	sort.Strings(fieldNames)
	constraintName := `unique_` + strings.Join(fieldNames, "_")
	constraintPatterns := make([]string, 0, len(fieldNames))
	for _, fieldName := range fieldNames {
		constraintPatterns = append(constraintPatterns, `"`+fieldName+`"`)
	}
	qs := fmt.Sprintf(`ALTER TABLE "%s" ADD CONSTRAINT "%s" UNIQUE (%s)`, className, constraintName, strings.Join(constraintPatterns, ","))
	_, err := p.db.Exec(qs)
	if err != nil {
		if e, ok := err.(*pq.Error); ok {
			if e.Code == postgresDuplicateRelationError && strings.Contains(e.Message, constraintName) {
				// The constraint already exists — that is the desired state.
				return nil
			}
			if e.Code == postgresUniqueIndexViolationError && strings.Contains(e.Message, constraintName) {
				return errs.E(errs.DuplicateValue, "A duplicate value for a field with unique values was provided")
			}
			// Bug fix: any other pq error code was previously swallowed and
			// nil was returned; propagate it instead.
			return err
		}
		return err
	}
	return nil
}
// PerformInitialization creates the volatile class tables listed in
// options["VolatileClassesSchemas"] (ignoring already-existing tables and
// invalid class names) and installs the helper SQL functions that the
// update/query operators rely on.
func (p *PostgresAdapter) PerformInitialization(options types.M) error {
	if options == nil {
		options = types.M{}
	}
	if volatileClassesSchemas, ok := options["VolatileClassesSchemas"].([]types.M); ok {
		for _, schema := range volatileClassesSchemas {
			if err := p.createTable(utils.S(schema["className"]), schema); err != nil {
				switch e := err.(type) {
				case *pq.Error:
					// An existing table is fine; anything else is fatal.
					if e.Code != postgresDuplicateRelationError {
						return err
					}
				case *errs.TomatoError:
					// Invalid class names are tolerated during bootstrap.
					if e.Code != errs.InvalidClassName {
						return err
					}
				default:
					return err
				}
			}
		}
	}
	// Install the helper SQL functions, in the same order as before.
	for _, fn := range []string{
		jsonObjectSetKey,
		arrayAdd,
		arrayAddUnique,
		arrayRemove,
		arrayContainsAll,
		arrayContains,
	} {
		if _, err := p.db.Exec(fn); err != nil {
			return err
		}
	}
	return nil
}
// postgresObjectToParseObject converts one database row (column name ->
// driver value) into Parse object format. Each column is decoded according
// to its declared type in fields; byte-slice driver values are parsed per
// type, and special columns (_rperm/_wperm ACL arrays, timestamp columns)
// are post-processed afterwards. nil-valued fields are removed at the end.
func postgresObjectToParseObject(object, fields types.M) (types.M, error) {
	if len(object) == 0 {
		return object, nil
	}
	// Type-directed decoding of each schema field present in the row.
	for fieldName, v := range fields {
		tp := utils.M(v)
		if tp == nil {
			continue
		}
		objectType := utils.S(tp["type"])
		if objectType == "Pointer" && object[fieldName] != nil {
			// Stored as the raw objectId; rebuild the Pointer wrapper.
			if v, ok := object[fieldName].([]byte); ok {
				object[fieldName] = types.M{
					"objectId":  string(v),
					"__type":    "Pointer",
					"className": tp["targetClass"],
				}
			} else {
				object[fieldName] = nil
			}
		} else if objectType == "Relation" {
			// Relations have no column data; only the descriptor is returned.
			object[fieldName] = types.M{
				"__type":    "Relation",
				"className": tp["targetClass"],
			}
		} else if objectType == "GeoPoint" && object[fieldName] != nil {
			// object[fieldName] = (10,20) (longitude, latitude)
			resString := ""
			if v, ok := object[fieldName].([]byte); ok {
				resString = string(v)
			}
			// Shortest valid form is "(a,b)" — 5 characters.
			if len(resString) < 5 {
				object[fieldName] = nil
				continue
			}
			// Strip the surrounding parentheses, then split on the comma.
			pointString := strings.Split(resString[1:len(resString)-1], ",")
			if len(pointString) != 2 {
				object[fieldName] = nil
				continue
			}
			longitude, err := strconv.ParseFloat(pointString[0], 64)
			if err != nil {
				return nil, err
			}
			latitude, err := strconv.ParseFloat(pointString[1], 64)
			if err != nil {
				return nil, err
			}
			object[fieldName] = types.M{
				"__type":    "GeoPoint",
				"longitude": longitude,
				"latitude":  latitude,
			}
		} else if objectType == "File" && object[fieldName] != nil {
			// Stored as the bare file name; rebuild the File wrapper.
			if v, ok := object[fieldName].([]byte); ok {
				object[fieldName] = types.M{
					"__type": "File",
					"name":   string(v),
				}
			} else {
				object[fieldName] = nil
			}
		} else if objectType == "String" && object[fieldName] != nil {
			if v, ok := object[fieldName].([]byte); ok {
				object[fieldName] = string(v)
			} else {
				object[fieldName] = nil
			}
		} else if objectType == "Object" && object[fieldName] != nil {
			// jsonb column: decode the raw bytes into a map.
			if v, ok := object[fieldName].([]byte); ok {
				var r types.M
				err := json.Unmarshal(v, &r)
				if err != nil {
					return nil, err
				}
				object[fieldName] = r
			} else {
				object[fieldName] = nil
			}
		} else if objectType == "Array" && object[fieldName] != nil {
			// _rperm/_wperm use Postgres text[] syntax, not JSON — handled below.
			if fieldName == "_rperm" || fieldName == "_wperm" {
				continue
			}
			if v, ok := object[fieldName].([]byte); ok {
				var r types.S
				err := json.Unmarshal(v, &r)
				if err != nil {
					return nil, err
				}
				object[fieldName] = r
			} else {
				object[fieldName] = nil
			}
		}
	}
	if object["_rperm"] != nil {
		// object["_rperm"] = {hello,world}
		// values are guaranteed not to contain ',' when _rperm is written
		resString := ""
		if v, ok := object["_rperm"].([]byte); ok {
			resString = string(v)
		}
		if len(resString) < 2 {
			object["_rperm"] = nil
		} else {
			// Strip the braces and split the comma-separated entries.
			keys := strings.Split(resString[1:len(resString)-1], ",")
			rperm := make(types.S, len(keys))
			for i, k := range keys {
				rperm[i] = k
			}
			object["_rperm"] = rperm
		}
	}
	if object["_wperm"] != nil {
		// object["_wperm"] = {hello,world}
		// values are guaranteed not to contain ',' when _wperm is written
		resString := ""
		if v, ok := object["_wperm"].([]byte); ok {
			resString = string(v)
		}
		if len(resString) < 2 {
			object["_wperm"] = nil
		} else {
			keys := strings.Split(resString[1:len(resString)-1], ",")
			wperm := make(types.S, len(keys))
			for i, k := range keys {
				wperm[i] = k
			}
			object["_wperm"] = wperm
		}
	}
	// createdAt/updatedAt are returned as plain ISO strings, not Date objects.
	if object["createdAt"] != nil {
		if v, ok := object["createdAt"].(time.Time); ok {
			object["createdAt"] = utils.TimetoString(v)
		} else {
			object["createdAt"] = nil
		}
	}
	if object["updatedAt"] != nil {
		if v, ok := object["updatedAt"].(time.Time); ok {
			object["updatedAt"] = utils.TimetoString(v)
		} else {
			object["updatedAt"] = nil
		}
	}
	// Internal expiry/timestamp columns become Parse Date objects (or nil).
	if object["expiresAt"] != nil {
		object["expiresAt"] = valueToDate(object["expiresAt"])
	}
	if object["_email_verify_token_expires_at"] != nil {
		object["_email_verify_token_expires_at"] = valueToDate(object["_email_verify_token_expires_at"])
	}
	if object["_account_lockout_expires_at"] != nil {
		object["_account_lockout_expires_at"] = valueToDate(object["_account_lockout_expires_at"])
	}
	if object["_perishable_token_expires_at"] != nil {
		object["_perishable_token_expires_at"] = valueToDate(object["_perishable_token_expires_at"])
	}
	if object["_password_changed_at"] != nil {
		object["_password_changed_at"] = valueToDate(object["_password_changed_at"])
	}
	// Drop nil fields and wrap any remaining time.Time values as Date
	// objects. (Deleting while ranging over a map is safe in Go.)
	for fieldName := range object {
		if object[fieldName] == nil {
			delete(object, fieldName)
		}
		if v, ok := object[fieldName].(time.Time); ok {
			object[fieldName] = types.M{
				"__type": "Date",
				"iso":    utils.TimetoString(v),
			}
		}
	}
	return object, nil
}
// parseToPosgresComparator maps Parse comparison operators to their SQL
// equivalents, used by buildWhereClause. (The "Posgres" spelling is kept as
// the identifier is referenced elsewhere in this file.)
var parseToPosgresComparator = map[string]string{
	"$gt":  ">",
	"$lt":  "<",
	"$gte": ">=",
	"$lte": "<=",
}
// parseTypeToPostgresType maps a Parse field type descriptor to the Postgres
// column type used to store it. A nil descriptor yields an empty type; an
// unknown type yields an IncorrectType error.
func parseTypeToPostgresType(t types.M) (string, error) {
	if t == nil {
		return "", nil
	}
	tp := utils.S(t["type"])
	switch tp {
	case "String", "File":
		// Files are stored by name, so both map to text.
		return "text", nil
	case "Date":
		return "timestamp with time zone", nil
	case "Object":
		return "jsonb", nil
	case "Boolean":
		return "boolean", nil
	case "Pointer":
		// Pointers store the fixed-length objectId.
		return "char(24)", nil
	case "Number":
		return "double precision", nil
	case "GeoPoint":
		return "point", nil
	case "Array":
		// Arrays of strings get a native text[] column; everything else
		// falls back to jsonb.
		if contents := utils.M(t["contents"]); contents != nil && utils.S(contents["type"]) == "String" {
			return "text[]", nil
		}
		return "jsonb", nil
	default:
		return "", errs.E(errs.IncorrectType, "no type for "+tp+" yet")
	}
}
// toPostgresValue unwraps Parse Date and File objects to the scalar that is
// actually stored (the "iso" timestamp string or the file "name"); any other
// value passes through unchanged.
func toPostgresValue(value interface{}) interface{} {
	v := utils.M(value)
	if v == nil {
		return value
	}
	switch utils.S(v["__type"]) {
	case "Date":
		return v["iso"]
	case "File":
		return v["name"]
	}
	return value
}
// transformValue unwraps a Parse Pointer object to its objectId; any other
// value passes through unchanged.
func transformValue(value interface{}) interface{} {
	if v := utils.M(value); v != nil && utils.S(v["__type"]) == "Pointer" {
		return v["objectId"]
	}
	return value
}
// emptyCLPS is a class-level-permissions map with every action present but
// no grants (all actions denied by default).
var emptyCLPS = types.M{
	"find":     types.M{},
	"get":      types.M{},
	"create":   types.M{},
	"update":   types.M{},
	"delete":   types.M{},
	"addField": types.M{},
}

// defaultCLPS grants every action to the public ("*"); used as the baseline
// when a stored schema does not specify permissions for an action.
var defaultCLPS = types.M{
	"find":     types.M{"*": true},
	"get":      types.M{"*": true},
	"create":   types.M{"*": true},
	"update":   types.M{"*": true},
	"delete":   types.M{"*": true},
	"addField": types.M{"*": true},
}
// toParseSchema converts a Postgres-side schema into Parse schema format:
// internal columns (_hashed_password for _User, _rperm, _wperm) are
// stripped, and class-level permissions default to public access for any
// action the stored schema does not mention.
func toParseSchema(schema types.M) types.M {
	if schema == nil {
		return nil
	}
	fields := utils.M(schema["fields"])
	if fields == nil {
		fields = types.M{}
	}
	if utils.S(schema["className"]) == "_User" {
		delete(fields, "_hashed_password")
	}
	// delete is a no-op on absent keys, so no existence checks are needed.
	delete(fields, "_wperm")
	delete(fields, "_rperm")
	// Start from public defaults; stored permissions override per action.
	clps := utils.CopyMap(defaultCLPS)
	if classLevelPermissions := utils.M(schema["classLevelPermissions"]); classLevelPermissions != nil {
		for k, v := range classLevelPermissions {
			clps[k] = v
		}
	}
	return types.M{
		"className":             schema["className"],
		"fields":                fields,
		"classLevelPermissions": clps,
	}
}
// toPostgresSchema augments a Parse schema in place with the internal
// columns every class carries on the Postgres side: the _rperm/_wperm ACL
// string arrays, plus password bookkeeping fields for the _User class.
func toPostgresSchema(schema types.M) types.M {
	if schema == nil {
		return nil
	}
	fields := utils.M(schema["fields"])
	if fields == nil {
		fields = types.M{}
	}
	// Distinct maps on purpose — callers may mutate one independently.
	fields["_wperm"] = types.M{
		"type":     "Array",
		"contents": types.M{"type": "String"},
	}
	fields["_rperm"] = types.M{
		"type":     "Array",
		"contents": types.M{"type": "String"},
	}
	if utils.S(schema["className"]) == "_User" {
		fields["_hashed_password"] = types.M{"type": "String"}
		fields["_password_history"] = types.M{"type": "Array"}
	}
	schema["fields"] = fields
	return schema
}
// handleDotFields expands dotted field names ("a.b.c") in object into nested
// maps, removing the original dotted key. A value whose __op is Delete is
// treated as nil, which only removes the dotted key without writing a value.
func handleDotFields(object types.M) types.M {
	for fieldName := range object {
		if !strings.Contains(fieldName, ".") {
			continue
		}
		components := strings.Split(fieldName, ".")
		value := object[fieldName]
		if v := utils.M(value); v != nil && utils.S(v["__op"]) == "Delete" {
			value = nil
		}
		currentObj := object
		for i, next := range components {
			if i == len(components)-1 {
				if value != nil {
					currentObj[next] = value
				}
				break
			}
			child := utils.M(currentObj[next])
			if child == nil {
				// Missing — or existing but non-map — intermediate value:
				// install a fresh map. (Bug fix: the original only handled
				// the missing case and would panic writing into the nil map
				// returned by utils.M for a non-map intermediate value.)
				child = types.M{}
				currentObj[next] = child
			}
			currentObj = child
		}
		delete(object, fieldName)
	}
	return object
}
// validateKeys walks object recursively and rejects any nested map key that
// contains a '$' or '.' character.
func validateKeys(object interface{}) error {
	obj := utils.M(object)
	if obj == nil {
		// Non-map values carry no keys to validate.
		return nil
	}
	for key, value := range obj {
		if err := validateKeys(value); err != nil {
			return err
		}
		if strings.ContainsAny(key, "$.") {
			return errs.E(errs.InvalidNestedKey, "Nested keys should not contain the '$' or '.' characters")
		}
	}
	return nil
}
// joinTablesForSchema lists the "_Join:<field>:<class>" join-table names
// implied by every Relation-typed field of schema.
func joinTablesForSchema(schema types.M) []string {
	list := []string{}
	if schema == nil {
		return list
	}
	fields := utils.M(schema["fields"])
	if fields == nil {
		return list
	}
	className := utils.S(schema["className"])
	for field, v := range fields {
		tp := utils.M(v)
		if tp != nil && utils.S(tp["type"]) == "Relation" {
			list = append(list, "_Join:"+field+":"+className)
		}
	}
	return list
}
// whereClause is the result of buildWhereClause: an SQL condition fragment
// using positional parameters, the values bound to those parameters, and any
// ORDER BY terms the condition implies (e.g. distance sort for $nearSphere).
type whereClause struct {
	pattern string
	values  types.S
	sorts   []string
}
// buildWhereClause translates a Parse query into a whereClause whose SQL
// pattern uses positional parameters starting at index. It handles scalar
// equality, $or/$and, $ne/$eq/$in/$nin/$all/$exists, geo constraints
// ($nearSphere, $within $box), $regex, Pointer/Date matches, and the
// comparison operators in parseToPosgresComparator. An unrecognized
// constraint returns an OperationForbidden error. All collected values are
// passed through transformValue before being returned.
func buildWhereClause(schema, query types.M, index int) (*whereClause, error) {
	patterns := []string{}
	values := types.S{}
	sorts := []string{}
	schema = toPostgresSchema(schema)
	if schema == nil {
		schema = types.M{}
	}
	fields := utils.M(schema["fields"])
	if fields == nil {
		fields = types.M{}
	}
	for fieldName, fieldValue := range query {
		// Array-typed fields use array/jsonb operators instead of equality.
		isArrayField := false
		if fields != nil {
			if tp := utils.M(fields[fieldName]); tp != nil {
				if utils.S(tp["type"]) == "Array" {
					isArrayField = true
				}
			}
		}
		// Used at the end of the loop to detect unhandled constraints.
		initialPatternsLength := len(patterns)
		if fields[fieldName] == nil {
			// {$exists: false} on an unknown field is trivially satisfied.
			if v := utils.M(fieldValue); v != nil {
				if b, ok := v["$exists"].(bool); ok && b == false {
					continue
				}
			}
		}
		if strings.Contains(fieldName, ".") {
			// Dotted name: build a jsonb path "col"->'k1'->'k2' expression.
			components := strings.Split(fieldName, ".")
			// This inner index deliberately shadows the parameter index:
			// dotted comparisons are inlined, not parameterized.
			for index, cmpt := range components {
				if index == 0 {
					components[index] = `"` + cmpt + `"`
				} else {
					components[index] = `'` + cmpt + `'`
				}
			}
			name := strings.Join(components, "->")
			b, err := json.Marshal(fieldValue)
			if err != nil {
				return nil, err
			}
			// NOTE(review): the value is interpolated into the SQL text here
			// rather than bound as a parameter — verify it cannot carry
			// attacker-controlled quoting.
			patterns = append(patterns, fmt.Sprintf(`%s = '%v'`, name, string(b)))
		} else if _, ok := fieldValue.(string); ok {
			patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, fieldValue)
			index = index + 1
		} else if _, ok := fieldValue.(bool); ok {
			patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, fieldValue)
			index = index + 1
		} else if _, ok := fieldValue.(float64); ok {
			patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, fieldValue)
			index = index + 1
		} else if _, ok := fieldValue.(int); ok {
			patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
			values = append(values, fieldValue)
			index = index + 1
		} else if fieldName == "$or" || fieldName == "$and" {
			// Recurse into each sub-query, then join with OR/AND.
			clauses := []string{}
			clauseValues := types.S{}
			if array := utils.A(fieldValue); array != nil {
				for _, v := range array {
					if subQuery := utils.M(v); subQuery != nil {
						clause, err := buildWhereClause(schema, subQuery, index)
						if err != nil {
							return nil, err
						}
						if len(clause.pattern) > 0 {
							clauses = append(clauses, clause.pattern)
							clauseValues = append(clauseValues, clause.values...)
							index = index + len(clause.values)
						}
					}
				}
			}
			var orOrAnd string
			if fieldName == "$or" {
				orOrAnd = " OR "
			} else {
				orOrAnd = " AND "
			}
			patterns = append(patterns, fmt.Sprintf(`(%s)`, strings.Join(clauses, orOrAnd)))
			values = append(values, clauseValues...)
		}
		// Operator-style constraints: fieldValue is a map of $op keys.
		if value := utils.M(fieldValue); value != nil {
			if v, ok := value["$ne"]; ok {
				if isArrayField {
					// Array field: not-contains on a single-element JSON array.
					j, _ := json.Marshal(types.S{v})
					value["$ne"] = string(j)
					patterns = append(patterns, fmt.Sprintf(`NOT array_contains("%s", $%d)`, fieldName, index))
					values = append(values, value["$ne"])
					index = index + 1
				} else {
					if v == nil {
						patterns = append(patterns, fmt.Sprintf(`"%s" IS NOT NULL`, fieldName))
					} else {
						// NULL columns must also satisfy "<>", hence the OR.
						patterns = append(patterns, fmt.Sprintf(`("%s" <> $%d OR "%s" IS NULL)`, fieldName, index, fieldName))
						values = append(values, value["$ne"])
						index = index + 1
					}
				}
			}
			if v, ok := value["$eq"]; ok {
				if v == nil {
					patterns = append(patterns, fmt.Sprintf(`"%s" IS NULL`, fieldName))
				} else {
					patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
					values = append(values, v)
					index = index + 1
				}
			}
			inArray := utils.A(value["$in"])
			ninArray := utils.A(value["$nin"])
			isInOrNin := (inArray != nil) || (ninArray != nil)
			// text[] columns use the && overlap operator instead of IN.
			isTypeString := false
			if tp := utils.M(fields[fieldName]); tp != nil {
				if contents := utils.M(tp["contents"]); contents != nil {
					if utils.S(contents["type"]) == "String" {
						isTypeString = true
					}
				}
			}
			if inArray != nil && isArrayField && isTypeString {
				inPatterns := []string{}
				allowNull := false
				for listIndex, listElem := range inArray {
					if listElem == nil {
						// A nil entry means "or the column is NULL"; it
						// consumes no placeholder, so later indices shift.
						allowNull = true
					} else {
						values = append(values, listElem)
						i := 0
						if allowNull {
							i = index + listIndex - 1
						} else {
							i = index + listIndex
						}
						inPatterns = append(inPatterns, fmt.Sprintf("$%d", i))
					}
				}
				if allowNull {
					patterns = append(patterns, fmt.Sprintf(`("%s" IS NULL OR "%s" && ARRAY[%s])`, fieldName, fieldName, strings.Join(inPatterns, ",")))
				} else {
					patterns = append(patterns, fmt.Sprintf(`("%s" && ARRAY[%s])`, fieldName, strings.Join(inPatterns, ",")))
				}
				index = index + len(inPatterns)
			} else if isInOrNin {
				// Shared builder for $in (notIn=false) and $nin (notIn=true).
				createConstraint := func(baseArray types.S, notIn bool) {
					if len(baseArray) > 0 {
						not := ""
						if notIn {
							not = " NOT "
						}
						if isArrayField {
							patterns = append(patterns, fmt.Sprintf(`%s array_contains("%s", $%d)`, not, fieldName, index))
							j, _ := json.Marshal(baseArray)
							values = append(values, string(j))
							index = index + 1
						} else {
							inPatterns := []string{}
							for listIndex, listElem := range baseArray {
								values = append(values, listElem)
								inPatterns = append(inPatterns, fmt.Sprintf("$%d", index+listIndex))
							}
							patterns = append(patterns, fmt.Sprintf(`"%s" %s IN (%s)`, fieldName, not, strings.Join(inPatterns, ",")))
							index = index + len(inPatterns)
						}
					} else if !notIn {
						// An empty $in matches nothing but NULL columns.
						patterns = append(patterns, fmt.Sprintf(`"%s" IS NULL`, fieldName))
					}
				}
				if inArray != nil {
					createConstraint(inArray, false)
				}
				if ninArray != nil {
					createConstraint(ninArray, true)
				}
			}
			allArray := utils.A(value["$all"])
			if allArray != nil && isArrayField {
				patterns = append(patterns, fmt.Sprintf(`array_contains_all("%s", $%d::jsonb)`, fieldName, index))
				j, _ := json.Marshal(allArray)
				values = append(values, string(j))
				index = index + 1
			}
			if b, ok := value["$exists"].(bool); ok {
				if b {
					patterns = append(patterns, fmt.Sprintf(`"%s" IS NOT NULL`, fieldName))
				} else {
					patterns = append(patterns, fmt.Sprintf(`"%s" IS NULL`, fieldName))
				}
			}
			if point := utils.M(value["$nearSphere"]); point != nil {
				var distance float64
				if v, ok := value["$maxDistance"].(float64); ok {
					distance = v
				}
				// $maxDistance is in radians; convert via Earth's radius (m).
				distanceInKM := distance * 6371 * 1000
				patterns = append(patterns, fmt.Sprintf(`ST_distance_sphere("%s"::geometry, POINT($%d, $%d)::geometry) <= $%d`, fieldName, index, index+1, index+2))
				// Also sort results by distance from the query point.
				sorts = append(sorts, fmt.Sprintf(`ST_distance_sphere("%s"::geometry, POINT($%d, $%d)::geometry) ASC`, fieldName, index, index+1))
				values = append(values, point["longitude"], point["latitude"], distanceInKM)
				index = index + 3
			}
			if within := utils.M(value["$within"]); within != nil {
				if box := utils.A(within["$box"]); len(box) == 2 {
					box1 := utils.M(box[0])
					box2 := utils.M(box[1])
					if box1 != nil && box2 != nil {
						left := box1["longitude"]
						bottom := box1["latitude"]
						right := box2["longitude"]
						top := box2["latitude"]
						// point <@ box: containment test against the corners.
						patterns = append(patterns, fmt.Sprintf(`"%s"::point <@ $%d::box`, fieldName, index))
						values = append(values, fmt.Sprintf("((%v, %v), (%v, %v))", left, bottom, right, top))
						index = index + 1
					}
				}
			}
			if regex := utils.S(value["$regex"]); regex != "" {
				operator := "~"
				opts := utils.S(value["$options"])
				if opts != "" {
					// "i" => case-insensitive; "x" => extended (strip
					// comments and unescaped whitespace first).
					if strings.Contains(opts, "i") {
						operator = "~*"
					}
					if strings.Contains(opts, "x") {
						regex = removeWhiteSpace(regex)
					}
				}
				regex = processRegexPattern(regex)
				// NOTE(review): the regex is interpolated into the SQL text
				// rather than bound as a parameter; quote escaping relies on
				// literalizeRegexPart — confirm it covers all inputs.
				patterns = append(patterns, fmt.Sprintf(`"%s" %s '%s'`, fieldName, operator, regex))
			}
			if utils.S(value["__type"]) == "Pointer" {
				if isArrayField {
					patterns = append(patterns, fmt.Sprintf(`array_contains("%s", $%d)`, fieldName, index))
					j, _ := json.Marshal(types.S{value})
					values = append(values, string(j))
					index = index + 1
				} else {
					patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
					values = append(values, value["objectId"])
					index = index + 1
				}
			}
			if utils.S(value["__type"]) == "Date" {
				patterns = append(patterns, fmt.Sprintf(`"%s" = $%d`, fieldName, index))
				values = append(values, value["iso"])
				index = index + 1
			}
			// $gt/$lt/$gte/$lte via the comparator table.
			for cmp, pgComparator := range parseToPosgresComparator {
				if v, ok := value[cmp]; ok {
					patterns = append(patterns, fmt.Sprintf(`"%s" %s $%d`, fieldName, pgComparator, index))
					values = append(values, toPostgresValue(v))
					index = index + 1
				}
			}
		}
		if fieldValue == nil {
			patterns = append(patterns, fmt.Sprintf(`"%s" IS NULL`, fieldName))
		}
		// If no branch produced a pattern, the constraint is unsupported.
		if initialPatternsLength == len(patterns) {
			s, _ := json.Marshal(fieldValue)
			return nil, errs.E(errs.OperationForbidden, "Postgres doesn't support this query type yet "+string(s))
		}
	}
	for i, v := range values {
		values[i] = transformValue(v)
	}
	return &whereClause{strings.Join(patterns, " AND "), values, sorts}, nil
}
// removeWhiteSpace strips comments ('#' to end of line) and unescaped
// whitespace from an extended-mode ("x" option) regular expression.
func removeWhiteSpace(s string) string {
	if !strings.HasSuffix(s, "\n") {
		s += "\n"
	}
	// Ordered (pattern, replacement) rewrite steps.
	steps := []struct {
		pattern     string
		replacement string
	}{
		{`(?im)^#.*\n`, ""},         // whole-line comments
		{`(?im)([^\\])#.*\n`, "$1"}, // trailing comments after a non-escape char
		{`(?im)([^\\])\s+`, "$1"},   // whitespace not preceded by a backslash
		{`^\s+`, ""},                // leading whitespace
	}
	for _, step := range steps {
		s = regexp.MustCompile(step.pattern).ReplaceAllString(s, step.replacement)
	}
	return strings.TrimSpace(s)
}
// processRegexPattern literalizes \Q...\E quoted sections of a Parse regex
// while preserving a leading ^ or trailing $ anchor.
func processRegexPattern(s string) string {
	switch {
	case strings.HasPrefix(s, "^"):
		return "^" + literalizeRegexPart(s[1:])
	case strings.HasSuffix(s, "$"):
		return literalizeRegexPart(s[:len(s)-1]) + "$"
	default:
		return literalizeRegexPart(s)
	}
}
// createLiteralRegex escapes s so it matches itself literally inside a
// Postgres regular expression: alphanumerics pass through, single quotes are
// doubled (SQL string escaping), and every other character is
// backslash-escaped.
func createLiteralRegex(s string) string {
	// Fix: the original called regexp.MatchString per character (compiling a
	// regexp each time) and rebuilt the string via Split/Join; classify each
	// rune directly and build the result with a strings.Builder instead.
	var b strings.Builder
	b.Grow(len(s) * 2)
	for _, c := range s {
		switch {
		case c >= '0' && c <= '9', c >= 'a' && c <= 'z', c >= 'A' && c <= 'Z':
			b.WriteRune(c)
		case c == '\'':
			// SQL escapes a single quote by doubling it.
			b.WriteString(`''`)
		default:
			b.WriteByte('\\')
			b.WriteRune(c)
		}
	}
	return b.String()
}
// literalizeRegexPart rewrites \Q...\E quoted sections of a regex into
// explicitly escaped literals (via createLiteralRegex), recursing on the
// text before each quoted section, then strips any stray \Q/\E markers and
// escapes single quotes for SQL.
func literalizeRegexPart(s string) string {
	// Go's regexp does not support (?!) negative lookahead, so the original
	// pattern /\\Q((?!\\E).*)\\E$/ is replaced with an equivalent alternation.
	// /\\Q((?!\\E).*)\\E$/
	// /\\Q(\\[^E\n\r].*|[^\\\n\r].*|.??)\\E$/
	matcher1 := regexp.MustCompile(`\\Q(\\[^E\n\r].*|[^\\\n\r].*|.??)\\E$`)
	result1 := matcher1.FindStringSubmatch(s)
	if len(result1) > 1 {
		// Literalize the quoted tail; recurse on everything before \Q.
		index := strings.Index(s, result1[0])
		prefix := s[:index]
		remaining := result1[1]
		return literalizeRegexPart(prefix) + createLiteralRegex(remaining)
	}
	// Same rewrite for a \Q section that is not terminated by \E.
	// /\\Q((?!\\E).*)$/
	// /\\Q(\\[^E\n\r].*|[^\\\n\r].*|.??)$/
	matcher2 := regexp.MustCompile(`\\Q(\\[^E\n\r].*|[^\\\n\r].*|.??)$`)
	result2 := matcher2.FindStringSubmatch(s)
	if len(result2) > 1 {
		index := strings.Index(s, result2[0])
		prefix := s[:index]
		remaining := result2[1]
		return literalizeRegexPart(prefix) + createLiteralRegex(remaining)
	}
	// Remove stray (unmatched) \E and \Q markers.
	re := regexp.MustCompile(`([^\\])(\\E)`)
	s = re.ReplaceAllString(s, "$1")
	re = regexp.MustCompile(`([^\\])(\\Q)`)
	s = re.ReplaceAllString(s, "$1")
	re = regexp.MustCompile(`^\\E`)
	s = re.ReplaceAllString(s, "")
	re = regexp.MustCompile(`^\\Q`)
	s = re.ReplaceAllString(s, "")
	// Double single quotes for SQL string embedding.
	re = regexp.MustCompile(`([^'])'`)
	s = re.ReplaceAllString(s, "$1''")
	re = regexp.MustCompile(`^'([^'])`)
	s = re.ReplaceAllString(s, "''$1")
	return s
}
// valueToDate wraps a time.Time in a Parse Date object; any other value
// yields nil.
func valueToDate(v interface{}) types.M {
	t, ok := v.(time.Time)
	if !ok {
		return nil
	}
	return types.M{
		"__type": "Date",
		"iso":    utils.TimetoString(t),
	}
}
|
package cmds
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"os/user"
"path"
"path/filepath"
"strings"
units "github.com/docker/go-units"
"github.com/fsouza/go-dockerclient"
"github.com/gogo/protobuf/jsonpb"
"github.com/gogo/protobuf/proto"
pachdclient "github.com/pachyderm/pachyderm/src/client"
"github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pkg/grpcutil"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
"github.com/pachyderm/pachyderm/src/server/pkg/cmdutil"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsutil"
"github.com/pachyderm/pachyderm/src/server/pkg/tabwriter"
"github.com/pachyderm/pachyderm/src/server/pkg/uuid"
"github.com/pachyderm/pachyderm/src/server/pps/pretty"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
const (
	// codestart and codeend delimit shell code blocks embedded in the
	// command help text below.
	codestart = "```sh"
	codeend   = "```"
	// termHeight is presumably a conventional 24-row terminal height —
	// TODO confirm where it is consumed.
	termHeight = 24
)
// Cmds returns a slice containing pps commands.
func Cmds(noMetrics *bool) ([]*cobra.Command, error) {
metrics := !*noMetrics
raw := false
rawFlag := func(cmd *cobra.Command) {
cmd.Flags().BoolVar(&raw, "raw", false, "disable pretty printing, print raw json")
}
marshaller := &jsonpb.Marshaler{
Indent: " ",
OrigName: true,
}
job := &cobra.Command{
Use: "job",
Short: "Docs for jobs.",
Long: `Jobs are the basic unit of computation in Pachyderm.
Jobs run a containerized workload over a set of finished input commits.
Creating a job will also create a new repo and a commit in that repo which
contains the output of the job. Unless the job is created with another job as a
parent. If the job is created with a parent it will use the same repo as its
parent job and the commit it creates will use the parent job's commit as a
parent.
If the job fails the commit it creates will not be finished.
The increase the throughput of a job increase the Shard paremeter.
`,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
return nil
}),
}
pipelineSpec := "[Pipeline Specification](../reference/pipeline_spec.html)"
var block bool
inspectJob := &cobra.Command{
Use: "inspect-job job-id",
Short: "Return info about a job.",
Long: "Return info about a job.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
jobInfo, err := client.InspectJob(args[0], block)
if err != nil {
cmdutil.ErrorAndExit("error from InspectJob: %s", err.Error())
}
if jobInfo == nil {
cmdutil.ErrorAndExit("job %s not found.", args[0])
}
if raw {
return marshaller.Marshal(os.Stdout, jobInfo)
}
return pretty.PrintDetailedJobInfo(jobInfo)
}),
}
inspectJob.Flags().BoolVarP(&block, "block", "b", false, "block until the job has either succeeded or failed")
rawFlag(inspectJob)
var pipelineName string
var outputCommitStr string
var inputCommitStrs []string
listJob := &cobra.Command{
Use: "list-job [commits]",
Short: "Return info about jobs.",
Long: `Return info about jobs.
Examples:
` + codestart + `# return all jobs
$ pachctl list-job
# return all jobs in pipeline foo
$ pachctl list-job -p foo
# return all jobs whose input commits include foo/XXX and bar/YYY
$ pachctl list-job foo/XXX bar/YYY
# return all jobs in pipeline foo and whose input commits include bar/YYY
$ pachctl list-job -p foo bar/YYY
` + codeend,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
commits, err := cmdutil.ParseCommits(inputCommitStrs)
if err != nil {
return err
}
var outputCommit *pfs.Commit
if outputCommitStr != "" {
outputCommits, err := cmdutil.ParseCommits([]string{outputCommitStr})
if err != nil {
return err
}
if len(outputCommits) == 1 {
outputCommit = outputCommits[0]
}
}
if raw {
if err := client.ListJobF(pipelineName, commits, outputCommit, func(ji *ppsclient.JobInfo) error {
if err := marshaller.Marshal(os.Stdout, ji); err != nil {
return err
}
return nil
return nil
}); err != nil {
return err
}
return nil
}
writer := tabwriter.NewWriter(os.Stdout, pretty.JobHeader)
if err := client.ListJobF(pipelineName, commits, outputCommit, func(ji *ppsclient.JobInfo) error {
pretty.PrintJobInfo(writer, ji)
return nil
}); err != nil {
return err
}
return writer.Flush()
}),
}
listJob.Flags().StringVarP(&pipelineName, "pipeline", "p", "", "Limit to jobs made by pipeline.")
listJob.Flags().StringVarP(&outputCommitStr, "output", "o", "", "List jobs with a specific output commit.")
listJob.Flags().StringSliceVarP(&inputCommitStrs, "input", "i", []string{}, "List jobs with a specific set of input commits.")
rawFlag(listJob)
var pipelines cmdutil.RepeatedStringArg
flushJob := &cobra.Command{
Use: "flush-job commit [commit ...]",
Short: "Wait for all jobs caused by the specified commits to finish and return them.",
Long: `Wait for all jobs caused by the specified commits to finish and return them.
Examples:
` + codestart + `# return jobs caused by foo/XXX and bar/YYY
$ pachctl flush-job foo/XXX bar/YYY
# return jobs caused by foo/XXX leading to pipelines bar and baz
$ pachctl flush-job foo/XXX -p bar -p baz
` + codeend,
Run: cmdutil.Run(func(args []string) error {
commits, err := cmdutil.ParseCommits(args)
if err != nil {
return err
}
c, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer c.Close()
jobInfos, err := c.FlushJobAll(commits, pipelines)
if err != nil {
return err
}
if raw {
for _, jobInfo := range jobInfos {
if err := marshaller.Marshal(os.Stdout, jobInfo); err != nil {
return err
}
}
return nil
}
writer := tabwriter.NewWriter(os.Stdout, pretty.JobHeader)
for _, jobInfo := range jobInfos {
pretty.PrintJobInfo(writer, jobInfo)
}
return writer.Flush()
}),
}
flushJob.Flags().VarP(&pipelines, "pipeline", "p", "Wait only for jobs leading to a specific set of pipelines")
rawFlag(flushJob)
deleteJob := &cobra.Command{
Use: "delete-job job-id",
Short: "Delete a job.",
Long: "Delete a job.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
if err := client.DeleteJob(args[0]); err != nil {
cmdutil.ErrorAndExit("error from DeleteJob: %s", err.Error())
}
return nil
}),
}
stopJob := &cobra.Command{
Use: "stop-job job-id",
Short: "Stop a job.",
Long: "Stop a job. The job will be stopped immediately.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
if err := client.StopJob(args[0]); err != nil {
cmdutil.ErrorAndExit("error from StopJob: %s", err.Error())
}
return nil
}),
}
restartDatum := &cobra.Command{
Use: "restart-datum job-id datum-path1,datum-path2",
Short: "Restart a datum.",
Long: "Restart a datum.",
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return fmt.Errorf("error connecting to pachd: %v", err)
}
defer client.Close()
datumFilter := strings.Split(args[1], ",")
for i := 0; i < len(datumFilter); {
if len(datumFilter[i]) == 0 {
if i+1 < len(datumFilter) {
copy(datumFilter[i:], datumFilter[i+1:])
}
datumFilter = datumFilter[:len(datumFilter)-1]
} else {
i++
}
}
return client.RestartDatum(args[0], datumFilter)
}),
}
var pageSize int64
var page int64
listDatum := &cobra.Command{
Use: "list-datum job-id",
Short: "Return the datums in a job.",
Long: "Return the datums in a job.",
Run: cmdutil.RunBoundedArgs(1, 1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
if pageSize < 0 {
return fmt.Errorf("pageSize must be zero or positive")
}
if page < 0 {
return fmt.Errorf("page must be zero or positive")
}
if raw {
if err := client.ListDatumF(args[0], pageSize, page, func(di *ppsclient.DatumInfo) error {
return marshaller.Marshal(os.Stdout, di)
}); err != nil {
return err
}
}
writer := tabwriter.NewWriter(os.Stdout, pretty.DatumHeader)
if err := client.ListDatumF(args[0], pageSize, page, func(di *ppsclient.DatumInfo) error {
pretty.PrintDatumInfo(writer, di)
return nil
}); err != nil {
return err
}
return writer.Flush()
}),
}
rawFlag(listDatum)
listDatum.Flags().Int64Var(&pageSize, "pageSize", 0, "Specify the number of results sent back in a single page")
listDatum.Flags().Int64Var(&page, "page", 0, "Specify the page of results to send")
inspectDatum := &cobra.Command{
Use: "inspect-datum job-id datum-id",
Short: "Display detailed info about a single datum.",
Long: "Display detailed info about a single datum.",
Run: cmdutil.RunBoundedArgs(2, 2, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
datumInfo, err := client.InspectDatum(args[0], args[1])
if err != nil {
return err
}
if raw {
return marshaller.Marshal(os.Stdout, datumInfo)
}
pretty.PrintDetailedDatumInfo(os.Stdout, datumInfo)
return nil
}),
}
rawFlag(inspectDatum)
var (
jobID string
datumID string
commaInputs string // comma-separated list of input files of interest
master bool
follow bool
tail int64
)
getLogs := &cobra.Command{
Use: "get-logs [--pipeline=<pipeline>|--job=<job id>] [--datum=<datum id>]",
Short: "Return logs from a job.",
Long: `Return logs from a job.
Examples:
` + codestart + `# return logs emitted by recent jobs in the "filter" pipeline
$ pachctl get-logs --pipeline=filter
# return logs emitted by the job aedfa12aedf
$ pachctl get-logs --job=aedfa12aedf
# return logs emitted by the pipeline \"filter\" while processing /apple.txt and a file with the hash 123aef
$ pachctl get-logs --pipeline=filter --inputs=/apple.txt,123aef
` + codeend,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return fmt.Errorf("error connecting to pachd: %v", err)
}
defer client.Close()
// Break up comma-separated input paths, and filter out empty entries
data := strings.Split(commaInputs, ",")
for i := 0; i < len(data); {
if len(data[i]) == 0 {
if i+1 < len(data) {
copy(data[i:], data[i+1:])
}
data = data[:len(data)-1]
} else {
i++
}
}
// Issue RPC
marshaler := &jsonpb.Marshaler{}
iter := client.GetLogs(pipelineName, jobID, data, datumID, master, follow, tail)
for iter.Next() {
var messageStr string
if raw {
var err error
messageStr, err = marshaler.MarshalToString(iter.Message())
if err != nil {
fmt.Fprintf(os.Stderr, "error marshalling \"%v\": %s\n", iter.Message(), err)
}
fmt.Println(messageStr)
} else if iter.Message().User {
fmt.Println(iter.Message().Message)
} else if iter.Message().Master && master {
fmt.Println(iter.Message().Message)
} else if pipelineName == "" && jobID == "" {
fmt.Println(iter.Message().Message)
}
}
return iter.Err()
}),
}
getLogs.Flags().StringVarP(&pipelineName, "pipeline", "p", "", "Filter the log "+
"for lines from this pipeline (accepts pipeline name)")
getLogs.Flags().StringVar(&jobID, "job", "", "Filter for log lines from "+
"this job (accepts job ID)")
getLogs.Flags().StringVar(&datumID, "datum", "", "Filter for log lines for this datum (accepts datum ID)")
getLogs.Flags().StringVar(&commaInputs, "inputs", "", "Filter for log lines "+
"generated while processing these files (accepts PFS paths or file hashes)")
getLogs.Flags().BoolVar(&master, "master", false, "Return log messages from the master process (pipeline must be set).")
getLogs.Flags().BoolVar(&raw, "raw", false, "Return log messages verbatim from server.")
getLogs.Flags().BoolVarP(&follow, "follow", "f", false, "Follow logs as more are created.")
getLogs.Flags().Int64VarP(&tail, "tail", "t", 0, "Lines of recent logs to display.")
pipeline := &cobra.Command{
Use: "pipeline",
Short: "Docs for pipelines.",
Long: `Pipelines are a powerful abstraction for automating jobs.
Pipelines take a set of repos as inputs, rather than the set of commits that
jobs take. Pipelines then subscribe to commits on those repos and launches a job
to process each incoming commit.
Creating a pipeline will also create a repo of the same name.
All jobs created by a pipeline will create commits in the pipeline's repo.
`,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
return nil
}),
}
var rebuild bool
var pushImages bool
var registry string
var username string
var password string
var pipelinePath string
createPipeline := &cobra.Command{
Use: "create-pipeline -f pipeline.json",
Short: "Create a new pipeline.",
Long: fmt.Sprintf("Create a new pipeline from a %s", pipelineSpec),
Run: cmdutil.RunFixedArgs(0, func(args []string) (retErr error) {
return pipelineHelper(metrics, false, rebuild, pushImages, registry, username, password, pipelinePath, false)
}),
}
createPipeline.Flags().StringVarP(&pipelinePath, "file", "f", "-", "The file containing the pipeline, it can be a url or local file. - reads from stdin.")
createPipeline.Flags().BoolVarP(&rebuild, "rebuild", "b", false, "If true, rebuild and push local docker images into the cluster registry.")
createPipeline.Flags().BoolVarP(&pushImages, "push-images", "p", false, "If true, push local docker images into the cluster registry.")
createPipeline.Flags().StringVarP(®istry, "registry", "r", "docker.io", "The registry to push images to.")
createPipeline.Flags().StringVarP(&username, "username", "u", "", "The username to push images as, defaults to your docker username.")
createPipeline.Flags().StringVarP(&password, "password", "", "", "Your password for the registry being pushed to.")
var reprocess bool
updatePipeline := &cobra.Command{
Use: "update-pipeline -f pipeline.json",
Short: "Update an existing Pachyderm pipeline.",
Long: fmt.Sprintf("Update a Pachyderm pipeline with a new %s", pipelineSpec),
Run: cmdutil.RunFixedArgs(0, func(args []string) (retErr error) {
return pipelineHelper(metrics, reprocess, rebuild, pushImages, registry, username, password, pipelinePath, true)
}),
}
updatePipeline.Flags().StringVarP(&pipelinePath, "file", "f", "-", "The file containing the pipeline, it can be a url or local file. - reads from stdin.")
updatePipeline.Flags().BoolVarP(&rebuild, "rebuild", "b", false, "If true, rebuild and push local docker images into the cluster registry.")
updatePipeline.Flags().BoolVarP(&pushImages, "push-images", "p", false, "If true, push local docker images into the cluster registry.")
updatePipeline.Flags().StringVarP(®istry, "registry", "r", "docker.io", "The registry to push images to.")
updatePipeline.Flags().StringVarP(&username, "username", "u", "", "The username to push images as, defaults to your OS username.")
updatePipeline.Flags().StringVarP(&password, "password", "", "", "Your password for the registry being pushed to.")
updatePipeline.Flags().BoolVar(&reprocess, "reprocess", false, "If true, reprocess datums that were already processed by previous version of the pipeline.")
inspectPipeline := &cobra.Command{
Use: "inspect-pipeline pipeline-name",
Short: "Return info about a pipeline.",
Long: "Return info about a pipeline.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
pipelineInfo, err := client.InspectPipeline(args[0])
if err != nil {
return err
}
if pipelineInfo == nil {
return fmt.Errorf("pipeline %s not found", args[0])
}
if raw {
return marshaller.Marshal(os.Stdout, pipelineInfo)
}
return pretty.PrintDetailedPipelineInfo(pipelineInfo)
}),
}
rawFlag(inspectPipeline)
extractPipeline := &cobra.Command{
Use: "extract-pipeline pipeline-name",
Short: "Return the manifest used to create a pipeline.",
Long: "Return the manifest used to create a pipeline.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
createPipelineRequest, err := client.ExtractPipeline(args[0])
if err != nil {
return err
}
return marshaller.Marshal(os.Stdout, createPipelineRequest)
}),
}
var editor string
editPipeline := &cobra.Command{
Use: "edit-pipeline pipeline-name",
Short: "Edit the manifest for a pipeline in your text editor.",
Long: "Edit the manifest for a pipeline in your text editor.",
Run: cmdutil.RunFixedArgs(1, func(args []string) (retErr error) {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
createPipelineRequest, err := client.ExtractPipeline(args[0])
if err != nil {
return err
}
f, err := ioutil.TempFile("", args[0])
if err != nil {
return err
}
if err := marshaller.Marshal(f, createPipelineRequest); err != nil {
return err
}
defer func() {
if err := f.Close(); err != nil && retErr == nil {
retErr = err
}
}()
if editor == "" {
editor = os.Getenv("EDITOR")
}
if editor == "" {
editor = "vim"
}
if err := cmdutil.RunIO(cmdutil.IO{
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
}, editor, f.Name()); err != nil {
return err
}
cfgReader, err := ppsutil.NewPipelineManifestReader(f.Name())
if err != nil {
return err
}
request, err := cfgReader.NextCreatePipelineRequest()
if err != nil {
return err
}
if proto.Equal(createPipelineRequest, request) {
fmt.Println("Pipeline unchanged, no update will be performed.")
return nil
}
request.Update = true
request.Reprocess = reprocess
if request.Input.Atom != nil {
fmt.Fprintln(os.Stderr, "the `atom` input type is deprecated as of 1.8.1, please replace `atom` with `pfs`")
}
if _, err := client.PpsAPIClient.CreatePipeline(
client.Ctx(),
request,
); err != nil {
return grpcutil.ScrubGRPC(err)
}
return nil
}),
}
editPipeline.Flags().BoolVar(&reprocess, "reprocess", false, "If true, reprocess datums that were already processed by previous version of the pipeline.")
editPipeline.Flags().StringVar(&editor, "editor", "", "Editor to use for modifying the manifest.")
var spec bool
listPipeline := &cobra.Command{
Use: "list-pipeline",
Short: "Return info about all pipelines.",
Long: "Return info about all pipelines.",
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return fmt.Errorf("error connecting to pachd: %v", err)
}
defer client.Close()
pipelineInfos, err := client.ListPipeline()
if err != nil {
return err
}
if raw {
for _, pipelineInfo := range pipelineInfos {
if err := marshaller.Marshal(os.Stdout, pipelineInfo); err != nil {
return err
}
}
return nil
}
if spec {
for _, pipelineInfo := range pipelineInfos {
if err := marshaller.Marshal(os.Stdout, ppsutil.PipelineReqFromInfo(pipelineInfo)); err != nil {
return err
}
}
return nil
}
writer := tabwriter.NewWriter(os.Stdout, pretty.PipelineHeader)
for _, pipelineInfo := range pipelineInfos {
pretty.PrintPipelineInfo(writer, pipelineInfo)
}
return writer.Flush()
}),
}
rawFlag(listPipeline)
listPipeline.Flags().BoolVarP(&spec, "spec", "s", false, "Output create-pipeline compatibility specs.")
var all bool
var force bool
deletePipeline := &cobra.Command{
Use: "delete-pipeline pipeline-name",
Short: "Delete a pipeline.",
Long: "Delete a pipeline.",
Run: cmdutil.RunBoundedArgs(0, 1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
if len(args) > 0 && all {
return fmt.Errorf("cannot use the --all flag with an argument")
}
if len(args) == 0 && !all {
return fmt.Errorf("either a pipeline name or the --all flag needs to be provided")
}
if all {
_, err = client.PpsAPIClient.DeletePipeline(
client.Ctx(),
&ppsclient.DeletePipelineRequest{
All: all,
Force: force,
})
} else {
err = client.DeletePipeline(args[0], force)
}
if err != nil {
return grpcutil.ScrubGRPC(err)
}
return nil
}),
}
deletePipeline.Flags().BoolVar(&all, "all", false, "delete all pipelines")
deletePipeline.Flags().BoolVarP(&force, "force", "f", false, "delete the pipeline regardless of errors; use with care")
startPipeline := &cobra.Command{
Use: "start-pipeline pipeline-name",
Short: "Restart a stopped pipeline.",
Long: "Restart a stopped pipeline.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
if err := client.StartPipeline(args[0]); err != nil {
cmdutil.ErrorAndExit("error from StartPipeline: %s", err.Error())
}
return nil
}),
}
stopPipeline := &cobra.Command{
Use: "stop-pipeline pipeline-name",
Short: "Stop a running pipeline.",
Long: "Stop a running pipeline.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
if err := client.StopPipeline(args[0]); err != nil {
cmdutil.ErrorAndExit("error from StopPipeline: %s", err.Error())
}
return nil
}),
}
var memory string
garbageCollect := &cobra.Command{
Use: "garbage-collect",
Short: "Garbage collect unused data.",
Long: `Garbage collect unused data.
When a file/commit/repo is deleted, the data is not immediately removed from
the underlying storage system (e.g. S3) for performance and architectural
reasons. This is similar to how when you delete a file on your computer, the
file is not necessarily wiped from disk immediately.
To actually remove the data, you will need to manually invoke garbage
collection with "pachctl garbage-collect".
Currently "pachctl garbage-collect" can only be started when there are no
pipelines running. You also need to ensure that there's no ongoing "put-file".
Garbage collection puts the cluster into a readonly mode where no new jobs can
be created and no data can be added.
Pachyderm's garbage collection uses bloom filters to index live objects. This
means that some dead objects may erronously not be deleted during garbage
collection. The probability of this happening depends on how many objects you
have; at around 10M objects it starts to become likely with the default values.
To lower Pachyderm's error rate and make garbage-collection more comprehensive,
you can increase the amount of memory used for the bloom filters with the
--memory flag. The default value is 10MB.
`,
Run: cmdutil.RunFixedArgs(0, func(args []string) (retErr error) {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
memoryBytes, err := units.RAMInBytes(memory)
if err != nil {
return err
}
return client.GarbageCollect(memoryBytes)
}),
}
garbageCollect.Flags().StringVarP(&memory, "memory", "m", "0", "The amount of memory to use during garbage collection. Default is 10MB.")
var result []*cobra.Command
result = append(result, job)
result = append(result, inspectJob)
result = append(result, listJob)
result = append(result, flushJob)
result = append(result, deleteJob)
result = append(result, stopJob)
result = append(result, restartDatum)
result = append(result, listDatum)
result = append(result, inspectDatum)
result = append(result, getLogs)
result = append(result, pipeline)
result = append(result, createPipeline)
result = append(result, updatePipeline)
result = append(result, inspectPipeline)
result = append(result, extractPipeline)
result = append(result, editPipeline)
result = append(result, listPipeline)
result = append(result, deletePipeline)
result = append(result, startPipeline)
result = append(result, stopPipeline)
result = append(result, garbageCollect)
return result, nil
}
// pipelineHelper reads one or more CreatePipelineRequests from pipelinePath
// ("-" means stdin; URLs are also accepted by the manifest reader) and submits
// each to pachd. When update is true the requests are marked as updates of
// existing pipelines, optionally reprocessing already-processed datums. When
// rebuild and/or pushImages is set, the pipeline's docker image is rebuilt
// and/or pushed to the given registry first, and the request is rewritten to
// reference the freshly pushed image.
func pipelineHelper(metrics bool, reprocess bool, rebuild bool, pushImages bool, registry string, username string, password string, pipelinePath string, update bool) error {
	cfgReader, err := ppsutil.NewPipelineManifestReader(pipelinePath)
	if err != nil {
		return err
	}
	client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
	if err != nil {
		return fmt.Errorf("error connecting to pachd: %v", err)
	}
	defer client.Close()
	// A manifest may contain several pipeline specs; process them all.
	for {
		request, err := cfgReader.NextCreatePipelineRequest()
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		if request.Input.Atom != nil {
			fmt.Println("WARNING: The `atom` input type has been deprecated and will be removed in a future version. Please replace `atom` with `pfs`.")
		}
		if update {
			request.Update = true
			request.Reprocess = reprocess
		}
		if rebuild || pushImages {
			if rebuild && pushImages {
				fmt.Fprintln(os.Stderr, "`--push-images` is redundant, as it's already enabled with `--rebuild`")
			}
			dockerClient, authConfig, err := dockerConfig(registry, username, password)
			if err != nil {
				return err
			}
			repo, sourceTag := docker.ParseRepositoryTag(request.Transform.Image)
			if sourceTag == "" {
				sourceTag = "latest"
			}
			destTag := uuid.NewWithoutDashes()
			if rebuild {
				dockerfile := request.Transform.Dockerfile
				if dockerfile == "" {
					return fmt.Errorf("`dockerfile` must be specified in order to use `--rebuild`")
				}
				// --rebuild needs a local directory to use as the docker
				// build context, so reject stdin ("-") and remote (URL)
				// pipeline paths. (Local renamed from "url" to avoid
				// shadowing the net/url package.)
				parsedURL, err := url.Parse(pipelinePath)
				if pipelinePath == "-" || (err == nil && parsedURL.Scheme != "") {
					return fmt.Errorf("`--rebuild` can only be used when the pipeline path is local")
				}
				absPath, err := filepath.Abs(pipelinePath)
				if err != nil {
					return fmt.Errorf("could not get absolute path to the pipeline path '%s': %s", pipelinePath, err)
				}
				contextDir := filepath.Dir(absPath)
				err = buildImage(dockerClient, registry, repo, contextDir, dockerfile, destTag)
				if err != nil {
					return err
				}
				// Now that we've rebuilt into `destTag`, change the
				// `sourceTag` to be the same so that the push will work with
				// the right image
				sourceTag = destTag
			}
			image, err := pushImage(dockerClient, authConfig, registry, repo, sourceTag, destTag)
			if err != nil {
				return err
			}
			request.Transform.Image = image
		}
		if _, err := client.PpsAPIClient.CreatePipeline(
			client.Ctx(),
			request,
		); err != nil {
			return grpcutil.ScrubGRPC(err)
		}
	}
	return nil
}
// ByCreationTime is an implementation of sort.Interface which
// sorts pps job info by creation time, ascending.
type ByCreationTime []*ppsclient.JobInfo

// Len returns the number of jobs in the collection.
func (arr ByCreationTime) Len() int { return len(arr) }

// Swap exchanges the jobs at indices i and j.
func (arr ByCreationTime) Swap(i, j int) { arr[i], arr[j] = arr[j], arr[i] }
// Less reports whether the job at index i started strictly before the job at
// index j (seconds first, then nanoseconds). Jobs with a nil start time are
// never considered "less", so they keep their relative order.
func (arr ByCreationTime) Less(i, j int) bool {
	si, sj := arr[i].Started, arr[j].Started
	if si == nil || sj == nil {
		return false
	}
	switch {
	case si.Seconds < sj.Seconds:
		return true
	case si.Seconds > sj.Seconds:
		return false
	default:
		return si.Nanos < sj.Nanos
	}
}
// dockerConfig builds a docker client from the environment and resolves the
// auth configuration to use for the given registry. If username and password
// are both non-empty they are used directly; otherwise the user's docker
// config file is consulted for the first entry whose server address mentions
// the registry. On failure the zero AuthConfiguration is returned with a
// descriptive error.
func dockerConfig(registry string, username string, password string) (*docker.Client, docker.AuthConfiguration, error) {
	var authConfig docker.AuthConfiguration
	client, err := docker.NewClientFromEnv()
	if err != nil {
		err = fmt.Errorf("could not create a docker client from the environment: %s", err)
		return nil, authConfig, err
	}
	if username != "" && password != "" {
		authConfig = docker.AuthConfiguration{ServerAddress: registry}
		authConfig.Username = username
		authConfig.Password = password
	} else {
		authConfigs, err := docker.NewAuthConfigurationsFromDockerCfg()
		if err != nil {
			// macOS keychain-backed configs can't be parsed by the client
			// library; give a more actionable error in that case.
			if isDockerUsingKeychain() {
				err = fmt.Errorf("error parsing auth: %s; it looks like you may have a docker configuration not supported by the client library that we use; as a workaround, try specifying the `--username` and `--password` flags", err.Error())
				return nil, authConfig, err
			}
			err = fmt.Errorf("error parsing auth: %s, try running `docker login`", err.Error())
			return nil, authConfig, err
		}
		// Pick the first stored config whose server address mentions the
		// requested registry. (Loop variable renamed from "_authConfig":
		// leading underscores are unidiomatic Go.)
		for _, candidate := range authConfigs.Configs {
			if strings.Contains(candidate.ServerAddress, registry) {
				authConfig = candidate
				break
			}
		}
	}
	return client, authConfig, nil
}
// buildImage builds a new docker image as registry/user/repo.
// The build context is contextDir and the image is tagged with destTag;
// docker's build output is streamed to stdout.
func buildImage(client *docker.Client, registry string, repo string, contextDir string, dockerfile string, destTag string) error {
	target := fmt.Sprintf("%s/%s:%s", registry, repo, destTag)
	fmt.Printf("Building %s, this may take a while.\n", target)
	opts := docker.BuildImageOptions{
		Name:         target,
		ContextDir:   contextDir,
		Dockerfile:   dockerfile,
		OutputStream: os.Stdout,
	}
	if err := client.BuildImage(opts); err != nil {
		return fmt.Errorf("could not build docker image: %s", err)
	}
	return nil
}
// pushImage pushes an image as registry/user/repo.
// It first tags the source image (fullRepo:sourceTag) with destTag, then
// pushes the new tag using authConfig, returning the pushed image name.
func pushImage(client *docker.Client, authConfig docker.AuthConfiguration, registry string, repo string, sourceTag string, destTag string) (string, error) {
	fullRepo := fmt.Sprintf("%s/%s", registry, repo)
	sourceImage := fmt.Sprintf("%s:%s", fullRepo, sourceTag)
	destImage := fmt.Sprintf("%s:%s", fullRepo, destTag)
	fmt.Printf("Tagging/pushing %s, this may take a while.\n", destImage)
	tagOpts := docker.TagImageOptions{
		Repo:    fullRepo,
		Tag:     destTag,
		Context: context.Background(),
	}
	if err := client.TagImage(sourceImage, tagOpts); err != nil {
		return "", fmt.Errorf("could not tag docker image: %s", err)
	}
	pushOpts := docker.PushImageOptions{
		Name: fullRepo,
		Tag:  destTag,
	}
	if err := client.PushImage(pushOpts, authConfig); err != nil {
		return "", fmt.Errorf("could not push docker image: %s", err)
	}
	return destImage, nil
}
// isDockerUsingKeychain checks if the user has a configuration that is not
// readable by our current docker client library.
// Concretely: ~/.docker/config.json has credsStore=osxkeychain and an empty
// auth entry for the docker hub index. Any read/parse failure reports false.
// TODO(ys): remove if/when this issue is addressed:
// https://github.com/fsouza/go-dockerclient/issues/677
func isDockerUsingKeychain() bool {
	usr, err := user.Current()
	if err != nil {
		return false
	}
	rawCfg, err := ioutil.ReadFile(path.Join(usr.HomeDir, ".docker/config.json"))
	if err != nil {
		return false
	}
	var cfg map[string]interface{}
	if json.Unmarshal(rawCfg, &cfg) != nil {
		return false
	}
	// "auths" must exist and be an object.
	auths, ok := cfg["auths"].(map[string]interface{})
	if !ok {
		return false
	}
	// The docker hub entry must exist, be an object, and be empty — a
	// populated entry means credentials were stored inline, not in the
	// keychain.
	index, ok := auths["https://index.docker.io/v1/"].(map[string]interface{})
	if !ok || len(index) > 0 {
		return false
	}
	return cfg["credsStore"] == "osxkeychain"
}
Rename rebuild -> build
package cmds
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"os/user"
"path"
"path/filepath"
"strings"
units "github.com/docker/go-units"
"github.com/fsouza/go-dockerclient"
"github.com/gogo/protobuf/jsonpb"
"github.com/gogo/protobuf/proto"
pachdclient "github.com/pachyderm/pachyderm/src/client"
"github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pkg/grpcutil"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
"github.com/pachyderm/pachyderm/src/server/pkg/cmdutil"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsutil"
"github.com/pachyderm/pachyderm/src/server/pkg/tabwriter"
"github.com/pachyderm/pachyderm/src/server/pkg/uuid"
"github.com/pachyderm/pachyderm/src/server/pps/pretty"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
// Delimiters used to wrap example shell sessions in command help text as
// markdown code fences.
const (
	codestart = "```sh"
	codeend   = "```"
	// termHeight is presumably the assumed number of rows in a user's
	// terminal; its use is not visible in this chunk — TODO confirm.
	termHeight = 24
)
// Cmds returns a slice containing pps commands.
func Cmds(noMetrics *bool) ([]*cobra.Command, error) {
metrics := !*noMetrics
raw := false
rawFlag := func(cmd *cobra.Command) {
cmd.Flags().BoolVar(&raw, "raw", false, "disable pretty printing, print raw json")
}
marshaller := &jsonpb.Marshaler{
Indent: " ",
OrigName: true,
}
job := &cobra.Command{
Use: "job",
Short: "Docs for jobs.",
Long: `Jobs are the basic unit of computation in Pachyderm.
Jobs run a containerized workload over a set of finished input commits.
Creating a job will also create a new repo and a commit in that repo which
contains the output of the job. Unless the job is created with another job as a
parent. If the job is created with a parent it will use the same repo as its
parent job and the commit it creates will use the parent job's commit as a
parent.
If the job fails the commit it creates will not be finished.
The increase the throughput of a job increase the Shard paremeter.
`,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
return nil
}),
}
pipelineSpec := "[Pipeline Specification](../reference/pipeline_spec.html)"
var block bool
inspectJob := &cobra.Command{
Use: "inspect-job job-id",
Short: "Return info about a job.",
Long: "Return info about a job.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
jobInfo, err := client.InspectJob(args[0], block)
if err != nil {
cmdutil.ErrorAndExit("error from InspectJob: %s", err.Error())
}
if jobInfo == nil {
cmdutil.ErrorAndExit("job %s not found.", args[0])
}
if raw {
return marshaller.Marshal(os.Stdout, jobInfo)
}
return pretty.PrintDetailedJobInfo(jobInfo)
}),
}
inspectJob.Flags().BoolVarP(&block, "block", "b", false, "block until the job has either succeeded or failed")
rawFlag(inspectJob)
var pipelineName string
var outputCommitStr string
var inputCommitStrs []string
listJob := &cobra.Command{
Use: "list-job [commits]",
Short: "Return info about jobs.",
Long: `Return info about jobs.
Examples:
` + codestart + `# return all jobs
$ pachctl list-job
# return all jobs in pipeline foo
$ pachctl list-job -p foo
# return all jobs whose input commits include foo/XXX and bar/YYY
$ pachctl list-job foo/XXX bar/YYY
# return all jobs in pipeline foo and whose input commits include bar/YYY
$ pachctl list-job -p foo bar/YYY
` + codeend,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
commits, err := cmdutil.ParseCommits(inputCommitStrs)
if err != nil {
return err
}
var outputCommit *pfs.Commit
if outputCommitStr != "" {
outputCommits, err := cmdutil.ParseCommits([]string{outputCommitStr})
if err != nil {
return err
}
if len(outputCommits) == 1 {
outputCommit = outputCommits[0]
}
}
if raw {
if err := client.ListJobF(pipelineName, commits, outputCommit, func(ji *ppsclient.JobInfo) error {
if err := marshaller.Marshal(os.Stdout, ji); err != nil {
return err
}
return nil
return nil
}); err != nil {
return err
}
return nil
}
writer := tabwriter.NewWriter(os.Stdout, pretty.JobHeader)
if err := client.ListJobF(pipelineName, commits, outputCommit, func(ji *ppsclient.JobInfo) error {
pretty.PrintJobInfo(writer, ji)
return nil
}); err != nil {
return err
}
return writer.Flush()
}),
}
listJob.Flags().StringVarP(&pipelineName, "pipeline", "p", "", "Limit to jobs made by pipeline.")
listJob.Flags().StringVarP(&outputCommitStr, "output", "o", "", "List jobs with a specific output commit.")
listJob.Flags().StringSliceVarP(&inputCommitStrs, "input", "i", []string{}, "List jobs with a specific set of input commits.")
rawFlag(listJob)
var pipelines cmdutil.RepeatedStringArg
flushJob := &cobra.Command{
Use: "flush-job commit [commit ...]",
Short: "Wait for all jobs caused by the specified commits to finish and return them.",
Long: `Wait for all jobs caused by the specified commits to finish and return them.
Examples:
` + codestart + `# return jobs caused by foo/XXX and bar/YYY
$ pachctl flush-job foo/XXX bar/YYY
# return jobs caused by foo/XXX leading to pipelines bar and baz
$ pachctl flush-job foo/XXX -p bar -p baz
` + codeend,
Run: cmdutil.Run(func(args []string) error {
commits, err := cmdutil.ParseCommits(args)
if err != nil {
return err
}
c, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer c.Close()
jobInfos, err := c.FlushJobAll(commits, pipelines)
if err != nil {
return err
}
if raw {
for _, jobInfo := range jobInfos {
if err := marshaller.Marshal(os.Stdout, jobInfo); err != nil {
return err
}
}
return nil
}
writer := tabwriter.NewWriter(os.Stdout, pretty.JobHeader)
for _, jobInfo := range jobInfos {
pretty.PrintJobInfo(writer, jobInfo)
}
return writer.Flush()
}),
}
flushJob.Flags().VarP(&pipelines, "pipeline", "p", "Wait only for jobs leading to a specific set of pipelines")
rawFlag(flushJob)
deleteJob := &cobra.Command{
Use: "delete-job job-id",
Short: "Delete a job.",
Long: "Delete a job.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
if err := client.DeleteJob(args[0]); err != nil {
cmdutil.ErrorAndExit("error from DeleteJob: %s", err.Error())
}
return nil
}),
}
stopJob := &cobra.Command{
Use: "stop-job job-id",
Short: "Stop a job.",
Long: "Stop a job. The job will be stopped immediately.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
if err := client.StopJob(args[0]); err != nil {
cmdutil.ErrorAndExit("error from StopJob: %s", err.Error())
}
return nil
}),
}
restartDatum := &cobra.Command{
Use: "restart-datum job-id datum-path1,datum-path2",
Short: "Restart a datum.",
Long: "Restart a datum.",
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return fmt.Errorf("error connecting to pachd: %v", err)
}
defer client.Close()
datumFilter := strings.Split(args[1], ",")
for i := 0; i < len(datumFilter); {
if len(datumFilter[i]) == 0 {
if i+1 < len(datumFilter) {
copy(datumFilter[i:], datumFilter[i+1:])
}
datumFilter = datumFilter[:len(datumFilter)-1]
} else {
i++
}
}
return client.RestartDatum(args[0], datumFilter)
}),
}
// Paging flags for list-datum.
var pageSize int64
var page int64
// listDatum lists the datums in a job, optionally paged.
listDatum := &cobra.Command{
	Use:   "list-datum job-id",
	Short: "Return the datums in a job.",
	Long:  "Return the datums in a job.",
	Run: cmdutil.RunBoundedArgs(1, 1, func(args []string) error {
		client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
		if err != nil {
			return err
		}
		defer client.Close()
		if pageSize < 0 {
			return fmt.Errorf("pageSize must be zero or positive")
		}
		if page < 0 {
			return fmt.Errorf("page must be zero or positive")
		}
		if raw {
			if err := client.ListDatumF(args[0], pageSize, page, func(di *ppsclient.DatumInfo) error {
				return marshaller.Marshal(os.Stdout, di)
			}); err != nil {
				return err
			}
			// BUG FIX: return here so `--raw` mode doesn't fall through
			// and print the datums a second time in the pretty format
			// (matches the equivalent branch in list-job).
			return nil
		}
		writer := tabwriter.NewWriter(os.Stdout, pretty.DatumHeader)
		if err := client.ListDatumF(args[0], pageSize, page, func(di *ppsclient.DatumInfo) error {
			pretty.PrintDatumInfo(writer, di)
			return nil
		}); err != nil {
			return err
		}
		return writer.Flush()
	}),
}
rawFlag(listDatum)
listDatum.Flags().Int64Var(&pageSize, "pageSize", 0, "Specify the number of results sent back in a single page")
listDatum.Flags().Int64Var(&page, "page", 0, "Specify the page of results to send")
inspectDatum := &cobra.Command{
Use: "inspect-datum job-id datum-id",
Short: "Display detailed info about a single datum.",
Long: "Display detailed info about a single datum.",
Run: cmdutil.RunBoundedArgs(2, 2, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
datumInfo, err := client.InspectDatum(args[0], args[1])
if err != nil {
return err
}
if raw {
return marshaller.Marshal(os.Stdout, datumInfo)
}
pretty.PrintDetailedDatumInfo(os.Stdout, datumInfo)
return nil
}),
}
rawFlag(inspectDatum)
var (
jobID string
datumID string
commaInputs string // comma-separated list of input files of interest
master bool
follow bool
tail int64
)
getLogs := &cobra.Command{
Use: "get-logs [--pipeline=<pipeline>|--job=<job id>] [--datum=<datum id>]",
Short: "Return logs from a job.",
Long: `Return logs from a job.
Examples:
` + codestart + `# return logs emitted by recent jobs in the "filter" pipeline
$ pachctl get-logs --pipeline=filter
# return logs emitted by the job aedfa12aedf
$ pachctl get-logs --job=aedfa12aedf
# return logs emitted by the pipeline \"filter\" while processing /apple.txt and a file with the hash 123aef
$ pachctl get-logs --pipeline=filter --inputs=/apple.txt,123aef
` + codeend,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return fmt.Errorf("error connecting to pachd: %v", err)
}
defer client.Close()
// Break up comma-separated input paths, and filter out empty entries
data := strings.Split(commaInputs, ",")
for i := 0; i < len(data); {
if len(data[i]) == 0 {
if i+1 < len(data) {
copy(data[i:], data[i+1:])
}
data = data[:len(data)-1]
} else {
i++
}
}
// Issue RPC
marshaler := &jsonpb.Marshaler{}
iter := client.GetLogs(pipelineName, jobID, data, datumID, master, follow, tail)
for iter.Next() {
var messageStr string
if raw {
var err error
messageStr, err = marshaler.MarshalToString(iter.Message())
if err != nil {
fmt.Fprintf(os.Stderr, "error marshalling \"%v\": %s\n", iter.Message(), err)
}
fmt.Println(messageStr)
} else if iter.Message().User {
fmt.Println(iter.Message().Message)
} else if iter.Message().Master && master {
fmt.Println(iter.Message().Message)
} else if pipelineName == "" && jobID == "" {
fmt.Println(iter.Message().Message)
}
}
return iter.Err()
}),
}
getLogs.Flags().StringVarP(&pipelineName, "pipeline", "p", "", "Filter the log "+
"for lines from this pipeline (accepts pipeline name)")
getLogs.Flags().StringVar(&jobID, "job", "", "Filter for log lines from "+
"this job (accepts job ID)")
getLogs.Flags().StringVar(&datumID, "datum", "", "Filter for log lines for this datum (accepts datum ID)")
getLogs.Flags().StringVar(&commaInputs, "inputs", "", "Filter for log lines "+
"generated while processing these files (accepts PFS paths or file hashes)")
getLogs.Flags().BoolVar(&master, "master", false, "Return log messages from the master process (pipeline must be set).")
getLogs.Flags().BoolVar(&raw, "raw", false, "Return log messages verbatim from server.")
getLogs.Flags().BoolVarP(&follow, "follow", "f", false, "Follow logs as more are created.")
getLogs.Flags().Int64VarP(&tail, "tail", "t", 0, "Lines of recent logs to display.")
pipeline := &cobra.Command{
Use: "pipeline",
Short: "Docs for pipelines.",
Long: `Pipelines are a powerful abstraction for automating jobs.
Pipelines take a set of repos as inputs, rather than the set of commits that
jobs take. Pipelines then subscribe to commits on those repos and launches a job
to process each incoming commit.
Creating a pipeline will also create a repo of the same name.
All jobs created by a pipeline will create commits in the pipeline's repo.
`,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
return nil
}),
}
var build bool
var pushImages bool
var registry string
var username string
var password string
var pipelinePath string
createPipeline := &cobra.Command{
Use: "create-pipeline -f pipeline.json",
Short: "Create a new pipeline.",
Long: fmt.Sprintf("Create a new pipeline from a %s", pipelineSpec),
Run: cmdutil.RunFixedArgs(0, func(args []string) (retErr error) {
return pipelineHelper(metrics, false, build, pushImages, registry, username, password, pipelinePath, false)
}),
}
createPipeline.Flags().StringVarP(&pipelinePath, "file", "f", "-", "The file containing the pipeline, it can be a url or local file. - reads from stdin.")
createPipeline.Flags().BoolVarP(&build, "build", "b", false, "If true, build and push local docker images into the docker registry.")
createPipeline.Flags().BoolVarP(&pushImages, "push-images", "p", false, "If true, push local docker images into the docker registry.")
createPipeline.Flags().StringVarP(®istry, "registry", "r", "docker.io", "The registry to push images to.")
createPipeline.Flags().StringVarP(&username, "username", "u", "", "The username to push images as, defaults to your docker username.")
createPipeline.Flags().StringVarP(&password, "password", "", "", "Your password for the registry being pushed to.")
var reprocess bool
updatePipeline := &cobra.Command{
Use: "update-pipeline -f pipeline.json",
Short: "Update an existing Pachyderm pipeline.",
Long: fmt.Sprintf("Update a Pachyderm pipeline with a new %s", pipelineSpec),
Run: cmdutil.RunFixedArgs(0, func(args []string) (retErr error) {
return pipelineHelper(metrics, reprocess, build, pushImages, registry, username, password, pipelinePath, true)
}),
}
updatePipeline.Flags().StringVarP(&pipelinePath, "file", "f", "-", "The file containing the pipeline, it can be a url or local file. - reads from stdin.")
updatePipeline.Flags().BoolVarP(&build, "build", "b", false, "If true, build and push local docker images into the docker registry.")
updatePipeline.Flags().BoolVarP(&pushImages, "push-images", "p", false, "If true, push local docker images into the docker registry.")
updatePipeline.Flags().StringVarP(®istry, "registry", "r", "docker.io", "The registry to push images to.")
updatePipeline.Flags().StringVarP(&username, "username", "u", "", "The username to push images as, defaults to your OS username.")
updatePipeline.Flags().StringVarP(&password, "password", "", "", "Your password for the registry being pushed to.")
updatePipeline.Flags().BoolVar(&reprocess, "reprocess", false, "If true, reprocess datums that were already processed by previous version of the pipeline.")
inspectPipeline := &cobra.Command{
Use: "inspect-pipeline pipeline-name",
Short: "Return info about a pipeline.",
Long: "Return info about a pipeline.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
pipelineInfo, err := client.InspectPipeline(args[0])
if err != nil {
return err
}
if pipelineInfo == nil {
return fmt.Errorf("pipeline %s not found", args[0])
}
if raw {
return marshaller.Marshal(os.Stdout, pipelineInfo)
}
return pretty.PrintDetailedPipelineInfo(pipelineInfo)
}),
}
rawFlag(inspectPipeline)
extractPipeline := &cobra.Command{
Use: "extract-pipeline pipeline-name",
Short: "Return the manifest used to create a pipeline.",
Long: "Return the manifest used to create a pipeline.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
createPipelineRequest, err := client.ExtractPipeline(args[0])
if err != nil {
return err
}
return marshaller.Marshal(os.Stdout, createPipelineRequest)
}),
}
var editor string
editPipeline := &cobra.Command{
Use: "edit-pipeline pipeline-name",
Short: "Edit the manifest for a pipeline in your text editor.",
Long: "Edit the manifest for a pipeline in your text editor.",
Run: cmdutil.RunFixedArgs(1, func(args []string) (retErr error) {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
createPipelineRequest, err := client.ExtractPipeline(args[0])
if err != nil {
return err
}
f, err := ioutil.TempFile("", args[0])
if err != nil {
return err
}
if err := marshaller.Marshal(f, createPipelineRequest); err != nil {
return err
}
defer func() {
if err := f.Close(); err != nil && retErr == nil {
retErr = err
}
}()
if editor == "" {
editor = os.Getenv("EDITOR")
}
if editor == "" {
editor = "vim"
}
if err := cmdutil.RunIO(cmdutil.IO{
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
}, editor, f.Name()); err != nil {
return err
}
cfgReader, err := ppsutil.NewPipelineManifestReader(f.Name())
if err != nil {
return err
}
request, err := cfgReader.NextCreatePipelineRequest()
if err != nil {
return err
}
if proto.Equal(createPipelineRequest, request) {
fmt.Println("Pipeline unchanged, no update will be performed.")
return nil
}
request.Update = true
request.Reprocess = reprocess
if request.Input.Atom != nil {
fmt.Fprintln(os.Stderr, "the `atom` input type is deprecated as of 1.8.1, please replace `atom` with `pfs`")
}
if _, err := client.PpsAPIClient.CreatePipeline(
client.Ctx(),
request,
); err != nil {
return grpcutil.ScrubGRPC(err)
}
return nil
}),
}
editPipeline.Flags().BoolVar(&reprocess, "reprocess", false, "If true, reprocess datums that were already processed by previous version of the pipeline.")
editPipeline.Flags().StringVar(&editor, "editor", "", "Editor to use for modifying the manifest.")
var spec bool
listPipeline := &cobra.Command{
Use: "list-pipeline",
Short: "Return info about all pipelines.",
Long: "Return info about all pipelines.",
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return fmt.Errorf("error connecting to pachd: %v", err)
}
defer client.Close()
pipelineInfos, err := client.ListPipeline()
if err != nil {
return err
}
if raw {
for _, pipelineInfo := range pipelineInfos {
if err := marshaller.Marshal(os.Stdout, pipelineInfo); err != nil {
return err
}
}
return nil
}
if spec {
for _, pipelineInfo := range pipelineInfos {
if err := marshaller.Marshal(os.Stdout, ppsutil.PipelineReqFromInfo(pipelineInfo)); err != nil {
return err
}
}
return nil
}
writer := tabwriter.NewWriter(os.Stdout, pretty.PipelineHeader)
for _, pipelineInfo := range pipelineInfos {
pretty.PrintPipelineInfo(writer, pipelineInfo)
}
return writer.Flush()
}),
}
rawFlag(listPipeline)
listPipeline.Flags().BoolVarP(&spec, "spec", "s", false, "Output create-pipeline compatibility specs.")
var all bool
var force bool
deletePipeline := &cobra.Command{
Use: "delete-pipeline pipeline-name",
Short: "Delete a pipeline.",
Long: "Delete a pipeline.",
Run: cmdutil.RunBoundedArgs(0, 1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
if len(args) > 0 && all {
return fmt.Errorf("cannot use the --all flag with an argument")
}
if len(args) == 0 && !all {
return fmt.Errorf("either a pipeline name or the --all flag needs to be provided")
}
if all {
_, err = client.PpsAPIClient.DeletePipeline(
client.Ctx(),
&ppsclient.DeletePipelineRequest{
All: all,
Force: force,
})
} else {
err = client.DeletePipeline(args[0], force)
}
if err != nil {
return grpcutil.ScrubGRPC(err)
}
return nil
}),
}
deletePipeline.Flags().BoolVar(&all, "all", false, "delete all pipelines")
deletePipeline.Flags().BoolVarP(&force, "force", "f", false, "delete the pipeline regardless of errors; use with care")
startPipeline := &cobra.Command{
Use: "start-pipeline pipeline-name",
Short: "Restart a stopped pipeline.",
Long: "Restart a stopped pipeline.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
if err := client.StartPipeline(args[0]); err != nil {
cmdutil.ErrorAndExit("error from StartPipeline: %s", err.Error())
}
return nil
}),
}
stopPipeline := &cobra.Command{
Use: "stop-pipeline pipeline-name",
Short: "Stop a running pipeline.",
Long: "Stop a running pipeline.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
if err := client.StopPipeline(args[0]); err != nil {
cmdutil.ErrorAndExit("error from StopPipeline: %s", err.Error())
}
return nil
}),
}
var memory string
garbageCollect := &cobra.Command{
Use: "garbage-collect",
Short: "Garbage collect unused data.",
Long: `Garbage collect unused data.
When a file/commit/repo is deleted, the data is not immediately removed from
the underlying storage system (e.g. S3) for performance and architectural
reasons. This is similar to how when you delete a file on your computer, the
file is not necessarily wiped from disk immediately.
To actually remove the data, you will need to manually invoke garbage
collection with "pachctl garbage-collect".
Currently "pachctl garbage-collect" can only be started when there are no
pipelines running. You also need to ensure that there's no ongoing "put-file".
Garbage collection puts the cluster into a readonly mode where no new jobs can
be created and no data can be added.
Pachyderm's garbage collection uses bloom filters to index live objects. This
means that some dead objects may erronously not be deleted during garbage
collection. The probability of this happening depends on how many objects you
have; at around 10M objects it starts to become likely with the default values.
To lower Pachyderm's error rate and make garbage-collection more comprehensive,
you can increase the amount of memory used for the bloom filters with the
--memory flag. The default value is 10MB.
`,
Run: cmdutil.RunFixedArgs(0, func(args []string) (retErr error) {
client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
if err != nil {
return err
}
defer client.Close()
memoryBytes, err := units.RAMInBytes(memory)
if err != nil {
return err
}
return client.GarbageCollect(memoryBytes)
}),
}
garbageCollect.Flags().StringVarP(&memory, "memory", "m", "0", "The amount of memory to use during garbage collection. Default is 10MB.")
var result []*cobra.Command
result = append(result, job)
result = append(result, inspectJob)
result = append(result, listJob)
result = append(result, flushJob)
result = append(result, deleteJob)
result = append(result, stopJob)
result = append(result, restartDatum)
result = append(result, listDatum)
result = append(result, inspectDatum)
result = append(result, getLogs)
result = append(result, pipeline)
result = append(result, createPipeline)
result = append(result, updatePipeline)
result = append(result, inspectPipeline)
result = append(result, extractPipeline)
result = append(result, editPipeline)
result = append(result, listPipeline)
result = append(result, deletePipeline)
result = append(result, startPipeline)
result = append(result, stopPipeline)
result = append(result, garbageCollect)
return result, nil
}
// pipelineHelper reads one or more pipeline specs from pipelinePath (a
// local file, a URL, or "-" for stdin) and creates/updates each pipeline
// on pachd.
//
// When update is true each request is marked as an update and reprocess
// controls whether previously-processed datums are redone. When build is
// true the pipeline's docker image is rebuilt from its dockerfile (and
// then pushed); when pushImages is true the local image is pushed as-is.
// registry/username/password configure the docker registry used for the
// push; the pushed image (tagged with a fresh UUID) replaces the image in
// the submitted spec.
func pipelineHelper(metrics bool, reprocess bool, build bool, pushImages bool, registry string, username string, password string, pipelinePath string, update bool) error {
	cfgReader, err := ppsutil.NewPipelineManifestReader(pipelinePath)
	if err != nil {
		return err
	}
	client, err := pachdclient.NewOnUserMachine(metrics, true, "user")
	if err != nil {
		return fmt.Errorf("error connecting to pachd: %v", err)
	}
	defer client.Close()
	for {
		request, err := cfgReader.NextCreatePipelineRequest()
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		// Guard against specs with no input section: the original code
		// dereferenced request.Input unconditionally and could panic.
		if request.Input != nil && request.Input.Atom != nil {
			fmt.Println("WARNING: The `atom` input type has been deprecated and will be removed in a future version. Please replace `atom` with `pfs`.")
		}
		if update {
			request.Update = true
			request.Reprocess = reprocess
		}
		if build || pushImages {
			if build && pushImages {
				fmt.Fprintln(os.Stderr, "`--push-images` is redundant, as it's already enabled with `--build`")
			}
			// Fail with a clear message instead of a nil-pointer panic when
			// the spec has no transform section.
			if request.Transform == nil {
				return fmt.Errorf("pipeline spec must have a `transform` in order to build or push images")
			}
			dockerClient, authConfig, err := dockerConfig(registry, username, password)
			if err != nil {
				return err
			}
			repo, sourceTag := docker.ParseRepositoryTag(request.Transform.Image)
			if sourceTag == "" {
				sourceTag = "latest"
			}
			// Unique destination tag so repeated pushes never collide.
			destTag := uuid.NewWithoutDashes()
			if build {
				dockerfile := request.Transform.Dockerfile
				if dockerfile == "" {
					return fmt.Errorf("`dockerfile` must be specified in order to use `--build`")
				}
				// `--build` needs a local docker context directory, so
				// reject stdin and URL pipeline paths.
				url, err := url.Parse(pipelinePath)
				if pipelinePath == "-" || (err == nil && url.Scheme != "") {
					return fmt.Errorf("`--build` can only be used when the pipeline path is local")
				}
				absPath, err := filepath.Abs(pipelinePath)
				if err != nil {
					return fmt.Errorf("could not get absolute path to the pipeline path '%s': %s", pipelinePath, err)
				}
				contextDir := filepath.Dir(absPath)
				err = buildImage(dockerClient, registry, repo, contextDir, dockerfile, destTag)
				if err != nil {
					return err
				}
				// Now that we've built into `destTag`, change the
				// `sourceTag` to be the same so that the push will work with
				// the right image
				sourceTag = destTag
			}
			image, err := pushImage(dockerClient, authConfig, registry, repo, sourceTag, destTag)
			if err != nil {
				return err
			}
			request.Transform.Image = image
		}
		if _, err := client.PpsAPIClient.CreatePipeline(
			client.Ctx(),
			request,
		); err != nil {
			return grpcutil.ScrubGRPC(err)
		}
	}
	return nil
}
// ByCreationTime is an implementation of sort.Interface which
// sorts pps job info by creation time, ascending.
type ByCreationTime []*ppsclient.JobInfo

// Len returns the number of jobs in the collection.
func (arr ByCreationTime) Len() int { return len(arr) }

// Swap exchanges the jobs at indices i and j.
func (arr ByCreationTime) Swap(i, j int) { arr[i], arr[j] = arr[j], arr[i] }
// Less reports whether job i started before job j, comparing seconds
// first and nanoseconds on a tie. Jobs with a missing start time are
// never considered "less" (they keep their relative order).
func (arr ByCreationTime) Less(i, j int) bool {
	si, sj := arr[i].Started, arr[j].Started
	if si == nil || sj == nil {
		return false
	}
	switch {
	case si.Seconds < sj.Seconds:
		return true
	case si.Seconds == sj.Seconds:
		return si.Nanos < sj.Nanos
	default:
		return false
	}
}
// dockerConfig builds a docker client from the environment along with the
// auth configuration to use for the given registry. If username and
// password are both supplied they are used directly; otherwise the auth is
// looked up in the user's docker config, picking the first entry whose
// server address contains the registry (zero-value auth if none matches).
func dockerConfig(registry string, username string, password string) (*docker.Client, docker.AuthConfiguration, error) {
	var auth docker.AuthConfiguration
	client, err := docker.NewClientFromEnv()
	if err != nil {
		return nil, auth, fmt.Errorf("could not create a docker client from the environment: %s", err)
	}
	// Explicit credentials take precedence over the docker config file.
	if username != "" && password != "" {
		auth = docker.AuthConfiguration{
			ServerAddress: registry,
			Username:      username,
			Password:      password,
		}
		return client, auth, nil
	}
	configs, err := docker.NewAuthConfigurationsFromDockerCfg()
	if err != nil {
		if isDockerUsingKeychain() {
			return nil, auth, fmt.Errorf("error parsing auth: %s; it looks like you may have a docker configuration not supported by the client library that we use; as a workaround, try specifying the `--username` and `--password` flags", err.Error())
		}
		return nil, auth, fmt.Errorf("error parsing auth: %s, try running `docker login`", err.Error())
	}
	for _, candidate := range configs.Configs {
		if strings.Contains(candidate.ServerAddress, registry) {
			auth = candidate
			break
		}
	}
	return client, auth, nil
}
// buildImage builds the docker image registry/repo:destTag from the given
// dockerfile, using contextDir as the build context. Build output is
// streamed to stdout.
func buildImage(client *docker.Client, registry string, repo string, contextDir string, dockerfile string, destTag string) error {
	image := fmt.Sprintf("%s/%s:%s", registry, repo, destTag)
	fmt.Printf("Building %s, this may take a while.\n", image)
	opts := docker.BuildImageOptions{
		Name:         image,
		ContextDir:   contextDir,
		Dockerfile:   dockerfile,
		OutputStream: os.Stdout,
	}
	if err := client.BuildImage(opts); err != nil {
		return fmt.Errorf("could not build docker image: %s", err)
	}
	return nil
}
// pushImage tags the local image registry/repo:sourceTag as
// registry/repo:destTag, pushes the new tag using authConfig, and returns
// the fully qualified name of the pushed image.
func pushImage(client *docker.Client, authConfig docker.AuthConfiguration, registry string, repo string, sourceTag string, destTag string) (string, error) {
	fullRepo := fmt.Sprintf("%s/%s", registry, repo)
	sourceImage := fmt.Sprintf("%s:%s", fullRepo, sourceTag)
	destImage := fmt.Sprintf("%s:%s", fullRepo, destTag)
	fmt.Printf("Tagging/pushing %s, this may take a while.\n", destImage)
	tagOpts := docker.TagImageOptions{
		Repo:    fullRepo,
		Tag:     destTag,
		Context: context.Background(),
	}
	if err := client.TagImage(sourceImage, tagOpts); err != nil {
		return "", fmt.Errorf("could not tag docker image: %s", err)
	}
	pushOpts := docker.PushImageOptions{
		Name: fullRepo,
		Tag:  destTag,
	}
	if err := client.PushImage(pushOpts, authConfig); err != nil {
		return "", fmt.Errorf("could not push docker image: %s", err)
	}
	return destImage, nil
}
// isDockerUsingKeychain reports whether the user's ~/.docker/config.json
// delegates docker.io credentials to the macOS keychain: an empty auth
// entry for the docker index combined with "credsStore": "osxkeychain".
// Such configs are not readable by our current docker client library.
// Any read/parse failure is treated as "not using the keychain".
// TODO(ys): remove if/when this issue is addressed:
// https://github.com/fsouza/go-dockerclient/issues/677
func isDockerUsingKeychain() bool {
	u, err := user.Current()
	if err != nil {
		return false
	}
	raw, err := ioutil.ReadFile(path.Join(u.HomeDir, ".docker/config.json"))
	if err != nil {
		return false
	}
	var cfg map[string]interface{}
	if json.Unmarshal(raw, &cfg) != nil {
		return false
	}
	// The keychain layout has an "auths" map whose docker-index entry is
	// present but empty (credentials live in the keychain, not the file).
	auths, ok := cfg["auths"].(map[string]interface{})
	if !ok {
		return false
	}
	entry, ok := auths["https://index.docker.io/v1/"].(map[string]interface{})
	if !ok || len(entry) > 0 {
		return false
	}
	return cfg["credsStore"] == "osxkeychain"
}
|
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package pbft
import (
"reflect"
)
// correctViewChange validates a received view-change message against the
// PBFT invariants: every entry in the P set (prepared requests) and Q set
// (pre-prepared requests) must come from a view older than the one being
// changed to and have a sequence number in the window (vc.H, vc.H+L],
// and every checkpoint in the C set must lie in [vc.H, vc.H+L].
// Returns false (after a debug log) on the first violating entry.
func (instance *Plugin) correctViewChange(vc *ViewChange) bool {
	// P and Q entries share the same window/view constraints, so check
	// them in one pass over the concatenation.
	for _, p := range append(vc.Pset, vc.Qset...) {
		if !(p.View < vc.View && p.SequenceNumber > vc.H && p.SequenceNumber <= vc.H+instance.L) {
			logger.Debug("invalid p entry in view-change: vc(v:%d h:%d) p(v:%d n:%d)",
				vc.View, vc.H, p.View, p.SequenceNumber)
			return false
		}
	}
	for _, c := range vc.Cset {
		// XXX the paper says c.n > vc.h
		if !(c.SequenceNumber >= vc.H && c.SequenceNumber <= vc.H+instance.L) {
			logger.Debug("invalid c entry in view-change: vc(v:%d h:%d) c(n:%d)",
				vc.View, vc.H, c.SequenceNumber)
			return false
		}
	}
	return true
}
// sendViewChange advances this replica to the next view, deactivates the
// current view, and broadcasts a view-change message assembled from local
// protocol state: the C set (known checkpoints), P set (prepared
// requests), and Q set (pre-prepared requests), as in the Castro-Liskov
// PBFT view-change protocol. It also garbage-collects certificate and
// view-change entries from views older than the new one.
func (instance *Plugin) sendViewChange() error {
	instance.view += 1
	instance.activeView = false
	// P set: requests that have prepared here
	//
	// "<n,d,v> has a prepared certificate, and no request
	// prepared in a later view with the same number"
	for idx, cert := range instance.certStore {
		if cert.prePrepare == nil {
			continue
		}
		digest := cert.prePrepare.RequestDigest
		if !instance.prepared(digest, idx.v, idx.n) {
			continue
		}
		// Keep only the latest-view entry for each sequence number.
		if p, ok := instance.pset[idx.n]; ok && p.View > idx.v {
			continue
		}
		instance.pset[idx.n] = &ViewChange_PQ{
			SequenceNumber: idx.n,
			Digest:         digest,
			View:           idx.v,
		}
	}
	// Q set: requests that have pre-prepared here (pre-prepare or
	// prepare sent)
	//
	// "<n,d,v>: requests that pre-prepared here, and did not
	// pre-prepare in a later view with the same number"
	for idx, cert := range instance.certStore {
		if cert.prePrepare == nil {
			continue
		}
		digest := cert.prePrepare.RequestDigest
		if !instance.prePrepared(digest, idx.v, idx.n) {
			continue
		}
		// Q is keyed by (digest, n); keep only the latest view per key.
		qi := qidx{digest, idx.n}
		if q, ok := instance.qset[qi]; ok && q.View > idx.v {
			continue
		}
		instance.qset[qi] = &ViewChange_PQ{
			SequenceNumber: idx.n,
			Digest:         digest,
			View:           idx.v,
		}
	}
	// clear old messages
	for idx, _ := range instance.certStore {
		if idx.v < instance.view {
			delete(instance.certStore, idx)
			// XXX how do we clear reqStore?
		}
	}
	for idx, _ := range instance.viewChangeStore {
		if idx.v < instance.view {
			delete(instance.viewChangeStore, idx)
		}
	}
	// Assemble the view-change message from the C, P, and Q sets.
	vc := &ViewChange{
		View:      instance.view,
		H:         instance.h,
		ReplicaId: instance.id,
	}
	for n, state := range instance.chkpts {
		vc.Cset = append(vc.Cset, &ViewChange_C{
			SequenceNumber: n,
			Digest:         state,
		})
	}
	for _, p := range instance.pset {
		vc.Pset = append(vc.Pset, p)
	}
	for _, q := range instance.qset {
		vc.Qset = append(vc.Qset, q)
	}
	logger.Info("Replica %d sending view-change, v:%d, h:%d, |C|:%d, |P|:%d, |Q|:%d",
		instance.id, vc.View, vc.H, len(vc.Cset), len(vc.Pset), len(vc.Qset))
	return instance.broadcast(&Message{&Message_ViewChange{vc}}, true)
}
// recvViewChange handles a view-change message from another replica. The
// message is stored if it targets our current or a later view and passes
// correctViewChange, or if a message for the same (view, replica) index
// is already stored (a retransmission simply re-stores it). If this
// replica is the primary of its current view it attempts to send a
// new-view; otherwise it attempts to process any pending new-view.
func (instance *Plugin) recvViewChange(vc *ViewChange) error {
	logger.Info("Replica %d received view-change from replica %d, v:%d, h:%d, |C|:%d, |P|:%d, |Q|:%d",
		instance.id, vc.ReplicaId, vc.View, vc.H, len(vc.Cset), len(vc.Pset), len(vc.Qset))
	// Note operator precedence: && binds tighter than ||, so this reads
	// !((vc.View >= instance.view && correct) || alreadyStored).
	if !(vc.View >= instance.view && instance.correctViewChange(vc) || instance.viewChangeStore[vcidx{vc.View, vc.ReplicaId}] != nil) {
		logger.Warning("View-change message incorrect")
		return nil
	}
	instance.viewChangeStore[vcidx{vc.View, vc.ReplicaId}] = vc
	if instance.getPrimary(instance.view) == instance.id {
		return instance.sendNewView()
	}
	return instance.processNewView()
}
// sendNewView is run by the primary of the current (post-view-change)
// view: it collects the stored view-change messages, selects the initial
// checkpoint and the sequence-number assignment (X set), and broadcasts
// the resulting new-view message before processing it locally. It is a
// silent no-op if a new-view for this view was already sent, or if a
// checkpoint / assignment cannot yet be determined (not enough
// view-change messages collected).
func (instance *Plugin) sendNewView() (err error) {
	if instance.lastNewView.View == instance.view {
		// already sent a new-view for this view
		return
	}
	vset := instance.getViewChanges()
	// Highest checkpoint backed by a weak certificate and a quorum.
	cp, ok := instance.selectInitialCheckpoint(vset)
	if !ok {
		return
	}
	// X set: request digest assigned to each sequence number above cp.
	msgList := instance.assignSequenceNumbers(vset, cp)
	if msgList == nil {
		return
	}
	nv := &NewView{
		View:      instance.view,
		Vset:      vset,
		Xset:      msgList,
		ReplicaId: instance.id,
	}
	logger.Info("New primary %d sending new-view, v:%d, X:%+v",
		instance.id, nv.View, nv.Xset)
	err = instance.broadcast(&Message{&Message_NewView{nv}}, false)
	if err != nil {
		return err
	}
	// Record only after a successful broadcast so a failed send can retry.
	instance.lastNewView = *nv
	return instance.processNewView()
}
// recvNewView handles a new-view message from the (claimed) new primary.
// The message is accepted only if it is for a non-zero view at least as
// new as ours, was sent by the replica that is primary for that view, and
// has not already been processed; otherwise it is logged and dropped.
// Accepted messages are recorded and handed to processNewView for
// verification and adoption.
func (instance *Plugin) recvNewView(nv *NewView) error {
	logger.Info("Replica %d received new-view %d",
		instance.id, nv.View)
	if !(nv.View > 0 && nv.View >= instance.view && instance.getPrimary(nv.View) == nv.ReplicaId && instance.lastNewView.View != nv.View) {
		logger.Info("Replica %d rejecting invalid new-view from %d, v:%d",
			instance.id, nv.ReplicaId, nv.View)
		return nil
	}
	instance.lastNewView = *nv
	return instance.processNewView()
}
// processNewView verifies and adopts the most recently recorded new-view
// message: it independently recomputes the initial checkpoint and the
// sequence-number assignment from the message's view-change set, checks
// they match the primary's X set, ensures every assigned
// non-checkpointed request is available locally, and then activates the
// new view — installing pre-prepare entries (and, on backups, sending
// prepares) for every assigned request. Verification failures trigger
// another view change; a missing request merely aborts processing.
func (instance *Plugin) processNewView() error {
	nv := instance.lastNewView
	if nv.View == 0 {
		// no new-view recorded yet
		return nil
	}
	if instance.activeView {
		logger.Info("Replica %d ignoring new-view from %d, v:%d: we are active in view %d",
			instance.id, nv.ReplicaId, nv.View, instance.view)
		return nil
	}
	// XXX check new-view certificate
	cp, ok := instance.selectInitialCheckpoint(nv.Vset)
	if !ok {
		logger.Warning("could not determine initial checkpoint: %+v",
			instance.viewChangeStore)
		return instance.sendViewChange()
	}
	// Recompute the X set and compare against the primary's claim.
	msgList := instance.assignSequenceNumbers(nv.Vset, cp)
	if msgList == nil {
		logger.Warning("could not assign sequence numbers: %+v",
			instance.viewChangeStore)
		return instance.sendViewChange()
	}
	if !reflect.DeepEqual(msgList, nv.Xset) {
		logger.Warning("failed to verify new-view Xset: computed %+v, received %+v",
			msgList, nv.Xset)
		return instance.sendViewChange()
	}
	// Ensure we hold every assigned request not covered by a checkpoint
	// before accepting the view.
	for n, d := range nv.Xset {
		// XXX why should we use "h ≥ min{n | ∃d : (<n,d> ∈ X)}"?
		// "h ≥ min{n | ∃d : (<n,d> ∈ X)} ∧ ∀<n,d> ∈ X : (n ≤ h ∨ ∃m ∈ in : (D(m) = d))"
		if n <= instance.h {
			continue
		} else {
			if d == "" {
				// NULL request; skip
				continue
			}
			if _, ok := instance.reqStore[d]; !ok {
				logger.Warning("missing assigned, non-checkpointed request %s",
					d)
				// XXX fetch request?
				return nil
			}
		}
	}
	logger.Info("Replica %d accepting new-view to view %d", instance.id, instance.view)
	instance.activeView = true
	// Install pre-prepare entries for every assigned request.
	for n, d := range nv.Xset {
		preprep := &PrePrepare{
			View:           instance.view,
			SequenceNumber: n,
			RequestDigest:  d,
			ReplicaId:      instance.id,
		}
		cert := instance.getCert(instance.view, n)
		cert.prePrepare = preprep
		// NOTE(review): this moves seqNo *backwards* to the smallest
		// assigned n; one might expect the maximum here — confirm this is
		// intended against the protocol.
		if n < instance.seqNo {
			instance.seqNo = n
		}
	}
	// Backups additionally record and broadcast a prepare per entry.
	if instance.getPrimary(instance.view) != instance.id {
		for n, d := range nv.Xset {
			prep := &Prepare{
				View:           instance.view,
				SequenceNumber: n,
				RequestDigest:  d,
				ReplicaId:      instance.id,
			}
			cert := instance.getCert(instance.view, n)
			cert.prepare = append(cert.prepare, prep)
			cert.sentPrepare = true
			instance.broadcast(&Message{&Message_Prepare{prep}}, true)
		}
	}
	return nil
}
// getViewChanges returns all view-change messages currently stored by
// this replica, in arbitrary (map-iteration) order.
func (instance *Plugin) getViewChanges() (vset []*ViewChange) {
	// append allocates on demand, so pre-initializing vset with an empty
	// slice (the former make([]*ViewChange, 0)) was redundant.
	for _, vc := range instance.viewChangeStore {
		vset = append(vset, vc)
	}
	return
}
// selectInitialCheckpoint picks the starting checkpoint for a new view
// from the checkpoint sets carried by the view-change messages.  A
// checkpoint qualifies when it has a weak certificate (more than f
// identical reports) and more than 2f of those reporters have a low
// watermark at or below it; among qualifying checkpoints the highest
// sequence number wins.  ok is false when none qualifies.
func (instance *Plugin) selectInitialCheckpoint(vset []*ViewChange) (checkpoint uint64, ok bool) {
	// Group the view-change messages by the exact checkpoint
	// (sequence number, digest) they reported.
	checkpoints := make(map[ViewChange_C][]*ViewChange)
	for _, vc := range vset {
		for _, c := range vc.Cset {
			checkpoints[*c] = append(checkpoints[*c], vc)
		}
	}
	if len(checkpoints) == 0 {
		logger.Debug("no checkpoints to select from: %d %s",
			len(instance.viewChangeStore), checkpoints)
		return
	}
	for idx, vcList := range checkpoints {
		// need weak certificate for the checkpoint
		if len(vcList) <= instance.f {
			logger.Debug("no weak certificate for n:%d",
				idx.SequenceNumber)
			continue
		}
		// Count the reporters whose watermark h does not exceed this
		// checkpoint's sequence number.
		quorum := 0
		for _, vc := range vcList {
			if vc.H <= idx.SequenceNumber {
				quorum += 1
			}
		}
		if quorum <= 2*instance.f {
			logger.Debug("no quorum for n:%d",
				idx.SequenceNumber)
			continue
		}
		// Keep the highest qualifying checkpoint seen so far.
		if checkpoint <= idx.SequenceNumber {
			checkpoint = idx.SequenceNumber
			ok = true
		}
	}
	return
}
// assignSequenceNumbers computes the new-view Xset: for every sequence
// number n in the window (h, h+L] it selects either a request digest that
// satisfies conditions A1/A2 of the view-change protocol, or the null
// request ("") when 2f+1 replicas prepared nothing at n.  It returns nil
// when some n can be decided neither way, in which case the caller must
// wait for more view-changes or trigger another view change.
func (instance *Plugin) assignSequenceNumbers(vset []*ViewChange, h uint64) (msgList map[uint64]string) {
	msgList = make(map[uint64]string)
	// "for all n such that h < n <= h + L"
nLoop:
	for n := h + 1; n <= h+instance.L; n++ {
		// "∃m ∈ S..."
		for _, m := range vset {
			// "...with <n,d,v> ∈ m.P"
			for _, em := range m.Pset {
				quorum := 0
				// "A1. ∃2f+1 messages m' ∈ S"
			mpLoop:
				for _, mp := range vset {
					// m' only counts if n is above its low watermark.
					if mp.H >= n {
						continue
					}
					// "∀<n,d',v'> ∈ m'.P"
					for _, emp := range mp.Pset {
						// Disqualify m' if it prepared a conflicting
						// request at n in the same or a later view.
						if n == emp.SequenceNumber && !(emp.View < em.View || (emp.View == em.View && emp.Digest == em.Digest)) {
							continue mpLoop
						}
					}
					quorum += 1
				}
				if quorum < 2*instance.f+1 {
					continue
				}
				quorum = 0
				// "A2. ∃f+1 messages m' ∈ S"
				for _, mp := range vset {
					// "∃<n,d',v'> ∈ m'.Q"
					for _, emp := range mp.Qset {
						if n == emp.SequenceNumber && emp.View >= em.View && emp.Digest == em.Digest {
							quorum += 1
						}
					}
				}
				if quorum < instance.f+1 {
					continue
				}
				// "then select the request with digest d for number n"
				msgList[n] = em.Digest
				continue nLoop
			}
		}
		quorum := 0
		// "else if ∃2f+1 messages m ∈ S"
	nullLoop:
		for _, m := range vset {
			// "m.P has no entry"
			for _, em := range m.Pset {
				if em.SequenceNumber == n {
					continue nullLoop
				}
			}
			quorum += 1
		}
		if quorum >= 2*instance.f+1 {
			// "then select the null request for number n"
			msgList[n] = ""
			continue nLoop
		}
		// Neither a prepared request nor a null quorum exists for n: the
		// assignment is undecidable with the current view-change set.
		return nil
	}
	return
}
Remove vset slice initialization
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package pbft
import (
"reflect"
)
// correctViewChange performs structural validation of a view-change
// message: every P/Q entry must come from a view older than the target
// view and fall inside the sender's watermark window (h, h+L], and every
// checkpoint entry must fall inside [h, h+L].  It returns false (and logs
// at debug level) on the first invalid entry.
func (instance *Plugin) correctViewChange(vc *ViewChange) bool {
	for _, p := range append(vc.Pset, vc.Qset...) {
		if !(p.View < vc.View && p.SequenceNumber > vc.H && p.SequenceNumber <= vc.H+instance.L) {
			logger.Debug("invalid p entry in view-change: vc(v:%d h:%d) p(v:%d n:%d)",
				vc.View, vc.H, p.View, p.SequenceNumber)
			return false
		}
	}
	for _, c := range vc.Cset {
		// XXX the paper says c.n > vc.h
		if !(c.SequenceNumber >= vc.H && c.SequenceNumber <= vc.H+instance.L) {
			logger.Debug("invalid c entry in view-change: vc(v:%d h:%d) c(n:%d)",
				vc.View, vc.H, c.SequenceNumber)
			return false
		}
	}
	return true
}
// sendViewChange moves this replica into the next view: it leaves the
// active view, rebuilds the P set (prepared requests) and Q set
// (pre-prepared requests) from the certificate store, garbage-collects
// state belonging to older views, and broadcasts the resulting
// view-change message.
func (instance *Plugin) sendViewChange() error {
	instance.view += 1
	instance.activeView = false
	// P set: requests that have prepared here
	//
	// "<n,d,v> has a prepared certificate, and no request
	// prepared in a later view with the same number"
	for idx, cert := range instance.certStore {
		if cert.prePrepare == nil {
			continue
		}
		digest := cert.prePrepare.RequestDigest
		if !instance.prepared(digest, idx.v, idx.n) {
			continue
		}
		// Keep only the latest-view entry for each sequence number.
		if p, ok := instance.pset[idx.n]; ok && p.View > idx.v {
			continue
		}
		instance.pset[idx.n] = &ViewChange_PQ{
			SequenceNumber: idx.n,
			Digest:         digest,
			View:           idx.v,
		}
	}
	// Q set: requests that have pre-prepared here (pre-prepare or
	// prepare sent)
	//
	// "<n,d,v>: requests that pre-prepared here, and did not
	// pre-prepare in a later view with the same number"
	for idx, cert := range instance.certStore {
		if cert.prePrepare == nil {
			continue
		}
		digest := cert.prePrepare.RequestDigest
		if !instance.prePrepared(digest, idx.v, idx.n) {
			continue
		}
		qi := qidx{digest, idx.n}
		if q, ok := instance.qset[qi]; ok && q.View > idx.v {
			continue
		}
		instance.qset[qi] = &ViewChange_PQ{
			SequenceNumber: idx.n,
			Digest:         digest,
			View:           idx.v,
		}
	}
	// clear old messages (deleting while ranging over a map is safe in Go)
	for idx, _ := range instance.certStore {
		if idx.v < instance.view {
			delete(instance.certStore, idx)
			// XXX how do we clear reqStore?
		}
	}
	for idx, _ := range instance.viewChangeStore {
		if idx.v < instance.view {
			delete(instance.viewChangeStore, idx)
		}
	}
	// Assemble the view-change message from the checkpoint, P and Q sets.
	vc := &ViewChange{
		View:      instance.view,
		H:         instance.h,
		ReplicaId: instance.id,
	}
	for n, state := range instance.chkpts {
		vc.Cset = append(vc.Cset, &ViewChange_C{
			SequenceNumber: n,
			Digest:         state,
		})
	}
	for _, p := range instance.pset {
		vc.Pset = append(vc.Pset, p)
	}
	for _, q := range instance.qset {
		vc.Qset = append(vc.Qset, q)
	}
	logger.Info("Replica %d sending view-change, v:%d, h:%d, |C|:%d, |P|:%d, |Q|:%d",
		instance.id, vc.View, vc.H, len(vc.Cset), len(vc.Pset), len(vc.Qset))
	return instance.broadcast(&Message{&Message_ViewChange{vc}}, true)
}
// recvViewChange validates and stores a view-change message from another
// replica, then — if this replica is the current view's primary — tries
// to send a new-view, otherwise tries to process one already received.
func (instance *Plugin) recvViewChange(vc *ViewChange) error {
	logger.Info("Replica %d received view-change from replica %d, v:%d, h:%d, |C|:%d, |P|:%d, |Q|:%d",
		instance.id, vc.ReplicaId, vc.View, vc.H, len(vc.Cset), len(vc.Pset), len(vc.Qset))
	// Accept when the message targets a current-or-future view AND is
	// structurally correct, or when an entry for the same (view, replica)
	// index is already stored.  Note && binds tighter than || here.
	if !(vc.View >= instance.view && instance.correctViewChange(vc) || instance.viewChangeStore[vcidx{vc.View, vc.ReplicaId}] != nil) {
		logger.Warning("View-change message incorrect")
		return nil
	}
	instance.viewChangeStore[vcidx{vc.View, vc.ReplicaId}] = vc
	if instance.getPrimary(instance.view) == instance.id {
		return instance.sendNewView()
	}
	return instance.processNewView()
}
// sendNewView is executed by the primary of the pending view.  Once a
// usable initial checkpoint and a complete sequence-number assignment can
// be derived from the stored view-changes, it broadcasts the new-view
// message (at most once per view) and then processes it locally.
func (instance *Plugin) sendNewView() (err error) {
	// A new-view for this view was already sent; never send a duplicate.
	if instance.lastNewView.View == instance.view {
		return
	}
	vset := instance.getViewChanges()
	cp, ok := instance.selectInitialCheckpoint(vset)
	if !ok {
		// Not enough agreeing checkpoints yet; wait for more view-changes.
		return
	}
	msgList := instance.assignSequenceNumbers(vset, cp)
	if msgList == nil {
		// Assignment currently undecidable; wait for more view-changes.
		return
	}
	nv := &NewView{
		View:      instance.view,
		Vset:      vset,
		Xset:      msgList,
		ReplicaId: instance.id,
	}
	logger.Info("New primary %d sending new-view, v:%d, X:%+v",
		instance.id, nv.View, nv.Xset)
	err = instance.broadcast(&Message{&Message_NewView{nv}}, false)
	if err != nil {
		return err
	}
	// Record the message only after a successful broadcast, then install it.
	instance.lastNewView = *nv
	return instance.processNewView()
}
// recvNewView stores a new-view message if it comes from the legitimate
// primary of a non-zero, current-or-future view that has not already been
// recorded, then attempts to install it via processNewView.
func (instance *Plugin) recvNewView(nv *NewView) error {
	logger.Info("Replica %d received new-view %d",
		instance.id, nv.View)
	if !(nv.View > 0 && nv.View >= instance.view && instance.getPrimary(nv.View) == nv.ReplicaId && instance.lastNewView.View != nv.View) {
		logger.Info("Replica %d rejecting invalid new-view from %d, v:%d",
			instance.id, nv.ReplicaId, nv.View)
		return nil
	}
	instance.lastNewView = *nv
	return instance.processNewView()
}
// processNewView applies the stored last new-view message, if any.  It
// independently recomputes the initial checkpoint and the sequence-number
// assignment from the new-view's view-change set and compares the result
// against the carried Xset; a mismatch triggers another view change.  On
// success it installs pre-prepare certificates for every assigned request
// (and, on backups, broadcasts matching prepares) and activates the view.
func (instance *Plugin) processNewView() error {
	nv := instance.lastNewView
	// View 0 is the zero value: no new-view message has been stored yet.
	if nv.View == 0 {
		return nil
	}
	// Only act while between views; once active, a stored new-view is stale.
	if instance.activeView {
		logger.Info("Replica %d ignoring new-view from %d, v:%d: we are active in view %d",
			instance.id, nv.ReplicaId, nv.View, instance.view)
		return nil
	}
	// XXX check new-view certificate
	cp, ok := instance.selectInitialCheckpoint(nv.Vset)
	if !ok {
		logger.Warning("could not determine initial checkpoint: %+v",
			instance.viewChangeStore)
		return instance.sendViewChange()
	}
	msgList := instance.assignSequenceNumbers(nv.Vset, cp)
	if msgList == nil {
		logger.Warning("could not assign sequence numbers: %+v",
			instance.viewChangeStore)
		return instance.sendViewChange()
	}
	// The primary's Xset must match what we computed independently.
	if !reflect.DeepEqual(msgList, nv.Xset) {
		logger.Warning("failed to verify new-view Xset: computed %+v, received %+v",
			msgList, nv.Xset)
		return instance.sendViewChange()
	}
	// Verify we hold every non-null request assigned above our low
	// watermark h; without the request body we cannot execute it later.
	for n, d := range nv.Xset {
		// XXX why should we use "h ≥ min{n | ∃d : (<n,d> ∈ X)}"?
		// "h ≥ min{n | ∃d : (<n,d> ∈ X)} ∧ ∀<n,d> ∈ X : (n ≤ h ∨ ∃m ∈ in : (D(m) = d))"
		if n <= instance.h {
			continue
		} else {
			if d == "" {
				// NULL request; skip
				continue
			}
			if _, ok := instance.reqStore[d]; !ok {
				logger.Warning("missing assigned, non-checkpointed request %s",
					d)
				// XXX fetch request?
				return nil
			}
		}
	}
	logger.Info("Replica %d accepting new-view to view %d", instance.id, instance.view)
	instance.activeView = true
	// Record a pre-prepare certificate for every assigned request in the
	// new view, and rewind seqNo so it will not skip assigned numbers.
	for n, d := range nv.Xset {
		preprep := &PrePrepare{
			View:           instance.view,
			SequenceNumber: n,
			RequestDigest:  d,
			ReplicaId:      instance.id,
		}
		cert := instance.getCert(instance.view, n)
		cert.prePrepare = preprep
		if n < instance.seqNo {
			instance.seqNo = n
		}
	}
	// Backups additionally record and broadcast a prepare for each
	// assigned request; the new primary's pre-prepares stand on their own.
	if instance.getPrimary(instance.view) != instance.id {
		for n, d := range nv.Xset {
			prep := &Prepare{
				View:           instance.view,
				SequenceNumber: n,
				RequestDigest:  d,
				ReplicaId:      instance.id,
			}
			cert := instance.getCert(instance.view, n)
			cert.prepare = append(cert.prepare, prep)
			cert.sentPrepare = true
			instance.broadcast(&Message{&Message_Prepare{prep}}, true)
		}
	}
	return nil
}
// getViewChanges collects every stored view-change message into a slice.
// Iteration order over the underlying map is unspecified.
func (instance *Plugin) getViewChanges() []*ViewChange {
	var msgs []*ViewChange
	for _, msg := range instance.viewChangeStore {
		msgs = append(msgs, msg)
	}
	return msgs
}
// selectInitialCheckpoint picks the starting checkpoint for a new view
// from the checkpoint sets carried by the view-change messages.  A
// checkpoint qualifies when it has a weak certificate (more than f
// identical reports) and more than 2f of those reporters have a low
// watermark at or below it; among qualifying checkpoints the highest
// sequence number wins.  ok is false when none qualifies.
func (instance *Plugin) selectInitialCheckpoint(vset []*ViewChange) (checkpoint uint64, ok bool) {
	// Group the view-change messages by the exact checkpoint
	// (sequence number, digest) they reported.
	checkpoints := make(map[ViewChange_C][]*ViewChange)
	for _, vc := range vset {
		for _, c := range vc.Cset {
			checkpoints[*c] = append(checkpoints[*c], vc)
		}
	}
	if len(checkpoints) == 0 {
		logger.Debug("no checkpoints to select from: %d %s",
			len(instance.viewChangeStore), checkpoints)
		return
	}
	for idx, vcList := range checkpoints {
		// need weak certificate for the checkpoint
		if len(vcList) <= instance.f {
			logger.Debug("no weak certificate for n:%d",
				idx.SequenceNumber)
			continue
		}
		// Count the reporters whose watermark h does not exceed this
		// checkpoint's sequence number.
		quorum := 0
		for _, vc := range vcList {
			if vc.H <= idx.SequenceNumber {
				quorum += 1
			}
		}
		if quorum <= 2*instance.f {
			logger.Debug("no quorum for n:%d",
				idx.SequenceNumber)
			continue
		}
		// Keep the highest qualifying checkpoint seen so far.
		if checkpoint <= idx.SequenceNumber {
			checkpoint = idx.SequenceNumber
			ok = true
		}
	}
	return
}
// assignSequenceNumbers computes the new-view Xset: for every sequence
// number n in the window (h, h+L] it selects either a request digest that
// satisfies conditions A1/A2 of the view-change protocol, or the null
// request ("") when 2f+1 replicas prepared nothing at n.  It returns nil
// when some n can be decided neither way, in which case the caller must
// wait for more view-changes or trigger another view change.
func (instance *Plugin) assignSequenceNumbers(vset []*ViewChange, h uint64) (msgList map[uint64]string) {
	msgList = make(map[uint64]string)
	// "for all n such that h < n <= h + L"
nLoop:
	for n := h + 1; n <= h+instance.L; n++ {
		// "∃m ∈ S..."
		for _, m := range vset {
			// "...with <n,d,v> ∈ m.P"
			for _, em := range m.Pset {
				quorum := 0
				// "A1. ∃2f+1 messages m' ∈ S"
			mpLoop:
				for _, mp := range vset {
					// m' only counts if n is above its low watermark.
					if mp.H >= n {
						continue
					}
					// "∀<n,d',v'> ∈ m'.P"
					for _, emp := range mp.Pset {
						// Disqualify m' if it prepared a conflicting
						// request at n in the same or a later view.
						if n == emp.SequenceNumber && !(emp.View < em.View || (emp.View == em.View && emp.Digest == em.Digest)) {
							continue mpLoop
						}
					}
					quorum += 1
				}
				if quorum < 2*instance.f+1 {
					continue
				}
				quorum = 0
				// "A2. ∃f+1 messages m' ∈ S"
				for _, mp := range vset {
					// "∃<n,d',v'> ∈ m'.Q"
					for _, emp := range mp.Qset {
						if n == emp.SequenceNumber && emp.View >= em.View && emp.Digest == em.Digest {
							quorum += 1
						}
					}
				}
				if quorum < instance.f+1 {
					continue
				}
				// "then select the request with digest d for number n"
				msgList[n] = em.Digest
				continue nLoop
			}
		}
		quorum := 0
		// "else if ∃2f+1 messages m ∈ S"
	nullLoop:
		for _, m := range vset {
			// "m.P has no entry"
			for _, em := range m.Pset {
				if em.SequenceNumber == n {
					continue nullLoop
				}
			}
			quorum += 1
		}
		if quorum >= 2*instance.f+1 {
			// "then select the null request for number n"
			msgList[n] = ""
			continue nLoop
		}
		// Neither a prepared request nor a null quorum exists for n: the
		// assignment is undecidable with the current view-change set.
		return nil
	}
	return
}
|
package gorocksdb
// #include "rocksdb/c.h"
import "C"
// Env is a system call environment used by a database.
type Env struct {
c *C.rocksdb_env_t
}
// NewDefaultEnv creates a default environment.
func NewDefaultEnv() *Env {
return NewNativeEnv(C.rocksdb_create_default_env())
}
// NewNativeEnv creates an Environment object from an existing
// *C.rocksdb_env_t handle.
func NewNativeEnv(c *C.rocksdb_env_t) *Env {
	return &Env{c}
}
// SetBackgroundThreads sets the number of background worker threads
// of a specific thread pool for this environment.
// 'LOW' is the default pool.
// Default: 1
func (env *Env) SetBackgroundThreads(n int) {
C.rocksdb_env_set_background_threads(env.c, C.int(n))
}
// SetHighPriorityBackgroundThreads sets the size of the high priority
// thread pool that can be used to prevent compactions from stalling
// memtable flushes.
func (env *Env) SetHighPriorityBackgroundThreads(n int) {
C.rocksdb_env_set_high_priority_background_threads(env.c, C.int(n))
}
// LowerThreadPoolIOPriority lowers the I/O priority of the default ('LOW')
// background thread pool.
func (env *Env) LowerThreadPoolIOPriority() {
	C.rocksdb_env_lower_thread_pool_io_priority(env.c)
}
// LowerHighPriorityThreadPoolIOPriority lowers the I/O priority of the
// high-priority background thread pool.
func (env *Env) LowerHighPriorityThreadPoolIOPriority() {
	C.rocksdb_env_lower_high_priority_thread_pool_io_priority(env.c)
}
// LowerThreadPoolCPUPriority lowers the CPU priority of the default ('LOW')
// background thread pool.
func (env *Env) LowerThreadPoolCPUPriority() {
	C.rocksdb_env_lower_thread_pool_cpu_priority(env.c)
}
// LowerHighPriorityThreadPoolCPUPriority lowers the CPU priority of the
// high-priority background thread pool.
func (env *Env) LowerHighPriorityThreadPoolCPUPriority() {
	C.rocksdb_env_lower_high_priority_thread_pool_cpu_priority(env.c)
}
// Destroy deallocates the Env object.
func (env *Env) Destroy() {
	C.rocksdb_env_destroy(env.c)
	// Clear the handle so a dangling pointer is not retained after free.
	env.c = nil
}
Revert "Add bindings for lowering CPU and IO priority of thread pools"
package gorocksdb
// #include "rocksdb/c.h"
import "C"
// Env is a system call environment used by a database.
type Env struct {
c *C.rocksdb_env_t
}
// NewDefaultEnv creates a default environment.
func NewDefaultEnv() *Env {
return NewNativeEnv(C.rocksdb_create_default_env())
}
// NewNativeEnv creates an Environment object from an existing
// *C.rocksdb_env_t handle.
func NewNativeEnv(c *C.rocksdb_env_t) *Env {
	return &Env{c}
}
// SetBackgroundThreads sets the number of background worker threads
// of a specific thread pool for this environment.
// 'LOW' is the default pool.
// Default: 1
func (env *Env) SetBackgroundThreads(n int) {
C.rocksdb_env_set_background_threads(env.c, C.int(n))
}
// SetHighPriorityBackgroundThreads sets the size of the high priority
// thread pool that can be used to prevent compactions from stalling
// memtable flushes.
func (env *Env) SetHighPriorityBackgroundThreads(n int) {
C.rocksdb_env_set_high_priority_background_threads(env.c, C.int(n))
}
// Destroy deallocates the Env object.
func (env *Env) Destroy() {
	C.rocksdb_env_destroy(env.c)
	// Clear the handle so a dangling pointer is not retained after free.
	env.c = nil
}
|
// This file provides the EqC type, which represents strings that can be
// compared only for equality. Unlike Eq, EqC symbols are first canonicalized
// using a program-provided canonicalization function.
package intern
import (
"fmt"
)
// An EqC is a string that has been interned to an integer after being
// canonicalized using a program-provided transformation function. EqC
// supports only equality comparisons, not inequality comparisons. (No checks
// are performed to enforce that property, unfortunately.)
//
// It is strongly recommended that programs alias EqC once for each
// transformation function. This will help the compiler catch program errors
// if strings interned using different transformation functions are compared.
type EqC Eq
// eqc maintains all the state needed to manipulate EqCs.
var eqc state
// init initializes our global state.
func init() {
eqc.forgetAll()
}
// PreEqC announces, ahead of time, a string that will later be interned
// with NewEqC after canonicalization by f.  Batching many PreEqC calls
// before interning helps avoid running out of symbols that compare
// properly against all other symbols.
func PreEqC(s string, f func(string) string) {
	eqc.Lock()
	defer eqc.Unlock()
	eqc.pending = append(eqc.pending, f(s))
}
// NewEqC maps a string to an EqC symbol. It guarantees that two strings
// that are equal after being passed through a function f will return the same
// EqC.
func NewEqC(s string, f func(string) string) EqC {
	st := &eqc
	st.Lock()
	defer st.Unlock()
	// := declares err here, so the former dead `var err error` is removed.
	sym, err := st.assignSymbol(s, f, false)
	if err != nil {
		// assignSymbol is not expected to fail on this call path; treat
		// any error as an internal invariant violation.
		panic(fmt.Sprintf("Internal error: Unexpected error (%s)", err))
	}
	return EqC(sym)
}
// String converts an EqC back to a string. It panics if given an EqC that was
// not created using NewEqC.
func (s EqC) String() string {
return eqc.toString(uint64(s), "EqC")
}
Removed useless function PreEqC
// This file provides the EqC type, which represents strings that can be
// compared only for equality. Unlike Eq, EqC symbols are first canonicalized
// using a program-provided canonicalization function.
package intern
import (
"fmt"
)
// An EqC is a string that has been interned to an integer after being
// canonicalized using a program-provided transformation function. EqC
// supports only equality comparisons, not inequality comparisons. (No checks
// are performed to enforce that property, unfortunately.)
//
// It is strongly recommended that programs alias EqC once for each
// transformation function. This will help the compiler catch program errors
// if strings interned using different transformation functions are compared.
type EqC Eq
// eqc maintains all the state needed to manipulate EqCs.
var eqc state
// init initializes our global state.
func init() {
eqc.forgetAll()
}
// NewEqC maps a string to an EqC symbol. It guarantees that two strings
// that are equal after being passed through a function f will return the same
// EqC.
func NewEqC(s string, f func(string) string) EqC {
	st := &eqc
	st.Lock()
	defer st.Unlock()
	// := declares err here, so the former dead `var err error` is removed.
	sym, err := st.assignSymbol(s, f, false)
	if err != nil {
		// assignSymbol is not expected to fail on this call path; treat
		// any error as an internal invariant violation.
		panic(fmt.Sprintf("Internal error: Unexpected error (%s)", err))
	}
	return EqC(sym)
}
// String converts an EqC back to a string. It panics if given an EqC that was
// not created using NewEqC.
func (s EqC) String() string {
return eqc.toString(uint64(s), "EqC")
}
|
package sdk
import (
"crypto/rand"
"encoding/hex"
"encoding/json"
"fmt"
"regexp"
"sort"
"time"
"github.com/fsamin/go-dump"
)
// DefaultHistoryLength is the default history length
const (
DefaultHistoryLength int64 = 20
)
// ColorRegexp represent the regexp for a format to hexadecimal color
var ColorRegexp = regexp.MustCompile(`^#\w{3,8}$`)
//Workflow represents a pipeline based workflow
type Workflow struct {
ID int64 `json:"id" db:"id" cli:"-"`
Name string `json:"name" db:"name" cli:"name,key"`
Description string `json:"description,omitempty" db:"description" cli:"description"`
Icon string `json:"icon,omitempty" db:"icon" cli:"-"`
LastModified time.Time `json:"last_modified" db:"last_modified" mapstructure:"-"`
ProjectID int64 `json:"project_id,omitempty" db:"project_id" cli:"-"`
ProjectKey string `json:"project_key" db:"-" cli:"-"`
RootID int64 `json:"root_id,omitempty" db:"root_node_id" cli:"-"`
Root *WorkflowNode `json:"root,omitempty" db:"-" cli:"-"`
Joins []WorkflowNodeJoin `json:"joins,omitempty" db:"-" cli:"-"`
Groups []GroupPermission `json:"groups,omitempty" db:"-" cli:"-"`
Permission int `json:"permission,omitempty" db:"-" cli:"-"`
Metadata Metadata `json:"metadata" yaml:"metadata" db:"-"`
Usage *Usage `json:"usage,omitempty" db:"-" cli:"-"`
HistoryLength int64 `json:"history_length" db:"history_length" cli:"-"`
PurgeTags []string `json:"purge_tags,omitempty" db:"-" cli:"-"`
Notifications []WorkflowNotification `json:"notifications,omitempty" db:"-" cli:"-"`
FromRepository string `json:"from_repository,omitempty" db:"from_repository" cli:"from"`
DerivedFromWorkflowID int64 `json:"derived_from_workflow_id,omitempty" db:"derived_from_workflow_id" cli:"-"`
DerivedFromWorkflowName string `json:"derived_from_workflow_name,omitempty" db:"derived_from_workflow_name" cli:"-"`
DerivationBranch string `json:"derivation_branch,omitempty" db:"derivation_branch" cli:"-"`
Audits []AuditWorklflow `json:"audits" db:"-"`
Pipelines map[int64]Pipeline `json:"pipelines" db:"-" cli:"-" mapstructure:"-"`
Applications map[int64]Application `json:"applications" db:"-" cli:"-" mapstructure:"-"`
Environments map[int64]Environment `json:"environments" db:"-" cli:"-" mapstructure:"-"`
ProjectPlatforms map[int64]ProjectPlatform `json:"project_platforms" db:"-" cli:"-" mapstructure:"-"`
HookModels map[int64]WorkflowHookModel `json:"hook_models" db:"-" cli:"-" mapstructure:"-"`
OutGoingHookModels map[int64]WorkflowHookModel `json:"outgoing_hook_models" db:"-" cli:"-" mapstructure:"-"`
Labels []Label `json:"labels" db:"-" cli:"labels"`
ToDelete bool `json:"to_delete" db:"to_delete" cli:"-"`
Favorite bool `json:"favorite" db:"-" cli:"favorite"`
WorkflowData *WorkflowData `json:"workflow_data" db:"-" cli:"-"`
}
// GetApplication retrieve application from workflow
func (w *Workflow) GetApplication(ID int64) Application {
return w.Applications[ID]
}
// RetroMigrate temporary method that convert new workflow structure into old workflow structure for backward compatibility
func (w *Workflow) RetroMigrate() {
root := w.WorkflowData.Node.retroMigrate()
w.Root = &root
if len(w.WorkflowData.Joins) > 0 {
w.Joins = make([]WorkflowNodeJoin, 0, len(w.WorkflowData.Joins))
for _, j := range w.WorkflowData.Joins {
w.Joins = append(w.Joins, j.retroMigrateJoin())
}
}
// Set context on old node
for _, n := range w.Nodes(true) {
node := w.GetNode(n.ID)
if node.Context == nil {
continue
}
if node.Context.ApplicationID != 0 {
app, ok := w.Applications[node.Context.ApplicationID]
if ok {
node.Context.Application = &app
}
}
if node.Context.EnvironmentID != 0 {
env, ok := w.Environments[node.Context.EnvironmentID]
if ok {
node.Context.Environment = &env
}
}
if node.Context.ProjectPlatformID != 0 {
pp, ok := w.ProjectPlatforms[node.Context.ProjectPlatformID]
if ok {
node.Context.ProjectPlatform = &pp
}
}
}
}
// Migrate old workflow struct into new workflow struct
func (w *Workflow) Migrate(withID bool) WorkflowData {
work := WorkflowData{}
if w != nil && w.Root != nil {
// Add root node
work.Node = (*w.Root).migrate(withID)
// Add Join
work.Joins = make([]Node, 0, len(w.Joins))
for _, j := range w.Joins {
work.Joins = append(work.Joins, j.migrate(withID))
}
}
return work
}
// WorkflowNotification represents notifications on a workflow
type WorkflowNotification struct {
ID int64 `json:"id,omitempty" db:"id"`
WorkflowID int64 `json:"workflow_id,omitempty" db:"workflow_id"`
SourceNodeRefs []string `json:"source_node_ref,omitempty" db:"-"`
SourceNodeIDs []int64 `json:"source_node_id,omitempty" db:"-"`
Type string `json:"type" db:"type"`
Settings UserNotificationSettings `json:"settings" db:"-"`
}
func (w *Workflow) Forks() (map[int64]WorkflowNodeFork, map[int64]string) {
forkMap := make(map[int64]WorkflowNodeFork, 0)
forkTriggerMap := make(map[int64]string, 0)
w.Root.ForksMap(&forkMap, &forkTriggerMap)
for _, j := range w.Joins {
for _, t := range j.Triggers {
(&t.WorkflowDestNode).ForksMap(&forkMap, &forkTriggerMap)
}
}
return forkMap, forkTriggerMap
}
//JoinsID returns the IDs of all joins in the workflow
func (w *Workflow) JoinsID() []int64 {
	ids := make([]int64, 0, len(w.Joins))
	for _, join := range w.Joins {
		ids = append(ids, join.ID)
	}
	return ids
}
// ResetIDs resets all nodes and joins ids
func (w *Workflow) ResetIDs() {
if w.Root == nil {
return
}
(w.Root).ResetIDs()
for i := range w.Joins {
j := &w.Joins[i]
j.ID = 0
j.SourceNodeIDs = nil
for tid := range j.Triggers {
t := &j.Triggers[tid]
(&t.WorkflowDestNode).ResetIDs()
}
}
}
//Nodes returns nodes IDs excluding the root ID
func (w *Workflow) Nodes(withRoot bool) []WorkflowNode {
if w.Root == nil {
return nil
}
res := []WorkflowNode{}
if withRoot {
res = append(res, w.Root.Nodes()...)
} else {
for _, t := range w.Root.Triggers {
res = append(res, t.WorkflowDestNode.Nodes()...)
}
for _, f := range w.Root.Forks {
for _, t := range f.Triggers {
res = append(res, t.WorkflowDestNode.Nodes()...)
}
}
for i := range w.Root.OutgoingHooks {
for j := range w.Root.OutgoingHooks[i].Triggers {
res = append(res, w.Root.OutgoingHooks[i].Triggers[j].WorkflowDestNode.Nodes()...)
}
}
}
for _, j := range w.Joins {
for _, t := range j.Triggers {
res = append(res, t.WorkflowDestNode.Nodes()...)
}
}
return res
}
//AddTrigger adds a trigger to the destination node from the node found by its name
func (w *Workflow) AddTrigger(name string, dest Node) {
if w.WorkflowData == nil || w.WorkflowData.Node.Name == "" {
return
}
(&w.WorkflowData.Node).AddTrigger(name, dest)
for i := range w.WorkflowData.Joins {
for j := range w.WorkflowData.Joins[i].Triggers {
(&w.WorkflowData.Joins[i].Triggers[j].ChildNode).AddTrigger(name, dest)
}
}
}
//GetNodeByRef returns the node given its ref
func (w *Workflow) GetNodeByRef(ref string) *WorkflowNode {
n := w.Root.GetNodeByRef(ref)
if n != nil {
return n
}
for ji := range w.Joins {
j := &w.Joins[ji]
for ti := range j.Triggers {
t := &j.Triggers[ti]
n2 := (&t.WorkflowDestNode).GetNodeByRef(ref)
if n2 != nil {
return n2
}
}
}
return nil
}
func (w *Workflow) GetForkByName(name string) *WorkflowNodeFork {
n := w.Root.GetForkByName(name)
if n != nil {
return n
}
for _, j := range w.Joins {
for _, t := range j.Triggers {
n = t.WorkflowDestNode.GetForkByName(name)
if n != nil {
return n
}
}
}
return nil
}
//GetNodeByName returns the node given its name
func (w *Workflow) GetNodeByName(name string) *WorkflowNode {
n := w.Root.GetNodeByName(name)
if n != nil {
return n
}
for _, j := range w.Joins {
for _, t := range j.Triggers {
n = t.WorkflowDestNode.GetNodeByName(name)
if n != nil {
return n
}
}
}
return nil
}
//GetNode returns the node given its id
func (w *Workflow) GetNode(id int64) *WorkflowNode {
n := w.Root.GetNode(id)
if n != nil {
return n
}
for _, j := range w.Joins {
for _, t := range j.Triggers {
n = t.WorkflowDestNode.GetNode(id)
if n != nil {
return n
}
}
}
return nil
}
//GetJoin returns the join given its id, or nil when no join matches
func (w *Workflow) GetJoin(id int64) *WorkflowNodeJoin {
	for i := range w.Joins {
		if w.Joins[i].ID == id {
			// Return a pointer into the slice itself.  The previous
			// version returned the address of the range-loop copy, so
			// caller mutations through the pointer never reached w.Joins.
			return &w.Joins[i]
		}
	}
	return nil
}
//TriggersID returns triggers IDs
func (w *Workflow) TriggersID() []int64 {
res := w.Root.TriggersID()
for _, j := range w.Joins {
for _, t := range j.Triggers {
res = append(res, t.ID)
res = append(res, t.WorkflowDestNode.TriggersID()...)
}
}
return res
}
//References returns a slice with all node references
func (w *Workflow) References() []string {
if w.Root == nil {
return nil
}
res := w.Root.References()
for _, j := range w.Joins {
for _, t := range j.Triggers {
res = append(res, t.WorkflowDestNode.References()...)
}
}
return res
}
//InvolvedApplications returns all applications used in the workflow
func (w *Workflow) InvolvedApplications() []int64 {
if w.Root == nil {
return nil
}
res := w.Root.InvolvedApplications()
for _, j := range w.Joins {
for _, t := range j.Triggers {
res = append(res, t.WorkflowDestNode.InvolvedApplications()...)
}
}
return res
}
//InvolvedPipelines returns all pipelines used in the workflow
func (w *Workflow) InvolvedPipelines() []int64 {
if w.Root == nil {
return nil
}
res := w.Root.InvolvedPipelines()
for _, j := range w.Joins {
for _, t := range j.Triggers {
res = append(res, t.WorkflowDestNode.InvolvedPipelines()...)
}
}
return res
}
//GetApplications returns all applications used in the workflow,
//deduplicated by application name (first occurrence wins)
func (w *Workflow) GetApplications() []Application {
	if w.Root == nil {
		return nil
	}
	// Gather applications from the root tree and from all join subtrees.
	res := w.Root.GetApplications()
	for _, j := range w.Joins {
		for _, t := range j.Triggers {
			res = append(res, t.WorkflowDestNode.GetApplications()...)
		}
	}
	// Deduplicate by name in O(n) with a set, preserving first-seen
	// order (the previous nested-loop version was quadratic).
	seen := make(map[string]struct{}, len(res))
	withoutDuplicates := []Application{}
	for _, a := range res {
		if _, ok := seen[a.Name]; ok {
			continue
		}
		seen[a.Name] = struct{}{}
		withoutDuplicates = append(withoutDuplicates, a)
	}
	return withoutDuplicates
}
//GetEnvironments returns all environments used in the workflow,
//deduplicated by environment name (first occurrence wins)
func (w *Workflow) GetEnvironments() []Environment {
	if w.Root == nil {
		return nil
	}
	// Gather environments from the root tree and from all join subtrees.
	res := w.Root.GetEnvironments()
	for _, j := range w.Joins {
		for _, t := range j.Triggers {
			res = append(res, t.WorkflowDestNode.GetEnvironments()...)
		}
	}
	// Deduplicate by name in O(n) with a set, preserving first-seen
	// order (the previous nested-loop version was quadratic).
	seen := make(map[string]struct{}, len(res))
	withoutDuplicates := []Environment{}
	for _, a := range res {
		if _, ok := seen[a.Name]; ok {
			continue
		}
		seen[a.Name] = struct{}{}
		withoutDuplicates = append(withoutDuplicates, a)
	}
	return withoutDuplicates
}
//GetPipelines returns all pipelines used in the workflow
func (w *Workflow) GetPipelines() []Pipeline {
	if w.Root == nil {
		return nil
	}
	// Flatten the pipelines map into a slice (order is unspecified).
	pipelines := make([]Pipeline, 0, len(w.Pipelines))
	for _, p := range w.Pipelines {
		pipelines = append(pipelines, p)
	}
	return pipelines
}
// GetRepositories returns the deduplicated list of repository fullnames
// used by the workflow's applications, in unspecified order.
func (w *Workflow) GetRepositories() []string {
	seen := map[string]struct{}{}
	for _, app := range w.GetApplications() {
		if app.RepositoryFullname != "" {
			seen[app.RepositoryFullname] = struct{}{}
		}
	}
	res := make([]string, 0, len(seen))
	for repo := range seen {
		res = append(res, repo)
	}
	return res
}
//InvolvedEnvironments returns all environments used in the workflow
func (w *Workflow) InvolvedEnvironments() []int64 {
if w.Root == nil {
return nil
}
res := w.Root.InvolvedEnvironments()
for _, j := range w.Joins {
for _, t := range j.Triggers {
res = append(res, t.WorkflowDestNode.InvolvedEnvironments()...)
}
}
return res
}
//InvolvedPlatforms returns all platforms used in the workflow
func (w *Workflow) InvolvedPlatforms() []int64 {
if w.Root == nil {
return nil
}
res := w.Root.InvolvedPlatforms()
for _, j := range w.Joins {
for _, t := range j.Triggers {
res = append(res, t.WorkflowDestNode.InvolvedPlatforms()...)
}
}
return res
}
//Visit all the workflow and apply the visitor func on all nodes
func (w *Workflow) Visit(visitor func(*WorkflowNode)) {
w.Root.Visit(visitor)
for i := range w.Joins {
for j := range w.Joins[i].Triggers {
n := &w.Joins[i].Triggers[j].WorkflowDestNode
n.Visit(visitor)
}
}
}
//Visit all the workflow and apply the visitor func on all nodes
func (w *Workflow) VisitNode(visitor func(*Node, *Workflow)) {
w.WorkflowData.Node.VisitNode(w, visitor)
for i := range w.WorkflowData.Joins {
for j := range w.WorkflowData.Joins[i].Triggers {
n := &w.WorkflowData.Joins[i].Triggers[j].ChildNode
n.VisitNode(w, visitor)
}
}
}
//Sort sorts the workflow
func (w *Workflow) SortNode() {
w.VisitNode(func(n *Node, w *Workflow) {
n.Sort()
})
for _, join := range w.WorkflowData.Joins {
sort.Slice(join.Triggers, func(i, j int) bool {
return join.Triggers[i].ChildNode.Name < join.Triggers[j].ChildNode.Name
})
}
}
//Sort sorts the workflow
func (w *Workflow) Sort() {
w.Visit(func(n *WorkflowNode) {
n.Sort()
})
for _, join := range w.Joins {
sort.Slice(join.Triggers, func(i, j int) bool {
return join.Triggers[i].WorkflowDestNode.Name < join.Triggers[j].WorkflowDestNode.Name
})
}
}
// AssignEmptyType fill node type field
func (w *Workflow) AssignEmptyType() {
// set node type for join
for i := range w.WorkflowData.Joins {
j := &w.WorkflowData.Joins[i]
j.Type = NodeTypeJoin
}
nodesArray := w.WorkflowData.Array()
for i := range nodesArray {
n := nodesArray[i]
if n.Type == "" {
if n.Context != nil && n.Context.PipelineID != 0 {
n.Type = NodeTypePipeline
} else if n.OutGoingHookContext != nil && n.OutGoingHookContext.HookModelID != 0 {
n.Type = NodeTypeOutGoingHook
} else {
n.Type = NodeTypeFork
}
}
}
}
// ValidateType checks that every node in the workflow carries data
// consistent with its declared node type.  It returns an error naming all
// offending nodes, or nil when every node is well-formed.
func (w *Workflow) ValidateType() error {
	namesInError := make([]string, 0)
	for _, n := range w.WorkflowData.Array() {
		switch n.Type {
		case NodeTypePipeline:
			// A pipeline node must reference a pipeline by ID or name.
			if n.Context == nil || (n.Context.PipelineID == 0 && n.Context.PipelineName == "") {
				namesInError = append(namesInError, n.Name)
			}
		case NodeTypeOutGoingHook:
			// An outgoing-hook node must reference a hook model.
			if n.OutGoingHookContext == nil || (n.OutGoingHookContext.HookModelID == 0 && n.OutGoingHookContext.HookModelName == "") {
				namesInError = append(namesInError, n.Name)
			}
		case NodeTypeJoin:
			// len() of a nil slice is 0, so the former nil check was redundant.
			if len(n.JoinContext) == 0 {
				namesInError = append(namesInError, n.Name)
			}
		case NodeTypeFork:
			// A fork must carry neither pipeline, hook, nor join data.
			if (n.Context != nil && (n.Context.PipelineID != 0 || n.Context.PipelineName != "")) ||
				(n.OutGoingHookContext != nil && (n.OutGoingHookContext.HookModelID != 0 || n.OutGoingHookContext.HookModelName != "")) ||
				len(n.JoinContext) > 0 {
				namesInError = append(namesInError, n.Name)
			}
		default:
			namesInError = append(namesInError, n.Name)
		}
	}
	if len(namesInError) > 0 {
		return WithStack(fmt.Errorf("wrong type for nodes %v", namesInError))
	}
	return nil
}
// Visit applies the visitor function to this node and then, depth-first, to
// every node reachable through its triggers, outgoing hooks and forks.
func (n *WorkflowNode) Visit(visitor func(*WorkflowNode)) {
	visitor(n)
	for i := range n.Triggers {
		n.Triggers[i].WorkflowDestNode.Visit(visitor)
	}
	for i := range n.OutgoingHooks {
		hook := &n.OutgoingHooks[i]
		for j := range hook.Triggers {
			hook.Triggers[j].WorkflowDestNode.Visit(visitor)
		}
	}
	for i := range n.Forks {
		fork := &n.Forks[i]
		for j := range fork.Triggers {
			fork.Triggers[j].WorkflowDestNode.Visit(visitor)
		}
	}
}
// Sort orders this node's triggers alphabetically by destination-node name.
func (n *WorkflowNode) Sort() {
	byDestName := func(i, j int) bool {
		return n.Triggers[i].WorkflowDestNode.Name < n.Triggers[j].WorkflowDestNode.Name
	}
	sort.Slice(n.Triggers, byDestName)
}
// WorkflowNodeJoin joins several source nodes and fires its triggers once
// its parents are done. Sources are referenced either by id or by ref.
type WorkflowNodeJoin struct {
	ID int64 `json:"id" db:"id"`
	Ref string `json:"ref" db:"-"`
	WorkflowID int64 `json:"workflow_id" db:"workflow_id"`
	SourceNodeIDs []int64 `json:"source_node_id,omitempty" db:"-"`
	SourceNodeRefs []string `json:"source_node_ref,omitempty" db:"-"`
	Triggers []WorkflowNodeJoinTrigger `json:"triggers,omitempty" db:"-"`
}
// migrate converts an old-style join into a new-style join Node, carrying
// its parents as JoinContext entries and recursively migrating its triggers.
// When withID is true the database id is preserved.
func (j WorkflowNodeJoin) migrate(withID bool) Node {
	newNode := Node{
		Name: j.Ref,
		Ref: j.Ref,
		Type: NodeTypeJoin,
		JoinContext: make([]NodeJoin, 0, len(j.SourceNodeIDs)),
		Triggers: make([]NodeTrigger, 0, len(j.Triggers)),
	}
	// a join may have no ref; generate one so the node stays addressable
	if newNode.Ref == "" {
		newNode.Ref = RandomString(5)
	}
	if withID {
		newNode.ID = j.ID
	}
	// NOTE(review): assumes SourceNodeRefs is at least as long as
	// SourceNodeIDs — confirm with callers.
	for i := range j.SourceNodeIDs {
		newNode.JoinContext = append(newNode.JoinContext, NodeJoin{
			ParentID: j.SourceNodeIDs[i],
			ParentName: j.SourceNodeRefs[i],
		})
	}
	for _, t := range j.Triggers {
		child := t.WorkflowDestNode.migrate(withID)
		newNode.Triggers = append(newNode.Triggers, NodeTrigger{
			ParentNodeName: newNode.Name,
			ChildNode: child,
		})
	}
	return newNode
}
// WorkflowNodeJoinTrigger links a join to the destination node it starts.
type WorkflowNodeJoinTrigger struct {
	ID int64 `json:"id" db:"id"`
	WorkflowNodeJoinID int64 `json:"join_id" db:"workflow_node_join_id"`
	WorkflowDestNodeID int64 `json:"workflow_dest_node_id" db:"workflow_dest_node_id"`
	WorkflowDestNode WorkflowNode `json:"workflow_dest_node" db:"-"`
}
// WorkflowNode represents a node in a workflow tree (old-style model):
// a pipeline plus its execution context, hooks, and the triggers, forks
// and outgoing hooks that start child nodes.
type WorkflowNode struct {
	ID int64 `json:"id" db:"id"`
	Name string `json:"name" db:"name"`
	Ref string `json:"ref,omitempty" db:"-"`
	WorkflowID int64 `json:"workflow_id" db:"workflow_id"`
	PipelineID int64 `json:"pipeline_id" db:"pipeline_id"`
	PipelineName string `json:"pipeline_name" db:"-"`
	DeprecatedPipeline Pipeline `json:"pipeline" db:"-"`
	Context *WorkflowNodeContext `json:"context" db:"-"`
	// TriggerSrc*ID fields are internal bookkeeping, never serialized
	TriggerSrcID int64 `json:"-" db:"-"`
	TriggerJoinSrcID int64 `json:"-" db:"-"`
	TriggerHookSrcID int64 `json:"-" db:"-"`
	TriggerSrcForkID int64 `json:"-" db:"-"`
	Hooks []WorkflowNodeHook `json:"hooks,omitempty" db:"-"`
	Forks []WorkflowNodeFork `json:"forks,omitempty" db:"-"`
	Triggers []WorkflowNodeTrigger `json:"triggers,omitempty" db:"-"`
	OutgoingHooks []WorkflowNodeOutgoingHook `json:"outgoing_hooks,omitempty" db:"-"`
}
// retroMigrate converts a new-style Node back into an old-style WorkflowNode
// for backward compatibility. Children are dispatched by type: pipeline
// children become triggers, forks and outgoing hooks go to their own slices.
// NOTE(review): this dereferences n.Context unconditionally — it assumes it
// is only ever called on pipeline nodes, which always carry a context.
func (n Node) retroMigrate() WorkflowNode {
	newNode := WorkflowNode{
		Ref: n.Ref,
		Name: n.Name,
		WorkflowID: n.WorkflowID,
		Context: &WorkflowNodeContext{
			ProjectPlatformID: n.Context.ProjectPlatformID,
			EnvironmentID: n.Context.EnvironmentID,
			ApplicationID: n.Context.ApplicationID,
			DefaultPipelineParameters: n.Context.DefaultPipelineParameters,
			DefaultPayload: n.Context.DefaultPayload,
			Mutex: n.Context.Mutex,
			Conditions: n.Context.Conditions,
		},
		PipelineID: n.Context.PipelineID,
		OutgoingHooks: nil,
		Hooks: make([]WorkflowNodeHook, 0, len(n.Hooks)),
		Triggers: nil,
		Forks: nil,
	}
	for _, h := range n.Hooks {
		hook := WorkflowNodeHook{
			UUID: h.UUID,
			Ref: h.Ref,
			WorkflowHookModelID: h.HookModelID,
			Config: h.Config,
		}
		newNode.Hooks = append(newNode.Hooks, hook)
	}
	for _, t := range n.Triggers {
		// join children are not expected here; they are silently skipped
		switch t.ChildNode.Type {
		case NodeTypePipeline:
			trig := WorkflowNodeTrigger{
				WorkflowDestNode: t.ChildNode.retroMigrate(),
			}
			newNode.Triggers = append(newNode.Triggers, trig)
		case NodeTypeFork:
			newNode.Forks = append(newNode.Forks, t.ChildNode.retroMigrateFork())
		case NodeTypeOutGoingHook:
			newNode.OutgoingHooks = append(newNode.OutgoingHooks, t.ChildNode.retroMigrateOutGoingHook())
		}
	}
	return newNode
}
// retroMigrateFork converts a new-style fork Node back into an old-style
// WorkflowNodeFork. Only pipeline children become fork triggers; children
// of any other type are dropped.
func (n Node) retroMigrateFork() WorkflowNodeFork {
	fork := WorkflowNodeFork{
		Name: n.Name,
	}
	if len(n.Triggers) > 0 {
		fork.Triggers = make([]WorkflowNodeForkTrigger, 0, len(n.Triggers))
	}
	for _, t := range n.Triggers {
		trig := WorkflowNodeForkTrigger{}
		switch t.ChildNode.Type {
		case NodeTypePipeline:
			trig.WorkflowDestNode = t.ChildNode.retroMigrate()
		default:
			// non-pipeline children have no old-style equivalent under a fork
			continue
		}
		fork.Triggers = append(fork.Triggers, trig)
	}
	return fork
}
// retroMigrateOutGoingHook converts a new-style outgoing-hook Node back into
// an old-style WorkflowNodeOutgoingHook. Only pipeline children become
// triggers; children of any other type are dropped.
// NOTE(review): dereferences n.OutGoingHookContext unconditionally — assumes
// it is only called on outgoing-hook nodes.
func (n Node) retroMigrateOutGoingHook() WorkflowNodeOutgoingHook {
	h := WorkflowNodeOutgoingHook{
		Config: n.OutGoingHookContext.Config,
		WorkflowHookModelID: n.OutGoingHookContext.HookModelID,
		Ref: n.Ref,
		Name: n.Name,
	}
	if len(n.Triggers) > 0 {
		h.Triggers = make([]WorkflowNodeOutgoingHookTrigger, 0, len(n.Triggers))
		for _, t := range n.Triggers {
			trig := WorkflowNodeOutgoingHookTrigger{}
			switch t.ChildNode.Type {
			case NodeTypePipeline:
				trig.WorkflowDestNode = t.ChildNode.retroMigrate()
			default:
				continue
			}
			h.Triggers = append(h.Triggers, trig)
		}
	}
	return h
}
// retroMigrateJoin converts a new-style join Node back into an old-style
// WorkflowNodeJoin, mapping JoinContext parents to SourceNodeRefs. Only
// pipeline children become triggers; others are dropped.
func (n Node) retroMigrateJoin() WorkflowNodeJoin {
	j := WorkflowNodeJoin{
		Ref: n.Ref,
	}
	j.SourceNodeRefs = make([]string, 0, len(n.JoinContext))
	for _, jc := range n.JoinContext {
		j.SourceNodeRefs = append(j.SourceNodeRefs, jc.ParentName)
	}
	if len(n.Triggers) > 0 {
		j.Triggers = make([]WorkflowNodeJoinTrigger, 0, len(n.Triggers))
		for _, t := range n.Triggers {
			trig := WorkflowNodeJoinTrigger{}
			switch t.ChildNode.Type {
			case NodeTypePipeline:
				trig.WorkflowDestNode = t.ChildNode.retroMigrate()
			default:
				continue
			}
			j.Triggers = append(j.Triggers, trig)
		}
	}
	return j
}
// migrate converts an old-style WorkflowNode into a new-style pipeline Node,
// recursively migrating triggers, forks and outgoing hooks into NodeTrigger
// children. When withID is true, database ids are preserved.
// It assumes n.Context is non-nil.
func (n WorkflowNode) migrate(withID bool) Node {
	// Default the ref to the node name up front so the migrated node carries
	// it. (The previous code assigned n.Ref = n.Name only after newNode had
	// been built from n.Ref; since n is a value receiver and n.Ref was never
	// read afterwards, that assignment was dead code and the migrated ref
	// stayed empty.)
	if n.Ref == "" {
		n.Ref = n.Name
	}
	newNode := Node{
		WorkflowID: n.WorkflowID,
		Type: NodeTypePipeline,
		Name: n.Name,
		Ref: n.Ref,
		Context: &NodeContext{
			PipelineID: n.PipelineID,
			ApplicationID: n.Context.ApplicationID,
			EnvironmentID: n.Context.EnvironmentID,
			ProjectPlatformID: n.Context.ProjectPlatformID,
			Conditions: n.Context.Conditions,
			DefaultPayload: n.Context.DefaultPayload,
			DefaultPipelineParameters: n.Context.DefaultPipelineParameters,
			Mutex: n.Context.Mutex,
		},
		Hooks: make([]NodeHook, 0, len(n.Hooks)),
		Triggers: make([]NodeTrigger, 0, len(n.Triggers)+len(n.Forks)+len(n.OutgoingHooks)),
	}
	// when only the embedded objects carry the ids, fall back to them
	if n.Context.ApplicationID == 0 && n.Context.Application != nil {
		newNode.Context.ApplicationID = n.Context.Application.ID
	}
	if n.Context.EnvironmentID == 0 && n.Context.Environment != nil {
		newNode.Context.EnvironmentID = n.Context.Environment.ID
	}
	if n.Context.ProjectPlatformID == 0 && n.Context.ProjectPlatform != nil {
		newNode.Context.ProjectPlatformID = n.Context.ProjectPlatform.ID
	}
	if withID {
		newNode.ID = n.ID
	}
	for _, h := range n.Hooks {
		nh := NodeHook{
			Ref: h.Ref,
			HookModelID: h.WorkflowHookModelID,
			Config: h.Config,
			UUID: h.UUID,
		}
		if withID {
			nh.ID = h.ID
		}
		newNode.Hooks = append(newNode.Hooks, nh)
	}
	// triggers, forks and outgoing hooks all become plain child triggers
	for _, t := range n.Triggers {
		triggeredNode := t.WorkflowDestNode.migrate(withID)
		newNode.Triggers = append(newNode.Triggers, NodeTrigger{
			ParentNodeName: n.Name,
			ChildNode: triggeredNode,
		})
	}
	for _, f := range n.Forks {
		forkNode := f.migrate(withID)
		newNode.Triggers = append(newNode.Triggers, NodeTrigger{
			ParentNodeName: n.Name,
			ChildNode: forkNode,
		})
	}
	for _, h := range n.OutgoingHooks {
		ogh := h.migrate(withID)
		newNode.Triggers = append(newNode.Triggers, NodeTrigger{
			ParentNodeName: n.Name,
			ChildNode: ogh,
		})
	}
	return newNode
}
// ForksMap recursively fills forkMap (fork id -> fork) and triggerMap
// (fork trigger id -> owning fork name) for this node and all descendants.
// NOTE(review): taking *map parameters is unidiomatic (maps are reference
// types), kept for interface compatibility.
func (n *WorkflowNode) ForksMap(forkMap *map[int64]WorkflowNodeFork, triggerMap *map[int64]string) {
	for _, f := range n.Forks {
		(*forkMap)[f.ID] = f
		for _, t := range f.Triggers {
			(*triggerMap)[t.ID] = f.Name
			// recursion only reads, so iterating over copies is safe here
			(&t.WorkflowDestNode).ForksMap(forkMap, triggerMap)
		}
	}
	for _, t := range n.Triggers {
		(&t.WorkflowDestNode).ForksMap(forkMap, triggerMap)
	}
	for _, o := range n.OutgoingHooks {
		for _, t := range o.Triggers {
			(&t.WorkflowDestNode).ForksMap(forkMap, triggerMap)
		}
	}
}
// IsLinkedToRepo reports whether this node's application is linked to a
// source-code repository. A nil node or missing application yields false.
func (n *WorkflowNode) IsLinkedToRepo() bool {
	if n == nil || n.Context == nil || n.Context.Application == nil {
		return false
	}
	return n.Context.Application.RepositoryFullname != ""
}
// Application returns the node's application and true, or the zero
// Application and false when the node, its context or the application is nil.
func (n *WorkflowNode) Application() (Application, bool) {
	if n == nil || n.Context == nil || n.Context.Application == nil {
		return Application{}, false
	}
	return *n.Context.Application, true
}
// Environment returns the node's environment and true, or the zero
// Environment and false when the node, its context or the environment is nil.
func (n *WorkflowNode) Environment() (Environment, bool) {
	if n == nil || n.Context == nil || n.Context.Environment == nil {
		return Environment{}, false
	}
	return *n.Context.Environment, true
}
// ProjectPlatform returns the node's project platform and true, or the zero
// ProjectPlatform and false when the node, its context or the platform is nil.
func (n *WorkflowNode) ProjectPlatform() (ProjectPlatform, bool) {
	if n == nil || n.Context == nil || n.Context.ProjectPlatform == nil {
		return ProjectPlatform{}, false
	}
	return *n.Context.ProjectPlatform, true
}
// EqualsTo returns true if a node has the same pipeline and context
// (application and environment ids) as another. Two nodes whose contexts
// are both nil are considered equal.
func (n *WorkflowNode) EqualsTo(n1 *WorkflowNode) bool {
	if n.PipelineID != n1.PipelineID {
		return false
	}
	if n.Context == nil && n1.Context != nil {
		return false
	}
	if n.Context != nil && n1.Context == nil {
		return false
	}
	// Both contexts are nil: nothing more to compare. Without this guard
	// the dereferences below would panic on a nil pointer.
	if n.Context == nil {
		return true
	}
	if n.Context.ApplicationID != n1.Context.ApplicationID {
		return false
	}
	if n.Context.EnvironmentID != n1.Context.EnvironmentID {
		return false
	}
	return true
}
// GetNodeByRef returns the first node in this sub-tree whose ref matches,
// searching triggers, then outgoing hooks, then forks; nil if none matches.
func (n *WorkflowNode) GetNodeByRef(ref string) *WorkflowNode {
	if n == nil {
		return nil
	}
	if n.Ref == ref {
		return n
	}
	for i := range n.Triggers {
		if found := n.Triggers[i].WorkflowDestNode.GetNodeByRef(ref); found != nil {
			return found
		}
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			if found := n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.GetNodeByRef(ref); found != nil {
				return found
			}
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			if found := n.Forks[i].Triggers[j].WorkflowDestNode.GetNodeByRef(ref); found != nil {
				return found
			}
		}
	}
	return nil
}
// GetForkByName returns the first fork with the given name in this sub-tree,
// searching the node's own forks (and their sub-trees) first, then triggers,
// then outgoing hooks. Returns nil when no fork matches.
func (n *WorkflowNode) GetForkByName(name string) *WorkflowNodeFork {
	if n == nil {
		return nil
	}
	for i := range n.Forks {
		f := &n.Forks[i]
		if f.Name == name {
			return f
		}
		for j := range f.Triggers {
			f2 := (&f.Triggers[j].WorkflowDestNode).GetForkByName(name)
			if f2 != nil {
				return f2
			}
		}
	}
	for j := range n.Triggers {
		n2 := (&n.Triggers[j].WorkflowDestNode).GetForkByName(name)
		if n2 != nil {
			return n2
		}
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			n2 := (&n.OutgoingHooks[i].Triggers[j].WorkflowDestNode).GetForkByName(name)
			if n2 != nil {
				return n2
			}
		}
	}
	return nil
}
// GetNodeByName returns the first node in this sub-tree with the given name,
// or nil if none matches.
func (n *WorkflowNode) GetNodeByName(name string) *WorkflowNode {
	if n == nil {
		return nil
	}
	if n.Name == name {
		return n
	}
	// Iterate by index so the returned pointer aliases the node actually
	// stored in the tree. The previous `for _, t := range n.Triggers` loop
	// returned a pointer into the loop-variable copy, so callers mutating
	// the result (e.g. Workflow.RetroMigrate setting the context) silently
	// lost their changes.
	for i := range n.Triggers {
		if n2 := n.Triggers[i].WorkflowDestNode.GetNodeByName(name); n2 != nil {
			return n2
		}
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			if n2 := n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.GetNodeByName(name); n2 != nil {
				return n2
			}
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			if n2 := n.Forks[i].Triggers[j].WorkflowDestNode.GetNodeByName(name); n2 != nil {
				return n2
			}
		}
	}
	return nil
}
// GetNode returns the first node in this sub-tree with the given id,
// or nil if none matches.
func (n *WorkflowNode) GetNode(id int64) *WorkflowNode {
	if n == nil {
		return nil
	}
	if n.ID == id {
		return n
	}
	// Iterate by index so the returned pointer aliases the node actually
	// stored in the tree; the previous `for _, t := range` loop returned a
	// pointer into the loop-variable copy, losing callers' mutations.
	for i := range n.Triggers {
		if n1 := n.Triggers[i].WorkflowDestNode.GetNode(id); n1 != nil {
			return n1
		}
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			if n2 := n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.GetNode(id); n2 != nil {
				return n2
			}
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			if n2 := n.Forks[i].Triggers[j].WorkflowDestNode.GetNode(id); n2 != nil {
				return n2
			}
		}
	}
	return nil
}
// ResetIDs zeroes this node's id and, recursively, the ids of every node
// reachable through triggers, outgoing hooks and forks.
func (n *WorkflowNode) ResetIDs() {
	n.ID = 0
	for i := range n.Triggers {
		n.Triggers[i].WorkflowDestNode.ResetIDs()
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.ResetIDs()
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			n.Forks[i].Triggers[j].WorkflowDestNode.ResetIDs()
		}
	}
}
// Nodes returns a slice containing this node and every node reachable from
// it through triggers, outgoing hooks and forks (the previous comment
// wrongly said "node IDs" — the nodes themselves are returned, as copies).
func (n *WorkflowNode) Nodes() []WorkflowNode {
	res := []WorkflowNode{*n}
	for _, t := range n.Triggers {
		res = append(res, t.WorkflowDestNode.Nodes()...)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			res = append(res, n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.Nodes()...)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			res = append(res, n.Forks[i].Triggers[j].WorkflowDestNode.Nodes()...)
		}
	}
	return res
}
// ancestor walks the sub-tree rooted at node looking for the node with the
// given id. It returns the set of collected ancestor ids and whether id was
// found. With deep=false only the direct parent's id is kept; with deep=true
// every ancestor on the path is accumulated.
func ancestor(id int64, node *WorkflowNode, deep bool) (map[int64]bool, bool) {
	res := map[int64]bool{}
	if id == node.ID {
		// the target itself: found, but no ancestor within this sub-tree
		return res, true
	}
	for _, t := range node.Triggers {
		if t.WorkflowDestNode.ID == id {
			// this node is the direct parent
			res[node.ID] = true
			return res, true
		}
		ids, ok := ancestor(id, &t.WorkflowDestNode, deep)
		if ok {
			// propagate ids when they identify a single direct parent, or
			// always when a deep (full-path) walk was requested
			if len(ids) == 1 || deep {
				for k := range ids {
					res[k] = true
				}
			}
			if deep {
				res[node.ID] = true
			}
			return res, true
		}
	}
	// same search through fork triggers
	for i := range node.Forks {
		for j := range node.Forks[i].Triggers {
			destNode := &node.Forks[i].Triggers[j].WorkflowDestNode
			if destNode.ID == id {
				res[node.ID] = true
				return res, true
			}
			ids, ok := ancestor(id, destNode, deep)
			if ok {
				if len(ids) == 1 || deep {
					for k := range ids {
						res[k] = true
					}
				}
				if deep {
					res[node.ID] = true
				}
				return res, true
			}
		}
	}
	return res, false
}
// Ancestors returns all node ancestors if deep equals true, and only its
// direct ancestors if deep equals false. The search starts from the
// workflow root; if the node is not found there, the join sub-trees are
// searched, in which case the join's source nodes (and, deeply, their own
// ancestors) are added.
func (n *WorkflowNode) Ancestors(w *Workflow, deep bool) []int64 {
	if n == nil {
		return nil
	}
	res, ok := ancestor(n.ID, w.Root, deep)
	if !ok {
	joinLoop:
		for _, j := range w.Joins {
			for _, t := range j.Triggers {
				resAncestor, ok := ancestor(n.ID, &t.WorkflowDestNode, deep)
				if ok {
					if len(resAncestor) == 1 || deep {
						for id := range resAncestor {
							res[id] = true
						}
					}
					// node directly under the join (no intermediate ancestor)
					// or deep walk: the join's sources are ancestors too
					if len(resAncestor) == 0 || deep {
						for _, id := range j.SourceNodeIDs {
							res[id] = true
							if deep {
								node := w.GetNode(id)
								if node != nil {
									ancerstorRes := node.Ancestors(w, deep)
									for _, id := range ancerstorRes {
										res[id] = true
									}
								}
							}
						}
					}
					break joinLoop
				}
			}
		}
	}
	// flatten the set into a slice (order is unspecified: map iteration)
	keys := make([]int64, len(res))
	i := 0
	for k := range res {
		keys[i] = k
		i++
	}
	return keys
}
// TriggersID returns a slice of the ids of all triggers in this sub-tree,
// depth-first.
func (n *WorkflowNode) TriggersID() []int64 {
	res := []int64{}
	for i := range n.Triggers {
		trigger := &n.Triggers[i]
		res = append(res, trigger.ID)
		res = append(res, trigger.WorkflowDestNode.TriggersID()...)
	}
	return res
}
// References returns a slice with the non-empty refs of this node and of
// every node reachable through triggers, outgoing hooks and forks.
func (n *WorkflowNode) References() []string {
	res := []string{}
	if n.Ref != "" {
		res = append(res, n.Ref)
	}
	for i := range n.Triggers {
		res = append(res, n.Triggers[i].WorkflowDestNode.References()...)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			res = append(res, n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.References()...)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			res = append(res, n.Forks[i].Triggers[j].WorkflowDestNode.References()...)
		}
	}
	return res
}
// InvolvedApplications returns the ids of all applications used by this
// node and its descendants. Side effect: when the context only carries the
// embedded Application object, its id is backfilled into ApplicationID.
func (n *WorkflowNode) InvolvedApplications() []int64 {
	res := []int64{}
	if n.Context != nil {
		if n.Context.ApplicationID == 0 && n.Context.Application != nil {
			n.Context.ApplicationID = n.Context.Application.ID
		}
		if n.Context.ApplicationID != 0 {
			res = []int64{n.Context.ApplicationID}
		}
	}
	for _, t := range n.Triggers {
		res = append(res, t.WorkflowDestNode.InvolvedApplications()...)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			res = append(res, n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.InvolvedApplications()...)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			res = append(res, n.Forks[i].Triggers[j].WorkflowDestNode.InvolvedApplications()...)
		}
	}
	return res
}
// InvolvedPipelines returns the ids of all pipelines used by this node and
// its descendants.
func (n *WorkflowNode) InvolvedPipelines() []int64 {
	res := []int64{}
	if n.PipelineID != 0 {
		res = append(res, n.PipelineID)
	}
	for i := range n.Triggers {
		res = append(res, n.Triggers[i].WorkflowDestNode.InvolvedPipelines()...)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			res = append(res, n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.InvolvedPipelines()...)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			res = append(res, n.Forks[i].Triggers[j].WorkflowDestNode.InvolvedPipelines()...)
		}
	}
	return res
}
// GetApplications returns copies of all applications attached to this node
// and its descendants.
func (n *WorkflowNode) GetApplications() []Application {
	res := []Application{}
	if n.Context != nil && n.Context.Application != nil {
		res = append(res, *n.Context.Application)
	}
	for i := range n.Triggers {
		res = append(res, n.Triggers[i].WorkflowDestNode.GetApplications()...)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			res = append(res, n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.GetApplications()...)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			res = append(res, n.Forks[i].Triggers[j].WorkflowDestNode.GetApplications()...)
		}
	}
	return res
}
// GetEnvironments returns copies of all environments attached to this node
// and its descendants.
func (n *WorkflowNode) GetEnvironments() []Environment {
	res := []Environment{}
	if n.Context != nil && n.Context.Environment != nil {
		res = append(res, *n.Context.Environment)
	}
	for i := range n.Triggers {
		res = append(res, n.Triggers[i].WorkflowDestNode.GetEnvironments()...)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			res = append(res, n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.GetEnvironments()...)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			res = append(res, n.Forks[i].Triggers[j].WorkflowDestNode.GetEnvironments()...)
		}
	}
	return res
}
// InvolvedEnvironments returns the ids of all environments used by this
// node and its descendants. Side effect: when the context only carries the
// embedded Environment object, its id is backfilled into EnvironmentID.
func (n *WorkflowNode) InvolvedEnvironments() []int64 {
	res := []int64{}
	if n.Context != nil {
		if n.Context.EnvironmentID == 0 && n.Context.Environment != nil {
			n.Context.EnvironmentID = n.Context.Environment.ID
		}
		if n.Context.EnvironmentID != 0 {
			res = []int64{n.Context.EnvironmentID}
		}
	}
	for _, t := range n.Triggers {
		res = append(res, t.WorkflowDestNode.InvolvedEnvironments()...)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			res = append(res, n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.InvolvedEnvironments()...)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			res = append(res, n.Forks[i].Triggers[j].WorkflowDestNode.InvolvedEnvironments()...)
		}
	}
	return res
}
// InvolvedPlatforms returns the ids of all project platforms used by this
// node and its descendants. Side effect: when the context only carries the
// embedded ProjectPlatform object, its id is backfilled.
func (n *WorkflowNode) InvolvedPlatforms() []int64 {
	res := []int64{}
	if n.Context != nil {
		if n.Context.ProjectPlatformID == 0 && n.Context.ProjectPlatform != nil {
			n.Context.ProjectPlatformID = n.Context.ProjectPlatform.ID
		}
		if n.Context.ProjectPlatformID != 0 {
			res = []int64{n.Context.ProjectPlatformID}
		}
	}
	for _, t := range n.Triggers {
		res = append(res, t.WorkflowDestNode.InvolvedPlatforms()...)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			res = append(res, n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.InvolvedPlatforms()...)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			res = append(res, n.Forks[i].Triggers[j].WorkflowDestNode.InvolvedPlatforms()...)
		}
	}
	return res
}
// CheckApplicationDeploymentStrategies verifies that, when this node deploys
// through a project platform, the node's application declares a deployment
// strategy for that platform. It is a no-op (nil) when the node has no
// context, no application, or no platform.
func (n *WorkflowNode) CheckApplicationDeploymentStrategies(proj *Project) error {
	if n.Context == nil {
		return nil
	}
	if n.Context.Application == nil {
		return nil
	}
	// resolve the platform id, falling back to the embedded object
	var id = n.Context.ProjectPlatformID
	if id == 0 && n.Context.ProjectPlatform != nil {
		id = n.Context.ProjectPlatform.ID
	}
	if id == 0 {
		return nil
	}
	pf := proj.GetPlatformByID(id)
	if pf == nil {
		return fmt.Errorf("platform unavailable")
	}
	// check the matching project application declares a strategy for pf
	for _, a := range proj.Applications {
		if a.ID == n.Context.ApplicationID || (n.Context.Application != nil && n.Context.Application.ID == a.ID) {
			if _, has := a.DeploymentStrategies[pf.Name]; !has {
				return fmt.Errorf("platform %s unavailable", pf.Name)
			}
		}
	}
	return nil
}
// WorkflowNodeTrigger is a link between two nodes (pipelines) in a workflow.
type WorkflowNodeTrigger struct {
	ID int64 `json:"id" db:"id"`
	WorkflowNodeID int64 `json:"workflow_node_id" db:"workflow_node_id"`
	WorkflowDestNodeID int64 `json:"workflow_dest_node_id" db:"workflow_dest_node_id"`
	WorkflowDestNode WorkflowNode `json:"workflow_dest_node" db:"-"`
}
// WorkflowNodeForkTrigger is a link between a fork and its destination node.
type WorkflowNodeForkTrigger struct {
	ID int64 `json:"id" db:"id"`
	WorkflowForkID int64 `json:"workflow_node_fork_id" db:"workflow_node_fork_id"`
	WorkflowDestNodeID int64 `json:"workflow_dest_node_id" db:"workflow_dest_node_id"`
	WorkflowDestNode WorkflowNode `json:"workflow_dest_node" db:"-"`
}
// WorkflowNodeOutgoingHookTrigger is a link between an outgoing hook and
// the node it starts in a workflow.
type WorkflowNodeOutgoingHookTrigger struct {
	ID int64 `json:"id" db:"id"`
	WorkflowNodeOutgoingHookID int64 `json:"workflow_node_outgoing_hook_id" db:"workflow_node_outgoing_hook_id"`
	WorkflowDestNodeID int64 `json:"workflow_dest_node_id" db:"workflow_dest_node_id"`
	WorkflowDestNode WorkflowNode `json:"workflow_dest_node" db:"-"`
}
// WorkflowNodeConditions holds run conditions for a node: either a list of
// plain variable/operator/value checks, or a lua script (mutually exclusive
// in practice — NOTE(review): not enforced by the type itself).
type WorkflowNodeConditions struct {
	PlainConditions []WorkflowNodeCondition `json:"plain,omitempty" yaml:"check,omitempty"`
	LuaScript string `json:"lua_script,omitempty" yaml:"script,omitempty"`
}
// WorkflowNodeCondition represents a condition to trigger or not a pipeline
// in a workflow. Operator can be =, !=, regex.
type WorkflowNodeCondition struct {
	Variable string `json:"variable"`
	Operator string `json:"operator"`
	Value string `json:"value"`
}
// WorkflowNodeContext is the execution context attached to a node:
// application, environment, platform, default payload/parameters, run
// conditions, and a mutex flag (when true, only one run at a time).
type WorkflowNodeContext struct {
	ID int64 `json:"id" db:"id"`
	WorkflowNodeID int64 `json:"workflow_node_id" db:"workflow_node_id"`
	ApplicationID int64 `json:"application_id" db:"application_id"`
	Application *Application `json:"application,omitempty" db:"-"`
	Environment *Environment `json:"environment,omitempty" db:"-"`
	EnvironmentID int64 `json:"environment_id" db:"environment_id"`
	ProjectPlatform *ProjectPlatform `json:"project_platform" db:"-"`
	ProjectPlatformID int64 `json:"project_platform_id" db:"project_platform_id"`
	DefaultPayload interface{} `json:"default_payload,omitempty" db:"-"`
	DefaultPipelineParameters []Parameter `json:"default_pipeline_parameters,omitempty" db:"-"`
	Conditions WorkflowNodeConditions `json:"conditions,omitempty" db:"-"`
	Mutex bool `json:"mutex"`
}
// HasDefaultPayload returns true if the node context carries a default
// payload that flattens to at least one key/value pair.
func (c *WorkflowNodeContext) HasDefaultPayload() bool {
	if c == nil {
		return false
	}
	if c.DefaultPayload == nil {
		return false
	}
	// flatten the payload; dump errors are deliberately ignored (best effort)
	dumper := dump.NewDefaultEncoder(nil)
	dumper.ExtraFields.DetailedMap = false
	dumper.ExtraFields.DetailedStruct = false
	dumper.ExtraFields.Len = false
	dumper.ExtraFields.Type = false
	m, _ := dumper.ToStringMap(c.DefaultPayload)
	return len(m) > 0
}
// DefaultPayloadToMap flattens the context's default payload into a string
// map. It errors on a nil context and returns an empty map when there is no
// default payload.
func (c *WorkflowNodeContext) DefaultPayloadToMap() (map[string]string, error) {
	switch {
	case c == nil:
		return nil, fmt.Errorf("Workflow node context is nil")
	case c.DefaultPayload == nil:
		return map[string]string{}, nil
	}
	dumper := dump.NewDefaultEncoder(nil)
	dumper.ExtraFields.DetailedMap = false
	dumper.ExtraFields.DetailedStruct = false
	dumper.ExtraFields.Len = false
	dumper.ExtraFields.Type = false
	return dumper.ToStringMap(c.DefaultPayload)
}
// WorkflowNodeContextDefaultPayloadVCS represents the default payload of a
// workflow attached to a repository webhook (git.* variables).
type WorkflowNodeContextDefaultPayloadVCS struct {
	GitBranch string `json:"git.branch" db:"-"`
	GitTag string `json:"git.tag" db:"-"`
	GitHash string `json:"git.hash" db:"-"`
	GitAuthor string `json:"git.author" db:"-"`
	GitHashBefore string `json:"git.hash.before" db:"-"`
	GitRepository string `json:"git.repository" db:"-"`
	GitMessage string `json:"git.message" db:"-"`
}
// IsWorkflowNodeContextDefaultPayloadVCS checks, through several heuristics,
// whether the given value is a default VCS payload: an exact type match,
// an exact key-set match with the reference struct, or the presence of the
// characteristic git.* keys.
func IsWorkflowNodeContextDefaultPayloadVCS(i interface{}) bool {
	if _, ok := i.(WorkflowNodeContextDefaultPayloadVCS); ok {
		return true
	}
	dumper := dump.NewDefaultEncoder(nil)
	dumper.ExtraFields.DetailedMap = false
	dumper.ExtraFields.DetailedStruct = false
	dumper.ExtraFields.Len = false
	dumper.ExtraFields.Type = false
	mI, _ := dumper.ToMap(i)
	mD, _ := dumper.ToMap(WorkflowNodeContextDefaultPayloadVCS{})
	hasKey := func(s string) bool {
		_, has := mI[s]
		return has
	}
	// exact structural match: same key count and every reference key present
	if len(mI) == len(mD) {
		allPresent := true
		for k := range mD {
			if !hasKey(k) {
				allPresent = false
				break
			}
		}
		if allPresent {
			return true
		}
	}
	// fallback: look for the characteristic git.* keys
	return hasKey("git.branch") &&
		hasKey("git.hash") &&
		hasKey("git.author") &&
		hasKey("git.hash.before") &&
		hasKey("git.repository") &&
		hasKey("git.message")
}
// WorkflowList fetches from the API the list of workflows of a project.
func WorkflowList(projectkey string) ([]Workflow, error) {
	body, _, err := Request("GET", fmt.Sprintf("/project/%s/workflows", projectkey), nil)
	if err != nil {
		return nil, err
	}
	ws := []Workflow{}
	if err := json.Unmarshal(body, &ws); err != nil {
		return nil, err
	}
	return ws, nil
}
// WorkflowGet fetches from the API the workflow with the given name in the
// given project.
func WorkflowGet(projectkey, name string) (*Workflow, error) {
	body, _, err := Request("GET", fmt.Sprintf("/project/%s/workflows/%s", projectkey, name), nil)
	if err != nil {
		return nil, err
	}
	w := Workflow{}
	if err := json.Unmarshal(body, &w); err != nil {
		return nil, err
	}
	return &w, nil
}
// WorkflowDelete asks the API to delete the named workflow of a project.
func WorkflowDelete(projectkey, name string) error {
	_, _, err := Request("DELETE", fmt.Sprintf("/project/%s/workflows/%s", projectkey, name), nil)
	return err
}
// WorkflowNodeJobRunCount holds the number of workflow job runs between
// Since and Until.
// NOTE(review): Count is serialized under the JSON key "version" — looks
// historical; confirm with API consumers before changing.
type WorkflowNodeJobRunCount struct {
	Count int64 `json:"version"`
	Since time.Time `json:"since"`
	Until time.Time `json:"until"`
}
// Label represents a colored label that can be attached to a workflow
// within a project.
type Label struct {
	ID int64 `json:"id" db:"id"`
	Name string `json:"name" db:"name"`
	Color string `json:"color" db:"color"`
	ProjectID int64 `json:"project_id" db:"project_id"`
	WorkflowID int64 `json:"workflow_id,omitempty" db:"-"`
}
// Validate checks the label: the name is mandatory; an empty color is
// replaced by a random hexadecimal value, while a non-empty color must
// match ColorRegexp (otherwise ErrIconBadFormat is returned).
func (label *Label) Validate() error {
	if label.Name == "" {
		return WrapError(fmt.Errorf("Label must have a name"), "IsValid>")
	}
	if label.Color == "" {
		// generate a random "#rrggbbaa" color
		bytes := make([]byte, 4)
		if _, err := rand.Read(bytes); err != nil {
			return WrapError(err, "IsValid> Cannot create random color")
		}
		label.Color = "#" + hex.EncodeToString(bytes)
	} else if !ColorRegexp.MatchString(label.Color) {
		// MatchString avoids the []byte copy the previous Match call made
		return ErrIconBadFormat
	}
	return nil
}
// fix(api): use name to find node instead of ID (#3602)
package sdk
import (
"crypto/rand"
"encoding/hex"
"encoding/json"
"fmt"
"regexp"
"sort"
"time"
"github.com/fsamin/go-dump"
)
// DefaultHistoryLength is the default history length kept for a workflow.
const (
	DefaultHistoryLength int64 = 20
)
// ColorRegexp matches a hexadecimal color value: a '#' followed by 3 to 8
// word characters (e.g. "#fff", "#a1b2c3d4").
var ColorRegexp = regexp.MustCompile(`^#\w{3,8}$`)
// Workflow represents a pipeline-based workflow: a root node plus joins
// (old-style model), the new WorkflowData representation, and resolved
// resources (pipelines, applications, environments, platforms, hook
// models) indexed by id.
type Workflow struct {
	ID int64 `json:"id" db:"id" cli:"-"`
	Name string `json:"name" db:"name" cli:"name,key"`
	Description string `json:"description,omitempty" db:"description" cli:"description"`
	Icon string `json:"icon,omitempty" db:"icon" cli:"-"`
	LastModified time.Time `json:"last_modified" db:"last_modified" mapstructure:"-"`
	ProjectID int64 `json:"project_id,omitempty" db:"project_id" cli:"-"`
	ProjectKey string `json:"project_key" db:"-" cli:"-"`
	RootID int64 `json:"root_id,omitempty" db:"root_node_id" cli:"-"`
	Root *WorkflowNode `json:"root,omitempty" db:"-" cli:"-"`
	Joins []WorkflowNodeJoin `json:"joins,omitempty" db:"-" cli:"-"`
	Groups []GroupPermission `json:"groups,omitempty" db:"-" cli:"-"`
	Permission int `json:"permission,omitempty" db:"-" cli:"-"`
	Metadata Metadata `json:"metadata" yaml:"metadata" db:"-"`
	Usage *Usage `json:"usage,omitempty" db:"-" cli:"-"`
	HistoryLength int64 `json:"history_length" db:"history_length" cli:"-"`
	PurgeTags []string `json:"purge_tags,omitempty" db:"-" cli:"-"`
	Notifications []WorkflowNotification `json:"notifications,omitempty" db:"-" cli:"-"`
	FromRepository string `json:"from_repository,omitempty" db:"from_repository" cli:"from"`
	DerivedFromWorkflowID int64 `json:"derived_from_workflow_id,omitempty" db:"derived_from_workflow_id" cli:"-"`
	DerivedFromWorkflowName string `json:"derived_from_workflow_name,omitempty" db:"derived_from_workflow_name" cli:"-"`
	DerivationBranch string `json:"derivation_branch,omitempty" db:"derivation_branch" cli:"-"`
	Audits []AuditWorklflow `json:"audits" db:"-"`
	Pipelines map[int64]Pipeline `json:"pipelines" db:"-" cli:"-" mapstructure:"-"`
	Applications map[int64]Application `json:"applications" db:"-" cli:"-" mapstructure:"-"`
	Environments map[int64]Environment `json:"environments" db:"-" cli:"-" mapstructure:"-"`
	ProjectPlatforms map[int64]ProjectPlatform `json:"project_platforms" db:"-" cli:"-" mapstructure:"-"`
	HookModels map[int64]WorkflowHookModel `json:"hook_models" db:"-" cli:"-" mapstructure:"-"`
	OutGoingHookModels map[int64]WorkflowHookModel `json:"outgoing_hook_models" db:"-" cli:"-" mapstructure:"-"`
	Labels []Label `json:"labels" db:"-" cli:"labels"`
	ToDelete bool `json:"to_delete" db:"to_delete" cli:"-"`
	Favorite bool `json:"favorite" db:"-" cli:"favorite"`
	WorkflowData *WorkflowData `json:"workflow_data" db:"-" cli:"-"`
}
// GetApplication returns the workflow application with the given id
// (the Application zero value when unknown).
func (w *Workflow) GetApplication(ID int64) Application {
	return w.Applications[ID]
}
// RetroMigrate is a temporary method converting the new workflow structure
// (WorkflowData) back into the old Root/Joins structure for backward
// compatibility, then re-attaching application/environment/platform objects
// to each old-style node context from the workflow's resolved maps.
// NOTE(review): the mutations below only stick if GetNodeByName returns a
// pointer aliasing the node stored in the tree — verify.
func (w *Workflow) RetroMigrate() {
	root := w.WorkflowData.Node.retroMigrate()
	w.Root = &root
	if len(w.WorkflowData.Joins) > 0 {
		w.Joins = make([]WorkflowNodeJoin, 0, len(w.WorkflowData.Joins))
		for _, j := range w.WorkflowData.Joins {
			w.Joins = append(w.Joins, j.retroMigrateJoin())
		}
	}
	// Set context on old node
	for _, n := range w.Nodes(true) {
		node := w.GetNodeByName(n.Name)
		if node.Context == nil {
			continue
		}
		if node.Context.ApplicationID != 0 {
			app, ok := w.Applications[node.Context.ApplicationID]
			if ok {
				node.Context.Application = &app
			}
		}
		if node.Context.EnvironmentID != 0 {
			env, ok := w.Environments[node.Context.EnvironmentID]
			if ok {
				node.Context.Environment = &env
			}
		}
		if node.Context.ProjectPlatformID != 0 {
			pp, ok := w.ProjectPlatforms[node.Context.ProjectPlatformID]
			if ok {
				node.Context.ProjectPlatform = &pp
			}
		}
	}
}
// Migrate converts the old Root/Joins workflow structure into the new
// WorkflowData representation; withID controls whether database ids are kept.
func (w *Workflow) Migrate(withID bool) WorkflowData {
	var work WorkflowData
	if w == nil || w.Root == nil {
		return work
	}
	// root node first, then every join
	work.Node = w.Root.migrate(withID)
	work.Joins = make([]Node, 0, len(w.Joins))
	for _, j := range w.Joins {
		work.Joins = append(work.Joins, j.migrate(withID))
	}
	return work
}
// WorkflowNotification represents a notification attached to a workflow;
// source nodes are referenced either by ref or by id.
type WorkflowNotification struct {
	ID int64 `json:"id,omitempty" db:"id"`
	WorkflowID int64 `json:"workflow_id,omitempty" db:"workflow_id"`
	SourceNodeRefs []string `json:"source_node_ref,omitempty" db:"-"`
	SourceNodeIDs []int64 `json:"source_node_id,omitempty" db:"-"`
	Type string `json:"type" db:"type"`
	Settings UserNotificationSettings `json:"settings" db:"-"`
}
// Forks returns every fork of the workflow indexed by fork ID, together with
// a map of fork trigger ID -> owning fork name. Both the root tree and all
// join trigger trees are walked.
func (w *Workflow) Forks() (map[int64]WorkflowNodeFork, map[int64]string) {
	// Idiom: a map needs no explicit zero size hint.
	forkMap := make(map[int64]WorkflowNodeFork)
	forkTriggerMap := make(map[int64]string)
	w.Root.ForksMap(&forkMap, &forkTriggerMap)
	for i := range w.Joins {
		for j := range w.Joins[i].Triggers {
			w.Joins[i].Triggers[j].WorkflowDestNode.ForksMap(&forkMap, &forkTriggerMap)
		}
	}
	return forkMap, forkTriggerMap
}
// JoinsID returns the database IDs of all the workflow's joins, in slice order.
func (w *Workflow) JoinsID() []int64 {
	ids := make([]int64, 0, len(w.Joins))
	for _, join := range w.Joins {
		ids = append(ids, join.ID)
	}
	return ids
}
// ResetIDs resets the identifiers of the root node, of every join and of
// every node reachable through join triggers.
func (w *Workflow) ResetIDs() {
	if w.Root == nil {
		return
	}
	w.Root.ResetIDs()
	for i := range w.Joins {
		join := &w.Joins[i]
		join.ID = 0
		join.SourceNodeIDs = nil
		for j := range join.Triggers {
			join.Triggers[j].WorkflowDestNode.ResetIDs()
		}
	}
}
// Nodes returns every node of the workflow. When withRoot is true the root
// node itself is included; otherwise the walk starts at the root's children
// (triggers, forks, outgoing hooks). Nodes reachable through join triggers
// are always included.
func (w *Workflow) Nodes(withRoot bool) []WorkflowNode {
	if w.Root == nil {
		return nil
	}
	res := []WorkflowNode{}
	if withRoot {
		res = append(res, w.Root.Nodes()...)
	} else {
		// Skip the root itself but walk each of its child trees.
		for _, t := range w.Root.Triggers {
			res = append(res, t.WorkflowDestNode.Nodes()...)
		}
		for _, f := range w.Root.Forks {
			for _, t := range f.Triggers {
				res = append(res, t.WorkflowDestNode.Nodes()...)
			}
		}
		for i := range w.Root.OutgoingHooks {
			for j := range w.Root.OutgoingHooks[i].Triggers {
				res = append(res, w.Root.OutgoingHooks[i].Triggers[j].WorkflowDestNode.Nodes()...)
			}
		}
	}
	for _, j := range w.Joins {
		for _, t := range j.Triggers {
			res = append(res, t.WorkflowDestNode.Nodes()...)
		}
	}
	return res
}
// AddTrigger adds dest as a trigger of the node identified by name, searching
// the root node tree and every join trigger tree. It is a no-op when the
// workflow has no WorkflowData or an unnamed root.
func (w *Workflow) AddTrigger(name string, dest Node) {
	if w.WorkflowData == nil || w.WorkflowData.Node.Name == "" {
		return
	}
	w.WorkflowData.Node.AddTrigger(name, dest)
	joins := w.WorkflowData.Joins
	for i := range joins {
		for j := range joins[i].Triggers {
			joins[i].Triggers[j].ChildNode.AddTrigger(name, dest)
		}
	}
}
// GetNodeByRef returns the workflow node matching the given ref, searching
// the root tree first and then every join trigger tree. Returns nil when no
// node matches.
func (w *Workflow) GetNodeByRef(ref string) *WorkflowNode {
	if found := w.Root.GetNodeByRef(ref); found != nil {
		return found
	}
	for ji := range w.Joins {
		triggers := w.Joins[ji].Triggers
		for ti := range triggers {
			if found := triggers[ti].WorkflowDestNode.GetNodeByRef(ref); found != nil {
				return found
			}
		}
	}
	return nil
}
// GetForkByName returns the fork matching the given name, searching the root
// tree first and then every join trigger tree. Returns nil when not found.
//
// Fix: iterate by index so the returned pointer references the workflow's own
// data instead of a loop-local copy (mutations through the result used to be
// applied to a discarded copy). This matches GetNodeByRef above.
func (w *Workflow) GetForkByName(name string) *WorkflowNodeFork {
	if fork := w.Root.GetForkByName(name); fork != nil {
		return fork
	}
	for i := range w.Joins {
		triggers := w.Joins[i].Triggers
		for j := range triggers {
			if fork := triggers[j].WorkflowDestNode.GetForkByName(name); fork != nil {
				return fork
			}
		}
	}
	return nil
}
//GetNodeByName returns the node given its name, or nil when not found.
//
// Fix: iterate by index so the returned pointer references the workflow's own
// nodes rather than loop-local copies (previously, mutations of value fields
// through the result were silently lost). This matches GetNodeByRef above.
func (w *Workflow) GetNodeByName(name string) *WorkflowNode {
	if found := w.Root.GetNodeByName(name); found != nil {
		return found
	}
	for i := range w.Joins {
		triggers := w.Joins[i].Triggers
		for j := range triggers {
			if found := triggers[j].WorkflowDestNode.GetNodeByName(name); found != nil {
				return found
			}
		}
	}
	return nil
}
//GetNode returns the node given its id, or nil when not found.
//
// Fix: iterate by index so the returned pointer references the workflow's own
// nodes rather than loop-local copies (previously, mutations of value fields
// through the result were silently lost). This matches GetNodeByRef above.
func (w *Workflow) GetNode(id int64) *WorkflowNode {
	if found := w.Root.GetNode(id); found != nil {
		return found
	}
	for i := range w.Joins {
		triggers := w.Joins[i].Triggers
		for j := range triggers {
			if found := triggers[j].WorkflowDestNode.GetNode(id); found != nil {
				return found
			}
		}
	}
	return nil
}
//GetJoin returns the join given its id, or nil when no join matches.
//
// Fix: the previous code returned the address of the range variable — a
// pointer to a per-iteration copy — so mutations through the result never
// reached w.Joins. Index into the slice instead.
func (w *Workflow) GetJoin(id int64) *WorkflowNodeJoin {
	for i := range w.Joins {
		if w.Joins[i].ID == id {
			return &w.Joins[i]
		}
	}
	return nil
}
// TriggersID returns the IDs of every trigger in the workflow: those of the
// root tree plus each join trigger and its descendants.
func (w *Workflow) TriggersID() []int64 {
	ids := w.Root.TriggersID()
	for _, join := range w.Joins {
		for _, trigger := range join.Triggers {
			ids = append(ids, trigger.ID)
			ids = append(ids, trigger.WorkflowDestNode.TriggersID()...)
		}
	}
	return ids
}
// References returns the refs of every node of the workflow (root tree plus
// all join trigger trees). Returns nil when the workflow has no root.
func (w *Workflow) References() []string {
	if w.Root == nil {
		return nil
	}
	refs := w.Root.References()
	for _, join := range w.Joins {
		for _, trigger := range join.Triggers {
			refs = append(refs, trigger.WorkflowDestNode.References()...)
		}
	}
	return refs
}
// InvolvedApplications returns the IDs of all applications used in the
// workflow (root tree plus join trigger trees). May contain duplicates.
func (w *Workflow) InvolvedApplications() []int64 {
	if w.Root == nil {
		return nil
	}
	ids := w.Root.InvolvedApplications()
	for _, join := range w.Joins {
		for _, trigger := range join.Triggers {
			ids = append(ids, trigger.WorkflowDestNode.InvolvedApplications()...)
		}
	}
	return ids
}
// InvolvedPipelines returns the IDs of all pipelines used in the workflow
// (root tree plus join trigger trees). May contain duplicates.
func (w *Workflow) InvolvedPipelines() []int64 {
	if w.Root == nil {
		return nil
	}
	ids := w.Root.InvolvedPipelines()
	for _, join := range w.Joins {
		for _, trigger := range join.Triggers {
			ids = append(ids, trigger.WorkflowDestNode.InvolvedPipelines()...)
		}
	}
	return ids
}
//GetApplications returns all applications used in the workflow, deduplicated
// by application name, preserving first-seen order.
//
// Fix: the previous dedup was O(n²) (inner scan of the result slice per
// element); a set of seen names makes it O(n) with identical output.
func (w *Workflow) GetApplications() []Application {
	if w.Root == nil {
		return nil
	}
	all := w.Root.GetApplications()
	for _, join := range w.Joins {
		for _, trigger := range join.Triggers {
			all = append(all, trigger.WorkflowDestNode.GetApplications()...)
		}
	}
	seen := make(map[string]struct{}, len(all))
	withoutDuplicates := make([]Application, 0, len(all))
	for _, app := range all {
		if _, ok := seen[app.Name]; ok {
			continue
		}
		seen[app.Name] = struct{}{}
		withoutDuplicates = append(withoutDuplicates, app)
	}
	return withoutDuplicates
}
//GetEnvironments returns all environments used in the workflow, deduplicated
// by environment name, preserving first-seen order.
//
// Fix: the previous dedup was O(n²) (inner scan of the result slice per
// element); a set of seen names makes it O(n) with identical output.
func (w *Workflow) GetEnvironments() []Environment {
	if w.Root == nil {
		return nil
	}
	all := w.Root.GetEnvironments()
	for _, join := range w.Joins {
		for _, trigger := range join.Triggers {
			all = append(all, trigger.WorkflowDestNode.GetEnvironments()...)
		}
	}
	seen := make(map[string]struct{}, len(all))
	withoutDuplicates := make([]Environment, 0, len(all))
	for _, env := range all {
		if _, ok := seen[env.Name]; ok {
			continue
		}
		seen[env.Name] = struct{}{}
		withoutDuplicates = append(withoutDuplicates, env)
	}
	return withoutDuplicates
}
// GetPipelines returns all pipelines used in the workflow, taken from the
// workflow-level Pipelines map. Result order is unspecified (map iteration).
func (w *Workflow) GetPipelines() []Pipeline {
	if w.Root == nil {
		return nil
	}
	pipelines := make([]Pipeline, 0, len(w.Pipelines))
	for _, p := range w.Pipelines {
		pipelines = append(pipelines, p)
	}
	return pipelines
}
// GetRepositories returns the deduplicated repository fullnames of the
// workflow's applications. Result order is unspecified (map iteration).
func (w *Workflow) GetRepositories() []string {
	uniq := map[string]struct{}{}
	for _, app := range w.GetApplications() {
		if fullname := app.RepositoryFullname; fullname != "" {
			uniq[fullname] = struct{}{}
		}
	}
	res := make([]string, 0, len(uniq))
	for repo := range uniq {
		res = append(res, repo)
	}
	return res
}
// InvolvedEnvironments returns the IDs of all environments used in the
// workflow (root tree plus join trigger trees). May contain duplicates.
func (w *Workflow) InvolvedEnvironments() []int64 {
	if w.Root == nil {
		return nil
	}
	ids := w.Root.InvolvedEnvironments()
	for _, join := range w.Joins {
		for _, trigger := range join.Triggers {
			ids = append(ids, trigger.WorkflowDestNode.InvolvedEnvironments()...)
		}
	}
	return ids
}
// InvolvedPlatforms returns the IDs of all project platforms used in the
// workflow (root tree plus join trigger trees). May contain duplicates.
func (w *Workflow) InvolvedPlatforms() []int64 {
	if w.Root == nil {
		return nil
	}
	ids := w.Root.InvolvedPlatforms()
	for _, join := range w.Joins {
		for _, trigger := range join.Triggers {
			ids = append(ids, trigger.WorkflowDestNode.InvolvedPlatforms()...)
		}
	}
	return ids
}
// Visit applies the visitor function to every node of the workflow, starting
// at the root tree and then walking each join trigger tree.
func (w *Workflow) Visit(visitor func(*WorkflowNode)) {
	w.Root.Visit(visitor)
	for i := range w.Joins {
		triggers := w.Joins[i].Triggers
		for j := range triggers {
			triggers[j].WorkflowDestNode.Visit(visitor)
		}
	}
}
// VisitNode walks the new-style workflow (WorkflowData) and applies the
// visitor function to every node, starting at the root and then each join
// trigger child.
func (w *Workflow) VisitNode(visitor func(*Node, *Workflow)) {
	w.WorkflowData.Node.VisitNode(w, visitor)
	joins := w.WorkflowData.Joins
	for i := range joins {
		for j := range joins[i].Triggers {
			joins[i].Triggers[j].ChildNode.VisitNode(w, visitor)
		}
	}
}
// SortNode sorts the new-style workflow in place: every node's triggers are
// sorted, then each join's triggers are ordered by child node name.
func (w *Workflow) SortNode() {
	w.VisitNode(func(n *Node, w *Workflow) {
		n.Sort()
	})
	for i := range w.WorkflowData.Joins {
		triggers := w.WorkflowData.Joins[i].Triggers
		sort.Slice(triggers, func(a, b int) bool {
			return triggers[a].ChildNode.Name < triggers[b].ChildNode.Name
		})
	}
}
// Sort sorts the old-style workflow in place: every node's triggers are
// sorted, then each join's triggers are ordered by destination node name.
func (w *Workflow) Sort() {
	w.Visit(func(n *WorkflowNode) {
		n.Sort()
	})
	for i := range w.Joins {
		triggers := w.Joins[i].Triggers
		sort.Slice(triggers, func(a, b int) bool {
			return triggers[a].WorkflowDestNode.Name < triggers[b].WorkflowDestNode.Name
		})
	}
}
// AssignEmptyType fills the Type field of every node that does not have one
// yet. Joins always get the join type; other nodes are inferred from their
// context: pipeline when a pipeline ID is set, outgoing hook when a hook
// model ID is set, fork otherwise.
func (w *Workflow) AssignEmptyType() {
	for i := range w.WorkflowData.Joins {
		w.WorkflowData.Joins[i].Type = NodeTypeJoin
	}
	for _, n := range w.WorkflowData.Array() {
		if n.Type != "" {
			continue
		}
		switch {
		case n.Context != nil && n.Context.PipelineID != 0:
			n.Type = NodeTypePipeline
		case n.OutGoingHookContext != nil && n.OutGoingHookContext.HookModelID != 0:
			n.Type = NodeTypeOutGoingHook
		default:
			n.Type = NodeTypeFork
		}
	}
}
// ValidateType checks that every node of the workflow carries a type
// consistent with its contexts. It returns an error naming all offending
// nodes, or nil when the workflow is consistent.
//
// Idiom fix: len() of a nil slice is 0 in Go, so the explicit nil checks on
// n.JoinContext were redundant and have been folded into the len() tests.
func (w *Workflow) ValidateType() error {
	namesInError := make([]string, 0)
	for _, n := range w.WorkflowData.Array() {
		switch n.Type {
		case NodeTypePipeline:
			// A pipeline node must reference a pipeline by ID or by name.
			if n.Context == nil || (n.Context.PipelineID == 0 && n.Context.PipelineName == "") {
				namesInError = append(namesInError, n.Name)
			}
		case NodeTypeOutGoingHook:
			// An outgoing hook node must reference a hook model by ID or by name.
			if n.OutGoingHookContext == nil || (n.OutGoingHookContext.HookModelID == 0 && n.OutGoingHookContext.HookModelName == "") {
				namesInError = append(namesInError, n.Name)
			}
		case NodeTypeJoin:
			// A join must declare at least one parent.
			if len(n.JoinContext) == 0 {
				namesInError = append(namesInError, n.Name)
			}
		case NodeTypeFork:
			// A fork must carry no pipeline, hook or join data at all.
			if (n.Context != nil && (n.Context.PipelineID != 0 || n.Context.PipelineName != "")) ||
				(n.OutGoingHookContext != nil && (n.OutGoingHookContext.HookModelID != 0 || n.OutGoingHookContext.HookModelName != "")) ||
				len(n.JoinContext) > 0 {
				namesInError = append(namesInError, n.Name)
			}
		default:
			// Unknown or empty node type.
			namesInError = append(namesInError, n.Name)
		}
	}
	if len(namesInError) > 0 {
		return WithStack(fmt.Errorf("wrong type for nodes %v", namesInError))
	}
	return nil
}
// Visit applies visitor to this node and then, depth-first, to every child
// reachable through triggers, outgoing hooks and forks.
func (n *WorkflowNode) Visit(visitor func(*WorkflowNode)) {
	visitor(n)
	for i := range n.Triggers {
		n.Triggers[i].WorkflowDestNode.Visit(visitor)
	}
	for i := range n.OutgoingHooks {
		hook := &n.OutgoingHooks[i]
		for j := range hook.Triggers {
			hook.Triggers[j].WorkflowDestNode.Visit(visitor)
		}
	}
	for i := range n.Forks {
		fork := &n.Forks[i]
		for j := range fork.Triggers {
			fork.Triggers[j].WorkflowDestNode.Visit(visitor)
		}
	}
}
// Sort orders this node's triggers by destination node name, in place.
func (n *WorkflowNode) Sort() {
	triggers := n.Triggers
	sort.Slice(triggers, func(a, b int) bool {
		return triggers[a].WorkflowDestNode.Name < triggers[b].WorkflowDestNode.Name
	})
}
//WorkflowNodeJoin aims to joins multiple node into multiple triggers
type WorkflowNodeJoin struct {
	ID int64 `json:"id" db:"id"`
	// Ref is a stable reference; reused as name/ref when migrating to Node.
	Ref string `json:"ref" db:"-"`
	WorkflowID int64 `json:"workflow_id" db:"workflow_id"`
	// Parent nodes, referenced by ID and/or by ref.
	// NOTE(review): migrate() indexes both slices in parallel — confirm they
	// are kept the same length.
	SourceNodeIDs []int64 `json:"source_node_id,omitempty" db:"-"`
	SourceNodeRefs []string `json:"source_node_ref,omitempty" db:"-"`
	Triggers []WorkflowNodeJoinTrigger `json:"triggers,omitempty" db:"-"`
}
// migrate converts the old join into a new-style Node of type join. When
// withID is true the original database ID is preserved. Each parent becomes a
// NodeJoin entry and each trigger's destination is migrated recursively.
func (j WorkflowNodeJoin) migrate(withID bool) Node {
	newNode := Node{
		Name: j.Ref,
		Ref: j.Ref,
		Type: NodeTypeJoin,
		JoinContext: make([]NodeJoin, 0, len(j.SourceNodeIDs)),
		Triggers: make([]NodeTrigger, 0, len(j.Triggers)),
	}
	// A join has no name of its own: fall back to a random ref when empty.
	if newNode.Ref == "" {
		newNode.Ref = RandomString(5)
	}
	if withID {
		newNode.ID = j.ID
	}
	// NOTE(review): assumes SourceNodeRefs is at least as long as
	// SourceNodeIDs; a shorter slice would panic here — confirm invariant.
	for i := range j.SourceNodeIDs {
		newNode.JoinContext = append(newNode.JoinContext, NodeJoin{
			ParentID: j.SourceNodeIDs[i],
			ParentName: j.SourceNodeRefs[i],
		})
	}
	for _, t := range j.Triggers {
		child := t.WorkflowDestNode.migrate(withID)
		newNode.Triggers = append(newNode.Triggers, NodeTrigger{
			ParentNodeName: newNode.Name,
			ChildNode: child,
		})
	}
	return newNode
}
//WorkflowNodeJoinTrigger is a trigger for joins
// It links a join (WorkflowNodeJoinID) to a destination node, carried both
// by ID and as an embedded value.
type WorkflowNodeJoinTrigger struct {
	ID int64 `json:"id" db:"id"`
	WorkflowNodeJoinID int64 `json:"join_id" db:"workflow_node_join_id"`
	WorkflowDestNodeID int64 `json:"workflow_dest_node_id" db:"workflow_dest_node_id"`
	WorkflowDestNode WorkflowNode `json:"workflow_dest_node" db:"-"`
}
//WorkflowNode represents a node in w workflow tree
// This is the old (deprecated) node representation; see Node/WorkflowData
// for the new structure and migrate()/retroMigrate() for conversions.
type WorkflowNode struct {
	ID int64 `json:"id" db:"id"`
	Name string `json:"name" db:"name"`
	Ref string `json:"ref,omitempty" db:"-"`
	WorkflowID int64 `json:"workflow_id" db:"workflow_id"`
	PipelineID int64 `json:"pipeline_id" db:"pipeline_id"`
	PipelineName string `json:"pipeline_name" db:"-"`
	DeprecatedPipeline Pipeline `json:"pipeline" db:"-"`
	Context *WorkflowNodeContext `json:"context" db:"-"`
	// Back-references to the trigger/join/hook/fork this node hangs from;
	// never serialized.
	TriggerSrcID int64 `json:"-" db:"-"`
	TriggerJoinSrcID int64 `json:"-" db:"-"`
	TriggerHookSrcID int64 `json:"-" db:"-"`
	TriggerSrcForkID int64 `json:"-" db:"-"`
	Hooks []WorkflowNodeHook `json:"hooks,omitempty" db:"-"`
	Forks []WorkflowNodeFork `json:"forks,omitempty" db:"-"`
	Triggers []WorkflowNodeTrigger `json:"triggers,omitempty" db:"-"`
	OutgoingHooks []WorkflowNodeOutgoingHook `json:"outgoing_hooks,omitempty" db:"-"`
}
// retroMigrate converts a new-style Node back into the old WorkflowNode
// representation. Pipeline children become triggers, fork children become
// forks and outgoing-hook children become outgoing hooks.
//
// NOTE(review): assumes n.Context is non-nil — confirm callers guarantee it.
func (n Node) retroMigrate() WorkflowNode {
	newNode := WorkflowNode{
		Ref: n.Ref,
		Name: n.Name,
		WorkflowID: n.WorkflowID,
		Context: &WorkflowNodeContext{
			ProjectPlatformID: n.Context.ProjectPlatformID,
			EnvironmentID: n.Context.EnvironmentID,
			ApplicationID: n.Context.ApplicationID,
			DefaultPipelineParameters: n.Context.DefaultPipelineParameters,
			DefaultPayload: n.Context.DefaultPayload,
			Mutex: n.Context.Mutex,
			Conditions: n.Context.Conditions,
		},
		PipelineID: n.Context.PipelineID,
		OutgoingHooks: nil,
		Hooks: make([]WorkflowNodeHook, 0, len(n.Hooks)),
		Triggers: nil,
		Forks: nil,
	}
	for _, h := range n.Hooks {
		hook := WorkflowNodeHook{
			UUID: h.UUID,
			Ref: h.Ref,
			WorkflowHookModelID: h.HookModelID,
			Config: h.Config,
		}
		newNode.Hooks = append(newNode.Hooks, hook)
	}
	// Dispatch each child on its node type; children of other types
	// (e.g. joins) are dropped here.
	for _, t := range n.Triggers {
		switch t.ChildNode.Type {
		case NodeTypePipeline:
			trig := WorkflowNodeTrigger{
				WorkflowDestNode: t.ChildNode.retroMigrate(),
			}
			newNode.Triggers = append(newNode.Triggers, trig)
		case NodeTypeFork:
			newNode.Forks = append(newNode.Forks, t.ChildNode.retroMigrateFork())
		case NodeTypeOutGoingHook:
			newNode.OutgoingHooks = append(newNode.OutgoingHooks, t.ChildNode.retroMigrateOutGoingHook())
		}
	}
	return newNode
}
// retroMigrateFork converts a new-style fork node into the old
// WorkflowNodeFork representation. Only pipeline children are kept as fork
// triggers; children of any other type are dropped.
func (n Node) retroMigrateFork() WorkflowNodeFork {
	fork := WorkflowNodeFork{Name: n.Name}
	if len(n.Triggers) == 0 {
		return fork
	}
	fork.Triggers = make([]WorkflowNodeForkTrigger, 0, len(n.Triggers))
	for _, t := range n.Triggers {
		if t.ChildNode.Type != NodeTypePipeline {
			continue
		}
		fork.Triggers = append(fork.Triggers, WorkflowNodeForkTrigger{
			WorkflowDestNode: t.ChildNode.retroMigrate(),
		})
	}
	return fork
}
// retroMigrateOutGoingHook converts a new-style outgoing-hook node into the
// old WorkflowNodeOutgoingHook representation. Only pipeline children are
// kept as triggers; children of any other type are dropped.
func (n Node) retroMigrateOutGoingHook() WorkflowNodeOutgoingHook {
	hook := WorkflowNodeOutgoingHook{
		Config:              n.OutGoingHookContext.Config,
		WorkflowHookModelID: n.OutGoingHookContext.HookModelID,
		Ref:                 n.Ref,
		Name:                n.Name,
	}
	if len(n.Triggers) == 0 {
		return hook
	}
	hook.Triggers = make([]WorkflowNodeOutgoingHookTrigger, 0, len(n.Triggers))
	for _, t := range n.Triggers {
		if t.ChildNode.Type != NodeTypePipeline {
			continue
		}
		hook.Triggers = append(hook.Triggers, WorkflowNodeOutgoingHookTrigger{
			WorkflowDestNode: t.ChildNode.retroMigrate(),
		})
	}
	return hook
}
// retroMigrateJoin converts a new-style join node into the old
// WorkflowNodeJoin representation. Parents are carried over by name; only
// pipeline children are kept as join triggers.
func (n Node) retroMigrateJoin() WorkflowNodeJoin {
	join := WorkflowNodeJoin{Ref: n.Ref}
	join.SourceNodeRefs = make([]string, 0, len(n.JoinContext))
	for _, parent := range n.JoinContext {
		join.SourceNodeRefs = append(join.SourceNodeRefs, parent.ParentName)
	}
	if len(n.Triggers) == 0 {
		return join
	}
	join.Triggers = make([]WorkflowNodeJoinTrigger, 0, len(n.Triggers))
	for _, t := range n.Triggers {
		if t.ChildNode.Type != NodeTypePipeline {
			continue
		}
		join.Triggers = append(join.Triggers, WorkflowNodeJoinTrigger{
			WorkflowDestNode: t.ChildNode.retroMigrate(),
		})
	}
	return join
}
// migrate converts an old WorkflowNode (and, recursively, everything it
// triggers) into the new Node representation. Plain triggers, forks and
// outgoing hooks all become child NodeTriggers. When withID is true the
// original database IDs are preserved on the node and its hooks.
//
// NOTE(review): assumes n.Context is non-nil — confirm invariant.
func (n WorkflowNode) migrate(withID bool) Node {
	newNode := Node{
		WorkflowID: n.WorkflowID,
		Type:       NodeTypePipeline,
		Name:       n.Name,
		Ref:        n.Ref,
		Context: &NodeContext{
			PipelineID:                n.PipelineID,
			ApplicationID:             n.Context.ApplicationID,
			EnvironmentID:             n.Context.EnvironmentID,
			ProjectPlatformID:         n.Context.ProjectPlatformID,
			Conditions:                n.Context.Conditions,
			DefaultPayload:            n.Context.DefaultPayload,
			DefaultPipelineParameters: n.Context.DefaultPipelineParameters,
			Mutex:                     n.Context.Mutex,
		},
		Hooks:    make([]NodeHook, 0, len(n.Hooks)),
		Triggers: make([]NodeTrigger, 0, len(n.Triggers)+len(n.Forks)+len(n.OutgoingHooks)),
	}
	// Backfill IDs from the embedded objects when only the pointer is set.
	if n.Context.ApplicationID == 0 && n.Context.Application != nil {
		newNode.Context.ApplicationID = n.Context.Application.ID
	}
	if n.Context.EnvironmentID == 0 && n.Context.Environment != nil {
		newNode.Context.EnvironmentID = n.Context.Environment.ID
	}
	if n.Context.ProjectPlatformID == 0 && n.Context.ProjectPlatform != nil {
		newNode.Context.ProjectPlatformID = n.Context.ProjectPlatform.ID
	}
	if withID {
		newNode.ID = n.ID
	}
	// Fix: default the migrated ref to the node name. The previous code
	// assigned n.Ref on the value receiver AFTER newNode.Ref had already been
	// read, so the default was dead code and migrated refs stayed empty.
	if newNode.Ref == "" {
		newNode.Ref = n.Name
	}
	for _, h := range n.Hooks {
		nh := NodeHook{
			Ref:         h.Ref,
			HookModelID: h.WorkflowHookModelID,
			Config:      h.Config,
			UUID:        h.UUID,
		}
		if withID {
			nh.ID = h.ID
		}
		newNode.Hooks = append(newNode.Hooks, nh)
	}
	// Plain triggers, forks and outgoing hooks all become child triggers of
	// the migrated node.
	for _, t := range n.Triggers {
		newNode.Triggers = append(newNode.Triggers, NodeTrigger{
			ParentNodeName: n.Name,
			ChildNode:      t.WorkflowDestNode.migrate(withID),
		})
	}
	for _, f := range n.Forks {
		newNode.Triggers = append(newNode.Triggers, NodeTrigger{
			ParentNodeName: n.Name,
			ChildNode:      f.migrate(withID),
		})
	}
	for _, h := range n.OutgoingHooks {
		newNode.Triggers = append(newNode.Triggers, NodeTrigger{
			ParentNodeName: n.Name,
			ChildNode:      h.migrate(withID),
		})
	}
	return newNode
}
// ForksMap records every fork reachable from this node into forkMap (keyed
// by fork ID) and every fork trigger into triggerMap (trigger ID -> owning
// fork name), recursing through fork children, plain triggers and outgoing
// hook triggers.
func (n *WorkflowNode) ForksMap(forkMap *map[int64]WorkflowNodeFork, triggerMap *map[int64]string) {
	for _, fork := range n.Forks {
		(*forkMap)[fork.ID] = fork
		for _, trigger := range fork.Triggers {
			(*triggerMap)[trigger.ID] = fork.Name
			trigger.WorkflowDestNode.ForksMap(forkMap, triggerMap)
		}
	}
	for _, trigger := range n.Triggers {
		trigger.WorkflowDestNode.ForksMap(forkMap, triggerMap)
	}
	for _, hook := range n.OutgoingHooks {
		for _, trigger := range hook.Triggers {
			trigger.WorkflowDestNode.ForksMap(forkMap, triggerMap)
		}
	}
}
// IsLinkedToRepo reports whether the node's application is linked to a
// repository. It is safe to call on a nil node.
func (n *WorkflowNode) IsLinkedToRepo() bool {
	if n == nil || n.Context == nil {
		return false
	}
	app := n.Context.Application
	return app != nil && app.RepositoryFullname != ""
}
// Application returns the node's application and true, or the Application
// zero value and false when the node, its context or its application is nil.
func (n *WorkflowNode) Application() (a Application, b bool) {
	if n == nil || n.Context == nil || n.Context.Application == nil {
		return a, false
	}
	return *n.Context.Application, true
}
// Environment returns the node's environment and true, or the Environment
// zero value and false when the node, its context or its environment is nil.
func (n *WorkflowNode) Environment() (e Environment, b bool) {
	if n == nil || n.Context == nil || n.Context.Environment == nil {
		return e, false
	}
	return *n.Context.Environment, true
}
// ProjectPlatform returns the node's project platform and true, or the
// ProjectPlatform zero value and false when the node, its context or its
// platform is nil.
func (n *WorkflowNode) ProjectPlatform() (p ProjectPlatform, b bool) {
	if n == nil || n.Context == nil || n.Context.ProjectPlatform == nil {
		return p, false
	}
	return *n.Context.ProjectPlatform, true
}
// EqualsTo returns true if a node has the same pipeline and context
// (application and environment IDs) as another.
//
// Fix: the previous code dereferenced n.Context unconditionally after the
// nil checks, so two nodes whose contexts were BOTH nil caused a nil-pointer
// panic. Both-nil contexts now compare equal.
func (n *WorkflowNode) EqualsTo(n1 *WorkflowNode) bool {
	if n.PipelineID != n1.PipelineID {
		return false
	}
	// Exactly one nil context means not equal.
	if (n.Context == nil) != (n1.Context == nil) {
		return false
	}
	// Both nil: nothing more to compare.
	if n.Context == nil {
		return true
	}
	if n.Context.ApplicationID != n1.Context.ApplicationID {
		return false
	}
	if n.Context.EnvironmentID != n1.Context.EnvironmentID {
		return false
	}
	return true
}
// GetNodeByRef returns the node with the given ref, checking this node first
// and then, depth-first, every child reachable through triggers, outgoing
// hooks and forks. Returns nil when not found; safe on a nil node.
func (n *WorkflowNode) GetNodeByRef(ref string) *WorkflowNode {
	if n == nil {
		return nil
	}
	if n.Ref == ref {
		return n
	}
	for i := range n.Triggers {
		if found := n.Triggers[i].WorkflowDestNode.GetNodeByRef(ref); found != nil {
			return found
		}
	}
	for i := range n.OutgoingHooks {
		hook := &n.OutgoingHooks[i]
		for j := range hook.Triggers {
			if found := hook.Triggers[j].WorkflowDestNode.GetNodeByRef(ref); found != nil {
				return found
			}
		}
	}
	for i := range n.Forks {
		fork := &n.Forks[i]
		for j := range fork.Triggers {
			if found := fork.Triggers[j].WorkflowDestNode.GetNodeByRef(ref); found != nil {
				return found
			}
		}
	}
	return nil
}
// GetForkByName returns the fork with the given name, checking this node's
// own forks first (and their subtrees), then the subtrees reachable through
// plain triggers and outgoing hooks. Returns nil when not found; safe on a
// nil node.
func (n *WorkflowNode) GetForkByName(name string) *WorkflowNodeFork {
	if n == nil {
		return nil
	}
	for i := range n.Forks {
		fork := &n.Forks[i]
		if fork.Name == name {
			return fork
		}
		for j := range fork.Triggers {
			if found := fork.Triggers[j].WorkflowDestNode.GetForkByName(name); found != nil {
				return found
			}
		}
	}
	for i := range n.Triggers {
		if found := n.Triggers[i].WorkflowDestNode.GetForkByName(name); found != nil {
			return found
		}
	}
	for i := range n.OutgoingHooks {
		hook := &n.OutgoingHooks[i]
		for j := range hook.Triggers {
			if found := hook.Triggers[j].WorkflowDestNode.GetForkByName(name); found != nil {
				return found
			}
		}
	}
	return nil
}
//GetNodeByName returns the node given its name, checking this node first and
// then, depth-first, children reachable through triggers, outgoing hooks and
// forks. Returns nil when not found; safe on a nil node.
//
// Fix: the trigger loop previously ranged by value, so the returned pointer
// referenced a loop-local copy and mutations of value fields through it were
// lost. Index into the slice instead, matching GetNodeByRef.
func (n *WorkflowNode) GetNodeByName(name string) *WorkflowNode {
	if n == nil {
		return nil
	}
	if n.Name == name {
		return n
	}
	for i := range n.Triggers {
		n2 := n.Triggers[i].WorkflowDestNode.GetNodeByName(name)
		if n2 != nil {
			return n2
		}
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			n2 := (&n.OutgoingHooks[i].Triggers[j].WorkflowDestNode).GetNodeByName(name)
			if n2 != nil {
				return n2
			}
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			n2 := (&n.Forks[i].Triggers[j].WorkflowDestNode).GetNodeByName(name)
			if n2 != nil {
				return n2
			}
		}
	}
	return nil
}
//GetNode returns the node given its id, checking this node first and then,
// depth-first, children reachable through triggers, outgoing hooks and
// forks. Returns nil when not found; safe on a nil node.
//
// Fix: the trigger loop previously ranged by value, so the returned pointer
// referenced a loop-local copy and mutations of value fields through it were
// lost. Index into the slice instead, matching GetNodeByRef.
func (n *WorkflowNode) GetNode(id int64) *WorkflowNode {
	if n == nil {
		return nil
	}
	if n.ID == id {
		return n
	}
	for i := range n.Triggers {
		n1 := n.Triggers[i].WorkflowDestNode.GetNode(id)
		if n1 != nil {
			return n1
		}
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			n2 := (&n.OutgoingHooks[i].Triggers[j].WorkflowDestNode).GetNode(id)
			if n2 != nil {
				return n2
			}
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			n2 := (&n.Forks[i].Triggers[j].WorkflowDestNode).GetNode(id)
			if n2 != nil {
				return n2
			}
		}
	}
	return nil
}
// ResetIDs zeroes this node's ID and, recursively, the IDs of every node
// reachable through triggers, outgoing hooks and forks.
func (n *WorkflowNode) ResetIDs() {
	n.ID = 0
	for i := range n.Triggers {
		n.Triggers[i].WorkflowDestNode.ResetIDs()
	}
	for i := range n.OutgoingHooks {
		hook := &n.OutgoingHooks[i]
		for j := range hook.Triggers {
			hook.Triggers[j].WorkflowDestNode.ResetIDs()
		}
	}
	for i := range n.Forks {
		fork := &n.Forks[i]
		for j := range fork.Triggers {
			fork.Triggers[j].WorkflowDestNode.ResetIDs()
		}
	}
}
// Nodes returns this node followed, depth-first, by every node reachable
// through triggers, outgoing hooks and forks.
func (n *WorkflowNode) Nodes() []WorkflowNode {
	res := []WorkflowNode{*n}
	collect := func(child *WorkflowNode) {
		res = append(res, child.Nodes()...)
	}
	for i := range n.Triggers {
		collect(&n.Triggers[i].WorkflowDestNode)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			collect(&n.OutgoingHooks[i].Triggers[j].WorkflowDestNode)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			collect(&n.Forks[i].Triggers[j].WorkflowDestNode)
		}
	}
	return res
}
// ancestor searches the tree rooted at node for the node with the given id.
// It returns (ancestor IDs, true) when id is found underneath node and
// (empty map, false) otherwise.
//
// With deep=false only the direct parent's ID ends up reported (IDs from a
// deeper recursion are propagated only when the returned map holds exactly
// one entry); with deep=true every intermediate node's ID is accumulated.
func ancestor(id int64, node *WorkflowNode, deep bool) (map[int64]bool, bool) {
	res := map[int64]bool{}
	if id == node.ID {
		// Found the target itself: it has no ancestor at this level.
		return res, true
	}
	for _, t := range node.Triggers {
		if t.WorkflowDestNode.ID == id {
			// node is the direct parent of the target.
			res[node.ID] = true
			return res, true
		}
		ids, ok := ancestor(id, &t.WorkflowDestNode, deep)
		if ok {
			if len(ids) == 1 || deep {
				for k := range ids {
					res[k] = true
				}
			}
			if deep {
				res[node.ID] = true
			}
			return res, true
		}
	}
	// Same search through the children of every fork.
	for i := range node.Forks {
		for j := range node.Forks[i].Triggers {
			destNode := &node.Forks[i].Triggers[j].WorkflowDestNode
			if destNode.ID == id {
				res[node.ID] = true
				return res, true
			}
			ids, ok := ancestor(id, destNode, deep)
			if ok {
				if len(ids) == 1 || deep {
					for k := range ids {
						res[k] = true
					}
				}
				if deep {
					res[node.ID] = true
				}
				return res, true
			}
		}
	}
	return res, false
}
// Ancestors returns all node ancestors if deep equal true, and only his direct ancestors if deep equal false
// The search starts from the workflow root; when the node is not found there,
// each join trigger tree is searched and the join's source nodes (and, when
// deep, their own ancestors) are added.
func (n *WorkflowNode) Ancestors(w *Workflow, deep bool) []int64 {
	if n == nil {
		return nil
	}
	res, ok := ancestor(n.ID, w.Root, deep)
	if !ok {
	joinLoop:
		for _, j := range w.Joins {
			for _, t := range j.Triggers {
				resAncestor, ok := ancestor(n.ID, &t.WorkflowDestNode, deep)
				if ok {
					if len(resAncestor) == 1 || deep {
						for id := range resAncestor {
							res[id] = true
						}
					}
					// The node hangs (directly or deeply) off this join: its
					// ancestors include the join's source nodes.
					if len(resAncestor) == 0 || deep {
						for _, id := range j.SourceNodeIDs {
							res[id] = true
							if deep {
								node := w.GetNode(id)
								if node != nil {
									ancerstorRes := node.Ancestors(w, deep)
									for _, id := range ancerstorRes {
										res[id] = true
									}
								}
							}
						}
					}
					// Only the first matching join trigger is considered.
					break joinLoop
				}
			}
		}
	}
	// Flatten the set into a slice (order unspecified: map iteration).
	keys := make([]int64, len(res))
	i := 0
	for k := range res {
		keys[i] = k
		i++
	}
	return keys
}
// TriggersID returns the IDs of this node's triggers and, recursively, of
// all descendant triggers.
func (n *WorkflowNode) TriggersID() []int64 {
	ids := []int64{}
	for i := range n.Triggers {
		trigger := &n.Triggers[i]
		ids = append(ids, trigger.ID)
		ids = append(ids, trigger.WorkflowDestNode.TriggersID()...)
	}
	return ids
}
// References returns this node's ref (when non-empty) followed by the refs
// of every descendant node (triggers, outgoing hooks, forks).
func (n *WorkflowNode) References() []string {
	refs := []string{}
	if n.Ref != "" {
		refs = []string{n.Ref}
	}
	collect := func(child *WorkflowNode) {
		refs = append(refs, child.References()...)
	}
	for i := range n.Triggers {
		collect(&n.Triggers[i].WorkflowDestNode)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			collect(&n.OutgoingHooks[i].Triggers[j].WorkflowDestNode)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			collect(&n.Forks[i].Triggers[j].WorkflowDestNode)
		}
	}
	return refs
}
// InvolvedApplications returns the IDs of all applications used by this node
// and its descendants. May contain duplicates.
//
// Side effect: when the context has an Application pointer but a zero
// ApplicationID, the ID is backfilled onto the context from the pointer.
func (n *WorkflowNode) InvolvedApplications() []int64 {
	res := []int64{}
	if n.Context != nil {
		if n.Context.ApplicationID == 0 && n.Context.Application != nil {
			n.Context.ApplicationID = n.Context.Application.ID
		}
		if n.Context.ApplicationID != 0 {
			res = []int64{n.Context.ApplicationID}
		}
	}
	for _, t := range n.Triggers {
		res = append(res, t.WorkflowDestNode.InvolvedApplications()...)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			res = append(res, n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.InvolvedApplications()...)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			res = append(res, n.Forks[i].Triggers[j].WorkflowDestNode.InvolvedApplications()...)
		}
	}
	return res
}
// InvolvedPipelines returns the IDs of all pipelines used by this node and
// its descendants (triggers, outgoing hooks, forks). May contain duplicates.
func (n *WorkflowNode) InvolvedPipelines() []int64 {
	ids := []int64{}
	if n.PipelineID != 0 {
		ids = append(ids, n.PipelineID)
	}
	collect := func(child *WorkflowNode) {
		ids = append(ids, child.InvolvedPipelines()...)
	}
	for i := range n.Triggers {
		collect(&n.Triggers[i].WorkflowDestNode)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			collect(&n.OutgoingHooks[i].Triggers[j].WorkflowDestNode)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			collect(&n.Forks[i].Triggers[j].WorkflowDestNode)
		}
	}
	return ids
}
// GetApplications returns the applications attached to this node and its
// descendants (triggers, outgoing hooks, forks). May contain duplicates.
func (n *WorkflowNode) GetApplications() []Application {
	apps := []Application{}
	if n.Context != nil && n.Context.Application != nil {
		apps = append(apps, *n.Context.Application)
	}
	collect := func(child *WorkflowNode) {
		apps = append(apps, child.GetApplications()...)
	}
	for i := range n.Triggers {
		collect(&n.Triggers[i].WorkflowDestNode)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			collect(&n.OutgoingHooks[i].Triggers[j].WorkflowDestNode)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			collect(&n.Forks[i].Triggers[j].WorkflowDestNode)
		}
	}
	return apps
}
// GetEnvironments returns the environments attached to this node and its
// descendants (triggers, outgoing hooks, forks). May contain duplicates.
func (n *WorkflowNode) GetEnvironments() []Environment {
	envs := []Environment{}
	if n.Context != nil && n.Context.Environment != nil {
		envs = append(envs, *n.Context.Environment)
	}
	collect := func(child *WorkflowNode) {
		envs = append(envs, child.GetEnvironments()...)
	}
	for i := range n.Triggers {
		collect(&n.Triggers[i].WorkflowDestNode)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			collect(&n.OutgoingHooks[i].Triggers[j].WorkflowDestNode)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			collect(&n.Forks[i].Triggers[j].WorkflowDestNode)
		}
	}
	return envs
}
// InvolvedEnvironments returns the IDs of all environments used by this node
// and its descendants. May contain duplicates.
//
// Side effect: when the context has an Environment pointer but a zero
// EnvironmentID, the ID is backfilled onto the context from the pointer.
func (n *WorkflowNode) InvolvedEnvironments() []int64 {
	res := []int64{}
	if n.Context != nil {
		if n.Context.EnvironmentID == 0 && n.Context.Environment != nil {
			n.Context.EnvironmentID = n.Context.Environment.ID
		}
		if n.Context.EnvironmentID != 0 {
			res = []int64{n.Context.EnvironmentID}
		}
	}
	for _, t := range n.Triggers {
		res = append(res, t.WorkflowDestNode.InvolvedEnvironments()...)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			res = append(res, n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.InvolvedEnvironments()...)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			res = append(res, n.Forks[i].Triggers[j].WorkflowDestNode.InvolvedEnvironments()...)
		}
	}
	return res
}
// InvolvedPlatforms returns the IDs of all project platforms used by this
// node and its descendants. May contain duplicates.
//
// Side effect: when the context has a ProjectPlatform pointer but a zero
// ProjectPlatformID, the ID is backfilled onto the context from the pointer.
func (n *WorkflowNode) InvolvedPlatforms() []int64 {
	res := []int64{}
	if n.Context != nil {
		if n.Context.ProjectPlatformID == 0 && n.Context.ProjectPlatform != nil {
			n.Context.ProjectPlatformID = n.Context.ProjectPlatform.ID
		}
		if n.Context.ProjectPlatformID != 0 {
			res = []int64{n.Context.ProjectPlatformID}
		}
	}
	for _, t := range n.Triggers {
		res = append(res, t.WorkflowDestNode.InvolvedPlatforms()...)
	}
	for i := range n.OutgoingHooks {
		for j := range n.OutgoingHooks[i].Triggers {
			res = append(res, n.OutgoingHooks[i].Triggers[j].WorkflowDestNode.InvolvedPlatforms()...)
		}
	}
	for i := range n.Forks {
		for j := range n.Forks[i].Triggers {
			res = append(res, n.Forks[i].Triggers[j].WorkflowDestNode.InvolvedPlatforms()...)
		}
	}
	return res
}
// CheckApplicationDeploymentStrategies checks that the node's application
// declares a deployment strategy for the node's project platform. It returns
// nil when the node has no context, no application or no platform, and an
// error when the platform is missing from the project or the application
// lacks a matching strategy.
func (n *WorkflowNode) CheckApplicationDeploymentStrategies(proj *Project) error {
	if n.Context == nil {
		return nil
	}
	if n.Context.Application == nil {
		return nil
	}
	// Resolve the platform ID from the context field, falling back to the
	// embedded platform object.
	var id = n.Context.ProjectPlatformID
	if id == 0 && n.Context.ProjectPlatform != nil {
		id = n.Context.ProjectPlatform.ID
	}
	if id == 0 {
		return nil
	}
	pf := proj.GetPlatformByID(id)
	if pf == nil {
		return fmt.Errorf("platform unavailable")
	}
	// Check the project-level copy of the application for a strategy keyed by
	// the platform name.
	for _, a := range proj.Applications {
		if a.ID == n.Context.ApplicationID || (n.Context.Application != nil && n.Context.Application.ID == a.ID) {
			if _, has := a.DeploymentStrategies[pf.Name]; !has {
				return fmt.Errorf("platform %s unavailable", pf.Name)
			}
		}
	}
	return nil
}
//WorkflowNodeTrigger is a link between two pipelines in a workflow
// It references its parent node and carries the destination node both by ID
// and as an embedded value.
type WorkflowNodeTrigger struct {
	ID int64 `json:"id" db:"id"`
	WorkflowNodeID int64 `json:"workflow_node_id" db:"workflow_node_id"`
	WorkflowDestNodeID int64 `json:"workflow_dest_node_id" db:"workflow_dest_node_id"`
	WorkflowDestNode WorkflowNode `json:"workflow_dest_node" db:"-"`
}
// WorkflowNodeForkTrigger is a link between a fork and a node
// It references its owning fork and carries the destination node both by ID
// and as an embedded value.
type WorkflowNodeForkTrigger struct {
	ID int64 `json:"id" db:"id"`
	WorkflowForkID int64 `json:"workflow_node_fork_id" db:"workflow_node_fork_id"`
	WorkflowDestNodeID int64 `json:"workflow_dest_node_id" db:"workflow_dest_node_id"`
	WorkflowDestNode WorkflowNode `json:"workflow_dest_node" db:"-"`
}
//WorkflowNodeOutgoingHookTrigger is a link between an outgoing hook and pipeline in a workflow
// It references its owning outgoing hook and carries the destination node
// both by ID and as an embedded value.
type WorkflowNodeOutgoingHookTrigger struct {
	ID int64 `json:"id" db:"id"`
	WorkflowNodeOutgoingHookID int64 `json:"workflow_node_outgoing_hook_id" db:"workflow_node_outgoing_hook_id"`
	WorkflowDestNodeID int64 `json:"workflow_dest_node_id" db:"workflow_dest_node_id"`
	WorkflowDestNode WorkflowNode `json:"workflow_dest_node" db:"-"`
}
// WorkflowNodeConditions is either an array of WorkflowNodeCondition or a lua script.
// Both forms may coexist; consumers decide which one to evaluate.
type WorkflowNodeConditions struct {
	PlainConditions []WorkflowNodeCondition `json:"plain,omitempty" yaml:"check,omitempty"`  // structured variable/operator/value checks
	LuaScript       string                  `json:"lua_script,omitempty" yaml:"script,omitempty"` // free-form lua condition script
}
// WorkflowNodeCondition represents a condition to trigger or not a pipeline in a workflow.
// Operator can be =, !=, regex.
type WorkflowNodeCondition struct {
	Variable string `json:"variable"` // name of the variable to test
	Operator string `json:"operator"` // comparison operator: =, != or regex
	Value    string `json:"value"`    // value (or pattern) to compare against
}
// WorkflowNodeContext represents a context attached on a node: the application,
// environment and platform the node runs against, plus its default payload,
// default pipeline parameters and run conditions.
type WorkflowNodeContext struct {
	ID                        int64                  `json:"id" db:"id"`
	WorkflowNodeID            int64                  `json:"workflow_node_id" db:"workflow_node_id"`
	ApplicationID             int64                  `json:"application_id" db:"application_id"`
	Application               *Application           `json:"application,omitempty" db:"-"` // loaded application; may be nil
	Environment               *Environment           `json:"environment,omitempty" db:"-"` // loaded environment; may be nil
	EnvironmentID             int64                  `json:"environment_id" db:"environment_id"`
	ProjectPlatform           *ProjectPlatform       `json:"project_platform" db:"-"` // loaded platform; may be nil
	ProjectPlatformID         int64                  `json:"project_platform_id" db:"project_platform_id"`
	DefaultPayload            interface{}            `json:"default_payload,omitempty" db:"-"` // arbitrary payload; see HasDefaultPayload / DefaultPayloadToMap
	DefaultPipelineParameters []Parameter            `json:"default_pipeline_parameters,omitempty" db:"-"`
	Conditions                WorkflowNodeConditions `json:"conditions,omitempty" db:"-"`
	Mutex                     bool                   `json:"mutex"` // presumably serializes runs of this node — TODO confirm against scheduler code
}
// HasDefaultPayload reports whether the context carries a non-empty default
// payload. The payload is flattened with the dump encoder (details, lengths
// and types suppressed) and considered present when at least one key remains.
func (c *WorkflowNodeContext) HasDefaultPayload() bool {
	if c == nil || c.DefaultPayload == nil {
		return false
	}
	enc := dump.NewDefaultEncoder(nil)
	enc.ExtraFields.DetailedMap = false
	enc.ExtraFields.DetailedStruct = false
	enc.ExtraFields.Len = false
	enc.ExtraFields.Type = false
	flat, _ := enc.ToStringMap(c.DefaultPayload)
	return len(flat) > 0
}
// DefaultPayloadToMap flattens the default payload into a string map using the
// dump encoder (details, lengths and types suppressed). A nil receiver is an
// error; a nil payload yields an empty, non-nil map.
func (c *WorkflowNodeContext) DefaultPayloadToMap() (map[string]string, error) {
	if c == nil {
		return nil, fmt.Errorf("Workflow node context is nil")
	}
	if c.DefaultPayload == nil {
		return map[string]string{}, nil
	}
	enc := dump.NewDefaultEncoder(nil)
	enc.ExtraFields.DetailedMap = false
	enc.ExtraFields.DetailedStruct = false
	enc.ExtraFields.Len = false
	enc.ExtraFields.Type = false
	return enc.ToStringMap(c.DefaultPayload)
}
// WorkflowNodeContextDefaultPayloadVCS represents a default payload when a
// workflow is attached to a repository webhook. The json tags are the flat
// "git.*" keys used by the payload maps.
type WorkflowNodeContextDefaultPayloadVCS struct {
	GitBranch     string `json:"git.branch" db:"-"`
	GitTag        string `json:"git.tag" db:"-"`
	GitHash       string `json:"git.hash" db:"-"`
	GitAuthor     string `json:"git.author" db:"-"`
	GitHashBefore string `json:"git.hash.before" db:"-"`
	GitRepository string `json:"git.repository" db:"-"`
	GitMessage    string `json:"git.message" db:"-"`
}
// IsWorkflowNodeContextDefaultPayloadVCS checks in several ways whether the
// given value looks like a default VCS payload: an exact type match, a flat
// key set identical to WorkflowNodeContextDefaultPayloadVCS, or at minimum
// the presence of the core "git.*" keys.
func IsWorkflowNodeContextDefaultPayloadVCS(i interface{}) bool {
	if _, ok := i.(WorkflowNodeContextDefaultPayloadVCS); ok {
		return true
	}

	enc := dump.NewDefaultEncoder(nil)
	enc.ExtraFields.DetailedMap = false
	enc.ExtraFields.DetailedStruct = false
	enc.ExtraFields.Len = false
	enc.ExtraFields.Type = false
	flatInput, _ := enc.ToMap(i)
	flatRef, _ := enc.ToMap(WorkflowNodeContextDefaultPayloadVCS{})

	hasKey := func(s string) bool {
		_, has := flatInput[s]
		return has
	}

	// First attempt: same number of keys and every reference key present.
	if len(flatInput) == len(flatRef) {
		match := true
		for k := range flatRef {
			if !hasKey(k) {
				match = false
				break
			}
		}
		if match {
			return true
		}
	}

	// Fallback: require the core git keys (git.tag is intentionally optional).
	return hasKey("git.branch") &&
		hasKey("git.hash") &&
		hasKey("git.author") &&
		hasKey("git.hash.before") &&
		hasKey("git.repository") &&
		hasKey("git.message")
}
// WorkflowList returns the list of the workflows for a project.
func WorkflowList(projectkey string) ([]Workflow, error) {
	body, _, err := Request("GET", fmt.Sprintf("/project/%s/workflows", projectkey), nil)
	if err != nil {
		return nil, err
	}
	workflows := []Workflow{}
	if err := json.Unmarshal(body, &workflows); err != nil {
		return nil, err
	}
	return workflows, nil
}
// WorkflowGet returns a workflow given its name.
func WorkflowGet(projectkey, name string) (*Workflow, error) {
	body, _, err := Request("GET", fmt.Sprintf("/project/%s/workflows/%s", projectkey, name), nil)
	if err != nil {
		return nil, err
	}
	workflow := Workflow{}
	if err := json.Unmarshal(body, &workflow); err != nil {
		return nil, err
	}
	return &workflow, nil
}
// WorkflowDelete calls the API to delete a workflow.
func WorkflowDelete(projectkey, name string) error {
	_, _, err := Request("DELETE", fmt.Sprintf("/project/%s/workflows/%s", projectkey, name), nil)
	return err
}
// WorkflowNodeJobRunCount reports the number of workflow node job runs
// within the [Since, Until] window.
type WorkflowNodeJobRunCount struct {
	// NOTE(review): the json tag is "version", not "count" — looks like a
	// copy/paste slip, but changing it would break the wire format; confirm
	// with API consumers before touching it.
	Count int64     `json:"version"`
	Since time.Time `json:"since"`
	Until time.Time `json:"until"`
}
// Label represents a label linked to a workflow.
type Label struct {
	ID         int64  `json:"id" db:"id"`
	Name       string `json:"name" db:"name"`
	Color      string `json:"color" db:"color"` // "#RRGGBBAA"-style hex color; see Validate
	ProjectID  int64  `json:"project_id" db:"project_id"`
	WorkflowID int64  `json:"workflow_id,omitempty" db:"-"` // set when the label is attached to a workflow; not persisted here
}
// Validate returns an error or updates the label if it is not valid:
// a missing name is an error, a missing color is replaced by a random one,
// and a present color must match ColorRegexp.
func (label *Label) Validate() error {
	if label.Name == "" {
		return WrapError(fmt.Errorf("Label must have a name"), "IsValid>")
	}
	if label.Color == "" {
		// Generate a random color as 4 bytes rendered "#xxxxxxxx".
		bytes := make([]byte, 4)
		if _, err := rand.Read(bytes); err != nil {
			return WrapError(err, "IsValid> Cannot create random color")
		}
		label.Color = "#" + hex.EncodeToString(bytes)
	} else {
		// MatchString avoids the []byte copy the old Match([]byte(...)) made.
		if !ColorRegexp.MatchString(label.Color) {
			// NOTE(review): ErrIconBadFormat reads icon-specific; confirm a
			// dedicated color-format error is not intended here.
			return ErrIconBadFormat
		}
	}
	return nil
}
|
package handlers
import (
"crypto/rand"
"encoding/hex"
"flag"
"fmt"
"github.com/FactomProject/cli"
"github.com/FactomProject/serveridentity/functions"
"github.com/FactomProject/serveridentity/identity"
"io"
"os"
)
// SCRIPTNAME is the base name of the generated shell script; overridable via -n.
var SCRIPTNAME string = "fullidentity"

// PRINT_OUT toggles progress/curl output to stdout.
var PRINT_OUT bool = true
/*
* This file is only used for testing purposes
*/
// Full registers the "full" subcommand at package init time via an
// immediately-invoked func literal. It dispatches on the first positional
// argument: "elements", "fresh", "help", or (default) an ES address.
var Full = func() *sevCmd {
	identity.ShowBruteForce = PRINT_OUT
	cmd := new(sevCmd)
	cmd.helpMsg = "serveridentity full 'fresh'|ESAddress|elements"
	cmd.description = "Create new identity and subchain as well as entries in the subchain."
	cmd.execFunc = func(args []string) {
		// Re-point the flag package at this subcommand's args before parsing.
		os.Args = args
		flag.Parse()
		args = flag.Args()
		c := cli.New()
		c.HandleFunc("elements", elementsFull)
		c.HandleFunc("fresh", freshFull)
		c.HandleDefaultFunc(existingECFull)
		c.HandleFunc("help", func(args []string) {
			fmt.Println(cmd.helpMsg)
		})
		c.Execute(args)
	}
	Help.Add("Create a full Identity", cmd)
	return cmd
}()
// existingECFull handles "full <ESAddress>": builds a full identity funded by
// the entry-credit secret address given as args[0]. A 52-char argument is
// treated as a human-readable Es... address; 64 hex chars are rejected.
func existingECFull(args []string) {
	if len(args) == 0 {
		Help.All()
		return
	}
	// Re-point the flag package at this subcommand's args before parsing.
	os.Args = args
	filename := flag.String("n", "fullidentity", "Change the script name")
	garble := flag.Bool("b", false, "Make incorrect entries")
	flag.Parse()
	SCRIPTNAME = *filename
	l := len(args[0])
	if l != 64 && l != 52 {
		fmt.Println("serveridentity full 'fresh'|ESAddress")
		fmt.Println("Invalid ES Address entered, exiting program...")
		return
	} else if l == 52 {
		// Generate all new Keys from EC
		sid := generateKeysFromEC(args[0], PRINT_OUT)
		if sid == nil {
			return
		}
		fullStart(sid, *garble)
	} else if l == 64 {
		fmt.Println("Only base58 human readable key accepted.")
	}
}
// freshFull handles "full fresh": generates a brand-new key set (including a
// new EC address) and runs the full identity script generation.
func freshFull(args []string) {
	os.Args = args
	filename := flag.String("n", "fullidentity", "Change the script name")
	garble := flag.Bool("b", false, "Make incorrect entries")
	flag.Parse()
	SCRIPTNAME = *filename
	// Generate all new Keys
	sid := generateKeys(PRINT_OUT)
	if sid == nil {
		return
	}
	fullStart(sid, *garble)
}
// elementsFull handles "full elements [ESAddress]": like existingECFull but
// emits factom-cli element commands instead of curl commands. Without an
// address it falls back to a hard-coded test ES address.
func elementsFull(args []string) {
	os.Args = args
	filename := flag.String("n", "fullidentity", "Change the script name")
	flag.Parse()
	SCRIPTNAME = *filename
	var sid *functions.ServerIdentity
	if len(args) > 1 {
		fmt.Println(args[1])
		l := len(args[1])
		if l != 52 {
			fmt.Println("serveridentity elements ESAddress")
			fmt.Println("Invalid ES Address entered, exiting program...")
			return
		} else {
			// Generate all new Keys from EC
			sid = generateKeysFromEC(args[1], PRINT_OUT)
			if sid == nil {
				return
			}
		}
	} else {
		// Hard-coded test-only ES address (this file is for testing purposes).
		sid = generateKeysFromEC("Es2Rf7iM6PdsqfYCo3D1tnAR65SkLENyWJG1deUzpRMQmbh9F3eG", PRINT_OUT)
		if sid == nil {
			return
		}
	}
	fullStartElements(sid)
}
// fullStart writes a shell script (through the package-level `file` handle)
// that creates a full server identity on-chain: root identity chain, subchain,
// and the subchain's initial key entries (bitcoin key, block signing key,
// matryoshka hash), followed by echo lines summarizing the generated keys.
// When garble is true, deliberately invalid entries are appended for negative
// testing (wrong identity level, shifted key bytes, bad BTC key).
func fullStart(sid *functions.ServerIdentity, garble bool) {
	if garble {
		fmt.Println("Incorrect curls also provided")
	}
	file = makeFile(SCRIPTNAME)
	defer file.Close()
	// bar is an escaped-asterisk separator line used by the echoed summary.
	var bar string
	for i := 0; i < 76; i++ {
		bar = bar + "\\*"
	}
	if PRINT_OUT {
		PrintHeader("Root Chain Curls")
	}
	createIdentityChain(sid, PRINT_OUT)
	registerIdentityChain(sid, PRINT_OUT)
	if PRINT_OUT {
		PrintHeader("Sub Chain Curls")
	}
	createSubChain(sid, PRINT_OUT)
	registerSubChain(sid, PRINT_OUT)
	// 20 random bytes for the bitcoin key entry; read errors deliberately ignored.
	random := rand.Reader
	var r [20]byte
	_, _ = io.ReadFull(random, r[:20])
	btcKeyHex := r[:20]
	// First 32 bytes of the level-1 identity key.
	// NOTE(review): assumes GetPrivateKey returns >= 32 bytes — confirm.
	p := sid.IDSet.IdentityLevel[0].GetPrivateKey()
	priv := p[:32]
	file.WriteString("sleep 1\n")
	strCom, strRev, err := functions.CreateNewBitcoinKey(sid.RootChainID, sid.SubChainID, 0, 0, btcKeyHex, priv, sid.ECAddr)
	if err != nil {
		panic(err)
	}
	writeCurlCmd(file, "New Bitcoin Key", strCom, strRev)
	strCom, strRev, newPriv, err := functions.CreateNewBlockSignEntry(sid.RootChainID, sid.SubChainID, priv, sid.ECAddr)
	if err != nil {
		panic(err)
	}
	writeCurlCmd(file, "New Block Signing Key", strCom, strRev)
	strCom, strRev, mHash, err := functions.CreateNewMHash(sid.RootChainID, sid.SubChainID, priv, sid.RootChainID, sid.ECAddr)
	if err != nil {
		panic(err)
	}
	writeCurlCmd(file, "New Matryoshka Hash", strCom, strRev)
	// Echoed summary of all generated identity material.
	file.WriteString("echo " + bar + "\n")
	file.WriteString("echo Identity Info\n")
	file.WriteString("echo " + bar + "\n")
	file.WriteString("echo Identity Chain: " + sid.RootChainID + "\n")
	file.WriteString("echo Identity SubChain: " + sid.SubChainID + "\n")
	file.WriteString("echo EC Public : " + sid.ECAddr.PubString() + "\n")
	file.WriteString("echo EC Private: " + sid.ECAddr.SecString() + "\n")
	file.WriteString("echo \n")
	file.WriteString("echo Private Keys\n")
	for i, r := range sid.IDSet.IdentityLevel {
		file.WriteString(fmt.Sprintf("echo Level %d: %s\n", i+1, r.HumanReadablePrivate()))
	}
	file.WriteString("echo \n")
	file.WriteString("echo BTC Key: " + hex.EncodeToString(btcKeyHex) + "\n")
	// newPriv appears to hold secret||public; split into two 64-hex-char lines.
	keyString := hex.EncodeToString(newPriv)
	keyString = "\n echo - Sec: " + keyString[:64] + "\n echo - Pub: " + keyString[64:]
	file.WriteString("echo Block Signing Key: " + keyString + "\n")
	file.WriteString("echo \n")
	file.WriteString("echo MHashSeed: " + sid.RootChainID + "\n")
	file.WriteString("echo MHash: " + mHash + "\n")
	if garble {
		// Entry signed with the wrong (level-3) identity key.
		PrintHeader("GARBLE: Wrong Key")
		p = sid.IDSet.IdentityLevel[2].GetPrivateKey()
		priv = p[:32]
		file.WriteString("sleep 1\n")
		strCom, strRev, err = functions.CreateNewBitcoinKey(sid.RootChainID, sid.SubChainID, 0, 0, btcKeyHex, priv, sid.ECAddr)
		if err != nil {
			// errors expected for garbled entries; deliberately ignored
			//panic(err)
		}
		writeCurlCmd(file, "New Bitcoin Key", strCom, strRev)
		strCom, strRev, newPriv, err = functions.CreateNewBlockSignEntry(sid.RootChainID, sid.SubChainID, priv, sid.ECAddr)
		if err != nil {
			//panic(err)
		}
		writeCurlCmd(file, "New Block Signing Key", strCom, strRev)
		strCom, strRev, mhash, err := functions.CreateNewMHash(sid.RootChainID, sid.SubChainID, priv, sid.RootChainID, sid.ECAddr)
		if err != nil {
			//panic(err)
		}
		writeCurlCmd(file, "New Matryoshka Hash", strCom, strRev)
		// Truncated BTC key and byte-shifted private key.
		PrintHeader("GARBLE: Bad Key & BTC KEY")
		btcKeyHex = []byte{0x00, 0x00, 0x00}
		p = sid.IDSet.IdentityLevel[0].GetPrivateKey()
		priv = p[1:33]
		file.WriteString("sleep 1\n")
		strCom, strRev, err = functions.CreateNewBitcoinKey(sid.RootChainID, sid.SubChainID, 0, 0, btcKeyHex, priv, sid.ECAddr)
		if err != nil {
			//panic(err)
		}
		writeCurlCmd(file, "New Bitcoin Key", strCom, strRev)
		strCom, strRev, newPriv, err = functions.CreateNewBlockSignEntry(sid.RootChainID, sid.SubChainID, priv, sid.ECAddr)
		if err != nil {
			//panic(err)
		}
		writeCurlCmd(file, "New Block Signing Key", strCom, strRev)
		strCom, strRev, mhash, err = functions.CreateNewMHash(sid.RootChainID, sid.SubChainID, priv, sid.RootChainID, sid.ECAddr)
		if err != nil {
			//panic(err)
		}
		writeCurlCmd(file, "New Matryoshka Hash", strCom, strRev)
		file.WriteString("echo \n")
		file.WriteString("echo BTC Key: " + hex.EncodeToString(btcKeyHex) + "\n")
		file.WriteString("echo Block Signing Key: " + hex.EncodeToString(newPriv) + "\n")
		file.WriteString("echo MHash: " + mhash + "\n")
	}
}
// cliFormat builds a shell line that pipes an empty stdin into factom-cli
// running the given subcommand with the EC address appended.
func cliFormat(cliCommand string, ECaddress string) string {
	return "echo -n \"\" | factom-cli " + cliCommand + " " + ECaddress
}
// fullStartElements prints factom-cli commands (instead of curl scripts) that
// create and register the identity chain and subchain, then the block signing
// entry. The timestamp and signature are left to the shell via $now / $sig.
// The large commented block below is the old curl-based flow kept for reference.
func fullStartElements(sid *functions.ServerIdentity) {
	file = makeFile(SCRIPTNAME)
	defer file.Close()
	// var bar string
	if PRINT_OUT {
		//PrintHeader("Root Chain Curls")
	}
	ice, err := functions.CreateIdentityChainElements(sid)
	if err != nil {
		panic(err)
	}
	f := cliFormat(ice, sid.ECAddr.String())
	fmt.Println(f)
	icr, err := functions.RegisterServerIdentityElements(sid)
	if err != nil {
		panic(err)
	}
	f = cliFormat(icr, sid.ECAddr.String())
	fmt.Println(f)
	sce, err := functions.CreateSubChainElements(sid)
	if err != nil {
		panic(err)
	}
	f = cliFormat(sce, sid.ECAddr.String())
	fmt.Println(f)
	scr, err := functions.RegisterSubChainElements(sid)
	if err != nil {
		panic(err)
	}
	f = cliFormat(scr, sid.ECAddr.String())
	fmt.Println(f)
	// First 32 bytes of the level-1 identity key, used to sign the entry.
	p := sid.IDSet.IdentityLevel[0].GetPrivateKey()
	lowestLevelSigningKey := p[:32]
	// NOTE(review): 32 bytes hex-encode to 64 chars, so the %032x min-width
	// has no effect here; %x (or %064x if padding was intended) is equivalent.
	lowestLevelSigningKeyHex := fmt.Sprintf("%032x", lowestLevelSigningKey)
	bse, bsPriv, err := functions.CreateNewBlockSignEntryElements(sid)
	if err != nil {
		panic(err)
	}
	// $now: current unix time as 16 hex digits, appended before signing.
	fmt.Println("now=$(printf '%016x' $(date +%s))")
	unsignedUntimedBse, _ := functions.CreateNewBlockSignEntryUnsigned(sid, bsPriv)
	fmt.Printf("sig=$(signwithed25519 %s$now %s)\n", unsignedUntimedBse, lowestLevelSigningKeyHex)
	f = cliFormat(bse, sid.ECAddr.String())
	fmt.Println(f)
	//strCom, strRev, newPriv, err := functions.CreateNewBlockSignEntry(sid.RootChainID, sid.SubChainID, priv, sid.ECAddr)
	//modified to here so far
	/*
		random := rand.Reader
		var r [20]byte
		_, _ = io.ReadFull(random, r[:20])
		btcKeyHex := r[:20]
		p := sid.IDSet.IdentityLevel[0].GetPrivateKey()
		priv := p[:32]
		file.WriteString("sleep 1\n")
		strCom, strRev, err := functions.CreateNewBitcoinKey(sid.RootChainID, sid.SubChainID, 0, 0, btcKeyHex, priv, sid.ECAddr)
		if err != nil {
			panic(err)
		}
		writeCurlCmd(file, "New Bitcoin Key", strCom, strRev)
		strCom, strRev, newPriv, err := functions.CreateNewBlockSignEntry(sid.RootChainID, sid.SubChainID, priv, sid.ECAddr)
		if err != nil {
			panic(err)
		}
		writeCurlCmd(file, "New Block Signing Key", strCom, strRev)
		strCom, strRev, mHash, err := functions.CreateNewMHash(sid.RootChainID, sid.SubChainID, priv, sid.RootChainID, sid.ECAddr)
		if err != nil {
			panic(err)
		}
		writeCurlCmd(file, "New Matryoshka Hash", strCom, strRev)
		file.WriteString("echo " + bar + "\n")
		file.WriteString("echo Identity Info\n")
		file.WriteString("echo " + bar + "\n")
		file.WriteString("echo Identity Chain: " + sid.RootChainID + "\n")
		file.WriteString("echo Identity SubChain: " + sid.SubChainID + "\n")
		file.WriteString("echo EC Public : " + sid.ECAddr.PubString() + "\n")
		file.WriteString("echo EC Private: " + sid.ECAddr.SecString() + "\n")
		file.WriteString("echo \n")
		file.WriteString("echo Private Keys\n")
		for i, r := range sid.IDSet.IdentityLevel {
			file.WriteString(fmt.Sprintf("echo Level %d: %s\n", i+1, r.HumanReadablePrivate()))
		}
		file.WriteString("echo \n")
		file.WriteString("echo BTC Key: " + hex.EncodeToString(btcKeyHex) + "\n")
		keyString := hex.EncodeToString(newPriv)
		keyString = "\n echo - Sec: " + keyString[:64] + "\n echo - Pub: " + keyString[64:]
		file.WriteString("echo Block Signing Key: " + keyString + "\n")
		file.WriteString("echo \n")
		file.WriteString("echo MHashSeed: " + sid.RootChainID + "\n")
		file.WriteString("echo MHash: " + mHash + "\n")
	*/
}
Now prints the block signing key pair (private and public) when generating elements.
package handlers
import (
"crypto/rand"
"encoding/hex"
"flag"
"fmt"
"github.com/FactomProject/cli"
ed "github.com/FactomProject/ed25519"
"github.com/FactomProject/serveridentity/functions"
"github.com/FactomProject/serveridentity/identity"
"io"
"os"
)
// SCRIPTNAME is the base name of the generated shell script; overridable via -n.
var SCRIPTNAME string = "fullidentity"

// PRINT_OUT toggles progress/curl output to stdout.
var PRINT_OUT bool = true
/*
* This file is only used for testing purposes
*/
// Full registers the "full" subcommand at package init time via an
// immediately-invoked func literal. It dispatches on the first positional
// argument: "elements", "fresh", "help", or (default) an ES address.
var Full = func() *sevCmd {
	identity.ShowBruteForce = PRINT_OUT
	cmd := new(sevCmd)
	cmd.helpMsg = "serveridentity full 'fresh'|ESAddress|elements"
	cmd.description = "Create new identity and subchain as well as entries in the subchain."
	cmd.execFunc = func(args []string) {
		// Re-point the flag package at this subcommand's args before parsing.
		os.Args = args
		flag.Parse()
		args = flag.Args()
		c := cli.New()
		c.HandleFunc("elements", elementsFull)
		c.HandleFunc("fresh", freshFull)
		c.HandleDefaultFunc(existingECFull)
		c.HandleFunc("help", func(args []string) {
			fmt.Println(cmd.helpMsg)
		})
		c.Execute(args)
	}
	Help.Add("Create a full Identity", cmd)
	return cmd
}()
// existingECFull handles "full <ESAddress>": builds a full identity funded by
// the entry-credit secret address given as args[0]. A 52-char argument is
// treated as a human-readable Es... address; 64 hex chars are rejected.
func existingECFull(args []string) {
	if len(args) == 0 {
		Help.All()
		return
	}
	// Re-point the flag package at this subcommand's args before parsing.
	os.Args = args
	filename := flag.String("n", "fullidentity", "Change the script name")
	garble := flag.Bool("b", false, "Make incorrect entries")
	flag.Parse()
	SCRIPTNAME = *filename
	l := len(args[0])
	if l != 64 && l != 52 {
		fmt.Println("serveridentity full 'fresh'|ESAddress")
		fmt.Println("Invalid ES Address entered, exiting program...")
		return
	} else if l == 52 {
		// Generate all new Keys from EC
		sid := generateKeysFromEC(args[0], PRINT_OUT)
		if sid == nil {
			return
		}
		fullStart(sid, *garble)
	} else if l == 64 {
		fmt.Println("Only base58 human readable key accepted.")
	}
}
// freshFull handles "full fresh": generates a brand-new key set (including a
// new EC address) and runs the full identity script generation.
func freshFull(args []string) {
	os.Args = args
	filename := flag.String("n", "fullidentity", "Change the script name")
	garble := flag.Bool("b", false, "Make incorrect entries")
	flag.Parse()
	SCRIPTNAME = *filename
	// Generate all new Keys
	sid := generateKeys(PRINT_OUT)
	if sid == nil {
		return
	}
	fullStart(sid, *garble)
}
// elementsFull handles "full elements [ESAddress]": like existingECFull but
// emits factom-cli element commands instead of curl commands. Without an
// address it falls back to a hard-coded test ES address.
func elementsFull(args []string) {
	os.Args = args
	filename := flag.String("n", "fullidentity", "Change the script name")
	flag.Parse()
	SCRIPTNAME = *filename
	var sid *functions.ServerIdentity
	if len(args) > 1 {
		fmt.Println(args[1])
		l := len(args[1])
		if l != 52 {
			fmt.Println("serveridentity elements ESAddress")
			fmt.Println("Invalid ES Address entered, exiting program...")
			return
		} else {
			// Generate all new Keys from EC
			sid = generateKeysFromEC(args[1], PRINT_OUT)
			if sid == nil {
				return
			}
		}
	} else {
		// Hard-coded test-only ES address (this file is for testing purposes).
		sid = generateKeysFromEC("Es2Rf7iM6PdsqfYCo3D1tnAR65SkLENyWJG1deUzpRMQmbh9F3eG", PRINT_OUT)
		if sid == nil {
			return
		}
	}
	fullStartElements(sid)
}
// fullStart writes a shell script (through the package-level `file` handle)
// that creates a full server identity on-chain: root identity chain, subchain,
// and the subchain's initial key entries (bitcoin key, block signing key,
// matryoshka hash), followed by echo lines summarizing the generated keys.
// When garble is true, deliberately invalid entries are appended for negative
// testing; errors from those entries are reported instead of being silently
// swallowed in empty if-branches as before.
func fullStart(sid *functions.ServerIdentity, garble bool) {
	if garble {
		fmt.Println("Incorrect curls also provided")
	}
	file = makeFile(SCRIPTNAME)
	defer file.Close()
	// bar is an escaped-asterisk separator line used by the echoed summary.
	var bar string
	for i := 0; i < 76; i++ {
		bar = bar + "\\*"
	}
	if PRINT_OUT {
		PrintHeader("Root Chain Curls")
	}
	createIdentityChain(sid, PRINT_OUT)
	registerIdentityChain(sid, PRINT_OUT)
	if PRINT_OUT {
		PrintHeader("Sub Chain Curls")
	}
	createSubChain(sid, PRINT_OUT)
	registerSubChain(sid, PRINT_OUT)
	// 20 random bytes for the bitcoin key entry; read errors deliberately ignored.
	random := rand.Reader
	var r [20]byte
	_, _ = io.ReadFull(random, r[:20])
	btcKeyHex := r[:20]
	// First 32 bytes of the level-1 identity key.
	// NOTE(review): assumes GetPrivateKey returns >= 32 bytes — confirm.
	p := sid.IDSet.IdentityLevel[0].GetPrivateKey()
	priv := p[:32]
	file.WriteString("sleep 1\n")
	strCom, strRev, err := functions.CreateNewBitcoinKey(sid.RootChainID, sid.SubChainID, 0, 0, btcKeyHex, priv, sid.ECAddr)
	if err != nil {
		panic(err)
	}
	writeCurlCmd(file, "New Bitcoin Key", strCom, strRev)
	strCom, strRev, newPriv, err := functions.CreateNewBlockSignEntry(sid.RootChainID, sid.SubChainID, priv, sid.ECAddr)
	if err != nil {
		panic(err)
	}
	writeCurlCmd(file, "New Block Signing Key", strCom, strRev)
	strCom, strRev, mHash, err := functions.CreateNewMHash(sid.RootChainID, sid.SubChainID, priv, sid.RootChainID, sid.ECAddr)
	if err != nil {
		panic(err)
	}
	writeCurlCmd(file, "New Matryoshka Hash", strCom, strRev)
	// Echoed summary of all generated identity material.
	file.WriteString("echo " + bar + "\n")
	file.WriteString("echo Identity Info\n")
	file.WriteString("echo " + bar + "\n")
	file.WriteString("echo Identity Chain: " + sid.RootChainID + "\n")
	file.WriteString("echo Identity SubChain: " + sid.SubChainID + "\n")
	file.WriteString("echo EC Public : " + sid.ECAddr.PubString() + "\n")
	file.WriteString("echo EC Private: " + sid.ECAddr.SecString() + "\n")
	file.WriteString("echo \n")
	file.WriteString("echo Private Keys\n")
	for i, r := range sid.IDSet.IdentityLevel {
		file.WriteString(fmt.Sprintf("echo Level %d: %s\n", i+1, r.HumanReadablePrivate()))
	}
	file.WriteString("echo \n")
	file.WriteString("echo BTC Key: " + hex.EncodeToString(btcKeyHex) + "\n")
	// newPriv appears to hold secret||public; split into two 64-hex-char lines.
	keyString := hex.EncodeToString(newPriv)
	keyString = "\n echo - Sec: " + keyString[:64] + "\n echo - Pub: " + keyString[64:]
	file.WriteString("echo Block Signing Key: " + keyString + "\n")
	file.WriteString("echo \n")
	file.WriteString("echo MHashSeed: " + sid.RootChainID + "\n")
	file.WriteString("echo MHash: " + mHash + "\n")
	if garble {
		// Entry signed with the wrong (level-3) identity key.
		PrintHeader("GARBLE: Wrong Key")
		p = sid.IDSet.IdentityLevel[2].GetPrivateKey()
		priv = p[:32]
		file.WriteString("sleep 1\n")
		strCom, strRev, err = functions.CreateNewBitcoinKey(sid.RootChainID, sid.SubChainID, 0, 0, btcKeyHex, priv, sid.ECAddr)
		if err != nil {
			// Expected for garbled input; report but keep going so the bad
			// entry is still written (previously an empty branch).
			fmt.Println("garble:", err)
		}
		writeCurlCmd(file, "New Bitcoin Key", strCom, strRev)
		strCom, strRev, newPriv, err = functions.CreateNewBlockSignEntry(sid.RootChainID, sid.SubChainID, priv, sid.ECAddr)
		if err != nil {
			fmt.Println("garble:", err)
		}
		writeCurlCmd(file, "New Block Signing Key", strCom, strRev)
		strCom, strRev, mhash, err := functions.CreateNewMHash(sid.RootChainID, sid.SubChainID, priv, sid.RootChainID, sid.ECAddr)
		if err != nil {
			fmt.Println("garble:", err)
		}
		writeCurlCmd(file, "New Matryoshka Hash", strCom, strRev)
		// Truncated BTC key and byte-shifted private key.
		PrintHeader("GARBLE: Bad Key & BTC KEY")
		btcKeyHex = []byte{0x00, 0x00, 0x00}
		p = sid.IDSet.IdentityLevel[0].GetPrivateKey()
		priv = p[1:33]
		file.WriteString("sleep 1\n")
		strCom, strRev, err = functions.CreateNewBitcoinKey(sid.RootChainID, sid.SubChainID, 0, 0, btcKeyHex, priv, sid.ECAddr)
		if err != nil {
			fmt.Println("garble:", err)
		}
		writeCurlCmd(file, "New Bitcoin Key", strCom, strRev)
		strCom, strRev, newPriv, err = functions.CreateNewBlockSignEntry(sid.RootChainID, sid.SubChainID, priv, sid.ECAddr)
		if err != nil {
			fmt.Println("garble:", err)
		}
		writeCurlCmd(file, "New Block Signing Key", strCom, strRev)
		strCom, strRev, mhash, err = functions.CreateNewMHash(sid.RootChainID, sid.SubChainID, priv, sid.RootChainID, sid.ECAddr)
		if err != nil {
			fmt.Println("garble:", err)
		}
		writeCurlCmd(file, "New Matryoshka Hash", strCom, strRev)
		file.WriteString("echo \n")
		file.WriteString("echo BTC Key: " + hex.EncodeToString(btcKeyHex) + "\n")
		file.WriteString("echo Block Signing Key: " + hex.EncodeToString(newPriv) + "\n")
		file.WriteString("echo MHash: " + mhash + "\n")
	}
}
// cliFormat builds a shell line that pipes an empty stdin into factom-cli
// running the given subcommand with the EC address appended.
func cliFormat(cliCommand string, ECaddress string) string {
	return "echo -n \"\" | factom-cli " + cliCommand + " " + ECaddress
}
// fullStartElements prints factom-cli commands that create and register the
// identity chain and subchain, plus the block signing entry, in dependency
// order. It also prints the block signing key pair. Timestamp and signature
// are left to the shell via $now / $sig. The commented block below is the old
// curl-based flow kept for reference.
func fullStartElements(sid *functions.ServerIdentity) {
	file = makeFile(SCRIPTNAME)
	defer file.Close()
	// var bar string
	if PRINT_OUT {
		//PrintHeader("Root Chain Curls")
	}
	ice, err := functions.CreateIdentityChainElements(sid)
	if err != nil {
		panic(err)
	}
	fice := cliFormat(ice, sid.ECAddr.String())
	icr, err := functions.RegisterServerIdentityElements(sid)
	if err != nil {
		panic(err)
	}
	ficr := cliFormat(icr, sid.ECAddr.String())
	sce, err := functions.CreateSubChainElements(sid)
	if err != nil {
		panic(err)
	}
	fsce := cliFormat(sce, sid.ECAddr.String())
	scr, err := functions.RegisterSubChainElements(sid)
	if err != nil {
		panic(err)
	}
	fscr := cliFormat(scr, sid.ECAddr.String())
	// First 32 bytes of the level-1 identity key, used to sign the entry.
	p := sid.IDSet.IdentityLevel[0].GetPrivateKey()
	lowestLevelSigningKey := p[:32]
	// NOTE(review): 32 bytes hex-encode to 64 chars, so the %032x min-width
	// has no effect here; %x (or %064x if padding was intended) is equivalent.
	lowestLevelSigningKeyHex := fmt.Sprintf("%032x", lowestLevelSigningKey)
	bse, bsPriv, err := functions.CreateNewBlockSignEntryElements(sid)
	if err != nil {
		panic(err)
	}
	unsignedUntimedBse, _ := functions.CreateNewBlockSignEntryUnsigned(sid, bsPriv)
	fbse := cliFormat(bse, sid.ECAddr.String())
	// Print the block signing key pair (same %032x width caveat as above).
	bsPrivHex := fmt.Sprintf("%032x", bsPriv)
	fmt.Printf("block signing private key: %s\n", bsPrivHex)
	// Derive the ed25519 public key from the first 32 bytes of bsPriv.
	var priv [64]byte
	copy(priv[:32], bsPriv[:32])
	bsPub := ed.GetPublicKey(&priv)
	fmt.Printf("block signing public key: %032x\n", *bsPub)
	fmt.Println()
	// Emit the commands in dependency order.
	fmt.Println(fice)
	fmt.Println(ficr)
	fmt.Println(fsce)
	fmt.Println(fscr)
	// $now: current unix time as 16 hex digits, appended before signing.
	fmt.Println("now=$(printf '%016x' $(date +%s))")
	fmt.Printf("sig=$(signwithed25519 %s$now %s)\n", unsignedUntimedBse, lowestLevelSigningKeyHex)
	fmt.Println(fbse)
	//strCom, strRev, newPriv, err := functions.CreateNewBlockSignEntry(sid.RootChainID, sid.SubChainID, priv, sid.ECAddr)
	//modified to here so far
	/*
		random := rand.Reader
		var r [20]byte
		_, _ = io.ReadFull(random, r[:20])
		btcKeyHex := r[:20]
		p := sid.IDSet.IdentityLevel[0].GetPrivateKey()
		priv := p[:32]
		file.WriteString("sleep 1\n")
		strCom, strRev, err := functions.CreateNewBitcoinKey(sid.RootChainID, sid.SubChainID, 0, 0, btcKeyHex, priv, sid.ECAddr)
		if err != nil {
			panic(err)
		}
		writeCurlCmd(file, "New Bitcoin Key", strCom, strRev)
		strCom, strRev, newPriv, err := functions.CreateNewBlockSignEntry(sid.RootChainID, sid.SubChainID, priv, sid.ECAddr)
		if err != nil {
			panic(err)
		}
		writeCurlCmd(file, "New Block Signing Key", strCom, strRev)
		strCom, strRev, mHash, err := functions.CreateNewMHash(sid.RootChainID, sid.SubChainID, priv, sid.RootChainID, sid.ECAddr)
		if err != nil {
			panic(err)
		}
		writeCurlCmd(file, "New Matryoshka Hash", strCom, strRev)
		file.WriteString("echo " + bar + "\n")
		file.WriteString("echo Identity Info\n")
		file.WriteString("echo " + bar + "\n")
		file.WriteString("echo Identity Chain: " + sid.RootChainID + "\n")
		file.WriteString("echo Identity SubChain: " + sid.SubChainID + "\n")
		file.WriteString("echo EC Public : " + sid.ECAddr.PubString() + "\n")
		file.WriteString("echo EC Private: " + sid.ECAddr.SecString() + "\n")
		file.WriteString("echo \n")
		file.WriteString("echo Private Keys\n")
		for i, r := range sid.IDSet.IdentityLevel {
			file.WriteString(fmt.Sprintf("echo Level %d: %s\n", i+1, r.HumanReadablePrivate()))
		}
		file.WriteString("echo \n")
		file.WriteString("echo BTC Key: " + hex.EncodeToString(btcKeyHex) + "\n")
		keyString := hex.EncodeToString(newPriv)
		keyString = "\n echo - Sec: " + keyString[:64] + "\n echo - Pub: " + keyString[64:]
		file.WriteString("echo Block Signing Key: " + keyString + "\n")
		file.WriteString("echo \n")
		file.WriteString("echo MHashSeed: " + sid.RootChainID + "\n")
		file.WriteString("echo MHash: " + mHash + "\n")
	*/
}
|
package handlers
import (
"errors"
"fmt"
"github.com/codegangsta/cli"
"github.com/dmonay/okra/common"
"github.com/gin-gonic/gin"
"github.com/tommy351/gin-cors"
"gopkg.in/mgo.v2"
"gopkg.in/yaml.v1"
"io/ioutil"
"log"
"os"
)
// getConfig loads the YAML config file referenced by the global --config flag
// into a common.Config. A missing file yields a "config path not valid" error.
func getConfig(c *cli.Context) (common.Config, error) {
	yamlPath := c.GlobalString("config")
	config := common.Config{}
	if _, err := os.Stat(yamlPath); err != nil {
		return config, errors.New("config path not valid")
	}
	ymlData, err := ioutil.ReadFile(yamlPath)
	if err != nil {
		return config, err
	}
	// ymlData is already a []byte; the old []byte(ymlData) conversion was a no-op.
	err = yaml.Unmarshal(ymlData, &config)
	return config, err
}
// Run wires the HTTP API routes to their handlers and starts the server.
//
// CORS: the local dev frontend origin when $MONGOHQ_URL points at a local
// Mongo, otherwise $ALLOWED_DOMAIN. The listen port is $PORT, falling back
// to cfg.SvcHost.
func Run(cfg common.Config) error {
	// initialize mongo
	mongodb, err := InitMongo()
	if err != nil {
		// Log and propagate; the CLI action decides whether to exit.
		// (Previously log.Fatalln made the following return unreachable.)
		log.Println("\x1b[31;1mMongoDB failed to initialize\x1b[0m", err)
		return err
	}
	// defer mongodb.Close()
	doWorkResource := &DoWorkResource{
		mongo: mongodb,
	}
	r := gin.New()
	// Build the CORS allow-list from a zero-length slice: the previous
	// make([]string, 1) + append left a spurious empty-string origin.
	allowed := make([]string, 0, 1)
	env := os.Getenv("MONGOHQ_URL")
	if env == "localhost:27017" {
		allowed = append(allowed, "http://localhost:5555")
	} else {
		allowed = append(allowed, os.Getenv("ALLOWED_DOMAIN"))
	}
	fmt.Println(allowed)
	// middlewares
	r.Use(cors.Middleware(cors.Options{AllowOrigins: allowed}))
	r.Use(gin.Logger())
	r.Use(gin.Recovery())
	// user
	r.POST("/register", doWorkResource.Register)
	r.GET("/user/:gid", doWorkResource.GetOneUser)
	r.GET("/get/users/all/:user", doWorkResource.GetAllUsers)
	// orgs
	r.POST("/create/organization", doWorkResource.CreateOrg)
	r.POST("/update/organization", doWorkResource.UpdateOrgName)
	r.GET("/get/orgs/all/:userid", doWorkResource.GetAllOrgs)
	r.GET("/get/orgs/members/:org", doWorkResource.GetMembers)
	// trees
	r.POST("/create/tree/:organization", doWorkResource.CreateTree)
	r.POST("/update/tree/name/:organization/:treeid", doWorkResource.UpdateTreeName)
	r.GET("/get/trees/:organization/:treeid", doWorkResource.GetTree)
	r.GET("/get/trees/:organization", doWorkResource.GetAllTrees)
	// mission
	r.POST("/update/mission/:organization", doWorkResource.UpdateMission)
	// members
	r.POST("/update/members/:organization", doWorkResource.AddMembers)
	r.DELETE("/update/members/:organization", doWorkResource.DeleteMembers)
	// objectives and key results
	r.POST("/create/objective/:organization", doWorkResource.CreateObjective)
	r.POST("/update/objective/properties/:organization/:treeid/:objective", doWorkResource.UpdateObjProperties)
	r.POST("/create/kr/:organization/:objective", doWorkResource.CreateKeyResult)
	r.POST("/update/kr/properties/:organization/:treeid/:objective/:kr", doWorkResource.UpdateKrProperties)
	// tasks
	r.POST("/create/task/:organization/:objective/:kr", doWorkResource.CreateTask)
	r.POST("/update/task/properties/:organization/:treeid/:objective/:kr/:task", doWorkResource.UpdateTaskProperties)
	port := os.Getenv("PORT")
	if port == "" {
		port = cfg.SvcHost
	}
	fmt.Println("port: ", port)
	// r.Run blocks; surface its error instead of discarding it.
	return r.Run(":" + port)
}
// InitMongo connects to the MongoDB instance described by $MONGOHQ_URL and
// $MONGOHQ_DB and returns a handle on the database.
//
// All failure modes now surface as returned errors (the caller decides to
// exit). Previously missing env vars called os.Exit and a failed Dial hit
// log.Fatalln, which made the error return value dead code.
func InitMongo() (*mgo.Database, error) {
	uri := os.Getenv("MONGOHQ_URL")
	fmt.Println("uri: ", uri)
	if uri == "" {
		return nil, errors.New("no connection string provided")
	}
	dbName := os.Getenv("MONGOHQ_DB")
	fmt.Println("db: ", dbName)
	if dbName == "" {
		return nil, errors.New("no db name provided")
	}
	url := uri + "/" + dbName
	fmt.Println("full url: ", url)
	session, err := mgo.Dial(url)
	if err != nil {
		return nil, err
	}
	return session.DB(dbName), nil
}
// Commands lists the CLI subcommands exposed by this package; "server" loads
// the config and starts the HTTP server.
var Commands = []cli.Command{
	{
		Name:  "server",
		Usage: "Run the http server",
		Action: func(c *cli.Context) {
			cfg, err := getConfig(c)
			if err != nil {
				log.Fatal(err)
				// NOTE(review): unreachable — log.Fatal exits the process.
				return
			}
			fmt.Println("\x1b[32;1mYou've started the server. Rejoice!\x1b[0m")
			if err = Run(cfg); err != nil {
				log.Fatal(err)
			}
		},
	},
}
Correct the allowed CORS origin for local development.
package handlers
import (
"errors"
"fmt"
"github.com/codegangsta/cli"
"github.com/dmonay/okra/common"
"github.com/gin-gonic/gin"
"github.com/tommy351/gin-cors"
"gopkg.in/mgo.v2"
"gopkg.in/yaml.v1"
"io/ioutil"
"log"
"os"
)
// getConfig loads the YAML config file referenced by the global --config flag
// into a common.Config. A missing file yields a "config path not valid" error.
func getConfig(c *cli.Context) (common.Config, error) {
	yamlPath := c.GlobalString("config")
	config := common.Config{}
	if _, err := os.Stat(yamlPath); err != nil {
		return config, errors.New("config path not valid")
	}
	ymlData, err := ioutil.ReadFile(yamlPath)
	if err != nil {
		return config, err
	}
	// ymlData is already a []byte; the old []byte(ymlData) conversion was a no-op.
	err = yaml.Unmarshal(ymlData, &config)
	return config, err
}
// Run wires the HTTP API routes to their handlers and starts the server.
//
// CORS: the local dev frontend origin (http://localhost:3333) when
// $MONGOHQ_URL points at a local Mongo, otherwise $ALLOWED_DOMAIN. The
// listen port is $PORT, falling back to cfg.SvcHost.
func Run(cfg common.Config) error {
	// initialize mongo
	mongodb, err := InitMongo()
	if err != nil {
		// Log and propagate; the CLI action decides whether to exit.
		// (Previously log.Fatalln made the following return unreachable.)
		log.Println("\x1b[31;1mMongoDB failed to initialize\x1b[0m", err)
		return err
	}
	// defer mongodb.Close()
	doWorkResource := &DoWorkResource{
		mongo: mongodb,
	}
	r := gin.New()
	// Build the CORS allow-list from a zero-length slice: the previous
	// make([]string, 1) + append left a spurious empty-string origin.
	allowed := make([]string, 0, 1)
	env := os.Getenv("MONGOHQ_URL")
	if env == "localhost:27017" {
		allowed = append(allowed, "http://localhost:3333")
	} else {
		allowed = append(allowed, os.Getenv("ALLOWED_DOMAIN"))
	}
	fmt.Println("Allowed origin:", allowed)
	// middlewares
	r.Use(cors.Middleware(cors.Options{AllowOrigins: allowed}))
	r.Use(gin.Logger())
	r.Use(gin.Recovery())
	// user
	r.POST("/register", doWorkResource.Register)
	r.GET("/user/:gid", doWorkResource.GetOneUser)
	r.GET("/get/users/all/:user", doWorkResource.GetAllUsers)
	// orgs
	r.POST("/create/organization", doWorkResource.CreateOrg)
	r.POST("/update/organization", doWorkResource.UpdateOrgName)
	r.GET("/get/orgs/all/:userid", doWorkResource.GetAllOrgs)
	r.GET("/get/orgs/members/:org", doWorkResource.GetMembers)
	// trees
	r.POST("/create/tree/:organization", doWorkResource.CreateTree)
	r.POST("/update/tree/name/:organization/:treeid", doWorkResource.UpdateTreeName)
	r.GET("/get/trees/:organization/:treeid", doWorkResource.GetTree)
	r.GET("/get/trees/:organization", doWorkResource.GetAllTrees)
	// mission
	r.POST("/update/mission/:organization", doWorkResource.UpdateMission)
	// members
	r.POST("/update/members/:organization", doWorkResource.AddMembers)
	r.DELETE("/update/members/:organization", doWorkResource.DeleteMembers)
	// objectives and key results
	r.POST("/create/objective/:organization", doWorkResource.CreateObjective)
	r.POST("/update/objective/properties/:organization/:treeid/:objective", doWorkResource.UpdateObjProperties)
	r.POST("/create/kr/:organization/:objective", doWorkResource.CreateKeyResult)
	r.POST("/update/kr/properties/:organization/:treeid/:objective/:kr", doWorkResource.UpdateKrProperties)
	// tasks
	r.POST("/create/task/:organization/:objective/:kr", doWorkResource.CreateTask)
	r.POST("/update/task/properties/:organization/:treeid/:objective/:kr/:task", doWorkResource.UpdateTaskProperties)
	port := os.Getenv("PORT")
	if port == "" {
		port = cfg.SvcHost
	}
	fmt.Println("port: ", port)
	// r.Run blocks; surface its error instead of discarding it.
	return r.Run(":" + port)
}
// InitMongo reads MONGOHQ_URL and MONGOHQ_DB from the environment, dials the
// MongoDB server, and returns a handle to the configured database.
//
// Bug fix: the original called os.Exit/log.Fatalln on failure even though the
// function has an error return, which made the caller's error handling dead
// code. Failures are now reported to the caller instead of exiting here.
func InitMongo() (*mgo.Database, error) {
	uri := os.Getenv("MONGOHQ_URL")
	fmt.Println("uri: ", uri)
	if uri == "" {
		return nil, errors.New("no connection string provided (MONGOHQ_URL)")
	}
	dbName := os.Getenv("MONGOHQ_DB")
	fmt.Println("db: ", dbName)
	if dbName == "" {
		return nil, errors.New("no db name provided (MONGOHQ_DB)")
	}
	url := uri + "/" + dbName
	fmt.Println("full url: ", url)
	session, err := mgo.Dial(url)
	if err != nil {
		return nil, err
	}
	return session.DB(dbName), nil
}
// Commands lists the CLI subcommands exposed by this package; it is consumed
// by the main package when assembling the cli application.
var Commands = []cli.Command{
	{
		Name:  "server",
		Usage: "Run the http server",
		Action: func(c *cli.Context) {
			// Load the YAML config named by the global --config flag.
			cfg, err := getConfig(c)
			if err != nil {
				log.Fatal(err)
				return
			}
			fmt.Println("\x1b[32;1mYou've started the server. Rejoice!\x1b[0m")
			// Run blocks while serving; any returned error is fatal.
			if err = Run(cfg); err != nil {
				log.Fatal(err)
			}
		},
	},
}
|
/*
Copyright 2015-2016, RadiantBlue Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handlers
import (
"net/http"
"os"
"time"
"github.com/julienschmidt/httprouter"
"github.com/venicegeo/pzsvc-pdal/functions"
"github.com/venicegeo/pzsvc-sdk-go/job"
"github.com/venicegeo/pzsvc-sdk-go/s3"
"github.com/venicegeo/pzsvc-sdk-go/utils"
)
// makeFunction2 wraps an individual PDAL function that takes explicit
// input/output filenames. It parses the input and output filenames from the
// S3 keys, creates the local input file, downloads the source data, invokes
// fn, and — when a destination key was given — uploads the result to S3.
func makeFunction2(fn func(http.ResponseWriter, *http.Request,
	*job.OutputMsg, job.InputMsg, string, string)) utils.FunctionFunc {
	return func(w http.ResponseWriter, r *http.Request, res *job.OutputMsg,
		msg job.InputMsg) {
		var inputName, outputName string
		var fileIn, fileOut *os.File
		// Split the source S3 key string, interpreting the last element as the
		// input filename. Create the input file, throwing 500 on error.
		inputName = s3.ParseFilenameFromKey(msg.Source.Key)
		fileIn, err := os.Create(inputName)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		defer fileIn.Close()
		// If provided, split the destination S3 key string, interpreting the
		// last element as the output filename. The explicit create below is
		// disabled — presumably the wrapped function creates the output file
		// itself; TODO confirm against the functions package.
		if len(msg.Destination.Key) > 0 {
			outputName = s3.ParseFilenameFromKey(msg.Destination.Key)
			// fileOut, err = os.Create(outputName)
			// if err != nil {
			// job.InternalError(w, r, *res, err.Error())
			// return
			// }
			// defer fileOut.Close()
		}
		// Download the source data from S3, throwing 500 on error.
		err = s3.Download(fileIn, msg.Source.Bucket, msg.Source.Key)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// Run the PDAL function.
		fn(w, r, res, msg, inputName, outputName)
		// If an output has been created, upload the destination data to S3,
		// throwing 500 on error.
		if len(msg.Destination.Key) > 0 {
			fileOut, err = os.Open(outputName)
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			defer fileOut.Close()
			err = s3.Upload(fileOut, msg.Destination.Bucket, msg.Destination.Key)
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
	}
}
// PdalHandler handles PDAL jobs: it decodes the job input message, dispatches
// to the requested PDAL function by name, and reports job status (Running,
// then success or an HTTP error) back to the caller.
func PdalHandler(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	// Create the job output message. No matter what happens, we should always be
	// able to populate the StartedAt field.
	var res job.OutputMsg
	res.StartedAt = time.Now()
	msg := job.GetInputMsg(w, r, res)
	// Throw 400 if the JobInput does not specify a function.
	if msg.Function == nil {
		http.Error(w, "Must provide a function", http.StatusBadRequest)
		return
	}
	// If everything is okay up to this point, we will echo the JobInput in the
	// JobOutput and mark the job as Running.
	res.Input = msg
	job.Update(job.Running, r)
	// Make/execute the requested function.
	switch *msg.Function {
	case "crop":
		utils.MakeFunction(functions.Crop)(w, r, &res, msg)
	case "dart":
		utils.MakeFunction(functions.Dart)(w, r, &res, msg)
	case "dtm":
		utils.MakeFunction(functions.Dtm)(w, r, &res, msg)
	case "ground":
		utils.MakeFunction(functions.Ground)(w, r, &res, msg)
	case "height":
		utils.MakeFunction(functions.Height)(w, r, &res, msg)
	case "info":
		utils.MakeFunction(functions.Info)(w, r, &res, msg)
	case "radius":
		utils.MakeFunction(functions.Radius)(w, r, &res, msg)
	case "statistical":
		utils.MakeFunction(functions.Statistical)(w, r, &res, msg)
	case "translate":
		utils.MakeFunction(functions.Translate)(w, r, &res, msg)
	case "vo":
		// VO goes through the local wrapper that passes explicit filenames.
		makeFunction2(functions.VO)(w, r, &res, msg)
	// An unrecognized function will result in 400 error, with message explaining
	// how to list available functions.
	default:
		http.Error(w, "Unrecognized function", http.StatusBadRequest)
		return
	}
	// If we made it here, we can record the FinishedAt time, notify the job
	// manager of success, and return 200.
	res.FinishedAt = time.Now()
	job.Okay(w, r, res, "Success!")
}
Use the standard utils.MakeFunction wrapper (instead of the local makeFunction2) for the "vo" function.
/*
Copyright 2015-2016, RadiantBlue Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handlers
import (
"net/http"
"os"
"time"
"github.com/julienschmidt/httprouter"
"github.com/venicegeo/pzsvc-pdal/functions"
"github.com/venicegeo/pzsvc-sdk-go/job"
"github.com/venicegeo/pzsvc-sdk-go/s3"
"github.com/venicegeo/pzsvc-sdk-go/utils"
)
// makeFunction2 wraps an individual PDAL function that takes explicit
// input/output filenames: it parses the filenames from the S3 keys, creates
// the local input file, downloads the source data, invokes fn, and — when a
// destination key was given — uploads the result to S3.
//
// NOTE(review): after "vo" switched to utils.MakeFunction, nothing in this
// file calls makeFunction2 any more; consider removing it.
func makeFunction2(fn func(http.ResponseWriter, *http.Request,
	*job.OutputMsg, job.InputMsg, string, string)) utils.FunctionFunc {
	return func(w http.ResponseWriter, r *http.Request, res *job.OutputMsg,
		msg job.InputMsg) {
		var inputName, outputName string
		var fileIn, fileOut *os.File
		// Split the source S3 key string, interpreting the last element as the
		// input filename. Create the input file, throwing 500 on error.
		inputName = s3.ParseFilenameFromKey(msg.Source.Key)
		fileIn, err := os.Create(inputName)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		defer fileIn.Close()
		// If provided, split the destination S3 key string, interpreting the
		// last element as the output filename. The explicit create below is
		// disabled — presumably the wrapped function creates the output file
		// itself; TODO confirm against the functions package.
		if len(msg.Destination.Key) > 0 {
			outputName = s3.ParseFilenameFromKey(msg.Destination.Key)
			// fileOut, err = os.Create(outputName)
			// if err != nil {
			// job.InternalError(w, r, *res, err.Error())
			// return
			// }
			// defer fileOut.Close()
		}
		// Download the source data from S3, throwing 500 on error.
		err = s3.Download(fileIn, msg.Source.Bucket, msg.Source.Key)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// Run the PDAL function.
		fn(w, r, res, msg, inputName, outputName)
		// If an output has been created, upload the destination data to S3,
		// throwing 500 on error.
		if len(msg.Destination.Key) > 0 {
			fileOut, err = os.Open(outputName)
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			defer fileOut.Close()
			err = s3.Upload(fileOut, msg.Destination.Bucket, msg.Destination.Key)
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
	}
}
// PdalHandler handles PDAL jobs: it decodes the job input message, dispatches
// to the requested PDAL function by name via utils.MakeFunction, and reports
// job status (Running, then success or an HTTP error) back to the caller.
func PdalHandler(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	// Create the job output message. No matter what happens, we should always be
	// able to populate the StartedAt field.
	var res job.OutputMsg
	res.StartedAt = time.Now()
	msg := job.GetInputMsg(w, r, res)
	// Throw 400 if the JobInput does not specify a function.
	if msg.Function == nil {
		http.Error(w, "Must provide a function", http.StatusBadRequest)
		return
	}
	// If everything is okay up to this point, we will echo the JobInput in the
	// JobOutput and mark the job as Running.
	res.Input = msg
	job.Update(job.Running, r)
	// Make/execute the requested function.
	switch *msg.Function {
	case "crop":
		utils.MakeFunction(functions.Crop)(w, r, &res, msg)
	case "dart":
		utils.MakeFunction(functions.Dart)(w, r, &res, msg)
	case "dtm":
		utils.MakeFunction(functions.Dtm)(w, r, &res, msg)
	case "ground":
		utils.MakeFunction(functions.Ground)(w, r, &res, msg)
	case "height":
		utils.MakeFunction(functions.Height)(w, r, &res, msg)
	case "info":
		utils.MakeFunction(functions.Info)(w, r, &res, msg)
	case "radius":
		utils.MakeFunction(functions.Radius)(w, r, &res, msg)
	case "statistical":
		utils.MakeFunction(functions.Statistical)(w, r, &res, msg)
	case "translate":
		utils.MakeFunction(functions.Translate)(w, r, &res, msg)
	case "vo":
		utils.MakeFunction(functions.VO)(w, r, &res, msg)
	// An unrecognized function will result in 400 error, with message explaining
	// how to list available functions.
	default:
		http.Error(w, "Unrecognized function", http.StatusBadRequest)
		return
	}
	// If we made it here, we can record the FinishedAt time, notify the job
	// manager of success, and return 200.
	res.FinishedAt = time.Now()
	job.Okay(w, r, res, "Success!")
}
|
// +build linux
package netlink
import (
"errors"
)
var (
	// ErrAttrHeaderTruncated is returned when a netlink attribute's header is
	// truncated.
	ErrAttrHeaderTruncated = errors.New("attribute header truncated")
	// ErrAttrBodyTruncated is returned when a netlink attribute's body is
	// truncated.
	ErrAttrBodyTruncated = errors.New("attribute body truncated")
)

// Fou describes a foo-over-UDP (FoU) encapsulation entry.
// NOTE(review): field meanings appear to mirror the kernel FOU netlink
// attributes (family, UDP port, IP protocol, encapsulation type) — confirm
// against the fou message handling elsewhere in the package.
type Fou struct {
	Family    int
	Port      int
	Protocol  int
	EncapType int
}
Netlink: Fix Darwin build
Having fou.go build only for linux breaks builds for darwin:
```
$ go build main.go
src/github.com/vishvananda/netlink/fou_unspecified.go:5:15: undefined: Fou
src/github.com/vishvananda/netlink/fou_unspecified.go:9:15: undefined: Fou
src/github.com/vishvananda/netlink/fou_unspecified.go:13:26: undefined: Fou
```
Instead, build fou.go for all platforms since it doesn't have platform-specific code:
```
$ go build main.go
$ ./main
not implemented
```
package netlink
import (
"errors"
)
var (
	// ErrAttrHeaderTruncated is returned when a netlink attribute's header is
	// truncated.
	ErrAttrHeaderTruncated = errors.New("attribute header truncated")
	// ErrAttrBodyTruncated is returned when a netlink attribute's body is
	// truncated.
	ErrAttrBodyTruncated = errors.New("attribute body truncated")
)

// Fou describes a foo-over-UDP (FoU) encapsulation entry.
// NOTE(review): field meanings appear to mirror the kernel FOU netlink
// attributes (family, UDP port, IP protocol, encapsulation type) — confirm
// against the fou message handling elsewhere in the package.
type Fou struct {
	Family    int
	Port      int
	Protocol  int
	EncapType int
}
|
package goftp
import (
"bufio"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"net"
"os"
"regexp"
"strconv"
"strings"
)
var REGEX_PWD_PATH *regexp.Regexp = regexp.MustCompile(`\"(.*)\"`)
type FTP struct {
conn net.Conn
addr string
debug bool
tlsconfig *tls.Config
reader *bufio.Reader
writer *bufio.Writer
}
func (ftp *FTP) Close() {
ftp.conn.Close()
}
type WalkFunc func(path string, info os.FileMode, err error) error
type RetrFunc func(r io.Reader) error
func parseLine(line string) (perm string, t string, filename string) {
for _, v := range strings.Split(line, ";") {
v2 := strings.Split(v, "=")
switch v2[0] {
case "perm":
perm = v2[1]
case "type":
t = v2[1]
default:
filename = v[1 : len(v)-2]
}
}
return
}
// walks recursively through path and call walkfunc for each file
func (ftp *FTP) Walk(path string, walkFn WalkFunc) (err error) {
/*
if err = walkFn(path, os.ModeDir, nil); err != nil {
if err == filepath.SkipDir {
return nil
}
}
*/
if ftp.debug {
log.Printf("Walking: '%s'\n", path)
}
var lines []string
if lines, err = ftp.List(path); err != nil {
return
}
for _, line := range lines {
_, t, subpath := parseLine(line)
switch t {
case "dir":
if subpath == "." {
} else if subpath == ".." {
} else {
if err = ftp.Walk(path+subpath+"/", walkFn); err != nil {
return
}
}
case "file":
if err = walkFn(path+subpath, os.FileMode(0), nil); err != nil {
return
}
}
}
return
}
// send quit to the server and close the connection
func (ftp *FTP) Quit() (err error) {
if _, err := ftp.cmd("221", "QUIT"); err != nil {
return err
}
ftp.conn.Close()
ftp.conn = nil
return nil
}
// will send a NOOP (no operation) to the server
func (ftp *FTP) Noop() (err error) {
_, err = ftp.cmd("200", "NOOP")
return
}
// RawCmd sends a raw command to the remote server and returns the response
// code (-1 when it cannot be determined) together with the raw reply line.
func (ftp *FTP) RawCmd(command string, args ...interface{}) (code int, line string) {
	if ftp.debug {
		// Bug fix: the original passed `code` as an extra Printf argument
		// with no matching verb, producing "%!(EXTRA int=...)" output.
		log.Printf("Raw-> %s\n", fmt.Sprintf(command, args...))
	}
	code = -1
	var err error
	if err = ftp.send(command, args...); err != nil {
		return code, ""
	}
	if line, err = ftp.receive(); err != nil {
		return code, ""
	}
	// Guard against replies shorter than a 3-digit status code, which would
	// previously have panicked on line[:3].
	if len(line) >= 3 {
		if c, convErr := strconv.Atoi(line[:3]); convErr == nil {
			code = c
		}
	}
	if ftp.debug {
		log.Printf("Raw<- <- %d \n", code)
	}
	return code, line
}
// private function to send command and compare return code with expects
func (ftp *FTP) cmd(expects string, command string, args ...interface{}) (line string, err error) {
if err = ftp.send(command, args...); err != nil {
return
}
if line, err = ftp.receive(); err != nil {
return
}
if !strings.HasPrefix(line, expects) {
err = errors.New(line)
return
}
return
}
// rename file
func (ftp *FTP) Rename(from string, to string) (err error) {
if _, err = ftp.cmd("350", "RNFR %s", from); err != nil {
return
}
if _, err = ftp.cmd("250", "RNTO %s", to); err != nil {
return
}
return
}
// make directory
func (ftp *FTP) Mkd(path string) error {
_, err := ftp.cmd("257", "MKD %s", path)
return err
}
// get current path
func (ftp *FTP) Pwd() (path string, err error) {
var line string
if line, err = ftp.cmd("257", "PWD"); err != nil {
return
}
res := REGEX_PWD_PATH.FindAllStringSubmatch(line[4:], -1)
path = res[0][1]
return
}
// change current path
func (ftp *FTP) Cwd(path string) (err error) {
_, err = ftp.cmd("250", "CWD %s", path)
return
}
// delete file
func (ftp *FTP) Dele(path string) (err error) {
if err = ftp.send("DELE %s", path); err != nil {
return
}
var line string
if line, err = ftp.receive(); err != nil {
return
}
if !strings.HasPrefix(line, "250") {
return errors.New(line)
}
return
}
// secures the ftp connection by using TLS
func (ftp *FTP) AuthTLS(config tls.Config) error {
if _, err := ftp.cmd("234", "AUTH TLS"); err != nil {
return err
}
// wrap tls on existing connection
ftp.tlsconfig = &config
ftp.conn = tls.Client(ftp.conn, &config)
ftp.writer = bufio.NewWriter(ftp.conn)
ftp.reader = bufio.NewReader(ftp.conn)
if _, err := ftp.cmd("200", "PBSZ 0"); err != nil {
return err
}
if _, err := ftp.cmd("200", "PROT P"); err != nil {
return err
}
return nil
}
// read all the buffered bytes and return
func (ftp *FTP) ReadAndDiscard() (int, error) {
var i int
var err error
buffer_size := ftp.reader.Buffered()
for i = 0; i < buffer_size; i++ {
if _, err = ftp.reader.ReadByte(); err != nil {
return i, err
}
}
return i, err
}
// change transfer type
func (ftp *FTP) Type(t string) error {
_, err := ftp.cmd("200", "TYPE %s", t)
return err
}
func (ftp *FTP) receiveLine() (string, error) {
line, err := ftp.reader.ReadString('\n')
if ftp.debug {
log.Printf("< %s", line)
}
return line, err
}
func (ftp *FTP) receive() (string, error) {
line, err := ftp.receiveLine()
if err != nil {
return line, err
}
if (len(line) >= 4) && (line[3] == '-') {
//Multiline response
closingCode := line[:3] + " "
for {
str, err := ftp.receiveLine()
line = line + str
if err != nil {
return line, err
}
if len(str) < 4 {
if ftp.debug {
log.Println("Uncorrectly terminated response")
}
break
} else {
if str[:4] == closingCode {
break
}
}
}
}
ftp.ReadAndDiscard()
//fmt.Println(line)
return line, err
}
func (ftp *FTP) send(command string, arguments ...interface{}) error {
if ftp.debug {
log.Printf("> %s", fmt.Sprintf(command, arguments...))
}
command = fmt.Sprintf(command, arguments...)
command += "\r\n"
if _, err := ftp.writer.WriteString(command); err != nil {
return err
}
if err := ftp.writer.Flush(); err != nil {
return err
}
return nil
}
// enables passive data connection and returns port number
func (ftp *FTP) Pasv() (port int, err error) {
var line string
if line, err = ftp.cmd("227", "PASV"); err != nil {
return
}
re, err := regexp.Compile(`\((.*)\)`)
res := re.FindAllStringSubmatch(line, -1)
s := strings.Split(res[0][1], ",")
l1, _ := strconv.Atoi(s[len(s)-2])
l2, _ := strconv.Atoi(s[len(s)-1])
port = l1<<8 + l2
return
}
// open new data connection
func (ftp *FTP) newConnection(port int) (conn net.Conn, err error) {
addr := fmt.Sprintf("%s:%d", strings.Split(ftp.addr, ":")[0], port)
if ftp.debug {
log.Printf("Connecting to %s\n", addr)
}
if conn, err = net.Dial("tcp", addr); err != nil {
return
}
if ftp.tlsconfig != nil {
conn = tls.Client(conn, ftp.tlsconfig)
}
return
}
// upload file
func (ftp *FTP) Stor(path string, r io.Reader) (err error) {
if err = ftp.Type("I"); err != nil {
return
}
var port int
if port, err = ftp.Pasv(); err != nil {
return
}
if err = ftp.send("STOR %s", path); err != nil {
return
}
var pconn net.Conn
if pconn, err = ftp.newConnection(port); err != nil {
return
}
var line string
if line, err = ftp.receive(); err != nil {
return
}
if !strings.HasPrefix(line, "150") {
err = errors.New(line)
return
}
if _, err = io.Copy(pconn, r); err != nil {
return
}
pconn.Close()
if line, err = ftp.receive(); err != nil {
return
}
if !strings.HasPrefix(line, "226") {
err = errors.New(line)
return
}
return
}
// retrieves file
func (ftp *FTP) Retr(path string, retrFn RetrFunc) (s string, err error) {
if err = ftp.Type("I"); err != nil {
return
}
var port int
if port, err = ftp.Pasv(); err != nil {
return
}
if err = ftp.send("RETR %s", path); err != nil {
return
}
var pconn net.Conn
if pconn, err = ftp.newConnection(port); err != nil {
return
}
var line string
if line, err = ftp.receive(); err != nil {
return
}
if !strings.HasPrefix(line, "150") {
err = errors.New(line)
return
}
if err = retrFn(pconn); err != nil {
return
}
pconn.Close()
if line, err = ftp.receive(); err != nil {
return
}
if !strings.HasPrefix(line, "226") {
err = errors.New(line)
return
}
return
}
/*func GetFilesList(path string) (files []string, err error) {
}*/
// list the path (or current directory)
func (ftp *FTP) List(path string) (files []string, err error) {
if err = ftp.Type("A"); err != nil {
return
}
var port int
if port, err = ftp.Pasv(); err != nil {
return
}
// check if MLSD works
if err = ftp.send("MLSD %s", path); err != nil {
}
var pconn net.Conn
if pconn, err = ftp.newConnection(port); err != nil {
return
}
var line string
if line, err = ftp.receive(); err != nil {
return
}
if !strings.HasPrefix(line, "150") {
// MLSD failed, lets try LIST
if err = ftp.send("LIST %s", path); err != nil {
return
}
if line, err = ftp.receive(); err != nil {
return
}
if !strings.HasPrefix(line, "150") {
// Really list is not working here
err = errors.New(line)
return
}
}
reader := bufio.NewReader(pconn)
for {
line, err = reader.ReadString('\n')
if err == io.EOF {
break
} else if err != nil {
return
}
files = append(files, string(line))
}
pconn.Close()
if line, err = ftp.receive(); err != nil {
return
}
if !strings.HasPrefix(line, "226") {
err = errors.New(line)
return
}
return
}
/*
// login on server with strange login behavior
func (ftp *FTP) SmartLogin(username string, password string) (err error) {
var code int
// Maybe the server has some useless words to say. Make him talk
code, _ = ftp.RawCmd("NOOP")
if code == 220 || code == 530 {
// Maybe with another Noop the server will ask us to login?
code, _ = ftp.RawCmd("NOOP")
if code == 530 {
// ok, let's login
code, _ = ftp.RawCmd("USER %s", username)
code, _ = ftp.RawCmd("NOOP")
if code == 331 {
// user accepted, password required
code, _ = ftp.RawCmd("PASS %s", password)
code, _ = ftp.RawCmd("PASS %s", password)
if code == 230 {
code, _ = ftp.RawCmd("NOOP")
return
}
}
}
}
// Nothing strange... let's try a normal login
return ftp.Login(username, password)
}
*/
// login to the server
func (ftp *FTP) Login(username string, password string) (err error) {
if _, err = ftp.cmd("331", "USER %s", username); err != nil {
if strings.HasPrefix(err.Error(), "230") {
// Ok, probably anonymous server
// but login was fine, so return no error
err = nil
} else {
return
}
}
if _, err = ftp.cmd("230", "PASS %s", password); err != nil {
return
}
return
}
// connect to server, debug is OFF
func Connect(addr string) (*FTP, error) {
var err error
var conn net.Conn
if conn, err = net.Dial("tcp", addr); err != nil {
return nil, err
}
writer := bufio.NewWriter(conn)
reader := bufio.NewReader(conn)
//reader.ReadString('\n')
object := &FTP{conn: conn, addr: addr, reader: reader, writer: writer, debug: false}
object.receive()
return object, nil
}
// ConnectDbg connects to the server at addr (format "host:port") with
// debugging enabled, logs the greeting banner, and returns the session.
func ConnectDbg(addr string) (*FTP, error) {
	var err error
	var conn net.Conn
	if conn, err = net.Dial("tcp", addr); err != nil {
		return nil, err
	}
	writer := bufio.NewWriter(conn)
	reader := bufio.NewReader(conn)
	var line string
	// Bug fix: debug was set to false, which made ConnectDbg behave
	// identically to Connect despite its documented purpose.
	object := &FTP{conn: conn, addr: addr, reader: reader, writer: writer, debug: true}
	line, _ = object.receive()
	log.Print(line)
	return object, nil
}
ftp: satisfy `golint`
Signed-off-by: Vincent Batts <d01a4772f0cffd3f005a7d7f01c59cd104c7a5fe@hashbangbash.com>
package goftp
import (
"bufio"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"net"
"os"
"regexp"
"strconv"
"strings"
)
// RePwdPath is the default expression for matching files in the current working directory
var RePwdPath = regexp.MustCompile(`\"(.*)\"`)
// FTP is a session for File Transfer Protocol
type FTP struct {
conn net.Conn
addr string
debug bool
tlsconfig *tls.Config
reader *bufio.Reader
writer *bufio.Writer
}
// Close ends the FTP connection
func (ftp *FTP) Close() {
ftp.conn.Close()
}
type (
// WalkFunc is called on each path in a Walk. Errors are filtered through WalkFunc
WalkFunc func(path string, info os.FileMode, err error) error
// RetrFunc is passed to Retr and is the handler for the stream received for a given path
RetrFunc func(r io.Reader) error
)
// parseLine splits a single MLSD fact line of the form
// "type=dir;perm=el; name" into its permission, type, and filename parts.
// Facts other than "perm" and "type" fall through to the filename rule,
// which drops the leading space and the trailing two characters (CRLF).
func parseLine(line string) (perm string, t string, filename string) {
	for _, fact := range strings.Split(line, ";") {
		parts := strings.Split(fact, "=")
		switch parts[0] {
		case "perm":
			perm = parts[1]
		case "type":
			t = parts[1]
		default:
			filename = fact[1 : len(fact)-2]
		}
	}
	return
}
// Walk walks recursively through path and call walkfunc for each file
func (ftp *FTP) Walk(path string, walkFn WalkFunc) (err error) {
/*
if err = walkFn(path, os.ModeDir, nil); err != nil {
if err == filepath.SkipDir {
return nil
}
}
*/
if ftp.debug {
log.Printf("Walking: '%s'\n", path)
}
var lines []string
if lines, err = ftp.List(path); err != nil {
return
}
for _, line := range lines {
_, t, subpath := parseLine(line)
switch t {
case "dir":
if subpath == "." {
} else if subpath == ".." {
} else {
if err = ftp.Walk(path+subpath+"/", walkFn); err != nil {
return
}
}
case "file":
if err = walkFn(path+subpath, os.FileMode(0), nil); err != nil {
return
}
}
}
return
}
// Quit sends quit to the server and close the connection. No need to Close after this.
func (ftp *FTP) Quit() (err error) {
if _, err := ftp.cmd("221", "QUIT"); err != nil {
return err
}
ftp.conn.Close()
ftp.conn = nil
return nil
}
// Noop will send a NOOP (no operation) to the server
func (ftp *FTP) Noop() (err error) {
_, err = ftp.cmd("200", "NOOP")
return
}
// RawCmd sends a raw command to the remote server and returns the response
// code (-1 when it cannot be determined) together with the raw reply line.
func (ftp *FTP) RawCmd(command string, args ...interface{}) (code int, line string) {
	if ftp.debug {
		// Bug fix: the original passed `code` as an extra Printf argument
		// with no matching verb, producing "%!(EXTRA int=...)" output.
		log.Printf("Raw-> %s\n", fmt.Sprintf(command, args...))
	}
	code = -1
	var err error
	if err = ftp.send(command, args...); err != nil {
		return code, ""
	}
	if line, err = ftp.receive(); err != nil {
		return code, ""
	}
	// Guard against replies shorter than a 3-digit status code, which would
	// previously have panicked on line[:3].
	if len(line) >= 3 {
		if c, convErr := strconv.Atoi(line[:3]); convErr == nil {
			code = c
		}
	}
	if ftp.debug {
		log.Printf("Raw<- <- %d \n", code)
	}
	return code, line
}
// private function to send command and compare return code with expects
func (ftp *FTP) cmd(expects string, command string, args ...interface{}) (line string, err error) {
if err = ftp.send(command, args...); err != nil {
return
}
if line, err = ftp.receive(); err != nil {
return
}
if !strings.HasPrefix(line, expects) {
err = errors.New(line)
return
}
return
}
// Rename file on the remote host
func (ftp *FTP) Rename(from string, to string) (err error) {
if _, err = ftp.cmd("350", "RNFR %s", from); err != nil {
return
}
if _, err = ftp.cmd("250", "RNTO %s", to); err != nil {
return
}
return
}
// Mkd makes a directory on the remote host
func (ftp *FTP) Mkd(path string) error {
_, err := ftp.cmd("257", "MKD %s", path)
return err
}
// Pwd gets current path on the remote host
func (ftp *FTP) Pwd() (path string, err error) {
var line string
if line, err = ftp.cmd("257", "PWD"); err != nil {
return
}
res := RePwdPath.FindAllStringSubmatch(line[4:], -1)
path = res[0][1]
return
}
// Cwd changes current working directory on remote host to path
func (ftp *FTP) Cwd(path string) (err error) {
_, err = ftp.cmd("250", "CWD %s", path)
return
}
// Dele deletes path on remote host
func (ftp *FTP) Dele(path string) (err error) {
if err = ftp.send("DELE %s", path); err != nil {
return
}
var line string
if line, err = ftp.receive(); err != nil {
return
}
if !strings.HasPrefix(line, "250") {
return errors.New(line)
}
return
}
// AuthTLS secures the ftp connection by using TLS
func (ftp *FTP) AuthTLS(config tls.Config) error {
if _, err := ftp.cmd("234", "AUTH TLS"); err != nil {
return err
}
// wrap tls on existing connection
ftp.tlsconfig = &config
ftp.conn = tls.Client(ftp.conn, &config)
ftp.writer = bufio.NewWriter(ftp.conn)
ftp.reader = bufio.NewReader(ftp.conn)
if _, err := ftp.cmd("200", "PBSZ 0"); err != nil {
return err
}
if _, err := ftp.cmd("200", "PROT P"); err != nil {
return err
}
return nil
}
// ReadAndDiscard drains all currently buffered bytes from the control
// connection's reader and returns the number of bytes that were dropped.
// It is used after a reply is parsed so stray bytes are not misread as the
// start of the next reply.
func (ftp *FTP) ReadAndDiscard() (int, error) {
	// bufio.Reader.Discard skips exactly the buffered byte count without
	// copying, replacing the previous byte-by-byte ReadByte loop.
	return ftp.reader.Discard(ftp.reader.Buffered())
}
// Type changes transfer type.
func (ftp *FTP) Type(t TypeCode) error {
_, err := ftp.cmd("200", "TYPE %s", t)
return err
}
// TypeCode for the representation types
type TypeCode string
const (
// TypeASCII for ASCII
TypeASCII = "A"
// TypeEBCDIC for EBCDIC
TypeEBCDIC = "E"
// TypeImage for an Image
TypeImage = "I"
// TypeLocal for local byte size
TypeLocal = "L"
)
func (ftp *FTP) receiveLine() (string, error) {
line, err := ftp.reader.ReadString('\n')
if ftp.debug {
log.Printf("< %s", line)
}
return line, err
}
// receive reads one reply from the control connection, following multiline
// responses (the "xyz-" form) until a line starting with the matching
// "xyz " terminator is seen, then drains any stray buffered bytes.
func (ftp *FTP) receive() (string, error) {
	line, err := ftp.receiveLine()
	if err != nil {
		return line, err
	}
	if (len(line) >= 4) && (line[3] == '-') {
		//Multiline response
		closingCode := line[:3] + " "
		for {
			str, err := ftp.receiveLine()
			line = line + str
			if err != nil {
				return line, err
			}
			if len(str) < 4 {
				// Line too short to carry a status code: give up on finding
				// the terminator and treat the reply as complete.
				if ftp.debug {
					log.Println("Uncorrectly terminated response")
				}
				break
			} else {
				if str[:4] == closingCode {
					break
				}
			}
		}
	}
	// Drop anything still buffered so it cannot pollute the next reply.
	ftp.ReadAndDiscard()
	//fmt.Println(line)
	return line, err
}
func (ftp *FTP) send(command string, arguments ...interface{}) error {
if ftp.debug {
log.Printf("> %s", fmt.Sprintf(command, arguments...))
}
command = fmt.Sprintf(command, arguments...)
command += "\r\n"
if _, err := ftp.writer.WriteString(command); err != nil {
return err
}
if err := ftp.writer.Flush(); err != nil {
return err
}
return nil
}
// rePasvTuple extracts the host/port tuple from a 227 reply, e.g.
// "227 Entering Passive Mode (h1,h2,h3,h4,p1,p2)".
var rePasvTuple = regexp.MustCompile(`\((.*)\)`)

// Pasv enables passive data connection and returns the data port number.
func (ftp *FTP) Pasv() (port int, err error) {
	var line string
	if line, err = ftp.cmd("227", "PASV"); err != nil {
		return
	}
	// Bug fix: the pattern was re-compiled on every call with its error
	// ignored, and a malformed reply panicked on res[0][1]; validate instead.
	res := rePasvTuple.FindAllStringSubmatch(line, -1)
	if len(res) == 0 || len(res[0]) < 2 {
		err = errors.New("invalid PASV response: " + line)
		return
	}
	s := strings.Split(res[0][1], ",")
	if len(s) < 2 {
		err = errors.New("invalid PASV response: " + line)
		return
	}
	l1, _ := strconv.Atoi(s[len(s)-2])
	l2, _ := strconv.Atoi(s[len(s)-1])
	port = l1<<8 + l2
	return
}
// open new data connection
func (ftp *FTP) newConnection(port int) (conn net.Conn, err error) {
addr := fmt.Sprintf("%s:%d", strings.Split(ftp.addr, ":")[0], port)
if ftp.debug {
log.Printf("Connecting to %s\n", addr)
}
if conn, err = net.Dial("tcp", addr); err != nil {
return
}
if ftp.tlsconfig != nil {
conn = tls.Client(conn, ftp.tlsconfig)
}
return
}
// Stor uploads a file to the remote host at path, reading its contents from
// r over a passive data connection in binary ("I") mode.
func (ftp *FTP) Stor(path string, r io.Reader) (err error) {
	if err = ftp.Type("I"); err != nil {
		return
	}
	var port int
	if port, err = ftp.Pasv(); err != nil {
		return
	}
	if err = ftp.send("STOR %s", path); err != nil {
		return
	}
	var pconn net.Conn
	if pconn, err = ftp.newConnection(port); err != nil {
		return
	}
	var line string
	if line, err = ftp.receive(); err != nil {
		// Bug fix: the data connection leaked on every error path below.
		pconn.Close()
		return
	}
	if !strings.HasPrefix(line, "150") {
		pconn.Close()
		err = errors.New(line)
		return
	}
	if _, err = io.Copy(pconn, r); err != nil {
		pconn.Close()
		return
	}
	// Close the data connection before reading the final reply so the server
	// sees end-of-file and sends its 226 completion on the control channel.
	pconn.Close()
	if line, err = ftp.receive(); err != nil {
		return
	}
	if !strings.HasPrefix(line, "226") {
		err = errors.New(line)
		return
	}
	return
}
// Retr retrieves the file at path from the remote host, streaming its
// contents to retrFn over a passive data connection in binary ("I") mode.
func (ftp *FTP) Retr(path string, retrFn RetrFunc) (s string, err error) {
	if err = ftp.Type("I"); err != nil {
		return
	}
	var port int
	if port, err = ftp.Pasv(); err != nil {
		return
	}
	if err = ftp.send("RETR %s", path); err != nil {
		return
	}
	var pconn net.Conn
	if pconn, err = ftp.newConnection(port); err != nil {
		return
	}
	var line string
	if line, err = ftp.receive(); err != nil {
		// Bug fix: the data connection leaked on every error path below.
		pconn.Close()
		return
	}
	if !strings.HasPrefix(line, "150") {
		pconn.Close()
		err = errors.New(line)
		return
	}
	if err = retrFn(pconn); err != nil {
		pconn.Close()
		return
	}
	// Close the data connection before reading the final reply so the server
	// completes the transfer and sends 226 on the control channel.
	pconn.Close()
	if line, err = ftp.receive(); err != nil {
		return
	}
	if !strings.HasPrefix(line, "226") {
		err = errors.New(line)
		return
	}
	return
}
/*func GetFilesList(path string) (files []string, err error) {
}*/
// List lists path (or the current directory) over a passive data connection.
// It asks for a machine-readable MLSD listing first and falls back to LIST
// when the server rejects MLSD.
func (ftp *FTP) List(path string) (files []string, err error) {
	if err = ftp.Type("A"); err != nil {
		return
	}
	var port int
	if port, err = ftp.Pasv(); err != nil {
		return
	}
	// check if MLSD works
	// Bug fix: a send failure here was silently swallowed (empty branch);
	// it is a control-connection write error and must abort the listing.
	if err = ftp.send("MLSD %s", path); err != nil {
		return
	}
	var pconn net.Conn
	if pconn, err = ftp.newConnection(port); err != nil {
		return
	}
	var line string
	if line, err = ftp.receive(); err != nil {
		// Bug fix: the data connection leaked on the error paths below.
		pconn.Close()
		return
	}
	if !strings.HasPrefix(line, "150") {
		// MLSD failed, lets try LIST
		if err = ftp.send("LIST %s", path); err != nil {
			pconn.Close()
			return
		}
		if line, err = ftp.receive(); err != nil {
			pconn.Close()
			return
		}
		if !strings.HasPrefix(line, "150") {
			// Really list is not working here
			pconn.Close()
			err = errors.New(line)
			return
		}
	}
	// Read listing lines until the server closes the data channel (EOF).
	reader := bufio.NewReader(pconn)
	for {
		line, err = reader.ReadString('\n')
		if err == io.EOF {
			break
		} else if err != nil {
			pconn.Close()
			return
		}
		files = append(files, line)
	}
	pconn.Close()
	if line, err = ftp.receive(); err != nil {
		return
	}
	if !strings.HasPrefix(line, "226") {
		err = errors.New(line)
		return
	}
	return
}
/*
// login on server with strange login behavior
func (ftp *FTP) SmartLogin(username string, password string) (err error) {
var code int
// Maybe the server has some useless words to say. Make him talk
code, _ = ftp.RawCmd("NOOP")
if code == 220 || code == 530 {
// Maybe with another Noop the server will ask us to login?
code, _ = ftp.RawCmd("NOOP")
if code == 530 {
// ok, let's login
code, _ = ftp.RawCmd("USER %s", username)
code, _ = ftp.RawCmd("NOOP")
if code == 331 {
// user accepted, password required
code, _ = ftp.RawCmd("PASS %s", password)
code, _ = ftp.RawCmd("PASS %s", password)
if code == 230 {
code, _ = ftp.RawCmd("NOOP")
return
}
}
}
}
// Nothing strange... let's try a normal login
return ftp.Login(username, password)
}
*/
// Login authenticates on the server with the provided username and password.
// Typical anonymous default is ("anonymous", "").
func (ftp *FTP) Login(username string, password string) (err error) {
	_, err = ftp.cmd("331", "USER %s", username)
	if err != nil {
		if !strings.HasPrefix(err.Error(), "230") {
			return
		}
		// Server replied "230": it accepted the user without asking for a
		// password (likely an anonymous server), so this is not a failure.
		err = nil
	}
	_, err = ftp.cmd("230", "PASS %s", password)
	return
}
// Connect dials the server at addr (format "host:port"). Debug output is OFF.
func Connect(addr string) (*FTP, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	object := &FTP{
		conn:   conn,
		addr:   addr,
		reader: bufio.NewReader(conn),
		writer: bufio.NewWriter(conn),
		debug:  false,
	}
	// Consume the server's greeting banner before handing the client back.
	object.receive()
	return object, nil
}
// ConnectDbg dials the server at addr (format "host:port"). Debug output is ON.
func ConnectDbg(addr string) (*FTP, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	object := &FTP{
		conn:   conn,
		addr:   addr,
		reader: bufio.NewReader(conn),
		writer: bufio.NewWriter(conn),
		// BUG FIX: this constructor documents "debug is ON" but previously
		// set debug to false, making it behave identically to Connect.
		debug: true,
	}
	// Log the server's greeting banner.
	line, _ := object.receive()
	log.Print(line)
	return object, nil
}
|
package fluidbackup
import "sync"
import "math/rand"
import "time"
import "bytes"
import "strings"
import "os"
import "fmt"
import "encoding/hex"
import "bufio"
type BlockId int64
type BlockShardId int64
/*
* BlockShard represents a slice of a file block.
*/
type BlockShard struct {
Id BlockShardId
Hash []byte
Length int
Peer *Peer
Available bool // whether the peer has confirmed receipt of the shard
Parent *Block
ShardIndex int
// temporary fields
Contents []byte // cleared once the peer confirms receipt of the shard
}
/*
* Blocks are the unit of distribution.
* A block is sliced via erasure coding into a number of shards, each of which is
* stored onto a different peer. The block can be recovered by collecting K of the
* N shards.
*/
type Block struct {
Id BlockId
Hash []byte
N int
K int
Shards []*BlockShard
// source
ParentFile FileId
FileOffset int
}
type BlockStore struct {
mu sync.Mutex
peerList *PeerList
blocks map[BlockId]*Block
replicateN, replicateK int
}
// MakeBlockStore constructs a BlockStore backed by the given peer list and
// starts a background goroutine that keeps blocks replicated until the
// FluidBackup instance begins stopping.
func MakeBlockStore(fluidBackup *FluidBackup, peerList *PeerList) *BlockStore {
	store := new(BlockStore)
	store.peerList = peerList
	store.blocks = make(map[BlockId]*Block)
	store.replicateN = DEFAULT_N
	store.replicateK = DEFAULT_K
	// Perpetually ensure blocks are synced.
	go func() {
		for !fluidBackup.Stopping() {
			store.update()
			time.Sleep(50 * time.Millisecond)
		}
	}()
	return store
}
// RegisterBlock erasure-codes contents into N shards and records a new block
// for the file region (fileId, offset). Shard contents are cached until the
// update() loop hands them to peers. Returns the new block's id.
func (this *BlockStore) RegisterBlock(fileId FileId, offset int, contents []byte) BlockId {
	this.mu.Lock()
	defer this.mu.Unlock()
	block := &Block{
		Id:         BlockId(rand.Int63()),
		N:          this.replicateN,
		K:          this.replicateK,
		ParentFile: fileId,
		FileOffset: offset,
		Hash:       hash(contents),
	}
	block.Shards = make([]*BlockShard, block.N)
	for idx, pieces := range erasureEncode(contents, block.K, block.N) {
		block.Shards[idx] = &BlockShard{
			Id:         BlockShardId(rand.Int63()),
			Hash:       hash(pieces),
			Length:     len(pieces),
			Peer:       nil,
			Available:  false,
			Contents:   pieces,
			Parent:     block,
			ShardIndex: idx,
		}
	}
	this.blocks[block.Id] = block
	Log.Debug.Printf("Registered new block %d with %d shards", block.Id, len(block.Shards))
	return block.Id
}
// RecoverBlock reassembles the contents of a registered block by fetching
// shards from their peers until K distinct shards have been retrieved, then
// erasure-decoding them. Returns nil if the block is unknown, fewer than K
// shards could be fetched, or the decoded bytes fail the block hash check.
func (this *BlockStore) RecoverBlock(blockId BlockId) []byte {
	this.mu.Lock()
	defer this.mu.Unlock()
	// verify block exists
	block := this.blocks[blockId]
	if block == nil {
		return nil
	}
	// recover the block
	Log.Debug.Printf("Begin recovery of block %d", block.Id)
	shardBytes := make([][]byte, 0, block.K)  // retrieved shard payloads
	shardChunks := make([]int, 0, block.K)    // matching shard indices for the decoder
	for shardIndex, shard := range block.Shards {
		// Only shards that have a peer and were confirmed stored are usable.
		if shard.Peer == nil || !shard.Available {
			Log.Debug.Printf("Skipping shard %d: no peer or not available", shard.Id)
			continue
		}
		Log.Debug.Printf("Attempting to retrieve shard %d from peer %s", shard.Id, shard.Peer.id.String())
		currBytes := shard.Peer.retrieveShard(shard)
		if currBytes == nil {
			Log.Warn.Printf("Failed to retrieve shard %d from peer %s (empty response)", shard.Id, shard.Peer.id.String())
			continue
		}
		// Accept the shard only if its hash matches what we recorded at
		// registration time; otherwise treat it as a failed retrieval.
		if bytes.Equal(hash(currBytes), shard.Hash) {
			Log.Debug.Printf("Retrieved shard %d successfully (idx=%d, len=%d)", shard.Id, shardIndex, len(currBytes))
			shardBytes = append(shardBytes, currBytes)
			shardChunks = append(shardChunks, shardIndex)
			// K shards suffice for decoding; stop fetching early.
			if len(shardBytes) >= block.K {
				break
			}
		} else {
			Log.Warn.Printf("Failed to retrieve shard %d from peer %s (hash check failed, len=%d)", shard.Id, shard.Peer.id.String(), len(currBytes))
			continue
		}
	}
	if len(shardBytes) < block.K {
		Log.Warn.Printf("Failed to retrieve block %d: only got %d shards", block.Id, len(shardBytes))
		return nil
	}
	blockBytes := erasureDecode(shardBytes, block.K, block.N, shardChunks)
	// Final integrity check on the reassembled block.
	if !bytes.Equal(hash(blockBytes), block.Hash) {
		Log.Error.Printf("Failed to recover block %d: hash check failed even though we retrieved K shards", block.Id)
		return nil
	}
	Log.Debug.Printf("Successfully recovered block %d", block.Id)
	return blockBytes
}
// update performs one maintenance pass over all blocks:
// (1) assigns a peer to every shard that lacks one, avoiding peers already
// used by a sibling shard of the same block, and
// (2) pushes at most one pending shard per block to its peer, marking it
// available and releasing the cached contents once the peer confirms.
func (this *BlockStore) update() {
	this.mu.Lock()
	defer this.mu.Unlock()
	// search for shards that don't have peers
	for _, block := range this.blocks {
		// first pass: find existing used peers
		ignorePeers := make(map[PeerId]bool)
		for _, shard := range block.Shards {
			if shard.Peer != nil {
				ignorePeers[shard.Peer.id] = true
			}
		}
		// second pass: actually find new peers
		for _, shard := range block.Shards {
			if shard.Peer == nil {
				availablePeer := this.peerList.FindAvailablePeer(shard.Length, ignorePeers, shard.Id)
				if availablePeer == nil {
					// no available peer for this shard, other shards in this block won't have peers either
					break
				} else {
					shard.Peer = availablePeer
					ignorePeers[shard.Peer.id] = true
				}
			}
		}
	}
	// commit shard data to peers
	// we only commit once per update iteration to avoid hogging the lock?
	for _, block := range this.blocks {
		for _, shard := range block.Shards {
			if shard.Peer != nil && !shard.Available {
				Log.Debug.Printf("Committing shard %d to peer %s", shard.Id, shard.Peer.id.String())
				if shard.Peer.storeShard(shard) {
					shard.Available = true
					// The peer now holds the data; drop the local copy.
					shard.Contents = nil
				}
				// NOTE: this break is deliberate — at most one shard per
				// block is committed per pass to keep lock holds short.
				break
			}
		}
	}
}
/*
* blockstore metadata can be written and read from the disk using Save/Load functions below.
* The file format is a block on each line, consisting of string:
* [blockid]:[fileid]:[file_offset]:[N]:[K]:[hex(hash)]:[shard1],[shard2],...,[shardn],
* Each shard looks like:
[shardid]/[length]/[peerid]/[available]/[hex(hash)]
*/
// Save writes blockstore metadata to blockstore.dat, one block per line in
// the format documented above. Returns false if the file cannot be created
// or a write fails.
func (this *BlockStore) Save() bool {
	this.mu.Lock()
	defer this.mu.Unlock()
	Log.Info.Printf("Saving metadata to blockstore.dat (%d blocks)", len(this.blocks))
	fout, err := os.Create("blockstore.dat")
	if err != nil {
		Log.Warn.Printf("Failed to save metadata to blockstore.dat: %s", err.Error())
		return false
	}
	defer fout.Close()
	for _, block := range this.blocks {
		blockDump := fmt.Sprintf("%d:%s:%d:%d:%d:%s:", block.Id, block.ParentFile, block.FileOffset, block.N, block.K, hex.EncodeToString(block.Hash))
		for _, shard := range block.Shards {
			// An empty peer string marks a shard that has no peer yet.
			peerString := ""
			if shard.Peer != nil {
				peerString = shard.Peer.id.String()
			}
			blockDump += fmt.Sprintf("%d/%d/%s/%d/%s", shard.Id, shard.Length, peerString, boolToInt(shard.Available), hex.EncodeToString(shard.Hash)) + ","
		}
		blockDump += "\n"
		// BUG FIX: write errors (e.g. disk full) were previously ignored,
		// silently producing a truncated metadata file.
		if _, err = fout.Write([]byte(blockDump)); err != nil {
			Log.Warn.Printf("Failed to save metadata to blockstore.dat: %s", err.Error())
			return false
		}
	}
	return true
}
// Load restores blockstore metadata previously written by Save from
// blockstore.dat. Returns false if the file cannot be read or contains an
// invalid shard entry. See the format comment above Save.
func (this *BlockStore) Load() bool {
	this.mu.Lock()
	defer this.mu.Unlock()
	fin, err := os.Open("blockstore.dat")
	if err != nil {
		Log.Warn.Printf("Failed to read metadata from blockstore.dat: %s", err.Error())
		return false
	}
	defer fin.Close()
	scanner := bufio.NewScanner(fin)
	for scanner.Scan() {
		parts := strings.SplitN(scanner.Text(), ":", 7)
		if len(parts) != 7 {
			continue
		}
		block := &Block{}
		block.Id = BlockId(strToInt64(parts[0]))
		block.ParentFile = FileId(parts[1])
		block.FileOffset = strToInt(parts[2])
		block.N = strToInt(parts[3])
		block.K = strToInt(parts[4])
		block.Hash, _ = hex.DecodeString(parts[5])
		shardStrings := strings.Split(parts[6], ",")
		block.Shards = make([]*BlockShard, len(shardStrings)-1) // last element of shardStrings is empty
		for i, shardString := range shardStrings {
			if i < len(block.Shards) {
				shardParts := strings.Split(shardString, "/")
				if len(shardParts) != 5 {
					Log.Warn.Printf("Failed to read metadata from blockstore.dat: invalid shard [%s]", shardString)
					return false
				}
				shard := &BlockShard{}
				shard.Id = BlockShardId(strToInt64(shardParts[0]))
				shard.Length = strToInt(shardParts[1])
				shard.Available = false
				if shardParts[2] != "" {
					shard.Peer = this.peerList.DiscoveredPeer(strToPeerId(shardParts[2]))
					// check if configuration indicates this shard has been replicated on the peer
					// if so, we want to notify the peer object so that we correctly account for our space usage
					cfgAvailable := strToInt(shardParts[3]) == 1
					// BUG FIX: reserve space on the peer whenever a peer is
					// assigned, not only when the shard was already marked
					// available — otherwise a found-but-not-yet-replicated
					// shard was never accounted for on the peer.
					if shard.Peer.reserveBytes(shard.Length, shard.Id) {
						shard.Available = cfgAvailable
					} else {
						// Failed to reserve: our accounting is off somehow,
						// so drop the peer and let update() replicate the
						// shard elsewhere.
						shard.Peer = nil
					}
				}
				shard.Hash, _ = hex.DecodeString(shardParts[4])
				shard.Parent = block
				shard.ShardIndex = i
				block.Shards[i] = shard
			}
		}
		this.blocks[block.Id] = block
	}
	Log.Info.Printf("Loaded %d blocks", len(this.blocks))
	return true
}
Fix previous commit to handle case where we have found peer but haven't replicated yet.
Actually it may be better to simply discard the peer reference in the shard for this case, but it shouldn't matter either way.
package fluidbackup
import "sync"
import "math/rand"
import "time"
import "bytes"
import "strings"
import "os"
import "fmt"
import "encoding/hex"
import "bufio"
type BlockId int64
type BlockShardId int64
/*
* BlockShard represents a slice of a file block.
*/
type BlockShard struct {
Id BlockShardId
Hash []byte
Length int
Peer *Peer
Available bool // whether the peer has confirmed receipt of the shard
Parent *Block
ShardIndex int
// temporary fields
Contents []byte // cleared once the peer confirms receipt of the shard
}
/*
* Blocks are the unit of distribution.
* A block is sliced via erasure coding into a number of shards, each of which is
* stored onto a different peer. The block can be recovered by collecting K of the
* N shards.
*/
type Block struct {
Id BlockId
Hash []byte
N int
K int
Shards []*BlockShard
// source
ParentFile FileId
FileOffset int
}
type BlockStore struct {
mu sync.Mutex
peerList *PeerList
blocks map[BlockId]*Block
replicateN, replicateK int
}
func MakeBlockStore(fluidBackup *FluidBackup, peerList *PeerList) *BlockStore {
this := new(BlockStore)
this.peerList = peerList
this.blocks = make(map[BlockId]*Block, 0)
this.replicateN = DEFAULT_N
this.replicateK = DEFAULT_K
// perpetually ensure blocks are synced
go func() {
for !fluidBackup.Stopping() {
this.update()
time.Sleep(time.Duration(50 * time.Millisecond))
}
}()
return this
}
func (this *BlockStore) RegisterBlock(fileId FileId, offset int, contents []byte) BlockId {
this.mu.Lock()
defer this.mu.Unlock()
block := &Block{}
block.Id = BlockId(rand.Int63())
block.N = this.replicateN
block.K = this.replicateK
block.ParentFile = fileId
block.FileOffset = offset
block.Hash = hash(contents)
shards := erasureEncode(contents, block.K, block.N)
block.Shards = make([]*BlockShard, block.N)
for shardIndex, shardBytes := range shards {
block.Shards[shardIndex] = &BlockShard{
Id: BlockShardId(rand.Int63()),
Hash: hash(shardBytes),
Length: len(shardBytes),
Peer: nil,
Available: false,
Contents: shardBytes,
Parent: block,
ShardIndex: shardIndex,
}
}
this.blocks[block.Id] = block
Log.Debug.Printf("Registered new block %d with %d shards", block.Id, len(block.Shards))
return block.Id
}
func (this *BlockStore) RecoverBlock(blockId BlockId) []byte {
this.mu.Lock()
defer this.mu.Unlock()
// verify block exists
block := this.blocks[blockId]
if block == nil {
return nil
}
// recover the block
Log.Debug.Printf("Begin recovery of block %d", block.Id)
shardBytes := make([][]byte, 0, block.K)
shardChunks := make([]int, 0, block.K)
for shardIndex, shard := range block.Shards {
if shard.Peer == nil || !shard.Available {
Log.Debug.Printf("Skipping shard %d: no peer or not available", shard.Id)
continue
}
Log.Debug.Printf("Attempting to retrieve shard %d from peer %s", shard.Id, shard.Peer.id.String())
currBytes := shard.Peer.retrieveShard(shard)
if currBytes == nil {
Log.Warn.Printf("Failed to retrieve shard %d from peer %s (empty response)", shard.Id, shard.Peer.id.String())
continue
}
if bytes.Equal(hash(currBytes), shard.Hash) {
Log.Debug.Printf("Retrieved shard %d successfully (idx=%d, len=%d)", shard.Id, shardIndex, len(currBytes))
shardBytes = append(shardBytes, currBytes)
shardChunks = append(shardChunks, shardIndex)
if len(shardBytes) >= block.K {
break
}
} else {
Log.Warn.Printf("Failed to retrieve shard %d from peer %s (hash check failed, len=%d)", shard.Id, shard.Peer.id.String(), len(currBytes))
continue
}
}
if len(shardBytes) < block.K {
Log.Warn.Printf("Failed to retrieve block %d: only got %d shards", block.Id, len(shardBytes))
return nil
}
blockBytes := erasureDecode(shardBytes, block.K, block.N, shardChunks)
if !bytes.Equal(hash(blockBytes), block.Hash) {
Log.Error.Printf("Failed to recover block %d: hash check failed even though we retrieved K shards", block.Id)
return nil
}
Log.Debug.Printf("Successfully recovered block %d", block.Id)
return blockBytes
}
func (this *BlockStore) update() {
this.mu.Lock()
defer this.mu.Unlock()
// search for shards that don't have peers
for _, block := range this.blocks {
// first pass: find existing used peers
ignorePeers := make(map[PeerId]bool)
for _, shard := range block.Shards {
if shard.Peer != nil {
ignorePeers[shard.Peer.id] = true
}
}
// second pass: actually find new peers
for _, shard := range block.Shards {
if shard.Peer == nil {
availablePeer := this.peerList.FindAvailablePeer(shard.Length, ignorePeers, shard.Id)
if availablePeer == nil {
// no available peer for this shard, other shards in this block won't have peers either
break
} else {
shard.Peer = availablePeer
ignorePeers[shard.Peer.id] = true
}
}
}
}
// commit shard data to peers
// we only commit once per update iteration to avoid hogging the lock?
for _, block := range this.blocks {
for _, shard := range block.Shards {
if shard.Peer != nil && !shard.Available {
Log.Debug.Printf("Committing shard %d to peer %s", shard.Id, shard.Peer.id.String())
if shard.Peer.storeShard(shard) {
shard.Available = true
shard.Contents = nil
}
break
}
}
}
}
/*
* blockstore metadata can be written and read from the disk using Save/Load functions below.
* The file format is a block on each line, consisting of string:
* [blockid]:[fileid]:[file_offset]:[N]:[K]:[hex(hash)]:[shard1],[shard2],...,[shardn],
* Each shard looks like:
[shardid]/[length]/[peerid]/[available]/[hex(hash)]
*/
func (this *BlockStore) Save() bool {
this.mu.Lock()
defer this.mu.Unlock()
Log.Info.Printf("Saving metadata to blockstore.dat (%d blocks)", len(this.blocks))
fout, err := os.Create("blockstore.dat")
if err != nil {
Log.Warn.Printf("Failed to save metadata to blockstore.dat: %s", err.Error())
return false
}
defer fout.Close()
for _, block := range this.blocks {
blockDump := fmt.Sprintf("%d:%s:%d:%d:%d:%s:", block.Id, block.ParentFile, block.FileOffset, block.N, block.K, hex.EncodeToString(block.Hash))
for _, shard := range block.Shards {
peerString := ""
if shard.Peer != nil {
peerString = shard.Peer.id.String()
}
blockDump += fmt.Sprintf("%d/%d/%s/%d/%s", shard.Id, shard.Length, peerString, boolToInt(shard.Available), hex.EncodeToString(shard.Hash)) + ","
}
blockDump += "\n"
fout.Write([]byte(blockDump))
}
return true
}
// Load restores blockstore metadata previously written by Save from
// blockstore.dat. Returns false if the file cannot be read or contains an
// invalid shard entry. See the format comment above Save.
func (this *BlockStore) Load() bool {
	this.mu.Lock()
	defer this.mu.Unlock()
	fin, err := os.Open("blockstore.dat")
	if err != nil {
		Log.Warn.Printf("Failed to read metadata from blockstore.dat: %s", err.Error())
		return false
	}
	defer fin.Close()
	scanner := bufio.NewScanner(fin)
	for scanner.Scan() {
		// Block line: [id]:[fileid]:[offset]:[N]:[K]:[hash]:[shards...]
		parts := strings.SplitN(scanner.Text(), ":", 7)
		if len(parts) != 7 {
			continue
		}
		block := &Block{}
		block.Id = BlockId(strToInt64(parts[0]))
		block.ParentFile = FileId(parts[1])
		block.FileOffset = strToInt(parts[2])
		block.N = strToInt(parts[3])
		block.K = strToInt(parts[4])
		block.Hash, _ = hex.DecodeString(parts[5])
		shardStrings := strings.Split(parts[6], ",")
		block.Shards = make([]*BlockShard, len(shardStrings) - 1) // last element of shardStrings is empty
		for i, shardString := range shardStrings {
			if i < len(block.Shards) {
				shardParts := strings.Split(shardString, "/")
				if len(shardParts) != 5 {
					Log.Warn.Printf("Failed to read metadata from blockstore.dat: invalid shard [%s]", shardString)
					return false
				}
				shard := &BlockShard{}
				shard.Id = BlockShardId(strToInt64(shardParts[0]))
				shard.Length = strToInt(shardParts[1])
				shard.Available = false
				// An empty peer field means no peer was assigned yet.
				if shardParts[2] != "" {
					shard.Peer = this.peerList.DiscoveredPeer(strToPeerId(shardParts[2]))
					// check if configuration indicates this shard has been replicated on the peer
					// if so, we want to notify the peer object so that we correctly account for our space usage
					// if there's a problem, then we may actually have to replicate on a new peer...
					// (e.g. we may have used the space for something else, and have strange accounting now?)
					cfgAvailable := strToInt(shardParts[3]) == 1
					// Reserve bytes whenever a peer is assigned (even if not
					// yet replicated), so peer space accounting stays correct;
					// Available is then restored from the saved flag.
					if shard.Peer.reserveBytes(shard.Length, shard.Id) {
						shard.Available = cfgAvailable
					} else {
						// failed to reserve, something bad happened in our accounting?
						// we should replicate it elsewhere
						shard.Peer = nil
					}
				}
				shard.Hash, _ = hex.DecodeString(shardParts[4])
				shard.Parent = block
				shard.ShardIndex = i
				block.Shards[i] = shard
			}
		}
		this.blocks[block.Id] = block
	}
	Log.Info.Printf("Loaded %d blocks", len(this.blocks))
	return true
}
|
/*Package seq provides basic types and operations for biological sequences.
This package defines a *Seq* type, and provides some basic operations of sequence,
like validation of DNA/RNA/Protein sequence and getting reverse complement sequence.
This package was inspired by
[biogo](https://code.google.com/p/biogo/source/browse/#git%2Falphabet).
IUPAC nucleotide code: ACGTURYSWKMBDHVN
http://droog.gs.washington.edu/parc/images/iupac.html
code base Complement
A A T
C C G
G G C
T/U T A
R A/G Y
Y C/T R
S C/G S
W A/T W
K G/T M
M A/C K
B C/G/T V
D A/G/T H
H A/C/T D
V A/C/G B
X/N A/C/G/T X
. not A/C/G/T
or- gap
IUPAC amino acid code: `ACGTRYSWKMBDHV`
A Ala Alanine
B Asx Aspartic acid or Asparagine [2]
C Cys Cysteine
D Asp Aspartic Acid
E Glu Glutamic Acid
F Phe Phenylalanine
G Gly Glycine
H His Histidine
I Ile Isoleucine
J Isoleucine or Leucine [4]
K Lys Lysine
L Leu Leucine
M Met Methionine
N Asn Asparagine
P Pro Proline
Q Gln Glutamine
R Arg Arginine
S Ser Serine
T Thr Threonine
V Val Valine
W Trp Tryptophan
Y Tyr Tyrosine
Z Glx Glutamine or Glutamic acid [2]
Other links:
1. http://www.bioinformatics.org/sms/iupac.html
2. http://www.dnabaser.com/articles/IUPAC%20ambiguity%20codes.html
3. http://www.bioinformatics.org/sms2/iupac.html
4. http://www.matrixscience.com/blog/non-standard-amino-acid-residues.html
*/
package seq
import (
"errors"
"fmt"
"github.com/shenwei356/util/byteutil"
)
/*Alphabet could be defined. Attention that,
**the letters are case sensitive**.
For example, DNA:
DNA, _ = NewAlphabet(
"DNA",
[]byte("acgtACGT"),
[]byte("tgcaTGCA"),
[]byte(" -"),
[]byte("nN"))
*/
// Alphabet defines a set of valid sequence letters and their pairings
// (complements). Letters are case sensitive.
type Alphabet struct {
	t         string // name of the alphabet, e.g. "DNA"
	isUnlimit bool   // when true, every byte is considered a valid letter
	letters   []byte // allowed letters
	pairs     []byte // pairs[i] is the pair (complement) of letters[i]
	gap       []byte // gap symbols; each pairs with itself
	ambiguous []byte // ambiguity codes; each pairs with itself
	pairLetters map[byte]byte // letter -> pair lookup (nil for unlimited alphabets)
}
// NewAlphabet is the constructor for type *Alphabet*.
// letters and pairs must have equal length; letters[i] pairs with pairs[i].
// Gap and ambiguous symbols pair with themselves. For unlimited alphabets
// no pairing table is built and the remaining arguments may be nil.
func NewAlphabet(
	t string,
	isUnlimit bool,
	letters []byte,
	pairs []byte,
	gap []byte,
	ambiguous []byte,
) (*Alphabet, error) {
	a := &Alphabet{
		t:         t,
		isUnlimit: isUnlimit,
		letters:   letters,
		pairs:     pairs,
		gap:       gap,
		ambiguous: ambiguous,
	}
	if isUnlimit {
		return a, nil
	}
	if len(letters) != len(pairs) {
		return a, errors.New("mismatch of length of letters and pairs")
	}
	a.pairLetters = make(map[byte]byte, len(letters))
	for i, letter := range letters {
		a.pairLetters[letter] = pairs[i]
	}
	// Gap and ambiguous codes map to themselves.
	for _, g := range gap {
		a.pairLetters[g] = g
	}
	for _, amb := range ambiguous {
		a.pairLetters[amb] = amb
	}
	return a, nil
}
// Type returns the name of the alphabet, e.g. "DNA".
func (a *Alphabet) Type() string {
	return a.t
}
// Letters returns the allowed letters. The returned slice is not a copy.
func (a *Alphabet) Letters() []byte {
	return a.letters
}
// Gaps returns the gap symbols. The returned slice is not a copy.
func (a *Alphabet) Gaps() []byte {
	return a.gap
}
// String returns the name of the alphabet (same as Type).
func (a *Alphabet) String() string {
	return a.t
}
// IsValidLetter reports whether b is an allowed letter of this alphabet.
// Unlimited alphabets accept every byte.
func (a *Alphabet) IsValidLetter(b byte) bool {
	if a.isUnlimit {
		return true
	}
	_, found := a.pairLetters[b]
	return found
}
// IsValid checks that every byte of s is a valid letter of the alphabet.
// Empty input and unlimited (or nil) alphabets are always valid; otherwise
// an error naming the first offending byte is returned.
func (a *Alphabet) IsValid(s []byte) error {
	if len(s) == 0 {
		return nil
	}
	if a == nil || a.isUnlimit {
		return nil
	}
	for _, b := range s {
		if !a.IsValidLetter(b) {
			// BUG FIX: the error message misspelled "letter" as "lebtter".
			return fmt.Errorf("invalid %s letter: %s", a, []byte{b})
		}
	}
	return nil
}
// PairLetter returns the pair (complement) letter for b, or an error if b
// is not a valid letter of this alphabet. Unlimited alphabets map every
// byte to itself.
func (a *Alphabet) PairLetter(b byte) (byte, error) {
	if a.isUnlimit {
		return b, nil
	}
	p, ok := a.pairLetters[b]
	if !ok {
		return b, fmt.Errorf("invalid letter: %c", b)
	}
	return p, nil
}
/*Four types of alphabets are pre-defined:
DNA Deoxyribonucleotide code
DNAredundant DNA + Ambiguity Codes
RNA Ribonucleotide code
RNAredundant RNA + Ambiguity Codes
Protein Amino Acid single-letter Code
Unlimit Self-defined, including all 26 English letters
*/
var (
DNA *Alphabet
DNAredundant *Alphabet
RNA *Alphabet
RNAredundant *Alphabet
Protein *Alphabet
Unlimit *Alphabet
abProtein map[byte]bool
abDNAredundant map[byte]bool
abDNA map[byte]bool
abRNAredundant map[byte]bool
abRNA map[byte]bool
)
func init() {
DNA, _ = NewAlphabet(
"DNA",
false,
[]byte("acgtACGT."),
[]byte("tgcaTGCA."),
[]byte(" -"),
[]byte("nxNX"))
DNAredundant, _ = NewAlphabet(
"DNAredundant",
false,
[]byte("acgtryswkmbdhvACGTRYSWKMBDHV."),
[]byte("tgcayrswmkvhdbTGCAYRSWMKVHDB."),
[]byte(" -"),
[]byte("nxNX"))
RNA, _ = NewAlphabet(
"RNA",
false,
[]byte("acguACGU"),
[]byte("ugcaUGCA"),
[]byte(" -"),
[]byte("nxNX"))
RNAredundant, _ = NewAlphabet(
"RNAredundant",
false,
[]byte("acguryswkmbdhvACGURYSWKMBDHV."),
[]byte("ugcayrswmkvhdbUGCAYRSWMKVHDB."),
[]byte(" -"),
[]byte("nxNX"))
Protein, _ = NewAlphabet(
"Protein",
false,
[]byte("abcdefghijklmnpqrstvwyzABCDEFGHIJKLMNPQRSTVWYZ*_."),
[]byte("abcdefghijklmnpqrstvwyzABCDEFGHIJKLMNPQRSTVWYZ*_."),
[]byte(" -"),
[]byte("xX"))
Unlimit, _ = NewAlphabet(
"Unlimit",
true,
nil,
nil,
nil,
nil)
abProtein = slice2map(byteutil.Alphabet(append(Protein.letters, Protein.Gaps()...)))
abDNAredundant = slice2map(byteutil.Alphabet(append(DNAredundant.letters, DNAredundant.Gaps()...)))
abDNA = slice2map(byteutil.Alphabet(append(DNA.letters, DNA.Gaps()...)))
abRNAredundant = slice2map(byteutil.Alphabet(append(RNAredundant.letters, RNAredundant.Gaps()...)))
abRNA = slice2map(byteutil.Alphabet(append(RNA.letters, RNA.Gaps()...)))
}
// GuessAlphabet guesses the alphabet of a sequence by testing, from most
// specific to least, whether the set of bytes occurring in seqs is a subset
// of a pre-defined alphabet's letter set (DNA, RNA, their redundant
// variants, then Protein). Falls back to Unlimit when nothing matches.
func GuessAlphabet(seqs []byte) *Alphabet {
	// Set of distinct bytes present in the sequence.
	alphabetMap := slice2map(byteutil.Alphabet(seqs))
	if isSubset(alphabetMap, abDNA) {
		return DNA
	}
	if isSubset(alphabetMap, abRNA) {
		return RNA
	}
	if isSubset(alphabetMap, abDNAredundant) {
		return DNAredundant
	}
	if isSubset(alphabetMap, abRNAredundant) {
		return RNAredundant
	}
	if isSubset(alphabetMap, abProtein) {
		return Protein
	}
	return Unlimit
}
// GuessAlphabetLessConservatively behaves like GuessAlphabet but widens the
// plain DNA/RNA guesses to their redundant (ambiguity-code) variants.
func GuessAlphabetLessConservatively(seqs []byte) *Alphabet {
	switch ab := GuessAlphabet(seqs); ab {
	case DNA:
		return DNAredundant
	case RNA:
		return RNAredundant
	default:
		return ab
	}
}
// isSubset reports whether every key of query is also a key of subject.
func isSubset(query, subject map[byte]bool) bool {
	for k := range query {
		if _, present := subject[k]; !present {
			return false
		}
	}
	return true
}
// slice2map converts a byte slice into a set represented as map[byte]bool,
// with every occurring byte mapped to true.
func slice2map(s []byte) map[byte]bool {
	set := make(map[byte]bool, len(s))
	for _, c := range s {
		set[c] = true
	}
	return set
}
fix bug of GuessAlphabet
/*Package seq provides basic types and operations for biological sequences.
This package defines a *Seq* type, and provides some basic operations of sequence,
like validation of DNA/RNA/Protein sequence and getting reverse complement sequence.
This package was inspired by
[biogo](https://code.google.com/p/biogo/source/browse/#git%2Falphabet).
IUPAC nucleotide code: ACGTURYSWKMBDHVN
http://droog.gs.washington.edu/parc/images/iupac.html
code base Complement
A A T
C C G
G G C
T/U T A
R A/G Y
Y C/T R
S C/G S
W A/T W
K G/T M
M A/C K
B C/G/T V
D A/G/T H
H A/C/T D
V A/C/G B
X/N A/C/G/T X
. not A/C/G/T
or- gap
IUPAC amino acid code: `ACGTRYSWKMBDHV`
A Ala Alanine
B Asx Aspartic acid or Asparagine [2]
C Cys Cysteine
D Asp Aspartic Acid
E Glu Glutamic Acid
F Phe Phenylalanine
G Gly Glycine
H His Histidine
I Ile Isoleucine
J Isoleucine or Leucine [4]
K Lys Lysine
L Leu Leucine
M Met Methionine
N Asn Asparagine
P Pro Proline
Q Gln Glutamine
R Arg Arginine
S Ser Serine
T Thr Threonine
V Val Valine
W Trp Tryptophan
Y Tyr Tyrosine
Z Glx Glutamine or Glutamic acid [2]
Other links:
1. http://www.bioinformatics.org/sms/iupac.html
2. http://www.dnabaser.com/articles/IUPAC%20ambiguity%20codes.html
3. http://www.bioinformatics.org/sms2/iupac.html
4. http://www.matrixscience.com/blog/non-standard-amino-acid-residues.html
*/
package seq
import (
"errors"
"fmt"
"github.com/shenwei356/util/byteutil"
)
/*Alphabet could be defined. Attention that,
**the letters are case sensitive**.
For example, DNA:
DNA, _ = NewAlphabet(
"DNA",
[]byte("acgtACGT"),
[]byte("tgcaTGCA"),
[]byte(" -"),
[]byte("nN"))
*/
type Alphabet struct {
t string
isUnlimit bool
letters []byte
pairs []byte
gap []byte
ambiguous []byte
pairLetters map[byte]byte
}
// NewAlphabet is Constructor for type *Alphabet*
func NewAlphabet(
t string,
isUnlimit bool,
letters []byte,
pairs []byte,
gap []byte,
ambiguous []byte,
) (*Alphabet, error) {
a := &Alphabet{t, isUnlimit, letters, pairs, gap, ambiguous, nil}
if isUnlimit {
return a, nil
}
if len(letters) != len(pairs) {
return a, errors.New("mismatch of length of letters and pairs")
}
a.pairLetters = make(map[byte]byte, len(letters))
for i := 0; i < len(letters); i++ {
a.pairLetters[letters[i]] = pairs[i]
}
// add gap and ambiguous code
for _, v := range gap {
a.pairLetters[v] = v
}
for _, v := range ambiguous {
a.pairLetters[v] = v
}
return a, nil
}
// Type returns type of the alphabet
func (a *Alphabet) Type() string {
return a.t
}
// Letters returns letters
func (a *Alphabet) Letters() []byte {
return a.letters
}
// Gaps returns gaps
func (a *Alphabet) Gaps() []byte {
return a.gap
}
// AmbiguousLetters returns the alphabet's ambiguity-code letters
// (e.g. n/x/N/X for DNA). The returned slice is not a copy.
func (a *Alphabet) AmbiguousLetters() []byte {
	return a.ambiguous
}
// AllLetters returns a newly allocated slice containing the alphabet's
// letters, gap symbols and ambiguous letters, in that order.
func (a *Alphabet) AllLetters() []byte {
	// BUG FIX: the previous `append(a.letters, ...)` could write into
	// a.letters' backing array when spare capacity existed, corrupting the
	// alphabet; build a fresh slice instead.
	letters := make([]byte, 0, len(a.letters)+len(a.gap)+len(a.ambiguous))
	letters = append(letters, a.letters...)
	letters = append(letters, a.gap...)
	letters = append(letters, a.ambiguous...)
	return letters
}
// String returns type of the alphabet
func (a *Alphabet) String() string {
return a.t
}
// IsValidLetter is used to validate a letter
func (a *Alphabet) IsValidLetter(b byte) bool {
if a.isUnlimit {
return true
}
_, ok := a.pairLetters[b]
return ok
}
// IsValid checks that every byte of s is a valid letter of the alphabet.
// Empty input and unlimited (or nil) alphabets are always valid; otherwise
// an error naming the first offending byte is returned.
func (a *Alphabet) IsValid(s []byte) error {
	if len(s) == 0 {
		return nil
	}
	if a == nil || a.isUnlimit {
		return nil
	}
	for _, b := range s {
		if !a.IsValidLetter(b) {
			// BUG FIX: the error message misspelled "letter" as "lebtter".
			return fmt.Errorf("invalid %s letter: %s", a, []byte{b})
		}
	}
	return nil
}
// PairLetter return the Pair Letter
func (a *Alphabet) PairLetter(b byte) (byte, error) {
if a.isUnlimit {
return b, nil
}
if !a.IsValidLetter(b) {
return b, fmt.Errorf("invalid letter: %c", b)
}
v, _ := a.pairLetters[b]
return v, nil
}
/*Four types of alphabets are pre-defined:
DNA Deoxyribonucleotide code
DNAredundant DNA + Ambiguity Codes
RNA Ribonucleotide code
RNAredundant RNA + Ambiguity Codes
Protein Amino Acid single-letter Code
Unlimit Self-defined, including all 26 English letters
*/
var (
DNA *Alphabet
DNAredundant *Alphabet
RNA *Alphabet
RNAredundant *Alphabet
Protein *Alphabet
Unlimit *Alphabet
abProtein map[byte]bool
abDNAredundant map[byte]bool
abDNA map[byte]bool
abRNAredundant map[byte]bool
abRNA map[byte]bool
)
func init() {
DNA, _ = NewAlphabet(
"DNA",
false,
[]byte("acgtACGT."),
[]byte("tgcaTGCA."),
[]byte(" -"),
[]byte("nxNX"))
DNAredundant, _ = NewAlphabet(
"DNAredundant",
false,
[]byte("acgtryswkmbdhvACGTRYSWKMBDHV."),
[]byte("tgcayrswmkvhdbTGCAYRSWMKVHDB."),
[]byte(" -"),
[]byte("nxNX"))
RNA, _ = NewAlphabet(
"RNA",
false,
[]byte("acguACGU"),
[]byte("ugcaUGCA"),
[]byte(" -"),
[]byte("nxNX"))
RNAredundant, _ = NewAlphabet(
"RNAredundant",
false,
[]byte("acguryswkmbdhvACGURYSWKMBDHV."),
[]byte("ugcayrswmkvhdbUGCAYRSWMKVHDB."),
[]byte(" -"),
[]byte("nxNX"))
Protein, _ = NewAlphabet(
"Protein",
false,
[]byte("abcdefghijklmnpqrstvwyzABCDEFGHIJKLMNPQRSTVWYZ*_."),
[]byte("abcdefghijklmnpqrstvwyzABCDEFGHIJKLMNPQRSTVWYZ*_."),
[]byte(" -"),
[]byte("xX"))
Unlimit, _ = NewAlphabet(
"Unlimit",
true,
nil,
nil,
nil,
nil)
abProtein = slice2map(byteutil.Alphabet(Protein.AllLetters()))
abDNAredundant = slice2map(byteutil.Alphabet(DNAredundant.AllLetters()))
abDNA = slice2map(byteutil.Alphabet(DNA.AllLetters()))
abRNAredundant = slice2map(byteutil.Alphabet(RNAredundant.AllLetters()))
abRNA = slice2map(byteutil.Alphabet(RNA.AllLetters()))
}
// GuessAlphabet guesses alphabet by given
func GuessAlphabet(seqs []byte) *Alphabet {
alphabetMap := slice2map(byteutil.Alphabet(seqs))
if isSubset(alphabetMap, abDNA) {
return DNA
}
if isSubset(alphabetMap, abRNA) {
return RNA
}
if isSubset(alphabetMap, abDNAredundant) {
return DNAredundant
}
if isSubset(alphabetMap, abRNAredundant) {
return RNAredundant
}
if isSubset(alphabetMap, abProtein) {
return Protein
}
return Unlimit
}
// GuessAlphabetLessConservatively change DNA to DNAredundant and RNA to RNAredundant
func GuessAlphabetLessConservatively(seqs []byte) *Alphabet {
ab := GuessAlphabet(seqs)
if ab == DNA {
return DNAredundant
}
if ab == RNA {
return RNAredundant
}
return ab
}
func isSubset(query, subject map[byte]bool) bool {
for b := range query {
if _, ok := subject[b]; !ok {
return false
}
}
return true
}
func slice2map(s []byte) map[byte]bool {
m := make(map[byte]bool)
for _, b := range s {
m[b] = true
}
return m
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
"strings"
)
// RunIntcode executes an Intcode program and returns the final memory state.
// The input slice is not modified; execution works on a copy.
// Opcodes: 1 = add, 2 = multiply, 99 = halt. Each instruction occupies 4
// cells (opcode, src1, src2, dst) except halt.
func RunIntcode(input []int) []int {
	memory := make([]int, len(input))
	copy(memory, input)
	for i := 0; i < len(memory); i += 4 {
		switch memory[i] {
		case 1:
			// add
			memory[memory[i+3]] = memory[memory[i+1]] + memory[memory[i+2]]
		case 2:
			// multiply
			memory[memory[i+3]] = memory[memory[i+1]] * memory[memory[i+2]]
		case 99:
			// BUG FIX: a bare "break" only exits the switch, so execution
			// kept scanning past the halt instruction and could misinterpret
			// data cells as opcodes; return stops the machine.
			return memory
		}
	}
	return memory
}
// main reads a comma-separated Intcode program from the file "input",
// patches positions 1 and 2 per the puzzle statement, runs the program and
// prints the value left at position 0 (part 1).
func main() {
	file, err := os.Open("input")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	var input []int
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		// Each line is a comma-separated list of integers.
		intsStr := strings.Split(scanner.Text(), ",")
		for _, intStr := range intsStr {
			i, err := strconv.Atoi(intStr)
			if err != nil {
				log.Fatal(err)
			}
			input = append(input, i)
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	// modify input (restore the "1202 program alarm" state: noun=12, verb=2)
	input[1] = 12
	input[2] = 2
	output := RunIntcode(input)
	fmt.Printf("Part 1: Position 0: %v\n", output[0])
}
2019/02p2: Find noun and verb
package main
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
"strings"
)
// RunIntcode executes an Intcode program (AoC 2019 day 2): opcode 1 adds,
// opcode 2 multiplies (operands and destination are positional), opcode 99
// halts. The input slice is copied first and never modified; the final
// memory image is returned.
func RunIntcode(input []int) []int {
	output := make([]int, len(input))
	copy(output, input)
	for i := 0; i < len(output); i += 4 {
		switch output[i] {
		case 1:
			// add
			output[output[i+3]] = output[output[i+1]] + output[output[i+2]]
		case 2:
			// multiply
			output[output[i+3]] = output[output[i+1]] * output[output[i+2]]
		case 99:
			// halt. BUG FIX: a bare `break` here only exited the switch,
			// so execution continued past the halt instruction and could
			// misinterpret trailing data as opcodes.
			return output
		}
	}
	return output
}
// main reads the Intcode program from "input", prints the part-1 answer
// (noun=12, verb=2), then searches every noun/verb pair in [0, 99] for the
// one producing 19690720 and prints 100*noun+verb.
func main() {
	file, err := os.Open("input")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	var input []int
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		for _, field := range strings.Split(scanner.Text(), ",") {
			n, err := strconv.Atoi(field)
			if err != nil {
				log.Fatal(err)
			}
			input = append(input, n)
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	// Part 1: fixed 1202 patch. RunIntcode copies its argument, so input
	// itself stays pristine and can be reused for the search below.
	input[1] = 12
	input[2] = 2
	output := RunIntcode(input)
	fmt.Printf("Part 1: Position 0: %v\n", output[0])
	// Part 2: noun and verb each range over 0..99 INCLUSIVE (the puzzle
	// states "between 0 and 99"); the original `< 99` bound skipped 99.
	// Return as soon as the target is found instead of scanning on.
	for noun := 0; noun <= 99; noun++ {
		for verb := 0; verb <= 99; verb++ {
			input[1] = noun
			input[2] = verb
			if RunIntcode(input)[0] == 19690720 {
				fmt.Printf("Part 2: %v\n", 100*noun+verb)
				return
			}
		}
	}
}
|
package gas
import (
"io"
"io/ioutil"
"sync"
)
var (
fs *FS
lock sync.RWMutex
)
// init initializes the package-level FS using the default (UnitedFS).
func init() {
	fs = UnitedFS()
}

// Refresh rebuilds the internal FS to reflect possible changes in the
// UnitedFS. Safe for concurrent use with Open/Abs via the package lock.
func Refresh() {
	lock.Lock()
	defer lock.Unlock()
	fs = UnitedFS()
}

// Open opens the named file for reading or returns an error.
//
// For more information, see the FS type.
func Open(file string) (io.ReadCloser, error) {
	lock.RLock()
	defer lock.RUnlock()
	return fs.Open(file)
}

// Abs returns the absolute filepath for the requested resource or an error
// if it is not found.
func Abs(file string) (string, error) {
	lock.RLock()
	defer lock.RUnlock()
	return fs.Abs(file, true)
}

// ReadFile returns the entire contents of the file at the given gopath.
func ReadFile(file string) ([]byte, error) {
	rc, err := Open(file)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}
Include MustAbs, allowing use in global variable initialization (panics instead of returning an error)
package gas
import (
"io"
"io/ioutil"
"sync"
)
var (
fs *FS
lock sync.RWMutex
)
// init initializes the package-level FS using the default (UnitedFS).
func init() {
	fs = UnitedFS()
}

// Refresh rebuilds the internal FS to reflect possible changes in the
// UnitedFS. Safe for concurrent use with Open/Abs via the package lock.
func Refresh() {
	lock.Lock()
	defer lock.Unlock()
	fs = UnitedFS()
}

// Open opens the named file for reading or returns an error.
//
// For more information, see the FS type.
func Open(file string) (io.ReadCloser, error) {
	lock.RLock()
	defer lock.RUnlock()
	return fs.Open(file)
}

// Abs returns the absolute filepath for the requested resource or an error
// if it is not found.
func Abs(file string) (string, error) {
	lock.RLock()
	defer lock.RUnlock()
	return fs.Abs(file, true)
}
// MustAbs ensures that the given file is present in the system; if the file
// can't be found, it panics with the underlying error. Intended for global
// variable initialization where returning an error is impossible.
func MustAbs(file string) string {
	ret, err := Abs(file)
	if err != nil {
		// BUG FIX: this previously panicked with ret (the empty string on
		// failure), losing the reason entirely. Panic with the error.
		panic(err)
	}
	return ret
}
// ReadFile returns the entire contents of the file at the given gopath.
func ReadFile(file string) ([]byte, error) {
	rc, err := Open(file)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}
|
package main
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
)
// mpath is a marker path: a symlink name of the form ".NAME###PATH" where
// any "/" in PATH is encoded as ",#,".
type mpath struct {
	name string
	path string
}

// vdir is a content-addressed volume directory name (64 hex chars).
type vdir string

var mpathre = regexp.MustCompile(`^\.(.*)###(.*)$`)
var vdirre = regexp.MustCompile(`^[a-f0-9]{64}$`)

// newmpath parses a ".NAME###PATH" marker name, decoding ",#," back to "/".
// It returns nil when mp does not match the marker syntax.
func newmpath(mp string) *mpath {
	groups := mpathre.FindStringSubmatch(mp)
	if len(groups) != 3 {
		return nil
	}
	return &mpath{
		name: groups[1],
		path: strings.Replace(groups[2], ",#,", "/", -1),
	}
}

// String renders the marker back to its on-disk symlink name, encoding "/"
// as ",#," (inverse of newmpath).
func (mp *mpath) String() string {
	encoded := strings.Replace(mp.path, "/", ",#,", -1)
	return fmt.Sprintf(".%s###%s", mp.name, encoded)
}

// newvdir validates vd as a sha256-style directory name; it returns the
// empty vdir when vd is not exactly 64 lowercase hex characters.
func newvdir(vd string) vdir {
	if vdirre.MatchString(vd) {
		return vdir(vd)
	}
	return vdir("")
}
// marker associates a named mount path (mp) with the content-addressed
// volume directory (dir) its symlink points at.
type marker struct {
	mp *mpath
	dir vdir
}

type markers []*marker

// allmarkers holds every valid marker symlink discovered by readVolumes.
var allmarkers = markers{}

// getMarker returns the marker in marks whose mpath matches name/path, or a
// freshly built one pointing at dir when none matches.
// NOTE(review): a newly created marker is NOT appended to marks (or to
// allmarkers) — confirm whether that is intentional.
func (marks markers) getMarker(name string, path string, dir vdir) *marker {
	var mark *marker
	for _, marker := range marks {
		if marker.mp.name == name && marker.mp.path == path {
			mark = marker
			break
		}
	}
	if mark == nil {
		mark = &marker{mp: &mpath{name: name, path: path}, dir: dir}
	}
	// An existing marker that points at a different dir needs relinking.
	if mark.dir != dir {
		// TODO: move dir and ln -s mark.dir dir
		fmt.Printf("Mark container named '%s' for path '%s' as link to '%s' (from '%s')\n", name, path, mark.dir, dir)
	}
	return mark
}
// volume describes one docker vfs volume: the container mount path (once
// known), the directory name (sha256 hex) and its marker, if any.
type volume struct {
	path string
	dir vdir
	mark *marker
}

type volumes []*volume

// allvolumes holds every volume directory discovered by readVolumes.
var allvolumes = volumes{}

func (v *volume) String() string {
	return "vol '" + string(v.dir) + "'"
}

// getVolume looks up the volume whose directory is vd, filling in its path
// (first caller wins; mismatches are only reported) and marker on demand.
// NOTE(review): returns nil when vd is unknown — callers must handle a nil
// result (a later revision creates the volume instead).
func (vols volumes) getVolume(vd vdir, path string, name string) *volume {
	var vol *volume
	for _, volume := range vols {
		if string(volume.dir) == string(vd) {
			vol = volume
			if vol.path == "" {
				vol.path = path
			}
			if vol.path != path {
				fmt.Printf("Invalid volume path detected: '%s' (vs. container volume path '%s')\n", vol.path, path)
			}
			// TODO check marker
			if vol.mark == nil {
				vol.mark = allmarkers.getMarker(name, vol.path, vol.dir)
			}
			break
		}
	}
	return vol
}
// container mirrors `docker inspect` output: name, full id, stopped state
// and the volumes it mounts.
type container struct {
	name string
	id string
	stopped bool
	volumes []*volume
}

// trunc returns a shortened container id for display.
// NOTE(review): assumes len(id) is 0 or >= 7; an id of 1-6 chars would panic.
func (c *container) trunc() string {
	if len(c.id) > 0 {
		return c.id[:7]
	}
	return ""
}

func (c *container) String() string {
	return "cnt '" + c.name + "' (" + c.trunc() + ")" + fmt.Sprintf("[%v] - %d vol", c.stopped, len(c.volumes))
}

// containers holds every container discovered by readContainer.
var containers = []*container{}

// mustcmd runs acmd through cmd and aborts the whole program on failure.
func mustcmd(acmd string) string {
	out, err := cmd(acmd)
	if err != nil {
		log.Fatal(fmt.Sprintf("out='%s', err='%s'", out, err))
	}
	return string(out)
}

// cmd echoes then executes the given command via `sh -c`, returning its
// trimmed standard output.
func cmd(cmd string) (string, error) {
	fmt.Println(cmd)
	out, err := exec.Command("sh", "-c", cmd).Output()
	return strings.TrimSpace(string(out)), err
}
// readVolumes scans /mnt/sda1/var/lib/docker/vfs/dir and classifies each
// entry by its `ls -F` suffix: '@' = marker symlink, '/' = volume
// directory, anything else is unexpected. Valid markers are collected into
// allmarkers, valid volume dirs into allvolumes; every invalid entry is
// DELETED with `sudo rm`.
func readVolumes() {
	out := mustcmd("sudo ls -a1F /mnt/sda1/var/lib/docker/vfs/dir")
	vollines := strings.Split(out, "\n")
	// fmt.Println(vollines)
	for _, volline := range vollines {
		dir := volline
		if dir == "./" || dir == "../" {
			continue
		}
		if strings.HasSuffix(dir, "@") {
			// Symlink: must be a marker named ".name###path" resolving to
			// an existing sha256 volume directory; strip the '@' suffix.
			dir = dir[:len(dir)-1]
			fdir := fmt.Sprintf("/mnt/sda1/var/lib/docker/vfs/dir/%s", dir)
			mp := newmpath(dir)
			if mp == nil {
				fmt.Printf("Invalid marker detected: '%s'\n", dir)
				mustcmd("sudo rm " + fdir)
			} else {
				dirlink, err := cmd("sudo readlink " + fdir)
				fmt.Printf("---\ndir: '%s'\ndlk: '%s'\nerr='%v'\n", dir, dirlink, err)
				if err != nil {
					fmt.Printf("Invalid marker (no readlink) detected: '%s'\n", dir)
					mustcmd("sudo rm " + fdir)
				} else {
					// The link target must itself exist under the vfs dir.
					_, err := cmd("sudo ls /mnt/sda1/var/lib/docker/vfs/dir/" + dirlink)
					if err != nil {
						fmt.Printf("Invalid marker (readlink no ls) detected: '%s'\n", dir)
						mustcmd("sudo rm " + fdir)
					} else {
						vd := newvdir(dirlink)
						if vd == "" {
							fmt.Printf("Invalid marker (readlink no vdir) detected: '%s'\n", dir)
							mustcmd("sudo rm " + fdir)
						} else {
							allmarkers = append(allmarkers, &marker{mp, vd})
						}
					}
				}
			}
		} else if strings.HasSuffix(dir, "/") {
			// Directory: its name must be 64 hex chars; strip the '/' suffix.
			dir = dir[:len(dir)-1]
			fdir := fmt.Sprintf("/mnt/sda1/var/lib/docker/vfs/dir/%s", dir)
			vd := newvdir(dir)
			if vd == "" {
				fmt.Printf("Invalid volume folder detected: '%s'\n", dir)
				mustcmd("sudo rm " + fdir)
			} else {
				allvolumes = append(allvolumes, &volume{dir: vd})
			}
		} else {
			// Plain file: never expected in the vfs dir.
			fdir := fmt.Sprintf("/mnt/sda1/var/lib/docker/vfs/dir/%s", dir)
			fmt.Printf("Invalid file detected: '%s'\n", dir)
			mustcmd("sudo rm " + fdir)
		}
	}
	fmt.Printf("volumes: %v\nmarkers: %v\n", allvolumes, allmarkers)
}
// readContainer lists all containers (`docker ps -aq`) and, for each,
// parses the name and (mount path, vfs dir) volume pairs out of
// `docker inspect`, linking them to entries of allvolumes. Results are
// appended to containers.
func readContainer() {
	out := mustcmd("docker ps -aq --no-trunc")
	contlines := strings.Split(out, "\n")
	// fmt.Println(contlines)
	for _, contline := range contlines {
		id := contline
		// "##~#" separates volume pairs in the template output.
		res := mustcmd("docker inspect -f '{{ .Name }},{{ range $key, $value := .Volumes }}{{ $key }},{{ $value }}##~#{{ end }}' " + id)
		// fmt.Println("res1: '" + res + "'")
		// .Name starts with '/', hence the [1:].
		name := res[1:strings.Index(res, ",")]
		cont := &container{name: name, id: id}
		res = res[strings.Index(res, ",")+1:]
		// fmt.Println("res2: '" + res + "'")
		vols := strings.Split(res, "##~#")
		// fmt.Println(vols)
		for _, vol := range vols {
			elts := strings.Split(vol, ",")
			if len(elts) == 2 {
				// fmt.Printf("elts: '%v'\n", elts)
				path := elts[0]
				vfs := elts[1]
				if strings.Contains(vfs, "/var/lib/docker/vfs/dir/") {
					vd := newvdir(filepath.Base(vfs))
					if vd == "" {
						fmt.Printf("Invalid volume folder detected: '%s'\n", vfs)
						break
					}
					// Inline lookup of allvolumes; fills in path/marker on
					// first use (duplicated in volumes.getVolume).
					var newvol *volume
					// TODO uses allvolumes.getVolume here
					for _, volume := range allvolumes {
						if string(volume.dir) == string(vd) {
							newvol = volume
							if newvol.path == "" {
								newvol.path = path
							}
							if newvol.path != path {
								fmt.Printf("Invalid volume path detected: '%s' (vs. container volume path '%s')\n", newvol.path, path)
							}
							// TODO check marker
							if newvol.mark == nil {
							} else {
								if string(newvol.mark.dir) != string(newvol.dir) {
								}
							}
							break
						}
					}
					if newvol == nil {
						// TODO make marker
						newvol = &volume{path: path, dir: vd}
					}
					cont.volumes = append(cont.volumes, newvol)
				}
			}
		}
		containers = append(containers, cont)
	}
	fmt.Printf("containers: %v\n", containers)
}
// accept reports whether marker m matches volume v.
// TODO: not implemented yet — always false.
func (v *volume) accept(m *marker) bool {
	// TODO
	return false
}

// checkContainers tries to attach a marker to every container volume that
// does not have one yet.
func checkContainers() {
	for _, container := range containers {
		for _, volume := range container.volumes {
			if volume.mark == nil {
				for _, mark := range allmarkers {
					if volume.accept(mark) {
						fmt.Printf("Set mark '%v' to volume '%v' of container '%v'\n", mark, volume, container)
						volume.mark = mark
						// TODO check if ln is needed
					}
				}
			}
			if volume.mark == nil {
				// TODO check if vfs folder exist.
				// If it does, make the marker
			}
		}
	}
}

// accept reports whether container c uses volume v.
// TODO: not implemented yet — always false.
func (c *container) accept(v *volume) bool {
	// TODO
	return false
}

// checkVolumes reports volumes referenced by no container (orphans).
func checkVolumes() {
	for _, volume := range allvolumes {
		orphan := true
		for _, container := range containers {
			if container.accept(volume) {
				orphan = false
				break
			}
		}
		if orphan {
			fmt.Printf("Orphan detected, volume '%v'\n", volume)
			// TODO rm if necessary or at least mv _xxx
		}
	}
}

// docker run --rm -i -t -v `pwd`:`pwd` -w `pwd` --entrypoint="/bin/bash" go -c 'go build gcl.go'
func main() {
	readVolumes()
	readContainer()
	checkContainers()
	checkVolumes()
	os.Exit(0)
}
gcl.go: uses allvolumes.getVolume() in readContainer()
package main
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
)
// mpath is a marker path: a symlink name of the form ".NAME###PATH" where
// any "/" in PATH is encoded as ",#,".
type mpath struct {
	name string
	path string
}

// vdir is a content-addressed volume directory name (64 hex chars).
type vdir string

var mpathre = regexp.MustCompile(`^\.(.*)###(.*)$`)
var vdirre = regexp.MustCompile(`^[a-f0-9]{64}$`)

// newmpath parses a ".NAME###PATH" marker name, decoding ",#," back to "/".
// It returns nil when mp does not match the marker syntax.
func newmpath(mp string) *mpath {
	groups := mpathre.FindStringSubmatch(mp)
	if len(groups) != 3 {
		return nil
	}
	return &mpath{
		name: groups[1],
		path: strings.Replace(groups[2], ",#,", "/", -1),
	}
}

// String renders the marker back to its on-disk symlink name, encoding "/"
// as ",#," (inverse of newmpath).
func (mp *mpath) String() string {
	encoded := strings.Replace(mp.path, "/", ",#,", -1)
	return fmt.Sprintf(".%s###%s", mp.name, encoded)
}

// newvdir validates vd as a sha256-style directory name; it returns the
// empty vdir when vd is not exactly 64 lowercase hex characters.
func newvdir(vd string) vdir {
	if vdirre.MatchString(vd) {
		return vdir(vd)
	}
	return vdir("")
}
// marker associates a named mount path (mp) with the content-addressed
// volume directory (dir) its symlink points at.
type marker struct {
	mp *mpath
	dir vdir
}

type markers []*marker

// allmarkers holds every valid marker symlink discovered by readVolumes.
var allmarkers = markers{}

// getMarker returns the marker in marks whose mpath matches name/path, or a
// freshly built one pointing at dir when none matches.
// NOTE(review): a newly created marker is NOT appended to marks (or to
// allmarkers) — confirm whether that is intentional.
func (marks markers) getMarker(name string, path string, dir vdir) *marker {
	var mark *marker
	for _, marker := range marks {
		if marker.mp.name == name && marker.mp.path == path {
			mark = marker
			break
		}
	}
	if mark == nil {
		mark = &marker{mp: &mpath{name: name, path: path}, dir: dir}
	}
	// An existing marker that points at a different dir needs relinking.
	if mark.dir != dir {
		// TODO: move dir and ln -s mark.dir dir
		fmt.Printf("Mark container named '%s' for path '%s' as link to '%s' (from '%s')\n", name, path, mark.dir, dir)
	}
	// TODO: check that link exists
	return mark
}
// volume describes one docker vfs volume: the container mount path (once
// known), the directory name (sha256 hex) and its marker, if any.
type volume struct {
	path string
	dir vdir
	mark *marker
}

type volumes []*volume

// allvolumes holds every volume directory discovered by readVolumes.
var allvolumes = volumes{}

func (v *volume) String() string {
	return "vol '" + string(v.dir) + "'"
}

// getVolume returns the volume whose directory is vd, filling in its path
// (first caller wins; mismatches are only reported) and marker on demand.
// When vd is unknown a fresh volume is created (but NOT added to vols).
func (vols volumes) getVolume(vd vdir, path string, name string) *volume {
	var vol *volume
	for _, volume := range vols {
		if string(volume.dir) == string(vd) {
			vol = volume
			if vol.path == "" {
				vol.path = path
			}
			if vol.path != path {
				fmt.Printf("Invalid volume path detected: '%s' (vs. container volume path '%s')\n", vol.path, path)
			}
			if vol.mark == nil {
				vol.mark = allmarkers.getMarker(name, vol.path, vol.dir)
			}
			break
		}
	}
	if vol == nil {
		// TODO make marker
		vol = &volume{path: path, dir: vd}
	}
	return vol
}
// container mirrors `docker inspect` output: name, full id, stopped state
// and the volumes it mounts.
type container struct {
	name string
	id string
	stopped bool
	volumes []*volume
}

// trunc returns a shortened container id for display.
// NOTE(review): assumes len(id) is 0 or >= 7; an id of 1-6 chars would panic.
func (c *container) trunc() string {
	if len(c.id) > 0 {
		return c.id[:7]
	}
	return ""
}

func (c *container) String() string {
	return "cnt '" + c.name + "' (" + c.trunc() + ")" + fmt.Sprintf("[%v] - %d vol", c.stopped, len(c.volumes))
}

// containers holds every container discovered by readContainer.
var containers = []*container{}

// mustcmd runs acmd through cmd and aborts the whole program on failure.
func mustcmd(acmd string) string {
	out, err := cmd(acmd)
	if err != nil {
		log.Fatal(fmt.Sprintf("out='%s', err='%s'", out, err))
	}
	return string(out)
}

// cmd echoes then executes the given command via `sh -c`, returning its
// trimmed standard output.
func cmd(cmd string) (string, error) {
	fmt.Println(cmd)
	out, err := exec.Command("sh", "-c", cmd).Output()
	return strings.TrimSpace(string(out)), err
}
// readVolumes scans /mnt/sda1/var/lib/docker/vfs/dir and classifies each
// entry by its `ls -F` suffix: '@' = marker symlink, '/' = volume
// directory, anything else is unexpected. Valid markers are collected into
// allmarkers, valid volume dirs into allvolumes; every invalid entry is
// DELETED with `sudo rm`.
func readVolumes() {
	out := mustcmd("sudo ls -a1F /mnt/sda1/var/lib/docker/vfs/dir")
	vollines := strings.Split(out, "\n")
	// fmt.Println(vollines)
	for _, volline := range vollines {
		dir := volline
		if dir == "./" || dir == "../" {
			continue
		}
		if strings.HasSuffix(dir, "@") {
			// Symlink: must be a marker named ".name###path" resolving to
			// an existing sha256 volume directory; strip the '@' suffix.
			dir = dir[:len(dir)-1]
			fdir := fmt.Sprintf("/mnt/sda1/var/lib/docker/vfs/dir/%s", dir)
			mp := newmpath(dir)
			if mp == nil {
				fmt.Printf("Invalid marker detected: '%s'\n", dir)
				mustcmd("sudo rm " + fdir)
			} else {
				dirlink, err := cmd("sudo readlink " + fdir)
				fmt.Printf("---\ndir: '%s'\ndlk: '%s'\nerr='%v'\n", dir, dirlink, err)
				if err != nil {
					fmt.Printf("Invalid marker (no readlink) detected: '%s'\n", dir)
					mustcmd("sudo rm " + fdir)
				} else {
					// The link target must itself exist under the vfs dir.
					_, err := cmd("sudo ls /mnt/sda1/var/lib/docker/vfs/dir/" + dirlink)
					if err != nil {
						fmt.Printf("Invalid marker (readlink no ls) detected: '%s'\n", dir)
						mustcmd("sudo rm " + fdir)
					} else {
						vd := newvdir(dirlink)
						if vd == "" {
							fmt.Printf("Invalid marker (readlink no vdir) detected: '%s'\n", dir)
							mustcmd("sudo rm " + fdir)
						} else {
							allmarkers = append(allmarkers, &marker{mp, vd})
						}
					}
				}
			}
		} else if strings.HasSuffix(dir, "/") {
			// Directory: its name must be 64 hex chars; strip the '/' suffix.
			dir = dir[:len(dir)-1]
			fdir := fmt.Sprintf("/mnt/sda1/var/lib/docker/vfs/dir/%s", dir)
			vd := newvdir(dir)
			if vd == "" {
				fmt.Printf("Invalid volume folder detected: '%s'\n", dir)
				mustcmd("sudo rm " + fdir)
			} else {
				allvolumes = append(allvolumes, &volume{dir: vd})
			}
		} else {
			// Plain file: never expected in the vfs dir.
			fdir := fmt.Sprintf("/mnt/sda1/var/lib/docker/vfs/dir/%s", dir)
			fmt.Printf("Invalid file detected: '%s'\n", dir)
			mustcmd("sudo rm " + fdir)
		}
	}
	fmt.Printf("volumes: %v\nmarkers: %v\n", allvolumes, allmarkers)
}
// readContainer lists all containers (`docker ps -aq`) and, for each,
// parses the name and (mount path, vfs dir) volume pairs out of
// `docker inspect`, resolving them through allvolumes.getVolume. Results
// are appended to containers.
func readContainer() {
	out := mustcmd("docker ps -aq --no-trunc")
	contlines := strings.Split(out, "\n")
	// fmt.Println(contlines)
	for _, contline := range contlines {
		id := contline
		// "##~#" separates volume pairs in the template output.
		res := mustcmd("docker inspect -f '{{ .Name }},{{ range $key, $value := .Volumes }}{{ $key }},{{ $value }}##~#{{ end }}' " + id)
		// fmt.Println("res1: '" + res + "'")
		// .Name starts with '/', hence the [1:].
		name := res[1:strings.Index(res, ",")]
		cont := &container{name: name, id: id}
		res = res[strings.Index(res, ",")+1:]
		// fmt.Println("res2: '" + res + "'")
		vols := strings.Split(res, "##~#")
		// fmt.Println(vols)
		for _, vol := range vols {
			elts := strings.Split(vol, ",")
			if len(elts) == 2 {
				// fmt.Printf("elts: '%v'\n", elts)
				path := elts[0]
				vfs := elts[1]
				if strings.Contains(vfs, "/var/lib/docker/vfs/dir/") {
					vd := newvdir(filepath.Base(vfs))
					if vd == "" {
						fmt.Printf("Invalid volume folder detected: '%s'\n", vfs)
						break
					}
					newvol := allvolumes.getVolume(vd, path, name)
					cont.volumes = append(cont.volumes, newvol)
				}
			}
		}
		containers = append(containers, cont)
	}
	fmt.Printf("containers: %v\n", containers)
}
// accept reports whether marker m matches volume v.
// TODO: not implemented yet — always false.
func (v *volume) accept(m *marker) bool {
	// TODO
	return false
}

// checkContainers tries to attach a marker to every container volume that
// does not have one yet.
func checkContainers() {
	for _, container := range containers {
		for _, volume := range container.volumes {
			if volume.mark == nil {
				for _, mark := range allmarkers {
					if volume.accept(mark) {
						fmt.Printf("Set mark '%v' to volume '%v' of container '%v'\n", mark, volume, container)
						volume.mark = mark
						// TODO check if ln is needed
					}
				}
			}
			if volume.mark == nil {
				// TODO check if vfs folder exist.
				// If it does, make the marker
			}
		}
	}
}

// accept reports whether container c uses volume v.
// TODO: not implemented yet — always false.
func (c *container) accept(v *volume) bool {
	// TODO
	return false
}

// checkVolumes reports volumes referenced by no container (orphans).
func checkVolumes() {
	for _, volume := range allvolumes {
		orphan := true
		for _, container := range containers {
			if container.accept(volume) {
				orphan = false
				break
			}
		}
		if orphan {
			fmt.Printf("Orphan detected, volume '%v'\n", volume)
			// TODO rm if necessary or at least mv _xxx
		}
	}
}

// docker run --rm -i -t -v `pwd`:`pwd` -w `pwd` --entrypoint="/bin/bash" go -c 'go build gcl.go'
func main() {
	readVolumes()
	readContainer()
	checkContainers()
	checkVolumes()
	os.Exit(0)
}
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/json"
"fmt"
"net/http"
"github.com/pkg/errors"
"regexp"
)
var (
	// BUG FIX: the dots in "gcr.io" were unescaped and matched any
	// character (e.g. "gcrXio/..." was accepted). Escape them.
	gcrSHA256Pattern = regexp.MustCompile(`^gcr\.io\/.*@sha256:[0-9a-f]{64}$`) // TODO is this redundant given below
	gcrSHA256Group   = regexp.MustCompile(`^gcr\.io\/(.*)@(sha256:[0-9a-f]{64})$`)
)

// isGCRHash reports whether image is a gcr.io image pinned to a sha256 digest.
func isGCRHash(image string) bool { return gcrSHA256Pattern.MatchString(image) }
// resolveGCRHashToTag returns the image with IMAGE:TAG format if it can be
// resolved. If no tags are available, an error is returned. If multiple
// tags are available, the first one that's not "latest" is returned.
func resolveGCRHashToTag(image string) (string, error) {
	groups := gcrSHA256Group.FindStringSubmatch(image)
	if len(groups) != 3 {
		return "", errors.Errorf("image %s cannot be parsed into repo/sha (got %d groups)", image, len(groups))
	}
	repo, hash := groups[1], groups[2]
	resp, err := http.Get(fmt.Sprintf("https://gcr.io/v2/%s/tags/list", repo))
	if err != nil {
		return "", errors.Wrapf(err, "failed to query tags from GCR for image %s", image)
	}
	defer resp.Body.Close()
	// Robustness: a non-200 body would otherwise be decoded as an empty
	// manifest and surface as a confusing "hash not found" error.
	if resp.StatusCode != http.StatusOK {
		return "", errors.Errorf("unexpected status %d listing tags for image %s", resp.StatusCode, image)
	}
	var v struct {
		Manifest map[string]struct {
			Tags []string `json:"tag"`
		} `json:"manifest"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
		return "", errors.Wrap(err, "failed to read and decode response body")
	}
	man, ok := v.Manifest[hash]
	if !ok {
		// BUG FIX: err is nil at this point, and errors.Wrapf(nil, ...)
		// returns nil — this branch used to return ("", nil). Use Errorf.
		return "", errors.Errorf("hash %q not found in response manifest", hash)
	}
	if len(man.Tags) == 0 {
		return "", errors.Errorf("no tags found for gcr image %s", image)
	}
	// return the first tag that's not "latest"; fall back to the first tag
	var tag string
	for _, t := range man.Tags {
		if t != "latest" {
			tag = t
			break
		}
	}
	if tag == "" {
		tag = man.Tags[0]
	}
	return fmt.Sprintf("gcr.io/%s:%s", repo, tag), nil
}
remove redundant regexp pattern
Signed-off-by: Ahmet Alp Balkan <c786c8527fd6be7431bb94ca48b6f76cc06787cc@google.com>
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/json"
"fmt"
"net/http"
"github.com/pkg/errors"
"regexp"
)
var (
	// BUG FIX: the dots in "gcr.io" were unescaped and matched any
	// character (e.g. "gcrXio/..." was accepted). Escape them.
	gcrSHA256Pattern = regexp.MustCompile(`^gcr\.io\/(.*)@(sha256:[0-9a-f]{64})$`)
)

// isGCRHash reports whether image is a gcr.io image pinned to a sha256 digest.
func isGCRHash(image string) bool { return gcrSHA256Pattern.MatchString(image) }
// resolveGCRHashToTag returns the image with IMAGE:TAG format if it can be
// resolved. If no tags are available, an error is returned. If multiple
// tags are available, the first one that's not "latest" is returned.
func resolveGCRHashToTag(image string) (string, error) {
	groups := gcrSHA256Pattern.FindStringSubmatch(image)
	if len(groups) != 3 {
		return "", errors.Errorf("image %s cannot be parsed into repo/sha (got %d groups)", image, len(groups))
	}
	repo, hash := groups[1], groups[2]
	resp, err := http.Get(fmt.Sprintf("https://gcr.io/v2/%s/tags/list", repo))
	if err != nil {
		return "", errors.Wrapf(err, "failed to query tags from GCR for image %s", image)
	}
	defer resp.Body.Close()
	// Robustness: a non-200 body would otherwise be decoded as an empty
	// manifest and surface as a confusing "hash not found" error.
	if resp.StatusCode != http.StatusOK {
		return "", errors.Errorf("unexpected status %d listing tags for image %s", resp.StatusCode, image)
	}
	var v struct {
		Manifest map[string]struct {
			Tags []string `json:"tag"`
		} `json:"manifest"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
		return "", errors.Wrap(err, "failed to read and decode response body")
	}
	man, ok := v.Manifest[hash]
	if !ok {
		// BUG FIX: err is nil at this point, and errors.Wrapf(nil, ...)
		// returns nil — this branch used to return ("", nil). Use Errorf.
		return "", errors.Errorf("hash %q not found in response manifest", hash)
	}
	if len(man.Tags) == 0 {
		return "", errors.Errorf("no tags found for gcr image %s", image)
	}
	// return the first tag that's not "latest"; fall back to the first tag
	var tag string
	for _, t := range man.Tags {
		if t != "latest" {
			tag = t
			break
		}
	}
	if tag == "" {
		tag = man.Tags[0]
	}
	return fmt.Sprintf("gcr.io/%s:%s", repo, tag), nil
}
|
//
// Copyright (C) 2014 Sebastian 'tokkee' Harl <sh@tokkee.org>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package server
// Helper functions for handling and plotting graphs.
import (
"bytes"
"errors"
"fmt"
"io"
"net/http"
"time"
"code.google.com/p/plotinum/plot"
"code.google.com/p/plotinum/plotter"
"code.google.com/p/plotinum/plotutil"
"code.google.com/p/plotinum/vg"
"code.google.com/p/plotinum/vg/vgsvg"
"github.com/sysdb/go/sysdb"
)
// urldate is the reference layout for start/end timestamps in graph URLs.
var urldate = "20060102150405"

// graph renders a time-series plot as SVG for req.args = [host, metric,
// [start, [end]]]. Start and end use the urldate layout and default to the
// 24 hours ending now.
func (s *Server) graph(w http.ResponseWriter, req request) {
	if len(req.args) < 2 || 4 < len(req.args) {
		s.badrequest(w, fmt.Errorf("Missing host/metric information"))
		return
	}
	end := time.Now()
	start := end.Add(-24 * time.Hour)
	var err error
	if len(req.args) > 2 {
		if start, err = time.Parse(urldate, req.args[2]); err != nil {
			s.badrequest(w, fmt.Errorf("Invalid start time: %v", err))
			return
		}
	}
	if len(req.args) > 3 {
		if end, err = time.Parse(urldate, req.args[3]); err != nil {
			// BUG FIX: this branch previously reported "Invalid start time".
			s.badrequest(w, fmt.Errorf("Invalid end time: %v", err))
			return
		}
	}
	if start.Equal(end) || start.After(end) {
		s.badrequest(w, fmt.Errorf("START(%v) is greater than or equal to END(%v)", start, end))
		return
	}
	res, err := s.query("TIMESERIES %s.%s START %s END %s", req.args[0], req.args[1], start, end)
	if err != nil {
		s.internal(w, fmt.Errorf("Failed to retrieve graph data: %v", err))
		return
	}
	ts, ok := res.(sysdb.Timeseries)
	if !ok {
		s.internal(w, errors.New("TIMESERIES did not return a time-series"))
		return
	}
	p, err := plot.New()
	if err != nil {
		s.internal(w, fmt.Errorf("Failed to create plot: %v", err))
		return
	}
	p.Add(plotter.NewGrid())
	p.X.Tick.Marker = dateTicks
	// i selects a distinct color per series.
	var i int
	for name, data := range ts.Data {
		pts := make(plotter.XYs, len(data))
		// Renamed from (i, p): the originals shadowed both the color
		// index i and the plot p above.
		for j, dp := range data {
			pts[j].X = float64(time.Time(dp.Timestamp).UnixNano())
			pts[j].Y = dp.Value
		}
		l, err := plotter.NewLine(pts)
		if err != nil {
			s.internal(w, fmt.Errorf("Failed to create line plotter: %v", err))
			return
		}
		l.LineStyle.Color = plotutil.DarkColors[i%len(plotutil.DarkColors)]
		p.Add(l)
		p.Legend.Add(name, l)
		i++
	}
	// Render to an in-memory SVG canvas, then stream it to the client.
	c := vgsvg.New(vg.Length(500), vg.Length(200))
	p.Draw(plot.MakeDrawArea(c))
	var buf bytes.Buffer
	if _, err := c.WriteTo(&buf); err != nil {
		s.internal(w, fmt.Errorf("Failed to write plot: %v", err))
		return
	}
	w.Header().Set("Content-Type", "image/svg+xml")
	w.WriteHeader(http.StatusOK)
	io.Copy(w, &buf)
}
// dateTicks converts the default numeric tick labels into RFC822 dates;
// tick values are UnixNano timestamps (see graph above).
func dateTicks(min, max float64) []plot.Tick {
	// TODO: this is surely not the best we can do
	// but it'll distribute ticks evenly.
	ticks := plot.DefaultTicks(min, max)
	for i := range ticks {
		if ticks[i].Label == "" {
			// Minor ticks keep their empty label.
			continue
		}
		ticks[i].Label = time.Unix(0, int64(ticks[i].Value)).Format(time.RFC822)
	}
	return ticks
}
// vim: set tw=78 sw=4 sw=4 noexpandtab :
Migrate to github.com/gonum/plot.
This is the new, official fork of code.google.com/p/plotinum.
//
// Copyright (C) 2014 Sebastian 'tokkee' Harl <sh@tokkee.org>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package server
// Helper functions for handling and plotting graphs.
import (
"bytes"
"errors"
"fmt"
"io"
"net/http"
"time"
"github.com/gonum/plot"
"github.com/gonum/plot/plotter"
"github.com/gonum/plot/plotutil"
"github.com/gonum/plot/vg"
"github.com/sysdb/go/sysdb"
)
// urldate is the reference layout for start/end timestamps in graph URLs.
var urldate = "20060102150405"

// graph renders a time-series plot as SVG for req.args = [host, metric,
// [start, [end]]]. Start and end use the urldate layout and default to the
// 24 hours ending now.
func (s *Server) graph(w http.ResponseWriter, req request) {
	if len(req.args) < 2 || 4 < len(req.args) {
		s.badrequest(w, fmt.Errorf("Missing host/metric information"))
		return
	}
	end := time.Now()
	start := end.Add(-24 * time.Hour)
	var err error
	if len(req.args) > 2 {
		if start, err = time.Parse(urldate, req.args[2]); err != nil {
			s.badrequest(w, fmt.Errorf("Invalid start time: %v", err))
			return
		}
	}
	if len(req.args) > 3 {
		if end, err = time.Parse(urldate, req.args[3]); err != nil {
			// BUG FIX: this branch previously reported "Invalid start time".
			s.badrequest(w, fmt.Errorf("Invalid end time: %v", err))
			return
		}
	}
	if start.Equal(end) || start.After(end) {
		s.badrequest(w, fmt.Errorf("START(%v) is greater than or equal to END(%v)", start, end))
		return
	}
	res, err := s.query("TIMESERIES %s.%s START %s END %s", req.args[0], req.args[1], start, end)
	if err != nil {
		s.internal(w, fmt.Errorf("Failed to retrieve graph data: %v", err))
		return
	}
	ts, ok := res.(sysdb.Timeseries)
	if !ok {
		s.internal(w, errors.New("TIMESERIES did not return a time-series"))
		return
	}
	p, err := plot.New()
	if err != nil {
		s.internal(w, fmt.Errorf("Failed to create plot: %v", err))
		return
	}
	p.Add(plotter.NewGrid())
	p.X.Tick.Marker = dateTicks{}
	// i selects a distinct color per series.
	var i int
	for name, data := range ts.Data {
		pts := make(plotter.XYs, len(data))
		// Renamed from (i, p): the originals shadowed both the color
		// index i and the plot p above.
		for j, dp := range data {
			pts[j].X = float64(time.Time(dp.Timestamp).UnixNano())
			pts[j].Y = dp.Value
		}
		l, err := plotter.NewLine(pts)
		if err != nil {
			s.internal(w, fmt.Errorf("Failed to create line plotter: %v", err))
			return
		}
		l.LineStyle.Color = plotutil.DarkColors[i%len(plotutil.DarkColors)]
		p.Add(l)
		p.Legend.Add(name, l)
		i++
	}
	// Render to an in-memory SVG buffer, then stream it to the client.
	pw, err := p.WriterTo(vg.Length(500), vg.Length(200), "svg")
	if err != nil {
		s.internal(w, fmt.Errorf("Failed to write plot: %v", err))
		return
	}
	var buf bytes.Buffer
	if _, err := pw.WriteTo(&buf); err != nil {
		s.internal(w, fmt.Errorf("Failed to write plot: %v", err))
		return
	}
	w.Header().Set("Content-Type", "image/svg+xml")
	w.WriteHeader(http.StatusOK)
	io.Copy(w, &buf)
}
// dateTicks implements plot.Ticker, converting the default numeric tick
// labels into RFC822 dates; tick values are UnixNano timestamps.
type dateTicks struct{}

func (dateTicks) Ticks(min, max float64) []plot.Tick {
	// TODO: this is surely not the best we can do
	// but it'll distribute ticks evenly.
	ticks := plot.DefaultTicks{}.Ticks(min, max)
	for i := range ticks {
		if ticks[i].Label == "" {
			// Minor ticks keep their empty label.
			continue
		}
		ticks[i].Label = time.Unix(0, int64(ticks[i].Value)).Format(time.RFC822)
	}
	return ticks
}
// vim: set tw=78 sw=4 sw=4 noexpandtab :
|
// Package goofclient consists of structs and their methods which can be used to
// connect to the host, send or receive messages through the server
package goofclient
import (
"bufio"
"errors"
"flag"
"fmt"
"log"
"net"
"net/rpc"
"os"
"strconv"
"strings"
)
//Nothing defines a blank variable, used where the RPC API requires an
//argument or reply but none carries meaning.
type Nothing bool

//Message defines struct for every message exchanged through the server.
type Message struct {
	User string
	Target string
	Msg string
}

//ChatClient defines struct for each of the ChatClient: the user's name,
//the server address, and the underlying RPC connection (dialed lazily by
//getClientConnection).
type ChatClient struct {
	Username string
	Address string
	Client *rpc.Client
}

// Global variables to store default port and host.
var (
	DefaultPort = 3410
	DefaultHost = "localhost"
)
// getClientConnection function dials into given host and returns the client
// variable if success. The connection is cached on c.Client, so only the
// first call actually dials.
// NOTE(review): log.Panicf aborts the caller on dial failure instead of
// returning an error — confirm this is the intended contract.
func (c *ChatClient) getClientConnection() *rpc.Client {
	var err error
	if c.Client == nil {
		c.Client, err = rpc.DialHTTP("tcp", c.Address)
		if err != nil {
			log.Panicf("Error establishing connection with host: %q", err)
		}
	}
	return c.Client
}

// RegisterGoofs function takes a username and registers it with the server.
// On rejection (e.g. name already taken) it prompts for a new name on stdin
// and retries recursively.
// NOTE(review): recursion is unbounded if registration keeps failing.
func (c *ChatClient) RegisterGoofs() {
	var reply string
	c.Client = c.getClientConnection()
	err := c.Client.Call("ChatServer.RegisterGoofs", c.Username, &reply)
	if err != nil {
		fmt.Printf("Error registering user: %q\n", err)
		fmt.Println("Enter new GOOF name:")
		fmt.Scanln(&c.Username)
		c.RegisterGoofs()
	} else {
		fmt.Printf("\n %s", reply)
	}
}
//ListGoofs function lists all the users in the chat currently, printing
//one username per line. RPC errors are logged and the listing is skipped.
func (c *ChatClient) ListGoofs() {
	var reply []string
	var none Nothing
	c.Client = c.getClientConnection()
	err := c.Client.Call("ChatServer.ListGoofs", none, &reply)
	if err != nil {
		log.Printf("Error listing users: %q\n", err)
	}
	for i := range reply {
		fmt.Println(reply[i])
	}
}

//Logout function logs a goof out and, on success, terminates the process.
//NOTE(review): uses c.Client directly — assumes a connection was already
//established (e.g. via RegisterGoofs); a nil Client would panic.
func (c *ChatClient) Logout() {
	var reply Nothing
	err := c.Client.Call("ChatServer.Logout", c.Username, &reply)
	if err != nil {
		log.Printf("Error logging out: %q\n", err)
	} else {
		log.Println("Logged out Succesfully")
		os.Exit(0)
	}
}
//CreateClientFromFlags function parses the command line arguments (-user,
//-host) into a ChatClient, prompting on stdin when no username was given
//and guessing host:port from the shape of the -host value.
func CreateClientFromFlags() (*ChatClient, error) {
	var c = &ChatClient{}
	var host string
	flag.StringVar(&c.Username, "user", "Goof", "Your username")
	flag.StringVar(&host, "host", "localhost", "The host you want to connect to")
	flag.Parse()
	// NOTE(review): an explicit `-user Goof` is indistinguishable from the
	// default and also triggers this prompt.
	if c.Username == "Goof" {
		fmt.Println("Enter your Goof ID: ")
		fmt.Scanln(&c.Username)
	}
	// NOTE(review): dead branch — flag.Parse() was just called above, so
	// flag.Parsed() is always true here.
	if !flag.Parsed() {
		return c, errors.New("Unable to create user from commandline flags. Please try again")
	}
	// Check for the structure of the flag to see if we can make any educated guesses for them
	if len(host) != 0 {
		if strings.HasPrefix(host, ":") { // Begins with a colon means :3410 (just port)
			c.Address = DefaultHost + host
		} else if strings.Contains(host, ":") { // Contains a colon means host:port
			c.Address = host
		} else { // Otherwise, it's just a host
			c.Address = net.JoinHostPort(host, strconv.Itoa(DefaultPort))
		}
	} else {
		c.Address = net.JoinHostPort(DefaultHost, strconv.Itoa(DefaultPort)) // Default to our default port and host
	}
	return c, nil
}
// MainLoop reads commands from standard input and dispatches them to the
// matching client method until the input stream ends (a trailing partial
// line without a newline is ignored).
func MainLoop(c *ChatClient) {
	// Create the reader once: rebuilding a bufio.Reader every iteration can
	// silently discard input already buffered by the previous one.
	reader := bufio.NewReader(os.Stdin)
	for {
		line, err := reader.ReadString('\n')
		if err != nil {
			// EOF or a real read failure: stop instead of spinning forever.
			log.Printf("Error: %q\n", err)
			return
		}
		line = strings.TrimSpace(line)
		if strings.HasPrefix(line, "listGoofs") {
			c.ListGoofs()
		} else if strings.HasPrefix(line, "logout") {
			c.Logout()
		} else if strings.HasPrefix(line, "help") {
			fmt.Println("Welcome to GOOFtalk help:")
			fmt.Println("List of funcitons, \n1. listGoofs\n4. logout")
		} else {
			fmt.Println("Invalid function, try 'help' to list all available functions")
		}
	}
}
Update goofclient: add the whisper function and per-second message polling
// Package goofclient consists of structs and their methods which can be used to
// connect to the host, send or receive messages through the server
package goofclient
import (
"bufio"
"errors"
"flag"
"fmt"
"log"
"net"
"net/rpc"
"os"
"strconv"
"strings"
"time"
)
// Nothing is a placeholder type for RPC calls that need no meaningful
// argument or reply value.
type Nothing bool
// Message is the payload for a directed (whisper) chat message.
type Message struct {
	User   string // sender's username
	Target string // recipient's username
	Msg    string // message body
}
// ChatClient holds the state for one connected chat user.
type ChatClient struct {
	Username string      // name registered with the server
	Address  string      // host:port of the chat server
	Client   *rpc.Client // lazily-dialed RPC connection (see getClientConnection)
}
// Global variables to store default port and host.
var (
	DefaultPort = 3410        // port assumed when the -host flag omits one
	DefaultHost = "localhost" // host assumed when the -host flag gives only a port
)
// getClientConnection lazily dials c.Address over HTTP-RPC, caching the
// resulting client on the receiver so subsequent calls reuse it.
// It panics (via log.Panicf) when the dial fails.
func (c *ChatClient) getClientConnection() *rpc.Client {
	if c.Client != nil {
		return c.Client
	}
	conn, dialErr := rpc.DialHTTP("tcp", c.Address)
	if dialErr != nil {
		log.Panicf("Error establishing connection with host: %q", dialErr)
	}
	c.Client = conn
	return c.Client
}
// RegisterGoofs announces c.Username to the server. If the server rejects
// the name it prompts for a new one on stdin and retries recursively.
func (c *ChatClient) RegisterGoofs() {
	c.Client = c.getClientConnection()
	var reply string
	if err := c.Client.Call("ChatServer.RegisterGoofs", c.Username, &reply); err != nil {
		fmt.Printf("Error registering user: %q\n", err)
		fmt.Println("Enter new GOOF name:")
		fmt.Scanln(&c.Username)
		c.RegisterGoofs()
		return
	}
	fmt.Printf("\n %s", reply)
}
// CheckMessages polls the server once per second for messages addressed to
// this user and logs each one. It never returns; when the server becomes
// unreachable it terminates the process via log.Fatalln.
func (c *ChatClient) CheckMessages() {
	c.Client = c.getClientConnection()
	var pending []string
	for {
		if err := c.Client.Call("ChatServer.CheckMessages", c.Username, &pending); err != nil {
			log.Fatalln("Chat has been shutdown. Goodbye.")
		}
		for _, m := range pending {
			log.Println(m)
		}
		time.Sleep(time.Second)
	}
}
// ListGoofs prints the usernames currently registered with the server,
// one per line. Errors are logged but not fatal.
func (c *ChatClient) ListGoofs() {
	c.Client = c.getClientConnection()
	var (
		none  Nothing
		names []string
	)
	if err := c.Client.Call("ChatServer.ListGoofs", none, &names); err != nil {
		log.Printf("Error listing users: %q\n", err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}
// Whisper sends a private message to one user. params is the whitespace-split
// input line: params[0] is "@<username>" and the remaining fields form the
// message body.
func (c *ChatClient) Whisper(params []string) {
	var reply Nothing
	c.Client = c.getClientConnection()
	// Require a target plus at least one message word. The old check
	// (len(params) == 2) rejected every multi-word message even though the
	// Join below clearly intends to support them.
	if len(params) < 2 {
		log.Println("Usage of whisper: @<username> <your message>")
		return
	}
	target := strings.Replace(params[0], "@", "", 1)
	msg := strings.Join(params[1:], " ")
	message := Message{
		User:   c.Username,
		Target: target,
		Msg:    msg,
	}
	err := c.Client.Call("ChatServer.Whisper", message, &reply)
	if err != nil {
		log.Printf("Error telling users something: %q", err)
	}
}
// Logout unregisters c.Username from the server and, on success, exits the
// process. On failure the error is logged and the client keeps running.
func (c *ChatClient) Logout() {
	var reply Nothing
	// Ensure a live connection: calling through a nil c.Client would panic
	// if no other command had been issued first.
	c.Client = c.getClientConnection()
	err := c.Client.Call("ChatServer.Logout", c.Username, &reply)
	if err != nil {
		log.Printf("Error logging out: %q\n", err)
	} else {
		log.Println("Logged out Succesfully")
		os.Exit(0)
	}
}
// CreateClientFromFlags builds a ChatClient from the -user and -host
// command-line flags, prompting on stdin when no username was supplied,
// and derives the server address from the host flag's shape.
// It returns an error only when the resulting username is empty.
func CreateClientFromFlags() (*ChatClient, error) {
	var c = &ChatClient{}
	var host string
	flag.StringVar(&c.Username, "user", "Goof", "Your username")
	flag.StringVar(&host, "host", "localhost", "The host you want to connect to")
	flag.Parse()
	if c.Username == "Goof" {
		fmt.Println("Enter your Goof ID: ")
		fmt.Scanln(&c.Username)
	}
	// flag.Parse (ExitOnError) never returns on a parse failure, so the old
	// `if !flag.Parsed()` branch was unreachable; validate the username instead.
	if c.Username == "" {
		return c, errors.New("Unable to create user from commandline flags. Please try again")
	}
	// Check for the structure of the flag to see if we can make any educated guesses for them
	if len(host) != 0 {
		if strings.HasPrefix(host, ":") { // Begins with a colon means :3410 (just port)
			c.Address = DefaultHost + host
		} else if strings.Contains(host, ":") { // Contains a colon means host:port
			c.Address = host
		} else { // Otherwise, it's just a host
			c.Address = net.JoinHostPort(host, strconv.Itoa(DefaultPort))
		}
	} else {
		c.Address = net.JoinHostPort(DefaultHost, strconv.Itoa(DefaultPort)) // Default to our default port and host
	}
	return c, nil
}
// MainLoop reads commands from standard input and dispatches them to the
// matching client method until the input stream ends (a trailing partial
// line without a newline is ignored).
func MainLoop(c *ChatClient) {
	// Create the reader once: rebuilding a bufio.Reader every iteration can
	// silently discard input already buffered by the previous one.
	reader := bufio.NewReader(os.Stdin)
	for {
		line, err := reader.ReadString('\n')
		if err != nil {
			// EOF or a real read failure: stop instead of spinning forever.
			log.Printf("Error: %q\n", err)
			return
		}
		line = strings.TrimSpace(line)
		params := strings.Fields(line)
		if strings.HasPrefix(line, "list") {
			c.ListGoofs()
		} else if strings.HasPrefix(line, "@") {
			c.Whisper(params)
		} else if strings.HasPrefix(line, "logout") {
			c.Logout()
		} else if strings.HasPrefix(line, "help") {
			fmt.Println("Welcome to GOOFtalk help:")
			fmt.Println("List of funcitons, \n1. List all online Goofs : list\n2. Whisper: @<username> <message>\n3.Logout: logout")
		} else {
			fmt.Println("Invalid function, try 'help' to list all available functions")
		}
	}
}
|
package service
import (
"fmt"
"github.com/ninjasphere/app-presets/model"
"github.com/ninjasphere/go-ninja/config"
"github.com/ninjasphere/go-ninja/logger"
nmodel "github.com/ninjasphere/go-ninja/model"
"strings"
)
// checkInit lazily creates the service logger and aborts the process unless
// the service has completed initialization.
func (ps *PresetsService) checkInit() {
	if ps.Log == nil {
		ps.Log = logger.GetLogger("com.ninja.app-presets")
	}
	if ps.initialized {
		return
	}
	ps.Log.Fatalf("illegal state: the service is not initialized")
}
// copyState returns the "payload" member of the channel's last recorded
// state, or nil when the channel has no usable state.
func copyState(ch *nmodel.Channel) interface{} {
	state, isMap := ch.LastState.(map[string]interface{})
	if !isMap {
		// covers both a nil LastState and a non-map state
		return nil
	}
	payload, present := state["payload"]
	if !present {
		return nil
	}
	return payload
}
// parseScope validates and normalizes a scope parameter.
// It returns the normalized scope, the room id (for "room:<id>" scopes),
// the site id (for "site" scopes) and an error for malformed or foreign
// scopes. A nil or empty scope is legal and yields empty results.
func (ps *PresetsService) parseScope(scope *string) (string, string, string, error) {
	var err error
	room := ""
	siteID := ""
	resultScope := ""
	if scope == nil || *scope == "" {
		return "", "", "", nil
	}
	resultScope = *scope
	// strings.Split of a non-empty string always yields at least one part,
	// so the old len(parts) == 0 fallback was dead code.
	parts := strings.Split(resultScope, ":")
	if len(parts) > 2 {
		err = fmt.Errorf("illegal argument: scope has too many parts")
	} else {
		switch parts[0] {
		case "room":
			// guard: a bare "room" scope used to panic on parts[1]
			if len(parts) < 2 {
				err = fmt.Errorf("illegal argument: room scope requires a room id")
			} else {
				room = parts[1]
			}
		case "site":
			siteID = config.MustString("siteId")
			if len(parts) == 2 && parts[1] != siteID {
				err = fmt.Errorf("cannot configure presets for foreign site")
			} else {
				resultScope = fmt.Sprintf("site:%s", siteID)
			}
		default:
			err = fmt.Errorf("illegal argument: scope has an unrecognized scheme")
		}
	}
	if err != nil {
		// dereference: %s on a *string prints the pointer, not the scope text
		ps.Log.Errorf("bad scope: %s: %v", *scope, err)
	}
	return resultScope, room, siteID, err
}
// match returns the indices (ascending) of all scenes in ps.Model.Scenes
// that satisfy the query. A query with no scope, id and slot matches every
// scene. A scope filter may be narrowed by a slot filter; an id filter
// matches independently of the other two.
// NOTE: normalizes spec in place — an empty scope string is rewritten to nil.
func (ps *PresetsService) match(spec *model.Query) []int {
	found := make([]int, 0, len(ps.Model.Scenes))
	// treat an empty scope string the same as no scope filter at all
	if spec.Scope != nil && *spec.Scope == "" {
		spec.Scope = nil
	}
	matchAll := spec.Scope == nil && spec.ID == nil && spec.Slot == nil
	for i, m := range ps.Model.Scenes {
		if matchAll {
			found = append(found, i)
		} else {
			// look for the index of all matching scenes
			// scope filter; when a slot filter is also given, both must match
			if spec.Scope != nil && m.Scope == *spec.Scope {
				if spec.Slot != nil {
					if m.Slot == *spec.Slot {
						found = append(found, i)
						continue
					}
				} else {
					found = append(found, i)
					continue
				}
			}
			// an id match is sufficient on its own
			if spec.ID != nil && m.ID == *spec.ID {
				found = append(found, i)
				continue
			}
		}
	}
	return found
}
// copyScenes returns a fresh slice containing the scenes at the given
// indices (the scene pointers themselves are shared, not deep-copied).
func (ps *PresetsService) copyScenes(selection []int) []*model.Scene {
	out := make([]*model.Scene, 0, len(selection))
	for _, idx := range selection {
		out = append(out, ps.Model.Scenes[idx])
	}
	return out
}
// deleteAll removes from ps.Model.Scenes every scene whose index appears in
// selection and returns the removed scenes in order.
// It is a single-pass, in-place compaction: k tracks the write position for
// kept scenes, j walks the selection. Assumes selection is sorted ascending
// with no duplicates, as produced by match — TODO confirm callers never pass
// anything else.
func (ps *PresetsService) deleteAll(selection []int) []*model.Scene {
	// no two scenes can have the same slot,scope or id.
	// delete the duplicates
	result := make([]*model.Scene, len(selection))
	j := 0
	k := 0
	for i, e := range ps.Model.Scenes {
		if j == len(selection) || i != selection[j] {
			// keep this scene: shift it down over the gap left by deletions
			if i != k {
				ps.Model.Scenes[k] = e
			}
			k++
		} else {
			// this index was selected: capture it and advance the selection
			result[j] = e
			j++
		}
	}
	// truncate to the kept prefix
	ps.Model.Scenes = ps.Model.Scenes[0:k]
	return result
}
// createThingState captures a snapshot of a thing's settable channel states.
// It returns nil when the thing has no device/channels or when no channel
// yields a usable state.
func (ps *PresetsService) createThingState(t *nmodel.Thing) *model.ThingState {
	if t.Device == nil || t.Device.Channels == nil {
		return nil
	}
	thingState := model.ThingState{
		ID:       t.ID,
		Channels: make([]model.ChannelState, 0, len(*t.Device.Channels)),
	}
Channels:
	for _, c := range *t.Device.Channels {
		for _, x := range excludedChannels {
			// don't include channels with excluded schema
			if x == c.Schema {
				continue Channels
			}
		}
		if c.SupportedMethods == nil {
			// don't include channels with no supported methods
			continue
		}
		found := false
		for _, m := range *c.SupportedMethods {
			found = (m == "set")
			if found {
				break
			}
		}
		if !found {
			// don't include channels that do not support the set method
			continue
		}
		state := copyState(c)
		if state == nil {
			// Skip just this channel. Previously a single nil state aborted
			// the whole thing (return nil), hiding devices whose other
			// channels had perfectly good states.
			continue
		}
		channelState := model.ChannelState{
			ID:    c.ID,
			State: state,
		}
		thingState.Channels = append(thingState.Channels, channelState)
	}
	if len(thingState.Channels) == 0 {
		return nil
	}
	return &thingState
}
fix: some devices do not appear in presets prototype.
In particular, if a single channel has a nil state, then no channels are available for preset.
Signed-off-by: Jon Seymour <44f878afe53efc66b76772bd845eb65944ed8232@ninjablocks.com>
package service
import (
"fmt"
"github.com/ninjasphere/app-presets/model"
"github.com/ninjasphere/go-ninja/config"
"github.com/ninjasphere/go-ninja/logger"
nmodel "github.com/ninjasphere/go-ninja/model"
"strings"
)
// checkInit lazily creates the service logger and aborts the process unless
// the service has completed initialization.
func (ps *PresetsService) checkInit() {
	if ps.Log == nil {
		ps.Log = logger.GetLogger("com.ninja.app-presets")
	}
	if ps.initialized {
		return
	}
	ps.Log.Fatalf("illegal state: the service is not initialized")
}
// copyState returns the "payload" member of the channel's last recorded
// state, or nil when the channel has no usable state.
func copyState(ch *nmodel.Channel) interface{} {
	state, isMap := ch.LastState.(map[string]interface{})
	if !isMap {
		// covers both a nil LastState and a non-map state
		return nil
	}
	payload, present := state["payload"]
	if !present {
		return nil
	}
	return payload
}
// parseScope validates and normalizes a scope parameter.
// It returns the normalized scope, the room id (for "room:<id>" scopes),
// the site id (for "site" scopes) and an error for malformed or foreign
// scopes. A nil or empty scope is legal and yields empty results.
func (ps *PresetsService) parseScope(scope *string) (string, string, string, error) {
	var err error
	room := ""
	siteID := ""
	resultScope := ""
	if scope == nil || *scope == "" {
		return "", "", "", nil
	}
	resultScope = *scope
	// strings.Split of a non-empty string always yields at least one part,
	// so the old len(parts) == 0 fallback was dead code.
	parts := strings.Split(resultScope, ":")
	if len(parts) > 2 {
		err = fmt.Errorf("illegal argument: scope has too many parts")
	} else {
		switch parts[0] {
		case "room":
			// guard: a bare "room" scope used to panic on parts[1]
			if len(parts) < 2 {
				err = fmt.Errorf("illegal argument: room scope requires a room id")
			} else {
				room = parts[1]
			}
		case "site":
			siteID = config.MustString("siteId")
			if len(parts) == 2 && parts[1] != siteID {
				err = fmt.Errorf("cannot configure presets for foreign site")
			} else {
				resultScope = fmt.Sprintf("site:%s", siteID)
			}
		default:
			err = fmt.Errorf("illegal argument: scope has an unrecognized scheme")
		}
	}
	if err != nil {
		// dereference: %s on a *string prints the pointer, not the scope text
		ps.Log.Errorf("bad scope: %s: %v", *scope, err)
	}
	return resultScope, room, siteID, err
}
// match returns the indices (ascending) of all scenes in ps.Model.Scenes
// that satisfy the query. A query with no scope, id and slot matches every
// scene. A scope filter may be narrowed by a slot filter; an id filter
// matches independently of the other two.
// NOTE: normalizes spec in place — an empty scope string is rewritten to nil.
func (ps *PresetsService) match(spec *model.Query) []int {
	found := make([]int, 0, len(ps.Model.Scenes))
	// treat an empty scope string the same as no scope filter at all
	if spec.Scope != nil && *spec.Scope == "" {
		spec.Scope = nil
	}
	matchAll := spec.Scope == nil && spec.ID == nil && spec.Slot == nil
	for i, m := range ps.Model.Scenes {
		if matchAll {
			found = append(found, i)
		} else {
			// look for the index of all matching scenes
			// scope filter; when a slot filter is also given, both must match
			if spec.Scope != nil && m.Scope == *spec.Scope {
				if spec.Slot != nil {
					if m.Slot == *spec.Slot {
						found = append(found, i)
						continue
					}
				} else {
					found = append(found, i)
					continue
				}
			}
			// an id match is sufficient on its own
			if spec.ID != nil && m.ID == *spec.ID {
				found = append(found, i)
				continue
			}
		}
	}
	return found
}
// copyScenes returns a fresh slice containing the scenes at the given
// indices (the scene pointers themselves are shared, not deep-copied).
func (ps *PresetsService) copyScenes(selection []int) []*model.Scene {
	out := make([]*model.Scene, 0, len(selection))
	for _, idx := range selection {
		out = append(out, ps.Model.Scenes[idx])
	}
	return out
}
// deleteAll removes from ps.Model.Scenes every scene whose index appears in
// selection and returns the removed scenes in order.
// It is a single-pass, in-place compaction: k tracks the write position for
// kept scenes, j walks the selection. Assumes selection is sorted ascending
// with no duplicates, as produced by match — TODO confirm callers never pass
// anything else.
func (ps *PresetsService) deleteAll(selection []int) []*model.Scene {
	// no two scenes can have the same slot,scope or id.
	// delete the duplicates
	result := make([]*model.Scene, len(selection))
	j := 0
	k := 0
	for i, e := range ps.Model.Scenes {
		if j == len(selection) || i != selection[j] {
			// keep this scene: shift it down over the gap left by deletions
			if i != k {
				ps.Model.Scenes[k] = e
			}
			k++
		} else {
			// this index was selected: capture it and advance the selection
			result[j] = e
			j++
		}
	}
	// truncate to the kept prefix
	ps.Model.Scenes = ps.Model.Scenes[0:k]
	return result
}
// createThingState captures a snapshot of a thing's settable channel states.
// A channel is included only when its schema is not excluded, it supports
// the "set" method, and it has a usable last state. Returns nil when the
// thing has no device/channels or when no channel qualifies.
func (ps *PresetsService) createThingState(t *nmodel.Thing) *model.ThingState {
	if t.Device == nil || t.Device.Channels == nil {
		return nil
	}
	thingState := model.ThingState{
		ID:       t.ID,
		Channels: make([]model.ChannelState, 0, len(*t.Device.Channels)),
	}
	// labeled loop so the schema-exclusion check can skip the outer iteration
Channels:
	for _, c := range *t.Device.Channels {
		for _, x := range excludedChannels {
			// don't include channels with excluded schema
			if x == c.Schema {
				continue Channels
			}
		}
		if c.SupportedMethods == nil {
			// don't include channels with no supported methods
			continue
		}
		found := false
		for _, m := range *c.SupportedMethods {
			found = (m == "set")
			if found {
				break
			}
		}
		if !found {
			// don't include channels that do not support the set method
			continue
		}
		state := copyState(c)
		if state == nil {
			// skip only this channel; other channels may still have states
			continue
		}
		channelState := model.ChannelState{
			ID:    c.ID,
			State: state,
		}
		thingState.Channels = append(thingState.Channels, channelState)
	}
	if len(thingState.Channels) == 0 {
		return nil
	}
	return &thingState
}
|
/*
Prifi-app starts a cothority node in either trustee, relay or client mode.
*/
package main
import (
"fmt"
"os"
"io/ioutil"
"os/user"
"path"
"runtime"
"bytes"
"github.com/BurntSushi/toml"
prifi_protocol "github.com/lbarman/prifi/sda/protocols"
prifi_service "github.com/lbarman/prifi/sda/services"
"gopkg.in/dedis/crypto.v0/abstract"
cryptoconfig "gopkg.in/dedis/crypto.v0/config"
"gopkg.in/dedis/onet.v1"
"gopkg.in/dedis/onet.v1/app"
"gopkg.in/dedis/onet.v1/crypto"
"gopkg.in/dedis/onet.v1/log"
"gopkg.in/dedis/onet.v1/network"
"gopkg.in/urfave/cli.v1"
"net"
"os/exec"
"strconv"
"time"
)
// DefaultName is the name of the binary we produce and is used to create a directory
// folder with this name
const DefaultName = "prifi"

// DefaultCothorityConfigFile is the default name of the cothority identity file.
const DefaultCothorityConfigFile = "identity.toml"

// DefaultCothorityGroupConfigFile is the default name of the group description file.
const DefaultCothorityGroupConfigFile = "group.toml"

// DefaultPriFiConfigFile is the default name of prifi's config file.
const DefaultPriFiConfigFile = "prifi.toml"

// DefaultPort to listen and connect to. As of this writing, this port is not listed in
// /etc/services
const DefaultPort = 6879
// This app can launch the prifi service in either client, trustee or relay mode.
func main() {
	app := cli.NewApp()
	app.Name = "prifi"
	app.Usage = "Starts PriFi in either Trustee, Relay or Client mode."
	app.Version = "0.1"
	// sub-commands: one per role, plus identity generation and a socks-only test mode
	app.Commands = []cli.Command{
		{
			Name:    "gen-id",
			Aliases: []string{"gen"},
			Usage:   "creates a new identity.toml",
			Action:  createNewIdentityToml,
		},
		{
			Name:    "trustee",
			Usage:   "start in trustee mode",
			Aliases: []string{"t"},
			Action:  startTrustee,
		},
		{
			Name:      "relay",
			Usage:     "start in relay mode",
			ArgsUsage: "group [id-name]",
			Aliases:   []string{"r"},
			Action:    startRelay,
		},
		{
			Name:    "client",
			Usage:   "start in client mode",
			Aliases: []string{"c"},
			Action:  startClient,
		},
		{
			Name:    "sockstest",
			Usage:   "only starts the socks server and the socks clients without prifi",
			Aliases: []string{"socks"},
			Action:  startSocksTunnelOnly,
		},
	}
	// global flags shared by all sub-commands
	app.Flags = []cli.Flag{
		cli.IntFlag{
			Name:  "debug, d",
			Value: 0,
			Usage: "debug-level: 1 for terse, 5 for maximal",
		},
		cli.StringFlag{
			Name:  "cothority_config, cc",
			Value: getDefaultFilePathForName(DefaultCothorityConfigFile),
			Usage: "configuration-file",
		},
		cli.StringFlag{
			Name:  "prifi_config, pc",
			Value: getDefaultFilePathForName(DefaultPriFiConfigFile),
			Usage: "configuration-file",
		},
		cli.IntFlag{
			Name:  "port, p",
			Value: 12345,
			Usage: "port for the socks server (this is the port that you need to set in your browser)",
		},
		cli.IntFlag{
			Name:  "port_client",
			Value: 8081,
			Usage: "port for the socks client (that will connect to a remote socks server)",
		},
		cli.StringFlag{
			Name:  "group, g",
			Value: getDefaultFilePathForName(DefaultCothorityGroupConfigFile),
			Usage: "Group file",
		},
		cli.StringFlag{
			Name:  "default_path",
			Value: ".",
			Usage: "The default creation path for identity.toml when doing gen-id",
		},
		cli.BoolFlag{
			Name:  "nowait",
			Usage: "Return immediately",
		},
	}
	// apply the debug level before any command's Action runs
	app.Before = func(c *cli.Context) error {
		log.SetDebugVisible(c.Int("debug"))
		return nil
	}
	app.Run(os.Args)
}
/**
 * Every "app" requires reading the config files and starting cothority
 * beforehand. Any failure is fatal (os.Exit(1)).
 */
func readConfigAndStartCothority(c *cli.Context) (*onet.Server, *app.Group, *prifi_service.ServiceState) {
	//parse PriFi parameters
	prifiTomlConfig, err := readPriFiConfigFile(c)
	// check the error *before* dereferencing the config: on failure the
	// config is nil and the overrides below would panic
	if err != nil {
		log.Error("Could not read prifi config:", err)
		os.Exit(1)
	}
	//override log level and color
	if prifiTomlConfig.OverrideLogLevel > 0 {
		log.Lvl3("Overriding log level (from .toml) to", prifiTomlConfig.OverrideLogLevel)
		log.SetDebugVisible(prifiTomlConfig.OverrideLogLevel)
	}
	if prifiTomlConfig.ForceConsoleColor {
		log.Lvl3("Forcing the console output to be colored (from .toml)")
		log.SetUseColors(true)
	}
	//start cothority server
	host, err := startCothorityNode(c)
	if err != nil {
		log.Error("Could not start Cothority server:", err)
		os.Exit(1)
	}
	//finds the PriFi service
	service := host.GetService(prifi_service.ServiceName).(*prifi_service.ServiceState)
	//set the config from the .toml file
	service.SetConfigFromToml(prifiTomlConfig)
	//reads the group description; it returns nil (not an error) on failure,
	//so test the result rather than the stale err from startCothorityNode
	group := readCothorityGroupConfig(c)
	if group == nil {
		log.Error("Could not read the group description")
		os.Exit(1)
	}
	prifiTomlConfig.ProtocolVersion = getGitCommitID()
	return host, group, service
}
// getGitCommitID runs `git rev-parse HEAD` and returns its raw output
// (including the trailing newline). The commit ID serves as a Protocol
// Version field so mismatched node versions can be detected; the process
// exits if git fails.
func getGitCommitID() string {
	out, err := exec.Command("git", "rev-parse", "HEAD").Output()
	if err != nil {
		log.Error("There was an error running git rev-parse command: ", err)
		os.Exit(1)
	}
	return string(out)
}
// startTrustee boots the cothority node and runs the PriFi service in
// trustee mode; it blocks inside host.Start().
func startTrustee(c *cli.Context) error {
	log.Info("Starting trustee")
	host, group, service := readConfigAndStartCothority(c)
	err := service.StartTrustee(group)
	if err != nil {
		log.Error("Could not start the prifi service:", err)
		os.Exit(1)
	}
	host.Router.AddErrorHandler(service.NetworkErrorHappened)
	host.Start()
	return nil
}
// startRelay boots the cothority node and runs the PriFi service in relay
// mode with auto-start enabled; it blocks inside host.Start().
func startRelay(c *cli.Context) error {
	log.Info("Starting relay")
	host, group, service := readConfigAndStartCothority(c)
	service.AutoStart = true
	err := service.StartRelay(group)
	if err != nil {
		log.Error("Could not start the prifi service:", err)
		os.Exit(1)
	}
	host.Router.AddErrorHandler(service.NetworkErrorHappened)
	host.Start()
	return nil
}
// startClient boots the cothority node and runs the PriFi service in client
// mode; it blocks inside host.Start().
func startClient(c *cli.Context) error {
	log.Info("Starting client")
	host, group, service := readConfigAndStartCothority(c)
	err := service.StartClient(group, time.Duration(0))
	if err != nil {
		log.Error("Could not start the prifi service:", err)
		os.Exit(1)
	}
	host.Router.AddErrorHandler(service.NetworkErrorHappened)
	host.Start()
	return nil
}
// startSocksTunnelOnly runs only the integrated socks server/clients,
// bypassing the PriFi DC-nets; useful for testing the socks path alone.
func startSocksTunnelOnly(c *cli.Context) error {
	log.Info("Starting socks tunnel (bypassing PriFi)")
	host, _, service := readConfigAndStartCothority(c)
	err := service.StartSocksTunnelOnly()
	if err != nil {
		log.Error("Could not start the prifi service:", err)
		os.Exit(1)
	}
	host.Start()
	return nil
}
/**
* COTHORITY
*/
// checkOverwrite returns true when file does not exist, or when it exists
// and the user confirms overwriting it; false otherwise.
func checkOverwrite(file string) bool {
	_, statErr := os.Stat(file)
	if statErr != nil {
		return true
	}
	return app.InputYN(true, "Configuration file "+file+" already exists. Override?")
}
// createNewIdentityToml interactively generates a key pair and listening
// address and saves them to identity.toml, appending the base64 form of the
// public key as a trailing comment (group.toml requires that encoding).
func createNewIdentityToml(c *cli.Context) error {
	log.Print("Generating public/private keys...")
	privStr, pubStr := createKeyPair()

	addrPort := app.Inputf(":"+strconv.Itoa(DefaultPort)+"", "Which port do you want PriFi to use locally ?")

	// Parse IP + port. Handle the empty answer *before* SplitHostPort: the
	// old code called SplitHostPort first, which fails on "" and aborted via
	// ErrFatal, making the empty-string branch unreachable.
	var hostStr string
	var portStr string
	if addrPort == "" {
		hostStr = "127.0.0.1"
		portStr = strconv.Itoa(DefaultPort)
	} else {
		host, port, err := net.SplitHostPort(addrPort)
		log.ErrFatal(err, "Couldn't interpret", addrPort)
		if host == "" {
			hostStr = "127.0.0.1"
		} else {
			hostStr = host
		}
		portStr = port
	}

	serverBinding := network.NewTCPAddress(hostStr + ":" + portStr)
	identity := &app.CothorityConfig{
		Public:  pubStr,
		Private: privStr,
		Address: serverBinding,
	}

	var folderPath string
	var identityFilePath string
	for {
		// get name of config file and write to config file
		defaultPath := "."
		if c.GlobalIsSet("default_path") {
			defaultPath = c.GlobalString("default_path")
		}
		folderPath = app.Inputf(defaultPath, "Please enter the path for the new identity.toml file:")
		identityFilePath = path.Join(folderPath, DefaultCothorityConfigFile)

		// check if the directory exists; create it if not
		if _, err := os.Stat(folderPath); os.IsNotExist(err) {
			log.Info("Creating inexistant directories for ", folderPath)
			if err = os.MkdirAll(folderPath, 0744); err != nil {
				log.Fatalf("Could not create directory %s %v", folderPath, err)
			}
		}

		if checkOverwrite(identityFilePath) {
			break
		}
	}

	if err := identity.Save(identityFilePath); err != nil {
		log.Fatal("Unable to write the config to file:", err)
	}

	// cothority only writes the decimal form of the key, but group.toml
	// needs the base64 form, so append it as a comment
	public, err := crypto.StringHexToPub(network.Suite, pubStr)
	if err != nil {
		log.Fatal("Impossible to parse public key:", err)
	}
	var buff bytes.Buffer
	if err := crypto.Write64Pub(network.Suite, &buff, public); err != nil {
		log.Error("Can't convert public key to base 64")
		return nil
	}

	f, err := os.OpenFile(identityFilePath, os.O_RDWR|os.O_APPEND, 0660)
	if err != nil {
		log.Fatal("Unable to write the config to file (2):", err)
	}
	defer f.Close() // close even when the append below fails
	publicKeyBase64String := buff.String()
	// check the write error instead of silently dropping it
	if _, err := f.WriteString("# Public (base64) = " + publicKeyBase64String + "\n"); err != nil {
		log.Fatal("Unable to append the public key comment:", err)
	}
	log.Info("Identity file saved.")
	return nil
}
// startCothorityNode loads the cothority identity file named by the
// cothority_config flag and returns the resulting (not yet started) server.
func startCothorityNode(c *cli.Context) (*onet.Server, error) {
	cfile := c.GlobalString("cothority_config")
	if _, statErr := os.Stat(cfile); os.IsNotExist(statErr) {
		log.Error("Could not open file \"", cfile, "\" (specified by flag cothority_config)")
		return nil, statErr
	}
	// parse the configuration into a server instance
	_, host, parseErr := app.ParseCothority(cfile)
	if parseErr != nil {
		log.Error("Could not parse file", cfile)
		return nil, parseErr
	}
	return host, nil
}
/**
* CONFIG
*/
// readPriFiConfigFile loads and decodes the prifi.toml configuration and
// applies the command-line port overrides. It returns an error when the
// file is missing, unreadable or malformed.
func readPriFiConfigFile(c *cli.Context) (*prifi_protocol.PrifiTomlConfig, error) {
	cfile := c.GlobalString("prifi_config")
	if _, err := os.Stat(cfile); os.IsNotExist(err) {
		log.Error("Could not open file \"", cfile, "\" (specified by flag prifi_config)")
		return nil, err
	}
	tomlRawData, err := ioutil.ReadFile(cfile)
	if err != nil {
		log.Error("Could not read file \"", cfile, "\" (specified by flag prifi_config)")
		// previously fell through and tried to decode nil data
		return nil, err
	}
	tomlConfig := &prifi_protocol.PrifiTomlConfig{}
	_, err = toml.Decode(string(tomlRawData), tomlConfig)
	if err != nil {
		log.Error("Could not parse toml file", cfile)
		return nil, err
	}
	//ports can be overridden by the command line params
	if c.GlobalIsSet("port") {
		tomlConfig.SocksServerPort = c.GlobalInt("port")
	}
	if c.GlobalIsSet("port_client") {
		tomlConfig.SocksClientPort = c.GlobalInt("port_client")
	}
	return tomlConfig, nil
}
// getDefaultFilePathForName returns fileName joined onto the per-OS default
// config folder, falling back to the current working directory when the
// user's home directory cannot be determined.
func getDefaultFilePathForName(fileName string) string {
	u, err := user.Current()
	if err != nil {
		// can't get the user dir, so fall back to the current working dir
		fmt.Print("[-] Could not get your home's directory. Switching back to current dir.")
		curr, wdErr := os.Getwd()
		if wdErr != nil {
			log.Fatalf("Impossible to get the current directory. %v", wdErr)
		}
		return path.Join(curr, fileName)
	}
	// stick to the usual OS folders
	if runtime.GOOS == "darwin" {
		return path.Join(u.HomeDir, "Library", DefaultName, fileName)
	}
	// TODO Windows ? FreeBSD ?
	return path.Join(u.HomeDir, ".config", DefaultName, fileName)
}
// readCothorityGroupConfig reads and validates the group description named
// by the group flag. It returns nil (after logging) on any problem: missing
// file, unreadable file, bad toml, or an empty roster.
func readCothorityGroupConfig(c *cli.Context) *app.Group {
	gfile := c.GlobalString("group")
	if _, err := os.Stat(gfile); os.IsNotExist(err) {
		log.Error("Could not open file \"", gfile, "\" (specified by flag group)")
		return nil
	}
	fd, err := os.Open(gfile)
	if err != nil {
		log.Error("Could not read file \"", gfile, "\"")
		return nil
	}
	defer fd.Close()
	groups, err := app.ReadGroupDescToml(fd)
	switch {
	case err != nil:
		log.Error("Could not parse toml file \"", gfile, "\"")
		return nil
	case groups == nil || groups.Roster == nil || len(groups.Roster.List) == 0:
		log.Error("No servers found in roster from", gfile)
		return nil
	}
	return groups
}
// createKeyPair generates a fresh key pair and returns (private, public) in
// hexadecimal form; any formatting failure terminates the process.
func createKeyPair() (string, string) {
	kp := cryptoconfig.NewKeyPair(network.Suite)
	priv, err := crypto.ScalarToStringHex(network.Suite, kp.Secret)
	if err != nil {
		log.Fatal("Error formating private key to hexadecimal. Abort.")
	}
	// use the transformation for EdDSA signatures
	//point = cosi.Ed25519Public(network.Suite, kp.Secret)
	var point abstract.Point = kp.Public
	pub, err := crypto.PubToStringHex(network.Suite, point)
	if err != nil {
		log.Fatal("Could not parse public key. Abort.")
	}
	return priv, pub
}
Fix Server deprecation warning
/*
Prifi-app starts a cothority node in either trustee, relay or client mode.
*/
package main
import (
"fmt"
"os"
"io/ioutil"
"os/user"
"path"
"runtime"
"bytes"
"github.com/BurntSushi/toml"
prifi_protocol "github.com/lbarman/prifi/sda/protocols"
prifi_service "github.com/lbarman/prifi/sda/services"
"gopkg.in/dedis/crypto.v0/abstract"
cryptoconfig "gopkg.in/dedis/crypto.v0/config"
"gopkg.in/dedis/onet.v1"
"gopkg.in/dedis/onet.v1/app"
"gopkg.in/dedis/onet.v1/crypto"
"gopkg.in/dedis/onet.v1/log"
"gopkg.in/dedis/onet.v1/network"
"gopkg.in/urfave/cli.v1"
"net"
"os/exec"
"strconv"
"time"
)
// DefaultName is the name of the binary we produce and is used to create a directory
// folder with this name
const DefaultName = "prifi"

// DefaultCothorityConfigFile is the default name of the cothority identity file.
const DefaultCothorityConfigFile = "identity.toml"

// DefaultCothorityGroupConfigFile is the default name of the group description file.
const DefaultCothorityGroupConfigFile = "group.toml"

// DefaultPriFiConfigFile is the default name of prifi's config file.
const DefaultPriFiConfigFile = "prifi.toml"

// DefaultPort to listen and connect to. As of this writing, this port is not listed in
// /etc/services
const DefaultPort = 6879
// This app can launch the prifi service in either client, trustee or relay mode.
func main() {
	app := cli.NewApp()
	app.Name = "prifi"
	app.Usage = "Starts PriFi in either Trustee, Relay or Client mode."
	app.Version = "0.1"
	// sub-commands: one per role, plus identity generation and a socks-only test mode
	app.Commands = []cli.Command{
		{
			Name:    "gen-id",
			Aliases: []string{"gen"},
			Usage:   "creates a new identity.toml",
			Action:  createNewIdentityToml,
		},
		{
			Name:    "trustee",
			Usage:   "start in trustee mode",
			Aliases: []string{"t"},
			Action:  startTrustee,
		},
		{
			Name:      "relay",
			Usage:     "start in relay mode",
			ArgsUsage: "group [id-name]",
			Aliases:   []string{"r"},
			Action:    startRelay,
		},
		{
			Name:    "client",
			Usage:   "start in client mode",
			Aliases: []string{"c"},
			Action:  startClient,
		},
		{
			Name:    "sockstest",
			Usage:   "only starts the socks server and the socks clients without prifi",
			Aliases: []string{"socks"},
			Action:  startSocksTunnelOnly,
		},
	}
	// global flags shared by all sub-commands
	app.Flags = []cli.Flag{
		cli.IntFlag{
			Name:  "debug, d",
			Value: 0,
			Usage: "debug-level: 1 for terse, 5 for maximal",
		},
		cli.StringFlag{
			Name:  "cothority_config, cc",
			Value: getDefaultFilePathForName(DefaultCothorityConfigFile),
			Usage: "configuration-file",
		},
		cli.StringFlag{
			Name:  "prifi_config, pc",
			Value: getDefaultFilePathForName(DefaultPriFiConfigFile),
			Usage: "configuration-file",
		},
		cli.IntFlag{
			Name:  "port, p",
			Value: 12345,
			Usage: "port for the socks server (this is the port that you need to set in your browser)",
		},
		cli.IntFlag{
			Name:  "port_client",
			Value: 8081,
			Usage: "port for the socks client (that will connect to a remote socks server)",
		},
		cli.StringFlag{
			Name:  "group, g",
			Value: getDefaultFilePathForName(DefaultCothorityGroupConfigFile),
			Usage: "Group file",
		},
		cli.StringFlag{
			Name:  "default_path",
			Value: ".",
			Usage: "The default creation path for identity.toml when doing gen-id",
		},
		cli.BoolFlag{
			Name:  "nowait",
			Usage: "Return immediately",
		},
	}
	// apply the debug level before any command's Action runs
	app.Before = func(c *cli.Context) error {
		log.SetDebugVisible(c.Int("debug"))
		return nil
	}
	app.Run(os.Args)
}
/**
 * Every "app" requires reading the config files and starting cothority
 * beforehand. Any failure is fatal (os.Exit(1)).
 */
func readConfigAndStartCothority(c *cli.Context) (*onet.Server, *app.Group, *prifi_service.ServiceState) {
	//parse PriFi parameters
	prifiTomlConfig, err := readPriFiConfigFile(c)
	// check the error *before* dereferencing the config: on failure the
	// config is nil and the overrides below would panic
	if err != nil {
		log.Error("Could not read prifi config:", err)
		os.Exit(1)
	}
	//override log level and color
	if prifiTomlConfig.OverrideLogLevel > 0 {
		log.Lvl3("Overriding log level (from .toml) to", prifiTomlConfig.OverrideLogLevel)
		log.SetDebugVisible(prifiTomlConfig.OverrideLogLevel)
	}
	if prifiTomlConfig.ForceConsoleColor {
		log.Lvl3("Forcing the console output to be colored (from .toml)")
		log.SetUseColors(true)
	}
	//start cothority server
	host, err := startCothorityNode(c)
	if err != nil {
		log.Error("Could not start Cothority server:", err)
		os.Exit(1)
	}
	//finds the PriFi service
	service := host.Service(prifi_service.ServiceName).(*prifi_service.ServiceState)
	//set the config from the .toml file
	service.SetConfigFromToml(prifiTomlConfig)
	//reads the group description; it returns nil (not an error) on failure,
	//so test the result rather than the stale err from startCothorityNode
	group := readCothorityGroupConfig(c)
	if group == nil {
		log.Error("Could not read the group description")
		os.Exit(1)
	}
	prifiTomlConfig.ProtocolVersion = getGitCommitID()
	return host, group, service
}
// getGitCommitID runs `git rev-parse HEAD` and returns its raw output
// (including the trailing newline). The commit ID serves as a Protocol
// Version field so mismatched node versions can be detected; the process
// exits if git fails.
func getGitCommitID() string {
	out, err := exec.Command("git", "rev-parse", "HEAD").Output()
	if err != nil {
		log.Error("There was an error running git rev-parse command: ", err)
		os.Exit(1)
	}
	return string(out)
}
// startTrustee boots the cothority node and runs the PriFi service in
// trustee mode; it blocks inside host.Start().
func startTrustee(c *cli.Context) error {
	log.Info("Starting trustee")
	host, group, service := readConfigAndStartCothority(c)
	err := service.StartTrustee(group)
	if err != nil {
		log.Error("Could not start the prifi service:", err)
		os.Exit(1)
	}
	host.Router.AddErrorHandler(service.NetworkErrorHappened)
	host.Start()
	return nil
}
// startRelay boots the cothority in relay-mode using the already stored
// configuration, then blocks in the server loop. Exits on service failure.
func startRelay(c *cli.Context) error {
	log.Info("Starting relay")
	host, group, service := readConfigAndStartCothority(c)
	// The relay drives the protocol, so it starts rounds automatically.
	service.AutoStart = true
	err := service.StartRelay(group)
	if err != nil {
		log.Error("Could not start the prifi service:", err)
		os.Exit(1)
	}
	host.Router.AddErrorHandler(service.NetworkErrorHappened)
	host.Start()
	return nil
}
// startClient boots the cothority in client-mode using the already stored
// configuration, then blocks in the server loop. Exits on service failure.
func startClient(c *cli.Context) error {
	log.Info("Starting client")
	host, group, service := readConfigAndStartCothority(c)
	// Zero duration: start immediately, no artificial delay.
	err := service.StartClient(group, time.Duration(0))
	if err != nil {
		log.Error("Could not start the prifi service:", err)
		os.Exit(1)
	}
	host.Router.AddErrorHandler(service.NetworkErrorHappened)
	host.Start()
	return nil
}
// startSocksTunnelOnly runs only the SOCKS server/client pair integrated into
// PriFi, bypassing the DC-net entirely; used to test the tunnel in isolation.
func startSocksTunnelOnly(c *cli.Context) error {
	log.Info("Starting socks tunnel (bypassing PriFi)")
	host, _, service := readConfigAndStartCothority(c)
	err := service.StartSocksTunnelOnly()
	if err != nil {
		log.Error("Could not start the prifi service:", err)
		os.Exit(1)
	}
	host.Start()
	return nil
}
/**
* COTHORITY
*/
// checkOverwrite returns true when the file does not exist, or when it exists
// and the user confirms overwriting it; false when the user declines.
func checkOverwrite(file string) bool {
	_, err := os.Stat(file)
	if err != nil {
		// Nothing to overwrite.
		return true
	}
	// The file is there: ask before clobbering it.
	return app.InputYN(true, "Configuration file "+file+" already exists. Override?")
}
// createNewIdentityToml interactively generates a new identity.toml: it
// creates a fresh key pair, asks which address/port PriFi should bind, writes
// the cothority config, and appends the base64 form of the public key as a
// comment (group.toml needs the base64 form, but cothority saves only hex).
// Fatal-logs (process exit) on any I/O failure.
func createNewIdentityToml(c *cli.Context) error {
	log.Print("Generating public/private keys...")
	privStr, pubStr := createKeyPair()

	addrPort := app.Inputf(":"+strconv.Itoa(DefaultPort)+"", "Which port do you want PriFi to use locally ?")

	// Parse IP + port. Handle the empty answer BEFORE calling SplitHostPort:
	// the original called log.ErrFatal on the split error first, and since
	// SplitHostPort("") always errors, the addrPort == "" default branch was
	// unreachable and an empty answer killed the program.
	var hostStr string
	var portStr string
	if addrPort == "" {
		hostStr = "127.0.0.1"
		portStr = strconv.Itoa(DefaultPort)
	} else {
		host, port, err := net.SplitHostPort(addrPort)
		log.ErrFatal(err, "Couldn't interpret", addrPort)
		if host == "" {
			// Answer like ":8080": bind loopback on the given port.
			hostStr = "127.0.0.1"
		} else {
			hostStr = host
		}
		portStr = port
	}

	serverBinding := network.NewTCPAddress(hostStr + ":" + portStr)
	identity := &app.CothorityConfig{
		Public:  pubStr,
		Private: privStr,
		Address: serverBinding,
	}

	// Ask for a destination folder until the user picks one we may write to.
	// (The original looped on a `configDone` flag that was never set; the
	// loop only ever exited through the break below.)
	var identityFilePath string
	for {
		defaultPath := "."
		if c.GlobalIsSet("default_path") {
			defaultPath = c.GlobalString("default_path")
		}
		folderPath := app.Inputf(defaultPath, "Please enter the path for the new identity.toml file:")
		identityFilePath = path.Join(folderPath, DefaultCothorityConfigFile)

		// Create the directory hierarchy if needed.
		if _, err := os.Stat(folderPath); os.IsNotExist(err) {
			log.Info("Creating inexistant directories for ", folderPath)
			if err = os.MkdirAll(folderPath, 0744); err != nil {
				log.Fatalf("Could not create directory %s %v", folderPath, err)
			}
		}
		if checkOverwrite(identityFilePath) {
			break
		}
	}

	if err := identity.Save(identityFilePath); err != nil {
		log.Fatal("Unable to write the config to file:", err)
	}

	// Cothority writes only the decimal/hex form of the key, but group.toml
	// requires the base64 form, so append it as a comment.
	public, err := crypto.StringHexToPub(network.Suite, pubStr)
	if err != nil {
		log.Fatal("Impossible to parse public key:", err)
	}
	var buff bytes.Buffer
	if err := crypto.Write64Pub(network.Suite, &buff, public); err != nil {
		log.Error("Can't convert public key to base 64")
		return nil
	}
	f, err := os.OpenFile(identityFilePath, os.O_RDWR|os.O_APPEND, 0660)
	if err != nil {
		log.Fatal("Unable to write the config to file (2):", err)
	}
	// Check write/close errors instead of silently discarding them as the
	// original did; a short write here would corrupt the identity file.
	publicKeyBase64String := buff.String()
	if _, err := f.WriteString("# Public (base64) = " + publicKeyBase64String + "\n"); err != nil {
		f.Close()
		log.Fatal("Unable to append the public key comment:", err)
	}
	if err := f.Close(); err != nil {
		log.Fatal("Unable to close the config file:", err)
	}
	log.Info("Identity file saved.")
	return nil
}
// startCothorityNode starts the cothority node that the prifi-service
// communicates through. It reads the config file named by the
// "cothority_config" flag and returns the parsed server.
func startCothorityNode(c *cli.Context) (*onet.Server, error) {
	cfile := c.GlobalString("cothority_config")
	// Bail out early when the config file is missing.
	_, statErr := os.Stat(cfile)
	if os.IsNotExist(statErr) {
		log.Error("Could not open file \"", cfile, "\" (specified by flag cothority_config)")
		return nil, statErr
	}
	// Parse the config; only the server handle is needed here.
	_, host, parseErr := app.ParseCothority(cfile)
	if parseErr != nil {
		log.Error("Could not parse file", cfile)
		return nil, parseErr
	}
	return host, nil
}
/**
* CONFIG
*/
// readPriFiConfigFile reads and parses the PriFi .toml configuration file
// named by the "prifi_config" flag. The SOCKS ports can be overridden on the
// command line via "port" and "port_client".
// Returns a non-nil error when the file is missing, unreadable, or bad toml.
func readPriFiConfigFile(c *cli.Context) (*prifi_protocol.PrifiTomlConfig, error) {
	cfile := c.GlobalString("prifi_config")
	if _, err := os.Stat(cfile); os.IsNotExist(err) {
		log.Error("Could not open file \"", cfile, "\" (specified by flag prifi_config)")
		return nil, err
	}
	tomlRawData, err := ioutil.ReadFile(cfile)
	if err != nil {
		log.Error("Could not read file \"", cfile, "\" (specified by flag prifi_config)")
		// The original only logged here and fell through to decode nil data;
		// propagate the error instead.
		return nil, err
	}
	tomlConfig := &prifi_protocol.PrifiTomlConfig{}
	if _, err := toml.Decode(string(tomlRawData), tomlConfig); err != nil {
		log.Error("Could not parse toml file", cfile)
		return nil, err
	}
	// Ports can be overridden by the command-line params.
	if c.GlobalIsSet("port") {
		tomlConfig.SocksServerPort = c.GlobalInt("port")
	}
	if c.GlobalIsSet("port_client") {
		tomlConfig.SocksClientPort = c.GlobalInt("port_client")
	}
	return tomlConfig, nil
}
// getDefaultFilePathForName builds a path to the default config folder and
// appends fileName to it. Falls back to the current working directory when
// the user's home directory cannot be determined.
func getDefaultFilePathForName(fileName string) string {
	u, err := user.Current()
	if err != nil {
		// Can't get the user dir, so fall back to the current working dir.
		fmt.Print("[-] Could not get your home's directory. Switching back to current dir.")
		curr, wdErr := os.Getwd()
		if wdErr != nil {
			log.Fatalf("Impossible to get the current directory. %v", wdErr)
		}
		return path.Join(curr, fileName)
	}
	// Stick to the usual OS config folders.
	// TODO WIndows ? FreeBSD ?
	if runtime.GOOS == "darwin" {
		return path.Join(u.HomeDir, "Library", DefaultName, fileName)
	}
	return path.Join(u.HomeDir, ".config", DefaultName, fileName)
}
// readCothorityGroupConfig reads the group-file named by the "group" flag and
// returns it. Signals every failure (missing file, unreadable file, bad toml,
// empty roster) by logging and returning nil — callers must check for nil.
func readCothorityGroupConfig(c *cli.Context) *app.Group {
	gfile := c.GlobalString("group")
	if _, err := os.Stat(gfile); os.IsNotExist(err) {
		log.Error("Could not open file \"", gfile, "\" (specified by flag group)")
		return nil
	}
	gr, err := os.Open(gfile)
	if err != nil {
		log.Error("Could not read file \"", gfile, "\"")
		return nil
	}
	defer gr.Close()
	groups, err := app.ReadGroupDescToml(gr)
	if err != nil {
		log.Error("Could not parse toml file \"", gfile, "\"")
		return nil
	}
	// An empty roster would make every PriFi role fail later; reject it here.
	if groups == nil || groups.Roster == nil || len(groups.Roster.List) == 0 {
		log.Error("No servers found in roster from", gfile)
		return nil
	}
	return groups
}
// createKeyPair generates a fresh key pair on network.Suite and returns the
// (private, public) keys in hexadecimal string representation.
// Fatal-logs (process exit) if either key cannot be serialized.
func createKeyPair() (string, string) {
	kp := cryptoconfig.NewKeyPair(network.Suite)
	privStr, err := crypto.ScalarToStringHex(network.Suite, kp.Secret)
	if err != nil {
		log.Fatal("Error formating private key to hexadecimal. Abort.")
	}
	var point abstract.Point
	// use the transformation for EdDSA signatures
	//point = cosi.Ed25519Public(network.Suite, kp.Secret)
	point = kp.Public
	pubStr, err := crypto.PubToStringHex(network.Suite, point)
	if err != nil {
		log.Fatal("Could not parse public key. Abort.")
	}
	return privStr, pubStr
}
|
package smux
import (
crand "crypto/rand"
"encoding/binary"
"fmt"
"io"
"log"
"math/rand"
"net"
"net/http"
_ "net/http/pprof"
"strings"
"sync"
"testing"
"time"
)
// init starts a pprof endpoint on :6060 plus the TCP echo server on
// 127.0.0.1:19999 that almost every test in this file dials.
func init() {
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	ln, err := net.Listen("tcp", "127.0.0.1:19999")
	if err != nil {
		panic(err)
	}
	go func() {
		for {
			conn, err := ln.Accept()
			if err != nil {
				// The original ignored this error and handed a nil conn to
				// handleConnection; stop accepting instead of looping on a
				// dead listener.
				return
			}
			go handleConnection(conn)
		}
	}()
}
// handleConnection serves one raw TCP connection as an smux server: every
// accepted stream is echoed back until the stream or session dies.
func handleConnection(conn net.Conn) {
	session, _ := Server(conn, nil)
	for {
		if stream, err := session.AcceptStream(); err == nil {
			// One echo goroutine per stream.
			go func(s io.ReadWriteCloser) {
				buf := make([]byte, 65536)
				for {
					n, err := s.Read(buf)
					if err != nil {
						return
					}
					s.Write(buf[:n])
				}
			}(stream)
		} else {
			// AcceptStream fails once the session is closed; stop serving.
			return
		}
	}
}
// TestEcho opens one stream to the echo server from init() and checks that
// 100 short messages come back concatenated byte-for-byte.
func TestEcho(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	buf := make([]byte, 10)
	var sent string
	var received string
	for i := 0; i < N; i++ {
		msg := fmt.Sprintf("hello%v", i)
		stream.Write([]byte(msg))
		sent += msg
		if n, err := stream.Read(buf); err != nil {
			t.Fatal(err)
		} else {
			received += string(buf[:n])
		}
	}
	if sent != received {
		t.Fatal("data mimatch")
	}
	session.Close()
}
// TestSpeed round-trips 16MB through the echo server on one stream and logs
// the elapsed time.
func TestSpeed(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	start := time.Now()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		buf := make([]byte, 1024*1024)
		nrecv := 0
		for {
			n, err := stream.Read(buf)
			if err != nil {
				// t.Fatal must only be called from the test goroutine (it
				// calls runtime.Goexit); report with t.Error and bail out.
				t.Error(err)
				return
			}
			nrecv += n
			// 4096*4096 == 2048 writes of 8192 bytes == 16MB.
			if nrecv == 4096*4096 {
				break
			}
		}
		stream.Close()
		t.Log("time for 16MB rtt", time.Since(start))
	}()
	msg := make([]byte, 8192)
	for i := 0; i < 2048; i++ {
		stream.Write(msg)
	}
	wg.Wait()
	session.Close()
}
// TestParallel opens 1000 concurrent streams on one session, echoes 100
// messages on each, and waits for all of them to finish.
func TestParallel(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	par := 1000
	messages := 100
	var wg sync.WaitGroup
	wg.Add(par)
	for i := 0; i < par; i++ {
		stream, _ := session.OpenStream()
		go func(s *Stream) {
			buf := make([]byte, 20)
			for j := 0; j < messages; j++ {
				msg := fmt.Sprintf("hello%v", j)
				s.Write([]byte(msg))
				if _, err := s.Read(buf); err != nil {
					break
				}
			}
			s.Close()
			wg.Done()
		}(stream)
	}
	t.Log("created", session.NumStreams(), "streams")
	wg.Wait()
	session.Close()
}
// TestCloseThenOpen verifies that OpenStream fails on a closed session.
func TestCloseThenOpen(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	session.Close()
	if _, err := session.OpenStream(); err == nil {
		t.Fatal("opened after close")
	}
}
// TestStreamDoubleClose closes a stream twice; it only logs (does not fail)
// when the second Close returns no error.
func TestStreamDoubleClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	stream.Close()
	if err := stream.Close(); err == nil {
		t.Log("double close doesn't return error")
	}
	session.Close()
}
// TestTinyReadBuffer echoes 100 messages but drains each reply through a
// 6-byte buffer, exercising partial reads of buffered stream data.
func TestTinyReadBuffer(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	tinybuf := make([]byte, 6)
	var sent string
	var received string
	for i := 0; i < N; i++ {
		msg := fmt.Sprintf("hello%v", i)
		sent += msg
		nsent, err := stream.Write([]byte(msg))
		if err != nil {
			t.Fatal("cannot write")
		}
		// Read back exactly as many bytes as were sent, in tiny chunks.
		nrecv := 0
		for nrecv < nsent {
			if n, err := stream.Read(tinybuf); err == nil {
				nrecv += n
				received += string(tinybuf[:n])
			} else {
				t.Fatal("cannot read with tiny buffer")
			}
		}
	}
	if sent != received {
		t.Fatal("data mimatch")
	}
	session.Close()
}
// TestIsClose verifies that IsClosed reports true after Close.
func TestIsClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	session.Close()
	if session.IsClosed() != true {
		t.Fatal("still open after close")
	}
}
// TestKeepAliveTimeout connects to a server that accepts but never speaks
// smux, and checks the client session closes itself once keepalives go
// unanswered past KeepAliveTimeout.
func TestKeepAliveTimeout(t *testing.T) {
	ln, err := net.Listen("tcp", "127.0.0.1:29999")
	if err != nil {
		// handle error
		panic(err)
	}
	// Accept one connection and then ignore it entirely.
	go func() {
		ln.Accept()
	}()
	cli, err := net.Dial("tcp", "127.0.0.1:29999")
	if err != nil {
		t.Fatal(err)
	}
	config := DefaultConfig()
	config.KeepAliveInterval = time.Second
	config.KeepAliveTimeout = 2 * time.Second
	session, _ := Client(cli, config)
	// Wait past the timeout so the keepalive machinery must have fired.
	<-time.After(3 * time.Second)
	if session.IsClosed() != true {
		t.Fatal("keepalive-timeout failed")
	}
}
// TestServerEcho inverts the usual roles: the server opens the stream and
// sends, while the client accepts the stream and echoes everything back.
func TestServerEcho(t *testing.T) {
	ln, err := net.Listen("tcp", "127.0.0.1:39999")
	if err != nil {
		panic(err)
	}
	go func() {
		// t.Fatal must not be called from a non-test goroutine; use
		// t.Error and return instead (the original used t.Fatal here and
		// even called t.Fatal(err) with a nil err on data mismatch).
		conn, err := ln.Accept()
		if err != nil {
			t.Error(err)
			return
		}
		session, _ := Server(conn, nil)
		stream, err := session.OpenStream()
		if err != nil {
			t.Error(err)
			return
		}
		const N = 100
		buf := make([]byte, 10)
		for i := 0; i < N; i++ {
			msg := fmt.Sprintf("hello%v", i)
			stream.Write([]byte(msg))
			if n, err := stream.Read(buf); err != nil {
				t.Error(err)
				return
			} else if string(buf[:n]) != msg {
				t.Errorf("got %q, want %q", string(buf[:n]), msg)
				return
			}
		}
		stream.Close()
	}()
	cli, err := net.Dial("tcp", "127.0.0.1:39999")
	if err != nil {
		t.Fatal(err)
	}
	session, err := Client(cli, nil)
	if err != nil {
		t.Fatal(err)
	}
	stream, err := session.AcceptStream()
	if err != nil {
		t.Fatal(err)
	}
	// Echo until the server closes its end.
	buf := make([]byte, 65536)
	for {
		n, err := stream.Read(buf)
		if err != nil {
			break
		}
		stream.Write(buf[:n])
	}
}
// TestSendWithoutRecv writes 100 messages before issuing any read, then
// checks a single byte can still be read back.
func TestSendWithoutRecv(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	for i := 0; i < N; i++ {
		msg := fmt.Sprintf("hello%v", i)
		stream.Write([]byte(msg))
	}
	buf := make([]byte, 1)
	if _, err := stream.Read(buf); err != nil {
		t.Fatal(err)
	}
	stream.Close()
}
// TestWriteAfterClose verifies that writing to a closed stream errors.
func TestWriteAfterClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	stream.Close()
	if _, err := stream.Write([]byte("write after close")); err == nil {
		t.Fatal("write after close failed")
	}
}
// TestReadStreamAfterSessionClose verifies that reading a stream errors once
// its parent session has been closed.
func TestReadStreamAfterSessionClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	session.Close()
	buf := make([]byte, 10)
	if _, err := stream.Read(buf); err != nil {
		t.Log(err)
	} else {
		t.Fatal("read stream after session close succeeded")
	}
}
// TestWriteStreamAfterConnectionClose kills the underlying conn directly and
// verifies that a subsequent stream write errors.
func TestWriteStreamAfterConnectionClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	session.conn.Close()
	if _, err := stream.Write([]byte("write after connection close")); err == nil {
		t.Fatal("write after connection close failed")
	}
}
// TestNumStreamAfterClose checks NumStreams is 1 after one OpenStream and
// drops to 0 after the session closes.
func TestNumStreamAfterClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	if _, err := session.OpenStream(); err == nil {
		if session.NumStreams() != 1 {
			t.Fatal("wrong number of streams after opened")
		}
		session.Close()
		if session.NumStreams() != 0 {
			t.Fatal("wrong number of streams after session closed")
		}
	} else {
		t.Fatal(err)
	}
	cli.Close()
}
// TestRandomFrame fuzzes the server with malformed input: raw random bytes,
// duplicate SYNs, random commands, random sids, random versions, and finally
// a frame whose declared length disagrees with its payload. The test passes
// as long as nothing panics; no assertions are made on the replies.
func TestRandomFrame(t *testing.T) {
	// pure random
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	for i := 0; i < 100; i++ {
		rnd := make([]byte, rand.Uint32()%1024)
		io.ReadFull(crand.Reader, rnd)
		session.conn.Write(rnd)
	}
	cli.Close()
	// double syn
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ = Client(cli, nil)
	for i := 0; i < 100; i++ {
		f := newFrame(cmdSYN, 1000)
		session.writeFrame(f)
	}
	cli.Close()
	// random cmds
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	allcmds := []byte{cmdSYN, cmdRST, cmdPSH, cmdNOP}
	session, _ = Client(cli, nil)
	for i := 0; i < 100; i++ {
		f := newFrame(allcmds[rand.Int()%len(allcmds)], rand.Uint32())
		session.writeFrame(f)
	}
	cli.Close()
	// random cmds & sids
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ = Client(cli, nil)
	for i := 0; i < 100; i++ {
		f := newFrame(byte(rand.Uint32()), rand.Uint32())
		session.writeFrame(f)
	}
	cli.Close()
	// random version
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ = Client(cli, nil)
	for i := 0; i < 100; i++ {
		f := newFrame(byte(rand.Uint32()), rand.Uint32())
		f.ver = byte(rand.Uint32())
		session.writeFrame(f)
	}
	cli.Close()
	// incorrect size
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ = Client(cli, nil)
	f := newFrame(byte(rand.Uint32()), rand.Uint32())
	rnd := make([]byte, rand.Uint32()%1024)
	io.ReadFull(crand.Reader, rnd)
	f.data = rnd
	// Hand-encode the frame header so the length field can be made wrong.
	buf := make([]byte, headerSize+len(f.data))
	buf[0] = f.ver
	buf[1] = f.cmd
	binary.LittleEndian.PutUint16(buf[2:], uint16(len(rnd)+1)) /// incorrect size
	binary.LittleEndian.PutUint32(buf[4:], f.sid)
	copy(buf[headerSize:], f.data)
	session.conn.Write(buf)
	t.Log(rawHeader(buf))
	cli.Close()
}
// TestReadDeadline sets a read deadline in the past and expects every Read to
// fail with an i/o timeout.
func TestReadDeadline(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	buf := make([]byte, 10)
	var readErr error
	for i := 0; i < N; i++ {
		msg := fmt.Sprintf("hello%v", i)
		stream.Write([]byte(msg))
		// Deadline one minute in the past: the read must time out.
		stream.SetReadDeadline(time.Now().Add(-1 * time.Minute))
		if _, readErr = stream.Read(buf); readErr != nil {
			break
		}
	}
	if readErr != nil {
		if !strings.Contains(readErr.Error(), "i/o timeout") {
			t.Fatalf("Wrong error: %v", readErr)
		}
	} else {
		t.Fatal("No error when reading with past deadline")
	}
	session.Close()
}
// TestWriteDeadline sets a write deadline in the past and expects Write to
// fail with an i/o timeout.
func TestWriteDeadline(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	buf := make([]byte, 10)
	var writeErr error
	for i := 0; i < N; i++ {
		stream.SetWriteDeadline(time.Now().Add(-1 * time.Minute))
		if _, writeErr = stream.Write(buf); writeErr != nil {
			break
		}
	}
	if writeErr != nil {
		if !strings.Contains(writeErr.Error(), "i/o timeout") {
			t.Fatalf("Wrong error: %v", writeErr)
		}
	} else {
		t.Fatal("No error when writing with past deadline")
	}
	session.Close()
}
// BenchmarkAcceptClose measures OpenStream+Close throughput on one session.
// NOTE(review): despite the name, this benchmarks the open/close path, not
// AcceptStream — confirm against upstream intent before renaming.
func BenchmarkAcceptClose(b *testing.B) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		b.Fatal(err)
	}
	session, _ := Client(cli, nil)
	for i := 0; i < b.N; i++ {
		if stream, err := session.OpenStream(); err == nil {
			stream.Close()
		} else {
			b.Fatal(err)
		}
	}
}
// BenchmarkConnSmux measures throughput over an smux stream pair.
func BenchmarkConnSmux(b *testing.B) {
	cs, ss, err := getSmuxStreamPair()
	if err != nil {
		b.Fatal(err)
	}
	defer cs.Close()
	defer ss.Close()
	bench(b, cs, ss)
}
// BenchmarkConnTCP measures throughput over a raw TCP pair, as a baseline
// for BenchmarkConnSmux.
func BenchmarkConnTCP(b *testing.B) {
	cs, ss, err := getTCPConnectionPair()
	if err != nil {
		b.Fatal(err)
	}
	defer cs.Close()
	defer ss.Close()
	bench(b, cs, ss)
}
// getSmuxStreamPair builds a connected (client, server) smux stream pair on
// top of a loopback TCP connection. AcceptStream runs in a goroutine because
// it blocks until the client's OpenStream arrives.
func getSmuxStreamPair() (*Stream, *Stream, error) {
	c1, c2, err := getTCPConnectionPair()
	if err != nil {
		return nil, nil, err
	}
	s, err := Server(c2, nil)
	if err != nil {
		return nil, nil, err
	}
	c, err := Client(c1, nil)
	if err != nil {
		return nil, nil, err
	}
	var ss *Stream
	done := make(chan error)
	go func() {
		var rerr error
		ss, rerr = s.AcceptStream()
		done <- rerr
		close(done)
	}()
	cs, err := c.OpenStream()
	if err != nil {
		return nil, nil, err
	}
	// Wait for the accept side; this also makes the write to ss visible.
	err = <-done
	if err != nil {
		return nil, nil, err
	}
	return cs, ss, nil
}
// getTCPConnectionPair returns two ends of a loopback TCP connection, using
// an ephemeral port ("127.0.0.1:0") so parallel runs don't collide.
func getTCPConnectionPair() (net.Conn, net.Conn, error) {
	lst, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, nil, err
	}
	var conn0 net.Conn
	var err0 error
	done := make(chan struct{})
	go func() {
		conn0, err0 = lst.Accept()
		close(done)
	}()
	conn1, err := net.Dial("tcp", lst.Addr().String())
	if err != nil {
		return nil, nil, err
	}
	// Wait for Accept to finish before reading conn0/err0.
	<-done
	if err0 != nil {
		return nil, nil, err0
	}
	return conn0, conn1, nil
}
// bench pumps b.N chunks of 128KiB from wr to rd and waits until the reader
// has counted exactly that many bytes.
// NOTE(review): the reader exits only when count hits 128*1024*b.N exactly;
// if the transport ever delivered a different total this would hang — confirm
// that assumption holds for both smux streams and raw TCP.
func bench(b *testing.B, rd io.Reader, wr io.Writer) {
	buf := make([]byte, 128*1024)
	buf2 := make([]byte, 128*1024)
	b.SetBytes(128 * 1024)
	b.ResetTimer()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		count := 0
		for {
			n, _ := rd.Read(buf2)
			count += n
			if count == 128*1024*b.N {
				return
			}
		}
	}()
	for i := 0; i < b.N; i++ {
		wr.Write(buf)
	}
	wg.Wait()
}
Add a test.
package smux
import (
crand "crypto/rand"
"encoding/binary"
"fmt"
"io"
"log"
"math/rand"
"net"
"net/http"
_ "net/http/pprof"
"strings"
"sync"
"testing"
"time"
)
// init starts a pprof endpoint on :6060 plus the TCP echo server on
// 127.0.0.1:19999 that almost every test in this file dials.
func init() {
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	ln, err := net.Listen("tcp", "127.0.0.1:19999")
	if err != nil {
		panic(err)
	}
	go func() {
		for {
			conn, err := ln.Accept()
			if err != nil {
				// The original ignored this error and handed a nil conn to
				// handleConnection; stop accepting instead of looping on a
				// dead listener.
				return
			}
			go handleConnection(conn)
		}
	}()
}
// handleConnection serves one raw TCP connection as an smux server: every
// accepted stream is echoed back until the stream or session dies.
func handleConnection(conn net.Conn) {
	session, _ := Server(conn, nil)
	for {
		if stream, err := session.AcceptStream(); err == nil {
			// One echo goroutine per stream.
			go func(s io.ReadWriteCloser) {
				buf := make([]byte, 65536)
				for {
					n, err := s.Read(buf)
					if err != nil {
						return
					}
					s.Write(buf[:n])
				}
			}(stream)
		} else {
			return
		}
	}
}
// TestEcho opens one stream to the echo server from init() and checks that
// 100 short messages come back concatenated byte-for-byte.
func TestEcho(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	buf := make([]byte, 10)
	var sent string
	var received string
	for i := 0; i < N; i++ {
		msg := fmt.Sprintf("hello%v", i)
		stream.Write([]byte(msg))
		sent += msg
		if n, err := stream.Read(buf); err != nil {
			t.Fatal(err)
		} else {
			received += string(buf[:n])
		}
	}
	if sent != received {
		t.Fatal("data mimatch")
	}
	session.Close()
}
// TestSpeed round-trips 16MB through the echo server on one stream and logs
// the elapsed time.
func TestSpeed(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	t.Log(stream.LocalAddr(), stream.RemoteAddr())
	start := time.Now()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		buf := make([]byte, 1024*1024)
		nrecv := 0
		for {
			n, err := stream.Read(buf)
			if err != nil {
				// t.Fatal must only be called from the test goroutine (it
				// calls runtime.Goexit); report with t.Error and bail out.
				t.Error(err)
				return
			}
			nrecv += n
			// 4096*4096 == 2048 writes of 8192 bytes == 16MB.
			if nrecv == 4096*4096 {
				break
			}
		}
		stream.Close()
		t.Log("time for 16MB rtt", time.Since(start))
	}()
	msg := make([]byte, 8192)
	for i := 0; i < 2048; i++ {
		stream.Write(msg)
	}
	wg.Wait()
	session.Close()
}
// TestParallel opens 1000 concurrent streams on one session, echoes 100
// messages on each, and waits for all of them to finish.
func TestParallel(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	par := 1000
	messages := 100
	var wg sync.WaitGroup
	wg.Add(par)
	for i := 0; i < par; i++ {
		stream, _ := session.OpenStream()
		go func(s *Stream) {
			buf := make([]byte, 20)
			for j := 0; j < messages; j++ {
				msg := fmt.Sprintf("hello%v", j)
				s.Write([]byte(msg))
				if _, err := s.Read(buf); err != nil {
					break
				}
			}
			s.Close()
			wg.Done()
		}(stream)
	}
	t.Log("created", session.NumStreams(), "streams")
	wg.Wait()
	session.Close()
}
// TestCloseThenOpen verifies that OpenStream fails on a closed session.
func TestCloseThenOpen(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	session.Close()
	if _, err := session.OpenStream(); err == nil {
		t.Fatal("opened after close")
	}
}
// TestStreamDoubleClose closes a stream twice; it only logs (does not fail)
// when the second Close returns no error.
func TestStreamDoubleClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	stream.Close()
	if err := stream.Close(); err == nil {
		t.Log("double close doesn't return error")
	}
	session.Close()
}
// TestTinyReadBuffer echoes 100 messages but drains each reply through a
// 6-byte buffer, exercising partial reads of buffered stream data.
func TestTinyReadBuffer(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	tinybuf := make([]byte, 6)
	var sent string
	var received string
	for i := 0; i < N; i++ {
		msg := fmt.Sprintf("hello%v", i)
		sent += msg
		nsent, err := stream.Write([]byte(msg))
		if err != nil {
			t.Fatal("cannot write")
		}
		// Read back exactly as many bytes as were sent, in tiny chunks.
		nrecv := 0
		for nrecv < nsent {
			if n, err := stream.Read(tinybuf); err == nil {
				nrecv += n
				received += string(tinybuf[:n])
			} else {
				t.Fatal("cannot read with tiny buffer")
			}
		}
	}
	if sent != received {
		t.Fatal("data mimatch")
	}
	session.Close()
}
// TestIsClose verifies that IsClosed reports true after Close.
func TestIsClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	session.Close()
	if session.IsClosed() != true {
		t.Fatal("still open after close")
	}
}
// TestKeepAliveTimeout connects to a server that accepts but never speaks
// smux, and checks the client session closes itself once keepalives go
// unanswered past KeepAliveTimeout.
func TestKeepAliveTimeout(t *testing.T) {
	ln, err := net.Listen("tcp", "127.0.0.1:29999")
	if err != nil {
		// handle error
		panic(err)
	}
	// Accept one connection and then ignore it entirely.
	go func() {
		ln.Accept()
	}()
	cli, err := net.Dial("tcp", "127.0.0.1:29999")
	if err != nil {
		t.Fatal(err)
	}
	config := DefaultConfig()
	config.KeepAliveInterval = time.Second
	config.KeepAliveTimeout = 2 * time.Second
	session, _ := Client(cli, config)
	// Wait past the timeout so the keepalive machinery must have fired.
	<-time.After(3 * time.Second)
	if session.IsClosed() != true {
		t.Fatal("keepalive-timeout failed")
	}
}
// TestServerEcho inverts the usual roles: the server opens the stream and
// sends, while the client accepts the stream and echoes everything back.
func TestServerEcho(t *testing.T) {
	ln, err := net.Listen("tcp", "127.0.0.1:39999")
	if err != nil {
		panic(err)
	}
	go func() {
		// t.Fatal must not be called from a non-test goroutine; use
		// t.Error and return instead (the original used t.Fatal here and
		// even called t.Fatal(err) with a nil err on data mismatch).
		conn, err := ln.Accept()
		if err != nil {
			t.Error(err)
			return
		}
		session, _ := Server(conn, nil)
		stream, err := session.OpenStream()
		if err != nil {
			t.Error(err)
			return
		}
		const N = 100
		buf := make([]byte, 10)
		for i := 0; i < N; i++ {
			msg := fmt.Sprintf("hello%v", i)
			stream.Write([]byte(msg))
			if n, err := stream.Read(buf); err != nil {
				t.Error(err)
				return
			} else if string(buf[:n]) != msg {
				t.Errorf("got %q, want %q", string(buf[:n]), msg)
				return
			}
		}
		stream.Close()
	}()
	cli, err := net.Dial("tcp", "127.0.0.1:39999")
	if err != nil {
		t.Fatal(err)
	}
	session, err := Client(cli, nil)
	if err != nil {
		t.Fatal(err)
	}
	stream, err := session.AcceptStream()
	if err != nil {
		t.Fatal(err)
	}
	// Echo until the server closes its end.
	buf := make([]byte, 65536)
	for {
		n, err := stream.Read(buf)
		if err != nil {
			break
		}
		stream.Write(buf[:n])
	}
}
// TestSendWithoutRecv writes 100 messages before issuing any read, then
// checks a single byte can still be read back.
func TestSendWithoutRecv(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	for i := 0; i < N; i++ {
		msg := fmt.Sprintf("hello%v", i)
		stream.Write([]byte(msg))
	}
	buf := make([]byte, 1)
	if _, err := stream.Read(buf); err != nil {
		t.Fatal(err)
	}
	stream.Close()
}
// TestWriteAfterClose verifies that writing to a closed stream errors.
func TestWriteAfterClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	stream.Close()
	if _, err := stream.Write([]byte("write after close")); err == nil {
		t.Fatal("write after close failed")
	}
}
// TestReadStreamAfterSessionClose verifies that reading a stream errors once
// its parent session has been closed.
func TestReadStreamAfterSessionClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	session.Close()
	buf := make([]byte, 10)
	if _, err := stream.Read(buf); err != nil {
		t.Log(err)
	} else {
		t.Fatal("read stream after session close succeeded")
	}
}
// TestWriteStreamAfterConnectionClose kills the underlying conn directly and
// verifies that a subsequent stream write errors.
func TestWriteStreamAfterConnectionClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	session.conn.Close()
	if _, err := stream.Write([]byte("write after connection close")); err == nil {
		t.Fatal("write after connection close failed")
	}
}
// TestNumStreamAfterClose checks NumStreams is 1 after one OpenStream and
// drops to 0 after the session closes.
func TestNumStreamAfterClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	if _, err := session.OpenStream(); err == nil {
		if session.NumStreams() != 1 {
			t.Fatal("wrong number of streams after opened")
		}
		session.Close()
		if session.NumStreams() != 0 {
			t.Fatal("wrong number of streams after session closed")
		}
	} else {
		t.Fatal(err)
	}
	cli.Close()
}
// TestRandomFrame fuzzes the server with malformed input: raw random bytes,
// duplicate SYNs, random commands, random sids, random versions, and finally
// a frame whose declared length disagrees with its payload. The test passes
// as long as nothing panics; no assertions are made on the replies.
func TestRandomFrame(t *testing.T) {
	// pure random
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	for i := 0; i < 100; i++ {
		rnd := make([]byte, rand.Uint32()%1024)
		io.ReadFull(crand.Reader, rnd)
		session.conn.Write(rnd)
	}
	cli.Close()
	// double syn
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ = Client(cli, nil)
	for i := 0; i < 100; i++ {
		f := newFrame(cmdSYN, 1000)
		session.writeFrame(f)
	}
	cli.Close()
	// random cmds
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	allcmds := []byte{cmdSYN, cmdRST, cmdPSH, cmdNOP}
	session, _ = Client(cli, nil)
	for i := 0; i < 100; i++ {
		f := newFrame(allcmds[rand.Int()%len(allcmds)], rand.Uint32())
		session.writeFrame(f)
	}
	cli.Close()
	// random cmds & sids
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ = Client(cli, nil)
	for i := 0; i < 100; i++ {
		f := newFrame(byte(rand.Uint32()), rand.Uint32())
		session.writeFrame(f)
	}
	cli.Close()
	// random version
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ = Client(cli, nil)
	for i := 0; i < 100; i++ {
		f := newFrame(byte(rand.Uint32()), rand.Uint32())
		f.ver = byte(rand.Uint32())
		session.writeFrame(f)
	}
	cli.Close()
	// incorrect size
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ = Client(cli, nil)
	f := newFrame(byte(rand.Uint32()), rand.Uint32())
	rnd := make([]byte, rand.Uint32()%1024)
	io.ReadFull(crand.Reader, rnd)
	f.data = rnd
	// Hand-encode the frame header so the length field can be made wrong.
	buf := make([]byte, headerSize+len(f.data))
	buf[0] = f.ver
	buf[1] = f.cmd
	binary.LittleEndian.PutUint16(buf[2:], uint16(len(rnd)+1)) /// incorrect size
	binary.LittleEndian.PutUint32(buf[4:], f.sid)
	copy(buf[headerSize:], f.data)
	session.conn.Write(buf)
	t.Log(rawHeader(buf))
	cli.Close()
}
// TestReadDeadline sets a read deadline in the past and expects every Read to
// fail with an i/o timeout.
func TestReadDeadline(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	buf := make([]byte, 10)
	var readErr error
	for i := 0; i < N; i++ {
		msg := fmt.Sprintf("hello%v", i)
		stream.Write([]byte(msg))
		// Deadline one minute in the past: the read must time out.
		stream.SetReadDeadline(time.Now().Add(-1 * time.Minute))
		if _, readErr = stream.Read(buf); readErr != nil {
			break
		}
	}
	if readErr != nil {
		if !strings.Contains(readErr.Error(), "i/o timeout") {
			t.Fatalf("Wrong error: %v", readErr)
		}
	} else {
		t.Fatal("No error when reading with past deadline")
	}
	session.Close()
}
// TestWriteDeadline sets a write deadline in the past and expects Write to
// fail with an i/o timeout.
func TestWriteDeadline(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	buf := make([]byte, 10)
	var writeErr error
	for i := 0; i < N; i++ {
		stream.SetWriteDeadline(time.Now().Add(-1 * time.Minute))
		if _, writeErr = stream.Write(buf); writeErr != nil {
			break
		}
	}
	if writeErr != nil {
		if !strings.Contains(writeErr.Error(), "i/o timeout") {
			t.Fatalf("Wrong error: %v", writeErr)
		}
	} else {
		t.Fatal("No error when writing with past deadline")
	}
	session.Close()
}
// BenchmarkAcceptClose measures OpenStream+Close throughput on one session.
// NOTE(review): despite the name, this benchmarks the open/close path, not
// AcceptStream — confirm against upstream intent before renaming.
func BenchmarkAcceptClose(b *testing.B) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		b.Fatal(err)
	}
	session, _ := Client(cli, nil)
	for i := 0; i < b.N; i++ {
		if stream, err := session.OpenStream(); err == nil {
			stream.Close()
		} else {
			b.Fatal(err)
		}
	}
}
// BenchmarkConnSmux measures throughput over an smux stream pair.
func BenchmarkConnSmux(b *testing.B) {
	cs, ss, err := getSmuxStreamPair()
	if err != nil {
		b.Fatal(err)
	}
	defer cs.Close()
	defer ss.Close()
	bench(b, cs, ss)
}
// BenchmarkConnTCP measures throughput over a raw TCP pair, as a baseline
// for BenchmarkConnSmux.
func BenchmarkConnTCP(b *testing.B) {
	cs, ss, err := getTCPConnectionPair()
	if err != nil {
		b.Fatal(err)
	}
	defer cs.Close()
	defer ss.Close()
	bench(b, cs, ss)
}
// getSmuxStreamPair builds a connected (client, server) smux stream pair on
// top of a loopback TCP connection. AcceptStream runs in a goroutine because
// it blocks until the client's OpenStream arrives.
func getSmuxStreamPair() (*Stream, *Stream, error) {
	c1, c2, err := getTCPConnectionPair()
	if err != nil {
		return nil, nil, err
	}
	s, err := Server(c2, nil)
	if err != nil {
		return nil, nil, err
	}
	c, err := Client(c1, nil)
	if err != nil {
		return nil, nil, err
	}
	var ss *Stream
	done := make(chan error)
	go func() {
		var rerr error
		ss, rerr = s.AcceptStream()
		done <- rerr
		close(done)
	}()
	cs, err := c.OpenStream()
	if err != nil {
		return nil, nil, err
	}
	// Wait for the accept side; this also makes the write to ss visible.
	err = <-done
	if err != nil {
		return nil, nil, err
	}
	return cs, ss, nil
}
// getTCPConnectionPair returns two ends of a loopback TCP connection, using
// an ephemeral port ("127.0.0.1:0") so parallel runs don't collide.
func getTCPConnectionPair() (net.Conn, net.Conn, error) {
	lst, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, nil, err
	}
	var conn0 net.Conn
	var err0 error
	done := make(chan struct{})
	go func() {
		conn0, err0 = lst.Accept()
		close(done)
	}()
	conn1, err := net.Dial("tcp", lst.Addr().String())
	if err != nil {
		return nil, nil, err
	}
	// Wait for Accept to finish before reading conn0/err0.
	<-done
	if err0 != nil {
		return nil, nil, err0
	}
	return conn0, conn1, nil
}
// bench pumps b.N chunks of 128KiB from wr to rd and waits until the reader
// has counted exactly that many bytes.
// NOTE(review): the reader exits only when count hits 128*1024*b.N exactly;
// if the transport ever delivered a different total this would hang — confirm
// that assumption holds for both smux streams and raw TCP.
func bench(b *testing.B, rd io.Reader, wr io.Writer) {
	buf := make([]byte, 128*1024)
	buf2 := make([]byte, 128*1024)
	b.SetBytes(128 * 1024)
	b.ResetTimer()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		count := 0
		for {
			n, _ := rd.Read(buf2)
			count += n
			if count == 128*1024*b.N {
				return
			}
		}
	}()
	for i := 0; i < b.N; i++ {
		wr.Write(buf)
	}
	wg.Wait()
}
|
// generated by stringer -output=interfaces_string.go -type=BorderType,CommandCategory,DockingType,KeyboardMode; DO NOT EDIT
package wicore
import "fmt"
// Older stringer output: the index table omits a leading 0, so the
// lower bound of each name is taken from the previous entry.
const _BorderType_name = "BorderNoneBorderSingleBorderDouble"

var _BorderType_index = [...]uint8{10, 22, 34}

// String returns the name of the BorderType value, or "BorderType(n)"
// for out-of-range values.
func (i BorderType) String() string {
	if i < 0 || i >= BorderType(len(_BorderType_index)) {
		return fmt.Sprintf("BorderType(%d)", i)
	}
	hi := _BorderType_index[i]
	lo := uint8(0)
	if i > 0 {
		lo = _BorderType_index[i-1]
	}
	return _BorderType_name[lo:hi]
}
// Older stringer output (no leading 0 in the index table).
const _CommandCategory_name = "UnknownCategoryWindowCategoryCommandsCategoryEditorCategoryDebugCategory"

var _CommandCategory_index = [...]uint8{15, 29, 45, 59, 72}

// String returns the name of the CommandCategory value, or
// "CommandCategory(n)" for out-of-range values.
func (i CommandCategory) String() string {
	if i < 0 || i >= CommandCategory(len(_CommandCategory_index)) {
		return fmt.Sprintf("CommandCategory(%d)", i)
	}
	hi := _CommandCategory_index[i]
	lo := uint8(0)
	if i > 0 {
		lo = _CommandCategory_index[i-1]
	}
	return _CommandCategory_name[lo:hi]
}
// Older stringer output (no leading 0 in the index table).
const _DockingType_name = "DockingUnknownDockingFillDockingFloatingDockingLeftDockingRightDockingTopDockingBottom"

var _DockingType_index = [...]uint8{14, 25, 40, 51, 63, 73, 86}

// String returns the name of the DockingType value, or
// "DockingType(n)" for out-of-range values.
func (i DockingType) String() string {
	if i < 0 || i >= DockingType(len(_DockingType_index)) {
		return fmt.Sprintf("DockingType(%d)", i)
	}
	hi := _DockingType_index[i]
	lo := uint8(0)
	if i > 0 {
		lo = _DockingType_index[i-1]
	}
	return _DockingType_name[lo:hi]
}
// Older stringer output (no leading 0 in the index table).
const _KeyboardMode_name = "NormalInsertAllMode"

var _KeyboardMode_index = [...]uint8{6, 12, 19}

// String returns the name of the KeyboardMode value, or
// "KeyboardMode(n)" for out-of-range values. KeyboardMode constants
// start at 1, hence the initial decrement.
func (i KeyboardMode) String() string {
	i -= 1
	if i < 0 || i >= KeyboardMode(len(_KeyboardMode_index)) {
		return fmt.Sprintf("KeyboardMode(%d)", i+1)
	}
	hi := _KeyboardMode_index[i]
	lo := uint8(0)
	if i > 0 {
		lo = _KeyboardMode_index[i-1]
	}
	return _KeyboardMode_name[lo:hi]
}
// Fix regression in ebdad1588 for interfaces_string.go.
// It was due to an old installation of golang.org/x/tools/cmd/stringer.
// generated by stringer -output=interfaces_string.go -type=BorderType,CommandCategory,DockingType,KeyboardMode; DO NOT EDIT
package wicore
import "fmt"
// Current stringer output: the index table carries a leading 0, so
// each name is name[index[i]:index[i+1]].
const _BorderType_name = "BorderNoneBorderSingleBorderDouble"

var _BorderType_index = [...]uint8{0, 10, 22, 34}

// String returns the name of the BorderType value, or "BorderType(n)"
// for out-of-range values.
func (i BorderType) String() string {
	if i < 0 || i+1 >= BorderType(len(_BorderType_index)) {
		return fmt.Sprintf("BorderType(%d)", i)
	}
	return _BorderType_name[_BorderType_index[i]:_BorderType_index[i+1]]
}
// Current stringer output (leading 0 in the index table).
const _CommandCategory_name = "UnknownCategoryWindowCategoryCommandsCategoryEditorCategoryDebugCategory"

var _CommandCategory_index = [...]uint8{0, 15, 29, 45, 59, 72}

// String returns the name of the CommandCategory value, or
// "CommandCategory(n)" for out-of-range values.
func (i CommandCategory) String() string {
	if i < 0 || i+1 >= CommandCategory(len(_CommandCategory_index)) {
		return fmt.Sprintf("CommandCategory(%d)", i)
	}
	return _CommandCategory_name[_CommandCategory_index[i]:_CommandCategory_index[i+1]]
}
// Current stringer output (leading 0 in the index table).
const _DockingType_name = "DockingUnknownDockingFillDockingFloatingDockingLeftDockingRightDockingTopDockingBottom"

var _DockingType_index = [...]uint8{0, 14, 25, 40, 51, 63, 73, 86}

// String returns the name of the DockingType value, or
// "DockingType(n)" for out-of-range values.
func (i DockingType) String() string {
	if i < 0 || i+1 >= DockingType(len(_DockingType_index)) {
		return fmt.Sprintf("DockingType(%d)", i)
	}
	return _DockingType_name[_DockingType_index[i]:_DockingType_index[i+1]]
}
// Current stringer output (leading 0 in the index table).
const _KeyboardMode_name = "NormalInsertAllMode"

var _KeyboardMode_index = [...]uint8{0, 6, 12, 19}

// String returns the name of the KeyboardMode value, or
// "KeyboardMode(n)" for out-of-range values. KeyboardMode constants
// start at 1, hence the initial decrement.
func (i KeyboardMode) String() string {
	i -= 1
	if i < 0 || i+1 >= KeyboardMode(len(_KeyboardMode_index)) {
		return fmt.Sprintf("KeyboardMode(%d)", i+1)
	}
	return _KeyboardMode_name[_KeyboardMode_index[i]:_KeyboardMode_index[i+1]]
}
|
// Copyright 2015 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Ben Darnell
package storage_test
import (
"bytes"
"fmt"
"reflect"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/pkg/errors"
"golang.org/x/net/context"
"github.com/cockroachdb/cockroach/base"
"github.com/cockroachdb/cockroach/client"
"github.com/cockroachdb/cockroach/config"
"github.com/cockroachdb/cockroach/keys"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/storage"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/storage/storagebase"
"github.com/cockroachdb/cockroach/testutils"
"github.com/cockroachdb/cockroach/testutils/gossiputil"
"github.com/cockroachdb/cockroach/util"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/leaktest"
"github.com/cockroachdb/cockroach/util/stop"
"github.com/cockroachdb/cockroach/util/timeutil"
)
// mustGetInt decodes an int64 value from the bytes field of v,
// treating a nil value as zero, and panics if decoding fails
// (i.e. if the bytes field is not 0 or 8 bytes in length).
func mustGetInt(v *roachpb.Value) int64 {
	if v == nil {
		return 0
	}
	n, err := v.GetInt()
	if err == nil {
		return n
	}
	panic(err)
}
// TestStoreRecoverFromEngine verifies that the store recovers all ranges and their contents
// after being stopped and recreated.
func TestStoreRecoverFromEngine(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer config.TestingDisableTableSplits()()
	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.Key("m")
	key1 := roachpb.Key("a")
	key2 := roachpb.Key("z")
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	eng := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	var rangeID2 roachpb.RangeID
	// get reads the int64 stored at key within the given range.
	get := func(store *storage.Store, rangeID roachpb.RangeID, key roachpb.Key) int64 {
		args := getArgs(key)
		resp, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
			RangeID: rangeID,
		}, &args)
		if err != nil {
			t.Fatal(err)
		}
		return mustGetInt(resp.(*roachpb.GetResponse).Value)
	}
	// validate checks the accumulated totals on both sides of the split
	// (2+11=13 on key1, 5+23=28 on key2).
	validate := func(store *storage.Store) {
		if val := get(store, rangeID, key1); val != 13 {
			t.Errorf("key %q: expected 13 but got %v", key1, val)
		}
		if val := get(store, rangeID2, key2); val != 28 {
			t.Errorf("key %q: expected 28 but got %v", key2, val)
		}
	}
	// First, populate the store with data across two ranges. Each range contains commands
	// that both predate and postdate the split.
	func() {
		stopper := stop.NewStopper()
		defer stopper.Stop()
		store := createTestStoreWithEngine(t, eng, clock, true, nil, stopper)
		increment := func(rangeID roachpb.RangeID, key roachpb.Key, value int64) (*roachpb.IncrementResponse, *roachpb.Error) {
			args := incrementArgs(key, value)
			resp, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
				RangeID: rangeID,
			}, &args)
			incResp, _ := resp.(*roachpb.IncrementResponse)
			return incResp, err
		}
		if _, err := increment(rangeID, key1, 2); err != nil {
			t.Fatal(err)
		}
		if _, err := increment(rangeID, key2, 5); err != nil {
			t.Fatal(err)
		}
		splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey)
		if _, err := client.SendWrapped(rg1(store), nil, &splitArgs); err != nil {
			t.Fatal(err)
		}
		rangeID2 = store.LookupReplica(roachpb.RKey(key2), nil).RangeID
		if rangeID2 == rangeID {
			t.Errorf("got same range id after split")
		}
		if _, err := increment(rangeID, key1, 11); err != nil {
			t.Fatal(err)
		}
		if _, err := increment(rangeID2, key2, 23); err != nil {
			t.Fatal(err)
		}
		validate(store)
	}()
	// Now create a new store with the same engine and make sure the expected data is present.
	// We must use the same clock because a newly-created manual clock will be behind the one
	// we wrote with and so will see stale MVCC data.
	store := createTestStoreWithEngine(t, eng, clock, false, nil, engineStopper)
	// Raft processing is initialized lazily; issue a no-op write request on each key to
	// ensure that it has been started.
	incArgs := incrementArgs(key1, 0)
	if _, err := client.SendWrapped(rg1(store), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	incArgs = incrementArgs(key2, 0)
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: rangeID2,
	}, &incArgs); err != nil {
		t.Fatal(err)
	}
	validate(store)
}
// TestStoreRecoverWithErrors verifies that even commands that fail are marked as
// applied so they are not retried after recovery.
func TestStoreRecoverWithErrors(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	eng := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	// numIncrements counts how often the command filter observes an
	// increment on key "a"; recovery must not replay it.
	numIncrements := 0
	func() {
		stopper := stop.NewStopper()
		defer stopper.Stop()
		sCtx := storage.TestStoreContext()
		sCtx.TestingKnobs.TestingCommandFilter =
			func(filterArgs storagebase.FilterArgs) *roachpb.Error {
				_, ok := filterArgs.Req.(*roachpb.IncrementRequest)
				if ok && filterArgs.Req.Header().Key.Equal(roachpb.Key("a")) {
					numIncrements++
				}
				return nil
			}
		store := createTestStoreWithEngine(t, eng, clock, true, &sCtx, stopper)
		// Write a bytes value so the increment will fail.
		putArgs := putArgs(roachpb.Key("a"), []byte("asdf"))
		if _, err := client.SendWrapped(rg1(store), nil, &putArgs); err != nil {
			t.Fatal(err)
		}
		// Try and fail to increment the key. It is important for this test that the
		// failure be the last thing in the raft log when the store is stopped.
		incArgs := incrementArgs(roachpb.Key("a"), 42)
		if _, err := client.SendWrapped(rg1(store), nil, &incArgs); err == nil {
			t.Fatal("did not get expected error")
		}
	}()
	if numIncrements != 1 {
		t.Fatalf("expected 1 increments; was %d", numIncrements)
	}
	// Recover from the engine.
	store := createTestStoreWithEngine(t, eng, clock, false, nil, engineStopper)
	// Issue a no-op write to lazily initialize raft on the range.
	incArgs := incrementArgs(roachpb.Key("b"), 0)
	if _, err := client.SendWrapped(rg1(store), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// No additional increments were performed on key A during recovery.
	if numIncrements != 1 {
		t.Fatalf("expected 1 increments; was %d", numIncrements)
	}
}
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	// Issue a command on the first node before replicating.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc()); err != nil {
		t.Fatal(err)
	}
	// Verify no intent remains on range descriptor key.
	key := keys.RangeDescriptorKey(rng.Desc().StartKey)
	desc := roachpb.RangeDescriptor{}
	if ok, err := engine.MVCCGetProto(context.Background(), mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); err != nil {
		t.Fatal(err)
	} else if !ok {
		t.Fatalf("range descriptor key %s was not found", key)
	}
	// Verify that in time, no intents remain on meta addressing
	// keys, and that range descriptor on the meta records is correct.
	util.SucceedsSoon(t, func() error {
		meta2, err := keys.Addr(keys.RangeMetaKey(roachpb.RKeyMax))
		if err != nil {
			t.Fatal(err)
		}
		meta1, err := keys.Addr(keys.RangeMetaKey(meta2))
		if err != nil {
			t.Fatal(err)
		}
		for _, key := range []roachpb.RKey{meta2, meta1} {
			metaDesc := roachpb.RangeDescriptor{}
			if ok, err := engine.MVCCGetProto(context.Background(), mtc.stores[0].Engine(), key.AsRawKey(), mtc.stores[0].Clock().Now(), true, nil, &metaDesc); err != nil {
				return err
			} else if !ok {
				return errors.Errorf("failed to resolve %s", key.AsRawKey())
			}
			if !reflect.DeepEqual(metaDesc, desc) {
				return errors.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
			}
		}
		return nil
	})
	// Verify that the same data is available on the replica.
	// An inconsistent read is used because the new replica cannot
	// serve consistent reads (it does not hold the lease).
	util.SucceedsSoon(t, func() error {
		getArgs := getArgs([]byte("a"))
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return errors.Errorf("failed to read data: %s", err)
		} else if e, v := int64(5), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return errors.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
}
// TestRestoreReplicas ensures that consensus group membership is properly
// persisted to disk and restored when a node is stopped and restarted.
func TestRestoreReplicas(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	firstRng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	// Perform an increment before replication to ensure that commands are not
	// repeated on restarts.
	incArgs := incrementArgs([]byte("a"), 23)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	if err := firstRng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, firstRng.Desc()); err != nil {
		t.Fatal(err)
	}
	// TODO(bdarnell): use the stopper.Quiesce() method. The problem
	// right now is that raft isn't creating a task for high-level work
	// it's creating while snapshotting and catching up. Ideally we'll
	// be able to capture that and then can just invoke
	// mtc.stopper.Quiesce() here.
	// TODO(bdarnell): initial creation and replication needs to be atomic;
	// cutting off the process too soon currently results in a corrupted range.
	time.Sleep(500 * time.Millisecond)
	mtc.restart()
	// Send a command on each store. The original store (the leader still)
	// will succeed.
	incArgs = incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// The follower will return a not leader error, indicating the command
	// should be forwarded to the leader.
	incArgs = incrementArgs([]byte("a"), 11)
	{
		_, pErr := client.SendWrapped(rg1(mtc.stores[1]), nil, &incArgs)
		if _, ok := pErr.GetDetail().(*roachpb.NotLeaderError); !ok {
			t.Fatalf("expected not leader error; got %s", pErr)
		}
	}
	// Send again, this time to first store.
	if _, pErr := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// The total should now be 23+5+11 = 39 on both replicas.
	util.SucceedsSoon(t, func() error {
		getArgs := getArgs([]byte("a"))
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return errors.Errorf("failed to read data: %s", err)
		} else if e, v := int64(39), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return errors.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
	// Both replicas have a complete list in Desc.Replicas
	for i, store := range mtc.stores {
		rng, err := store.GetReplica(1)
		if err != nil {
			t.Fatal(err)
		}
		desc := rng.Desc()
		if len(desc.Replicas) != 2 {
			t.Fatalf("store %d: expected 2 replicas, found %d", i, len(desc.Replicas))
		}
		if desc.Replicas[0].NodeID != mtc.stores[0].Ident.NodeID {
			t.Errorf("store %d: expected replica[0].NodeID == %d, was %d",
				i, mtc.stores[0].Ident.NodeID, desc.Replicas[0].NodeID)
		}
	}
}
// TestFailedReplicaChange verifies that an aborted replica-change
// transaction leaves no pending state behind: the descriptor is
// unchanged and a subsequent change attempt succeeds.
func TestFailedReplicaChange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// runFilter toggles the command filter that force-fails the
	// replica-change transaction's commit.
	var runFilter atomic.Value
	runFilter.Store(true)
	ctx := storage.TestStoreContext()
	mtc := &multiTestContext{}
	mtc.storeContext = &ctx
	mtc.storeContext.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if runFilter.Load().(bool) {
				if et, ok := filterArgs.Req.(*roachpb.EndTransactionRequest); ok && et.Commit {
					return roachpb.NewErrorWithTxn(errors.Errorf("boom"), filterArgs.Hdr.Txn)
				}
			}
			return nil
		}
	mtc.Start(t, 2)
	defer mtc.Stop()
	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc(),
	); !testutils.IsError(err, "boom") {
		t.Fatalf("did not get expected error: %v", err)
	}
	// After the aborted transaction, r.Desc was not updated.
	// TODO(bdarnell): expose and inspect raft's internal state.
	if len(rng.Desc().Replicas) != 1 {
		t.Fatalf("expected 1 replica, found %d", len(rng.Desc().Replicas))
	}
	// The pending config change flag was cleared, so a subsequent attempt
	// can succeed.
	runFilter.Store(false)
	// The first failed replica change has laid down intents. Make sure those
	// are pushable by making the transaction abandoned.
	mtc.manualClock.Increment(10 * base.DefaultHeartbeatInterval.Nanoseconds())
	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc(),
	); err != nil {
		t.Fatal(err)
	}
	// Wait for the range to sync to both replicas (mainly so leaktest doesn't
	// complain about goroutines involved in the process).
	util.SucceedsSoon(t, func() error {
		for _, store := range mtc.stores {
			rang, err := store.GetReplica(1)
			if err != nil {
				return err
			}
			if lr := len(rang.Desc().Replicas); lr <= 1 {
				return errors.Errorf("expected > 1 replicas; got %d", lr)
			}
		}
		return nil
	})
}
// TestReplicateAfterTruncation verifies that we can truncate the old
// log entries and a new replica will be brought up from a snapshot.
func TestReplicateAfterTruncation(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	// Issue a command on the first node before replicating.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Get that command's log index.
	index, err := rng.GetLastIndex()
	if err != nil {
		t.Fatal(err)
	}
	// Truncate the log at index+1 (log entries < N are removed, so this includes
	// the increment).
	truncArgs := truncateLogArgs(index + 1)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &truncArgs); err != nil {
		t.Fatal(err)
	}
	// Issue a second command post-truncation.
	incArgs = incrementArgs([]byte("a"), 11)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Now add the second replica.
	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc()); err != nil {
		t.Fatal(err)
	}
	// Once it catches up, the effects of both commands can be seen (5+11=16).
	util.SucceedsSoon(t, func() error {
		getArgs := getArgs([]byte("a"))
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return errors.Errorf("failed to read data: %s", err)
		} else if e, v := int64(16), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return errors.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
	rng2, err := mtc.stores[1].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	// The MVCC stats of the snapshotted replica must converge to the
	// original's.
	util.SucceedsSoon(t, func() error {
		if mvcc, mvcc2 := rng.GetMVCCStats(), rng2.GetMVCCStats(); mvcc2 != mvcc {
			return errors.Errorf("expected stats on new range:\n%+v\nto equal old:\n%+v", mvcc2, mvcc)
		}
		return nil
	})
	// Send a third command to verify that the log states are synced up so the
	// new node can accept new commands.
	incArgs = incrementArgs([]byte("a"), 23)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	util.SucceedsSoon(t, func() error {
		getArgs := getArgs([]byte("a"))
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return errors.Errorf("failed to read data: %s", err)
		} else if e, v := int64(39), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return errors.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
}
// TestStoreRangeUpReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeUpReplicate(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	// Initialize the gossip network so the replication queue can see
	// every store as a replication target.
	storeDescs := make([]*roachpb.StoreDescriptor, 0, len(mtc.stores))
	for _, s := range mtc.stores {
		desc, err := s.Descriptor()
		if err != nil {
			t.Fatal(err)
		}
		storeDescs = append(storeDescs, desc)
	}
	for _, g := range mtc.gossips {
		gossiputil.NewStoreGossiper(g).GossipStores(storeDescs, t)
	}
	// Once we know our peers, trigger a scan.
	mtc.stores[0].ForceReplicationScanAndProcess()
	// The range should become available on every node.
	util.SucceedsSoon(t, func() error {
		for _, s := range mtc.stores {
			r := s.LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
			if r == nil {
				return errors.Errorf("expected replica for keys \"a\" - \"b\"")
			}
		}
		return nil
	})
}
// getRangeMetadata retrieves the current range descriptor for the target
// range.
//
// Calls to RangeLookup typically use inconsistent reads, but we
// want to do a consistent read here. This is important when we are
// considering one of the metadata ranges: we must not do an
// inconsistent lookup in our own copy of the range.
func getRangeMetadata(key roachpb.RKey, mtc *multiTestContext, t *testing.T) roachpb.RangeDescriptor {
	b := &client.Batch{}
	b.AddRawRequest(&roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			Key: keys.RangeMetaKey(key),
		},
		MaxRanges: 1,
	})
	// t.Fatalf terminates the test, so no else branch is needed.
	if err := mtc.dbs[0].Run(b); err != nil {
		t.Fatalf("error getting range metadata: %s", err)
	}
	reply := b.RawResponse().Responses[0].GetInner().(*roachpb.RangeLookupResponse)
	if a, e := len(reply.Ranges), 1; a != e {
		t.Fatalf("expected %d range descriptor, got %d", e, a)
	}
	return reply.Ranges[0]
}
// TestUnreplicateFirstRange verifies that multiTestContext still functions in
// the case where the first range (which contains range metadata) is
// unreplicated from the first store. This situation can arise occasionally in
// tests, as can a similar situation where the first store is no longer the leader of
// the first range; this verifies that those tests will not be affected.
func TestUnreplicateFirstRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	rangeID := roachpb.RangeID(1)
	// Replicate the range to store 1.
	mtc.replicateRange(rangeID, 1)
	// Unreplicate the range from store 0.
	mtc.unreplicateRange(rangeID, 0)
	// Replicate the range to store 2. The first range is no longer available on
	// store 1, and this command will fail if that situation is not properly
	// supported.
	mtc.replicateRange(rangeID, 2)
}
// TestStoreRangeDownReplicate verifies that the replication queue will notice
// over-replicated ranges and remove replicas from them.
func TestStoreRangeDownReplicate(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 5)
	defer mtc.Stop()
	store0 := mtc.stores[0]
	// Split off a range from the initial range for testing; there are
	// complications if the metadata ranges are removed from store 1, this
	// simplifies the test.
	splitKey := roachpb.Key("m")
	rightKey := roachpb.Key("z")
	{
		replica := store0.LookupReplica(roachpb.RKeyMin, nil)
		mtc.replicateRange(replica.RangeID, 1, 2)
		desc := replica.Desc()
		splitArgs := adminSplitArgs(splitKey, splitKey)
		if _, err := replica.AdminSplit(context.Background(), splitArgs, desc); err != nil {
			t.Fatal(err)
		}
	}
	// Replicate the new range to all five stores.
	rightKeyAddr, err := keys.Addr(rightKey)
	if err != nil {
		t.Fatal(err)
	}
	replica := store0.LookupReplica(rightKeyAddr, nil)
	desc := replica.Desc()
	mtc.replicateRange(desc.RangeID, 3, 4)
	// Initialize the gossip network.
	storeDescs := make([]*roachpb.StoreDescriptor, 0, len(mtc.stores))
	for _, s := range mtc.stores {
		desc, err := s.Descriptor()
		if err != nil {
			t.Fatal(err)
		}
		storeDescs = append(storeDescs, desc)
	}
	for _, g := range mtc.gossips {
		gossiputil.NewStoreGossiper(g).GossipStores(storeDescs, t)
	}
	// Poll until the replication queue has brought the range back down
	// to exactly 3 replicas, or the deadline elapses.
	maxTimeout := time.After(10 * time.Second)
	succeeded := false
	for !succeeded {
		select {
		case <-maxTimeout:
			t.Fatalf("Failed to achieve proper replication within 10 seconds")
		case <-time.After(10 * time.Millisecond):
			mtc.expireLeaderLeases()
			rangeDesc := getRangeMetadata(rightKeyAddr, mtc, t)
			if count := len(rangeDesc.Replicas); count < 3 {
				t.Fatalf("Removed too many replicas; expected at least 3 replicas, found %d", count)
			} else if count == 3 {
				succeeded = true
				// NOTE: this break exits the select (a Go gotcha, not the
				// for loop), skipping the scans below; the loop itself then
				// terminates because succeeded is true.
				break
			}
			// Run replication scans on every store; only the store with the
			// leader lease will actually do anything. If we did not wait
			// for the scan to complete here it could be interrupted by the
			// next call to expireLeaderLeases.
			for _, store := range mtc.stores {
				store.ForceReplicationScanAndProcess()
			}
		}
	}
	// Expire leader leases one more time, so that any remaining resolutions can
	// get a leader lease.
	// TODO(bdarnell): understand why some tests need this.
	mtc.expireLeaderLeases()
}
// TestChangeReplicasDescriptorInvariant tests that a replica change aborts if
// another change has been made to the RangeDescriptor since it was initiated.
func TestChangeReplicasDescriptorInvariant(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	repl, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	// addReplica adds a replica on the given store using the supplied
	// (possibly stale) descriptor.
	addReplica := func(storeNum int, desc *roachpb.RangeDescriptor) error {
		return repl.ChangeReplicas(roachpb.ADD_REPLICA,
			roachpb.ReplicaDescriptor{
				NodeID:  mtc.stores[storeNum].Ident.NodeID,
				StoreID: mtc.stores[storeNum].Ident.StoreID,
			},
			desc,
		)
	}
	// Retain the descriptor for the range at this point.
	origDesc := repl.Desc()
	// Add replica to the second store, which should succeed.
	if err := addReplica(1, origDesc); err != nil {
		t.Fatal(err)
	}
	util.SucceedsSoon(t, func() error {
		r := mtc.stores[1].LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
		if r == nil {
			return errors.Errorf("expected replica for keys \"a\" - \"b\"")
		}
		return nil
	})
	// Attempt to add replica to the third store with the original descriptor.
	// This should fail because the descriptor is stale.
	if err := addReplica(2, origDesc); !testutils.IsError(err, `change replicas of \d+ failed`) {
		t.Fatalf("got unexpected error: %v", err)
	}
	// Both addReplica calls attempted to use origDesc.NextReplicaID.
	// The failed second call should not have overwritten the cached
	// replica descriptor from the successful first call.
	if rd, err := mtc.stores[0].ReplicaDescriptor(origDesc.RangeID, origDesc.NextReplicaID); err != nil {
		t.Fatalf("failed to look up replica %s", origDesc.NextReplicaID)
	} else if a, e := rd.StoreID, mtc.stores[1].Ident.StoreID; a != e {
		// Bug fix: the original passed (a, e) here, printing the actual
		// value in the "expected" slot and vice versa.
		t.Fatalf("expected replica %s to point to store %s, but got %s", origDesc.NextReplicaID, e, a)
	}
	// Add to third store with fresh descriptor.
	if err := addReplica(2, repl.Desc()); err != nil {
		t.Fatal(err)
	}
	util.SucceedsSoon(t, func() error {
		r := mtc.stores[2].LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
		if r == nil {
			return errors.Errorf("expected replica for keys \"a\" - \"b\"")
		}
		return nil
	})
}
// TestProgressWithDownNode verifies that a surviving quorum can make progress
// with a downed node.
func TestProgressWithDownNode(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	rangeID := roachpb.RangeID(1)
	mtc.replicateRange(rangeID, 1, 2)
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Verify that the first increment propagates to all the engines.
	// verify reads key "a" directly from each engine and compares the
	// per-store values against expected.
	verify := func(expected []int64) {
		util.SucceedsSoon(t, func() error {
			values := []int64{}
			for _, eng := range mtc.engines {
				val, _, err := engine.MVCCGet(context.Background(), eng, roachpb.Key("a"), mtc.clock.Now(), true, nil)
				if err != nil {
					return err
				}
				values = append(values, mustGetInt(val))
			}
			if !reflect.DeepEqual(expected, values) {
				return errors.Errorf("expected %v, got %v", expected, values)
			}
			return nil
		})
	}
	verify([]int64{5, 5, 5})
	// Stop one of the replicas and issue a new increment.
	mtc.stopStore(1)
	incArgs = incrementArgs([]byte("a"), 11)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// The new increment can be seen on both live replicas.
	verify([]int64{16, 5, 16})
	// Once the downed node is restarted, it will catch up.
	mtc.restartStore(1)
	verify([]int64{16, 16, 16})
}
// TestReplicateAddAndRemove verifies that a downed replica can be
// replaced by a new one, in both add-then-remove and remove-then-add
// order, and that the removed store is eventually GC'ed.
func TestReplicateAddAndRemove(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testFunc := func(addFirst bool) {
		mtc := startMultiTestContext(t, 4)
		defer mtc.Stop()
		// Replicate the initial range to three of the four nodes.
		rangeID := roachpb.RangeID(1)
		mtc.replicateRange(rangeID, 3, 1)
		incArgs := incrementArgs([]byte("a"), 5)
		if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
			t.Fatal(err)
		}
		// verify reads key "a" directly from each engine and compares
		// the per-store values against expected.
		verify := func(expected []int64) {
			util.SucceedsSoon(t, func() error {
				values := []int64{}
				for _, eng := range mtc.engines {
					val, _, err := engine.MVCCGet(context.Background(), eng, roachpb.Key("a"), mtc.clock.Now(), true, nil)
					if err != nil {
						return err
					}
					values = append(values, mustGetInt(val))
				}
				if !reflect.DeepEqual(expected, values) {
					return errors.Errorf("addFirst: %t, expected %v, got %v", addFirst, expected, values)
				}
				return nil
			})
		}
		// The first increment is visible on all three replicas.
		verify([]int64{5, 5, 0, 5})
		// Stop a store and replace it.
		mtc.stopStore(1)
		if addFirst {
			mtc.replicateRange(rangeID, 2)
			mtc.unreplicateRange(rangeID, 1)
		} else {
			mtc.unreplicateRange(rangeID, 1)
			mtc.replicateRange(rangeID, 2)
		}
		verify([]int64{5, 5, 5, 5})
		// Ensure that the rest of the group can make progress.
		incArgs = incrementArgs([]byte("a"), 11)
		if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
			t.Fatal(err)
		}
		verify([]int64{16, 5, 16, 16})
		// Bring the downed store back up (required for a clean shutdown).
		mtc.restartStore(1)
		// Node 1 never sees the increment that was added while it was
		// down. Perform another increment on the live nodes to verify.
		incArgs = incrementArgs([]byte("a"), 23)
		if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
			t.Fatal(err)
		}
		verify([]int64{39, 5, 39, 39})
		// Wait out the leader lease and the unleased duration to make the replica GC'able.
		mtc.expireLeaderLeases()
		mtc.manualClock.Increment(int64(
			storage.ReplicaGCQueueInactivityThreshold + 1))
		mtc.stores[1].ForceReplicaGCScanAndProcess()
		// The removed store no longer has any of the data from the range.
		verify([]int64{39, 0, 39, 39})
		desc := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil).Desc()
		replicaIDsByStore := map[roachpb.StoreID]roachpb.ReplicaID{}
		for _, rep := range desc.Replicas {
			replicaIDsByStore[rep.StoreID] = rep.ReplicaID
		}
		expected := map[roachpb.StoreID]roachpb.ReplicaID{1: 1, 4: 2, 3: 4}
		if !reflect.DeepEqual(expected, replicaIDsByStore) {
			t.Fatalf("expected replica IDs to be %v but got %v", expected, replicaIDsByStore)
		}
	}
	// Run the test twice, once adding the replacement before removing
	// the downed node, and once removing the downed node first.
	testFunc(true)
	testFunc(false)
}
// TestRaftHeartbeats verifies that coalesced heartbeats are correctly
// suppressing elections in an idle cluster.
func TestRaftHeartbeats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	mtc.replicateRange(1, 1, 2)
	// Capture the initial term and state.
	status := mtc.stores[0].RaftStatus(1)
	initialTerm := status.Term
	if status.SoftState.RaftState != raft.StateLeader {
		t.Errorf("expected node 0 to initially be leader but was %s", status.SoftState.RaftState)
	}
	// Wait for several ticks to elapse; if heartbeats are working no
	// election should occur in the meantime.
	time.Sleep(5 * mtc.makeContext(0).RaftTickInterval)
	status = mtc.stores[0].RaftStatus(1)
	if status.SoftState.RaftState != raft.StateLeader {
		t.Errorf("expected node 0 to be leader after sleeping but was %s", status.SoftState.RaftState)
	}
	// A term change would indicate an election took place.
	if status.Term != initialTerm {
		t.Errorf("while sleeping, term changed from %d to %d", initialTerm, status.Term)
	}
}
// TestReplicateAfterSplit verifies that a new replica whose start key
// is not KeyMin replicating to a fresh store can apply snapshots correctly.
func TestReplicateAfterSplit(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.Key("m")
	key := roachpb.Key("z")
	store0 := mtc.stores[0]
	// Make the split at "m"; key "z" then lives in the right-hand range.
	splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey)
	if _, err := client.SendWrapped(rg1(store0), nil, &splitArgs); err != nil {
		t.Fatal(err)
	}
	// The right-hand side of the split must have a new range ID.
	rangeID2 := store0.LookupReplica(roachpb.RKey(key), nil).RangeID
	if rangeID2 == rangeID {
		t.Errorf("got same range id after split")
	}
	// Issue an increment for later check.
	incArgs := incrementArgs(key, 11)
	if _, err := client.SendWrappedWith(rg1(store0), nil, roachpb.Header{
		RangeID: rangeID2,
	}, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Now add the second replica. The fresh store must catch up via snapshot.
	mtc.replicateRange(rangeID2, 1)
	if mtc.stores[1].LookupReplica(roachpb.RKey(key), nil).GetMaxBytes() == 0 {
		t.Error("Range MaxBytes is not set after snapshot applied")
	}
	// Once it catches up, the effects of increment commands can be seen.
	util.SucceedsSoon(t, func() error {
		getArgs := getArgs(key)
		// Reading on non-leader replica should use inconsistent read.
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			RangeID:         rangeID2,
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return errors.Errorf("failed to read data: %s", err)
		} else if e, v := int64(11), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return errors.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
}
// TestReplicaRemovalCampaign verifies that a new replica after a split can be
// transferred away/replaced without campaigning the old one.
func TestReplicaRemovalCampaign(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Two scenarios: with the post-split replica removed, its raft term
	// must not advance; left in place, the term is expected to advance.
	testData := []struct {
		remove        bool
		expectAdvance bool
	}{
		{ // Replica removed
			remove:        true,
			expectAdvance: false,
		},
		{ // Default behavior
			remove:        false,
			expectAdvance: true,
		},
	}
	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.Key("m")
	key2 := roachpb.Key("z")
	for i, td := range testData {
		// Each scenario runs in its own closure so the deferred Stop
		// fires at the end of the iteration, not the end of the test.
		func() {
			mtc := startMultiTestContext(t, 2)
			defer mtc.Stop()
			// Replicate range to enable raft campaigning.
			mtc.replicateRange(rangeID, 1)
			store0 := mtc.stores[0]
			// Make the split.
			splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey)
			if _, err := client.SendWrapped(rg1(store0), nil, &splitArgs); err != nil {
				t.Fatal(err)
			}
			replica2 := store0.LookupReplica(roachpb.RKey(key2), nil)
			if td.remove {
				// Simulate second replica being transferred by removing it.
				if err := store0.RemoveReplica(replica2, *replica2.Desc(), true); err != nil {
					t.Fatal(err)
				}
			}
			var latestTerm uint64
			if td.expectAdvance {
				// Wait until the term is observed to advance past the
				// first value recorded (latestTerm starts at zero, so
				// the first observation only seeds it).
				util.SucceedsSoon(t, func() error {
					if raftStatus := replica2.RaftStatus(); raftStatus != nil {
						if term := raftStatus.Term; term <= latestTerm {
							return errors.Errorf("%d: raft term has not yet advanced: %d", i, term)
						} else if latestTerm == 0 {
							latestTerm = term
						}
					} else {
						return errors.Errorf("%d: raft group is not yet initialized", i)
					}
					return nil
				})
			} else {
				// Poll for up to a second and fail if the term moves
				// beyond the initially observed value.
				for start := timeutil.Now(); timeutil.Since(start) < time.Second; time.Sleep(10 * time.Millisecond) {
					if raftStatus := replica2.RaftStatus(); raftStatus != nil {
						if term := raftStatus.Term; term > latestTerm {
							if latestTerm == 0 {
								latestTerm = term
							} else {
								t.Errorf("%d: raft term unexpectedly advanced: %d", i, term)
								break
							}
						}
					}
				}
			}
		}()
	}
}
// TestRangeDescriptorSnapshotRace calls Snapshot() repeatedly while
// transactions are performed on the range descriptor.
//
// Failures inside the background worker are reported via t.Error rather
// than t.Fatal: the testing package requires FailNow (and therefore
// Fatal) to be called from the goroutine running the test, so Fatal in a
// worker would not reliably stop the test.
func TestRangeDescriptorSnapshotRace(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 1)
	defer mtc.Stop()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	// Call Snapshot() in a loop and ensure it never fails.
	stopper.RunWorker(func() {
		for {
			select {
			case <-stopper.ShouldStop():
				return
			default:
				// On any failure, record it and stop the worker; Fatal
				// must not be used off the test goroutine.
				if rng := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil); rng == nil {
					t.Error("failed to look up min range")
					return
				} else if _, err := rng.GetSnapshot(); err != nil {
					t.Errorf("failed to snapshot min range: %s", err)
					return
				}
				if rng := mtc.stores[0].LookupReplica(roachpb.RKey("Z"), nil); rng == nil {
					t.Error("failed to look up max range")
					return
				} else if _, err := rng.GetSnapshot(); err != nil {
					t.Errorf("failed to snapshot max range: %s", err)
					return
				}
			}
		}
	})
	// Split the range repeatedly, carving chunks off the end of the
	// initial range. The bug that this test was designed to find
	// usually occurred within the first 5 iterations.
	for i := 20; i > 0; i-- {
		rng := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil)
		if rng == nil {
			t.Fatal("failed to look up min range")
		}
		desc := rng.Desc()
		args := adminSplitArgs(roachpb.KeyMin, []byte(fmt.Sprintf("A%03d", i)))
		if _, err := rng.AdminSplit(context.Background(), args, desc); err != nil {
			t.Fatal(err)
		}
	}
	// Split again, carving chunks off the beginning of the final range.
	for i := 0; i < 20; i++ {
		rng := mtc.stores[0].LookupReplica(roachpb.RKey("Z"), nil)
		if rng == nil {
			t.Fatal("failed to look up max range")
		}
		desc := rng.Desc()
		args := adminSplitArgs(roachpb.KeyMin, []byte(fmt.Sprintf("B%03d", i)))
		if _, err := rng.AdminSplit(context.Background(), args, desc); err != nil {
			t.Fatal(err)
		}
	}
}
// TestRaftAfterRemoveRange verifies that the raft state removes
// a remote node correctly after the Replica was removed from the Store.
func TestRaftAfterRemoveRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	// Make the split.
	splitArgs := adminSplitArgs(roachpb.KeyMin, []byte("b"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &splitArgs); err != nil {
		t.Fatal(err)
	}
	// Replicate the post-split range to stores 1 and 2, then remove it
	// from both again.
	rangeID := roachpb.RangeID(2)
	mtc.replicateRange(rangeID, 1, 2)
	mtc.unreplicateRange(rangeID, 2)
	mtc.unreplicateRange(rangeID, 1)
	// Wait for the removal to be processed.
	util.SucceedsSoon(t, func() error {
		_, err := mtc.stores[1].GetReplica(rangeID)
		if _, ok := err.(*roachpb.RangeNotFoundError); ok {
			return nil
		} else if err != nil {
			return err
		}
		return errors.Errorf("range still exists")
	})
	// NOTE(review): replica and node IDs are derived from store IDs
	// below; this assumes the multi-test context assigns them
	// identically — confirm against multiTestContext setup.
	replica1 := roachpb.ReplicaDescriptor{
		ReplicaID: roachpb.ReplicaID(mtc.stores[1].StoreID()),
		NodeID:    roachpb.NodeID(mtc.stores[1].StoreID()),
		StoreID:   mtc.stores[1].StoreID(),
	}
	replica2 := roachpb.ReplicaDescriptor{
		ReplicaID: roachpb.ReplicaID(mtc.stores[2].StoreID()),
		NodeID:    roachpb.NodeID(mtc.stores[2].StoreID()),
		StoreID:   mtc.stores[2].StoreID(),
	}
	// Send a raft heartbeat addressed to the replica that was just
	// removed from store 1.
	if err := mtc.transports[2].Send(&storage.RaftMessageRequest{
		GroupID:     0,
		ToReplica:   replica1,
		FromReplica: replica2,
		Message: raftpb.Message{
			From: uint64(replica2.ReplicaID),
			To:   uint64(replica1.ReplicaID),
			Type: raftpb.MsgHeartbeat,
		}}); err != nil {
		t.Fatal(err)
	}
	// Execute another replica change to ensure that raft has processed
	// the heartbeat just sent.
	mtc.replicateRange(roachpb.RangeID(1), 1)
	// Expire leases to ensure any remaining intent resolutions can complete.
	// TODO(bdarnell): understand why some tests need this.
	mtc.expireLeaderLeases()
}
// TestRaftRemoveRace adds and removes a replica repeatedly in an
// attempt to reproduce a race
// (https://github.com/cockroachdb/cockroach/issues/1911). Note that
// 10 repetitions is not enough to reliably reproduce the problem, but
// it's better than any other tests we have for this (increasing the
// number of repetitions adds an unacceptable amount of test runtime).
func TestRaftRemoveRace(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	const rangeID roachpb.RangeID = 1
	mtc.replicateRange(rangeID, 1, 2)

	// Churn the third replica: drop it and immediately re-add it.
	for i := 0; i < 10; i++ {
		mtc.unreplicateRange(rangeID, 2)
		mtc.replicateRange(rangeID, 2)
	}
}
// TestStoreRangeRemoveDead verifies that if a store becomes dead, the
// ReplicateQueue will notice and remove any replicas on it.
func TestStoreRangeRemoveDead(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := &multiTestContext{}
	// Shorten the dead-store threshold so the test doesn't wait for the
	// production timeout.
	mtc.timeUntilStoreDead = storage.TestTimeUntilStoreDead
	mtc.Start(t, 3)
	defer mtc.Stop()
	var sgs []*gossiputil.StoreGossiper
	for _, g := range mtc.gossips {
		sgs = append(sgs, gossiputil.NewStoreGossiper(g))
	}
	// Replicate the range to all stores.
	replica := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil)
	mtc.replicateRange(replica.RangeID, 1, 2)
	// Initialize the gossip network.
	var storeIDs []roachpb.StoreID
	for _, s := range mtc.stores {
		storeIDs = append(storeIDs, s.StoreID())
	}
	// Gossip all stores and wait for callbacks to be run. This is
	// tricky since we have multiple gossip objects communicating
	// asynchronously. We use StoreGossiper to track callbacks but we
	// need to set up all the callback tracking before any stores are
	// gossiped.
	var readyWG sync.WaitGroup
	var doneWG sync.WaitGroup
	readyWG.Add(len(sgs))
	doneWG.Add(len(sgs))
	for _, sg := range sgs {
		go func(sg *gossiputil.StoreGossiper) {
			// Signal readiness exactly once, on the first callback.
			ready := false
			sg.GossipWithFunction(storeIDs, func() {
				if !ready {
					readyWG.Done()
					ready = true
				}
			})
			doneWG.Done()
		}(sg)
	}
	readyWG.Wait()
	for _, s := range mtc.stores {
		s.GossipStore()
	}
	doneWG.Wait()
	// All three replicas must be present before we let one store die.
	rangeDesc := getRangeMetadata(roachpb.RKeyMin, mtc, t)
	if e, a := 3, len(rangeDesc.Replicas); e != a {
		t.Fatalf("expected %d replicas, only found %d, rangeDesc: %+v", e, a, rangeDesc)
	}
	// This can't use SucceedsSoon as using the backoff mechanic won't work
	// as it requires a specific cadence of re-gossiping the alive stores to
	// maintain their alive status.
	tickerDur := storage.TestTimeUntilStoreDead / 2
	ticker := time.NewTicker(tickerDur)
	defer ticker.Stop()
	maxTime := 5 * time.Second
	maxTimeout := time.After(maxTime)
	// Store 2 is never re-gossiped below, so it goes dead; wait for the
	// replicate queues on the surviving stores to drop its replica.
	for len(getRangeMetadata(roachpb.RKeyMin, mtc, t).Replicas) > 2 {
		select {
		case <-maxTimeout:
			t.Fatalf("Failed to remove the dead replica within %s", maxTime)
		case <-ticker.C:
			mtc.manualClock.Increment(int64(tickerDur))
			// Keep gossiping the alive stores.
			mtc.stores[0].GossipStore()
			mtc.stores[1].GossipStore()
			// Force the repair queues on all alive stores to run.
			mtc.stores[0].ForceReplicationScanAndProcess()
			mtc.stores[1].ForceReplicationScanAndProcess()
		}
	}
}
// TestStoreRangeRebalance verifies that the replication queue will take
// rebalancing opportunities and add a new replica on another store.
func TestStoreRangeRebalance(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Start multiTestContext with replica rebalancing enabled.
	mtc := &multiTestContext{
		storeContext: &storage.StoreContext{},
	}
	*mtc.storeContext = storage.TestStoreContext()
	mtc.storeContext.AllocatorOptions = storage.AllocatorOptions{
		AllowRebalance: true,
		Deterministic:  true,
	}
	// Four stores.
	mtc.Start(t, 4)
	defer mtc.Stop()
	// Replicate the first range to the first three stores.
	store0 := mtc.stores[0]
	replica := store0.LookupReplica(roachpb.RKeyMin, nil)
	desc := replica.Desc()
	mtc.replicateRange(desc.RangeID, 1, 2)
	// Initialize the gossip network with fake capacity data.
	storeDescs := make([]*roachpb.StoreDescriptor, 0, len(mtc.stores))
	for _, s := range mtc.stores {
		desc, err := s.Descriptor()
		if err != nil {
			t.Fatal(err)
		}
		desc.Capacity.RangeCount = 1
		// Make sure store[1] is chosen as removal target.
		if desc.StoreID == mtc.stores[1].StoreID() {
			desc.Capacity.RangeCount = 4
		}
		storeDescs = append(storeDescs, desc)
	}
	for _, g := range mtc.gossips {
		gossiputil.NewStoreGossiper(g).GossipStores(storeDescs, t)
	}
	// This can't use SucceedsSoon as using the exponential backoff mechanic
	// won't work well with the forced replication scans.
	maxTimeout := time.After(5 * time.Second)
	succeeded := false
	for !succeeded {
		select {
		case <-maxTimeout:
			t.Fatal("Failed to rebalance replica within 5 seconds")
		case <-time.After(10 * time.Millisecond):
			// Look up the official range descriptor, make sure fourth store is on it.
			rangeDesc := getRangeMetadata(roachpb.RKeyMin, mtc, t)
			// Test if we have already succeeded.
			for _, repl := range rangeDesc.Replicas {
				if repl.StoreID == mtc.stores[3].StoreID() {
					succeeded = true
				}
			}
			// This break only exits the select; the enclosing loop then
			// terminates via its !succeeded condition.
			if succeeded {
				break
			}
			mtc.expireLeaderLeases()
			mtc.stores[1].ForceReplicationScanAndProcess()
		}
	}
}
// TestReplicateRogueRemovedNode ensures that a rogue removed node
// (i.e. a node that has been removed from the range but doesn't know
// it yet because it was down or partitioned away when it happened)
// cannot cause other removed nodes to recreate their ranges.
func TestReplicateRogueRemovedNode(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	// First put the range on all three nodes.
	raftID := roachpb.RangeID(1)
	mtc.replicateRange(raftID, 1, 2)
	// Put some data in the range so we'll have something to test for.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Wait for all nodes to catch up.
	mtc.waitForValues(roachpb.Key("a"), []int64{5, 5, 5})
	// Stop node 2; while it is down remove the range from nodes 2 and 1.
	mtc.stopStore(2)
	mtc.unreplicateRange(raftID, 2)
	mtc.unreplicateRange(raftID, 1)
	// Make a write on node 0; this will not be replicated because 0 is the only node left.
	incArgs = incrementArgs([]byte("a"), 11)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Wait for the replica to be GC'd on node 1.
	// Store 0 has two writes, 1 has erased everything, and 2 still has the first write.
	// A single pass of ForceReplicaGCScanAndProcess is not enough, since the replica
	// may be recreated by a stray raft message, so we run the GC scan inside the loop.
	// TODO(bdarnell): if the call to RemoveReplica in replicaGCQueue.process can be
	// moved under the lock, then the GC scan can be moved out of this loop.
	util.SucceedsSoon(t, func() error {
		mtc.expireLeaderLeases()
		mtc.manualClock.Increment(int64(
			storage.ReplicaGCQueueInactivityThreshold) + 1)
		mtc.stores[1].ForceReplicaGCScanAndProcess()
		actual := mtc.readIntFromEngines(roachpb.Key("a"))
		expected := []int64{16, 0, 5}
		if !reflect.DeepEqual(expected, actual) {
			return errors.Errorf("expected %v, got %v", expected, actual)
		}
		return nil
	})
	// Bring node 2 back up.
	mtc.restartStore(2)
	// Try to issue a command on node 2. It should not be able to commit
	// (so we add it asynchronously).
	var startWG sync.WaitGroup
	startWG.Add(1)
	var finishWG sync.WaitGroup
	finishWG.Add(1)
	go func() {
		// Failures here must use t.Error, not t.Fatal: FailNow may only
		// be called from the test goroutine, and a Goexit before the
		// WaitGroups are signaled would deadlock the waits below. Both
		// groups are therefore signaled on every path.
		defer finishWG.Done()
		rng, err := mtc.stores[2].GetReplica(raftID)
		if err != nil {
			startWG.Done()
			t.Error(err)
			return
		}
		incArgs := incrementArgs([]byte("a"), 23)
		startWG.Done()
		if _, err := client.SendWrappedWith(rng, nil, roachpb.Header{Timestamp: mtc.stores[2].Clock().Now()}, &incArgs); err == nil {
			t.Error("expected error during shutdown")
		}
	}()
	startWG.Wait()
	// Sleep a bit to let the command proposed on node 2 proceed if it's
	// going to. Prior to the introduction of replica tombstones, this
	// would lead to split-brain: Node 2 would wake up node 1 and they
	// would form a quorum, even though node 0 had removed them both.
	// Now the tombstone on node 1 prevents it from rejoining the rogue
	// copy of the group.
	time.Sleep(100 * time.Millisecond)
	util.SucceedsSoon(t, func() error {
		actual := mtc.readIntFromEngines(roachpb.Key("a"))
		// Normally, replica GC has not happened yet on store 2, so we
		// expect {16, 0, 5}. However, it is possible (on a
		// slow/overloaded machine) for the end of the ChangeReplicas
		// transaction to be queued up inside the raft transport for long
		// enough that it doesn't arrive until after store 2 has been
		// restarted, so it is able to trigger an early GC on the
		// restarted node, resulting in {16, 0, 0}.
		// TODO(bdarnell): When #5789 is fixed, the probabilities flip and
		// {16, 0, 0} becomes the expected case. When this happens
		// we should just combine this check with the following one.
		expected1 := []int64{16, 0, 5}
		expected2 := []int64{16, 0, 0}
		if !reflect.DeepEqual(expected1, actual) && !reflect.DeepEqual(expected2, actual) {
			return errors.Errorf("expected %v or %v, got %v", expected1, expected2, actual)
		}
		return nil
	})
	// Run garbage collection on node 2. The lack of an active leader
	// lease will cause GC to do a consistent range lookup, where it
	// will see that the range has been moved and delete the old
	// replica.
	mtc.expireLeaderLeases()
	mtc.manualClock.Increment(int64(
		storage.ReplicaGCQueueInactivityThreshold) + 1)
	mtc.stores[2].ForceReplicaGCScanAndProcess()
	mtc.waitForValues(roachpb.Key("a"), []int64{16, 0, 0})
	// Now that the group has been GC'd, the goroutine that was
	// attempting to write has finished (with an error).
	finishWG.Wait()
}
// TestReplicateRemovedNodeDisruptiveElection verifies that a vote
// request sent by a node that has been removed from the range does not
// disturb the raft term of the surviving replicas.
func TestReplicateRemovedNodeDisruptiveElection(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 4)
	defer mtc.Stop()
	// Move the first range from the first node to the other three.
	rangeID := roachpb.RangeID(1)
	mtc.replicateRange(rangeID, 1, 2, 3)
	mtc.unreplicateRange(rangeID, 0)
	mtc.expireLeaderLeases()
	// Write on the second node, to ensure that the other nodes have
	// established leadership after the first node's removal.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(mtc.distSenders[1], nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Save the current term, which is the latest among the live stores.
	findTerm := func() uint64 {
		var term uint64
		for i := 1; i < 4; i++ {
			s := mtc.stores[i].RaftStatus(rangeID)
			if s.Term > term {
				term = s.Term
			}
		}
		return term
	}
	term := findTerm()
	if term == 0 {
		t.Fatalf("expected non-zero term")
	}
	// NOTE(review): replica and node IDs are derived from store IDs
	// below; this assumes the multi-test context assigns them
	// identically — confirm against multiTestContext setup.
	replica0 := roachpb.ReplicaDescriptor{
		ReplicaID: roachpb.ReplicaID(mtc.stores[0].StoreID()),
		NodeID:    roachpb.NodeID(mtc.stores[0].StoreID()),
		StoreID:   mtc.stores[0].StoreID(),
	}
	replica1 := roachpb.ReplicaDescriptor{
		ReplicaID: roachpb.ReplicaID(mtc.stores[1].StoreID()),
		NodeID:    roachpb.NodeID(mtc.stores[1].StoreID()),
		StoreID:   mtc.stores[1].StoreID(),
	}
	// Simulate an election triggered by the removed node.
	if err := mtc.transports[0].Send(&storage.RaftMessageRequest{
		GroupID:     rangeID,
		ToReplica:   replica1,
		FromReplica: replica0,
		Message: raftpb.Message{
			From: uint64(replica0.ReplicaID),
			To:   uint64(replica1.ReplicaID),
			Type: raftpb.MsgVote,
			Term: term + 1,
		},
	}); err != nil {
		t.Fatal(err)
	}
	// Wait a bit for the message to be processed.
	// TODO(bdarnell): This will be easier to test without waiting
	// when #5789 is done.
	time.Sleep(10 * time.Millisecond)
	// The message should have been discarded without triggering an
	// election or changing the term.
	newTerm := findTerm()
	if term != newTerm {
		t.Errorf("expected term to be constant, but changed from %v to %v", term, newTerm)
	}
}
// TestReplicateReAddAfterDown verifies that a range which was removed
// from a downed store can be re-added to that store after it restarts,
// and that the store then catches back up with the range's data.
func TestReplicateReAddAfterDown(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	// First put the range on all three nodes.
	const rangeID roachpb.RangeID = 1
	mtc.replicateRange(rangeID, 1, 2)

	// Write a value so there is something to check replication against.
	inc := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &inc); err != nil {
		t.Fatal(err)
	}
	mtc.waitForValues(roachpb.Key("a"), []int64{5, 5, 5})

	// Stop node 2; while it is down remove the range from it. Since the node is
	// down it won't see the removal and clean up its replica.
	mtc.stopStore(2)
	mtc.unreplicateRange(rangeID, 2)

	// Perform another write; node 2 must not see it.
	inc = incrementArgs([]byte("a"), 11)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &inc); err != nil {
		t.Fatal(err)
	}
	mtc.waitForValues(roachpb.Key("a"), []int64{16, 16, 5})

	// Bring it back up and re-add the range. There is a race when the
	// store applies its removal and re-addition back to back: the
	// replica may or may not have (asynchronously) garbage collected
	// its data in between. Whether the existing data is reused or the
	// replica gets recreated, the replica ID is changed by this
	// process. An ill-timed GC has been known to cause bugs including
	// https://github.com/cockroachdb/cockroach/issues/2873.
	mtc.restartStore(2)
	mtc.replicateRange(rangeID, 2)

	// The range should be synced back up.
	mtc.waitForValues(roachpb.Key("a"), []int64{16, 16, 16})
}
// TestLeaderRemoveSelf verifies that a leader can remove itself
// without panicking and future access to the range returns a
// RangeNotFoundError (not RaftGroupDeletedError, and even before
// the ReplicaGCQueue has run).
func TestLeaderRemoveSelf(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	// Disable the replica GC queue. This verifies that the replica is
	// considered removed even before the gc queue has run, and also
	// helps avoid a deadlock at shutdown.
	mtc.stores[0].DisableReplicaGCQueue(true)
	raftID := roachpb.RangeID(1)
	mtc.replicateRange(raftID, 1)
	// Remove the replica from first store.
	mtc.unreplicateRange(raftID, 0)
	getArgs := getArgs([]byte("a"))
	// Force the read command request a new lease by advancing the clock
	// past the lease expiration.
	clock := mtc.clocks[0]
	header := roachpb.Header{}
	header.Timestamp = clock.Update(clock.Now().Add(
		storage.LeaderLeaseExpiration(mtc.stores[0], clock), 0))
	// Expect get a RangeNotFoundError.
	_, pErr := client.SendWrappedWith(rg1(mtc.stores[0]), nil, header, &getArgs)
	if _, ok := pErr.GetDetail().(*roachpb.RangeNotFoundError); !ok {
		t.Fatalf("expect get RangeNotFoundError, actual get %v ", pErr)
	}
}
// TestRemoveRangeWithoutGC ensures that we do not panic when a
// replica has been removed but not yet GC'd (and therefore
// does not have an active raft group).
func TestRemoveRangeWithoutGC(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	// Disable the GC queue and move the range from store 0 to 1.
	mtc.stores[0].DisableReplicaGCQueue(true)
	const rangeID roachpb.RangeID = 1
	mtc.replicateRange(rangeID, 1)
	mtc.unreplicateRange(rangeID, 0)
	// Wait for store 0 to process the removal; the replica still exists
	// (GC is disabled) but its descriptor should be down to one replica.
	util.SucceedsSoon(t, func() error {
		rep, err := mtc.stores[0].GetReplica(rangeID)
		if err != nil {
			return err
		}
		desc := rep.Desc()
		if len(desc.Replicas) != 1 {
			return errors.Errorf("range has %d replicas", len(desc.Replicas))
		}
		return nil
	})
	// The replica's data is still on disk even though the Replica
	// object is removed.
	var desc roachpb.RangeDescriptor
	descKey := keys.RangeDescriptorKey(roachpb.RKeyMin)
	if ok, err := engine.MVCCGetProto(context.Background(), mtc.stores[0].Engine(), descKey,
		mtc.stores[0].Clock().Now(), true, nil, &desc); err != nil {
		t.Fatal(err)
	} else if !ok {
		t.Fatal("expected range descriptor to be present")
	}
	// Stop and restart the store to reset the replica's raftGroup
	// pointer to nil. As long as the store has not been restarted it
	// can continue to use its last known replica ID.
	mtc.stopStore(0)
	mtc.restartStore(0)
	// Turn off the GC queue to ensure that the replica is deleted at
	// startup instead of by the scanner. This is not 100% guaranteed
	// since the scanner could have already run at this point, but it
	// should be enough to prevent us from accidentally relying on the
	// scanner.
	mtc.stores[0].DisableReplicaGCQueue(true)
	// The Replica object is not recreated.
	if _, err := mtc.stores[0].GetReplica(rangeID); err == nil {
		t.Fatalf("expected replica to be missing")
	}
	// And the data is no longer on disk.
	if ok, err := engine.MVCCGetProto(context.Background(), mtc.stores[0].Engine(), descKey,
		mtc.stores[0].Clock().Now(), true, nil, &desc); err != nil {
		t.Fatal(err)
	} else if ok {
		t.Fatal("expected range descriptor to be absent")
	}
}
// TestCheckConsistencyMultiStore creates a DB backed by three stores
// with three-way replication, writes a value, and then runs a
// consistency check over a span containing that value.
func TestCheckConsistencyMultiStore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const numStores = 3
	mtc := startMultiTestContext(t, numStores)
	defer mtc.Stop()

	// Setup replication of range 1 on store 0 to stores 1 and 2.
	mtc.replicateRange(1, 1, 2)

	// Write a single key so the check has something to verify.
	pArgs := putArgs([]byte("a"), []byte("b"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &pArgs); err != nil {
		t.Fatal(err)
	}

	// Run a consistency check over a span of keys that includes "a".
	checkArgs := roachpb.CheckConsistencyRequest{
		Span: roachpb.Span{
			Key:    []byte("a"),
			EndKey: []byte("aa"),
		},
	}
	if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{
		Timestamp: mtc.stores[0].Clock().Now(),
	}, &checkArgs); err != nil {
		t.Fatal(err)
	}
}
// TestCheckInconsistent writes an extra key directly into store 1's
// engine so the replicas diverge, then runs a consistency check and
// verifies that the BadChecksumPanic testing knob fires with a diff
// containing exactly the injected key.
func TestCheckInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const numStores = 3
	mtc := startMultiTestContext(t, numStores)
	defer mtc.Stop()
	// Setup replication of range 1 on store 0 to stores 1 and 2.
	mtc.replicateRange(1, 1, 2)
	// Write something to the DB.
	pArgs := putArgs([]byte("a"), []byte("b"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	pArgs = putArgs([]byte("c"), []byte("d"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	// Write some arbitrary data only to store 1. Inconsistent key "e"!
	key := []byte("e")
	var val roachpb.Value
	val.SetInt(42)
	timestamp := mtc.stores[1].Clock().Timestamp()
	if err := engine.MVCCPut(context.Background(), mtc.stores[1].Engine(), nil, key, timestamp, val, nil); err != nil {
		t.Fatal(err)
	}
	// The consistency check will panic on store 1; intercept the panic
	// via the testing knob and inspect the reported diff instead.
	notify := make(chan struct{}, 1)
	mtc.stores[1].TestingKnobs().BadChecksumPanic = func(diff []storage.ReplicaSnapshotDiff) {
		if len(diff) != 1 {
			t.Errorf("diff length = %d, diff = %v", len(diff), diff)
		}
		d := diff[0]
		if d.Leader != false || !bytes.Equal([]byte("e"), d.Key) || !timestamp.Equal(d.Timestamp) {
			t.Errorf("diff = %v", d)
		}
		notify <- struct{}{}
	}
	// Run consistency check.
	checkArgs := roachpb.CheckConsistencyRequest{
		Span: roachpb.Span{
			// span of keys that include "a" & "c".
			Key:    []byte("a"),
			EndKey: []byte("z"),
		},
	}
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &checkArgs); err != nil {
		t.Fatal(err)
	}
	select {
	case <-notify:
	case <-time.After(5 * time.Second):
		t.Fatal("didn't receive notification from VerifyChecksum() that should have panicked")
	}
}
// TestTransferRaftLeadership verifies that after the leader lease moves
// to another store, raft leadership for the range follows it.
func TestTransferRaftLeadership(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const numStores = 3
	mtc := startMultiTestContext(t, numStores)
	defer mtc.Stop()
	// Setup replication of range 1 on store 0 to stores 1 and 2.
	mtc.replicateRange(1, 1, 2)
	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	// Store 0's replica starts out as raft leader.
	status := rng.RaftStatus()
	if status.Lead != 1 {
		t.Fatalf("raft leader should be 1, but got %v", status.Lead)
	}
	mtc.expireLeaderLeases()
	// Force the read command request a new lease, this time acquired by
	// store 1.
	getArgs := getArgs([]byte("a"))
	_, pErr := client.SendWrapped(rg1(mtc.stores[1]), nil, &getArgs)
	if pErr != nil {
		t.Fatalf("expect get nil, actual get %v ", pErr)
	}
	// Wait for raft leadership transferring to be finished.
	util.SucceedsSoon(t, func() error {
		status = rng.RaftStatus()
		if status.Lead != 2 {
			return errors.Errorf("expected raft leader be 2; got %d", status.Lead)
		}
		return nil
	})
}
storage: deflake TestStoreRangeRemoveDead
We were too fancy with the test. IIRC, we couldn't just gossip stores when I
originally wrote it and we needed to use the store gossiper. But now we can
just rely on stores gossiping themselves.
This test will fail during a makestress with a TESTFLAGS="-test.timeout 5s".
This was caused by calling GossipWithFunction without actually gossiping in the
passed in function itself. So it would wait until the gossipStores loop would
occur to actually make progress within the gossipWithFunc. If any of the
goroutines were slow to call GossipWithFunction, it might miss the gossiped
store descriptor from the gossip loop. After one minute, the store will gossip
itself, causing the test to proceed (but possibly timing out on circle). This
test could be fixed by repeating the store gossiping loop or by actually
gossiping the stores inside the gossipWithFunc call. But as mentioned, this
storeGossiper setup is unneeded entirely.
Fixes #6319
// Copyright 2015 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Ben Darnell
package storage_test
import (
"bytes"
"fmt"
"reflect"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/pkg/errors"
"golang.org/x/net/context"
"github.com/cockroachdb/cockroach/base"
"github.com/cockroachdb/cockroach/client"
"github.com/cockroachdb/cockroach/config"
"github.com/cockroachdb/cockroach/keys"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/storage"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/storage/storagebase"
"github.com/cockroachdb/cockroach/testutils"
"github.com/cockroachdb/cockroach/testutils/gossiputil"
"github.com/cockroachdb/cockroach/util"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/leaktest"
"github.com/cockroachdb/cockroach/util/stop"
"github.com/cockroachdb/cockroach/util/timeutil"
)
// mustGetInt decodes an int64 value from the bytes field of v,
// panicking on malformed input (a bytes field that is not 0 or 8 bytes
// in length). A nil value decodes to 0.
func mustGetInt(v *roachpb.Value) int64 {
	if v == nil {
		return 0
	}
	n, err := v.GetInt()
	if err != nil {
		panic(err)
	}
	return n
}
// TestStoreRecoverFromEngine verifies that the store recovers all ranges and their contents
// after being stopped and recreated.
func TestStoreRecoverFromEngine(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer config.TestingDisableTableSplits()()
	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.Key("m")
	key1 := roachpb.Key("a")
	key2 := roachpb.Key("z")
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	// The engine outlives both store incarnations so the second store
	// can recover the first one's data.
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	eng := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	var rangeID2 roachpb.RangeID
	// get reads key from the given range and returns the decoded int64.
	get := func(store *storage.Store, rangeID roachpb.RangeID, key roachpb.Key) int64 {
		args := getArgs(key)
		resp, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
			RangeID: rangeID,
		}, &args)
		if err != nil {
			t.Fatal(err)
		}
		return mustGetInt(resp.(*roachpb.GetResponse).Value)
	}
	// validate checks that both ranges hold the expected totals
	// (2+11=13 for key1, 5+23=28 for key2).
	validate := func(store *storage.Store) {
		if val := get(store, rangeID, key1); val != 13 {
			t.Errorf("key %q: expected 13 but got %v", key1, val)
		}
		if val := get(store, rangeID2, key2); val != 28 {
			t.Errorf("key %q: expected 28 but got %v", key2, val)
		}
	}
	// First, populate the store with data across two ranges. Each range contains commands
	// that both predate and postdate the split.
	func() {
		stopper := stop.NewStopper()
		defer stopper.Stop()
		store := createTestStoreWithEngine(t, eng, clock, true, nil, stopper)
		increment := func(rangeID roachpb.RangeID, key roachpb.Key, value int64) (*roachpb.IncrementResponse, *roachpb.Error) {
			args := incrementArgs(key, value)
			resp, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
				RangeID: rangeID,
			}, &args)
			incResp, _ := resp.(*roachpb.IncrementResponse)
			return incResp, err
		}
		if _, err := increment(rangeID, key1, 2); err != nil {
			t.Fatal(err)
		}
		if _, err := increment(rangeID, key2, 5); err != nil {
			t.Fatal(err)
		}
		splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey)
		if _, err := client.SendWrapped(rg1(store), nil, &splitArgs); err != nil {
			t.Fatal(err)
		}
		rangeID2 = store.LookupReplica(roachpb.RKey(key2), nil).RangeID
		if rangeID2 == rangeID {
			t.Errorf("got same range id after split")
		}
		if _, err := increment(rangeID, key1, 11); err != nil {
			t.Fatal(err)
		}
		if _, err := increment(rangeID2, key2, 23); err != nil {
			t.Fatal(err)
		}
		validate(store)
	}()
	// Now create a new store with the same engine and make sure the expected data is present.
	// We must use the same clock because a newly-created manual clock will be behind the one
	// we wrote with and so will see stale MVCC data.
	store := createTestStoreWithEngine(t, eng, clock, false, nil, engineStopper)
	// Raft processing is initialized lazily; issue a no-op write request on each key to
	// ensure that it has been started.
	incArgs := incrementArgs(key1, 0)
	if _, err := client.SendWrapped(rg1(store), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	incArgs = incrementArgs(key2, 0)
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: rangeID2,
	}, &incArgs); err != nil {
		t.Fatal(err)
	}
	validate(store)
}
// TestStoreRecoverWithErrors verifies that even commands that fail are marked as
// applied so they are not retried after recovery.
func TestStoreRecoverWithErrors(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	// The engine is owned by a separate stopper so it survives the first
	// store's shutdown and can be re-opened by the second store below.
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	eng := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	// Counts how many times an increment on key "a" passes through the
	// command filter, i.e. how many times it is applied.
	numIncrements := 0
	func() {
		stopper := stop.NewStopper()
		defer stopper.Stop()
		sCtx := storage.TestStoreContext()
		sCtx.TestingKnobs.TestingCommandFilter =
			func(filterArgs storagebase.FilterArgs) *roachpb.Error {
				_, ok := filterArgs.Req.(*roachpb.IncrementRequest)
				if ok && filterArgs.Req.Header().Key.Equal(roachpb.Key("a")) {
					numIncrements++
				}
				return nil
			}
		store := createTestStoreWithEngine(t, eng, clock, true, &sCtx, stopper)
		// Write a bytes value so the increment will fail.
		putArgs := putArgs(roachpb.Key("a"), []byte("asdf"))
		if _, err := client.SendWrapped(rg1(store), nil, &putArgs); err != nil {
			t.Fatal(err)
		}
		// Try and fail to increment the key. It is important for this test that the
		// failure be the last thing in the raft log when the store is stopped.
		incArgs := incrementArgs(roachpb.Key("a"), 42)
		if _, err := client.SendWrapped(rg1(store), nil, &incArgs); err == nil {
			t.Fatal("did not get expected error")
		}
	}()
	// The failed increment must have been applied exactly once before shutdown.
	if numIncrements != 1 {
		t.Fatalf("expected 1 increments; was %d", numIncrements)
	}
	// Recover from the engine.
	store := createTestStoreWithEngine(t, eng, clock, false, nil, engineStopper)
	// Issue a no-op write to lazily initialize raft on the range.
	incArgs := incrementArgs(roachpb.Key("b"), 0)
	if _, err := client.SendWrapped(rg1(store), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// No additional increments were performed on key A during recovery.
	if numIncrements != 1 {
		t.Fatalf("expected 1 increments; was %d", numIncrements)
	}
}
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	// Issue a command on the first node before replicating.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	// Add a replica of range 1 on the second store.
	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc()); err != nil {
		t.Fatal(err)
	}
	// Verify no intent remains on range descriptor key.
	key := keys.RangeDescriptorKey(rng.Desc().StartKey)
	desc := roachpb.RangeDescriptor{}
	// A consistent read (consistent=true) would error out on an intent.
	if ok, err := engine.MVCCGetProto(context.Background(), mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); err != nil {
		t.Fatal(err)
	} else if !ok {
		t.Fatalf("range descriptor key %s was not found", key)
	}
	// Verify that in time, no intents remain on meta addressing
	// keys, and that range descriptor on the meta records is correct.
	util.SucceedsSoon(t, func() error {
		meta2, err := keys.Addr(keys.RangeMetaKey(roachpb.RKeyMax))
		if err != nil {
			t.Fatal(err)
		}
		meta1, err := keys.Addr(keys.RangeMetaKey(meta2))
		if err != nil {
			t.Fatal(err)
		}
		// Both meta levels must resolve and match the descriptor read above.
		for _, key := range []roachpb.RKey{meta2, meta1} {
			metaDesc := roachpb.RangeDescriptor{}
			if ok, err := engine.MVCCGetProto(context.Background(), mtc.stores[0].Engine(), key.AsRawKey(), mtc.stores[0].Clock().Now(), true, nil, &metaDesc); err != nil {
				return err
			} else if !ok {
				return errors.Errorf("failed to resolve %s", key.AsRawKey())
			}
			if !reflect.DeepEqual(metaDesc, desc) {
				return errors.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
			}
		}
		return nil
	})
	// Verify that the same data is available on the replica.
	util.SucceedsSoon(t, func() error {
		getArgs := getArgs([]byte("a"))
		// Inconsistent read: the follower can serve it locally without a lease.
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return errors.Errorf("failed to read data: %s", err)
		} else if e, v := int64(5), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return errors.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
}
// TestRestoreReplicas ensures that consensus group membership is properly
// persisted to disk and restored when a node is stopped and restarted.
func TestRestoreReplicas(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	firstRng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	// Perform an increment before replication to ensure that commands are not
	// repeated on restarts.
	incArgs := incrementArgs([]byte("a"), 23)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	if err := firstRng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, firstRng.Desc()); err != nil {
		t.Fatal(err)
	}
	// TODO(bdarnell): use the stopper.Quiesce() method. The problem
	// right now is that raft isn't creating a task for high-level work
	// it's creating while snapshotting and catching up. Ideally we'll
	// be able to capture that and then can just invoke
	// mtc.stopper.Quiesce() here.
	// TODO(bdarnell): initial creation and replication needs to be atomic;
	// cutting off the process too soon currently results in a corrupted range.
	time.Sleep(500 * time.Millisecond)
	mtc.restart()
	// Send a command on each store. The original store (the leader still)
	// will succeed.
	incArgs = incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// The follower will return a not leader error, indicating the command
	// should be forwarded to the leader.
	incArgs = incrementArgs([]byte("a"), 11)
	{
		_, pErr := client.SendWrapped(rg1(mtc.stores[1]), nil, &incArgs)
		if _, ok := pErr.GetDetail().(*roachpb.NotLeaderError); !ok {
			t.Fatalf("expected not leader error; got %s", pErr)
		}
	}
	// Send again, this time to first store.
	if _, pErr := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Expected value 39 = 23 (pre-replication) + 5 + 11; the pre-restart
	// increment must have been applied exactly once.
	util.SucceedsSoon(t, func() error {
		getArgs := getArgs([]byte("a"))
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return errors.Errorf("failed to read data: %s", err)
		} else if e, v := int64(39), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return errors.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
	// Both replicas have a complete list in Desc.Replicas
	for i, store := range mtc.stores {
		rng, err := store.GetReplica(1)
		if err != nil {
			t.Fatal(err)
		}
		desc := rng.Desc()
		if len(desc.Replicas) != 2 {
			t.Fatalf("store %d: expected 2 replicas, found %d", i, len(desc.Replicas))
		}
		if desc.Replicas[0].NodeID != mtc.stores[0].Ident.NodeID {
			t.Errorf("store %d: expected replica[0].NodeID == %d, was %d",
				i, mtc.stores[0].Ident.NodeID, desc.Replicas[0].NodeID)
		}
	}
}
// TestFailedReplicaChange verifies that a replica change whose transaction is
// aborted (via an injected error on the committing EndTransaction) leaves the
// range descriptor unchanged, and that a subsequent attempt succeeds once the
// error injection is disabled.
func TestFailedReplicaChange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Controls whether the command filter injects the "boom" error.
	var runFilter atomic.Value
	runFilter.Store(true)
	ctx := storage.TestStoreContext()
	mtc := &multiTestContext{}
	mtc.storeContext = &ctx
	mtc.storeContext.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if runFilter.Load().(bool) {
				// Fail only committing EndTransaction requests, aborting the
				// replica-change transaction at its final step.
				if et, ok := filterArgs.Req.(*roachpb.EndTransactionRequest); ok && et.Commit {
					return roachpb.NewErrorWithTxn(errors.Errorf("boom"), filterArgs.Hdr.Txn)
				}
			}
			return nil
		}
	mtc.Start(t, 2)
	defer mtc.Stop()
	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc(),
	); !testutils.IsError(err, "boom") {
		t.Fatalf("did not get expected error: %v", err)
	}
	// After the aborted transaction, r.Desc was not updated.
	// TODO(bdarnell): expose and inspect raft's internal state.
	if len(rng.Desc().Replicas) != 1 {
		t.Fatalf("expected 1 replica, found %d", len(rng.Desc().Replicas))
	}
	// The pending config change flag was cleared, so a subsequent attempt
	// can succeed.
	runFilter.Store(false)
	// The first failed replica change has laid down intents. Make sure those
	// are pushable by making the transaction abandoned.
	mtc.manualClock.Increment(10 * base.DefaultHeartbeatInterval.Nanoseconds())
	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc(),
	); err != nil {
		t.Fatal(err)
	}
	// Wait for the range to sync to both replicas (mainly so leaktest doesn't
	// complain about goroutines involved in the process).
	util.SucceedsSoon(t, func() error {
		for _, store := range mtc.stores {
			rang, err := store.GetReplica(1)
			if err != nil {
				return err
			}
			if lr := len(rang.Desc().Replicas); lr <= 1 {
				return errors.Errorf("expected > 1 replicas; got %d", lr)
			}
		}
		return nil
	})
}
// TestReplicateAfterTruncation verifies that we can truncate the old log
// entries and a new replica will be brought up from a snapshot.
func TestReplicateAfterTruncation(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	// Issue a command on the first node before replicating.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Get that command's log index.
	index, err := rng.GetLastIndex()
	if err != nil {
		t.Fatal(err)
	}
	// Truncate the log at index+1 (log entries < N are removed, so this includes
	// the increment).
	truncArgs := truncateLogArgs(index + 1)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &truncArgs); err != nil {
		t.Fatal(err)
	}
	// Issue a second command post-truncation.
	incArgs = incrementArgs([]byte("a"), 11)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Now add the second replica. Because the first increment's log entry has
	// been truncated away, it must be caught up via snapshot.
	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc()); err != nil {
		t.Fatal(err)
	}
	// Once it catches up, the effects of both commands can be seen (5 + 11 = 16).
	util.SucceedsSoon(t, func() error {
		getArgs := getArgs([]byte("a"))
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return errors.Errorf("failed to read data: %s", err)
		} else if e, v := int64(16), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return errors.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
	rng2, err := mtc.stores[1].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	// MVCC stats should converge on both replicas once the snapshot applies.
	util.SucceedsSoon(t, func() error {
		if mvcc, mvcc2 := rng.GetMVCCStats(), rng2.GetMVCCStats(); mvcc2 != mvcc {
			return errors.Errorf("expected stats on new range:\n%+v\nto equal old:\n%+v", mvcc2, mvcc)
		}
		return nil
	})
	// Send a third command to verify that the log states are synced up so the
	// new node can accept new commands.
	incArgs = incrementArgs([]byte("a"), 23)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// 5 + 11 + 23 = 39 must eventually be visible on the follower.
	util.SucceedsSoon(t, func() error {
		getArgs := getArgs([]byte("a"))
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return errors.Errorf("failed to read data: %s", err)
		} else if e, v := int64(39), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return errors.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
}
// TestStoreRangeUpReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeUpReplicate(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	// Seed the gossip network with a descriptor for every store so the
	// replication queue can see all potential targets.
	descs := make([]*roachpb.StoreDescriptor, 0, len(mtc.stores))
	for _, store := range mtc.stores {
		d, err := store.Descriptor()
		if err != nil {
			t.Fatal(err)
		}
		descs = append(descs, d)
	}
	for _, g := range mtc.gossips {
		gossiputil.NewStoreGossiper(g).GossipStores(descs, t)
	}

	// With peers known, kick off a replication scan on the first store.
	mtc.stores[0].ForceReplicationScanAndProcess()

	// Eventually every store should hold a replica covering ["a", "b").
	util.SucceedsSoon(t, func() error {
		for _, store := range mtc.stores {
			if store.LookupReplica(roachpb.RKey("a"), roachpb.RKey("b")) == nil {
				return errors.Errorf("expected replica for keys \"a\" - \"b\"")
			}
		}
		return nil
	})
}
// getRangeMetadata retrieves the current range descriptor for the target
// range and fails the test if the lookup does not return exactly one
// descriptor.
//
// Calls to RangeLookup typically use inconsistent reads, but we
// want to do a consistent read here. This is important when we are
// considering one of the metadata ranges: we must not do an
// inconsistent lookup in our own copy of the range.
func getRangeMetadata(key roachpb.RKey, mtc *multiTestContext, t *testing.T) roachpb.RangeDescriptor {
	b := &client.Batch{}
	b.AddRawRequest(&roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			Key: keys.RangeMetaKey(key),
		},
		MaxRanges: 1,
	})
	if err := mtc.dbs[0].Run(b); err != nil {
		t.Fatalf("error getting range metadata: %s", err)
	}
	// t.Fatalf terminates on error, so the response is valid here; no need
	// for the pre-declared variable and else branch the original used.
	reply := b.RawResponse().Responses[0].GetInner().(*roachpb.RangeLookupResponse)
	if a, e := len(reply.Ranges), 1; a != e {
		t.Fatalf("expected %d range descriptor, got %d", e, a)
	}
	return reply.Ranges[0]
}
// TestUnreplicateFirstRange verifies that multiTestContext still functions in
// the case where the first range (which contains range metadata) is
// unreplicated from the first store. This situation can arise occasionally in
// tests, as can a similar situation where the first store is no longer the
// leader of the first range; this verifies that those tests will not be
// affected.
func TestUnreplicateFirstRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	firstRangeID := roachpb.RangeID(1)
	// Add a replica on store 1, then drop the replica on store 0.
	mtc.replicateRange(firstRangeID, 1)
	mtc.unreplicateRange(firstRangeID, 0)
	// Replicate the range to store 2. The first range is no longer available
	// on store 0, and this command will fail if that situation is not
	// properly supported.
	mtc.replicateRange(firstRangeID, 2)
}
// TestStoreRangeDownReplicate verifies that the replication queue will notice
// over-replicated ranges and remove replicas from them.
func TestStoreRangeDownReplicate(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 5)
	defer mtc.Stop()
	store0 := mtc.stores[0]
	// Split off a range from the initial range for testing; there are
	// complications if the metadata ranges are removed from store 1, this
	// simplifies the test.
	splitKey := roachpb.Key("m")
	rightKey := roachpb.Key("z")
	{
		replica := store0.LookupReplica(roachpb.RKeyMin, nil)
		mtc.replicateRange(replica.RangeID, 1, 2)
		desc := replica.Desc()
		splitArgs := adminSplitArgs(splitKey, splitKey)
		if _, err := replica.AdminSplit(context.Background(), splitArgs, desc); err != nil {
			t.Fatal(err)
		}
	}
	// Replicate the new (right-hand) range to all five stores, making it
	// over-replicated relative to the default of three.
	rightKeyAddr, err := keys.Addr(rightKey)
	if err != nil {
		t.Fatal(err)
	}
	replica := store0.LookupReplica(rightKeyAddr, nil)
	desc := replica.Desc()
	mtc.replicateRange(desc.RangeID, 3, 4)
	// Initialize the gossip network so the replication queues can see all
	// stores.
	storeDescs := make([]*roachpb.StoreDescriptor, 0, len(mtc.stores))
	for _, s := range mtc.stores {
		desc, err := s.Descriptor()
		if err != nil {
			t.Fatal(err)
		}
		storeDescs = append(storeDescs, desc)
	}
	for _, g := range mtc.gossips {
		gossiputil.NewStoreGossiper(g).GossipStores(storeDescs, t)
	}
	// Poll until the replica count drops back to 3, expiring leases and
	// forcing replication scans on each iteration.
	maxTimeout := time.After(10 * time.Second)
	succeeded := false
	for !succeeded {
		select {
		case <-maxTimeout:
			t.Fatalf("Failed to achieve proper replication within 10 seconds")
		case <-time.After(10 * time.Millisecond):
			mtc.expireLeaderLeases()
			rangeDesc := getRangeMetadata(rightKeyAddr, mtc, t)
			if count := len(rangeDesc.Replicas); count < 3 {
				t.Fatalf("Removed too many replicas; expected at least 3 replicas, found %d", count)
			} else if count == 3 {
				succeeded = true
				// NOTE: this break exits the select (skipping the scan loop
				// below); the for loop then terminates via the succeeded flag.
				break
			}
			// Run replication scans on every store; only the store with the
			// leader lease will actually do anything. If we did not wait
			// for the scan to complete here it could be interrupted by the
			// next call to expireLeaderLeases.
			for _, store := range mtc.stores {
				store.ForceReplicationScanAndProcess()
			}
		}
	}
	// Expire leader leases one more time, so that any remaining resolutions can
	// get a leader lease.
	// TODO(bdarnell): understand why some tests need this.
	mtc.expireLeaderLeases()
}
// TestChangeReplicasDescriptorInvariant tests that a replica change aborts if
// another change has been made to the RangeDescriptor since it was initiated.
func TestChangeReplicasDescriptorInvariant(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	repl, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	// addReplica adds a replica of range 1 on the given store, using the
	// supplied (possibly stale) descriptor as the expected current state.
	addReplica := func(storeNum int, desc *roachpb.RangeDescriptor) error {
		return repl.ChangeReplicas(roachpb.ADD_REPLICA,
			roachpb.ReplicaDescriptor{
				NodeID:  mtc.stores[storeNum].Ident.NodeID,
				StoreID: mtc.stores[storeNum].Ident.StoreID,
			},
			desc,
		)
	}
	// Retain the descriptor for the range at this point.
	origDesc := repl.Desc()
	// Add replica to the second store, which should succeed.
	if err := addReplica(1, origDesc); err != nil {
		t.Fatal(err)
	}
	util.SucceedsSoon(t, func() error {
		r := mtc.stores[1].LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
		if r == nil {
			return errors.Errorf("expected replica for keys \"a\" - \"b\"")
		}
		return nil
	})
	// Attempt to add replica to the third store with the original descriptor.
	// This should fail because the descriptor is stale.
	if err := addReplica(2, origDesc); !testutils.IsError(err, `change replicas of \d+ failed`) {
		t.Fatalf("got unexpected error: %v", err)
	}
	// Both addReplica calls attempted to use origDesc.NextReplicaID.
	// The failed second call should not have overwritten the cached
	// replica descriptor from the successful first call.
	if rd, err := mtc.stores[0].ReplicaDescriptor(origDesc.RangeID, origDesc.NextReplicaID); err != nil {
		// Include the lookup error rather than silently dropping it.
		t.Fatalf("failed to look up replica %s: %s", origDesc.NextReplicaID, err)
	} else if a, e := rd.StoreID, mtc.stores[1].Ident.StoreID; a != e {
		// BUGFIX: the original passed (a, e), printing the actual value in the
		// "expected" slot and vice versa; expected comes first in the format.
		t.Fatalf("expected replica %s to point to store %s, but got %s", origDesc.NextReplicaID, e, a)
	}
	// Add to third store with fresh descriptor.
	if err := addReplica(2, repl.Desc()); err != nil {
		t.Fatal(err)
	}
	util.SucceedsSoon(t, func() error {
		r := mtc.stores[2].LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
		if r == nil {
			return errors.Errorf("expected replica for keys \"a\" - \"b\"")
		}
		return nil
	})
}
// TestProgressWithDownNode verifies that a surviving quorum can make progress
// with a downed node.
func TestProgressWithDownNode(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	rangeID := roachpb.RangeID(1)
	mtc.replicateRange(rangeID, 1, 2)
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Verify that the first increment propagates to all the engines.
	// verify reads key "a" directly from each engine (bypassing the stores)
	// and compares against the expected per-engine values.
	verify := func(expected []int64) {
		util.SucceedsSoon(t, func() error {
			values := []int64{}
			for _, eng := range mtc.engines {
				val, _, err := engine.MVCCGet(context.Background(), eng, roachpb.Key("a"), mtc.clock.Now(), true, nil)
				if err != nil {
					return err
				}
				values = append(values, mustGetInt(val))
			}
			if !reflect.DeepEqual(expected, values) {
				return errors.Errorf("expected %v, got %v", expected, values)
			}
			return nil
		})
	}
	verify([]int64{5, 5, 5})
	// Stop one of the replicas and issue a new increment.
	mtc.stopStore(1)
	incArgs = incrementArgs([]byte("a"), 11)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// The new increment can be seen on both live replicas.
	verify([]int64{16, 5, 16})
	// Once the downed node is restarted, it will catch up.
	mtc.restartStore(1)
	verify([]int64{16, 16, 16})
}
// TestReplicateAddAndRemove verifies that a downed replica can be replaced by
// a new one and subsequently garbage-collected, in both orders of
// add-then-remove and remove-then-add.
func TestReplicateAddAndRemove(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testFunc := func(addFirst bool) {
		mtc := startMultiTestContext(t, 4)
		defer mtc.Stop()
		// Replicate the initial range to three of the four nodes.
		rangeID := roachpb.RangeID(1)
		mtc.replicateRange(rangeID, 3, 1)
		incArgs := incrementArgs([]byte("a"), 5)
		if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
			t.Fatal(err)
		}
		// verify reads key "a" directly from each engine and compares against
		// the expected per-engine values (0 means no data on that engine).
		verify := func(expected []int64) {
			util.SucceedsSoon(t, func() error {
				values := []int64{}
				for _, eng := range mtc.engines {
					val, _, err := engine.MVCCGet(context.Background(), eng, roachpb.Key("a"), mtc.clock.Now(), true, nil)
					if err != nil {
						return err
					}
					values = append(values, mustGetInt(val))
				}
				if !reflect.DeepEqual(expected, values) {
					return errors.Errorf("addFirst: %t, expected %v, got %v", addFirst, expected, values)
				}
				return nil
			})
		}
		// The first increment is visible on all three replicas.
		verify([]int64{5, 5, 0, 5})
		// Stop a store and replace it.
		mtc.stopStore(1)
		if addFirst {
			mtc.replicateRange(rangeID, 2)
			mtc.unreplicateRange(rangeID, 1)
		} else {
			mtc.unreplicateRange(rangeID, 1)
			mtc.replicateRange(rangeID, 2)
		}
		// The new replica (store 2) catches up; store 1 still holds its old
		// data on disk even though its replica was removed.
		verify([]int64{5, 5, 5, 5})
		// Ensure that the rest of the group can make progress.
		incArgs = incrementArgs([]byte("a"), 11)
		if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
			t.Fatal(err)
		}
		verify([]int64{16, 5, 16, 16})
		// Bring the downed store back up (required for a clean shutdown).
		mtc.restartStore(1)
		// Node 1 never sees the increment that was added while it was
		// down. Perform another increment on the live nodes to verify.
		incArgs = incrementArgs([]byte("a"), 23)
		if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
			t.Fatal(err)
		}
		verify([]int64{39, 5, 39, 39})
		// Wait out the leader lease and the unleased duration to make the replica GC'able.
		mtc.expireLeaderLeases()
		mtc.manualClock.Increment(int64(
			storage.ReplicaGCQueueInactivityThreshold + 1))
		mtc.stores[1].ForceReplicaGCScanAndProcess()
		// The removed store no longer has any of the data from the range.
		verify([]int64{39, 0, 39, 39})
		// Verify the final membership by store ID. Replica ID 3 belonged to
		// the removed replica on store ID 2; the replacement on store ID 3
		// received the next ID, 4.
		desc := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil).Desc()
		replicaIDsByStore := map[roachpb.StoreID]roachpb.ReplicaID{}
		for _, rep := range desc.Replicas {
			replicaIDsByStore[rep.StoreID] = rep.ReplicaID
		}
		expected := map[roachpb.StoreID]roachpb.ReplicaID{1: 1, 4: 2, 3: 4}
		if !reflect.DeepEqual(expected, replicaIDsByStore) {
			t.Fatalf("expected replica IDs to be %v but got %v", expected, replicaIDsByStore)
		}
	}
	// Run the test twice, once adding the replacement before removing
	// the downed node, and once removing the downed node first.
	testFunc(true)
	testFunc(false)
}
// TestRaftHeartbeats verifies that coalesced heartbeats are correctly
// suppressing elections in an idle cluster.
func TestRaftHeartbeats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	mtc.replicateRange(1, 1, 2)

	// Record the term and leadership state before the cluster goes idle.
	initialStatus := mtc.stores[0].RaftStatus(1)
	firstTerm := initialStatus.Term
	if state := initialStatus.SoftState.RaftState; state != raft.StateLeader {
		t.Errorf("expected node 0 to initially be leader but was %s", state)
	}

	// Let several raft ticks go by with no client traffic.
	time.Sleep(5 * mtc.makeContext(0).RaftTickInterval)

	// Leadership and term must be unchanged: heartbeats suppressed elections.
	currentStatus := mtc.stores[0].RaftStatus(1)
	if state := currentStatus.SoftState.RaftState; state != raft.StateLeader {
		t.Errorf("expected node 0 to be leader after sleeping but was %s", state)
	}
	if currentStatus.Term != firstTerm {
		t.Errorf("while sleeping, term changed from %d to %d", firstTerm, currentStatus.Term)
	}
}
// TestReplicateAfterSplit verifies that a new replica whose start key
// is not KeyMin replicating to a fresh store can apply snapshots correctly.
func TestReplicateAfterSplit(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.Key("m")
	key := roachpb.Key("z")
	store0 := mtc.stores[0]
	// Make the split
	splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey)
	if _, err := client.SendWrapped(rg1(store0), nil, &splitArgs); err != nil {
		t.Fatal(err)
	}
	// The right-hand range (containing "z") must have a new range ID.
	rangeID2 := store0.LookupReplica(roachpb.RKey(key), nil).RangeID
	if rangeID2 == rangeID {
		t.Errorf("got same range id after split")
	}
	// Issue an increment for later check.
	incArgs := incrementArgs(key, 11)
	if _, err := client.SendWrappedWith(rg1(store0), nil, roachpb.Header{
		RangeID: rangeID2,
	}, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Now add the second replica. This triggers a snapshot of the
	// post-split range onto the fresh store.
	mtc.replicateRange(rangeID2, 1)
	if mtc.stores[1].LookupReplica(roachpb.RKey(key), nil).GetMaxBytes() == 0 {
		t.Error("Range MaxBytes is not set after snapshot applied")
	}
	// Once it catches up, the effects of increment commands can be seen.
	util.SucceedsSoon(t, func() error {
		getArgs := getArgs(key)
		// Reading on non-leader replica should use inconsistent read
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			RangeID:         rangeID2,
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return errors.Errorf("failed to read data: %s", err)
		} else if e, v := int64(11), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return errors.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
}
// TestReplicaRemovalCampaign verifies that a new replica after a split can be
// transferred away/replaced without campaigning the old one.
func TestReplicaRemovalCampaign(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testData := []struct {
		remove        bool // remove the post-split replica from store 0
		expectAdvance bool // whether its raft term is expected to advance
	}{
		{ // Replica removed
			remove:        true,
			expectAdvance: false,
		},
		{ // Default behavior
			remove:        false,
			expectAdvance: true,
		},
	}
	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.Key("m")
	key2 := roachpb.Key("z")
	for i, td := range testData {
		func() {
			mtc := startMultiTestContext(t, 2)
			defer mtc.Stop()
			// Replicate range to enable raft campaigning.
			mtc.replicateRange(rangeID, 1)
			store0 := mtc.stores[0]
			// Make the split.
			splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey)
			if _, err := client.SendWrapped(rg1(store0), nil, &splitArgs); err != nil {
				t.Fatal(err)
			}
			replica2 := store0.LookupReplica(roachpb.RKey(key2), nil)
			if td.remove {
				// Simulate second replica being transferred by removing it.
				if err := store0.RemoveReplica(replica2, *replica2.Desc(), true); err != nil {
					t.Fatal(err)
				}
			}
			var latestTerm uint64
			if td.expectAdvance {
				// Wait for the raft term to advance past the first value
				// observed (latestTerm records the first observation, then any
				// later equal-or-lower term keeps the retry loop going).
				util.SucceedsSoon(t, func() error {
					if raftStatus := replica2.RaftStatus(); raftStatus != nil {
						if term := raftStatus.Term; term <= latestTerm {
							return errors.Errorf("%d: raft term has not yet advanced: %d", i, term)
						} else if latestTerm == 0 {
							latestTerm = term
						}
					} else {
						return errors.Errorf("%d: raft group is not yet initialized", i)
					}
					return nil
				})
			} else {
				// Poll for one second: the first observed term is recorded;
				// any subsequent advance beyond it is a failure.
				for start := timeutil.Now(); timeutil.Since(start) < time.Second; time.Sleep(10 * time.Millisecond) {
					if raftStatus := replica2.RaftStatus(); raftStatus != nil {
						if term := raftStatus.Term; term > latestTerm {
							if latestTerm == 0 {
								latestTerm = term
							} else {
								t.Errorf("%d: raft term unexpectedly advanced: %d", i, term)
								break
							}
						}
					}
				}
			}
		}()
	}
}
// TestRangeDescriptorSnapshotRace calls Snapshot() repeatedly while
// transactions are performed on the range descriptor.
func TestRangeDescriptorSnapshotRace(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 1)
	defer mtc.Stop()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	// Call Snapshot() in a loop and ensure it never fails.
	// NOTE(review): t.Fatal is called from this background goroutine; the
	// testing package expects Fatal from the test goroutine — confirm this
	// is acceptable here or convert to t.Error + return.
	stopper.RunWorker(func() {
		for {
			select {
			case <-stopper.ShouldStop():
				return
			default:
				// Snapshot both the leftmost and rightmost ranges on each pass.
				if rng := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil); rng == nil {
					t.Fatal("failed to look up min range")
				} else if _, err := rng.GetSnapshot(); err != nil {
					t.Fatalf("failed to snapshot min range: %s", err)
				}
				if rng := mtc.stores[0].LookupReplica(roachpb.RKey("Z"), nil); rng == nil {
					t.Fatal("failed to look up max range")
				} else if _, err := rng.GetSnapshot(); err != nil {
					t.Fatalf("failed to snapshot max range: %s", err)
				}
			}
		}
	})
	// Split the range repeatedly, carving chunks off the end of the
	// initial range. The bug that this test was designed to find
	// usually occurred within the first 5 iterations.
	for i := 20; i > 0; i-- {
		rng := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil)
		if rng == nil {
			t.Fatal("failed to look up min range")
		}
		desc := rng.Desc()
		args := adminSplitArgs(roachpb.KeyMin, []byte(fmt.Sprintf("A%03d", i)))
		if _, err := rng.AdminSplit(context.Background(), args, desc); err != nil {
			t.Fatal(err)
		}
	}
	// Split again, carving chunks off the beginning of the final range.
	for i := 0; i < 20; i++ {
		rng := mtc.stores[0].LookupReplica(roachpb.RKey("Z"), nil)
		if rng == nil {
			t.Fatal("failed to look up max range")
		}
		desc := rng.Desc()
		args := adminSplitArgs(roachpb.KeyMin, []byte(fmt.Sprintf("B%03d", i)))
		if _, err := rng.AdminSplit(context.Background(), args, desc); err != nil {
			t.Fatal(err)
		}
	}
}
// TestRaftAfterRemoveRange verifies that the raft state removes
// a remote node correctly after the Replica was removed from the Store.
func TestRaftAfterRemoveRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	// Make the split.
	splitArgs := adminSplitArgs(roachpb.KeyMin, []byte("b"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &splitArgs); err != nil {
		t.Fatal(err)
	}
	// Replicate the post-split range to stores 1 and 2, then remove both
	// replicas again.
	rangeID := roachpb.RangeID(2)
	mtc.replicateRange(rangeID, 1, 2)
	mtc.unreplicateRange(rangeID, 2)
	mtc.unreplicateRange(rangeID, 1)
	// Wait for the removal to be processed.
	util.SucceedsSoon(t, func() error {
		_, err := mtc.stores[1].GetReplica(rangeID)
		if _, ok := err.(*roachpb.RangeNotFoundError); ok {
			return nil
		} else if err != nil {
			return err
		}
		return errors.Errorf("range still exists")
	})
	// Hand-build replica descriptors; node and replica IDs are derived from
	// the store IDs here (the test relies on this correspondence).
	replica1 := roachpb.ReplicaDescriptor{
		ReplicaID: roachpb.ReplicaID(mtc.stores[1].StoreID()),
		NodeID:    roachpb.NodeID(mtc.stores[1].StoreID()),
		StoreID:   mtc.stores[1].StoreID(),
	}
	replica2 := roachpb.ReplicaDescriptor{
		ReplicaID: roachpb.ReplicaID(mtc.stores[2].StoreID()),
		NodeID:    roachpb.NodeID(mtc.stores[2].StoreID()),
		StoreID:   mtc.stores[2].StoreID(),
	}
	// Send a stray heartbeat to the now-removed replica on store 1; this must
	// not resurrect any raft state.
	if err := mtc.transports[2].Send(&storage.RaftMessageRequest{
		GroupID:     0,
		ToReplica:   replica1,
		FromReplica: replica2,
		Message: raftpb.Message{
			From: uint64(replica2.ReplicaID),
			To:   uint64(replica1.ReplicaID),
			Type: raftpb.MsgHeartbeat,
		}}); err != nil {
		t.Fatal(err)
	}
	// Execute another replica change to ensure that raft has processed
	// the heartbeat just sent.
	mtc.replicateRange(roachpb.RangeID(1), 1)
	// Expire leases to ensure any remaining intent resolutions can complete.
	// TODO(bdarnell): understand why some tests need this.
	mtc.expireLeaderLeases()
}
// TestRaftRemoveRace adds and removes a replica repeatedly in an
// attempt to reproduce a race
// (https://github.com/cockroachdb/cockroach/issues/1911). Note that
// 10 repetitions is not enough to reliably reproduce the problem, but
// it's better than any other tests we have for this (increasing the
// number of repetitions adds an unacceptable amount of test runtime).
func TestRaftRemoveRace(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	const iterations = 10
	rid := roachpb.RangeID(1)
	mtc.replicateRange(rid, 1, 2)
	for iter := 0; iter < iterations; iter++ {
		// Cycle the replica on store 2 off and back on.
		mtc.unreplicateRange(rid, 2)
		mtc.replicateRange(rid, 2)
	}
}
// TestStoreRangeRemoveDead verifies that if a store becomes dead, the
// ReplicateQueue will notice and remove any replicas on it.
func TestStoreRangeRemoveDead(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := &multiTestContext{}
	mtc.timeUntilStoreDead = storage.TestTimeUntilStoreDead
	mtc.Start(t, 3)
	defer mtc.Stop()

	// Replicate the range to all stores.
	replica := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil)
	mtc.replicateRange(replica.RangeID, 1, 2)

	// Gossip every store so all of them initially appear alive.
	for _, s := range mtc.stores {
		s.GossipStore()
	}

	rangeDesc := getRangeMetadata(roachpb.RKeyMin, mtc, t)
	if e, a := 3, len(rangeDesc.Replicas); e != a {
		t.Fatalf("expected %d replicas, only found %d, rangeDesc: %+v", e, a, rangeDesc)
	}

	// This can't use SucceedsSoon as using the backoff mechanic won't work
	// as it requires a specific cadence of re-gossiping the alive stores to
	// maintain their alive status.
	tickerDur := storage.TestTimeUntilStoreDead / 2
	ticker := time.NewTicker(tickerDur)
	defer ticker.Stop()

	maxTime := 5 * time.Second
	maxTimeout := time.After(maxTime)

	// Store 2 is never re-gossiped below, so once the manual clock has
	// advanced past the dead threshold it is considered dead and the
	// replication queue should remove its replica.
	for len(getRangeMetadata(roachpb.RKeyMin, mtc, t).Replicas) > 2 {
		select {
		case <-maxTimeout:
			t.Fatalf("Failed to remove the dead replica within %s", maxTime)
		case <-ticker.C:
			mtc.manualClock.Increment(int64(tickerDur))

			// Keep gossiping the alive stores.
			mtc.stores[0].GossipStore()
			mtc.stores[1].GossipStore()

			// Force the repair queues on all alive stores to run.
			mtc.stores[0].ForceReplicationScanAndProcess()
			mtc.stores[1].ForceReplicationScanAndProcess()
		}
	}
}
// TestStoreRangeRebalance verifies that the replication queue will take
// rebalancing opportunities and add a new replica on another store.
func TestStoreRangeRebalance(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// Start multiTestContext with replica rebalancing enabled.
	mtc := &multiTestContext{
		storeContext: &storage.StoreContext{},
	}
	*mtc.storeContext = storage.TestStoreContext()
	mtc.storeContext.AllocatorOptions = storage.AllocatorOptions{
		AllowRebalance: true,
		Deterministic:  true,
	}

	// Four stores.
	mtc.Start(t, 4)
	defer mtc.Stop()

	// Replicate the first range to the first three stores.
	store0 := mtc.stores[0]
	replica := store0.LookupReplica(roachpb.RKeyMin, nil)
	desc := replica.Desc()
	mtc.replicateRange(desc.RangeID, 1, 2)

	// Initialize the gossip network with fake capacity data.
	storeDescs := make([]*roachpb.StoreDescriptor, 0, len(mtc.stores))
	for _, s := range mtc.stores {
		desc, err := s.Descriptor()
		if err != nil {
			t.Fatal(err)
		}
		desc.Capacity.RangeCount = 1
		// Make sure store[1] is chosen as removal target.
		if desc.StoreID == mtc.stores[1].StoreID() {
			desc.Capacity.RangeCount = 4
		}
		storeDescs = append(storeDescs, desc)
	}
	for _, g := range mtc.gossips {
		gossiputil.NewStoreGossiper(g).GossipStores(storeDescs, t)
	}

	// This can't use SucceedsSoon as using the exponential backoff mechanic
	// won't work well with the forced replication scans.
	maxTimeout := time.After(5 * time.Second)
	succeeded := false
	for !succeeded {
		select {
		case <-maxTimeout:
			t.Fatal("Failed to rebalance replica within 5 seconds")
		case <-time.After(10 * time.Millisecond):
			// Look up the official range descriptor, make sure fourth store is on it.
			rangeDesc := getRangeMetadata(roachpb.RKeyMin, mtc, t)

			// Test if we have already succeeded.
			for _, repl := range rangeDesc.Replicas {
				if repl.StoreID == mtc.stores[3].StoreID() {
					succeeded = true
				}
			}
			if succeeded {
				// NB: this break exits only the select; the enclosing for
				// loop then terminates because succeeded is true. It just
				// skips the lease-expiry/scan work below on the last pass.
				break
			}

			mtc.expireLeaderLeases()
			mtc.stores[1].ForceReplicationScanAndProcess()
		}
	}
}
// TestReplicateRogueRemovedNode ensures that a rogue removed node
// (i.e. a node that has been removed from the range but doesn't know
// it yet because it was down or partitioned away when it happened)
// cannot cause other removed nodes to recreate their ranges.
func TestReplicateRogueRemovedNode(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	// First put the range on all three nodes.
	raftID := roachpb.RangeID(1)
	mtc.replicateRange(raftID, 1, 2)

	// Put some data in the range so we'll have something to test for.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Wait for all nodes to catch up.
	mtc.waitForValues(roachpb.Key("a"), []int64{5, 5, 5})

	// Stop node 2; while it is down remove the range from nodes 2 and 1.
	mtc.stopStore(2)
	mtc.unreplicateRange(raftID, 2)
	mtc.unreplicateRange(raftID, 1)

	// Make a write on node 0; this will not be replicated because 0 is the only node left.
	incArgs = incrementArgs([]byte("a"), 11)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}

	// Wait for the replica to be GC'd on node 1.
	// Store 0 has two writes, 1 has erased everything, and 2 still has the first write.
	// A single pass of ForceReplicaGCScanAndProcess is not enough, since the replica
	// may be recreated by a stray raft message, so we run the GC scan inside the loop.
	// TODO(bdarnell): if the call to RemoveReplica in replicaGCQueue.process can be
	// moved under the lock, then the GC scan can be moved out of this loop.
	util.SucceedsSoon(t, func() error {
		mtc.expireLeaderLeases()
		mtc.manualClock.Increment(int64(
			storage.ReplicaGCQueueInactivityThreshold) + 1)
		mtc.stores[1].ForceReplicaGCScanAndProcess()

		actual := mtc.readIntFromEngines(roachpb.Key("a"))
		expected := []int64{16, 0, 5}
		if !reflect.DeepEqual(expected, actual) {
			return errors.Errorf("expected %v, got %v", expected, actual)
		}
		return nil
	})

	// Bring node 2 back up.
	mtc.restartStore(2)

	// Try to issue a command on node 2. It should not be able to commit
	// (so we add it asynchronously).
	var startWG sync.WaitGroup
	startWG.Add(1)
	var finishWG sync.WaitGroup
	finishWG.Add(1)
	go func() {
		// t.Fatal must only be called from the goroutine running the test
		// function (it calls runtime.Goexit); report failures with t.Error
		// instead and make sure both WaitGroups are always released so the
		// main goroutine cannot deadlock on an early failure.
		defer finishWG.Done()
		rng, err := mtc.stores[2].GetReplica(raftID)
		if err != nil {
			startWG.Done()
			t.Error(err)
			return
		}
		incArgs := incrementArgs([]byte("a"), 23)
		startWG.Done()
		if _, err := client.SendWrappedWith(rng, nil, roachpb.Header{Timestamp: mtc.stores[2].Clock().Now()}, &incArgs); err == nil {
			t.Error("expected error during shutdown")
		}
	}()
	startWG.Wait()

	// Sleep a bit to let the command proposed on node 2 proceed if it's
	// going to. Prior to the introduction of replica tombstones, this
	// would lead to split-brain: Node 2 would wake up node 1 and they
	// would form a quorum, even though node 0 had removed them both.
	// Now the tombstone on node 1 prevents it from rejoining the rogue
	// copy of the group.
	time.Sleep(100 * time.Millisecond)
	util.SucceedsSoon(t, func() error {
		actual := mtc.readIntFromEngines(roachpb.Key("a"))
		// Normally, replica GC has not happened yet on store 2, so we
		// expect {16, 0, 5}. However, it is possible (on a
		// slow/overloaded machine) for the end of the ChangeReplicas
		// transaction to be queued up inside the raft transport for long
		// enough that it doesn't arrive until after store 2 has been
		// restarted, so it is able to trigger an early GC on the
		// restarted node, resulting in {16, 0, 0}.
		// TODO(bdarnell): When #5789 is fixed, the probabilities flip and
		// {16, 0, 0} becomes the expected case. When this happens
		// we should just combine this check with the following one.
		expected1 := []int64{16, 0, 5}
		expected2 := []int64{16, 0, 0}
		if !reflect.DeepEqual(expected1, actual) && !reflect.DeepEqual(expected2, actual) {
			return errors.Errorf("expected %v or %v, got %v", expected1, expected2, actual)
		}
		return nil
	})

	// Run garbage collection on node 2. The lack of an active leader
	// lease will cause GC to do a consistent range lookup, where it
	// will see that the range has been moved and delete the old
	// replica.
	mtc.expireLeaderLeases()
	mtc.manualClock.Increment(int64(
		storage.ReplicaGCQueueInactivityThreshold) + 1)
	mtc.stores[2].ForceReplicaGCScanAndProcess()
	mtc.waitForValues(roachpb.Key("a"), []int64{16, 0, 0})

	// Now that the group has been GC'd, the goroutine that was
	// attempting to write has finished (with an error).
	finishWG.Wait()
}
// TestReplicateRemovedNodeDisruptiveElection verifies that a vote
// request from a removed replica does not disturb the term of the
// surviving raft group.
func TestReplicateRemovedNodeDisruptiveElection(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 4)
	defer mtc.Stop()

	// Move the first range from the first node to the other three.
	rangeID := roachpb.RangeID(1)
	mtc.replicateRange(rangeID, 1, 2, 3)
	mtc.unreplicateRange(rangeID, 0)
	mtc.expireLeaderLeases()

	// Write on the second node, to ensure that the other nodes have
	// established leadership after the first node's removal.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(mtc.distSenders[1], nil, &incArgs); err != nil {
		t.Fatal(err)
	}

	// Save the current term, which is the latest among the live stores.
	findTerm := func() uint64 {
		var term uint64
		for i := 1; i < 4; i++ {
			s := mtc.stores[i].RaftStatus(rangeID)
			if s.Term > term {
				term = s.Term
			}
		}
		return term
	}
	term := findTerm()
	if term == 0 {
		t.Fatalf("expected non-zero term")
	}

	// Descriptors for the removed replica (0) and a live one (1).
	// NOTE(review): StoreID is reused for NodeID/ReplicaID, apparently
	// relying on the test harness assigning matching IDs — confirm.
	replica0 := roachpb.ReplicaDescriptor{
		ReplicaID: roachpb.ReplicaID(mtc.stores[0].StoreID()),
		NodeID:    roachpb.NodeID(mtc.stores[0].StoreID()),
		StoreID:   mtc.stores[0].StoreID(),
	}
	replica1 := roachpb.ReplicaDescriptor{
		ReplicaID: roachpb.ReplicaID(mtc.stores[1].StoreID()),
		NodeID:    roachpb.NodeID(mtc.stores[1].StoreID()),
		StoreID:   mtc.stores[1].StoreID(),
	}
	// Simulate an election triggered by the removed node.
	if err := mtc.transports[0].Send(&storage.RaftMessageRequest{
		GroupID:     rangeID,
		ToReplica:   replica1,
		FromReplica: replica0,
		Message: raftpb.Message{
			From: uint64(replica0.ReplicaID),
			To:   uint64(replica1.ReplicaID),
			Type: raftpb.MsgVote,
			Term: term + 1,
		},
	}); err != nil {
		t.Fatal(err)
	}

	// Wait a bit for the message to be processed.
	// TODO(bdarnell): This will be easier to test without waiting
	// when #5789 is done.
	time.Sleep(10 * time.Millisecond)

	// The message should have been discarded without triggering an
	// election or changing the term.
	newTerm := findTerm()
	if term != newTerm {
		t.Errorf("expected term to be constant, but changed from %v to %v", term, newTerm)
	}
}
// TestReplicateReAddAfterDown verifies that a range can be re-added to
// a store that was down when its replica was removed, and that the
// re-added replica catches back up.
func TestReplicateReAddAfterDown(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	// Replicate range 1 onto all three nodes.
	const rangeID = roachpb.RangeID(1)
	mtc.replicateRange(rangeID, 1, 2)

	// Write a value and wait for it to appear everywhere.
	firstInc := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &firstInc); err != nil {
		t.Fatal(err)
	}
	mtc.waitForValues(roachpb.Key("a"), []int64{5, 5, 5})

	// Stop node 2; while it is down remove the range from it. Since the node is
	// down it won't see the removal and clean up its replica.
	mtc.stopStore(2)
	mtc.unreplicateRange(rangeID, 2)

	// Write again; only the two live stores observe this increment.
	secondInc := incrementArgs([]byte("a"), 11)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &secondInc); err != nil {
		t.Fatal(err)
	}
	mtc.waitForValues(roachpb.Key("a"), []int64{16, 16, 5})

	// Bring it back up and re-add the range. There is a race when the
	// store applies its removal and re-addition back to back: the
	// replica may or may not have (asynchronously) garbage collected
	// its data in between. Whether the existing data is reused or the
	// replica gets recreated, the replica ID is changed by this
	// process. An ill-timed GC has been known to cause bugs including
	// https://github.com/cockroachdb/cockroach/issues/2873.
	mtc.restartStore(2)
	mtc.replicateRange(rangeID, 2)

	// The range should be synced back up.
	mtc.waitForValues(roachpb.Key("a"), []int64{16, 16, 16})
}
// TestLeaderRemoveSelf verifies that a leader can remove itself
// without panicking and future access to the range returns a
// RangeNotFoundError (not RaftGroupDeletedError, and even before
// the ReplicaGCQueue has run).
func TestLeaderRemoveSelf(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()

	// Disable the replica GC queue. This verifies that the replica is
	// considered removed even before the gc queue has run, and also
	// helps avoid a deadlock at shutdown.
	mtc.stores[0].DisableReplicaGCQueue(true)
	raftID := roachpb.RangeID(1)
	mtc.replicateRange(raftID, 1)

	// Remove the replica from first store.
	mtc.unreplicateRange(raftID, 0)
	getArgs := getArgs([]byte("a"))

	// Force the read command request a new lease: advance the clock past
	// the current lease's expiration so the read cannot be served under
	// the old lease.
	clock := mtc.clocks[0]
	header := roachpb.Header{}
	header.Timestamp = clock.Update(clock.Now().Add(
		storage.LeaderLeaseExpiration(mtc.stores[0], clock), 0))

	// Expect get a RangeNotFoundError.
	_, pErr := client.SendWrappedWith(rg1(mtc.stores[0]), nil, header, &getArgs)
	if _, ok := pErr.GetDetail().(*roachpb.RangeNotFoundError); !ok {
		t.Fatalf("expect get RangeNotFoundError, actual get %v ", pErr)
	}
}
// TestRemoveRangeWithoutGC ensures that we do not panic when a
// replica has been removed but not yet GC'd (and therefore
// does not have an active raft group).
func TestRemoveRangeWithoutGC(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()

	// Disable the GC queue and move the range from store 0 to 1.
	mtc.stores[0].DisableReplicaGCQueue(true)
	const rangeID roachpb.RangeID = 1
	mtc.replicateRange(rangeID, 1)
	mtc.unreplicateRange(rangeID, 0)

	// Wait for store 0 to process the removal: once the descriptor it
	// sees contains a single replica, the change has applied.
	util.SucceedsSoon(t, func() error {
		rep, err := mtc.stores[0].GetReplica(rangeID)
		if err != nil {
			return err
		}
		desc := rep.Desc()
		if len(desc.Replicas) != 1 {
			return errors.Errorf("range has %d replicas", len(desc.Replicas))
		}
		return nil
	})

	// The replica's data is still on disk even though the Replica
	// object is removed.
	var desc roachpb.RangeDescriptor
	descKey := keys.RangeDescriptorKey(roachpb.RKeyMin)
	if ok, err := engine.MVCCGetProto(context.Background(), mtc.stores[0].Engine(), descKey,
		mtc.stores[0].Clock().Now(), true, nil, &desc); err != nil {
		t.Fatal(err)
	} else if !ok {
		t.Fatal("expected range descriptor to be present")
	}

	// Stop and restart the store to reset the replica's raftGroup
	// pointer to nil. As long as the store has not been restarted it
	// can continue to use its last known replica ID.
	mtc.stopStore(0)
	mtc.restartStore(0)
	// Turn off the GC queue to ensure that the replica is deleted at
	// startup instead of by the scanner. This is not 100% guaranteed
	// since the scanner could have already run at this point, but it
	// should be enough to prevent us from accidentally relying on the
	// scanner.
	mtc.stores[0].DisableReplicaGCQueue(true)

	// The Replica object is not recreated.
	if _, err := mtc.stores[0].GetReplica(rangeID); err == nil {
		t.Fatalf("expected replica to be missing")
	}

	// And the data is no longer on disk.
	if ok, err := engine.MVCCGetProto(context.Background(), mtc.stores[0].Engine(), descKey,
		mtc.stores[0].Clock().Now(), true, nil, &desc); err != nil {
		t.Fatal(err)
	} else if ok {
		t.Fatal("expected range descriptor to be absent")
	}
}
// TestCheckConsistencyMultiStore creates a DB with three stores
// with three-way replication. A value is added to the DB, and a
// consistency check is run.
func TestCheckConsistencyMultiStore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	// Replicate range 1 from store 0 onto stores 1 and 2.
	mtc.replicateRange(1, 1, 2)

	// Write a key that the consistency check will cover.
	pArgs := putArgs([]byte("a"), []byte("b"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &pArgs); err != nil {
		t.Fatal(err)
	}

	// Run a consistency check over a span that includes "a".
	checkArgs := roachpb.CheckConsistencyRequest{
		Span: roachpb.Span{
			Key:    []byte("a"),
			EndKey: []byte("aa"),
		},
	}
	header := roachpb.Header{Timestamp: mtc.stores[0].Clock().Now()}
	if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, header, &checkArgs); err != nil {
		t.Fatal(err)
	}
}
// TestCheckInconsistent plants divergent data on one follower and
// verifies that a consistency check detects it via the
// BadChecksumPanic testing knob.
func TestCheckInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const numStores = 3
	mtc := startMultiTestContext(t, numStores)
	defer mtc.Stop()

	// Setup replication of range 1 on store 0 to stores 1 and 2.
	mtc.replicateRange(1, 1, 2)

	// Write something to the DB.
	pArgs := putArgs([]byte("a"), []byte("b"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	pArgs = putArgs([]byte("c"), []byte("d"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &pArgs); err != nil {
		t.Fatal(err)
	}

	// Write some arbitrary data only to store 1. Inconsistent key "e"!
	// This goes straight to the engine, bypassing raft, so store 1's
	// data now diverges from its peers.
	key := []byte("e")
	var val roachpb.Value
	val.SetInt(42)
	timestamp := mtc.stores[1].Clock().Timestamp()
	if err := engine.MVCCPut(context.Background(), mtc.stores[1].Engine(), nil, key, timestamp, val, nil); err != nil {
		t.Fatal(err)
	}

	// The consistency check will panic on store 1; the knob below turns
	// the panic into a notification carrying the computed diff.
	notify := make(chan struct{}, 1)
	mtc.stores[1].TestingKnobs().BadChecksumPanic = func(diff []storage.ReplicaSnapshotDiff) {
		if len(diff) != 1 {
			t.Errorf("diff length = %d, diff = %v", len(diff), diff)
		}
		d := diff[0]
		if d.Leader != false || !bytes.Equal([]byte("e"), d.Key) || !timestamp.Equal(d.Timestamp) {
			t.Errorf("diff = %v", d)
		}
		notify <- struct{}{}
	}

	// Run consistency check.
	checkArgs := roachpb.CheckConsistencyRequest{
		Span: roachpb.Span{
			// span of keys that include "a" & "c".
			Key:    []byte("a"),
			EndKey: []byte("z"),
		},
	}
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &checkArgs); err != nil {
		t.Fatal(err)
	}
	select {
	case <-notify:
	case <-time.After(5 * time.Second):
		t.Fatal("didn't receive notification from VerifyChecksum() that should have panicked")
	}
}
// TestTransferRaftLeadership verifies that acquiring a new lease on a
// follower causes raft leadership to transfer to that follower.
func TestTransferRaftLeadership(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	// Replicate range 1 from store 0 onto stores 1 and 2.
	mtc.replicateRange(1, 1, 2)
	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	if lead := rng.RaftStatus().Lead; lead != 1 {
		t.Fatalf("raft leader should be 1, but got %v", lead)
	}
	mtc.expireLeaderLeases()

	// Force the read command request a new lease.
	gArgs := getArgs([]byte("a"))
	if _, pErr := client.SendWrapped(rg1(mtc.stores[1]), nil, &gArgs); pErr != nil {
		t.Fatalf("expect get nil, actual get %v ", pErr)
	}

	// Wait for raft leadership transferring to be finished.
	util.SucceedsSoon(t, func() error {
		if lead := rng.RaftStatus().Lead; lead != 2 {
			return errors.Errorf("expected raft leader be 2; got %d", lead)
		}
		return nil
	})
}
|
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package dependency
import (
"time"
"github.com/juju/errors"
"github.com/juju/loggo"
"launchpad.net/tomb"
"github.com/juju/juju/worker"
)
// logger emits this package's trace of engine activity under the
// "juju.worker.dependency" namespace.
var logger = loggo.GetLogger("juju.worker.dependency")
// NewEngine returns an Engine that will maintain any Installed Manifolds until
// either the engine is killed or one of the manifolds' workers returns an error
// that satisfies isFatal. The caller takes responsibility for the returned Engine.
func NewEngine(isFatal IsFatalFunc, errorDelay, bounceDelay time.Duration) Engine {
	e := &engine{
		// Policy.
		isFatal:     isFatal,
		errorDelay:  errorDelay,
		bounceDelay: bounceDelay,

		// State.
		manifolds:  map[string]Manifold{},
		dependents: map[string][]string{},
		current:    map[string]workerInfo{},

		// Communication into the loop goroutine.
		install: make(chan installTicket),
		started: make(chan startedTicket),
		stopped: make(chan stoppedTicket),
	}
	go func() {
		defer e.tomb.Done()
		e.tomb.Kill(e.loop())
	}()
	return e
}
// engine maintains workers corresponding to its installed manifolds, and
// restarts them whenever their inputs change.
type engine struct {

	// tomb tracks the lifetime of the loop goroutine and, via it, of
	// the engine as a whole.
	tomb tomb.Tomb

	// isFatal allows errors generated by workers to stop the engine.
	isFatal IsFatalFunc

	// errorDelay controls how long the engine waits before restarting a worker
	// that encountered an unknown error.
	errorDelay time.Duration

	// bounceDelay controls how long the engine waits before restarting a worker
	// that was deliberately shut down because its dependencies changed.
	bounceDelay time.Duration

	// manifolds holds the installed manifolds by name.
	manifolds map[string]Manifold

	// dependents holds, for each named manifold, those that depend on it.
	dependents map[string][]string

	// current holds the active worker information for each installed manifold.
	// It must only be accessed from the loop goroutine.
	current map[string]workerInfo

	// install, started, and stopped each communicate requests and changes into
	// the loop goroutine.
	install chan installTicket
	started chan startedTicket
	stopped chan stoppedTicket
}
// loop serializes manifold install operations and worker start/stop notifications.
// It's notable for its oneShotDying var, which is necessary because any number of
// start/stop notification could be in flight at the point the engine needs to stop;
// we need to handle all those, and any subsequent messages, until the main loop is
// confident that every worker has stopped. (The usual pattern -- to defer a cleanup
// method to run before tomb.Done in NewEngine -- is not cleanly applicable, because
// it needs to duplicate that start/stop message handling; better to localise that
// in this method.)
func (engine *engine) loop() error {
	oneShotDying := engine.tomb.Dying()
	for {
		select {
		case <-oneShotDying:
			// Nil the channel so this case fires exactly once; stop
			// requests are issued here, but the loop keeps servicing
			// tickets until every worker reports in.
			oneShotDying = nil
			for name := range engine.current {
				engine.stop(name)
			}
		case ticket := <-engine.install:
			// This is safe so long as the Install method reads the result.
			ticket.result <- engine.gotInstall(ticket.name, ticket.manifold)
		case ticket := <-engine.started:
			engine.gotStarted(ticket.name, ticket.worker)
		case ticket := <-engine.stopped:
			engine.gotStopped(ticket.name, ticket.error)
		}
		// Only exit once shutdown has begun AND every worker is gone.
		if engine.isDying() {
			if engine.allStopped() {
				return tomb.ErrDying
			}
		}
	}
}
// Kill is part of the worker.Worker interface. It asks the engine to
// shut down; the loop goroutine then stops every worker before the
// tomb finally reports death.
func (engine *engine) Kill() {
	engine.tomb.Kill(nil)
}
// Wait is part of the worker.Worker interface. It blocks until the
// loop goroutine has exited and returns its final error.
func (engine *engine) Wait() error {
	return engine.tomb.Wait()
}
// Install is part of the Engine interface. It hands the manifold to the
// loop goroutine and reports the outcome of the installation.
func (engine *engine) Install(name string, manifold Manifold) error {
	result := make(chan error)
	ticket := installTicket{name, manifold, result}
	select {
	case engine.install <- ticket:
		// This is safe so long as the loop sends a result.
		return <-result
	case <-engine.tomb.Dying():
		return errors.New("engine is shutting down")
	}
}
// gotInstall handles the params originally supplied to Install. It must only be
// called from the loop goroutine.
func (engine *engine) gotInstall(name string, manifold Manifold) error {
	logger.Infof("installing %s manifold...", name)
	if _, exists := engine.manifolds[name]; exists {
		return errors.Errorf("%s manifold already installed", name)
	}
	// Warn about (but tolerate) inputs that are not yet installed, and
	// register this manifold as a dependent of each of its inputs.
	for _, input := range manifold.Inputs {
		if _, known := engine.manifolds[input]; !known {
			logger.Infof("%s manifold depends on unknown %s manifold", name, input)
		}
		engine.dependents[input] = append(engine.dependents[input], name)
	}
	engine.manifolds[name] = manifold
	engine.current[name] = workerInfo{}
	engine.start(name, 0)
	return nil
}
// start invokes a runWorker goroutine for the manifold with the supplied name. It
// must only be called from the loop goroutine.
func (engine *engine) start(name string, delay time.Duration) {
	// Check preconditions. Note that a precondition failure Kills the
	// tomb rather than returning immediately; the isDying check below
	// then aborts the start, so execution never reaches runWorker after
	// a failed precondition.
	manifold, found := engine.manifolds[name]
	if !found {
		engine.tomb.Kill(errors.Errorf("fatal: unknown manifold %s", name))
	}

	// Copy current info and check more preconditions.
	info := engine.current[name]
	if !info.stopped() {
		engine.tomb.Kill(errors.Errorf("fatal: trying to start a second %s manifold worker", name))
	}

	// Final check that we're not shutting down yet...
	if engine.isDying() {
		logger.Infof("not starting %s manifold worker (shutting down)", name)
		return
	}

	// ...then update the info, copy it back to the engine, and start a worker
	// goroutine based on current known state.
	info.starting = true
	engine.current[name] = info
	getResource := engine.getResourceFunc(manifold.Inputs)
	go engine.runWorker(name, delay, manifold.Start, getResource)
}
// getResourceFunc returns a GetResourceFunc backed by a snapshot of current
// worker state, restricted to those workers declared in inputs. It must only
// be called from the loop goroutine; see inside for a detailed discussion of
// why we took this approach.
func (engine *engine) getResourceFunc(inputs []string) GetResourceFunc {
	// We snapshot the resources available at invocation time, rather than adding an
	// additional communicate-resource-request channel. The latter approach is not
	// unreasonable... but is prone to inelegant scrambles when starting several
	// dependent workers at once. For example:
	//
	//  * Install manifold A; loop starts worker A
	//  * Install manifold B; loop starts worker B
	//  * A communicates its worker back to loop; main thread bounces B
	//  * B asks for A, gets A, doesn't react to bounce (*)
	//  * B communicates its worker back to loop; loop kills it immediately in
	//    response to earlier bounce
	//  * loop starts worker B again, now everything's fine; but, still, yuck.
	//    This is not a happy path to take by default.
	//
	// The problem, of course, is in the (*); the main thread *does* know that B
	// needs to bounce soon anyway, and it *could* communicate that fact back via
	// an error over a channel back into getResource; the StartFunc could then
	// just return (say) that ErrResourceChanged and avoid the hassle of creating
	// a worker. But that adds a whole layer of complexity (and unpredictability
	// in tests, which is not much fun) for very little benefit.
	//
	// In the analogous scenario with snapshotted dependencies, we see a happier
	// picture at startup time:
	//
	//  * Install manifold A; loop starts worker A
	//  * Install manifold B; loop starts worker B with empty resource snapshot
	//  * A communicates its worker back to loop; main thread bounces B
	//  * B's StartFunc asks for A, gets nothing, returns ErrUnmetDependencies
	//  * loop restarts worker B with an up-to-date snapshot, B works fine
	//
	// We assume that, in the common case, most workers run without error most
	// of the time; and, thus, that the vast majority of worker startups will
	// happen as an agent starts. Furthermore, most of them will have simple
	// hard dependencies, and their Start funcs will be easy to write; the only
	// components that may be impacted by such a strategy will be those workers
	// which still want to run (with reduced functionality) with some dependency
	// unmet.
	//
	// Those may indeed suffer the occasional extra bounce as the system comes
	// to stability as it starts, or after a change; but workers *must* be
	// written for resilience in the face of arbitrary bounces *anyway*, so it
	// shouldn't be harmful.
	outputs := map[string]OutputFunc{}
	workers := map[string]worker.Worker{}
	for _, resourceName := range inputs {
		outputs[resourceName] = engine.manifolds[resourceName].Output
		workers[resourceName] = engine.current[resourceName].worker
	}
	// The returned closure reads only the snapshot maps above, so it is
	// safe to call from the worker's own goroutine.
	return func(resourceName string, out interface{}) bool {
		switch {
		case workers[resourceName] == nil:
			// Input not running (or not declared): resource unavailable.
			return false
		case outputs[resourceName] == nil:
			// Input running but exposes no output; succeed only if the
			// caller asked for nothing.
			return out == nil
		}
		return outputs[resourceName](workers[resourceName], out)
	}
}
// runWorker starts the supplied manifold's worker and communicates it back to the
// loop goroutine; waits for worker completion; and communicates any error encountered
// back to the loop goroutine. It must not be run on the loop goroutine.
func (engine *engine) runWorker(name string, delay time.Duration, start StartFunc, getResource GetResourceFunc) {
	// We may or may not send on started, but we *must* send on stopped.
	// The whole lifecycle is evaluated in this immediately-invoked func
	// so that exactly one error -- from delayed-start abort, startup
	// failure, or worker exit -- reaches the loop as a stoppedTicket.
	engine.stopped <- stoppedTicket{name, func() error {
		logger.Infof("starting %s manifold worker in %s...", name, delay)
		select {
		case <-time.After(delay):
		case <-engine.tomb.Dying():
			logger.Infof("not starting %s manifold worker (shutting down)", name)
			return tomb.ErrDying
		}

		logger.Infof("starting %s manifold worker", name)
		worker, err := start(getResource)
		if err != nil {
			logger.Infof("failed to start %s manifold worker: %v", name, err)
			return err
		}

		// Hand the worker to the loop -- unless the engine is already
		// dying, in which case kill it immediately instead.
		logger.Infof("running %s manifold worker: %v", name, worker)
		select {
		case <-engine.tomb.Dying():
			logger.Infof("stopping %s manifold worker (shutting down)", name)
			worker.Kill()
		case engine.started <- startedTicket{name, worker}:
			logger.Infof("registered %s manifold worker", name)
		}

		// Block until the worker exits; its exit error is what the loop
		// sees in gotStopped.
		return worker.Wait()
	}()}
}
// gotStarted updates the engine to reflect the creation of a worker. It must
// only be called from the loop goroutine.
func (engine *engine) gotStarted(name string, worker worker.Worker) {
	// Copy current info; check preconditions and abort the workers if we've
	// already been asked to stop it.
	info := engine.current[name]
	switch {
	case info.worker != nil:
		// A second worker for the same manifold is a programming error;
		// the fallthrough ensures the duplicate is also killed below.
		engine.tomb.Kill(errors.Errorf("fatal: unexpected %s manifold worker start", name))
		fallthrough
	case info.stopping, engine.isDying():
		logger.Infof("%s manifold worker no longer required", name)
		worker.Kill()
	default:
		// It's fine to use this worker; update info and copy back.
		logger.Infof("%s manifold worker started: %v", name, worker)
		info.starting = false
		info.worker = worker
		engine.current[name] = info

		// Any manifold that declares this one as an input needs to be restarted.
		engine.bounceDependents(name)
	}
}
// gotStopped updates the engine to reflect the demise of (or failure to create)
// a worker. It must only be called from the loop goroutine.
func (engine *engine) gotStopped(name string, err error) {
	logger.Infof("%s manifold worker stopped: %v", name, err)

	// Copy current info and check for reasons to stop the engine. A stop
	// notification for a manifold we believe already stopped indicates a
	// protocol violation somewhere. (errors.Errorf is required here: the
	// previous errors.New call left a literal %s in the message.)
	info := engine.current[name]
	if info.stopped() {
		engine.tomb.Kill(errors.Errorf("fatal: unexpected %s manifold worker stop", name))
	} else if engine.isFatal(err) {
		engine.tomb.Kill(err)
	}

	// Reset engine info; and bail out if we can be sure there's no need to bounce.
	engine.current[name] = workerInfo{}
	if engine.isDying() {
		logger.Infof("permanently stopped %s manifold worker (shutting down)", name)
		return
	}

	// If we told the worker to stop, we should start it again immediately,
	// whatever else happened.
	if info.stopping {
		engine.start(name, engine.bounceDelay)
	} else {
		// If we didn't stop it ourselves, we need to interpret the error.
		switch err {
		case nil:
			// Nothing went wrong; the task completed successfully. Nothing
			// needs to be done (unless the inputs change, in which case it
			// gets to check again).
		case ErrUnmetDependencies:
			// The task can't even start with the current state. Nothing more
			// can be done (until the inputs change, in which case we retry
			// anyway).
		default:
			// Something went wrong but we don't know what. Try again soon.
			engine.start(name, engine.errorDelay)
		}
	}

	// Manifolds that declared a dependency on this one only need to be notified
	// if the worker has changed; if it was already nil, nobody needs to know.
	if info.worker != nil {
		engine.bounceDependents(name)
	}
}
// stop ensures that any running or starting worker will be stopped in the
// near future. It must only be called from the loop goroutine.
func (engine *engine) stop(name string) {
	info := engine.current[name]
	// Nothing to do if a stop is already in progress or complete.
	if info.stopping || info.stopped() {
		return
	}
	// Mark the stop, signal the worker if one exists, and record the
	// updated info on the engine.
	info.stopping = true
	if w := info.worker; w != nil {
		w.Kill()
	}
	engine.current[name] = info
}
// isDying reports whether the engine has begun shutting down. It's safe
// to call from any goroutine.
func (engine *engine) isDying() bool {
	// A closed Dying channel is the tomb's shutdown signal; the default
	// case keeps this check non-blocking.
	select {
	case <-engine.tomb.Dying():
		return true
	default:
	}
	return false
}
// allStopped returns true if no workers are running or starting. It must only
// be called from the loop goroutine.
func (engine *engine) allStopped() bool {
	for _, info := range engine.current {
		if info.stopped() {
			continue
		}
		return false
	}
	return true
}
// bounceDependents starts every stopped dependent of the named manifold, and
// stops every started one (and trusts the rest of the engine to restart them).
// It must only be called from the loop goroutine.
func (engine *engine) bounceDependents(name string) {
	logger.Infof("restarting dependents of %s manifold", name)
	for _, dependent := range engine.dependents[name] {
		if !engine.current[dependent].stopped() {
			engine.stop(dependent)
			continue
		}
		engine.start(dependent, engine.bounceDelay)
	}
}
// workerInfo stores what an engine needs to know about the worker for a given
// Manifold. It is used as a value type; mutated copies must be written back
// into engine.current.
type workerInfo struct {
	// starting is true while a runWorker goroutine is attempting a start.
	starting bool
	// stopping is true once the engine has asked the worker to stop.
	stopping bool
	// worker is non-nil only while a successfully started worker is running.
	worker worker.Worker
}

// stopped returns true unless the worker is either assigned or starting.
func (info workerInfo) stopped() bool {
	switch {
	case info.worker != nil:
		return false
	case info.starting:
		return false
	}
	return true
}

// installTicket is used by engine to induce installation of a named manifold
// and pass on any errors encountered in the process.
type installTicket struct {
	name     string
	manifold Manifold
	// result must be read by the sender; the loop always sends exactly once.
	result chan<- error
}

// startedTicket is used by engine to notify the loop of the creation of the
// worker for a particular manifold.
type startedTicket struct {
	name   string
	worker worker.Worker
}

// stoppedTicket is used by engine to notify the loop of the demise of (or
// failure to create) the worker for a particular manifold.
type stoppedTicket struct {
	name string
	// error holds the worker's exit (or startup) error; nil means a clean stop.
	error error
}
trivials
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package dependency
import (
"time"
"github.com/juju/errors"
"github.com/juju/loggo"
"launchpad.net/tomb"
"github.com/juju/juju/worker"
)
// logger is the package-wide logger for the dependency engine.
var logger = loggo.GetLogger("juju.worker.dependency")
// NewEngine returns an Engine that will maintain any Installed Manifolds until
// either the engine is killed or one of the manifolds' workers returns an error
// that satisfies isFatal. The caller takes responsibility for the returned Engine.
//
// errorDelay is the restart delay applied after an unrecognised worker error;
// bounceDelay is the (usually shorter) delay applied after a deliberate stop.
func NewEngine(isFatal IsFatalFunc, errorDelay, bounceDelay time.Duration) Engine {
	engine := &engine{
		isFatal:     isFatal,
		errorDelay:  errorDelay,
		bounceDelay: bounceDelay,

		manifolds:  map[string]Manifold{},
		dependents: map[string][]string{},
		current:    map[string]workerInfo{},

		install: make(chan installTicket),
		started: make(chan startedTicket),
		stopped: make(chan stoppedTicket),
	}
	// The loop goroutine owns all engine state; its lifetime is tracked by
	// the tomb, so Wait returns only after loop has fully finished.
	go func() {
		defer engine.tomb.Done()
		engine.tomb.Kill(engine.loop())
	}()
	return engine
}
// engine maintains workers corresponding to its installed manifolds, and
// restarts them whenever their inputs change. All maps below are owned by
// the loop goroutine and must not be touched from anywhere else.
type engine struct {
	// tomb tracks the lifetime of the loop goroutine.
	tomb tomb.Tomb

	// isFatal allows errors generated by workers to stop the engine.
	isFatal IsFatalFunc

	// errorDelay controls how long the engine waits before restarting a worker
	// that encountered an unknown error.
	errorDelay time.Duration

	// bounceDelay controls how long the engine waits before restarting a worker
	// that was deliberately shut down because its dependencies changed.
	bounceDelay time.Duration

	// manifolds holds the installed manifolds by name.
	manifolds map[string]Manifold

	// dependents holds, for each named manifold, those that depend on it.
	dependents map[string][]string

	// current holds the active worker information for each installed manifold.
	current map[string]workerInfo

	// install, started, and stopped each communicate requests and changes into
	// the loop goroutine.
	install chan installTicket
	started chan startedTicket
	stopped chan stoppedTicket
}
// loop serializes manifold install operations and worker start/stop notifications.
// It's notable for its oneShotDying var, which is necessary because any number of
// start/stop notification could be in flight at the point the engine needs to stop;
// we need to handle all those, and any subsequent messages, until the main loop is
// confident that every worker has stopped. (The usual pattern -- to defer a cleanup
// method to run before tomb.Done in NewEngine -- is not cleanly applicable, because
// it needs to duplicate that start/stop message handling; better to localise that
// in this method.)
func (engine *engine) loop() error {
	oneShotDying := engine.tomb.Dying()
	for {
		select {
		case <-oneShotDying:
			// Nil the channel so this case fires exactly once; afterwards we
			// keep servicing tickets until every worker has reported stopped.
			oneShotDying = nil
			for name := range engine.current {
				engine.stop(name)
			}
		case ticket := <-engine.install:
			// This is safe so long as the Install method reads the result.
			ticket.result <- engine.gotInstall(ticket.name, ticket.manifold)
		case ticket := <-engine.started:
			engine.gotStarted(ticket.name, ticket.worker)
		case ticket := <-engine.stopped:
			engine.gotStopped(ticket.name, ticket.error)
		}
		// Only exit once dying *and* no worker is running or starting.
		if engine.isDying() {
			if engine.allStopped() {
				return tomb.ErrDying
			}
		}
	}
}
// Kill is part of the worker.Worker interface. It asks the engine to shut
// down and returns immediately; use Wait to observe completion.
func (engine *engine) Kill() {
	engine.tomb.Kill(nil)
}

// Wait is part of the worker.Worker interface. It blocks until the loop
// goroutine has exited and returns the error (if any) it died with.
func (engine *engine) Wait() error {
	return engine.tomb.Wait()
}
// Install is part of the Engine interface. It hands the manifold to the loop
// goroutine and blocks until installation succeeds or fails, or until the
// engine starts shutting down.
func (engine *engine) Install(name string, manifold Manifold) error {
	result := make(chan error)
	select {
	case <-engine.tomb.Dying():
		return errors.New("engine is shutting down")
	case engine.install <- installTicket{name, manifold, result}:
		// This is safe so long as the loop sends a result.
		return <-result
	}
}
// gotInstall handles the params originally supplied to Install. It must only be
// called from the loop goroutine.
func (engine *engine) gotInstall(name string, manifold Manifold) error {
	logger.Infof("installing %s manifold...", name)
	if _, exists := engine.manifolds[name]; exists {
		return errors.Errorf("%s manifold already installed", name)
	}
	// Warn about (but tolerate) inputs that have not yet been installed.
	for _, input := range manifold.Inputs {
		_, known := engine.manifolds[input]
		if !known {
			logger.Infof("%s manifold depends on unknown %s manifold", name, input)
		}
	}
	// Register the manifold, wire up reverse-dependency bookkeeping, and
	// kick off its first worker with no delay.
	engine.manifolds[name] = manifold
	for _, input := range manifold.Inputs {
		engine.dependents[input] = append(engine.dependents[input], name)
	}
	engine.current[name] = workerInfo{}
	engine.start(name, 0)
	return nil
}
// start invokes a runWorker goroutine for the manifold with the supplied name. It
// must only be called from the loop goroutine.
func (engine *engine) start(name string, delay time.Duration) {
	// Check preconditions. Both failures indicate an engine bug, so we kill
	// the engine; we must also return, because continuing would either use a
	// zero-value Manifold (whose nil Start func would panic in runWorker) or
	// spin up a duplicate worker for the same manifold.
	manifold, found := engine.manifolds[name]
	if !found {
		engine.tomb.Kill(errors.Errorf("fatal: unknown manifold %s", name))
		return
	}
	// Copy current info and check more preconditions.
	info := engine.current[name]
	if !info.stopped() {
		engine.tomb.Kill(errors.Errorf("fatal: trying to start a second %s manifold worker", name))
		return
	}
	// Final check that we're not shutting down yet...
	if engine.isDying() {
		logger.Infof("not starting %s manifold worker (shutting down)", name)
		return
	}
	// ...then update the info, copy it back to the engine, and start a worker
	// goroutine based on current known state.
	info.starting = true
	engine.current[name] = info
	getResource := engine.getResourceFunc(manifold.Inputs)
	go engine.runWorker(name, delay, manifold.Start, getResource)
}
// getResourceFunc returns a GetResourceFunc backed by a snapshot of current
// worker state, restricted to those workers declared in inputs. It must only
// be called from the loop goroutine; see inside for a detailed discussion of
// why we took this approach.
func (engine *engine) getResourceFunc(inputs []string) GetResourceFunc {
	// We snapshot the resources available at invocation time, rather than adding an
	// additional communicate-resource-request channel. The latter approach is not
	// unreasonable... but is prone to inelegant scrambles when starting several
	// dependent workers at once. For example:
	//
	//  * Install manifold A; loop starts worker A
	//  * Install manifold B; loop starts worker B
	//  * A communicates its worker back to loop; main thread bounces B
	//  * B asks for A, gets A, doesn't react to bounce (*)
	//  * B communicates its worker back to loop; loop kills it immediately in
	//    response to earlier bounce
	//  * loop starts worker B again, now everything's fine; but, still, yuck.
	//    This is not a happy path to take by default.
	//
	// The problem, of course, is in the (*); the main thread *does* know that B
	// needs to bounce soon anyway, and it *could* communicate that fact back via
	// an error over a channel back into getResource; the StartFunc could then
	// just return (say) that ErrResourceChanged and avoid the hassle of creating
	// a worker. But that adds a whole layer of complexity (and unpredictability
	// in tests, which is not much fun) for very little benefit.
	//
	// In the analogous scenario with snapshotted dependencies, we see a happier
	// picture at startup time:
	//
	//  * Install manifold A; loop starts worker A
	//  * Install manifold B; loop starts worker B with empty resource snapshot
	//  * A communicates its worker back to loop; main thread bounces B
	//  * B's StartFunc asks for A, gets nothing, returns ErrUnmetDependencies
	//  * loop restarts worker B with an up-to-date snapshot, B works fine
	//
	// We assume that, in the common case, most workers run without error most
	// of the time; and, thus, that the vast majority of worker startups will
	// happen as an agent starts. Furthermore, most of them will have simple
	// hard dependencies, and their Start funcs will be easy to write; the only
	// components that may be impacted by such a strategy will be those workers
	// which still want to run (with reduced functionality) with some dependency
	// unmet.
	//
	// Those may indeed suffer the occasional extra bounce as the system comes
	// to stability as it starts, or after a change; but workers *must* be
	// written for resilience in the face of arbitrary bounces *anyway*, so it
	// shouldn't be harmful.
	outputs := map[string]OutputFunc{}
	workers := map[string]worker.Worker{}
	for _, resourceName := range inputs {
		outputs[resourceName] = engine.manifolds[resourceName].Output
		workers[resourceName] = engine.current[resourceName].worker
	}
	// The returned closure reads only its own snapshot maps, so it is safe to
	// call from the worker goroutine without further synchronization.
	return func(resourceName string, out interface{}) bool {
		switch {
		case workers[resourceName] == nil:
			// Input not running (or not declared): dependency unmet.
			return false
		case outputs[resourceName] == nil:
			// Worker exists but exposes no output; succeed only for presence checks.
			return out == nil
		}
		return outputs[resourceName](workers[resourceName], out)
	}
}
// runWorker starts the supplied manifold's worker and communicates it back to the
// loop goroutine; waits for worker completion; and communicates any error encountered
// back to the loop goroutine. It must not be run on the loop goroutine.
func (engine *engine) runWorker(name string, delay time.Duration, start StartFunc, getResource GetResourceFunc) {
	// We may or may not send on started, but we *must* send on stopped.
	engine.stopped <- stoppedTicket{name, func() error {
		// Honour the restart delay, but abandon the wait if the engine dies.
		logger.Infof("starting %s manifold worker in %s...", name, delay)
		select {
		case <-time.After(delay):
		case <-engine.tomb.Dying():
			logger.Infof("not starting %s manifold worker (shutting down)", name)
			return tomb.ErrDying
		}

		logger.Infof("starting %s manifold worker", name)
		worker, err := start(getResource)
		if err != nil {
			logger.Infof("failed to start %s manifold worker: %v", name, err)
			return err
		}

		// Hand the worker to the loop; if the engine is dying instead, kill
		// the worker ourselves since the loop will never learn about it.
		logger.Infof("running %s manifold worker", name)
		select {
		case <-engine.tomb.Dying():
			logger.Infof("stopping %s manifold worker (shutting down)", name)
			worker.Kill()
		case engine.started <- startedTicket{name, worker}:
			logger.Infof("registered %s manifold worker", name)
		}

		// Either way, block until the worker completes and report its error.
		return worker.Wait()
	}()}
}
// gotStarted updates the engine to reflect the creation of a worker. It must
// only be called from the loop goroutine.
func (engine *engine) gotStarted(name string, worker worker.Worker) {
	// Copy current info; check preconditions and abort the workers if we've
	// already been asked to stop it.
	info := engine.current[name]
	switch {
	case info.worker != nil:
		// A second worker for the same manifold is an engine bug; note the
		// deliberate fallthrough, which also kills the redundant worker below.
		engine.tomb.Kill(errors.Errorf("fatal: unexpected %s manifold worker start", name))
		fallthrough
	case info.stopping, engine.isDying():
		logger.Infof("%s manifold worker no longer required", name)
		worker.Kill()
	default:
		// It's fine to use this worker; update info and copy back.
		logger.Infof("%s manifold worker started", name)
		info.starting = false
		info.worker = worker
		engine.current[name] = info

		// Any manifold that declares this one as an input needs to be restarted.
		engine.bounceDependents(name)
	}
}
// gotStopped updates the engine to reflect the demise of (or failure to create)
// a worker. It must only be called from the loop goroutine.
func (engine *engine) gotStopped(name string, err error) {
	logger.Infof("%s manifold worker stopped: %v", name, err)

	// Copy current info and check for reasons to stop the engine.
	info := engine.current[name]
	if info.stopped() {
		// A stop ticket for a worker we don't believe is running indicates
		// an engine bug.
		engine.tomb.Kill(errors.Errorf("fatal: unexpected %s manifold worker stop", name))
	} else if engine.isFatal(err) {
		engine.tomb.Kill(err)
	}

	// Reset engine info; and bail out if we can be sure there's no need to bounce.
	engine.current[name] = workerInfo{}
	if engine.isDying() {
		logger.Infof("permanently stopped %s manifold worker (shutting down)", name)
		return
	}

	// If we told the worker to stop, we should start it again immediately,
	// whatever else happened.
	if info.stopping {
		engine.start(name, engine.bounceDelay)
	} else {
		// If we didn't stop it ourselves, we need to interpret the error.
		switch err {
		case nil:
			// Nothing went wrong; the task completed successfully. Nothing
			// needs to be done (unless the inputs change, in which case it
			// gets to check again).
		case ErrUnmetDependencies:
			// The task can't even start with the current state. Nothing more
			// can be done (until the inputs change, in which case we retry
			// anyway).
		default:
			// Something went wrong but we don't know what. Try again soon.
			engine.start(name, engine.errorDelay)
		}
	}

	// Manifolds that declared a dependency on this one only need to be notified
	// if the worker has changed; if it was already nil, nobody needs to know.
	if info.worker != nil {
		engine.bounceDependents(name)
	}
}
// stop ensures that any running or starting worker will be stopped in the
// near future. It must only be called from the loop goroutine.
func (engine *engine) stop(name string) {
	info := engine.current[name]
	// A worker that is already stopped, or already on its way out, needs
	// no further attention.
	if info.stopped() || info.stopping {
		return
	}
	// Mark it as stopping, signal the worker (if any), and write the
	// updated record back into the engine's state map.
	info.stopping = true
	if info.worker != nil {
		info.worker.Kill()
	}
	engine.current[name] = info
}
// isDying returns true if the engine is shutting down. It's safe to call it
// from any goroutine, since it only performs a non-blocking read of the
// tomb's Dying channel.
func (engine *engine) isDying() bool {
	select {
	case <-engine.tomb.Dying():
		return true
	default:
		return false
	}
}

// allStopped returns true if no workers are running or starting. It must only
// be called from the loop goroutine.
func (engine *engine) allStopped() bool {
	for _, info := range engine.current {
		if !info.stopped() {
			return false
		}
	}
	return true
}
// bounceDependents starts every stopped dependent of the named manifold, and
// stops every started one (and trusts the rest of the engine to restart them).
// It must only be called from the loop goroutine.
func (engine *engine) bounceDependents(name string) {
	logger.Infof("restarting dependents of %s manifold", name)
	for _, dependent := range engine.dependents[name] {
		switch {
		case engine.current[dependent].stopped():
			engine.start(dependent, engine.bounceDelay)
		default:
			engine.stop(dependent)
		}
	}
}
// workerInfo stores what an engine needs to know about the worker for a given
// Manifold.
type workerInfo struct {
	starting bool
	stopping bool
	worker   worker.Worker
}

// stopped returns true unless the worker is either assigned or starting.
func (info workerInfo) stopped() bool {
	return info.worker == nil && !info.starting
}
// installTicket is used by engine to induce installation of a named manifold
// and pass on any errors encountered in the process.
type installTicket struct {
	name     string
	manifold Manifold
	// result carries the installation outcome back to the Install caller;
	// the sender must read it, as the loop always sends exactly once.
	result chan<- error
}

// startedTicket is used by engine to notify the loop of the creation of the
// worker for a particular manifold.
type startedTicket struct {
	name   string
	worker worker.Worker
}

// stoppedTicket is used by engine to notify the loop of the demise of (or
// failure to create) the worker for a particular manifold.
type stoppedTicket struct {
	name string
	// error is nil for a clean stop, or the startup/exit error otherwise.
	error error
}
|
// Copyright 2016, RadiantBlue Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package workflow
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"log"
"mime/multipart"
"net/http"
"strings"
"sync"
"time"
"github.com/Shopify/sarama"
"github.com/venicegeo/pz-gocommon/elasticsearch"
"github.com/venicegeo/pz-gocommon/gocommon"
pzlogger "github.com/venicegeo/pz-logger/logger"
pzuuidgen "github.com/venicegeo/pz-uuidgen/uuidgen"
)
//------------------------------------------
type LockedAdminStats struct {
sync.Mutex
WorkflowAdminStats
}
type WorkflowService struct {
eventTypeDB *EventTypeDB
eventDB *EventDB
triggerDB *TriggerDB
alertDB *AlertDB
stats LockedAdminStats
logger pzlogger.IClient
uuidgen pzuuidgen.IClient
sys *piazza.SystemConfig
}
// Default pagination settings applied when a request supplies no explicit
// paging parameters: 50 items per page, first page, ascending by each
// resource's own id field.
var defaultEventTypePagination = &piazza.JsonPagination{
	PerPage: 50,
	Page:    0,
	SortBy:  "eventTypeId",
	Order:   piazza.PaginationOrderAscending,
}

var defaultEventPagination = &piazza.JsonPagination{
	PerPage: 50,
	Page:    0,
	SortBy:  "eventId",
	Order:   piazza.PaginationOrderAscending,
}

var defaultTriggerPagination = &piazza.JsonPagination{
	PerPage: 50,
	Page:    0,
	SortBy:  "triggerId",
	Order:   piazza.PaginationOrderAscending,
}

var defaultAlertPagination = &piazza.JsonPagination{
	PerPage: 50,
	Page:    0,
	SortBy:  "alertId",
	Order:   piazza.PaginationOrderAscending,
}
//------------------------------------------
// Init wires the service to its system configuration, logging and uuid
// clients, and constructs the four resource databases over the supplied
// Elasticsearch indexes. It returns the first construction error, if any.
func (service *WorkflowService) Init(
	sys *piazza.SystemConfig,
	logger pzlogger.IClient,
	uuidgen pzuuidgen.IClient,
	eventtypesIndex elasticsearch.IIndex,
	eventsIndex elasticsearch.IIndex,
	triggersIndex elasticsearch.IIndex,
	alertsIndex elasticsearch.IIndex) error {

	service.sys = sys
	service.stats.CreatedOn = time.Now()
	service.logger = logger
	service.uuidgen = uuidgen

	var err error
	if service.eventTypeDB, err = NewEventTypeDB(service, eventtypesIndex); err != nil {
		return err
	}
	if service.eventDB, err = NewEventDB(service, eventsIndex); err != nil {
		return err
	}
	if service.triggerDB, err = NewTriggerDB(service, triggersIndex); err != nil {
		return err
	}
	if service.alertDB, err = NewAlertDB(service, alertsIndex); err != nil {
		return err
	}
	return nil
}
// newIdent requests a fresh UUID from the uuidgen client and returns it as a
// piazza.Ident. (Receiver renamed from `s` to `service` for consistency with
// every other method on WorkflowService.)
func (service *WorkflowService) newIdent() (piazza.Ident, error) {
	uuid, err := service.uuidgen.GetUuid()
	if err != nil {
		return piazza.NoIdent, err
	}
	return piazza.Ident(uuid), nil
}
// lookupEventTypeNameByEventID scans every type known to the event index and
// returns the name of the one containing a document with the given id. When
// no type contains the id, it returns "" with a nil error.
func (service *WorkflowService) lookupEventTypeNameByEventID(id piazza.Ident) (string, error) {
	types, err := service.eventDB.Esi.GetTypes()
	if err != nil {
		return "", err
	}
	for _, typ := range types {
		if service.eventDB.Esi.ItemExists(typ, id.String()) {
			// First match wins; ids are assumed unique across types —
			// TODO confirm against the indexing scheme.
			return typ, nil
		}
	}
	return "", nil
}
// sendToKafka publishes the serialized job to the space-specific
// "Request-Job-<space>" topic, keyed by jobID. All failures are wrapped with
// a numbered "Kafka-related failure" message to identify the failing step.
func (service *WorkflowService) sendToKafka(jobInstance string, jobID piazza.Ident) error {
	kafkaAddress, err := service.sys.GetAddress(piazza.PzKafka)
	if err != nil {
		return errors.New("Kafka-related failure (1): " + err.Error())
	}

	space := service.sys.Space
	topic := fmt.Sprintf("Request-Job-%s", space)
	message := jobInstance

	producer, err := sarama.NewSyncProducer([]string{kafkaAddress}, nil)
	if err != nil {
		return errors.New("Kafka-related failure (2): " + err.Error())
	}
	defer func() {
		// NOTE(review): log.Fatalf exits the whole process (and skips other
		// deferred calls) on a mere producer-close failure; consider logging
		// and continuing instead — confirm intent before changing behavior.
		if err := producer.Close(); err != nil {
			log.Fatalf("Kafka-related failure (3): " + err.Error())
		}
	}()

	msg := &sarama.ProducerMessage{Topic: topic, Value: sarama.StringEncoder(message), Key: sarama.StringEncoder(jobID)}
	// Partition and offset are not needed here, so discard them inline
	// (previously assigned to locals and blanked).
	if _, _, err := producer.SendMessage(msg); err != nil {
		return errors.New("Kafka-related failure (4): " + err.Error())
	}
	return nil
}
// postToPzGatewayJobService builds a multipart/form-data POST request for the
// gateway job service, with one form field per entry in params. It returns
// the ready-to-send request; the caller is responsible for executing it.
func (service *WorkflowService) postToPzGatewayJobService(uri string, params map[string]string) (*http.Request, error) {
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	for key, val := range params {
		// WriteField errors surface via writer.Close below.
		_ = writer.WriteField(key, val)
	}
	if err := writer.Close(); err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", uri, body)
	if err != nil {
		// BUG FIX: previously the Content-Type header was set before checking
		// this error, so a malformed uri caused a nil-pointer panic.
		return nil, err
	}
	req.Header.Add("Content-Type", writer.FormDataContentType())
	return req, nil
}
//------------------------------------------
// statusResponse builds a JsonResponse with the given status code and data
// payload, degrading to a 500 response when the payload's type cannot be
// resolved. Shared by statusOK and statusCreated to avoid duplication.
func statusResponse(code int, obj interface{}) *piazza.JsonResponse {
	resp := &piazza.JsonResponse{StatusCode: code, Data: obj}
	if err := resp.SetType(); err != nil {
		return statusInternalServerError(err)
	}
	return resp
}

// statusOK wraps obj in a 200 OK response.
func statusOK(obj interface{}) *piazza.JsonResponse {
	return statusResponse(http.StatusOK, obj)
}

// statusCreated wraps obj in a 201 Created response.
func statusCreated(obj interface{}) *piazza.JsonResponse {
	return statusResponse(http.StatusCreated, obj)
}

// statusBadRequest wraps err in a 400 Bad Request response.
func statusBadRequest(err error) *piazza.JsonResponse {
	return &piazza.JsonResponse{StatusCode: http.StatusBadRequest, Message: err.Error()}
}

// statusInternalServerError wraps err in a 500 Internal Server Error response.
func statusInternalServerError(err error) *piazza.JsonResponse {
	return &piazza.JsonResponse{StatusCode: http.StatusInternalServerError, Message: err.Error()}
}

// statusNotFound builds a 404 Not Found response naming the missing id.
func statusNotFound(id piazza.Ident) *piazza.JsonResponse {
	return &piazza.JsonResponse{StatusCode: http.StatusNotFound, Message: string(id)}
}
//------------------------------------------
// GetAdminStats returns a point-in-time copy of the service's admin
// statistics. The copy is taken under the stats mutex so the snapshot is
// consistent, and the lock is released before building the response.
func (service *WorkflowService) GetAdminStats() *piazza.JsonResponse {
	service.stats.Lock()
	t := service.stats.WorkflowAdminStats
	service.stats.Unlock()
	return statusOK(t)
}
//------------------------------------------
// GetEventType fetches a single EventType by id, answering 404 when the id
// is unknown or the lookup fails.
func (service *WorkflowService) GetEventType(id piazza.Ident) *piazza.JsonResponse {
	eventType, err := service.eventTypeDB.GetOne(id)
	if err != nil || eventType == nil {
		return statusNotFound(id)
	}
	return statusOK(eventType)
}
// GetAllEventTypes returns a paginated listing of every EventType, honouring
// the request's paging parameters (falling back to the package defaults).
func (service *WorkflowService) GetAllEventTypes(params *piazza.HttpQueryParams) *piazza.JsonResponse {
	format, err := piazza.NewJsonPagination(params, defaultEventTypePagination)
	if err != nil {
		return statusBadRequest(err)
	}

	eventTypes, count, err := service.eventTypeDB.GetAll(format)
	if err != nil {
		return statusBadRequest(err)
	}

	// Re-box the typed slice as []interface{} for the generic response payload.
	items := make([]interface{}, len(*eventTypes))
	for i, eventType := range *eventTypes {
		items[i] = eventType
	}

	format.Count = int(count)
	resp := statusOK(items)
	resp.Pagination = format
	return resp
}
// PostEventType stores a new EventType (assigning it a fresh id and creation
// time) and registers its field mapping with the event index so matching
// events can be stored and percolated.
func (service *WorkflowService) PostEventType(eventType *EventType) *piazza.JsonResponse {
	var err error
	eventType.EventTypeId, err = service.newIdent()
	if err != nil {
		return statusBadRequest(err)
	}
	eventType.CreatedOn = time.Now()

	id, err := service.eventTypeDB.PostData(eventType, eventType.EventTypeId)
	if err != nil {
		return statusBadRequest(err)
	}

	err = service.eventDB.AddMapping(eventType.Name, eventType.Mapping)
	if err != nil {
		// Roll back the stored EventType so it isn't left without a mapping.
		// NOTE(review): the rollback's own error is ignored — a failed delete
		// would leave an orphaned EventType; consider logging it.
		service.eventTypeDB.DeleteByID(id)
		return statusBadRequest(err)
	}

	return statusCreated(eventType)
}
// DeleteEventType removes the EventType with the given id, answering 404
// when no such EventType exists and 400 on a storage failure.
func (service *WorkflowService) DeleteEventType(id piazza.Ident) *piazza.JsonResponse {
	deleted, err := service.eventTypeDB.DeleteByID(id)
	if err != nil {
		return statusBadRequest(err)
	}
	if !deleted {
		return statusNotFound(id)
	}
	return statusOK(nil)
}
//------------------------------------------
// GetEvent fetches a single Event by id. The event's index type is not known
// up front, so the id is first resolved to its containing type name.
func (service *WorkflowService) GetEvent(id piazza.Ident) *piazza.JsonResponse {
	mapping, err := service.lookupEventTypeNameByEventID(id)
	if err != nil {
		return statusNotFound(id)
	}

	event, err := service.eventDB.GetOne(mapping, id)
	if err != nil || event == nil {
		return statusNotFound(id)
	}
	return statusOK(event)
}
// GetAllEvents returns a paginated listing of events, optionally filtered to
// a single event type selected via the eventTypeId or eventTypeName query
// parameter (eventTypeId takes precedence when both are supplied).
func (service *WorkflowService) GetAllEvents(params *piazza.HttpQueryParams) *piazza.JsonResponse {
	format, err := piazza.NewJsonPagination(params, defaultEventPagination)
	if err != nil {
		return statusBadRequest(err)
	}

	// if both specified, "by id"" wins
	eventTypeId := params.Get("eventTypeId")
	eventTypeName := params.Get("eventTypeName")

	// query is the type-name filter passed to the event store; "" means all.
	query := ""

	// Get the eventTypeName corresponding to the eventTypeId
	if eventTypeId != "" {
		eventType, err := service.eventTypeDB.GetOne(piazza.Ident(eventTypeId))
		if err != nil {
			return statusBadRequest(err)
		}
		query = eventType.Name
	} else if eventTypeName != "" {
		query = eventTypeName
	}

	m, count, err := service.eventDB.GetAll(query, format)
	if err != nil {
		return statusBadRequest(err)
	}

	// Re-box the typed slice as []interface{} for the generic response payload.
	bar := make([]interface{}, len(*m))
	for i, e := range *m {
		bar[i] = e
	}

	format.Count = int(count)
	resp := statusOK(bar)
	resp.Pagination = format
	return resp
}
// PostEvent stores a new Event (assigning it a fresh id and creation time),
// percolates it against all registered triggers, and, for each matching
// enabled trigger of the right event type, submits the trigger's job (with
// $key placeholders substituted from the event data) and raises an alert.
// The first non-nil per-trigger error response, if any, is returned;
// otherwise a 201 with the event.
func (service *WorkflowService) PostEvent(event *Event) *piazza.JsonResponse {
	eventTypeId := event.EventTypeId
	eventType, err := service.eventTypeDB.GetOne(eventTypeId)
	if err != nil {
		return statusBadRequest(err)
	}

	event.EventId, err = service.newIdent()
	if err != nil {
		return statusBadRequest(err)
	}
	event.CreatedOn = time.Now()

	_, err = service.eventDB.PostData(eventType.Name, event, event.EventId)
	if err != nil {
		return statusBadRequest(err)
	}

	{
		// Find triggers associated with event
		triggerIDs, err := service.eventDB.PercolateEventData(eventType.Name, event.Data, event.EventId)
		if err != nil {
			return statusBadRequest(err)
		}

		// For each trigger, apply the event data and submit job
		var waitGroup sync.WaitGroup

		// BUG FIX: results was written concurrently by all trigger goroutines
		// with no synchronization — a data race (concurrent map writes can
		// crash the runtime). All writes now go through a mutex-guarded setter.
		var resultsMu sync.Mutex
		results := make(map[piazza.Ident]*piazza.JsonResponse)
		setResult := func(id piazza.Ident, resp *piazza.JsonResponse) {
			resultsMu.Lock()
			defer resultsMu.Unlock()
			results[id] = resp
		}

		for _, triggerID := range *triggerIDs {
			waitGroup.Add(1)
			go func(triggerID piazza.Ident) {
				defer waitGroup.Done()

				trigger, err := service.triggerDB.GetOne(triggerID)
				if err != nil {
					setResult(triggerID, statusBadRequest(err))
					return
				}
				if trigger == nil {
					setResult(triggerID, statusNotFound(triggerID))
					return
				}
				if trigger.Disabled == 1 {
					setResult(triggerID, statusOK(triggerID))
					return
				}

				// Not the best way to do this, but should disallow Triggers from firing if they
				// don't have the same Eventtype as the Event
				// Would rather have this done via the percolation itself ...
				matches := false
				for _, conditionTypeID := range trigger.Condition.EventTypeIds {
					if conditionTypeID == eventType.EventTypeId {
						matches = true
						break
					}
				}
				if !matches {
					return
				}

				// JobID gets sent through Kafka as the key
				job := trigger.Job
				jobID, err := service.newIdent()
				if err != nil {
					setResult(triggerID, statusInternalServerError(err))
					return
				}

				jobInstance, err := json.Marshal(job)
				if err != nil {
					// BUG FIX: the marshal error was previously ignored.
					setResult(triggerID, statusInternalServerError(err))
					return
				}
				jobString := string(jobInstance)

				log.Printf("trigger: %v\n", trigger)
				log.Printf("\tJob: %v\n\n", jobString)

				// Not very robust, need to find a better way
				for key, value := range event.Data {
					jobString = strings.Replace(jobString, "$"+key, fmt.Sprintf("%v", value), 1)
				}

				log.Printf("jobInstance: %s\n\n", jobString)
				service.logger.Info("job submission: %s\n", jobString)

				err = service.sendToKafka(jobString, jobID)
				if err != nil {
					setResult(triggerID, statusInternalServerError(err))
					return
				}

				// TODO: should really just call service.PostAlert()
				err = service.sendAlert(event.EventId, triggerID, jobID)
				if err != nil {
					setResult(triggerID, statusInternalServerError(err))
					return
				}
			}(triggerID)
		}
		waitGroup.Wait()

		// Surface the first recorded failure response, if any.
		for _, v := range results {
			if v != nil {
				return v
			}
		}
	}

	return statusCreated(event)
}
// sendAlert records an Alert linking the given event, trigger, and submitted
// job, assigning the alert a fresh id and creation time.
func (service *WorkflowService) sendAlert(
	eventId piazza.Ident,
	triggerId piazza.Ident,
	jobId piazza.Ident) error {

	newid, err := service.newIdent()
	if err != nil {
		return err
	}

	alert := Alert{AlertId: newid, EventId: eventId, TriggerId: triggerId, JobId: jobId}
	alert.CreatedOn = time.Now()
	log.Printf("Alert issued: %#v", alert)

	// BUG FIX: on a PostData failure this previously returned `err` — which
	// was necessarily nil at this point — instead of the PostData error,
	// silently swallowing storage failures.
	if _, postErr := service.alertDB.PostData(&alert, alert.AlertId); postErr != nil {
		return postErr
	}
	return nil
}
// DeleteEvent removes the Event with the given id, first resolving which
// index type contains it. Answers 404 when the event does not exist.
func (service *WorkflowService) DeleteEvent(id piazza.Ident) *piazza.JsonResponse {
	mapping, err := service.lookupEventTypeNameByEventID(id)
	if err != nil {
		return statusBadRequest(err)
	}

	deleted, err := service.eventDB.DeleteByID(mapping, id)
	if err != nil {
		return statusBadRequest(err)
	}
	if !deleted {
		return statusNotFound(id)
	}
	return statusOK(nil)
}
//------------------------------------------
// GetTrigger fetches a single Trigger by id, answering 404 when the id is
// unknown or the lookup fails.
func (service *WorkflowService) GetTrigger(id piazza.Ident) *piazza.JsonResponse {
	trigger, err := service.triggerDB.GetOne(id)
	if err != nil || trigger == nil {
		return statusNotFound(id)
	}
	return statusOK(trigger)
}
// GetAllTriggers returns a paginated listing of every Trigger, honouring the
// request's paging parameters (falling back to the package defaults).
func (service *WorkflowService) GetAllTriggers(params *piazza.HttpQueryParams) *piazza.JsonResponse {
	format, err := piazza.NewJsonPagination(params, defaultTriggerPagination)
	if err != nil {
		return statusBadRequest(err)
	}

	triggers, count, err := service.triggerDB.GetAll(format)
	if err != nil {
		return statusBadRequest(err)
	}

	// Re-box the typed slice as []interface{} for the generic response payload.
	items := make([]interface{}, len(*triggers))
	for i, trigger := range *triggers {
		items[i] = trigger
	}

	format.Count = int(count)
	resp := statusOK(items)
	resp.Pagination = format
	return resp
}
// PostTrigger stores a new Trigger, assigning it a fresh id and creation time.
func (service *WorkflowService) PostTrigger(trigger *Trigger) *piazza.JsonResponse {
	newID, err := service.newIdent()
	if err != nil {
		return statusBadRequest(err)
	}
	trigger.TriggerId = newID
	trigger.CreatedOn = time.Now()

	if _, err := service.triggerDB.PostTrigger(trigger, trigger.TriggerId); err != nil {
		return statusBadRequest(err)
	}
	return statusCreated(trigger)
}
// DeleteTrigger removes the Trigger with the given id, answering 404 when no
// such trigger exists and 400 on a storage failure.
func (service *WorkflowService) DeleteTrigger(id piazza.Ident) *piazza.JsonResponse {
	deleted, err := service.triggerDB.DeleteTrigger(id)
	if err != nil {
		return statusBadRequest(err)
	}
	if !deleted {
		return statusNotFound(id)
	}
	return statusOK(nil)
}
//------------------------------------------
// GetAlert fetches a single Alert by id, answering 404 when the id is
// unknown or the lookup fails.
func (service *WorkflowService) GetAlert(id piazza.Ident) *piazza.JsonResponse {
	alert, err := service.alertDB.GetOne(id)
	if err != nil || alert == nil {
		return statusNotFound(id)
	}
	return statusOK(alert)
}
// GetAllAlerts returns a paginated listing of alerts. When the request
// supplies a well-formed triggerId query parameter, only alerts raised by
// that trigger are returned; a malformed (non-UUID, non-empty) triggerId is
// rejected with 400.
func (service *WorkflowService) GetAllAlerts(params *piazza.HttpQueryParams) *piazza.JsonResponse {
	triggerId := params.Get("triggerId")

	format, err := piazza.NewJsonPagination(params, defaultAlertPagination)
	if err != nil {
		return statusBadRequest(err)
	}

	var all *[]Alert
	var count int64
	if isUuid(triggerId) {
		// Filtered listing: only alerts belonging to the given trigger.
		all, count, err = service.alertDB.GetAllByTrigger(format, triggerId)
		if err != nil {
			return statusBadRequest(err)
		}
	} else if triggerId == "" {
		// Unfiltered listing.
		all, count, err = service.alertDB.GetAll(format)
		if err != nil {
			return statusBadRequest(err)
		}
	} else { // Malformed triggerId
		return statusBadRequest(errors.New("Malformed triggerId query parameter"))
	}

	// Re-box the typed slice as []interface{} for the generic response payload.
	bar := make([]interface{}, len(*all))
	for i, e := range *all {
		bar[i] = e
	}

	format.Count = int(count)
	resp := statusOK(bar)
	resp.Pagination = format
	return resp
}
// PostAlert stores a new Alert, assigning it a fresh id and creation time.
func (service *WorkflowService) PostAlert(alert *Alert) *piazza.JsonResponse {
	var err error
	alert.AlertId, err = service.newIdent()
	if err != nil {
		return statusBadRequest(err)
	}
	alert.CreatedOn = time.Now()

	// BUG FIX: previously passed &alert — a **Alert — because alert is
	// already a pointer; now passes the *Alert itself, consistent with
	// PostTrigger and with sendAlert (which takes the address of a value).
	_, err = service.alertDB.PostData(alert, alert.AlertId)
	if err != nil {
		return statusInternalServerError(err)
	}
	return statusCreated(alert)
}
// DeleteAlert removes the alert with the given id.
// Responds 400 on a datastore error, 404 when it does not exist,
// and 200 on success.
func (service *WorkflowService) DeleteAlert(id piazza.Ident) *piazza.JsonResponse {
	deleted, err := service.alertDB.DeleteByID(id)
	switch {
	case err != nil:
		return statusBadRequest(err)
	case !deleted:
		return statusNotFound(id)
	}
	return statusOK(nil)
}
debugging
// Copyright 2016, RadiantBlue Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package workflow
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"log"
"mime/multipart"
"net/http"
"strings"
"sync"
"time"
"github.com/Shopify/sarama"
"github.com/venicegeo/pz-gocommon/elasticsearch"
"github.com/venicegeo/pz-gocommon/gocommon"
pzlogger "github.com/venicegeo/pz-logger/logger"
pzuuidgen "github.com/venicegeo/pz-uuidgen/uuidgen"
)
//------------------------------------------
// LockedAdminStats guards the service's admin statistics with a mutex;
// callers must Lock/Unlock around access to the embedded stats.
type LockedAdminStats struct {
	sync.Mutex
	WorkflowAdminStats
}

// WorkflowService ties together the four datastores (event types, events,
// triggers, alerts) with the shared logger, uuid generator, and system config.
type WorkflowService struct {
	eventTypeDB *EventTypeDB
	eventDB     *EventDB
	triggerDB   *TriggerDB
	alertDB     *AlertDB
	stats       LockedAdminStats
	logger      pzlogger.IClient
	uuidgen     pzuuidgen.IClient
	sys         *piazza.SystemConfig
}

// Default pagination (50 per page, ascending by the resource's natural id
// field) used by each "get all" endpoint when the request does not override.
var defaultEventTypePagination = &piazza.JsonPagination{
	PerPage: 50,
	Page:    0,
	SortBy:  "eventTypeId",
	Order:   piazza.PaginationOrderAscending,
}
var defaultEventPagination = &piazza.JsonPagination{
	PerPage: 50,
	Page:    0,
	SortBy:  "eventId",
	Order:   piazza.PaginationOrderAscending,
}
var defaultTriggerPagination = &piazza.JsonPagination{
	PerPage: 50,
	Page:    0,
	SortBy:  "triggerId",
	Order:   piazza.PaginationOrderAscending,
}
var defaultAlertPagination = &piazza.JsonPagination{
	PerPage: 50,
	Page:    0,
	SortBy:  "alertId",
	Order:   piazza.PaginationOrderAscending,
}
//------------------------------------------
// Init wires the service to its collaborators and constructs the four DB
// wrappers over the provided Elasticsearch indices. The first DB
// construction failure aborts initialization and is returned.
func (service *WorkflowService) Init(
	sys *piazza.SystemConfig,
	logger pzlogger.IClient,
	uuidgen pzuuidgen.IClient,
	eventtypesIndex elasticsearch.IIndex,
	eventsIndex elasticsearch.IIndex,
	triggersIndex elasticsearch.IIndex,
	alertsIndex elasticsearch.IIndex) error {
	service.sys = sys
	service.stats.CreatedOn = time.Now()
	var err error
	service.logger = logger
	service.uuidgen = uuidgen
	service.eventTypeDB, err = NewEventTypeDB(service, eventtypesIndex)
	if err != nil {
		return err
	}
	service.eventDB, err = NewEventDB(service, eventsIndex)
	if err != nil {
		return err
	}
	service.triggerDB, err = NewTriggerDB(service, triggersIndex)
	if err != nil {
		return err
	}
	service.alertDB, err = NewAlertDB(service, alertsIndex)
	if err != nil {
		return err
	}
	return nil
}
// newIdent asks the uuidgen service for a fresh UUID and wraps it as an
// Ident; on failure it returns NoIdent and the error.
func (s *WorkflowService) newIdent() (piazza.Ident, error) {
	u, err := s.uuidgen.GetUuid()
	if err == nil {
		return piazza.Ident(u), nil
	}
	return piazza.NoIdent, err
}
// lookupEventTypeNameByEventID scans the event index's mapping types and
// returns the first one that contains a document with the given id.
// An empty string with a nil error means no type claimed the id.
func (service *WorkflowService) lookupEventTypeNameByEventID(id piazza.Ident) (string, error) {
	types, err := service.eventDB.Esi.GetTypes()
	if err != nil {
		return "", err
	}
	for _, typ := range types {
		if service.eventDB.Esi.ItemExists(typ, id.String()) {
			return typ, nil
		}
	}
	return "", nil
}
// sendToKafka publishes a job payload to the space-scoped
// "Request-Job-<space>" topic, keyed by jobID so consumers can correlate
// the message with the submitted job. Each numbered error message
// identifies which step of the pipeline failed.
func (service *WorkflowService) sendToKafka(jobInstance string, jobID piazza.Ident) error {
	kafkaAddress, err := service.sys.GetAddress(piazza.PzKafka)
	if err != nil {
		return errors.New("Kafka-related failure (1): " + err.Error())
	}

	topic := fmt.Sprintf("Request-Job-%s", service.sys.Space)

	producer, err := sarama.NewSyncProducer([]string{kafkaAddress}, nil)
	if err != nil {
		return errors.New("Kafka-related failure (2): " + err.Error())
	}
	defer func() {
		// Log instead of log.Fatalf (as the original did): a failure to
		// close the producer is a cleanup problem and must not terminate
		// the whole service process.
		if cerr := producer.Close(); cerr != nil {
			log.Printf("Kafka-related failure (3): %s", cerr.Error())
		}
	}()

	msg := &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.StringEncoder(jobInstance),
		Key:   sarama.StringEncoder(jobID),
	}
	// Partition and offset are not needed; only the error matters here.
	if _, _, err := producer.SendMessage(msg); err != nil {
		return errors.New("Kafka-related failure (4): " + err.Error())
	}
	return nil
}
// postToPzGatewayJobService builds (but does not send) a multipart/form-data
// POST request to the given uri, with one form field per entry in params.
func (service *WorkflowService) postToPzGatewayJobService(uri string, params map[string]string) (*http.Request, error) {
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	for key, val := range params {
		// WriteField failures (writer misuse) will surface again in Close.
		_ = writer.WriteField(key, val)
	}
	if err := writer.Close(); err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", uri, body)
	if err != nil {
		// The original set a header on req before checking err, which
		// panics on a malformed uri; bail out first.
		return nil, err
	}
	req.Header.Add("Content-Type", writer.FormDataContentType())
	return req, nil
}
//------------------------------------------
// statusOK wraps obj in a 200 response; falls back to a 500 response if
// the payload's type cannot be determined.
func statusOK(obj interface{}) *piazza.JsonResponse {
	resp := &piazza.JsonResponse{StatusCode: http.StatusOK, Data: obj}
	if err := resp.SetType(); err != nil {
		return statusInternalServerError(err)
	}
	return resp
}
// statusCreated wraps obj in a 201 response; falls back to a 500 response
// if the payload's type cannot be determined.
func statusCreated(obj interface{}) *piazza.JsonResponse {
	resp := &piazza.JsonResponse{StatusCode: http.StatusCreated, Data: obj}
	if err := resp.SetType(); err != nil {
		return statusInternalServerError(err)
	}
	return resp
}
// statusBadRequest wraps err's message in a 400 response.
func statusBadRequest(err error) *piazza.JsonResponse {
	return &piazza.JsonResponse{
		StatusCode: http.StatusBadRequest,
		Message:    err.Error(),
	}
}
// statusInternalServerError wraps err's message in a 500 response.
func statusInternalServerError(err error) *piazza.JsonResponse {
	return &piazza.JsonResponse{
		StatusCode: http.StatusInternalServerError,
		Message:    err.Error(),
	}
}
// statusNotFound builds a 404 response whose message is the missing id.
func statusNotFound(id piazza.Ident) *piazza.JsonResponse {
	return &piazza.JsonResponse{
		StatusCode: http.StatusNotFound,
		Message:    string(id),
	}
}
//------------------------------------------
// GetAdminStats returns a snapshot of the admin statistics, copying the
// struct while holding the lock so readers never observe a half-updated
// value.
func (service *WorkflowService) GetAdminStats() *piazza.JsonResponse {
	service.stats.Lock()
	t := service.stats.WorkflowAdminStats
	service.stats.Unlock()
	return statusOK(t)
}
//------------------------------------------
// GetEventType fetches one event type by id; responds 404 when missing or
// on lookup error, 200 with the event type otherwise.
func (service *WorkflowService) GetEventType(id piazza.Ident) *piazza.JsonResponse {
	eventType, err := service.eventTypeDB.GetOne(id)
	if err != nil || eventType == nil {
		return statusNotFound(id)
	}
	return statusOK(eventType)
}
// GetAllEventTypes lists event types using the request's pagination
// parameters (falling back to defaultEventTypePagination), returning the
// page plus the total count attached to the pagination block.
func (service *WorkflowService) GetAllEventTypes(params *piazza.HttpQueryParams) *piazza.JsonResponse {
	format, err := piazza.NewJsonPagination(params, defaultEventTypePagination)
	if err != nil {
		return statusBadRequest(err)
	}
	ets, count, err := service.eventTypeDB.GetAll(format)
	if err != nil {
		return statusBadRequest(err)
	}
	// Repackage []EventType as []interface{} for the generic response body.
	bar := make([]interface{}, len(*ets))
	for i, e := range *ets {
		bar[i] = e
	}
	format.Count = int(count)
	resp := statusOK(bar)
	resp.Pagination = format
	return resp
}
// PostEventType stores a new event type and registers its field mapping on
// the event index so future events of this type can be indexed and
// percolated against trigger conditions.
func (service *WorkflowService) PostEventType(eventType *EventType) *piazza.JsonResponse {
	var err error
	eventType.EventTypeId, err = service.newIdent()
	if err != nil {
		return statusBadRequest(err)
	}
	eventType.CreatedOn = time.Now()
	id, err := service.eventTypeDB.PostData(eventType, eventType.EventTypeId)
	if err != nil {
		return statusBadRequest(err)
	}
	log.Printf("New EventType with id: %s\n", eventType.EventTypeId)
	err = service.eventDB.AddMapping(eventType.Name, eventType.Mapping)
	if err != nil {
		// Roll back the stored event type when the mapping cannot be added.
		// NOTE(review): the rollback's ok/error results are discarded here —
		// a failed rollback leaves an event type without a mapping.
		service.eventTypeDB.DeleteByID(id)
		return statusBadRequest(err)
	}
	log.Printf("EventType Mapping: %s, Name: %s\n", eventType.Mapping, eventType.Name)
	return statusCreated(eventType)
}
// DeleteEventType removes an event type by id: 400 on a datastore error,
// 404 when it does not exist, 200 otherwise.
func (service *WorkflowService) DeleteEventType(id piazza.Ident) *piazza.JsonResponse {
	deleted, err := service.eventTypeDB.DeleteByID(id)
	switch {
	case err != nil:
		return statusBadRequest(err)
	case !deleted:
		return statusNotFound(id)
	}
	return statusOK(nil)
}
//------------------------------------------
// GetEvent resolves the event's mapping type from its id, then fetches the
// event document; responds 404 on any lookup failure or when the event is
// absent.
func (service *WorkflowService) GetEvent(id piazza.Ident) *piazza.JsonResponse {
	mapping, err := service.lookupEventTypeNameByEventID(id)
	if err != nil {
		return statusNotFound(id)
	}
	event, err := service.eventDB.GetOne(mapping, id)
	if err != nil || event == nil {
		return statusNotFound(id)
	}
	return statusOK(event)
}
// GetAllEvents lists events, optionally restricted to one event type.
// Callers may filter by eventTypeId or eventTypeName; if both are given,
// the id wins. The resolved type name is the mapping queried in the event
// store.
func (service *WorkflowService) GetAllEvents(params *piazza.HttpQueryParams) *piazza.JsonResponse {
	format, err := piazza.NewJsonPagination(params, defaultEventPagination)
	if err != nil {
		return statusBadRequest(err)
	}

	// If both are specified, "by id" wins.
	eventTypeId := params.Get("eventTypeId")
	eventTypeName := params.Get("eventTypeName")

	query := ""
	if eventTypeId != "" {
		eventType, err := service.eventTypeDB.GetOne(piazza.Ident(eventTypeId))
		if err != nil {
			return statusBadRequest(err)
		}
		// The original dereferenced eventType without a nil check, which
		// panics for an unknown id (GetOne can return nil with no error,
		// as GetEventType's handling shows); report 404 instead.
		if eventType == nil {
			return statusNotFound(piazza.Ident(eventTypeId))
		}
		query = eventType.Name
	} else if eventTypeName != "" {
		query = eventTypeName
	}

	m, count, err := service.eventDB.GetAll(query, format)
	if err != nil {
		return statusBadRequest(err)
	}

	// Repackage []Event as []interface{} for the generic response body.
	bar := make([]interface{}, len(*m))
	for i, e := range *m {
		bar[i] = e
	}

	format.Count = int(count)
	resp := statusOK(bar)
	resp.Pagination = format
	return resp
}
// PostEvent stores a new event and fires every matching trigger: the event
// data is percolated against registered trigger conditions, each matching
// trigger's job template is instantiated with the event data, and the job
// is submitted via Kafka (with a corresponding alert recorded).
// Responds 201 with the event unless storage or any trigger firing fails.
func (service *WorkflowService) PostEvent(event *Event) *piazza.JsonResponse {
	eventType, err := service.eventTypeDB.GetOne(event.EventTypeId)
	if err != nil {
		return statusBadRequest(err)
	}

	event.EventId, err = service.newIdent()
	if err != nil {
		return statusBadRequest(err)
	}
	event.CreatedOn = time.Now()

	_, err = service.eventDB.PostData(eventType.Name, event, event.EventId)
	if err != nil {
		return statusBadRequest(err)
	}

	{
		// Find triggers whose conditions match this event.
		triggerIDs, err := service.eventDB.PercolateEventData(eventType.Name, event.Data, event.EventId)
		if err != nil {
			return statusBadRequest(err)
		}

		// Fire each trigger concurrently. The results map is shared across
		// goroutines, so writes must be serialized: the original wrote to
		// it unsynchronized, which is a data race (and can corrupt the map).
		var waitGroup sync.WaitGroup
		var resultsMu sync.Mutex
		results := make(map[piazza.Ident]*piazza.JsonResponse)
		setResult := func(id piazza.Ident, resp *piazza.JsonResponse) {
			resultsMu.Lock()
			results[id] = resp
			resultsMu.Unlock()
		}

		for _, triggerID := range *triggerIDs {
			waitGroup.Add(1)
			go func(triggerID piazza.Ident) {
				defer waitGroup.Done()

				trigger, err := service.triggerDB.GetOne(triggerID)
				if err != nil {
					setResult(triggerID, statusBadRequest(err))
					return
				}
				if trigger == nil {
					setResult(triggerID, statusNotFound(triggerID))
					return
				}
				if trigger.Disabled == 1 {
					setResult(triggerID, statusOK(triggerID))
					return
				}

				// Only fire triggers whose condition lists this event's
				// event type; percolation alone does not guarantee that.
				// Would rather have this done via the percolation itself.
				matches := false
				for _, etID := range trigger.Condition.EventTypeIds {
					if etID == eventType.EventTypeId {
						matches = true
						break
					}
				}
				if !matches {
					return
				}

				// JobID is sent through Kafka as the message key.
				jobID, err := service.newIdent()
				if err != nil {
					setResult(triggerID, statusInternalServerError(err))
					return
				}

				jobInstance, err := json.Marshal(trigger.Job)
				if err != nil {
					// The original ignored this error; a job that cannot
					// be serialized must not be submitted.
					setResult(triggerID, statusInternalServerError(err))
					return
				}
				jobString := string(jobInstance)

				// Substitute "$key" placeholders with event data values.
				// NOTE(review): plain first-occurrence string replacement
				// is fragile (no escaping); kept for compatibility.
				for key, value := range event.Data {
					jobString = strings.Replace(jobString, "$"+key, fmt.Sprintf("%v", value), 1)
				}

				log.Printf("trigger: %v\n", trigger)
				log.Printf("jobInstance: %s\n\n", jobString)
				service.logger.Info("job submission: %s\n", jobString)

				if err := service.sendToKafka(jobString, jobID); err != nil {
					setResult(triggerID, statusInternalServerError(err))
					return
				}

				// TODO: should really just call service.PostAlert()
				if err := service.sendAlert(event.EventId, triggerID, jobID); err != nil {
					setResult(triggerID, statusInternalServerError(err))
					return
				}
			}(triggerID)
		}
		waitGroup.Wait()

		// Surface the first recorded failure, if any.
		for _, v := range results {
			if v != nil {
				return v
			}
		}
	}

	return statusCreated(event)
}
// sendAlert records an alert tying together the event, the trigger that
// fired, and the job that was submitted. Returns any id-generation or
// datastore error.
func (service *WorkflowService) sendAlert(
	eventId piazza.Ident,
	triggerId piazza.Ident,
	jobId piazza.Ident) error {
	newid, err := service.newIdent()
	if err != nil {
		return err
	}
	alert := Alert{AlertId: newid, EventId: eventId, TriggerId: triggerId, JobId: jobId}
	alert.CreatedOn = time.Now()
	log.Printf("Alert issued: %#v", alert)
	// BUG FIX: the original returned the stale (nil) uuid error here
	// instead of the PostData error, silently swallowing write failures.
	if _, postErr := service.alertDB.PostData(&alert, alert.AlertId); postErr != nil {
		return postErr
	}
	return nil
}
// DeleteEvent removes an event: it resolves the event's mapping type from
// its id, then deletes the document. 400 on errors, 404 when the event is
// absent, 200 on success.
func (service *WorkflowService) DeleteEvent(id piazza.Ident) *piazza.JsonResponse {
	mapping, err := service.lookupEventTypeNameByEventID(id)
	if err != nil {
		return statusBadRequest(err)
	}
	deleted, err := service.eventDB.DeleteByID(mapping, id)
	switch {
	case err != nil:
		return statusBadRequest(err)
	case !deleted:
		return statusNotFound(id)
	}
	return statusOK(nil)
}
//------------------------------------------
// GetTrigger fetches a single trigger by id; responds 404 when missing or
// on lookup error, 200 with the trigger otherwise.
func (service *WorkflowService) GetTrigger(id piazza.Ident) *piazza.JsonResponse {
	trigger, err := service.triggerDB.GetOne(id)
	if err != nil || trigger == nil {
		return statusNotFound(id)
	}
	return statusOK(trigger)
}
// GetAllTriggers lists triggers using the request's pagination parameters
// (falling back to defaultTriggerPagination), returning the page plus the
// total count attached to the pagination block.
func (service *WorkflowService) GetAllTriggers(params *piazza.HttpQueryParams) *piazza.JsonResponse {
	format, err := piazza.NewJsonPagination(params, defaultTriggerPagination)
	if err != nil {
		return statusBadRequest(err)
	}
	m, count, err := service.triggerDB.GetAll(format)
	if err != nil {
		return statusBadRequest(err)
	}
	// Repackage []Trigger as []interface{} for the generic response body.
	bar := make([]interface{}, len(*m))
	for i, e := range *m {
		bar[i] = e
	}
	format.Count = int(count)
	resp := statusOK(bar)
	resp.Pagination = format
	return resp
}
// PostTrigger stores a new trigger, assigning it a fresh id and creation
// time; 400 on any failure, 201 with the stored trigger on success.
func (service *WorkflowService) PostTrigger(trigger *Trigger) *piazza.JsonResponse {
	id, err := service.newIdent()
	if err != nil {
		return statusBadRequest(err)
	}
	trigger.TriggerId = id
	trigger.CreatedOn = time.Now()
	if _, err := service.triggerDB.PostTrigger(trigger, trigger.TriggerId); err != nil {
		return statusBadRequest(err)
	}
	return statusCreated(trigger)
}
// DeleteTrigger removes the trigger with the given id.
// 400 on a datastore error, 404 when it does not exist, 200 on success.
func (service *WorkflowService) DeleteTrigger(id piazza.Ident) *piazza.JsonResponse {
	ok, err := service.triggerDB.DeleteTrigger(piazza.Ident(id))
	if err != nil {
		return statusBadRequest(err)
	}
	if !ok {
		return statusNotFound(id)
	}
	return statusOK(nil)
}
//------------------------------------------
// GetAlert looks up a single alert by id; 404 when the alert is missing or
// the lookup fails, 200 with the alert otherwise.
func (service *WorkflowService) GetAlert(id piazza.Ident) *piazza.JsonResponse {
	alert, err := service.alertDB.GetOne(id)
	if err != nil {
		return statusNotFound(id)
	}
	if alert == nil {
		return statusNotFound(id)
	}
	return statusOK(alert)
}
// GetAllAlerts lists alerts, optionally filtered by the triggerId query
// parameter (which must be a UUID when present); responds 400 on a
// malformed triggerId or any datastore/pagination error.
func (service *WorkflowService) GetAllAlerts(params *piazza.HttpQueryParams) *piazza.JsonResponse {
	triggerId := params.Get("triggerId")
	format, err := piazza.NewJsonPagination(params, defaultAlertPagination)
	if err != nil {
		return statusBadRequest(err)
	}
	var all *[]Alert
	var count int64
	if isUuid(triggerId) {
		// Only alerts raised by this trigger.
		all, count, err = service.alertDB.GetAllByTrigger(format, triggerId)
		if err != nil {
			return statusBadRequest(err)
		}
	} else if triggerId == "" {
		// No filter: all alerts, paginated.
		all, count, err = service.alertDB.GetAll(format)
		if err != nil {
			return statusBadRequest(err)
		}
	} else { // Malformed triggerId
		return statusBadRequest(errors.New("Malformed triggerId query parameter"))
	}
	// Repackage []Alert as []interface{} for the generic response body.
	bar := make([]interface{}, len(*all))
	for i, e := range *all {
		bar[i] = e
	}
	format.Count = int(count)
	resp := statusOK(bar)
	resp.Pagination = format
	return resp
}
// PostAlert stores a new alert, assigning it a fresh id and creation time.
// Responds 400 if an id cannot be generated, 500 if the write fails, and
// 201 with the stored alert on success.
func (service *WorkflowService) PostAlert(alert *Alert) *piazza.JsonResponse {
	var err error
	alert.AlertId, err = service.newIdent()
	if err != nil {
		return statusBadRequest(err)
	}
	alert.CreatedOn = time.Now()
	// Pass the *Alert itself; the original passed &alert (a **Alert),
	// storing a pointer-to-pointer in the datastore document.
	_, err = service.alertDB.PostData(alert, alert.AlertId)
	if err != nil {
		return statusInternalServerError(err)
	}
	return statusCreated(alert)
}
// DeleteAlert removes the alert with the given id.
// 400 on a datastore error, 404 when it does not exist, 200 on success.
func (service *WorkflowService) DeleteAlert(id piazza.Ident) *piazza.JsonResponse {
	ok, err := service.alertDB.DeleteByID(id)
	if err != nil {
		return statusBadRequest(err)
	}
	if !ok {
		return statusNotFound(id)
	}
	return statusOK(nil)
}
|
// Copyright (c) 2016 Ivan A Kostko (github.com/ivan-kostko)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package CustomErrors
import (
"fmt"
)
//go:generate stringer -type=ErrorType
// Represents enum of predefined error types
type ErrorType int
const (
BasicError ErrorType = iota
InvalidOperation
InvalidArgument
AccessViolation
Nonsupported
)
// Represents custom error as tuple Type + Message.
type Error struct {
Type ErrorType
Message string
}
// Implementation of standart error interface
func (e Error) Error() string {
return fmt.Sprintf("%T{Type:%s, Message:%s}", e, e.Type, e.Message)
}
// Error factory
func NewError(typ ErrorType, msg string) *Error {
return &Error{
Type: typ,
Message: msg,
}
}
NewErrorF
// Copyright (c) 2016 Ivan A Kostko (github.com/ivan-kostko)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package CustomErrors
import (
"fmt"
)
//go:generate stringer -type=ErrorType
// Represents enum of predefined error types
type ErrorType int
const (
BasicError ErrorType = iota
InvalidOperation
InvalidArgument
AccessViolation
Nonsupported
)
// Represents custom error as tuple Type + Message.
type Error struct {
Type ErrorType
Message string
}
// Implementation of standart error interface
func (e Error) Error() string {
return fmt.Sprintf("%T{Type:%s, Message:%s}", e, e.Type, e.Message)
}
// Error factory
func NewError(typ ErrorType, msg string) *Error {
return &Error{
Type: typ,
Message: msg,
}
}
// Error factory generating message in fmt.Sprintf manner
func NewErrorF(typ ErrorType, baseMsg string, args ...interface{}) *Error {
msg := fmt.Sprintf(baseMsg, args...)
return NewError(typ, msg)
}
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// gen runs go generate on Unicode- and CLDR-related package in the text
// repositories, taking into account dependencies and versions.
package main
import (
"bytes"
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"unicode"
"golang.org/x/text/internal/gen"
)
// Command-line configuration for the generator driver.
var (
	verbose     = flag.Bool("v", false, "verbose output")
	force       = flag.Bool("force", false, "ignore failing dependencies")
	excludeList = flag.String("exclude", "",
		"comma-separated list of packages to exclude")

	// The user can specify a selection of packages to build on the command line.
	args []string
)
// exclude reports whether pkg should be skipped: when an explicit package
// selection was given on the command line, anything not in it is skipped;
// otherwise anything named in the -exclude list is skipped.
func exclude(pkg string) bool {
	if len(args) == 0 {
		return contains(strings.Split(*excludeList, ","), pkg)
	}
	return !contains(args, pkg)
}
// TODO:
// - Better version handling.
// - Generate tables for the core unicode package?
// - Add generation for encodings. This requires some retooling here and there.
// - Running repo-wide "long" tests.

// vprintf prints progress output; main replaces it with a no-op unless -v
// was given.
var vprintf = fmt.Printf
// main generates and tests the text-repo packages in dependency order: it
// verifies the Unicode version, starts one goroutine per package via
// generate, and waits for all of them before reporting overall success.
func main() {
	gen.Init()

	args = flag.Args()

	if !*verbose {
		// Set vprintf to a no-op.
		vprintf = func(string, ...interface{}) (int, error) { return 0, nil }
	}

	// TODO: create temporary cache directory to load files and create and set
	// a "cache" option if the user did not specify the UNICODE_DIR environment
	// variable. This will prevent duplicate downloads and also will enable long
	// tests, which really need to be run after each generated package.

	if gen.UnicodeVersion() != unicode.Version {
		// BUG FIX: the original passed the function value gen.UnicodeVersion
		// to Printf (printing a function pointer, not a version string);
		// call it instead.
		fmt.Printf("Requested Unicode version %s; core unicode version is %s.\n",
			gen.UnicodeVersion(),
			unicode.Version)
		// TODO: use collate to compare. Simple comparison will work, though,
		// until Unicode reaches version 10. To avoid circular dependencies, we
		// could use the NumericWeighter without using package collate using a
		// trivial Weighter implementation.
		if gen.UnicodeVersion() < unicode.Version && !*force {
			os.Exit(2)
		}
	}
	// Each generate call runs concurrently; later entries wait on the
	// dependency handles of the packages they consume.
	var (
		cldr       = generate("unicode/cldr")
		language   = generate("language", cldr)
		internal   = generate("internal", language)
		norm       = generate("unicode/norm")
		rangetable = generate("unicode/rangetable")
		cases      = generate("cases", norm, language, rangetable)
		width      = generate("width")
		bidi       = generate("unicode/bidi", norm, rangetable)
		_          = generate("secure/precis", norm, rangetable, cases, width, bidi)
		_          = generate("encoding/htmlindex", language)
		_          = generate("currency", cldr, language, internal)
		_          = generate("internal/number", cldr, language, internal)
		_          = generate("language/display", cldr, language)
		_          = generate("collate", norm, cldr, language, rangetable)
		_          = generate("search", norm, cldr, language, rangetable)
	)
	all.Wait()
	if hasErrors {
		fmt.Println("FAIL")
		os.Exit(1)
	}
	vprintf("SUCCESS\n")
}
var (
	// all tracks every package-generation goroutine started by generate.
	all sync.WaitGroup
	// hasErrors records whether any generate/test step failed.
	hasErrors bool
)

// dependency lets one generated package wait on its prerequisites and
// observe whether any of them failed.
type dependency struct {
	sync.WaitGroup
	hasErrors bool
}
// generate runs "go generate" and then "go test" for pkg in its own
// goroutine, after waiting for all deps to finish successfully. It returns
// a dependency handle that downstream packages can wait on; excluded
// packages get an already-completed handle.
func generate(pkg string, deps ...*dependency) *dependency {
	var wg dependency
	if exclude(pkg) {
		return &wg
	}
	wg.Add(1)
	all.Add(1)
	go func() {
		defer wg.Done()
		defer all.Done()
		// Wait for dependencies to finish.
		for _, d := range deps {
			d.Wait()
			if d.hasErrors && !*force {
				fmt.Printf("--- ABORT: %s\n", pkg)
				wg.hasErrors = true
				return
			}
		}
		vprintf("=== GENERATE %s\n", pkg)
		args := []string{"generate"}
		if *verbose {
			args = append(args, "-v")
		}
		args = append(args, "./"+pkg)
		// Use the go binary from GOROOT so the build matches this runtime.
		cmd := exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...)
		w := &bytes.Buffer{}
		cmd.Stderr = w
		cmd.Stdout = w
		if err := cmd.Run(); err != nil {
			fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(w), err)
			// NOTE(review): hasErrors is written from many goroutines with
			// no synchronization; main reads it only after all.Wait(), but
			// the concurrent writes themselves are racy — confirm intent.
			hasErrors = true
			wg.hasErrors = true
			return
		}
		// Reuse the same argument list to run the package's tests.
		vprintf("=== TEST %s\n", pkg)
		args[0] = "test"
		cmd = exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...)
		wt := &bytes.Buffer{}
		cmd.Stderr = wt
		cmd.Stdout = wt
		if err := cmd.Run(); err != nil {
			fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(wt), err)
			hasErrors = true
			wg.hasErrors = true
			return
		}
		vprintf("--- SUCCESS: %s\n\t%v\n", pkg, indent(w))
		fmt.Print(wt.String())
	}()
	return &wg
}
// contains reports whether s is an element of a.
func contains(a []string, s string) bool {
	for _, candidate := range a {
		if candidate == s {
			return true
		}
	}
	return false
}
func indent(b *bytes.Buffer) string {
return strings.Replace(strings.TrimSpace(b.String()), "\n", "\n\t", -1)
}
gen.go: fixed bug in version printing
Change-Id: I25e3d7407a037494774810cf0e50741229affae3
Reviewed-on: https://go-review.googlesource.com/24497
Reviewed-by: Nigel Tao <58ae08b3356928ff9c67562b4719487d54fe8348@golang.org>
Run-TryBot: Marcel van Lohuizen <a22d082192f3ccf984644a4f482781f4c2d65180@golang.org>
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// gen runs go generate on Unicode- and CLDR-related package in the text
// repositories, taking into account dependencies and versions.
package main
import (
"bytes"
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"unicode"
"golang.org/x/text/internal/gen"
)
// Command-line configuration for the generator driver.
var (
	verbose     = flag.Bool("v", false, "verbose output")
	force       = flag.Bool("force", false, "ignore failing dependencies")
	excludeList = flag.String("exclude", "",
		"comma-separated list of packages to exclude")

	// The user can specify a selection of packages to build on the command line.
	args []string
)
// exclude reports whether pkg should be skipped: an explicit command-line
// package selection excludes everything else; otherwise the -exclude list
// applies.
func exclude(pkg string) bool {
	if len(args) > 0 {
		return !contains(args, pkg)
	}
	return contains(strings.Split(*excludeList, ","), pkg)
}
// TODO:
// - Better version handling.
// - Generate tables for the core unicode package?
// - Add generation for encodings. This requires some retooling here and there.
// - Running repo-wide "long" tests.

// vprintf prints progress output; main replaces it with a no-op unless -v
// was given.
var vprintf = fmt.Printf
// main generates and tests the text-repo packages in dependency order: it
// verifies the Unicode version, starts one goroutine per package via
// generate, and waits for all of them before reporting overall success.
func main() {
	gen.Init()
	args = flag.Args()
	if !*verbose {
		// Set vprintf to a no-op.
		vprintf = func(string, ...interface{}) (int, error) { return 0, nil }
	}
	// TODO: create temporary cache directory to load files and create and set
	// a "cache" option if the user did not specify the UNICODE_DIR environment
	// variable. This will prevent duplicate downloads and also will enable long
	// tests, which really need to be run after each generated package.
	if gen.UnicodeVersion() != unicode.Version {
		fmt.Printf("Requested Unicode version %s; core unicode version is %s.\n",
			gen.UnicodeVersion(),
			unicode.Version)
		// TODO: use collate to compare. Simple comparison will work, though,
		// until Unicode reaches version 10. To avoid circular dependencies, we
		// could use the NumericWeighter without using package collate using a
		// trivial Weighter implementation.
		if gen.UnicodeVersion() < unicode.Version && !*force {
			os.Exit(2)
		}
	}
	// Each generate call runs concurrently; later entries wait on the
	// dependency handles of the packages they consume.
	var (
		cldr       = generate("unicode/cldr")
		language   = generate("language", cldr)
		internal   = generate("internal", language)
		norm       = generate("unicode/norm")
		rangetable = generate("unicode/rangetable")
		cases      = generate("cases", norm, language, rangetable)
		width      = generate("width")
		bidi       = generate("unicode/bidi", norm, rangetable)
		_          = generate("secure/precis", norm, rangetable, cases, width, bidi)
		_          = generate("encoding/htmlindex", language)
		_          = generate("currency", cldr, language, internal)
		_          = generate("internal/number", cldr, language, internal)
		_          = generate("language/display", cldr, language)
		_          = generate("collate", norm, cldr, language, rangetable)
		_          = generate("search", norm, cldr, language, rangetable)
	)
	all.Wait()
	if hasErrors {
		fmt.Println("FAIL")
		os.Exit(1)
	}
	vprintf("SUCCESS\n")
}
var (
	// all tracks every package-generation goroutine started by generate.
	all sync.WaitGroup
	// hasErrors records whether any generate/test step failed.
	hasErrors bool
)

// dependency lets one generated package wait on its prerequisites and
// observe whether any of them failed.
type dependency struct {
	sync.WaitGroup
	hasErrors bool
}
// generate runs "go generate" and then "go test" for pkg in its own
// goroutine, after waiting for all deps to finish successfully. It returns
// a dependency handle that downstream packages can wait on; excluded
// packages get an already-completed handle.
func generate(pkg string, deps ...*dependency) *dependency {
	var wg dependency
	if exclude(pkg) {
		return &wg
	}
	wg.Add(1)
	all.Add(1)
	go func() {
		defer wg.Done()
		defer all.Done()
		// Wait for dependencies to finish.
		for _, d := range deps {
			d.Wait()
			if d.hasErrors && !*force {
				fmt.Printf("--- ABORT: %s\n", pkg)
				wg.hasErrors = true
				return
			}
		}
		vprintf("=== GENERATE %s\n", pkg)
		args := []string{"generate"}
		if *verbose {
			args = append(args, "-v")
		}
		args = append(args, "./"+pkg)
		// Use the go binary from GOROOT so the build matches this runtime.
		cmd := exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...)
		w := &bytes.Buffer{}
		cmd.Stderr = w
		cmd.Stdout = w
		if err := cmd.Run(); err != nil {
			fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(w), err)
			// NOTE(review): hasErrors is written from many goroutines with
			// no synchronization; main reads it only after all.Wait(), but
			// the concurrent writes themselves are racy — confirm intent.
			hasErrors = true
			wg.hasErrors = true
			return
		}
		// Reuse the same argument list to run the package's tests.
		vprintf("=== TEST %s\n", pkg)
		args[0] = "test"
		cmd = exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...)
		wt := &bytes.Buffer{}
		cmd.Stderr = wt
		cmd.Stdout = wt
		if err := cmd.Run(); err != nil {
			fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(wt), err)
			hasErrors = true
			wg.hasErrors = true
			return
		}
		vprintf("--- SUCCESS: %s\n\t%v\n", pkg, indent(w))
		fmt.Print(wt.String())
	}()
	return &wg
}
// contains reports whether s is an element of a.
func contains(a []string, s string) bool {
	for i := range a {
		if a[i] == s {
			return true
		}
	}
	return false
}
func indent(b *bytes.Buffer) string {
return strings.Replace(strings.TrimSpace(b.String()), "\n", "\n\t", -1)
}
|
package atlas
import (
"math"
"strings"
)
// longestCommonPrefix finds the longest common prefix for all the strings
// given as an argument, or returns the empty string if a prefix can't be
// found.
//
// This function just uses brute force instead of a more optimized algorithm.
// longestCommonPrefix finds the longest common prefix (ending at a '/')
// shared by all the strings given as an argument, or returns the empty
// string if such a prefix can't be found.
//
// This function just uses brute force instead of a more optimized algorithm.
func longestCommonPrefix(vs []string) string {
	// Find the shortest string; any common prefix must be a prefix of it.
	// BUG FIX: the original seeded length with math.MaxUint32, a constant
	// that does not fit in int on 32-bit platforms ("constant 4294967295
	// overflows int"); math.MaxInt is the portable sentinel.
	var shortest string
	length := math.MaxInt
	for _, v := range vs {
		if len(v) < length {
			shortest = v
			length = len(v)
		}
	}

	// Try successively shorter prefixes of the shortest string, considering
	// only those that end at a path separator.
	for i := len(shortest); i > 0; i-- {
		if shortest[i-1] != '/' {
			continue
		}
		prefix := shortest[:i]
		ok := true
		for _, v := range vs {
			if !strings.HasPrefix(v, prefix) {
				ok = false
				break
			}
		}
		if ok {
			return prefix
		}
	}
	return ""
}
atlas post-processor on 32bit uint32 overflows int
post-processor/atlas/util.go:16: constant 4294967295 overflows int
Signed-off-by: BlackEagle <6c5c99a37e4dd8841971a4b7c7580d6fcba68554@gmail.com>
package atlas
import (
"math"
"strings"
)
// longestCommonPrefix finds the longest common prefix for all the strings
// given as an argument, or returns the empty string if a prefix can't be
// found.
//
// This function just uses brute force instead of a more optimized algorithm.
// longestCommonPrefix returns the longest prefix, ending at a path
// separator, shared by every string in vs; "" if there is none.
//
// Brute force: candidate prefixes are taken from the shortest input.
func longestCommonPrefix(vs []string) string {
	// The shortest input bounds any possible common prefix; lengths are
	// compared as int64 so the MaxUint32 sentinel fits on all platforms.
	shortest := ""
	best := int64(math.MaxUint32)
	for _, v := range vs {
		if n := int64(len(v)); n < best {
			shortest, best = v, n
		}
	}

	// Walk backwards over the shortest string, testing each prefix that
	// ends with '/' against all inputs.
	for end := len(shortest); end > 0; end-- {
		if shortest[end-1] != '/' {
			continue
		}
		candidate := shortest[:end]
		common := true
		for _, v := range vs {
			if !strings.HasPrefix(v, candidate) {
				common = false
				break
			}
		}
		if common {
			return candidate
		}
	}
	return ""
}
|
package harness
import (
"fmt"
"github.com/revel/revel"
"go/build"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"text/template"
)
// importErrorPattern matches the Go toolchain's "cannot find package" error,
// capturing the missing import path so Build can try "go get" on it.
var importErrorPattern = regexp.MustCompile("cannot find package \"([^\"]+)\"")
// Build the app:
// 1. Generate the the main.go file.
// 2. Run the appropriate "go build" command.
// Requires that revel.Init has been called previously.
// Returns the path to the built binary, and an error if there was a problem building it.
// Build generates the app's entry-point sources and compiles the user's
// program, transparently running "go get" for any missing import (each at
// most once) and retrying the build. On success it returns the runnable
// App; on compile failure it returns the parsed compile error.
func Build() (app *App, compileError *revel.Error) {
	// First, clear the generated files (to avoid them messing with ProcessSource).
	cleanSource("tmp", "routes")
	sourceInfo, compileError := ProcessSource(revel.CodePaths)
	if compileError != nil {
		return nil, compileError
	}
	// Add the db.import to the import paths.
	if dbImportPath, found := revel.Config.String("db.import"); found {
		sourceInfo.InitImportPaths = append(sourceInfo.InitImportPaths, dbImportPath)
	}
	// Generate two source files.
	templateArgs := map[string]interface{}{
		"Controllers":    sourceInfo.ControllerSpecs(),
		"ValidationKeys": sourceInfo.ValidationKeys,
		"ImportPaths":    calcImportAliases(sourceInfo),
		"TestSuites":     sourceInfo.TestSuites(),
	}
	genSource("tmp", "main.go", MAIN, templateArgs)
	genSource("routes", "routes.go", ROUTES, templateArgs)
	// Read build config.
	buildTags := revel.Config.StringDefault("build.tags", "")
	// Build the user program (all code under app).
	// It relies on the user having "go" installed.
	goPath, err := exec.LookPath("go")
	if err != nil {
		revel.ERROR.Fatalf("Go executable not found in PATH.")
	}
	pkg, err := build.Default.Import(revel.ImportPath, "", build.FindOnly)
	if err != nil {
		revel.ERROR.Fatalln("Failure importing", revel.ImportPath)
	}
	binName := path.Join(pkg.BinDir, path.Base(revel.BasePath))
	if runtime.GOOS == "windows" {
		binName += ".exe"
	}
	// gotten records packages we have already tried to "go get", so a
	// persistent import error cannot loop forever.
	gotten := make(map[string]struct{})
	for {
		// Embed the app version into the binary via linker flags.
		appVersion := getAppVersion()
		versionLinkerFlags := fmt.Sprintf("-X %s/app.APP_VERSION \"%s\"", revel.ImportPath, appVersion)
		buildCmd := exec.Command(goPath, "build",
			"-ldflags", versionLinkerFlags,
			"-tags", buildTags,
			"-o", binName, path.Join(revel.ImportPath, "app", "tmp"))
		revel.TRACE.Println("Exec:", buildCmd.Args)
		output, err := buildCmd.CombinedOutput()
		// If the build succeeded, we're done.
		if err == nil {
			return NewApp(binName), nil
		}
		revel.ERROR.Println(string(output))
		// See if it was an import error that we can go get.
		matches := importErrorPattern.FindStringSubmatch(string(output))
		if matches == nil {
			return nil, newCompileError(output)
		}
		// Ensure we haven't already tried to go get it.
		pkgName := matches[1]
		if _, alreadyTried := gotten[pkgName]; alreadyTried {
			return nil, newCompileError(output)
		}
		gotten[pkgName] = struct{}{}
		// Execute "go get <pkg>"
		getCmd := exec.Command(goPath, "get", pkgName)
		revel.TRACE.Println("Exec:", getCmd.Args)
		getOutput, err := getCmd.CombinedOutput()
		if err != nil {
			revel.ERROR.Println(string(getOutput))
			return nil, newCompileError(output)
		}
		// Success getting the import, attempt to build again.
	}
	// Unreachable: the loop above always returns.
	revel.ERROR.Fatalf("Not reachable")
	return nil, nil
}
// getAppVersion tries to define a version string for the compiled app.
// The following is tried (first match returns):
// - Read a version explicitly specified in the APP_VERSION environment
//   variable.
// - Read the output of "git describe" if the source is in a git repository.
// If no version can be determined, an empty string is returned.
func getAppVersion() string {
	if version := os.Getenv("APP_VERSION"); version != "" {
		return version
	}
	if gitPath, err := exec.LookPath("git"); err == nil {
		// Point git at the app's repository explicitly, so the command works
		// regardless of the harness's current working directory. Without
		// --git-dir, git describes whatever repository the harness happens
		// to run inside, which may not be the app at all.
		gitDir := "--git-dir=" + path.Join(revel.BasePath, ".git")
		gitCmd := exec.Command(gitPath, gitDir, "describe", "--always", "--dirty")
		revel.TRACE.Println("Exec:", gitCmd.Args)
		output, err := gitCmd.Output()
		if err != nil {
			revel.WARN.Println("Cannot determine git repository version:", err)
			return ""
		}
		return "git-" + strings.TrimSpace(string(output))
	}
	return ""
}
// cleanSource removes the named generated-source directories under the app
// path. A failure to remove a directory is logged but does not abort.
func cleanSource(dirs ...string) {
	for _, dirName := range dirs {
		fullPath := path.Join(revel.AppPath, dirName)
		if removeErr := os.RemoveAll(fullPath); removeErr != nil {
			revel.ERROR.Println("Failed to remove dir:", removeErr)
		}
	}
}
// genSource renders the given template to produce source code, which it writes
// to the given directory and file, recreating the directory from scratch.
// Any failure after template rendering is fatal.
func genSource(dir, filename, templateSource string, args map[string]interface{}) {
	sourceCode := revel.ExecuteTemplate(
		template.Must(template.New("").Parse(templateSource)),
		args)

	// Create a fresh dir.
	tmpPath := path.Join(revel.AppPath, dir)
	err := os.RemoveAll(tmpPath)
	if err != nil {
		revel.ERROR.Println("Failed to remove dir:", err)
	}
	err = os.Mkdir(tmpPath, 0777)
	if err != nil {
		revel.ERROR.Fatalf("Failed to make tmp directory: %v", err)
	}

	// Create the file. The error must be checked before deferring Close,
	// so we never defer a Close on a nil *os.File when Create fails.
	file, err := os.Create(path.Join(tmpPath, filename))
	if err != nil {
		revel.ERROR.Fatalf("Failed to create file: %v", err)
	}
	defer file.Close()
	_, err = file.WriteString(sourceCode)
	if err != nil {
		revel.ERROR.Fatalf("Failed to write to file: %v", err)
	}
}
// calcImportAliases looks through all the method args and returns a set of
// unique import paths that cover all the method arg types. Additionally, it
// assigns package aliases when necessary to resolve ambiguity.
func calcImportAliases(src *SourceInfo) map[string]string {
	aliases := make(map[string]string)
	for _, specGroup := range [][]*TypeInfo{src.ControllerSpecs(), src.TestSuites()} {
		for _, spec := range specGroup {
			addAlias(aliases, spec.ImportPath, spec.PackageName)
			for _, methSpec := range spec.MethodSpecs {
				for _, methArg := range methSpec.Args {
					if methArg.ImportPath != "" {
						addAlias(aliases, methArg.ImportPath, methArg.TypeExpr.PkgName)
					}
				}
			}
		}
	}

	// Add the "InitImportPaths", imported only for side effects ("_").
	for _, importPath := range src.InitImportPaths {
		if _, present := aliases[importPath]; !present {
			aliases[importPath] = "_"
		}
	}
	return aliases
}
// addAlias records a collision-free alias for importPath derived from
// pkgName. An existing entry for importPath is left untouched.
func addAlias(aliases map[string]string, importPath, pkgName string) {
	if _, exists := aliases[importPath]; exists {
		return
	}
	aliases[importPath] = makePackageAlias(aliases, pkgName)
}
// makePackageAlias returns pkgName itself if it is not yet used as an alias,
// otherwise the first free numbered variant ("pkg0", "pkg1", ...).
func makePackageAlias(aliases map[string]string, pkgName string) string {
	candidate := pkgName
	for suffix := 0; containsValue(aliases, candidate); suffix++ {
		candidate = fmt.Sprintf("%s%d", pkgName, suffix)
	}
	return candidate
}
// containsValue reports whether val appears among the values of m.
func containsValue(m map[string]string, val string) bool {
	for _, candidate := range m {
		if candidate == val {
			return true
		}
	}
	return false
}
// buildErrorPattern matches one "file:line[:column]: message" error line in
// "go build" output. Compiled once at package scope rather than on every
// failed build.
var buildErrorPattern = regexp.MustCompile(`(?m)^([^:#]+):(\d+):(\d+:)? (.*)$`)

// newCompileError parses the output of the "go build" command and returns a
// detailed *revel.Error, including source lines of the offending file when
// they can be read.
func newCompileError(output []byte) *revel.Error {
	errorMatch := buildErrorPattern.FindSubmatch(output)
	if errorMatch == nil {
		revel.ERROR.Println("Failed to parse build errors:\n", string(output))
		return &revel.Error{
			SourceType:  "Go code",
			Title:       "Go Compilation Error",
			Description: "See console for build error.",
		}
	}

	// Read the source for the offending file.
	var (
		relFilename    = string(errorMatch[1]) // e.g. "src/revel/sample/app/controllers/app.go"
		absFilename, _ = filepath.Abs(relFilename)
		line, _        = strconv.Atoi(string(errorMatch[2]))
		description    = string(errorMatch[4])
		compileError   = &revel.Error{
			SourceType:  "Go code",
			Title:       "Go Compilation Error",
			Path:        relFilename,
			Description: description,
			Line:        line,
		}
	)
	fileStr, err := revel.ReadLines(absFilename)
	if err != nil {
		// Source unavailable; return the error without source context.
		compileError.MetaError = absFilename + ": " + err.Error()
		revel.ERROR.Println(compileError.MetaError)
		return compileError
	}
	compileError.SourceLines = fileStr
	return compileError
}
// MAIN is the text/template used to generate the app's entry point
// (app/tmp/main.go): it registers controllers, validation keys, and test
// suites, then starts the revel server. Template string is runtime data —
// do not reformat.
const MAIN = `// GENERATED CODE - DO NOT EDIT
package main
import (
"flag"
"reflect"
"github.com/revel/revel"{{range $k, $v := $.ImportPaths}}
{{$v}} "{{$k}}"{{end}}
)
var (
runMode *string = flag.String("runMode", "", "Run mode.")
port *int = flag.Int("port", 0, "By default, read from app.conf")
importPath *string = flag.String("importPath", "", "Go Import Path for the app.")
srcPath *string = flag.String("srcPath", "", "Path to the source root.")
// So compiler won't complain if the generated code doesn't reference reflect package...
_ = reflect.Invalid
)
func main() {
flag.Parse()
revel.Init(*runMode, *importPath, *srcPath)
revel.INFO.Println("Running revel server")
{{range $i, $c := .Controllers}}
revel.RegisterController((*{{index $.ImportPaths .ImportPath}}.{{.StructName}})(nil),
[]*revel.MethodType{
{{range .MethodSpecs}}&revel.MethodType{
Name: "{{.Name}}",
Args: []*revel.MethodArg{ {{range .Args}}
&revel.MethodArg{Name: "{{.Name}}", Type: reflect.TypeOf((*{{index $.ImportPaths .ImportPath | .TypeExpr.TypeName}})(nil)) },{{end}}
},
RenderArgNames: map[int][]string{ {{range .RenderCalls}}
{{.Line}}: []string{ {{range .Names}}
"{{.}}",{{end}}
},{{end}}
},
},
{{end}}
})
{{end}}
revel.DefaultValidationKeys = map[string]map[int]string{ {{range $path, $lines := .ValidationKeys}}
"{{$path}}": { {{range $line, $key := $lines}}
{{$line}}: "{{$key}}",{{end}}
},{{end}}
}
revel.TestSuites = []interface{}{ {{range .TestSuites}}
(*{{index $.ImportPaths .ImportPath}}.{{.StructName}})(nil),{{end}}
}
revel.Run(*port)
}
`
// ROUTES is the text/template used to generate the reverse-routing helpers
// (app/routes/routes.go): one typed helper per controller action that builds
// the action's URL. Template string is runtime data — do not reformat.
const ROUTES = `// GENERATED CODE - DO NOT EDIT
package routes
import "github.com/revel/revel"
{{range $i, $c := .Controllers}}
type t{{.StructName}} struct {}
var {{.StructName}} t{{.StructName}}
{{range .MethodSpecs}}
func (_ t{{$c.StructName}}) {{.Name}}({{range .Args}}
{{.Name}} {{if .ImportPath}}interface{}{{else}}{{.TypeExpr.TypeName ""}}{{end}},{{end}}
) string {
args := make(map[string]string)
{{range .Args}}
revel.Unbind(args, "{{.Name}}", {{.Name}}){{end}}
return revel.MainRouter.Reverse("{{$c.StructName}}.{{.Name}}", args).Url
}
{{end}}
{{end}}
`
Tell git where to find the repository when using git to find the app version.
package harness
import (
"fmt"
"github.com/revel/revel"
"go/build"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"text/template"
)
// importErrorPattern extracts the missing package path from a
// `cannot find package "..."` line in "go build" output.
var importErrorPattern = regexp.MustCompile("cannot find package \"([^\"]+)\"")
// Build the app:
// 1. Generate the main.go file.
// 2. Run the appropriate "go build" command.
// Requires that revel.Init has been called previously.
// Returns the built App, and a compile error if there was a problem building it.
func Build() (app *App, compileError *revel.Error) {
	// First, clear the generated files (to avoid them messing with ProcessSource).
	cleanSource("tmp", "routes")

	sourceInfo, compileError := ProcessSource(revel.CodePaths)
	if compileError != nil {
		return nil, compileError
	}

	// Add the db.import to the import paths so its init() is linked in.
	if dbImportPath, found := revel.Config.String("db.import"); found {
		sourceInfo.InitImportPaths = append(sourceInfo.InitImportPaths, dbImportPath)
	}

	// Generate two source files: the app entry point and the reverse-route helpers.
	templateArgs := map[string]interface{}{
		"Controllers":    sourceInfo.ControllerSpecs(),
		"ValidationKeys": sourceInfo.ValidationKeys,
		"ImportPaths":    calcImportAliases(sourceInfo),
		"TestSuites":     sourceInfo.TestSuites(),
	}
	genSource("tmp", "main.go", MAIN, templateArgs)
	genSource("routes", "routes.go", ROUTES, templateArgs)

	// Read build config.
	buildTags := revel.Config.StringDefault("build.tags", "")

	// Build the user program (all code under app).
	// It relies on the user having "go" installed.
	goPath, err := exec.LookPath("go")
	if err != nil {
		revel.ERROR.Fatalf("Go executable not found in PATH.")
	}

	pkg, err := build.Default.Import(revel.ImportPath, "", build.FindOnly)
	if err != nil {
		revel.ERROR.Fatalln("Failure importing", revel.ImportPath)
	}
	binName := path.Join(pkg.BinDir, path.Base(revel.BasePath))
	if runtime.GOOS == "windows" {
		binName += ".exe"
	}

	// Build loop: on a "cannot find package" failure, "go get" the missing
	// package once and retry; any other failure is a compile error.
	gotten := make(map[string]struct{})
	for {
		appVersion := getAppVersion()
		versionLinkerFlags := fmt.Sprintf("-X %s/app.APP_VERSION \"%s\"", revel.ImportPath, appVersion)
		buildCmd := exec.Command(goPath, "build",
			"-ldflags", versionLinkerFlags,
			"-tags", buildTags,
			"-o", binName, path.Join(revel.ImportPath, "app", "tmp"))
		revel.TRACE.Println("Exec:", buildCmd.Args)
		output, err := buildCmd.CombinedOutput()

		// If the build succeeded, we're done.
		if err == nil {
			return NewApp(binName), nil
		}
		revel.ERROR.Println(string(output))

		// See if it was an import error that we can go get.
		matches := importErrorPattern.FindStringSubmatch(string(output))
		if matches == nil {
			return nil, newCompileError(output)
		}

		// Ensure we haven't already tried to go get it.
		pkgName := matches[1]
		if _, alreadyTried := gotten[pkgName]; alreadyTried {
			return nil, newCompileError(output)
		}
		gotten[pkgName] = struct{}{}

		// Execute "go get <pkg>"
		getCmd := exec.Command(goPath, "get", pkgName)
		revel.TRACE.Println("Exec:", getCmd.Args)
		getOutput, err := getCmd.CombinedOutput()
		if err != nil {
			revel.ERROR.Println(string(getOutput))
			return nil, newCompileError(output)
		}
		// Success getting the import, attempt to build again.
	}
	// NOTE: the loop only exits via return; the former trailing
	// Fatalf/return pair was unreachable and has been removed.
}
// getAppVersion tries to define a version string for the compiled app.
// The following is tried (first match returns):
// - Read a version explicitly specified in the APP_VERSION environment
//   variable.
// - Read the output of "git describe" if the source is in a git repository.
// If no version can be determined, an empty string is returned.
func getAppVersion() string {
	if version := os.Getenv("APP_VERSION"); version != "" {
		return version
	}
	gitPath, lookErr := exec.LookPath("git")
	if lookErr != nil {
		return ""
	}
	// Point git at the app's repository explicitly, independent of cwd.
	gitDirFlag := "--git-dir=" + path.Join(revel.BasePath, ".git")
	describeCmd := exec.Command(gitPath, gitDirFlag, "describe", "--always", "--dirty")
	revel.TRACE.Println("Exec:", describeCmd.Args)
	describeOutput, describeErr := describeCmd.Output()
	if describeErr != nil {
		revel.WARN.Println("Cannot determine git repository version:", describeErr)
		return ""
	}
	return "git-" + strings.TrimSpace(string(describeOutput))
}
// cleanSource removes the named generated-source directories under the app
// path. A failure to remove a directory is logged but does not abort.
func cleanSource(dirs ...string) {
	for _, dirName := range dirs {
		fullPath := path.Join(revel.AppPath, dirName)
		if removeErr := os.RemoveAll(fullPath); removeErr != nil {
			revel.ERROR.Println("Failed to remove dir:", removeErr)
		}
	}
}
// genSource renders the given template to produce source code, which it writes
// to the given directory and file, recreating the directory from scratch.
// Any failure after template rendering is fatal.
func genSource(dir, filename, templateSource string, args map[string]interface{}) {
	sourceCode := revel.ExecuteTemplate(
		template.Must(template.New("").Parse(templateSource)),
		args)

	// Create a fresh dir.
	tmpPath := path.Join(revel.AppPath, dir)
	err := os.RemoveAll(tmpPath)
	if err != nil {
		revel.ERROR.Println("Failed to remove dir:", err)
	}
	err = os.Mkdir(tmpPath, 0777)
	if err != nil {
		revel.ERROR.Fatalf("Failed to make tmp directory: %v", err)
	}

	// Create the file. The error must be checked before deferring Close,
	// so we never defer a Close on a nil *os.File when Create fails.
	file, err := os.Create(path.Join(tmpPath, filename))
	if err != nil {
		revel.ERROR.Fatalf("Failed to create file: %v", err)
	}
	defer file.Close()
	_, err = file.WriteString(sourceCode)
	if err != nil {
		revel.ERROR.Fatalf("Failed to write to file: %v", err)
	}
}
// calcImportAliases looks through all the method args and returns a set of
// unique import paths that cover all the method arg types. Additionally, it
// assigns package aliases when necessary to resolve ambiguity.
func calcImportAliases(src *SourceInfo) map[string]string {
	aliases := make(map[string]string)
	for _, specGroup := range [][]*TypeInfo{src.ControllerSpecs(), src.TestSuites()} {
		for _, spec := range specGroup {
			addAlias(aliases, spec.ImportPath, spec.PackageName)
			for _, methSpec := range spec.MethodSpecs {
				for _, methArg := range methSpec.Args {
					if methArg.ImportPath != "" {
						addAlias(aliases, methArg.ImportPath, methArg.TypeExpr.PkgName)
					}
				}
			}
		}
	}

	// Add the "InitImportPaths", imported only for side effects ("_").
	for _, importPath := range src.InitImportPaths {
		if _, present := aliases[importPath]; !present {
			aliases[importPath] = "_"
		}
	}
	return aliases
}
// addAlias records a collision-free alias for importPath derived from
// pkgName. An existing entry for importPath is left untouched.
func addAlias(aliases map[string]string, importPath, pkgName string) {
	if _, exists := aliases[importPath]; exists {
		return
	}
	aliases[importPath] = makePackageAlias(aliases, pkgName)
}
// makePackageAlias returns pkgName itself if it is not yet used as an alias,
// otherwise the first free numbered variant ("pkg0", "pkg1", ...).
func makePackageAlias(aliases map[string]string, pkgName string) string {
	candidate := pkgName
	for suffix := 0; containsValue(aliases, candidate); suffix++ {
		candidate = fmt.Sprintf("%s%d", pkgName, suffix)
	}
	return candidate
}
// containsValue reports whether val appears among the values of m.
func containsValue(m map[string]string, val string) bool {
	for _, candidate := range m {
		if candidate == val {
			return true
		}
	}
	return false
}
// buildErrorPattern matches one "file:line[:column]: message" error line in
// "go build" output. Compiled once at package scope rather than on every
// failed build.
var buildErrorPattern = regexp.MustCompile(`(?m)^([^:#]+):(\d+):(\d+:)? (.*)$`)

// newCompileError parses the output of the "go build" command and returns a
// detailed *revel.Error, including source lines of the offending file when
// they can be read.
func newCompileError(output []byte) *revel.Error {
	errorMatch := buildErrorPattern.FindSubmatch(output)
	if errorMatch == nil {
		revel.ERROR.Println("Failed to parse build errors:\n", string(output))
		return &revel.Error{
			SourceType:  "Go code",
			Title:       "Go Compilation Error",
			Description: "See console for build error.",
		}
	}

	// Read the source for the offending file.
	var (
		relFilename    = string(errorMatch[1]) // e.g. "src/revel/sample/app/controllers/app.go"
		absFilename, _ = filepath.Abs(relFilename)
		line, _        = strconv.Atoi(string(errorMatch[2]))
		description    = string(errorMatch[4])
		compileError   = &revel.Error{
			SourceType:  "Go code",
			Title:       "Go Compilation Error",
			Path:        relFilename,
			Description: description,
			Line:        line,
		}
	)
	fileStr, err := revel.ReadLines(absFilename)
	if err != nil {
		// Source unavailable; return the error without source context.
		compileError.MetaError = absFilename + ": " + err.Error()
		revel.ERROR.Println(compileError.MetaError)
		return compileError
	}
	compileError.SourceLines = fileStr
	return compileError
}
// MAIN is the text/template used to generate the app's entry point
// (app/tmp/main.go): it registers controllers, validation keys, and test
// suites, then starts the revel server. Template string is runtime data —
// do not reformat.
const MAIN = `// GENERATED CODE - DO NOT EDIT
package main
import (
"flag"
"reflect"
"github.com/revel/revel"{{range $k, $v := $.ImportPaths}}
{{$v}} "{{$k}}"{{end}}
)
var (
runMode *string = flag.String("runMode", "", "Run mode.")
port *int = flag.Int("port", 0, "By default, read from app.conf")
importPath *string = flag.String("importPath", "", "Go Import Path for the app.")
srcPath *string = flag.String("srcPath", "", "Path to the source root.")
// So compiler won't complain if the generated code doesn't reference reflect package...
_ = reflect.Invalid
)
func main() {
flag.Parse()
revel.Init(*runMode, *importPath, *srcPath)
revel.INFO.Println("Running revel server")
{{range $i, $c := .Controllers}}
revel.RegisterController((*{{index $.ImportPaths .ImportPath}}.{{.StructName}})(nil),
[]*revel.MethodType{
{{range .MethodSpecs}}&revel.MethodType{
Name: "{{.Name}}",
Args: []*revel.MethodArg{ {{range .Args}}
&revel.MethodArg{Name: "{{.Name}}", Type: reflect.TypeOf((*{{index $.ImportPaths .ImportPath | .TypeExpr.TypeName}})(nil)) },{{end}}
},
RenderArgNames: map[int][]string{ {{range .RenderCalls}}
{{.Line}}: []string{ {{range .Names}}
"{{.}}",{{end}}
},{{end}}
},
},
{{end}}
})
{{end}}
revel.DefaultValidationKeys = map[string]map[int]string{ {{range $path, $lines := .ValidationKeys}}
"{{$path}}": { {{range $line, $key := $lines}}
{{$line}}: "{{$key}}",{{end}}
},{{end}}
}
revel.TestSuites = []interface{}{ {{range .TestSuites}}
(*{{index $.ImportPaths .ImportPath}}.{{.StructName}})(nil),{{end}}
}
revel.Run(*port)
}
`
// ROUTES is the text/template used to generate the reverse-routing helpers
// (app/routes/routes.go): one typed helper per controller action that builds
// the action's URL. Template string is runtime data — do not reformat.
const ROUTES = `// GENERATED CODE - DO NOT EDIT
package routes
import "github.com/revel/revel"
{{range $i, $c := .Controllers}}
type t{{.StructName}} struct {}
var {{.StructName}} t{{.StructName}}
{{range .MethodSpecs}}
func (_ t{{$c.StructName}}) {{.Name}}({{range .Args}}
{{.Name}} {{if .ImportPath}}interface{}{{else}}{{.TypeExpr.TypeName ""}}{{end}},{{end}}
) string {
args := make(map[string]string)
{{range .Args}}
revel.Unbind(args, "{{.Name}}", {{.Name}}){{end}}
return revel.MainRouter.Reverse("{{$c.StructName}}.{{.Name}}", args).Url
}
{{end}}
{{end}}
`
|
package poker
import (
"reflect"
"testing"
)
// Define a function BestHand([]string) ([]string, error).
//
// Also define a testVersion with a value that matches
// the targetTestVersion here.
// targetTestVersion pins the exercise API version this test file expects.
const targetTestVersion = 4
// validTestCases enumerates poker hands alongside the expected winning
// hand(s), ordered roughly from high card up through straight flush and
// finishing with tie scenarios. Suits use ♡ ♢ ♧ ♤; ♥-style glyphs are invalid.
var validTestCases = []struct {
name string
hands []string
best []string
}{
{
name: "single hand is always best",
hands: []string{"3♡ 10♢ 7♧ 8♤ A♢"},
best: []string{"3♡ 10♢ 7♧ 8♤ A♢"},
},
{
name: "highest card",
hands: []string{"3♢ 2♢ 5♤ 6♤ 9♡", "3♡ 2♡ 5♧ 6♢ 10♡"},
best: []string{"3♡ 2♡ 5♧ 6♢ 10♡"},
},
{
name: "highest card with mostly same cards",
hands: []string{"4♢ 2♢ 5♤ 6♤ 9♡", "4♡ 3♤ 5♧ 6♢ 9♢"},
best: []string{"4♡ 3♤ 5♧ 6♢ 9♢"},
},
{
name: "pair beats lower",
hands: []string{"4♢ 3♤ 4♤ J♤ K♤", "A♡ K♡ J♢ 10♧ 9♡"},
best: []string{"4♢ 3♤ 4♤ J♤ K♤"},
},
{
name: "best pair",
hands: []string{"4♡ 2♡ 5♧ 4♢ 10♡", "3♢ 3♡ 5♤ 6♤ 9♡"},
best: []string{"4♡ 2♡ 5♧ 4♢ 10♡"},
},
{
name: "best pair with same pair and highest cards",
hands: []string{"4♡ 2♡ 5♧ 4♢ 10♡", "4♤ 4♧ 5♡ 10♢ 3♡"},
best: []string{"4♤ 4♧ 5♡ 10♢ 3♡"},
},
{
name: "two pair beats lower",
hands: []string{
"4♢ 3♤ 4♤ J♤ K♤",
"A♡ K♡ J♢ 10♧ 9♡",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"2♢ 8♡ 5♢ 2♡ 8♧"},
},
{
name: "best two pair",
hands: []string{
"4♢ J♧ 4♤ J♤ K♤",
"A♡ K♡ J♢ 10♧ 9♡",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"4♢ J♧ 4♤ J♤ K♤"},
},
{
name: "best two pair with equal highest pair",
hands: []string{
"4♢ J♧ 4♤ J♤ K♤",
"A♡ K♡ J♢ 10♧ 9♡",
"3♢ J♡ 5♢ 3♡ J♢",
},
best: []string{"4♢ J♧ 4♤ J♤ K♤"},
},
{
name: "best two pair with equal pairs",
hands: []string{
"4♢ J♧ 4♤ J♤ 2♤",
"A♡ K♡ J♢ 10♧ 9♡",
"4♧ J♡ 5♢ 4♡ J♢",
},
best: []string{"4♧ J♡ 5♢ 4♡ J♢"},
},
{
name: "three of a kind beats lower",
hands: []string{
"4♢ 3♤ 4♤ J♤ K♤",
"A♡ K♡ J♢ 10♧ 9♡",
"3♢ 8♡ 3♡ 3♧ 9♧",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"3♢ 8♡ 3♡ 3♧ 9♧"},
},
{
name: "best three of a kind",
hands: []string{
"4♢ 3♤ 4♤ J♤ 4♡",
"A♡ K♡ J♢ 10♧ 9♡",
"3♢ 8♡ 3♡ 3♧ 9♧",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"4♢ 3♤ 4♤ J♤ 4♡"},
},
{
name: "straight beats lower",
hands: []string{
"4♢ 3♤ 4♤ J♤ K♤",
"Q♡ K♡ J♢ 10♧ 9♡",
"3♢ 8♡ 3♡ 3♧ 9♧",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"Q♡ K♡ J♢ 10♧ 9♡"},
},
{
name: "straight includes ace as one",
hands: []string{
"4♢ 3♤ 4♤ J♤ K♤",
"2♤ 3♡ A♤ 5♤ 4♤",
"3♢ 8♡ 3♡ 3♧ 9♧",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"2♤ 3♡ A♤ 5♤ 4♤"},
},
{
name: "best straight",
hands: []string{
"4♢ 3♤ 4♤ J♤ K♤",
"Q♡ K♡ J♢ 10♧ 9♡",
"A♢ K♧ 10♢ J♢ Q♢",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"A♢ K♧ 10♢ J♢ Q♢"},
},
{
name: "flush beats lower",
hands: []string{
"4♤ 3♤ 8♤ J♤ K♤",
"Q♡ K♡ J♢ 10♧ 9♡",
"3♢ 8♡ 3♡ 3♧ 9♧",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"4♤ 3♤ 8♤ J♤ K♤"},
},
{
name: "best flush",
hands: []string{
"4♤ 3♤ 8♤ J♤ K♤",
"Q♡ K♡ J♢ 10♧ 9♡",
"3♢ 8♢ A♢ 4♢ 7♢",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"3♢ 8♢ A♢ 4♢ 7♢"},
},
{
name: "full house beats lower",
hands: []string{
"4♤ 3♤ 8♤ J♤ K♤",
"2♢ 8♡ 8♢ 2♡ 8♧",
"Q♡ K♡ J♢ 10♧ 9♡",
"3♢ A♡ 3♡ 3♧ A♧",
},
best: []string{"2♢ 8♡ 8♢ 2♡ 8♧"},
},
{
name: "best full house",
hands: []string{
"4♤ 3♤ 8♤ J♤ K♤",
"2♢ 8♡ 8♢ 2♡ 8♧",
"5♡ 5♢ A♧ 5♧ A♢",
"3♢ A♡ 3♡ 3♧ A♧",
},
best: []string{"2♢ 8♡ 8♢ 2♡ 8♧"},
},
{
name: "four of a kind beats lower",
hands: []string{
"4♤ 5♤ 8♤ J♤ K♤",
"2♢ 8♡ 8♢ 2♡ 8♧",
"Q♡ K♡ J♢ 10♧ 9♡",
"3♢ 3♡ 3♤ 3♧ A♧",
},
best: []string{"3♢ 3♡ 3♤ 3♧ A♧"},
},
{
name: "best four of a kind",
hands: []string{
"4♤ 5♤ 8♤ J♤ K♤",
"2♢ 2♧ 8♢ 2♡ 2♤",
"Q♡ K♡ J♢ 10♧ 9♡",
"3♢ 3♡ 3♤ 3♧ A♧",
},
best: []string{"3♢ 3♡ 3♤ 3♧ A♧"},
},
{
name: "straight flush beats lower",
hands: []string{
"4♤ 5♢ 8♤ J♤ K♤",
"2♢ 8♡ 8♢ 2♡ 8♧",
"Q♡ K♡ 8♡ 10♡ 9♡",
"3♢ 3♡ 3♤ 3♧ A♧",
"2♤ 3♤ A♤ 5♤ 4♤",
},
best: []string{"2♤ 3♤ A♤ 5♤ 4♤"},
},
{
name: "best straight flush is royal flush",
hands: []string{
"4♤ 5♤ 8♤ J♤ K♤",
"2♢ 8♡ 8♢ 2♡ 8♧",
"Q♡ K♡ J♡ 10♡ 9♡",
"Q♢ K♢ J♢ 10♢ A♢",
},
best: []string{"Q♢ K♢ J♢ 10♢ A♢"},
},
{
name: "tie for best pair",
hands: []string{"4♡ 2♡ 5♧ 4♢ 10♡", "4♧ 10♢ 5♤ 2♤ 4♤"},
best: []string{"4♡ 2♡ 5♧ 4♢ 10♡", "4♧ 10♢ 5♤ 2♤ 4♤"},
},
{
name: "tie of three",
hands: []string{
"A♡ 2♡ 3♡ 4♡ 5♡",
"A♤ 2♤ 3♤ 4♤ 5♤",
"5♧ 4♧ 3♧ 2♧ A♧",
"A♢ 2♢ 6♢ 4♢ 5♢",
},
best: []string{
"A♡ 2♡ 3♡ 4♡ 5♡",
"A♤ 2♤ 3♤ 4♤ 5♤",
"5♧ 4♧ 3♧ 2♧ A♧",
},
},
}
// invalidTestCases lists malformed hands that BestHand must reject with an
// error: bad ranks, wrong card counts, missing/duplicated suits, bad glyphs,
// and missing separators.
var invalidTestCases = []struct {
name string
hand string
}{
{
name: "1 is an invalid card rank",
hand: "1♢ 2♡ 3♡ 4♡ 5♡",
},
{
name: "11 is an invalid card rank",
hand: "11♢ 2♡ 3♡ 4♡ 5♡",
},
{
name: "too few cards",
hand: "2♡ 3♡ 4♡ 5♡",
},
{
name: "too many cards",
hand: "2♡ 3♡ 4♡ 5♡ 6♡ 7♡",
},
{
name: "lack of rank",
hand: "11♢ 2♡ ♡ 4♡ 5♡",
},
{
name: "lack of suit",
hand: "2♡ 3♡ 4 5♡ 7♡",
},
{
name: "H is an invalid suit",
hand: "2♡ 3♡ 4H 5♡ 7♡",
},
{
name: "♥ is an invalid suit",
hand: "2♡ 3♡ 4♥ 5♡ 7♡",
},
{
name: "lack of spacing",
hand: "2♡ 3♡ 5♡7♡ 8♡",
},
{
name: "double suits after rank",
hand: "2♡ 3♡ 5♡♡ 8♡ 9♡",
},
}
// TestBestHandValid checks that BestHand returns the expected winning
// hand(s), in order, for every valid test case.
func TestBestHandValid(t *testing.T) {
	for _, tt := range validTestCases {
		actual, err := BestHand(tt.hands)
		if err != nil {
			// Removed the no-op `var _ error = err` that previously sat here.
			t.Fatalf("Got unexpected error in valid case %q: %v", tt.name, err)
		}
		if !reflect.DeepEqual(actual, tt.best) {
			t.Fatalf("Mismatch in result of valid case %q: got %#v, want %#v",
				tt.name, actual, tt.best)
		}
	}
}
// TestBestHandInvalid checks that BestHand rejects each malformed hand.
func TestBestHandInvalid(t *testing.T) {
	for _, tc := range invalidTestCases {
		if _, err := BestHand([]string{tc.hand}); err == nil {
			t.Fatalf("Did not get an error for invalid case %q", tc.name)
		}
	}
}
// TestTestVersion ensures the solution's testVersion matches this test file.
func TestTestVersion(t *testing.T) {
	if testVersion == targetTestVersion {
		return
	}
	t.Fatalf("Found testVersion = %v, want %v", testVersion, targetTestVersion)
}
// BenchmarkBestHand measures BestHand across the full valid test-case set.
func BenchmarkBestHand(b *testing.B) {
	for n := 0; n < b.N; n++ {
		for _, tc := range validTestCases {
			_, _ = BestHand(tc.hands)
		}
	}
}
poker: Ensure test versioning consistency with other exercises (#569)
* Floated the TestTestVersion function to the start of the tests
See #470
package poker
import (
"reflect"
"testing"
)
// Define a function BestHand([]string) ([]string, error).
//
// Also define a testVersion with a value that matches
// the targetTestVersion here.
// targetTestVersion pins the exercise API version this test file expects.
const targetTestVersion = 4
// validTestCases enumerates poker hands alongside the expected winning
// hand(s), ordered roughly from high card up through straight flush and
// finishing with tie scenarios. Suits use ♡ ♢ ♧ ♤; ♥-style glyphs are invalid.
var validTestCases = []struct {
name string
hands []string
best []string
}{
{
name: "single hand is always best",
hands: []string{"3♡ 10♢ 7♧ 8♤ A♢"},
best: []string{"3♡ 10♢ 7♧ 8♤ A♢"},
},
{
name: "highest card",
hands: []string{"3♢ 2♢ 5♤ 6♤ 9♡", "3♡ 2♡ 5♧ 6♢ 10♡"},
best: []string{"3♡ 2♡ 5♧ 6♢ 10♡"},
},
{
name: "highest card with mostly same cards",
hands: []string{"4♢ 2♢ 5♤ 6♤ 9♡", "4♡ 3♤ 5♧ 6♢ 9♢"},
best: []string{"4♡ 3♤ 5♧ 6♢ 9♢"},
},
{
name: "pair beats lower",
hands: []string{"4♢ 3♤ 4♤ J♤ K♤", "A♡ K♡ J♢ 10♧ 9♡"},
best: []string{"4♢ 3♤ 4♤ J♤ K♤"},
},
{
name: "best pair",
hands: []string{"4♡ 2♡ 5♧ 4♢ 10♡", "3♢ 3♡ 5♤ 6♤ 9♡"},
best: []string{"4♡ 2♡ 5♧ 4♢ 10♡"},
},
{
name: "best pair with same pair and highest cards",
hands: []string{"4♡ 2♡ 5♧ 4♢ 10♡", "4♤ 4♧ 5♡ 10♢ 3♡"},
best: []string{"4♤ 4♧ 5♡ 10♢ 3♡"},
},
{
name: "two pair beats lower",
hands: []string{
"4♢ 3♤ 4♤ J♤ K♤",
"A♡ K♡ J♢ 10♧ 9♡",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"2♢ 8♡ 5♢ 2♡ 8♧"},
},
{
name: "best two pair",
hands: []string{
"4♢ J♧ 4♤ J♤ K♤",
"A♡ K♡ J♢ 10♧ 9♡",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"4♢ J♧ 4♤ J♤ K♤"},
},
{
name: "best two pair with equal highest pair",
hands: []string{
"4♢ J♧ 4♤ J♤ K♤",
"A♡ K♡ J♢ 10♧ 9♡",
"3♢ J♡ 5♢ 3♡ J♢",
},
best: []string{"4♢ J♧ 4♤ J♤ K♤"},
},
{
name: "best two pair with equal pairs",
hands: []string{
"4♢ J♧ 4♤ J♤ 2♤",
"A♡ K♡ J♢ 10♧ 9♡",
"4♧ J♡ 5♢ 4♡ J♢",
},
best: []string{"4♧ J♡ 5♢ 4♡ J♢"},
},
{
name: "three of a kind beats lower",
hands: []string{
"4♢ 3♤ 4♤ J♤ K♤",
"A♡ K♡ J♢ 10♧ 9♡",
"3♢ 8♡ 3♡ 3♧ 9♧",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"3♢ 8♡ 3♡ 3♧ 9♧"},
},
{
name: "best three of a kind",
hands: []string{
"4♢ 3♤ 4♤ J♤ 4♡",
"A♡ K♡ J♢ 10♧ 9♡",
"3♢ 8♡ 3♡ 3♧ 9♧",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"4♢ 3♤ 4♤ J♤ 4♡"},
},
{
name: "straight beats lower",
hands: []string{
"4♢ 3♤ 4♤ J♤ K♤",
"Q♡ K♡ J♢ 10♧ 9♡",
"3♢ 8♡ 3♡ 3♧ 9♧",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"Q♡ K♡ J♢ 10♧ 9♡"},
},
{
name: "straight includes ace as one",
hands: []string{
"4♢ 3♤ 4♤ J♤ K♤",
"2♤ 3♡ A♤ 5♤ 4♤",
"3♢ 8♡ 3♡ 3♧ 9♧",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"2♤ 3♡ A♤ 5♤ 4♤"},
},
{
name: "best straight",
hands: []string{
"4♢ 3♤ 4♤ J♤ K♤",
"Q♡ K♡ J♢ 10♧ 9♡",
"A♢ K♧ 10♢ J♢ Q♢",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"A♢ K♧ 10♢ J♢ Q♢"},
},
{
name: "flush beats lower",
hands: []string{
"4♤ 3♤ 8♤ J♤ K♤",
"Q♡ K♡ J♢ 10♧ 9♡",
"3♢ 8♡ 3♡ 3♧ 9♧",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"4♤ 3♤ 8♤ J♤ K♤"},
},
{
name: "best flush",
hands: []string{
"4♤ 3♤ 8♤ J♤ K♤",
"Q♡ K♡ J♢ 10♧ 9♡",
"3♢ 8♢ A♢ 4♢ 7♢",
"2♢ 8♡ 5♢ 2♡ 8♧",
},
best: []string{"3♢ 8♢ A♢ 4♢ 7♢"},
},
{
name: "full house beats lower",
hands: []string{
"4♤ 3♤ 8♤ J♤ K♤",
"2♢ 8♡ 8♢ 2♡ 8♧",
"Q♡ K♡ J♢ 10♧ 9♡",
"3♢ A♡ 3♡ 3♧ A♧",
},
best: []string{"2♢ 8♡ 8♢ 2♡ 8♧"},
},
{
name: "best full house",
hands: []string{
"4♤ 3♤ 8♤ J♤ K♤",
"2♢ 8♡ 8♢ 2♡ 8♧",
"5♡ 5♢ A♧ 5♧ A♢",
"3♢ A♡ 3♡ 3♧ A♧",
},
best: []string{"2♢ 8♡ 8♢ 2♡ 8♧"},
},
{
name: "four of a kind beats lower",
hands: []string{
"4♤ 5♤ 8♤ J♤ K♤",
"2♢ 8♡ 8♢ 2♡ 8♧",
"Q♡ K♡ J♢ 10♧ 9♡",
"3♢ 3♡ 3♤ 3♧ A♧",
},
best: []string{"3♢ 3♡ 3♤ 3♧ A♧"},
},
{
name: "best four of a kind",
hands: []string{
"4♤ 5♤ 8♤ J♤ K♤",
"2♢ 2♧ 8♢ 2♡ 2♤",
"Q♡ K♡ J♢ 10♧ 9♡",
"3♢ 3♡ 3♤ 3♧ A♧",
},
best: []string{"3♢ 3♡ 3♤ 3♧ A♧"},
},
{
name: "straight flush beats lower",
hands: []string{
"4♤ 5♢ 8♤ J♤ K♤",
"2♢ 8♡ 8♢ 2♡ 8♧",
"Q♡ K♡ 8♡ 10♡ 9♡",
"3♢ 3♡ 3♤ 3♧ A♧",
"2♤ 3♤ A♤ 5♤ 4♤",
},
best: []string{"2♤ 3♤ A♤ 5♤ 4♤"},
},
{
name: "best straight flush is royal flush",
hands: []string{
"4♤ 5♤ 8♤ J♤ K♤",
"2♢ 8♡ 8♢ 2♡ 8♧",
"Q♡ K♡ J♡ 10♡ 9♡",
"Q♢ K♢ J♢ 10♢ A♢",
},
best: []string{"Q♢ K♢ J♢ 10♢ A♢"},
},
{
name: "tie for best pair",
hands: []string{"4♡ 2♡ 5♧ 4♢ 10♡", "4♧ 10♢ 5♤ 2♤ 4♤"},
best: []string{"4♡ 2♡ 5♧ 4♢ 10♡", "4♧ 10♢ 5♤ 2♤ 4♤"},
},
{
name: "tie of three",
hands: []string{
"A♡ 2♡ 3♡ 4♡ 5♡",
"A♤ 2♤ 3♤ 4♤ 5♤",
"5♧ 4♧ 3♧ 2♧ A♧",
"A♢ 2♢ 6♢ 4♢ 5♢",
},
best: []string{
"A♡ 2♡ 3♡ 4♡ 5♡",
"A♤ 2♤ 3♤ 4♤ 5♤",
"5♧ 4♧ 3♧ 2♧ A♧",
},
},
}
// invalidTestCases lists malformed hands that BestHand must reject with an
// error: bad ranks, wrong card counts, missing/duplicated suits, bad glyphs,
// and missing separators.
var invalidTestCases = []struct {
name string
hand string
}{
{
name: "1 is an invalid card rank",
hand: "1♢ 2♡ 3♡ 4♡ 5♡",
},
{
name: "11 is an invalid card rank",
hand: "11♢ 2♡ 3♡ 4♡ 5♡",
},
{
name: "too few cards",
hand: "2♡ 3♡ 4♡ 5♡",
},
{
name: "too many cards",
hand: "2♡ 3♡ 4♡ 5♡ 6♡ 7♡",
},
{
name: "lack of rank",
hand: "11♢ 2♡ ♡ 4♡ 5♡",
},
{
name: "lack of suit",
hand: "2♡ 3♡ 4 5♡ 7♡",
},
{
name: "H is an invalid suit",
hand: "2♡ 3♡ 4H 5♡ 7♡",
},
{
name: "♥ is an invalid suit",
hand: "2♡ 3♡ 4♥ 5♡ 7♡",
},
{
name: "lack of spacing",
hand: "2♡ 3♡ 5♡7♡ 8♡",
},
{
name: "double suits after rank",
hand: "2♡ 3♡ 5♡♡ 8♡ 9♡",
},
}
// TestTestVersion ensures the solution's testVersion matches this test file.
func TestTestVersion(t *testing.T) {
	if testVersion == targetTestVersion {
		return
	}
	t.Fatalf("Found testVersion = %v, want %v", testVersion, targetTestVersion)
}
// TestBestHandValid checks that BestHand returns the expected winning
// hand(s), in order, for every valid test case.
func TestBestHandValid(t *testing.T) {
	for _, tt := range validTestCases {
		actual, err := BestHand(tt.hands)
		if err != nil {
			// Removed the no-op `var _ error = err` that previously sat here.
			t.Fatalf("Got unexpected error in valid case %q: %v", tt.name, err)
		}
		if !reflect.DeepEqual(actual, tt.best) {
			t.Fatalf("Mismatch in result of valid case %q: got %#v, want %#v",
				tt.name, actual, tt.best)
		}
	}
}
// TestBestHandInvalid checks that BestHand rejects each malformed hand.
func TestBestHandInvalid(t *testing.T) {
	for _, tc := range invalidTestCases {
		if _, err := BestHand([]string{tc.hand}); err == nil {
			t.Fatalf("Did not get an error for invalid case %q", tc.name)
		}
	}
}
// BenchmarkBestHand measures BestHand across the full valid test-case set.
func BenchmarkBestHand(b *testing.B) {
	for n := 0; n < b.N; n++ {
		for _, tc := range validTestCases {
			_, _ = BestHand(tc.hands)
		}
	}
}
|
package search
import (
"encoding/json"
"encoding/hex"
"github.com/jinzhu/gorm"
dbModule "github.com/notegio/openrelay/db"
"github.com/notegio/openrelay/types"
"github.com/notegio/openrelay/blockhash"
"github.com/notegio/openrelay/common"
"net/http"
urlModule "net/url"
"fmt"
"strings"
"strconv"
"regexp"
)
// FormatResponse serializes a list of orders either as concatenated binary
// records (application/octet-stream) or as a JSON array (any other format).
// It returns the payload, the content type used, and any marshalling error.
func FormatResponse(orders []dbModule.Order, format string) ([]byte, string, error) {
	if format == "application/octet-stream" {
		payload := []byte{}
		for _, order := range orders {
			orderBytes := order.Bytes()
			payload = append(payload, orderBytes[:]...)
		}
		return payload, "application/octet-stream", nil
	}
	plainOrders := make([]types.Order, 0, len(orders))
	for _, order := range orders {
		plainOrders = append(plainOrders, order.Order)
	}
	payload, err := json.Marshal(plainOrders)
	return payload, "application/json", err
}
// FormatSingleResponse serializes one order as binary (application/octet-stream)
// or JSON (any other format), returning payload, content type, and marshal error.
func FormatSingleResponse(order *dbModule.Order, format string) ([]byte, string, error) {
	if format != "application/octet-stream" {
		payload, err := json.Marshal(order)
		return payload, "application/json", err
	}
	raw := order.Bytes()
	return raw[:], "application/octet-stream", nil
}
// applyFilter narrows query with "<dbField> = <address>" when queryField is
// present in the URL query; the hex-encoded address is validated first.
// The (possibly unchanged) query and any error are returned.
func applyFilter(query *gorm.DB, queryField, dbField string, queryObject urlModule.Values) (*gorm.DB, error) {
	address := queryObject.Get(queryField)
	if address == "" {
		return query, nil
	}
	addressBytes, err := common.HexToBytes(address)
	if err != nil {
		return query, err
	}
	filtered := query.Where(fmt.Sprintf("%v = ?", dbField), common.BytesToOrAddress(addressBytes))
	return filtered, filtered.Error
}
// applyOrFilter narrows query to rows where either dbField1 or dbField2
// equals the address supplied in queryField, if that parameter is present.
// The (possibly unchanged) query and any error are returned.
func applyOrFilter(query *gorm.DB, queryField, dbField1, dbField2 string, queryObject urlModule.Values) (*gorm.DB, error) {
	address := queryObject.Get(queryField)
	if address == "" {
		return query, nil
	}
	addressBytes, err := common.HexToBytes(address)
	if err != nil {
		return query, err
	}
	orAddress := common.BytesToOrAddress(addressBytes)
	filtered := query.Where(fmt.Sprintf("%v = ? or %v = ?", dbField1, dbField2), orAddress, orAddress)
	return filtered, filtered.Error
}
// returnError writes err as a JSON error body with the given HTTP status.
// The Content-Type header must be set BEFORE WriteHeader: net/http ignores
// headers set after the status line is written, so the original order left
// the response without a Content-Type.
func returnError(w http.ResponseWriter, err error, code int) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	w.Write([]byte(fmt.Sprintf("{\"error\": \"%v\"}", err.Error())))
}
// getPages extracts pagination parameters from the URL query, defaulting to
// page 1 with 20 items per page. Non-numeric values are rejected, as are
// non-positive values, which would otherwise yield a negative SQL offset.
func getPages(queryObject urlModule.Values) (int, int, error) {
	pageStr := queryObject.Get("page")
	if pageStr == "" {
		pageStr = "1"
	}
	perPageStr := queryObject.Get("per_page")
	if perPageStr == "" {
		perPageStr = "20"
	}
	pageInt, err := strconv.Atoi(pageStr)
	if err != nil {
		return 0, 0, err
	}
	perPageInt, err := strconv.Atoi(perPageStr)
	if err != nil {
		return 0, 0, err
	}
	if pageInt < 1 || perPageInt < 1 {
		return 0, 0, fmt.Errorf("page and per_page must be positive")
	}
	return pageInt, perPageInt, nil
}
// BlockHashDecorator wraps fn so that every request carries a "blockhash"
// query parameter. Requests lacking one receive a 307 redirect to the same
// URL with the current block hash appended.
func BlockHashDecorator(blockHash blockhash.BlockHash, fn func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
	blockHash.Get() // Start the go routines, if necessary
	return func(w http.ResponseWriter, r *http.Request) {
		queryObject := r.URL.Query()
		if queryObject.Get("blockhash") != "" {
			fn(w, r)
			return
		}
		queryObject.Set("blockhash", blockHash.Get())
		redirectURL := *r.URL
		redirectURL.RawQuery = queryObject.Encode()
		http.Redirect(w, r, redirectURL.RequestURI(), 307)
	}
}
// SearchHandler returns an HTTP handler that searches orders filtered by the
// request's query-string parameters, paginated via page/per_page, and serves
// them in the format indicated by the Accept header.
func SearchHandler(db *gorm.DB) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		queryObject := r.URL.Query()
		query := db.Model(&dbModule.Order{})
		// Each filter narrows the query only when its parameter is present.
		query, err := applyFilter(query, "exchangeContractAddress", "exchange_address", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyFilter(query, "makerTokenAddress", "maker_token", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyFilter(query, "takerTokenAddress", "taker_token", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyFilter(query, "maker", "maker", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyFilter(query, "taker", "taker", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyFilter(query, "feeRecipient", "fee_recipient", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyOrFilter(query, "tokenAddress", "maker_token", "taker_token", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyOrFilter(query, "trader", "maker", "taker", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		pageInt, perPageInt, err := getPages(queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query = query.Offset((pageInt - 1) * perPageInt).Limit(perPageInt)
		if query.Error != nil {
			returnError(w, query.Error, 400)
			return
		}
		if queryObject.Get("makerTokenAddress") != "" && queryObject.Get("takerTokenAddress") != "" {
			// Assign to the outer query. The original used := here, shadowing
			// query, so the ordering was discarded before Find ran.
			query = query.Order("price asc, fee_rate asc")
			if query.Error != nil {
				returnError(w, query.Error, 400)
				return
			}
		}
		orders := []dbModule.Order{}
		if err := query.Find(&orders).Error; err != nil {
			returnError(w, err, 500)
			return
		}
		// Use only the media type from Accept, dropping parameters such as
		// charset.
		var acceptHeader string
		if acceptVal, ok := r.Header["Accept"]; ok {
			acceptHeader = strings.Split(acceptVal[0], ";")[0]
		} else {
			acceptHeader = "unknown"
		}
		response, contentType, err := FormatResponse(orders, acceptHeader)
		if err != nil {
			returnError(w, err, 500)
			return
		}
		// Set headers before WriteHeader; the original order dropped the
		// Content-Type.
		w.Header().Set("Content-Type", contentType)
		w.WriteHeader(200)
		w.Write(response)
	}
}
// orderRegex extracts the hex order hash from request paths of the form
// ".../order/0x<hash>". Compiled once at package scope rather than on every
// request.
var orderRegex = regexp.MustCompile(".*/order/0x([0-9a-fA-F]+)")

// OrderHandler returns an HTTP handler that looks up a single order by its
// hash and serves it in the format indicated by the Accept header.
func OrderHandler(db *gorm.DB) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		pathMatch := orderRegex.FindStringSubmatch(r.URL.Path)
		if pathMatch == nil {
			// FindStringSubmatch returns nil when nothing matches; the
			// original indexed the result unconditionally and panicked on
			// paths without a well-formed order hash.
			returnError(w, fmt.Errorf("Malformed order hash"), 404)
			return
		}
		hashBytes, err := hex.DecodeString(pathMatch[1])
		if err != nil {
			returnError(w, err, 400)
			return
		}
		order := &dbModule.Order{}
		query := db.Model(&dbModule.Order{}).Where("order_hash = ?", hashBytes).First(order)
		if query.Error != nil {
			returnError(w, query.Error, 500)
			return
		}
		// Use only the media type from Accept, dropping parameters.
		var acceptHeader string
		if acceptVal, ok := r.Header["Accept"]; ok {
			acceptHeader = strings.Split(acceptVal[0], ";")[0]
		} else {
			acceptHeader = "unknown"
		}
		response, contentType, err := FormatSingleResponse(order, acceptHeader)
		if err != nil {
			returnError(w, err, 500)
			return
		}
		// Set headers before WriteHeader or they are silently dropped.
		w.Header().Set("Content-Type", contentType)
		w.WriteHeader(200)
		w.Write(response)
	}
}
// PairHandler returns an HTTP handler serving token pairs as JSON. With no
// token parameters it pages through all pairs; with tokenA (or tokenB) alone
// it lists pairs involving that token; with both it looks up the exact pair.
func PairHandler(db *gorm.DB) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		queryObject := r.URL.Query()
		tokenAString := queryObject.Get("tokenA")
		tokenBString := queryObject.Get("tokenB")
		// A single-token query may arrive in either parameter; normalize so
		// tokenA is always the populated one.
		if tokenAString == "" && tokenBString != "" {
			tokenAString, tokenBString = tokenBString, ""
		}
		pageInt, perPageInt, err := getPages(queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		offset := (pageInt - 1) * perPageInt
		var pairs []dbModule.Pair
		if tokenAString == "" {
			pairs, err = dbModule.GetAllTokenPairs(db, offset, perPageInt)
		} else {
			tokenABytes, hexErr := common.HexToBytes(tokenAString)
			if hexErr != nil {
				returnError(w, hexErr, 400)
				return
			}
			tokenAAddress := common.BytesToOrAddress(tokenABytes)
			if tokenBString == "" {
				pairs, err = dbModule.GetTokenAPairs(db, tokenAAddress, offset, perPageInt)
			} else {
				tokenBBytes, hexErr := common.HexToBytes(tokenBString)
				if hexErr != nil {
					returnError(w, hexErr, 400)
					return
				}
				tokenBAddress := common.BytesToOrAddress(tokenBBytes)
				// Assign to the outer err. The original's := on the hex
				// decode shadowed err in this scope, so failures from
				// GetTokenABPairs were silently dropped.
				pairs, err = dbModule.GetTokenABPairs(db, tokenAAddress, tokenBAddress)
			}
		}
		if err != nil {
			returnError(w, err, 400)
			return
		}
		response, err := json.Marshal(pairs)
		if err != nil {
			returnError(w, err, 500)
			return
		}
		// Set headers before WriteHeader; the original order dropped the
		// Content-Type.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(200)
		w.Write(response)
	}
}
Improve error handling
package search
import (
"encoding/json"
"encoding/hex"
"github.com/jinzhu/gorm"
dbModule "github.com/notegio/openrelay/db"
"github.com/notegio/openrelay/types"
"github.com/notegio/openrelay/blockhash"
"github.com/notegio/openrelay/common"
"net/http"
urlModule "net/url"
"fmt"
"strings"
"strconv"
"regexp"
"errors"
)
// FormatResponse serializes a list of orders according to the requested
// content type: the orders' raw binary concatenation for
// "application/octet-stream", a JSON array for anything else.
func FormatResponse(orders []dbModule.Order, format string) ([]byte, string, error) {
	if format != "application/octet-stream" {
		// JSON path: marshal just the embedded exchange orders.
		orderList := make([]types.Order, 0, len(orders))
		for _, order := range orders {
			orderList = append(orderList, order.Order)
		}
		result, err := json.Marshal(orderList)
		return result, "application/json", err
	}
	// Binary path: concatenate each order's fixed-size byte representation.
	result := []byte{}
	for _, order := range orders {
		orderBytes := order.Bytes()
		result = append(result, orderBytes[:]...)
	}
	return result, "application/octet-stream", nil
}
// FormatSingleResponse serializes one order according to the requested
// content type: raw bytes for "application/octet-stream", JSON otherwise.
func FormatSingleResponse(order *dbModule.Order, format string) ([]byte, string, error) {
	if format != "application/octet-stream" {
		result, err := json.Marshal(order)
		return result, "application/json", err
	}
	result := order.Bytes()
	return result[:], "application/octet-stream", nil
}
// applyFilter narrows query with an equality condition on dbField whenever
// queryField appears in the request's query string. The hex address is
// decoded and bound as a parameter; decode failures return the untouched
// query together with the error.
func applyFilter(query *gorm.DB, queryField, dbField string, queryObject urlModule.Values) (*gorm.DB, error) {
	address := queryObject.Get(queryField)
	if address == "" {
		return query, nil
	}
	addressBytes, err := common.HexToBytes(address)
	if err != nil {
		return query, err
	}
	// dbField is supplied by our own callers, never by the client, so it is
	// safe to splice into the clause; the address itself is a bound value.
	filtered := query.Where(dbField+" = ?", common.BytesToOrAddress(addressBytes))
	return filtered, filtered.Error
}
// applyOrFilter narrows query so the decoded address must match either
// dbField1 or dbField2, when queryField appears in the query string. Mirrors
// applyFilter with a two-column OR clause.
func applyOrFilter(query *gorm.DB, queryField, dbField1, dbField2 string, queryObject urlModule.Values) (*gorm.DB, error) {
	address := queryObject.Get(queryField)
	if address == "" {
		return query, nil
	}
	addressBytes, err := common.HexToBytes(address)
	if err != nil {
		return query, err
	}
	filtered := query.Where(
		dbField1+" = ? or "+dbField2+" = ?",
		common.BytesToOrAddress(addressBytes),
		common.BytesToOrAddress(addressBytes),
	)
	return filtered, filtered.Error
}
func returnError(w http.ResponseWriter, err error, code int) {
w.WriteHeader(code)
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(fmt.Sprintf("{\"error\": \"%v\"}", err.Error())))
}
func getPages(queryObject urlModule.Values) (int, int, error){
pageStr := queryObject.Get("page")
if pageStr == "" {
pageStr = "1"
}
perPageStr := queryObject.Get("per_page")
if perPageStr == "" {
perPageStr = "20"
}
pageInt, err := strconv.Atoi(pageStr)
if err != nil {
return 0, 0, err
}
perPageInt, err := strconv.Atoi(perPageStr)
if err != nil {
return 0, 0, err
}
return pageInt, perPageInt, nil
}
// BlockHashDecorator wraps an HTTP handler, ensuring every request carries a
// "blockhash" query parameter. Requests without one are redirected with a 307
// (method-preserving) to the same URL with the current block hash appended.
func BlockHashDecorator(blockHash blockhash.BlockHash, fn func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
	blockHash.Get() // Start the go routines, if necessary
	return func(w http.ResponseWriter, r *http.Request) {
		queryObject := r.URL.Query()
		hash := queryObject.Get("blockhash")
		if hash == "" {
			// Trim surrounding double quotes from the hash value — it is
			// presumably stored JSON-encoded upstream; confirm against the
			// blockhash package.
			queryObject.Set("blockhash", strings.Trim(blockHash.Get(), "\""))
			// Copy the URL before mutating so the original request's URL is
			// left untouched.
			url := *r.URL
			url.RawQuery = queryObject.Encode()
			http.Redirect(w, r, (&url).RequestURI(), 307)
			return
		}
		fn(w, r)
	}
}
// SearchHandler returns an HTTP handler that searches orders filtered by the
// request's query-string parameters, paginated via page/per_page, and serves
// them in the format indicated by the Accept header.
func SearchHandler(db *gorm.DB) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		queryObject := r.URL.Query()
		query := db.Model(&dbModule.Order{})
		// Each filter narrows the query only when its parameter is present.
		query, err := applyFilter(query, "exchangeContractAddress", "exchange_address", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyFilter(query, "makerTokenAddress", "maker_token", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyFilter(query, "takerTokenAddress", "taker_token", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyFilter(query, "maker", "maker", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyFilter(query, "taker", "taker", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyFilter(query, "feeRecipient", "fee_recipient", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyOrFilter(query, "tokenAddress", "maker_token", "taker_token", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query, err = applyOrFilter(query, "trader", "maker", "taker", queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		pageInt, perPageInt, err := getPages(queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		query = query.Offset((pageInt - 1) * perPageInt).Limit(perPageInt)
		if query.Error != nil {
			returnError(w, query.Error, 400)
			return
		}
		if queryObject.Get("makerTokenAddress") != "" && queryObject.Get("takerTokenAddress") != "" {
			// Assign to the outer query. The original used := here, shadowing
			// query, so the ordering was discarded before Find ran.
			query = query.Order("price asc, fee_rate asc")
			if query.Error != nil {
				returnError(w, query.Error, 400)
				return
			}
		}
		orders := []dbModule.Order{}
		if err := query.Find(&orders).Error; err != nil {
			returnError(w, err, 500)
			return
		}
		// Use only the media type from Accept, dropping parameters such as
		// charset.
		var acceptHeader string
		if acceptVal, ok := r.Header["Accept"]; ok {
			acceptHeader = strings.Split(acceptVal[0], ";")[0]
		} else {
			acceptHeader = "unknown"
		}
		response, contentType, err := FormatResponse(orders, acceptHeader)
		if err != nil {
			returnError(w, err, 500)
			return
		}
		// Set headers before WriteHeader; the original order dropped the
		// Content-Type.
		w.Header().Set("Content-Type", contentType)
		w.WriteHeader(200)
		w.Write(response)
	}
}
// orderRegex extracts the hex order hash from request paths of the form
// ".../order/0x<hash>". Compiled once at package scope rather than on every
// request.
var orderRegex = regexp.MustCompile(".*/order/0x([0-9a-fA-F]+)")

// OrderHandler returns an HTTP handler that looks up a single order by its
// hash and serves it in the format indicated by the Accept header. Unmatched
// paths and unknown hashes yield 404.
func OrderHandler(db *gorm.DB) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		pathMatch := orderRegex.FindStringSubmatch(r.URL.Path)
		if len(pathMatch) == 0 {
			returnError(w, errors.New("Malformed order hash"), 404)
			return
		}
		hashBytes, err := hex.DecodeString(pathMatch[1])
		if err != nil {
			returnError(w, err, 400)
			return
		}
		order := &dbModule.Order{}
		query := db.Model(&dbModule.Order{}).Where("order_hash = ?", hashBytes).First(order)
		if query.Error != nil {
			// NOTE(review): matching gorm's not-found condition by message
			// text is fragile; gorm.ErrRecordNotFound would be sturdier if
			// the vendored gorm version exposes it — confirm before switching.
			if query.Error.Error() == "record not found" {
				returnError(w, query.Error, 404)
			} else {
				returnError(w, query.Error, 500)
			}
			return
		}
		// Use only the media type from Accept, dropping parameters.
		var acceptHeader string
		if acceptVal, ok := r.Header["Accept"]; ok {
			acceptHeader = strings.Split(acceptVal[0], ";")[0]
		} else {
			acceptHeader = "unknown"
		}
		response, contentType, err := FormatSingleResponse(order, acceptHeader)
		if err != nil {
			returnError(w, err, 500)
			return
		}
		// Set headers before WriteHeader or they are silently dropped.
		w.Header().Set("Content-Type", contentType)
		w.WriteHeader(200)
		w.Write(response)
	}
}
// PairHandler returns an HTTP handler serving token pairs as JSON. With no
// token parameters it pages through all pairs; with tokenA (or tokenB) alone
// it lists pairs involving that token; with both it looks up the exact pair.
func PairHandler(db *gorm.DB) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		queryObject := r.URL.Query()
		tokenAString := queryObject.Get("tokenA")
		tokenBString := queryObject.Get("tokenB")
		// A single-token query may arrive in either parameter; normalize so
		// tokenA is always the populated one.
		if tokenAString == "" && tokenBString != "" {
			tokenAString, tokenBString = tokenBString, ""
		}
		pageInt, perPageInt, err := getPages(queryObject)
		if err != nil {
			returnError(w, err, 400)
			return
		}
		offset := (pageInt - 1) * perPageInt
		var pairs []dbModule.Pair
		if tokenAString == "" {
			pairs, err = dbModule.GetAllTokenPairs(db, offset, perPageInt)
		} else {
			tokenABytes, hexErr := common.HexToBytes(tokenAString)
			if hexErr != nil {
				returnError(w, hexErr, 400)
				return
			}
			tokenAAddress := common.BytesToOrAddress(tokenABytes)
			if tokenBString == "" {
				pairs, err = dbModule.GetTokenAPairs(db, tokenAAddress, offset, perPageInt)
			} else {
				tokenBBytes, hexErr := common.HexToBytes(tokenBString)
				if hexErr != nil {
					returnError(w, hexErr, 400)
					return
				}
				tokenBAddress := common.BytesToOrAddress(tokenBBytes)
				// Assign to the outer err. The original's := on the hex
				// decode shadowed err in this scope, so failures from
				// GetTokenABPairs were silently dropped.
				pairs, err = dbModule.GetTokenABPairs(db, tokenAAddress, tokenBAddress)
			}
		}
		if err != nil {
			returnError(w, err, 400)
			return
		}
		response, err := json.Marshal(pairs)
		if err != nil {
			returnError(w, err, 500)
			return
		}
		// Set headers before WriteHeader; the original order dropped the
		// Content-Type.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(200)
		w.Write(response)
	}
}
|
// Package search deals with all search queries going to elasticsearch, and
// returning their result
package search
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"github.com/levenlabs/go-llog"
"github.com/levenlabs/thumper/config"
)
// Hit describes one of the documents matched by a search
type Hit struct {
	Index  string                 `json:"_index"`  // The index the hit came from
	Type   string                 `json:"_type"`   // The type the document is
	ID     string                 `json:"_id"`     // The unique id of the document
	Score  float64                `json:"_score"`  // The document's score relative to the search
	Source map[string]interface{} `json:"_source"` // The actual document
}

// HitInfo describes information in the Result related to the actual hits
type HitInfo struct {
	HitCount    uint64  `json:"total"`     // The total number of documents matched
	HitMaxScore float64 `json:"max_score"` // The maximum score of all the documents matched
	Hits        []Hit   `json:"hits"`      // The actual documents matched
}

// Result describes the returned data from a search
type Result struct {
	TookMS       uint64                   `json:"took"`      // Time search took to complete, in milliseconds
	TimedOut     bool                     `json:"timed_out"` // Whether or not the search timed out
	HitInfo      `json:"hits" luautil:",inline"`             // Information related to the actual hits (embedded, inlined)
	Aggregations map[string]interface{}   `json:"aggregations"` // Information related to aggregations in the query
}

// elasticError captures the "reason" field of an elasticsearch error response
// body so the server-side failure message can be surfaced to the caller.
type elasticError struct {
	Error string `json:"reason"`
}
// Dict represents a key-value map which may be unmarshalled from a yaml
// document. It is unique in that it enforces all the keys to be strings (where
// the default behavior in the yaml package is to have keys be interface{}), and
// for any embedded objects it finds it will decode them into Dicts instead of
// map[interface{}]interface{}
type Dict map[string]interface{}

// UnmarshalYAML is used to unmarshal a yaml string into the Dict. See the
// dict's doc string for more details on what it is used for
func (d *Dict) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Decode into the yaml package's native map form first, then convert it,
	// failing if any key is not a string.
	var m map[interface{}]interface{}
	if err := unmarshal(&m); err != nil {
		return err
	}
	var err error
	*d, err = mapToDict(m)
	return err
}
// mapToDict recursively converts a map with interface{} keys (as produced by
// the yaml package) into a Dict, enforcing that every key is a string. Maps
// nested directly or inside slices are converted as well; any non-string key
// anywhere in the tree aborts with an error.
func mapToDict(m map[interface{}]interface{}) (Dict, error) {
	d := Dict{}
	for k, v := range m {
		ks, ok := k.(string)
		if !ok {
			// Report the offending key itself. The original formatted ks,
			// the zero-value result of the failed assertion, so the message
			// always showed an empty string.
			return nil, fmt.Errorf("non-string key found: %v", k)
		}
		switch vi := v.(type) {
		case map[interface{}]interface{}:
			vd, err := mapToDict(vi)
			if err != nil {
				return nil, err
			}
			d[ks] = vd
		case []interface{}:
			// Convert any maps embedded in the slice in place.
			for i := range vi {
				if vid, ok := vi[i].(map[interface{}]interface{}); ok {
					vd, err := mapToDict(vid)
					if err != nil {
						return nil, err
					}
					vi[i] = vd
				}
			}
			d[ks] = vi
		default:
			d[ks] = vi
		}
	}
	return d, nil
}
// Search performs a search against the given elasticsearch index for
// documents of the given type. The search must json marshal into a valid
// elasticsearch request body query
// (see https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html)
func Search(index, typ string, search interface{}) (Result, error) {
	u := fmt.Sprintf("http://%s/%s/%s/_search", config.ElasticSearchAddr, index, typ)
	bodyReq, err := json.Marshal(search)
	if err != nil {
		return Result{}, err
	}
	// Elasticsearch's _search endpoint accepts a request body on GET.
	req, err := http.NewRequest("GET", u, bytes.NewBuffer(bodyReq))
	if err != nil {
		return Result{}, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return Result{}, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return Result{}, err
	}
	// Non-200 responses carry an error document; surface its "reason" field
	// as the returned error.
	if resp.StatusCode != 200 {
		var e elasticError
		if err := json.Unmarshal(body, &e); err != nil {
			llog.Error("could not unmarshal error body", llog.KV{
				"err": err,
				"body": string(body),
			})
			return Result{}, err
		}
		return Result{}, errors.New(e.Error)
	}
	var result Result
	if err := json.Unmarshal(body, &result); err != nil {
		llog.Error("could not unmarshal search result", llog.KV{
			"err": err,
			"body": string(body),
		})
		return result, err
	} else if result.TimedOut {
		// An elasticsearch-side timeout is reported as an error; the partial
		// result is still returned alongside it.
		return result, errors.New("search timed out in elasticsearch")
	}
	return result, nil
}
Improve debug logging during the search step
// Package search deals with all search queries going to elasticsearch, and
// returning their result
package search
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"github.com/levenlabs/go-llog"
"github.com/levenlabs/thumper/config"
)
// Hit describes one of the documents matched by a search
type Hit struct {
	Index  string                 `json:"_index"`  // The index the hit came from
	Type   string                 `json:"_type"`   // The type the document is
	ID     string                 `json:"_id"`     // The unique id of the document
	Score  float64                `json:"_score"`  // The document's score relative to the search
	Source map[string]interface{} `json:"_source"` // The actual document
}

// HitInfo describes information in the Result related to the actual hits
type HitInfo struct {
	HitCount    uint64  `json:"total"`     // The total number of documents matched
	HitMaxScore float64 `json:"max_score"` // The maximum score of all the documents matched
	Hits        []Hit   `json:"hits"`      // The actual documents matched
}

// Result describes the returned data from a search
type Result struct {
	TookMS       uint64                   `json:"took"`      // Time search took to complete, in milliseconds
	TimedOut     bool                     `json:"timed_out"` // Whether or not the search timed out
	HitInfo      `json:"hits" luautil:",inline"`             // Information related to the actual hits (embedded, inlined)
	Aggregations map[string]interface{}   `json:"aggregations"` // Information related to aggregations in the query
}

// elasticError captures the "reason" field of an elasticsearch error response
// body so the server-side failure message can be surfaced to the caller.
type elasticError struct {
	Error string `json:"reason"`
}
// Dict represents a key-value map which may be unmarshalled from a yaml
// document. It is unique in that it enforces all the keys to be strings (where
// the default behavior in the yaml package is to have keys be interface{}), and
// for any embedded objects it finds it will decode them into Dicts instead of
// map[interface{}]interface{}
type Dict map[string]interface{}

// UnmarshalYAML is used to unmarshal a yaml string into the Dict. See the
// dict's doc string for more details on what it is used for
func (d *Dict) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Decode into the yaml package's native map form first, then convert it,
	// failing if any key is not a string.
	var m map[interface{}]interface{}
	if err := unmarshal(&m); err != nil {
		return err
	}
	var err error
	*d, err = mapToDict(m)
	return err
}
// mapToDict recursively converts a map with interface{} keys (as produced by
// the yaml package) into a Dict, enforcing that every key is a string. Maps
// nested directly or inside slices are converted as well; any non-string key
// anywhere in the tree aborts with an error.
func mapToDict(m map[interface{}]interface{}) (Dict, error) {
	d := Dict{}
	for k, v := range m {
		ks, ok := k.(string)
		if !ok {
			// Report the offending key itself. The original formatted ks,
			// the zero-value result of the failed assertion, so the message
			// always showed an empty string.
			return nil, fmt.Errorf("non-string key found: %v", k)
		}
		switch vi := v.(type) {
		case map[interface{}]interface{}:
			vd, err := mapToDict(vi)
			if err != nil {
				return nil, err
			}
			d[ks] = vd
		case []interface{}:
			// Convert any maps embedded in the slice in place.
			for i := range vi {
				if vid, ok := vi[i].(map[interface{}]interface{}); ok {
					vd, err := mapToDict(vid)
					if err != nil {
						return nil, err
					}
					vi[i] = vd
				}
			}
			d[ks] = vi
		default:
			d[ks] = vi
		}
	}
	return d, nil
}
// Search performs a search against the given elasticsearch index for
// documents of the given type. The search must json marshal into a valid
// elasticsearch request body query
// (see https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html)
func Search(index, typ string, search interface{}) (Result, error) {
	u := fmt.Sprintf("http://%s/%s/%s/_search", config.ElasticSearchAddr, index, typ)
	bodyReq, err := json.Marshal(search)
	if err != nil {
		return Result{}, err
	}
	// Elasticsearch's _search endpoint accepts a request body on GET.
	req, err := http.NewRequest("GET", u, bytes.NewBuffer(bodyReq))
	if err != nil {
		return Result{}, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return Result{}, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return Result{}, err
	}
	// The shared kv is reused by every log call below; note this logs the
	// full response body at debug level on every search.
	kv := llog.KV{"body": string(body)}
	llog.Debug("search results", kv)
	// Non-200 responses carry an error document; surface its "reason" field
	// as the returned error.
	if resp.StatusCode != 200 {
		var e elasticError
		if err := json.Unmarshal(body, &e); err != nil {
			llog.Error("could not unmarshal error body", kv, llog.ErrKV(err))
			return Result{}, err
		}
		return Result{}, errors.New(e.Error)
	}
	var result Result
	if err := json.Unmarshal(body, &result); err != nil {
		llog.Error("could not unmarshal search result", kv, llog.ErrKV(err))
		return result, err
	} else if result.TimedOut {
		// An elasticsearch-side timeout is reported as an error; the partial
		// result is still returned alongside it.
		return result, errors.New("search timed out in elasticsearch")
	}
	return result, nil
}
|
// Copyright 2016 Hajime Hoshi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build js
package vorbis
import (
"bytes"
"io"
"io/ioutil"
"github.com/gopherjs/gopherjs/js"
"github.com/hajimehoshi/ebiten/exp/audio"
)
// TODO: This just uses decodeAudioData can treat audio files other than Ogg/Vorbis.
// TODO: This doesn't work on iOS which doesn't have Ogg/Vorbis decoder.

// Decode reads all of src and decodes it with the browser's
// OfflineAudioContext.decodeAudioData, blocking until the decode callback has
// run. The two decoded float32 channels are converted to interleaved 16-bit
// little-endian PCM (left then right) and exposed through the returned stream.
func Decode(context *audio.Context, src io.Reader) (Stream, error) {
	b, err := ioutil.ReadAll(src)
	if err != nil {
		return nil, err
	}
	s := &stream{
		sampleRate: context.SampleRate(),
	}
	ch := make(chan struct{})
	// TODO: 1 is a correct second argument?
	oc := js.Global.Get("OfflineAudioContext").New(2, 1, context.SampleRate())
	oc.Call("decodeAudioData", js.NewArrayBuffer(b), func(buf *js.Object) {
		defer close(ch)
		il := buf.Call("getChannelData", 0).Interface().([]float32)
		ir := buf.Call("getChannelData", 1).Interface().([]float32)
		b := make([]byte, len(il)*4)
		for i := 0; i < len(il); i++ {
			// Scale [-1, 1) floats to int16 and interleave L/R,
			// little-endian: 4 bytes per frame.
			l := int16(il[i] * (1 << 15))
			r := int16(ir[i] * (1 << 15))
			b[4*i] = uint8(l)
			b[4*i+1] = uint8(l >> 8)
			b[4*i+2] = uint8(r)
			b[4*i+3] = uint8(r >> 8)
		}
		s.buf = bytes.NewReader(b)
	})
	// Block until decodeAudioData's callback closes ch.
	<-ch
	return s, nil
}
audio/vorbis: Enable parallel decoding in JavaScript
// Copyright 2016 Hajime Hoshi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build js
package vorbis
import (
"bytes"
"io"
"io/ioutil"
"runtime"
"github.com/gopherjs/gopherjs/js"
"github.com/hajimehoshi/ebiten/exp/audio"
)
// TODO: This just uses decodeAudioData can treat audio files other than Ogg/Vorbis.
// TODO: This doesn't work on iOS which doesn't have Ogg/Vorbis decoder.

// Decode reads all of src and decodes it with the browser's
// OfflineAudioContext.decodeAudioData, blocking until conversion completes.
// The two decoded float32 channels are converted to interleaved 16-bit
// little-endian PCM (left then right) and exposed through the returned
// stream. The conversion runs in a goroutine that periodically yields so
// other goroutines can make progress during long decodes.
func Decode(context *audio.Context, src io.Reader) (Stream, error) {
	b, err := ioutil.ReadAll(src)
	if err != nil {
		return nil, err
	}
	s := &stream{
		sampleRate: context.SampleRate(),
	}
	ch := make(chan struct{})
	// TODO: 1 is a correct second argument?
	oc := js.Global.Get("OfflineAudioContext").New(2, 1, context.SampleRate())
	oc.Call("decodeAudioData", js.NewArrayBuffer(b), func(buf *js.Object) {
		go func() {
			defer close(ch)
			il := buf.Call("getChannelData", 0).Interface().([]float32)
			ir := buf.Call("getChannelData", 1).Interface().([]float32)
			b := make([]byte, len(il)*4)
			for i := 0; i < len(il); i++ {
				// Scale [-1, 1) floats to int16 and interleave L/R,
				// little-endian: 4 bytes per frame.
				l := int16(il[i] * (1 << 15))
				r := int16(ir[i] * (1 << 15))
				b[4*i] = uint8(l)
				b[4*i+1] = uint8(l >> 8)
				b[4*i+2] = uint8(r)
				b[4*i+3] = uint8(r >> 8)
				// Yield periodically so this long loop doesn't starve other
				// goroutines.
				if i%16384 == 0 {
					runtime.Gosched()
				}
			}
			s.buf = bytes.NewReader(b)
		}()
	})
	// Block until the conversion goroutine closes ch.
	<-ch
	return s, nil
}
|
package hashstructure
import (
"encoding/binary"
"fmt"
"hash"
"hash/fnv"
"reflect"
"time"
)
// ErrNotStringer is returned when a field carries the hash:"string" tag but
// its type does not implement fmt.Stringer.
type ErrNotStringer struct {
	Field string // name of the offending struct field
}

// Error implements the error interface for ErrNotStringer.
func (ens *ErrNotStringer) Error() string {
	return fmt.Sprintf("hashstructure: %s has hash:%q set, but does not implement fmt.Stringer", ens.Field, "string")
}
// HashOptions are options that are available for hashing.
type HashOptions struct {
	// Hasher is the hash function to use. If this isn't set, it will
	// default to FNV.
	Hasher hash.Hash64

	// TagName is the struct tag to look at when hashing the structure.
	// By default this is "hash".
	TagName string

	// ZeroNil is a flag determining if nil pointer should be treated equal
	// to a zero value of pointed type. By default this is false.
	ZeroNil bool

	// IgnoreZeroValue is determining if zero value fields should be
	// ignored for hash calculation.
	IgnoreZeroValue bool

	// SlicesAsSets assumes that a `set` tag is always present for slices.
	// Default is false (in which case the tag is used instead)
	SlicesAsSets bool

	// UseStringer will attempt to use fmt.Stringer always. If the struct
	// doesn't implement fmt.Stringer, it'll fall back to trying usual tricks.
	// If this is true, and the "string" tag is also set, the tag takes
	// precedence (meaning that if the type doesn't implement fmt.Stringer, we
	// panic)
	UseStringer bool
}
// Hash returns the hash value of an arbitrary value.
//
// If opts is nil, then default options will be used. See HashOptions
// for the default values. The same *HashOptions value cannot be used
// concurrently. None of the values within a *HashOptions struct are
// safe to read/write while hashing is being done.
//
// Notes on the value:
//
//   - Unexported fields on structs are ignored and do not affect the
//     hash value.
//
//   - Adding an exported field to a struct with the zero value will change
//     the hash value.
//
// For structs, the hashing can be controlled using tags. For example:
//
//	struct {
//	    Name string
//	    UUID string `hash:"ignore"`
//	}
//
// The available tag values are:
//
//   - "ignore" or "-" - The field will be ignored and not affect the hash code.
//
//   - "set" - The field will be treated as a set, where ordering doesn't
//     affect the hash code. This only works for slices.
//
//   - "string" - The field will be hashed as a string, only works when the
//     field implements fmt.Stringer
func Hash(v interface{}, opts *HashOptions) (uint64, error) {
	// Create default options
	if opts == nil {
		opts = &HashOptions{}
	}
	if opts.Hasher == nil {
		// 64-bit FNV is the default hash function.
		opts.Hasher = fnv.New64()
	}
	if opts.TagName == "" {
		opts.TagName = "hash"
	}

	// Reset the hash
	opts.Hasher.Reset()

	// Create our walker and walk the structure; the walker carries every
	// option so the recursion doesn't need the HashOptions value itself.
	w := &walker{
		h:               opts.Hasher,
		tag:             opts.TagName,
		zeronil:         opts.ZeroNil,
		ignorezerovalue: opts.IgnoreZeroValue,
		sets:            opts.SlicesAsSets,
		stringer:        opts.UseStringer,
	}
	return w.visit(reflect.ValueOf(v), nil)
}
// walker carries the hashing state and flattened options through the
// recursive visit.
type walker struct {
	h               hash.Hash64 // hash function, Reset and reused per leaf value
	tag             string      // struct tag name to consult (default "hash")
	zeronil         bool        // treat nil pointers as the pointed-to zero value
	ignorezerovalue bool        // skip zero-valued struct fields
	sets            bool        // treat all slices as sets
	stringer        bool        // prefer fmt.Stringer when available
}

// visitOpts carries per-field context from a struct visit down into the
// visit of that field's (or map entry's) value.
type visitOpts struct {
	// Flags are a bitmask of flags to affect behavior of this visit
	Flags visitFlag

	// Information about the struct containing this field
	Struct      interface{}
	StructField string
}

// timeType lets visit special-case time.Time values.
var timeType = reflect.TypeOf(time.Time{})
// visit recursively computes the hash of v. opts carries per-field context
// when v came from a struct or map and may be nil. The shared hasher w.h is
// Reset and reused for each leaf value; subtree hashes are combined with the
// hashUpdate*/hashFinishUnordered helpers.
func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) {
	// Default type used when a nil must be replaced by a zero value. Only
	// overridden below when w.zeronil is set; otherwise an invalid value is
	// hashed as int 0.
	t := reflect.TypeOf(0)

	// Loop since these can be wrapped in multiple layers of pointers
	// and interfaces.
	for {
		// If we have an interface, dereference it. We have to do this up
		// here because it might be a nil in there and the check below must
		// catch that.
		if v.Kind() == reflect.Interface {
			v = v.Elem()
			continue
		}

		if v.Kind() == reflect.Ptr {
			if w.zeronil {
				t = v.Type().Elem()
			}
			v = reflect.Indirect(v)
			continue
		}

		break
	}

	// If it is nil, treat it like a zero.
	if !v.IsValid() {
		v = reflect.Zero(t)
	}

	// Binary writing can use raw ints, we have to convert to
	// a sized-int, we'll choose the largest...
	switch v.Kind() {
	case reflect.Int:
		v = reflect.ValueOf(int64(v.Int()))
	case reflect.Uint:
		v = reflect.ValueOf(uint64(v.Uint()))
	case reflect.Bool:
		var tmp int8
		if v.Bool() {
			tmp = 1
		}
		v = reflect.ValueOf(tmp)
	}

	k := v.Kind()

	// We can shortcut numeric values by directly binary writing them
	if k >= reflect.Int && k <= reflect.Complex64 {
		// A direct hash calculation
		w.h.Reset()
		err := binary.Write(w.h, binary.LittleEndian, v.Interface())
		return w.h.Sum64(), err
	}

	// time.Time is hashed via its binary marshaling rather than by walking
	// its (unexported) fields.
	switch v.Type() {
	case timeType:
		w.h.Reset()
		b, err := v.Interface().(time.Time).MarshalBinary()
		if err != nil {
			return 0, err
		}

		err = binary.Write(w.h, binary.LittleEndian, b)
		return w.h.Sum64(), err
	}

	switch k {
	case reflect.Array:
		// Arrays are always order-sensitive.
		var h uint64
		l := v.Len()
		for i := 0; i < l; i++ {
			current, err := w.visit(v.Index(i), nil)
			if err != nil {
				return 0, err
			}
			h = hashUpdateOrdered(w.h, h, current)
		}
		return h, nil
	case reflect.Map:
		// The enclosing struct may filter map entries via IncludableMap.
		var includeMap IncludableMap
		if opts != nil && opts.Struct != nil {
			if v, ok := opts.Struct.(IncludableMap); ok {
				includeMap = v
			}
		}

		// Build the hash for the map. We do this by XOR-ing all the key
		// and value hashes. This makes it deterministic despite ordering.
		var h uint64
		for _, k := range v.MapKeys() {
			v := v.MapIndex(k)
			if includeMap != nil {
				incl, err := includeMap.HashIncludeMap(
					opts.StructField, k.Interface(), v.Interface())
				if err != nil {
					return 0, err
				}
				if !incl {
					continue
				}
			}

			kh, err := w.visit(k, nil)
			if err != nil {
				return 0, err
			}
			vh, err := w.visit(v, nil)
			if err != nil {
				return 0, err
			}
			// Key/value pair is ordered; pairs combine unordered.
			fieldHash := hashUpdateOrdered(w.h, kh, vh)
			h = hashUpdateUnordered(h, fieldHash)
		}
		h = hashFinishUnordered(w.h, h)
		return h, nil
	case reflect.Struct:
		parent := v.Interface()
		var include Includable
		if impl, ok := parent.(Includable); ok {
			include = impl
		}

		// A type implementing Hashable takes over hashing entirely.
		if impl, ok := parent.(Hashable); ok {
			return impl.Hash()
		}

		// If we can address this value, check if the pointer value
		// implements our interfaces and use that if so.
		if v.CanAddr() {
			vptr := v.Addr()
			parentptr := vptr.Interface()
			if impl, ok := parentptr.(Includable); ok {
				include = impl
			}
			if impl, ok := parentptr.(Hashable); ok {
				return impl.Hash()
			}
		}

		t := v.Type()
		// Seed the struct hash with the type name so distinct struct types
		// with identical field values hash differently.
		h, err := w.visit(reflect.ValueOf(t.Name()), nil)
		if err != nil {
			return 0, err
		}

		l := v.NumField()
		for i := 0; i < l; i++ {
			if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
				var f visitFlag
				fieldType := t.Field(i)
				if fieldType.PkgPath != "" {
					// Unexported
					continue
				}

				tag := fieldType.Tag.Get(w.tag)
				if tag == "ignore" || tag == "-" {
					// Ignore this field
					continue
				}

				if w.ignorezerovalue {
					zeroVal := reflect.Zero(reflect.TypeOf(innerV.Interface())).Interface()
					if innerV.Interface() == zeroVal {
						continue
					}
				}

				// if string is set, use the string value
				if tag == "string" || w.stringer {
					if impl, ok := innerV.Interface().(fmt.Stringer); ok {
						innerV = reflect.ValueOf(impl.String())
					} else if tag == "string" {
						// We only show this error if the tag explicitly
						// requests a stringer.
						return 0, &ErrNotStringer{
							Field: v.Type().Field(i).Name,
						}
					}
				}

				// Check if we implement includable and check it
				if include != nil {
					incl, err := include.HashInclude(fieldType.Name, innerV)
					if err != nil {
						return 0, err
					}
					if !incl {
						continue
					}
				}

				switch tag {
				case "set":
					f |= visitFlagSet
				}

				kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil)
				if err != nil {
					return 0, err
				}
				vh, err := w.visit(innerV, &visitOpts{
					Flags:       f,
					Struct:      parent,
					StructField: fieldType.Name,
				})
				if err != nil {
					return 0, err
				}
				// Field name/value pair is ordered; fields combine unordered.
				fieldHash := hashUpdateOrdered(w.h, kh, vh)
				h = hashUpdateUnordered(h, fieldHash)
			}
			// NOTE(review): this hardening step runs on every field
			// iteration rather than once after the loop — verify against the
			// upstream intent before changing, since any change alters all
			// existing hashes.
			h = hashFinishUnordered(w.h, h)
		}

		return h, nil
	case reflect.Slice:
		// We have two behaviors here. If it isn't a set, then we just
		// visit all the elements. If it is a set, then we do a deterministic
		// hash code.
		var h uint64
		var set bool
		if opts != nil {
			set = (opts.Flags & visitFlagSet) != 0
		}
		l := v.Len()
		for i := 0; i < l; i++ {
			current, err := w.visit(v.Index(i), nil)
			if err != nil {
				return 0, err
			}
			if set || w.sets {
				h = hashUpdateUnordered(h, current)
			} else {
				h = hashUpdateOrdered(w.h, h, current)
			}
		}

		if set {
			h = hashFinishUnordered(w.h, h)
		}

		return h, nil
	case reflect.String:
		// Directly hash
		w.h.Reset()
		_, err := w.h.Write([]byte(v.String()))
		return w.h.Sum64(), err
	default:
		return 0, fmt.Errorf("unknown kind to hash: %s", k)
	}
}
// hashUpdateOrdered combines two hash values in an order-sensitive way by
// running both through the supplied hash function: swapping a and b yields
// a different result.
func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 {
	h.Reset()
	// Writing fixed-size integers to a hash sink should never fail; a
	// non-nil error here is a programming bug, so we panic.
	errA := binary.Write(h, binary.LittleEndian, a)
	errB := binary.Write(h, binary.LittleEndian, b)
	switch {
	case errA != nil:
		panic(errA)
	case errB != nil:
		panic(errB)
	}
	return h.Sum64()
}
// hashUpdateUnordered folds two hashes together so that operand order is
// irrelevant (plain XOR). Results must be hardened with hashFinishUnordered
// before being mixed unordered again; see that function's comment.
func hashUpdateUnordered(a, b uint64) uint64 {
	return b ^ a
}
// After mixing a group of unique hashes with hashUpdateUnordered, it's always
// necessary to call hashFinishUnordered. Why? Because hashUpdateUnordered
// is a simple XOR, and calling hashUpdateUnordered on hashes produced by
// hashUpdateUnordered can effectively cancel out a previous change to the hash
// result if the same hash value appears later on. For example, consider:
//
// hashUpdateUnordered(hashUpdateUnordered("A", "B"), hashUpdateUnordered("A", "C")) =
// H("A") ^ H("B")) ^ (H("A") ^ H("C")) =
// (H("A") ^ H("A")) ^ (H("B") ^ H(C)) =
// H(B) ^ H(C) =
// hashUpdateUnordered(hashUpdateUnordered("Z", "B"), hashUpdateUnordered("Z", "C"))
//
// hashFinishUnordered "hardens" the result, so that encountering partially
// overlapping input data later on in a different context won't cancel out.
// hashFinishUnordered hardens an XOR-accumulated value by passing it through
// the hash function once, preventing later unordered mixes from cancelling
// earlier contributions (see the comment block above).
func hashFinishUnordered(h hash.Hash64, a uint64) uint64 {
	h.Reset()
	if err := binary.Write(h, binary.LittleEndian, a); err != nil {
		// A fixed-size integer write to a hash cannot fail.
		panic(err)
	}
	return h.Sum64()
}
// visitFlag is used as a bitmask for affecting visit behavior
type visitFlag uint

const (
	visitFlagInvalid visitFlag = iota
	// visitFlagSet marks a value to be hashed as an unordered set.
	// Evaluates to 1<<1 == 2 (iota is 1 here), leaving bit 0 unused.
	visitFlagSet = iota << 1
)
clarifying comments
package hashstructure
import (
"encoding/binary"
"fmt"
"hash"
"hash/fnv"
"reflect"
"time"
)
// ErrNotStringer is returned when there's an error with hash:"string"
type ErrNotStringer struct {
	// Field is the name of the struct field whose tag requested a Stringer.
	Field string
}

// Error implements error for ErrNotStringer
func (ens *ErrNotStringer) Error() string {
	const format = "hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer"
	return fmt.Sprintf(format, ens.Field)
}
// HashOptions are options that are available for hashing.
type HashOptions struct {
	// Hasher is the hash function to use. If this isn't set, it will
	// default to FNV.
	Hasher hash.Hash64

	// TagName is the struct tag to look at when hashing the structure.
	// By default this is "hash".
	TagName string

	// ZeroNil is flag determining if nil pointer should be treated equal
	// to a zero value of pointed type. By default this is false.
	ZeroNil bool

	// IgnoreZeroValue is determining if zero value fields should be
	// ignored for hash calculation.
	IgnoreZeroValue bool

	// SlicesAsSets assumes that a `set` tag is always present for slices.
	// Default is false (in which case the tag is used instead)
	SlicesAsSets bool

	// UseStringer will attempt to use fmt.Stringer always. If the struct
	// doesn't implement fmt.Stringer, it'll fall back to trying usual tricks.
	// If this is true, and the "string" tag is also set, the tag takes
	// precedence (meaning that if the type doesn't implement fmt.Stringer, we
	// panic)
	UseStringer bool
}
// Hash returns the hash value of an arbitrary value.
//
// If opts is nil, then default options will be used. See HashOptions
// for the default values. The same *HashOptions value cannot be used
// concurrently. None of the values within a *HashOptions struct are
// safe to read/write while hashing is being done.
//
// Notes on the value:
//
// * Unexported fields on structs are ignored and do not affect the
// hash value.
//
// * Adding an exported field to a struct with the zero value will change
// the hash value.
//
// For structs, the hashing can be controlled using tags. For example:
//
// struct {
// Name string
// UUID string `hash:"ignore"`
// }
//
// The available tag values are:
//
// * "ignore" or "-" - The field will be ignored and not affect the hash code.
//
// * "set" - The field will be treated as a set, where ordering doesn't
// affect the hash code. This only works for slices.
//
// * "string" - The field will be hashed as a string, only works when the
// field implements fmt.Stringer
//
func Hash(v interface{}, opts *HashOptions) (uint64, error) {
	// Fill in defaults for any unset options.
	if opts == nil {
		opts = &HashOptions{}
	}
	if opts.Hasher == nil {
		opts.Hasher = fnv.New64()
	}
	if opts.TagName == "" {
		opts.TagName = "hash"
	}

	// Start from a clean hash state.
	opts.Hasher.Reset()

	// Build the traversal state and walk the value.
	walk := walker{
		h:               opts.Hasher,
		tag:             opts.TagName,
		zeronil:         opts.ZeroNil,
		ignorezerovalue: opts.IgnoreZeroValue,
		sets:            opts.SlicesAsSets,
		stringer:        opts.UseStringer,
	}
	return (&walk).visit(reflect.ValueOf(v), nil)
}
// walker carries the configuration for one Hash invocation and implements
// the recursive traversal over the value being hashed.
type walker struct {
	h   hash.Hash64 // hash function; reset before each primitive write
	tag string      // struct tag name consulted for per-field directives

	zeronil         bool // treat nil pointers as the zero value of their element type
	ignorezerovalue bool // skip struct fields holding their zero value
	sets            bool // hash every slice as an unordered set
	stringer        bool // prefer fmt.Stringer output when a value implements it
}
// visitOpts carries per-call context from a containing struct down into the
// visit of one of its fields. A nil *visitOpts means "no context".
type visitOpts struct {
	// Flags are a bitmask of flags to affect behavior of this visit
	Flags visitFlag

	// Information about the struct containing this field
	Struct      interface{}
	StructField string
}
var timeType = reflect.TypeOf(time.Time{})
// visit recursively computes the hash of v. opts (may be nil) carries
// context from an enclosing container: the set flag for slices and the
// parent struct/field name for Includable callbacks.
func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) {
	// Fallback type used to produce a zero value when v turns out to be an
	// invalid (nil) value below; defaults to int unless zeronil replaces it.
	t := reflect.TypeOf(0)

	// Loop since these can be wrapped in multiple layers of pointers
	// and interfaces.
	for {
		// If we have an interface, dereference it. We have to do this up
		// here because it might be a nil in there and the check below must
		// catch that.
		if v.Kind() == reflect.Interface {
			v = v.Elem()
			continue
		}

		if v.Kind() == reflect.Ptr {
			if w.zeronil {
				// Remember the pointed-to type so a nil pointer hashes
				// like that type's zero value.
				t = v.Type().Elem()
			}
			v = reflect.Indirect(v)
			continue
		}

		break
	}

	// If it is nil, treat it like a zero.
	if !v.IsValid() {
		v = reflect.Zero(t)
	}

	// Binary writing can use raw ints, we have to convert to
	// a sized-int, we'll choose the largest...
	switch v.Kind() {
	case reflect.Int:
		v = reflect.ValueOf(int64(v.Int()))
	case reflect.Uint:
		v = reflect.ValueOf(uint64(v.Uint()))
	case reflect.Bool:
		// Booleans are hashed as int8 0/1.
		var tmp int8
		if v.Bool() {
			tmp = 1
		}
		v = reflect.ValueOf(tmp)
	}

	k := v.Kind()

	// We can shortcut numeric values by directly binary writing them
	if k >= reflect.Int && k <= reflect.Complex64 {
		// A direct hash calculation
		w.h.Reset()
		err := binary.Write(w.h, binary.LittleEndian, v.Interface())
		return w.h.Sum64(), err
	}

	switch v.Type() {
	case timeType:
		// time.Time is hashed via its binary marshaling rather than its
		// (unexported) fields.
		w.h.Reset()
		b, err := v.Interface().(time.Time).MarshalBinary()
		if err != nil {
			return 0, err
		}

		err = binary.Write(w.h, binary.LittleEndian, b)
		return w.h.Sum64(), err
	}

	switch k {
	case reflect.Array:
		// Arrays are hashed element-by-element, order-sensitive.
		var h uint64
		l := v.Len()
		for i := 0; i < l; i++ {
			current, err := w.visit(v.Index(i), nil)
			if err != nil {
				return 0, err
			}

			h = hashUpdateOrdered(w.h, h, current)
		}

		return h, nil

	case reflect.Map:
		var includeMap IncludableMap
		if opts != nil && opts.Struct != nil {
			if v, ok := opts.Struct.(IncludableMap); ok {
				includeMap = v
			}
		}

		// Build the hash for the map. We do this by XOR-ing all the key
		// and value hashes. This makes it deterministic despite ordering.
		var h uint64
		for _, k := range v.MapKeys() {
			v := v.MapIndex(k)
			if includeMap != nil {
				// Give the parent struct a chance to exclude this entry.
				incl, err := includeMap.HashIncludeMap(
					opts.StructField, k.Interface(), v.Interface())
				if err != nil {
					return 0, err
				}
				if !incl {
					continue
				}
			}

			kh, err := w.visit(k, nil)
			if err != nil {
				return 0, err
			}
			vh, err := w.visit(v, nil)
			if err != nil {
				return 0, err
			}

			fieldHash := hashUpdateOrdered(w.h, kh, vh)
			h = hashUpdateUnordered(h, fieldHash)
		}

		// Important: read the docs for hashFinishUnordered
		h = hashFinishUnordered(w.h, h)

		return h, nil

	case reflect.Struct:
		parent := v.Interface()
		var include Includable
		if impl, ok := parent.(Includable); ok {
			include = impl
		}

		if impl, ok := parent.(Hashable); ok {
			return impl.Hash()
		}

		// If we can address this value, check if the pointer value
		// implements our interfaces and use that if so.
		if v.CanAddr() {
			vptr := v.Addr()
			parentptr := vptr.Interface()
			if impl, ok := parentptr.(Includable); ok {
				include = impl
			}

			if impl, ok := parentptr.(Hashable); ok {
				return impl.Hash()
			}
		}

		// Seed the struct hash with its type name so distinct types with
		// identical fields hash differently.
		t := v.Type()
		h, err := w.visit(reflect.ValueOf(t.Name()), nil)
		if err != nil {
			return 0, err
		}

		l := v.NumField()
		for i := 0; i < l; i++ {
			// Skip blank-identifier padding fields on read-only values.
			if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
				var f visitFlag
				fieldType := t.Field(i)
				if fieldType.PkgPath != "" {
					// Unexported
					continue
				}

				tag := fieldType.Tag.Get(w.tag)
				if tag == "ignore" || tag == "-" {
					// Ignore this field
					continue
				}

				if w.ignorezerovalue {
					zeroVal := reflect.Zero(reflect.TypeOf(innerV.Interface())).Interface()
					if innerV.Interface() == zeroVal {
						continue
					}
				}

				// if string is set, use the string value
				if tag == "string" || w.stringer {
					if impl, ok := innerV.Interface().(fmt.Stringer); ok {
						innerV = reflect.ValueOf(impl.String())
					} else if tag == "string" {
						// We only show this error if the tag explicitly
						// requests a stringer.
						return 0, &ErrNotStringer{
							Field: v.Type().Field(i).Name,
						}
					}
				}

				// Check if we implement includable and check it
				if include != nil {
					incl, err := include.HashInclude(fieldType.Name, innerV)
					if err != nil {
						return 0, err
					}
					if !incl {
						continue
					}
				}

				switch tag {
				case "set":
					f |= visitFlagSet
				}

				// Hash both the field name and its value so renames change
				// the result.
				kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil)
				if err != nil {
					return 0, err
				}

				vh, err := w.visit(innerV, &visitOpts{
					Flags:       f,
					Struct:      parent,
					StructField: fieldType.Name,
				})
				if err != nil {
					return 0, err
				}

				fieldHash := hashUpdateOrdered(w.h, kh, vh)
				h = hashUpdateUnordered(h, fieldHash)
			}

			// Important: read the docs for hashFinishUnordered
			h = hashFinishUnordered(w.h, h)
		}

		return h, nil

	case reflect.Slice:
		// We have two behaviors here. If it isn't a set, then we just
		// visit all the elements. If it is a set, then we do a deterministic
		// hash code.
		var h uint64
		var set bool
		if opts != nil {
			set = (opts.Flags & visitFlagSet) != 0
		}
		l := v.Len()
		for i := 0; i < l; i++ {
			current, err := w.visit(v.Index(i), nil)
			if err != nil {
				return 0, err
			}

			if set || w.sets {
				h = hashUpdateUnordered(h, current)
			} else {
				h = hashUpdateOrdered(w.h, h, current)
			}
		}

		if set {
			// Important: read the docs for hashFinishUnordered
			h = hashFinishUnordered(w.h, h)
		}

		return h, nil

	case reflect.String:
		// Directly hash
		w.h.Reset()
		_, err := w.h.Write([]byte(v.String()))
		return w.h.Sum64(), err

	default:
		return 0, fmt.Errorf("unknown kind to hash: %s", k)
	}
}
// hashUpdateOrdered combines two hash values in an order-sensitive way by
// running both through the supplied hash function: swapping a and b yields
// a different result.
func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 {
	h.Reset()
	// Writing fixed-size integers to a hash sink should never fail; a
	// non-nil error here is a programming bug, so we panic.
	errA := binary.Write(h, binary.LittleEndian, a)
	errB := binary.Write(h, binary.LittleEndian, b)
	switch {
	case errA != nil:
		panic(errA)
	case errB != nil:
		panic(errB)
	}
	return h.Sum64()
}
// hashUpdateUnordered folds two hashes together so that operand order is
// irrelevant (plain XOR). Results must be hardened with hashFinishUnordered
// before being mixed unordered again; see that function's comment.
func hashUpdateUnordered(a, b uint64) uint64 {
	return b ^ a
}
// After mixing a group of unique hashes with hashUpdateUnordered, it's always
// necessary to call hashFinishUnordered. Why? Because hashUpdateUnordered
// is a simple XOR, and calling hashUpdateUnordered on hashes produced by
// hashUpdateUnordered can effectively cancel out a previous change to the hash
// result if the same hash value appears later on. For example, consider:
//
// hashUpdateUnordered(hashUpdateUnordered("A", "B"), hashUpdateUnordered("A", "C")) =
// H("A") ^ H("B")) ^ (H("A") ^ H("C")) =
// (H("A") ^ H("A")) ^ (H("B") ^ H(C)) =
// H(B) ^ H(C) =
// hashUpdateUnordered(hashUpdateUnordered("Z", "B"), hashUpdateUnordered("Z", "C"))
//
// hashFinishUnordered "hardens" the result, so that encountering partially
// overlapping input data later on in a different context won't cancel out.
// hashFinishUnordered hardens an XOR-accumulated value by passing it through
// the hash function once, preventing later unordered mixes from cancelling
// earlier contributions (see the comment block above).
func hashFinishUnordered(h hash.Hash64, a uint64) uint64 {
	h.Reset()
	if err := binary.Write(h, binary.LittleEndian, a); err != nil {
		// A fixed-size integer write to a hash cannot fail.
		panic(err)
	}
	return h.Sum64()
}
// visitFlag is used as a bitmask for affecting visit behavior
type visitFlag uint

const (
	visitFlagInvalid visitFlag = iota
	// visitFlagSet marks a value to be hashed as an unordered set.
	// Evaluates to 1<<1 == 2 (iota is 1 here), leaving bit 0 unused.
	visitFlagSet = iota << 1
)
|
package git
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"golang.org/x/net/context"
)
// LocalMirror creates or updates a mirror of `url` at `gitDir` using `git clone
// --mirror`.
func LocalMirror(url, gitDir, ref string, timeout time.Duration, messages io.Writer) error {
ctx, done := context.WithTimeout(context.Background(), timeout)
defer done()
if _, err := os.Stat(gitDir); err == nil {
// Repo already exists, don't need to clone it.
if AlreadyHaveRef(gitDir, ref) {
// Sha already exists, don't need to fetch.
// fmt.Fprintf(messages, "Already have ref: %v %v", gitDir, ref)
return nil
}
return Fetch(ctx, gitDir, url, messages)
}
err := os.MkdirAll(filepath.Dir(gitDir), 0777)
if err != nil {
return err
}
return Clone(ctx, url, gitDir, messages)
}
// Clone clones a git repository as mirror.
func Clone(ctx context.Context, url, gitDir string, messages io.Writer) error {
cmd := Command(".", "git", "clone", "-q", "--mirror", url, gitDir)
cmd.Stdout = messages
cmd.Stderr = messages
return ContextRun(ctx, cmd)
}
// Checkout switches branches or restores working tree files.
func Checkout(gitDir, checkoutDir, ref string) error {
err := os.MkdirAll(checkoutDir, 0777)
if err != nil {
return err
}
args := []string{"--work-tree", checkoutDir, "checkout", ref, "--", "."}
err = Command(gitDir, "git", args...).Run()
if err != nil {
return err
}
// Set mtimes to time file is most recently affected by a commit.
// This is annoying but unfortunately git sets the timestamps to now,
// and docker depends on the mtime for cache invalidation.
err = SetMTimes(gitDir, checkoutDir, ref)
if err != nil {
return err
}
return nil
}
// Fetch fetches all branches from a given remote.
func Fetch(ctx context.Context, gitDir, url string, messages io.Writer) error {
cmd := Command(gitDir, "git", "fetch", "-f", url, "*:*")
cmd.Stdout = messages
cmd.Stderr = messages
err := ContextRun(ctx, cmd)
if err != nil {
// git fetch where there is no update is exit status 1.
if err.Error() != "exit status 1" {
return err
}
}
return nil
}
// ShaLike matches a full 40-character hexadecimal git object name.
// Anchored and restricted to hex digits: the previous pattern accepted any
// ASCII letters and matched anywhere inside a longer string, so non-sha
// refs could be mistaken for object names and never freshened.
var ShaLike = regexp.MustCompile(`^[0-9a-fA-F]{40}$`)
// AlreadyHaveRef returns true if ref is sha-like and is in the object database.
// The "sha-like" condition ensures that refs like `master` are always
// freshened.
func AlreadyHaveRef(gitDir, sha string) bool {
if !ShaLike.MatchString(sha) {
return false
}
cmd := Command(gitDir, "git", "cat-file", "-t", sha)
cmd.Stdout = nil
cmd.Stderr = nil
err := cmd.Run()
return err == nil
}
// HaveFile checks if a git directory has files checked out.
func HaveFile(gitDir, ref, path string) (ok bool, err error) {
cmd := Command(gitDir, "git", "show", fmt.Sprintf("%s:%s", ref, path))
cmd.Stdout = nil // don't want to see the contents
err = cmd.Run()
ok = true
if err != nil {
ok = false
if err.Error() == "exit status 128" {
// This happens if the file doesn't exist.
err = nil
}
}
return ok, err
}
// RevParse parses and formats the git rev of a given git reference.
func RevParse(gitDir, ref string) (sha string, err error) {
cmd := Command(gitDir, "git", "rev-parse", ref)
cmd.Stdout = nil // for cmd.Output
var stdout []byte
stdout, err = cmd.Output()
if err != nil {
return "", err
}
sha = strings.TrimSpace(string(stdout))
return sha, nil
}
// Describe describes a commit given a reference using the most recent tag
// reachable from it.
func Describe(gitDir, ref string) (desc string, err error) {
cmd := Command(gitDir, "git", "describe", "--all", "--tags", "--long", ref)
cmd.Stdout = nil // for cmd.Output
var stdout []byte
stdout, err = cmd.Output()
if err != nil {
return "", err
}
desc = strings.TrimSpace(string(stdout))
desc = strings.TrimPrefix(desc, "heads/")
desc = strings.TrimPrefix(desc, "tags/")
return desc, nil
}
// RecursiveCheckout recursively checks out repositories; similar to "git clone
// --recursive".
func RecursiveCheckout(gitDir, checkoutPath, rev string, timeout time.Duration, messages io.Writer) error {
err := Checkout(gitDir, checkoutPath, rev)
if err != nil {
return fmt.Errorf("failed to checkout: %v", err)
}
err = PrepSubmodules(gitDir, checkoutPath, rev, timeout, messages)
if err != nil {
return fmt.Errorf("failed to prep submodules: %v", err)
}
return nil
}
// Command invokes a `command` in `workdir` with `args`, connecting Stdout and
// Stderr to Stderr.
func Command(workdir, command string, args ...string) *exec.Cmd {
// log.Printf("wd = %s cmd = %s, args = %q", workdir, command, append([]string{}, args...))
cmd := exec.Command(command, args...)
cmd.Dir = workdir
cmd.Stdout = os.Stderr
cmd.Stderr = os.Stderr
return cmd
}
// ContextRun runs cmd within a net Context.
// If the context is cancelled or times out, the process is killed.
// ContextRun runs cmd within a net Context.
// If the context is cancelled or times out, the process is killed.
func ContextRun(ctx context.Context, cmd *exec.Cmd) error {
	// Buffered so the Wait goroutine can always deliver its result and
	// exit; with an unbuffered channel it would block forever (goroutine
	// leak) whenever we return early on ctx.Done().
	errc := make(chan error, 1)

	err := cmd.Start()
	if err != nil {
		return err
	}

	go func() { errc <- cmd.Wait() }()

	select {
	case <-ctx.Done():
		_ = cmd.Process.Kill()
		return ctx.Err()
	case err := <-errc:
		return err // err may be nil
	}
}
Use context from standard library
package git
import (
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
)
// LocalMirror creates or updates a mirror of `url` at `gitDir` using `git clone
// --mirror`.
func LocalMirror(url, gitDir, ref string, timeout time.Duration, messages io.Writer) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	if _, statErr := os.Stat(gitDir); statErr == nil {
		// The mirror already exists; fetch only if the ref is missing.
		if AlreadyHaveRef(gitDir, ref) {
			return nil
		}
		return Fetch(ctx, gitDir, url, messages)
	}

	if err := os.MkdirAll(filepath.Dir(gitDir), 0777); err != nil {
		return err
	}
	return Clone(ctx, url, gitDir, messages)
}
// Clone clones a git repository as mirror.
func Clone(ctx context.Context, url, gitDir string, messages io.Writer) error {
	c := Command(".", "git", "clone", "-q", "--mirror", url, gitDir)
	c.Stdout, c.Stderr = messages, messages
	return ContextRun(ctx, c)
}
// Checkout switches branches or restores working tree files.
func Checkout(gitDir, checkoutDir, ref string) error {
	if err := os.MkdirAll(checkoutDir, 0777); err != nil {
		return err
	}

	checkoutArgs := []string{"--work-tree", checkoutDir, "checkout", ref, "--", "."}
	if err := Command(gitDir, "git", checkoutArgs...).Run(); err != nil {
		return err
	}

	// Set mtimes to the time each file was most recently affected by a
	// commit. This is annoying but unfortunately git sets the timestamps
	// to now, and docker depends on the mtime for cache invalidation.
	return SetMTimes(gitDir, checkoutDir, ref)
}
// Fetch fetches all branches from a given remote into gitDir.
// A fetch that produces no updates is treated as success.
func Fetch(ctx context.Context, gitDir, url string, messages io.Writer) error {
	cmd := Command(gitDir, "git", "fetch", "-f", url, "*:*")
	cmd.Stdout = messages
	cmd.Stderr = messages

	err := ContextRun(ctx, cmd)
	if err != nil {
		// git fetch where there is no update is exit status 1. Inspect the
		// process exit code instead of matching the error string, which is
		// fragile and breaks for wrapped errors.
		if ee, ok := err.(*exec.ExitError); !ok || ee.ExitCode() != 1 {
			return err
		}
	}
	return nil
}
// ShaLike matches a full 40-character hexadecimal git object name.
// Anchored and restricted to hex digits: the previous pattern accepted any
// ASCII letters and matched anywhere inside a longer string, so non-sha
// refs could be mistaken for object names and never freshened.
var ShaLike = regexp.MustCompile(`^[0-9a-fA-F]{40}$`)
// AlreadyHaveRef returns true if ref is sha-like and is in the object database.
// The "sha-like" condition ensures that refs like `master` are always
// freshened.
func AlreadyHaveRef(gitDir, sha string) bool {
	if !ShaLike.MatchString(sha) {
		return false
	}

	c := Command(gitDir, "git", "cat-file", "-t", sha)
	c.Stdout, c.Stderr = nil, nil
	return c.Run() == nil
}
// HaveFile reports whether `path` exists at `ref` in the repository at
// gitDir. A missing file yields (false, nil); any other failure is returned.
func HaveFile(gitDir, ref, path string) (ok bool, err error) {
	cmd := Command(gitDir, "git", "show", fmt.Sprintf("%s:%s", ref, path))
	cmd.Stdout = nil // don't want to see the contents
	err = cmd.Run()
	if err == nil {
		return true, nil
	}
	// git show exits with status 128 when the object does not exist. Check
	// the exit code rather than the error string, which is fragile.
	if ee, isExit := err.(*exec.ExitError); isExit && ee.ExitCode() == 128 {
		return false, nil
	}
	return false, err
}
// RevParse parses and formats the git rev of a given git reference.
func RevParse(gitDir, ref string) (sha string, err error) {
	cmd := Command(gitDir, "git", "rev-parse", ref)
	cmd.Stdout = nil // for cmd.Output
	out, err := cmd.Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}
// Describe describes a commit given a reference using the most recent tag
// reachable from it.
func Describe(gitDir, ref string) (desc string, err error) {
	cmd := Command(gitDir, "git", "describe", "--all", "--tags", "--long", ref)
	cmd.Stdout = nil // for cmd.Output
	out, err := cmd.Output()
	if err != nil {
		return "", err
	}

	// Strip the ref-namespace prefix git prepends with --all.
	desc = strings.TrimSpace(string(out))
	for _, prefix := range []string{"heads/", "tags/"} {
		desc = strings.TrimPrefix(desc, prefix)
	}
	return desc, nil
}
// RecursiveCheckout recursively checks out repositories; similar to "git clone
// --recursive".
func RecursiveCheckout(gitDir, checkoutPath, rev string, timeout time.Duration, messages io.Writer) error {
	if err := Checkout(gitDir, checkoutPath, rev); err != nil {
		return fmt.Errorf("failed to checkout: %v", err)
	}
	if err := PrepSubmodules(gitDir, checkoutPath, rev, timeout, messages); err != nil {
		return fmt.Errorf("failed to prep submodules: %v", err)
	}
	return nil
}
// Command invokes a `command` in `workdir` with `args`, connecting Stdout and
// Stderr to Stderr.
func Command(workdir, command string, args ...string) *exec.Cmd {
// log.Printf("wd = %s cmd = %s, args = %q", workdir, command, append([]string{}, args...))
cmd := exec.Command(command, args...)
cmd.Dir = workdir
cmd.Stdout = os.Stderr
cmd.Stderr = os.Stderr
return cmd
}
// ContextRun runs cmd within a net Context.
// If the context is cancelled or times out, the process is killed.
// ContextRun runs cmd within a Context.
// If the context is cancelled or times out, the process is killed.
func ContextRun(ctx context.Context, cmd *exec.Cmd) error {
	// Buffered so the Wait goroutine can always deliver its result and
	// exit; with an unbuffered channel it would block forever (goroutine
	// leak) whenever we return early on ctx.Done().
	errc := make(chan error, 1)

	err := cmd.Start()
	if err != nil {
		return err
	}

	go func() { errc <- cmd.Wait() }()

	select {
	case <-ctx.Done():
		_ = cmd.Process.Kill()
		return ctx.Err()
	case err := <-errc:
		return err // err may be nil
	}
}
|
package git
/*
#include <git2.h>
#include <git2/sys/openssl.h>
#cgo pkg-config: libgit2
*/
import "C"
import (
"bytes"
"encoding/hex"
"errors"
"runtime"
"strings"
"unsafe"
)
type ErrorClass int
const (
ErrClassNone ErrorClass = C.GITERR_NONE
ErrClassNoMemory ErrorClass = C.GITERR_NOMEMORY
ErrClassOs ErrorClass = C.GITERR_OS
ErrClassInvalid ErrorClass = C.GITERR_INVALID
ErrClassReference ErrorClass = C.GITERR_REFERENCE
ErrClassZlib ErrorClass = C.GITERR_ZLIB
ErrClassRepository ErrorClass = C.GITERR_REPOSITORY
ErrClassConfig ErrorClass = C.GITERR_CONFIG
ErrClassRegex ErrorClass = C.GITERR_REGEX
ErrClassOdb ErrorClass = C.GITERR_ODB
ErrClassIndex ErrorClass = C.GITERR_INDEX
ErrClassObject ErrorClass = C.GITERR_OBJECT
ErrClassNet ErrorClass = C.GITERR_NET
ErrClassTag ErrorClass = C.GITERR_TAG
ErrClassTree ErrorClass = C.GITERR_TREE
ErrClassIndexer ErrorClass = C.GITERR_INDEXER
ErrClassSSL ErrorClass = C.GITERR_SSL
ErrClassSubmodule ErrorClass = C.GITERR_SUBMODULE
ErrClassThread ErrorClass = C.GITERR_THREAD
ErrClassStash ErrorClass = C.GITERR_STASH
ErrClassCheckout ErrorClass = C.GITERR_CHECKOUT
ErrClassFetchHead ErrorClass = C.GITERR_FETCHHEAD
ErrClassMerge ErrorClass = C.GITERR_MERGE
ErrClassSsh ErrorClass = C.GITERR_SSH
ErrClassFilter ErrorClass = C.GITERR_FILTER
ErrClassRevert ErrorClass = C.GITERR_REVERT
ErrClassCallback ErrorClass = C.GITERR_CALLBACK
)
type ErrorCode int
const (
// No error
ErrOk ErrorCode = C.GIT_OK
// Generic error
ErrGeneric ErrorCode = C.GIT_ERROR
// Requested object could not be found
ErrNotFound ErrorCode = C.GIT_ENOTFOUND
// Object exists preventing operation
ErrExists ErrorCode = C.GIT_EEXISTS
// More than one object matches
ErrAmbigious ErrorCode = C.GIT_EAMBIGUOUS
// Output buffer too short to hold data
ErrBuffs ErrorCode = C.GIT_EBUFS
// GIT_EUSER is a special error that is never generated by libgit2
// code. You can return it from a callback (e.g to stop an iteration)
// to know that it was generated by the callback and not by libgit2.
ErrUser ErrorCode = C.GIT_EUSER
// Operation not allowed on bare repository
ErrBareRepo ErrorCode = C.GIT_EBAREREPO
// HEAD refers to branch with no commits
ErrUnbornBranch ErrorCode = C.GIT_EUNBORNBRANCH
// Merge in progress prevented operation
ErrUnmerged ErrorCode = C.GIT_EUNMERGED
// Reference was not fast-forwardable
ErrNonFastForward ErrorCode = C.GIT_ENONFASTFORWARD
// Name/ref spec was not in a valid format
ErrInvalidSpec ErrorCode = C.GIT_EINVALIDSPEC
// Checkout conflicts prevented operation
ErrConflict ErrorCode = C.GIT_ECONFLICT
// Lock file prevented operation
ErrLocked ErrorCode = C.GIT_ELOCKED
// Reference value does not match expected
ErrModified ErrorCode = C.GIT_EMODIFIED
// Authentication failed
ErrAuth ErrorCode = C.GIT_EAUTH
// Server certificate is invalid
ErrCertificate ErrorCode = C.GIT_ECERTIFICATE
// Patch/merge has already been applied
ErrApplied ErrorCode = C.GIT_EAPPLIED
// The requested peel operation is not possible
ErrPeel ErrorCode = C.GIT_EPEEL
// Unexpected EOF
ErrEOF ErrorCode = C.GIT_EEOF
// Uncommitted changes in index prevented operation
ErrUncommitted ErrorCode = C.GIT_EUNCOMMITTED
// The operation is not valid for a directory
ErrDirectory ErrorCode = C.GIT_EDIRECTORY
// A merge conflict exists and cannot continue
ErrMergeConflict ErrorCode = C.GIT_EMERGECONFLICT
// Internal only
ErrPassthrough ErrorCode = C.GIT_PASSTHROUGH
// Signals end of iteration with iterator
ErrIterOver ErrorCode = C.GIT_ITEROVER
)
var (
ErrInvalid = errors.New("Invalid state for operation")
)
// pointerHandles maps opaque handles to Go values passed through cgo.
var pointerHandles *HandleList

// init initializes libgit2 and the handle registry for the lifetime of the
// process.
func init() {
	pointerHandles = NewHandleList()

	C.git_libgit2_init()

	// This is not something we should be doing, as we may be
	// stomping all over someone else's setup. The user should do
	// this themselves or use some binding/wrapper which does it
	// in such a way that they can be sure they're the only ones
	// setting it up.
	C.git_openssl_set_locking()
}
// Oid represents the id for a Git object.
type Oid [20]byte
// newOidFromC copies a C git_oid into a freshly allocated Go Oid.
// Returns nil when the C pointer is nil.
func newOidFromC(coid *C.git_oid) *Oid {
	if coid == nil {
		return nil
	}

	oid := new(Oid)
	copy(oid[0:20], C.GoBytes(unsafe.Pointer(coid), 20))
	return oid
}
// NewOidFromBytes builds an Oid from the first 20 bytes of b.
// NOTE(review): this panics if len(b) < 20 — callers must guarantee the
// slice is at least 20 bytes long.
func NewOidFromBytes(b []byte) *Oid {
	oid := new(Oid)
	copy(oid[0:20], b[0:20])
	return oid
}
// toC reinterprets the Oid's backing bytes as a C git_oid without copying;
// the returned pointer is only valid while oid is kept alive.
func (oid *Oid) toC() *C.git_oid {
	return (*C.git_oid)(unsafe.Pointer(oid))
}
// NewOid parses a hex string into an Oid. The input must be at most
// GIT_OID_HEXSZ characters and decode to exactly 20 bytes.
func NewOid(s string) (*Oid, error) {
	if len(s) > C.GIT_OID_HEXSZ {
		return nil, errors.New("string is too long for oid")
	}

	o := new(Oid)

	// Renamed from `error`: the previous local shadowed the predeclared
	// error type inside this function.
	slice, err := hex.DecodeString(s)
	if err != nil {
		return nil, err
	}

	if len(slice) != 20 {
		return nil, &GitError{"Invalid Oid", ErrClassNone, ErrGeneric}
	}

	copy(o[:], slice[:20])
	return o, nil
}
// String renders the oid as its 40-character lowercase hex form.
func (oid *Oid) String() string {
	return hex.EncodeToString(oid[:])
}
// Cmp byte-compares two oids, returning -1, 0 or +1 like bytes.Compare.
func (oid *Oid) Cmp(oid2 *Oid) int {
	return bytes.Compare(oid[:], oid2[:])
}
// Copy returns a newly allocated Oid holding the same bytes.
func (oid *Oid) Copy() *Oid {
	dup := *oid
	return &dup
}
// Equal reports whether both oids contain identical bytes.
func (oid *Oid) Equal(oid2 *Oid) bool {
	return bytes.Equal(oid[:], oid2[:])
}
// IsZero reports whether every byte of the oid is zero.
func (oid *Oid) IsZero() bool {
	// Fixed-size arrays compare element-wise, so comparing against the
	// zero value replaces the explicit byte loop.
	return *oid == (Oid{})
}
// NCmp byte-compares only the first n bytes of the two oids.
// NOTE(review): n > 20 would panic on the slice bounds — callers appear
// expected to pass n <= 20; confirm at call sites.
func (oid *Oid) NCmp(oid2 *Oid, n uint) int {
	return bytes.Compare(oid[:n], oid2[:n])
}
// ShortenOids computes, via libgit2's oid shortener, the minimum unique
// abbreviation length (at least minlen) for the given set of oids. It
// returns the last value reported by git_oid_shorten_add, or an error on
// the first failing id.
func ShortenOids(ids []*Oid, minlen int) (int, error) {
	shorten := C.git_oid_shorten_new(C.size_t(minlen))
	if shorten == nil {
		panic("Out of memory")
	}
	defer C.git_oid_shorten_free(shorten)

	var ret C.int

	// Pin the OS thread so giterr_last (thread-local) stays meaningful for
	// MakeGitError.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	for _, id := range ids {
		// 40 hex chars plus a NUL terminator for the C API.
		buf := make([]byte, 41)
		C.git_oid_fmt((*C.char)(unsafe.Pointer(&buf[0])), id.toC())
		buf[40] = 0
		ret = C.git_oid_shorten_add(shorten, (*C.char)(unsafe.Pointer(&buf[0])))
		if ret < 0 {
			return int(ret), MakeGitError(ret)
		}
	}
	return int(ret), nil
}
// GitError is the error type produced for libgit2 failures.
type GitError struct {
	Message string     // human-readable message, from giterr_last when available
	Class   ErrorClass // subsystem that raised the error
	Code    ErrorCode  // libgit2 error code
}

// Error implements the error interface by returning the raw message.
func (e GitError) Error() string {
	return e.Message
}
// IsErrorClass reports whether err is a *GitError carrying class c.
// A nil error (or a non-GitError) yields false.
func IsErrorClass(err error, c ErrorClass) bool {
	ge, ok := err.(*GitError)
	return ok && ge.Class == c
}
// IsErrorCode reports whether err is a *GitError carrying code c.
// A nil error (or a non-GitError) yields false.
func IsErrorCode(err error, c ErrorCode) bool {
	ge, ok := err.(*GitError)
	return ok && ge.Code == c
}
// MakeGitError converts a negative libgit2 return code into a *GitError,
// pulling message and class from the thread-local giterr_last. GIT_ITEROVER
// is not a real failure, so no last-error lookup is done for it.
// NOTE(review): callers are presumed to hold LockOSThread so giterr_last
// refers to the failing call — confirm at call sites.
func MakeGitError(errorCode C.int) error {
	var errMessage string
	var errClass ErrorClass
	if errorCode != C.GIT_ITEROVER {
		err := C.giterr_last()
		if err != nil {
			errMessage = C.GoString(err.message)
			errClass = ErrorClass(err.klass)
		} else {
			errClass = ErrClassInvalid
		}
	}
	return &GitError{errMessage, errClass, ErrorCode(errorCode)}
}
// MakeGitError2 is a convenience wrapper over MakeGitError for callers that
// hold the return code as a Go int.
func MakeGitError2(err int) error {
	return MakeGitError(C.int(err))
}
// cbool converts a Go bool to a C int (1 for true, 0 for false).
func cbool(b bool) C.int {
	if b {
		return C.int(1)
	}
	return C.int(0)
}
// ucbool converts a Go bool to a C unsigned int (1 for true, 0 for false).
func ucbool(b bool) C.uint {
	if b {
		return C.uint(1)
	}
	return C.uint(0)
}
// Discover walks up from `start` looking for a git repository, wrapping
// git_repository_discover. `across_fs` permits crossing filesystem
// boundaries; `ceiling_dirs` lists absolute paths at which the walk stops.
// Returns the discovered repository path.
func Discover(start string, across_fs bool, ceiling_dirs []string) (string, error) {
	ceildirs := C.CString(strings.Join(ceiling_dirs, string(C.GIT_PATH_LIST_SEPARATOR)))
	defer C.free(unsafe.Pointer(ceildirs))

	cstart := C.CString(start)
	defer C.free(unsafe.Pointer(cstart))

	var buf C.git_buf
	defer C.git_buf_free(&buf)

	// Pin the OS thread so giterr_last stays meaningful for MakeGitError.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_repository_discover(&buf, cstart, cbool(across_fs), ceildirs)
	if ret < 0 {
		return "", MakeGitError(ret)
	}

	return C.GoString(buf.ptr), nil
}
Error out if we detect an incompatible libgit2 version
The master version supports only v0.24 so let's enforce that via the
compiler.
package git
/*
#include <git2.h>
#include <git2/sys/openssl.h>
#cgo pkg-config: libgit2
#if LIBGIT2_VER_MAJOR != 0 || LIBGIT2_VER_MINOR != 24
# error "Invalid libgit2 version; this git2go supports libgit2 v0.24"
#endif
*/
import "C"
import (
"bytes"
"encoding/hex"
"errors"
"runtime"
"strings"
"unsafe"
)
type ErrorClass int
const (
ErrClassNone ErrorClass = C.GITERR_NONE
ErrClassNoMemory ErrorClass = C.GITERR_NOMEMORY
ErrClassOs ErrorClass = C.GITERR_OS
ErrClassInvalid ErrorClass = C.GITERR_INVALID
ErrClassReference ErrorClass = C.GITERR_REFERENCE
ErrClassZlib ErrorClass = C.GITERR_ZLIB
ErrClassRepository ErrorClass = C.GITERR_REPOSITORY
ErrClassConfig ErrorClass = C.GITERR_CONFIG
ErrClassRegex ErrorClass = C.GITERR_REGEX
ErrClassOdb ErrorClass = C.GITERR_ODB
ErrClassIndex ErrorClass = C.GITERR_INDEX
ErrClassObject ErrorClass = C.GITERR_OBJECT
ErrClassNet ErrorClass = C.GITERR_NET
ErrClassTag ErrorClass = C.GITERR_TAG
ErrClassTree ErrorClass = C.GITERR_TREE
ErrClassIndexer ErrorClass = C.GITERR_INDEXER
ErrClassSSL ErrorClass = C.GITERR_SSL
ErrClassSubmodule ErrorClass = C.GITERR_SUBMODULE
ErrClassThread ErrorClass = C.GITERR_THREAD
ErrClassStash ErrorClass = C.GITERR_STASH
ErrClassCheckout ErrorClass = C.GITERR_CHECKOUT
ErrClassFetchHead ErrorClass = C.GITERR_FETCHHEAD
ErrClassMerge ErrorClass = C.GITERR_MERGE
ErrClassSsh ErrorClass = C.GITERR_SSH
ErrClassFilter ErrorClass = C.GITERR_FILTER
ErrClassRevert ErrorClass = C.GITERR_REVERT
ErrClassCallback ErrorClass = C.GITERR_CALLBACK
)
// ErrorCode mirrors libgit2's GIT_* return codes; negative values are
// errors, GIT_OK (0) is success.
type ErrorCode int
const (
	// No error
	ErrOk ErrorCode = C.GIT_OK
	// Generic error
	ErrGeneric ErrorCode = C.GIT_ERROR
	// Requested object could not be found
	ErrNotFound ErrorCode = C.GIT_ENOTFOUND
	// Object exists preventing operation
	ErrExists ErrorCode = C.GIT_EEXISTS
	// More than one object matches
	ErrAmbigious ErrorCode = C.GIT_EAMBIGUOUS
	// Output buffer too short to hold data
	ErrBuffs ErrorCode = C.GIT_EBUFS
	// GIT_EUSER is a special error that is never generated by libgit2
	// code. You can return it from a callback (e.g to stop an iteration)
	// to know that it was generated by the callback and not by libgit2.
	ErrUser ErrorCode = C.GIT_EUSER
	// Operation not allowed on bare repository
	ErrBareRepo ErrorCode = C.GIT_EBAREREPO
	// HEAD refers to branch with no commits
	ErrUnbornBranch ErrorCode = C.GIT_EUNBORNBRANCH
	// Merge in progress prevented operation
	ErrUnmerged ErrorCode = C.GIT_EUNMERGED
	// Reference was not fast-forwardable
	ErrNonFastForward ErrorCode = C.GIT_ENONFASTFORWARD
	// Name/ref spec was not in a valid format
	ErrInvalidSpec ErrorCode = C.GIT_EINVALIDSPEC
	// Checkout conflicts prevented operation
	ErrConflict ErrorCode = C.GIT_ECONFLICT
	// Lock file prevented operation
	ErrLocked ErrorCode = C.GIT_ELOCKED
	// Reference value does not match expected
	ErrModified ErrorCode = C.GIT_EMODIFIED
	// Authentication failed
	ErrAuth ErrorCode = C.GIT_EAUTH
	// Server certificate is invalid
	ErrCertificate ErrorCode = C.GIT_ECERTIFICATE
	// Patch/merge has already been applied
	ErrApplied ErrorCode = C.GIT_EAPPLIED
	// The requested peel operation is not possible
	ErrPeel ErrorCode = C.GIT_EPEEL
	// Unexpected EOF
	ErrEOF ErrorCode = C.GIT_EEOF
	// Uncommitted changes in index prevented operation
	ErrUncommitted ErrorCode = C.GIT_EUNCOMMITTED
	// The operation is not valid for a directory
	ErrDirectory ErrorCode = C.GIT_EDIRECTORY
	// A merge conflict exists and cannot continue
	ErrMergeConflict ErrorCode = C.GIT_EMERGECONFLICT
	// Internal only
	ErrPassthrough ErrorCode = C.GIT_PASSTHROUGH
	// Signals end of iteration with iterator
	ErrIterOver ErrorCode = C.GIT_ITEROVER
)
var (
	// ErrInvalid reports use of an object whose underlying libgit2
	// resource is no longer (or was never) valid.
	ErrInvalid = errors.New("Invalid state for operation")
)
// pointerHandles maps Go values to stable handles that can safely be
// passed through cgo as callback payloads.
var pointerHandles *HandleList
func init() {
	pointerHandles = NewHandleList()
	// Initialize libgit2 itself before any other binding code runs.
	C.git_libgit2_init()
	// This is not something we should be doing, as we may be
	// stomping all over someone else's setup. The user should do
	// this themselves or use some binding/wrapper which does it
	// in such a way that they can be sure they're the only ones
	// setting it up.
	C.git_openssl_set_locking()
}
// Oid represents the id for a Git object.
type Oid [20]byte
// newOidFromC copies a C git_oid into a freshly allocated Go Oid.
// It returns nil when the C pointer is nil.
func newOidFromC(coid *C.git_oid) *Oid {
	if coid == nil {
		return nil
	}
	oid := new(Oid)
	copy(oid[0:20], C.GoBytes(unsafe.Pointer(coid), 20))
	return oid
}
// NewOidFromBytes creates an Oid from the first 20 bytes of b.
// It returns nil when b holds fewer than 20 bytes, mirroring
// newOidFromC's nil-for-invalid-input convention (the previous code
// panicked with an out-of-range slice on short input).
func NewOidFromBytes(b []byte) *Oid {
	if len(b) < 20 {
		return nil
	}
	oid := new(Oid)
	copy(oid[0:20], b[0:20])
	return oid
}
// toC reinterprets the Oid's 20 bytes as a C git_oid without copying;
// the returned pointer is only valid while oid is kept alive.
func (oid *Oid) toC() *C.git_oid {
	return (*C.git_oid)(unsafe.Pointer(oid))
}
// NewOid parses a 40-character hex string into an Oid. It returns an
// error if the string is longer than GIT_OID_HEXSZ, is not valid hex,
// or does not decode to exactly 20 bytes.
func NewOid(s string) (*Oid, error) {
	if len(s) > C.GIT_OID_HEXSZ {
		return nil, errors.New("string is too long for oid")
	}
	o := new(Oid)
	// Renamed from `error`, which shadowed the predeclared type.
	slice, err := hex.DecodeString(s)
	if err != nil {
		return nil, err
	}
	if len(slice) != 20 {
		return nil, &GitError{"Invalid Oid", ErrClassNone, ErrGeneric}
	}
	copy(o[:], slice[:20])
	return o, nil
}
// String renders the id as its 40-character lowercase hex form.
func (oid *Oid) String() string {
	return hex.EncodeToString(oid[:])
}
// Cmp compares two ids lexicographically, returning -1, 0 or +1.
func (oid *Oid) Cmp(oid2 *Oid) int {
	return bytes.Compare(oid[:], oid2[:])
}
// Copy returns a fresh Oid holding the same 20 bytes.
func (oid *Oid) Copy() *Oid {
	dup := *oid
	return &dup
}
// Equal reports whether the two ids hold identical bytes.
func (oid *Oid) Equal(oid2 *Oid) bool {
	return *oid == *oid2
}
// IsZero reports whether every byte of the id is zero.
func (oid *Oid) IsZero() bool {
	return *oid == Oid{}
}
// NCmp compares only the first n bytes of each id.
func (oid *Oid) NCmp(oid2 *Oid, n uint) int {
	return bytes.Compare(oid[:n], oid2[:n])
}
// ShortenOids feeds every id in ids to a libgit2 oid shortener and
// returns the minimum unambiguous prefix length (at least minlen).
func ShortenOids(ids []*Oid, minlen int) (int, error) {
	shorten := C.git_oid_shorten_new(C.size_t(minlen))
	if shorten == nil {
		panic("Out of memory")
	}
	defer C.git_oid_shorten_free(shorten)
	var ret C.int
	// Pin the goroutine to its OS thread: the giterr_last state read
	// by MakeGitError is thread-local in libgit2.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	for _, id := range ids {
		buf := make([]byte, 41)
		// git_oid_fmt writes 40 hex chars without a terminator; add
		// the NUL that git_oid_shorten_add expects.
		C.git_oid_fmt((*C.char)(unsafe.Pointer(&buf[0])), id.toC())
		buf[40] = 0
		ret = C.git_oid_shorten_add(shorten, (*C.char)(unsafe.Pointer(&buf[0])))
		if ret < 0 {
			return int(ret), MakeGitError(ret)
		}
	}
	return int(ret), nil
}
// GitError is the error type returned for libgit2 failures, carrying
// the human-readable message plus the libgit2 error class and code.
type GitError struct {
	Message string
	Class ErrorClass
	Code ErrorCode
}
// Error implements the error interface by returning the message.
func (e GitError) Error() string {
	return e.Message
}
// IsErrorClass reports whether err is a *GitError of class c.
// A nil error (or a non-GitError) is never of any class.
func IsErrorClass(err error, c ErrorClass) bool {
	gitError, ok := err.(*GitError)
	return ok && gitError.Class == c
}
// IsErrorCode reports whether err is a *GitError carrying code c.
// A nil error (or a non-GitError) never matches.
func IsErrorCode(err error, c ErrorCode) bool {
	gitError, ok := err.(*GitError)
	return ok && gitError.Code == c
}
// MakeGitError builds a *GitError for a libgit2 return code. For any
// code other than GIT_ITEROVER it consults giterr_last for the message
// and class. NOTE(review): giterr_last is thread-local, so callers are
// expected to invoke this on the same locked OS thread as the failing
// libgit2 call.
func MakeGitError(errorCode C.int) error {
	var errMessage string
	var errClass ErrorClass
	if errorCode != C.GIT_ITEROVER {
		err := C.giterr_last()
		if err != nil {
			errMessage = C.GoString(err.message)
			errClass = ErrorClass(err.klass)
		} else {
			errClass = ErrClassInvalid
		}
	}
	return &GitError{errMessage, errClass, ErrorCode(errorCode)}
}
// MakeGitError2 is a convenience wrapper taking a plain Go int code.
func MakeGitError2(err int) error {
	return MakeGitError(C.int(err))
}
// cbool converts a Go bool to a C int (1 for true, 0 for false).
func cbool(b bool) C.int {
	if !b {
		return C.int(0)
	}
	return C.int(1)
}
// ucbool converts a Go bool to a C uint (1 for true, 0 for false).
func ucbool(b bool) C.uint {
	if !b {
		return C.uint(0)
	}
	return C.uint(1)
}
// Discover walks up from start looking for a git repository, wrapping
// git_repository_discover. across_fs controls whether the search may
// cross filesystem boundaries; ceiling_dirs lists paths at which the
// walk stops. On success it returns the discovered repository path.
func Discover(start string, across_fs bool, ceiling_dirs []string) (string, error) {
	ceildirs := C.CString(strings.Join(ceiling_dirs, string(C.GIT_PATH_LIST_SEPARATOR)))
	defer C.free(unsafe.Pointer(ceildirs))
	cstart := C.CString(start)
	defer C.free(unsafe.Pointer(cstart))
	var buf C.git_buf
	defer C.git_buf_free(&buf)
	// Keep giterr_last on the same thread as the failing call.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ret := C.git_repository_discover(&buf, cstart, cbool(across_fs), ceildirs)
	if ret < 0 {
		return "", MakeGitError(ret)
	}
	return C.GoString(buf.ptr), nil
}
|
package git
/*
#include <git2.h>
#include <git2/sys/openssl.h>
#cgo pkg-config: libgit2
#if LIBGIT2_VER_MAJOR != 0 || LIBGIT2_VER_MINOR != 24
# error "Invalid libgit2 version; this git2go supports libgit2 v0.24"
#endif
*/
import "C"
import (
"bytes"
"encoding/hex"
"errors"
"runtime"
"strings"
"unsafe"
)
// ErrorClass identifies the libgit2 subsystem (GITERR_* family) that
// raised an error.
type ErrorClass int
const (
	ErrClassNone ErrorClass = C.GITERR_NONE
	ErrClassNoMemory ErrorClass = C.GITERR_NOMEMORY
	ErrClassOs ErrorClass = C.GITERR_OS
	ErrClassInvalid ErrorClass = C.GITERR_INVALID
	ErrClassReference ErrorClass = C.GITERR_REFERENCE
	ErrClassZlib ErrorClass = C.GITERR_ZLIB
	ErrClassRepository ErrorClass = C.GITERR_REPOSITORY
	ErrClassConfig ErrorClass = C.GITERR_CONFIG
	ErrClassRegex ErrorClass = C.GITERR_REGEX
	ErrClassOdb ErrorClass = C.GITERR_ODB
	ErrClassIndex ErrorClass = C.GITERR_INDEX
	ErrClassObject ErrorClass = C.GITERR_OBJECT
	ErrClassNet ErrorClass = C.GITERR_NET
	ErrClassTag ErrorClass = C.GITERR_TAG
	ErrClassTree ErrorClass = C.GITERR_TREE
	ErrClassIndexer ErrorClass = C.GITERR_INDEXER
	ErrClassSSL ErrorClass = C.GITERR_SSL
	ErrClassSubmodule ErrorClass = C.GITERR_SUBMODULE
	ErrClassThread ErrorClass = C.GITERR_THREAD
	ErrClassStash ErrorClass = C.GITERR_STASH
	ErrClassCheckout ErrorClass = C.GITERR_CHECKOUT
	ErrClassFetchHead ErrorClass = C.GITERR_FETCHHEAD
	ErrClassMerge ErrorClass = C.GITERR_MERGE
	ErrClassSsh ErrorClass = C.GITERR_SSH
	ErrClassFilter ErrorClass = C.GITERR_FILTER
	ErrClassRevert ErrorClass = C.GITERR_REVERT
	ErrClassCallback ErrorClass = C.GITERR_CALLBACK
)
// ErrorCode mirrors libgit2's GIT_* return codes; negative values are
// errors, GIT_OK (0) is success.
type ErrorCode int
const (
	// No error
	ErrOk ErrorCode = C.GIT_OK
	// Generic error
	ErrGeneric ErrorCode = C.GIT_ERROR
	// Requested object could not be found
	ErrNotFound ErrorCode = C.GIT_ENOTFOUND
	// Object exists preventing operation
	ErrExists ErrorCode = C.GIT_EEXISTS
	// More than one object matches
	ErrAmbigious ErrorCode = C.GIT_EAMBIGUOUS
	// Output buffer too short to hold data
	ErrBuffs ErrorCode = C.GIT_EBUFS
	// GIT_EUSER is a special error that is never generated by libgit2
	// code. You can return it from a callback (e.g to stop an iteration)
	// to know that it was generated by the callback and not by libgit2.
	ErrUser ErrorCode = C.GIT_EUSER
	// Operation not allowed on bare repository
	ErrBareRepo ErrorCode = C.GIT_EBAREREPO
	// HEAD refers to branch with no commits
	ErrUnbornBranch ErrorCode = C.GIT_EUNBORNBRANCH
	// Merge in progress prevented operation
	ErrUnmerged ErrorCode = C.GIT_EUNMERGED
	// Reference was not fast-forwardable
	ErrNonFastForward ErrorCode = C.GIT_ENONFASTFORWARD
	// Name/ref spec was not in a valid format
	ErrInvalidSpec ErrorCode = C.GIT_EINVALIDSPEC
	// Checkout conflicts prevented operation
	ErrConflict ErrorCode = C.GIT_ECONFLICT
	// Lock file prevented operation
	ErrLocked ErrorCode = C.GIT_ELOCKED
	// Reference value does not match expected
	ErrModified ErrorCode = C.GIT_EMODIFIED
	// Authentication failed
	ErrAuth ErrorCode = C.GIT_EAUTH
	// Server certificate is invalid
	ErrCertificate ErrorCode = C.GIT_ECERTIFICATE
	// Patch/merge has already been applied
	ErrApplied ErrorCode = C.GIT_EAPPLIED
	// The requested peel operation is not possible
	ErrPeel ErrorCode = C.GIT_EPEEL
	// Unexpected EOF
	ErrEOF ErrorCode = C.GIT_EEOF
	// Uncommitted changes in index prevented operation
	ErrUncommitted ErrorCode = C.GIT_EUNCOMMITTED
	// The operation is not valid for a directory
	ErrDirectory ErrorCode = C.GIT_EDIRECTORY
	// A merge conflict exists and cannot continue
	ErrMergeConflict ErrorCode = C.GIT_EMERGECONFLICT
	// Internal only
	ErrPassthrough ErrorCode = C.GIT_PASSTHROUGH
	// Signals end of iteration with iterator
	ErrIterOver ErrorCode = C.GIT_ITEROVER
)
var (
	// ErrInvalid reports use of an object whose underlying libgit2
	// resource is no longer (or was never) valid.
	ErrInvalid = errors.New("Invalid state for operation")
)
// pointerHandles maps Go values to stable handles that can safely be
// passed through cgo as callback payloads.
var pointerHandles *HandleList
func init() {
	pointerHandles = NewHandleList()
	// Initialize libgit2 itself before any other binding code runs.
	C.git_libgit2_init()
	// This is not something we should be doing, as we may be
	// stomping all over someone else's setup. The user should do
	// this themselves or use some binding/wrapper which does it
	// in such a way that they can be sure they're the only ones
	// setting it up.
	C.git_openssl_set_locking()
}
// Oid represents the id for a Git object.
type Oid [20]byte
// newOidFromC copies a C git_oid into a freshly allocated Go Oid.
// It returns nil when the C pointer is nil.
func newOidFromC(coid *C.git_oid) *Oid {
	if coid == nil {
		return nil
	}
	oid := new(Oid)
	copy(oid[0:20], C.GoBytes(unsafe.Pointer(coid), 20))
	return oid
}
// NewOidFromBytes creates an Oid from the first 20 bytes of b.
// It returns nil when b holds fewer than 20 bytes, mirroring
// newOidFromC's nil-for-invalid-input convention (the previous code
// panicked with an out-of-range slice on short input).
func NewOidFromBytes(b []byte) *Oid {
	if len(b) < 20 {
		return nil
	}
	oid := new(Oid)
	copy(oid[0:20], b[0:20])
	return oid
}
// toC reinterprets the Oid's 20 bytes as a C git_oid without copying;
// the returned pointer is only valid while oid is kept alive.
func (oid *Oid) toC() *C.git_oid {
	return (*C.git_oid)(unsafe.Pointer(oid))
}
// NewOid parses a 40-character hex string into an Oid. It returns an
// error if the string is longer than GIT_OID_HEXSZ, is not valid hex,
// or does not decode to exactly 20 bytes.
func NewOid(s string) (*Oid, error) {
	if len(s) > C.GIT_OID_HEXSZ {
		return nil, errors.New("string is too long for oid")
	}
	o := new(Oid)
	// Renamed from `error`, which shadowed the predeclared type.
	slice, err := hex.DecodeString(s)
	if err != nil {
		return nil, err
	}
	if len(slice) != 20 {
		return nil, &GitError{"Invalid Oid", ErrClassNone, ErrGeneric}
	}
	copy(o[:], slice[:20])
	return o, nil
}
// String renders the id as its 40-character lowercase hex form.
func (oid *Oid) String() string {
	return hex.EncodeToString(oid[:])
}
// Cmp compares two ids lexicographically, returning -1, 0 or +1.
func (oid *Oid) Cmp(oid2 *Oid) int {
	return bytes.Compare(oid[:], oid2[:])
}
// Copy returns a fresh Oid holding the same 20 bytes.
func (oid *Oid) Copy() *Oid {
	dup := *oid
	return &dup
}
// Equal reports whether the two ids hold identical bytes.
func (oid *Oid) Equal(oid2 *Oid) bool {
	return *oid == *oid2
}
// IsZero reports whether every byte of the id is zero.
func (oid *Oid) IsZero() bool {
	return *oid == Oid{}
}
// NCmp compares only the first n bytes of each id.
func (oid *Oid) NCmp(oid2 *Oid, n uint) int {
	return bytes.Compare(oid[:n], oid2[:n])
}
// ShortenOids feeds every id in ids to a libgit2 oid shortener and
// returns the minimum unambiguous prefix length (at least minlen).
func ShortenOids(ids []*Oid, minlen int) (int, error) {
	shorten := C.git_oid_shorten_new(C.size_t(minlen))
	if shorten == nil {
		panic("Out of memory")
	}
	defer C.git_oid_shorten_free(shorten)
	var ret C.int
	// Pin the goroutine to its OS thread: the giterr_last state read
	// by MakeGitError is thread-local in libgit2.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	for _, id := range ids {
		buf := make([]byte, 41)
		// git_oid_fmt writes 40 hex chars without a terminator; add
		// the NUL that git_oid_shorten_add expects.
		C.git_oid_fmt((*C.char)(unsafe.Pointer(&buf[0])), id.toC())
		buf[40] = 0
		ret = C.git_oid_shorten_add(shorten, (*C.char)(unsafe.Pointer(&buf[0])))
		if ret < 0 {
			return int(ret), MakeGitError(ret)
		}
	}
	return int(ret), nil
}
// GitError is the error type returned for libgit2 failures, carrying
// the human-readable message plus the libgit2 error class and code.
type GitError struct {
	Message string
	Class ErrorClass
	Code ErrorCode
}
// Error implements the error interface by returning the message.
func (e GitError) Error() string {
	return e.Message
}
// IsErrorClass reports whether err is a *GitError of class c.
// A nil error (or a non-GitError) is never of any class.
func IsErrorClass(err error, c ErrorClass) bool {
	gitError, ok := err.(*GitError)
	return ok && gitError.Class == c
}
// IsErrorCode reports whether err is a *GitError carrying code c.
// A nil error (or a non-GitError) never matches.
func IsErrorCode(err error, c ErrorCode) bool {
	gitError, ok := err.(*GitError)
	return ok && gitError.Code == c
}
// MakeGitError builds a *GitError for a libgit2 return code. For any
// code other than GIT_ITEROVER it consults giterr_last for the message
// and class. NOTE(review): giterr_last is thread-local, so callers are
// expected to invoke this on the same locked OS thread as the failing
// libgit2 call.
func MakeGitError(errorCode C.int) error {
	var errMessage string
	var errClass ErrorClass
	if errorCode != C.GIT_ITEROVER {
		err := C.giterr_last()
		if err != nil {
			errMessage = C.GoString(err.message)
			errClass = ErrorClass(err.klass)
		} else {
			errClass = ErrClassInvalid
		}
	}
	return &GitError{errMessage, errClass, ErrorCode(errorCode)}
}
// MakeGitError2 is a convenience wrapper taking a plain Go int code.
func MakeGitError2(err int) error {
	return MakeGitError(C.int(err))
}
// cbool converts a Go bool to a C int (1 for true, 0 for false).
func cbool(b bool) C.int {
	if !b {
		return C.int(0)
	}
	return C.int(1)
}
// ucbool converts a Go bool to a C uint (1 for true, 0 for false).
func ucbool(b bool) C.uint {
	if !b {
		return C.uint(0)
	}
	return C.uint(1)
}
// Discover walks up from start looking for a git repository, wrapping
// git_repository_discover. across_fs controls whether the search may
// cross filesystem boundaries; ceiling_dirs lists paths at which the
// walk stops. On success it returns the discovered repository path.
func Discover(start string, across_fs bool, ceiling_dirs []string) (string, error) {
	ceildirs := C.CString(strings.Join(ceiling_dirs, string(C.GIT_PATH_LIST_SEPARATOR)))
	defer C.free(unsafe.Pointer(ceildirs))
	cstart := C.CString(start)
	defer C.free(unsafe.Pointer(cstart))
	var buf C.git_buf
	defer C.git_buf_free(&buf)
	// Keep giterr_last on the same thread as the failing call.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ret := C.git_repository_discover(&buf, cstart, cbool(across_fs), ceildirs)
	if ret < 0 {
		return "", MakeGitError(ret)
	}
	return C.GoString(buf.ptr), nil
}
Note: the following copy of the file also removes a `#cgo pkg-config: libgit2` directive that had snuck into the cgo preamble; build flags are expected to come from the build environment instead.
package git
/*
#include <git2.h>
#include <git2/sys/openssl.h>
#if LIBGIT2_VER_MAJOR != 0 || LIBGIT2_VER_MINOR != 24
# error "Invalid libgit2 version; this git2go supports libgit2 v0.24"
#endif
*/
import "C"
import (
"bytes"
"encoding/hex"
"errors"
"runtime"
"strings"
"unsafe"
)
// ErrorClass identifies the libgit2 subsystem (GITERR_* family) that
// raised an error.
type ErrorClass int
const (
	ErrClassNone ErrorClass = C.GITERR_NONE
	ErrClassNoMemory ErrorClass = C.GITERR_NOMEMORY
	ErrClassOs ErrorClass = C.GITERR_OS
	ErrClassInvalid ErrorClass = C.GITERR_INVALID
	ErrClassReference ErrorClass = C.GITERR_REFERENCE
	ErrClassZlib ErrorClass = C.GITERR_ZLIB
	ErrClassRepository ErrorClass = C.GITERR_REPOSITORY
	ErrClassConfig ErrorClass = C.GITERR_CONFIG
	ErrClassRegex ErrorClass = C.GITERR_REGEX
	ErrClassOdb ErrorClass = C.GITERR_ODB
	ErrClassIndex ErrorClass = C.GITERR_INDEX
	ErrClassObject ErrorClass = C.GITERR_OBJECT
	ErrClassNet ErrorClass = C.GITERR_NET
	ErrClassTag ErrorClass = C.GITERR_TAG
	ErrClassTree ErrorClass = C.GITERR_TREE
	ErrClassIndexer ErrorClass = C.GITERR_INDEXER
	ErrClassSSL ErrorClass = C.GITERR_SSL
	ErrClassSubmodule ErrorClass = C.GITERR_SUBMODULE
	ErrClassThread ErrorClass = C.GITERR_THREAD
	ErrClassStash ErrorClass = C.GITERR_STASH
	ErrClassCheckout ErrorClass = C.GITERR_CHECKOUT
	ErrClassFetchHead ErrorClass = C.GITERR_FETCHHEAD
	ErrClassMerge ErrorClass = C.GITERR_MERGE
	ErrClassSsh ErrorClass = C.GITERR_SSH
	ErrClassFilter ErrorClass = C.GITERR_FILTER
	ErrClassRevert ErrorClass = C.GITERR_REVERT
	ErrClassCallback ErrorClass = C.GITERR_CALLBACK
)
// ErrorCode mirrors libgit2's GIT_* return codes; negative values are
// errors, GIT_OK (0) is success.
type ErrorCode int
const (
	// No error
	ErrOk ErrorCode = C.GIT_OK
	// Generic error
	ErrGeneric ErrorCode = C.GIT_ERROR
	// Requested object could not be found
	ErrNotFound ErrorCode = C.GIT_ENOTFOUND
	// Object exists preventing operation
	ErrExists ErrorCode = C.GIT_EEXISTS
	// More than one object matches
	ErrAmbigious ErrorCode = C.GIT_EAMBIGUOUS
	// Output buffer too short to hold data
	ErrBuffs ErrorCode = C.GIT_EBUFS
	// GIT_EUSER is a special error that is never generated by libgit2
	// code. You can return it from a callback (e.g to stop an iteration)
	// to know that it was generated by the callback and not by libgit2.
	ErrUser ErrorCode = C.GIT_EUSER
	// Operation not allowed on bare repository
	ErrBareRepo ErrorCode = C.GIT_EBAREREPO
	// HEAD refers to branch with no commits
	ErrUnbornBranch ErrorCode = C.GIT_EUNBORNBRANCH
	// Merge in progress prevented operation
	ErrUnmerged ErrorCode = C.GIT_EUNMERGED
	// Reference was not fast-forwardable
	ErrNonFastForward ErrorCode = C.GIT_ENONFASTFORWARD
	// Name/ref spec was not in a valid format
	ErrInvalidSpec ErrorCode = C.GIT_EINVALIDSPEC
	// Checkout conflicts prevented operation
	ErrConflict ErrorCode = C.GIT_ECONFLICT
	// Lock file prevented operation
	ErrLocked ErrorCode = C.GIT_ELOCKED
	// Reference value does not match expected
	ErrModified ErrorCode = C.GIT_EMODIFIED
	// Authentication failed
	ErrAuth ErrorCode = C.GIT_EAUTH
	// Server certificate is invalid
	ErrCertificate ErrorCode = C.GIT_ECERTIFICATE
	// Patch/merge has already been applied
	ErrApplied ErrorCode = C.GIT_EAPPLIED
	// The requested peel operation is not possible
	ErrPeel ErrorCode = C.GIT_EPEEL
	// Unexpected EOF
	ErrEOF ErrorCode = C.GIT_EEOF
	// Uncommitted changes in index prevented operation
	ErrUncommitted ErrorCode = C.GIT_EUNCOMMITTED
	// The operation is not valid for a directory
	ErrDirectory ErrorCode = C.GIT_EDIRECTORY
	// A merge conflict exists and cannot continue
	ErrMergeConflict ErrorCode = C.GIT_EMERGECONFLICT
	// Internal only
	ErrPassthrough ErrorCode = C.GIT_PASSTHROUGH
	// Signals end of iteration with iterator
	ErrIterOver ErrorCode = C.GIT_ITEROVER
)
var (
	// ErrInvalid reports use of an object whose underlying libgit2
	// resource is no longer (or was never) valid.
	ErrInvalid = errors.New("Invalid state for operation")
)
// pointerHandles maps Go values to stable handles that can safely be
// passed through cgo as callback payloads.
var pointerHandles *HandleList
func init() {
	pointerHandles = NewHandleList()
	// Initialize libgit2 itself before any other binding code runs.
	C.git_libgit2_init()
	// This is not something we should be doing, as we may be
	// stomping all over someone else's setup. The user should do
	// this themselves or use some binding/wrapper which does it
	// in such a way that they can be sure they're the only ones
	// setting it up.
	C.git_openssl_set_locking()
}
// Oid represents the id for a Git object.
type Oid [20]byte
// newOidFromC copies a C git_oid into a freshly allocated Go Oid.
// It returns nil when the C pointer is nil.
func newOidFromC(coid *C.git_oid) *Oid {
	if coid == nil {
		return nil
	}
	oid := new(Oid)
	copy(oid[0:20], C.GoBytes(unsafe.Pointer(coid), 20))
	return oid
}
// NewOidFromBytes creates an Oid from the first 20 bytes of b.
// It returns nil when b holds fewer than 20 bytes, mirroring
// newOidFromC's nil-for-invalid-input convention (the previous code
// panicked with an out-of-range slice on short input).
func NewOidFromBytes(b []byte) *Oid {
	if len(b) < 20 {
		return nil
	}
	oid := new(Oid)
	copy(oid[0:20], b[0:20])
	return oid
}
// toC reinterprets the Oid's 20 bytes as a C git_oid without copying;
// the returned pointer is only valid while oid is kept alive.
func (oid *Oid) toC() *C.git_oid {
	return (*C.git_oid)(unsafe.Pointer(oid))
}
// NewOid parses a 40-character hex string into an Oid. It returns an
// error if the string is longer than GIT_OID_HEXSZ, is not valid hex,
// or does not decode to exactly 20 bytes.
func NewOid(s string) (*Oid, error) {
	if len(s) > C.GIT_OID_HEXSZ {
		return nil, errors.New("string is too long for oid")
	}
	o := new(Oid)
	// Renamed from `error`, which shadowed the predeclared type.
	slice, err := hex.DecodeString(s)
	if err != nil {
		return nil, err
	}
	if len(slice) != 20 {
		return nil, &GitError{"Invalid Oid", ErrClassNone, ErrGeneric}
	}
	copy(o[:], slice[:20])
	return o, nil
}
// String renders the id as its 40-character lowercase hex form.
func (oid *Oid) String() string {
	return hex.EncodeToString(oid[:])
}
// Cmp compares two ids lexicographically, returning -1, 0 or +1.
func (oid *Oid) Cmp(oid2 *Oid) int {
	return bytes.Compare(oid[:], oid2[:])
}
// Copy returns a fresh Oid holding the same 20 bytes.
func (oid *Oid) Copy() *Oid {
	dup := *oid
	return &dup
}
// Equal reports whether the two ids hold identical bytes.
func (oid *Oid) Equal(oid2 *Oid) bool {
	return *oid == *oid2
}
// IsZero reports whether every byte of the id is zero.
func (oid *Oid) IsZero() bool {
	return *oid == Oid{}
}
// NCmp compares only the first n bytes of each id.
func (oid *Oid) NCmp(oid2 *Oid, n uint) int {
	return bytes.Compare(oid[:n], oid2[:n])
}
// ShortenOids feeds every id in ids to a libgit2 oid shortener and
// returns the minimum unambiguous prefix length (at least minlen).
func ShortenOids(ids []*Oid, minlen int) (int, error) {
	shorten := C.git_oid_shorten_new(C.size_t(minlen))
	if shorten == nil {
		panic("Out of memory")
	}
	defer C.git_oid_shorten_free(shorten)
	var ret C.int
	// Pin the goroutine to its OS thread: the giterr_last state read
	// by MakeGitError is thread-local in libgit2.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	for _, id := range ids {
		buf := make([]byte, 41)
		// git_oid_fmt writes 40 hex chars without a terminator; add
		// the NUL that git_oid_shorten_add expects.
		C.git_oid_fmt((*C.char)(unsafe.Pointer(&buf[0])), id.toC())
		buf[40] = 0
		ret = C.git_oid_shorten_add(shorten, (*C.char)(unsafe.Pointer(&buf[0])))
		if ret < 0 {
			return int(ret), MakeGitError(ret)
		}
	}
	return int(ret), nil
}
// GitError is the error type returned for libgit2 failures, carrying
// the human-readable message plus the libgit2 error class and code.
type GitError struct {
	Message string
	Class ErrorClass
	Code ErrorCode
}
// Error implements the error interface by returning the message.
func (e GitError) Error() string {
	return e.Message
}
// IsErrorClass reports whether err is a *GitError of class c.
// A nil error (or a non-GitError) is never of any class.
func IsErrorClass(err error, c ErrorClass) bool {
	gitError, ok := err.(*GitError)
	return ok && gitError.Class == c
}
// IsErrorCode reports whether err is a *GitError carrying code c.
// A nil error (or a non-GitError) never matches.
func IsErrorCode(err error, c ErrorCode) bool {
	gitError, ok := err.(*GitError)
	return ok && gitError.Code == c
}
// MakeGitError builds a *GitError for a libgit2 return code. For any
// code other than GIT_ITEROVER it consults giterr_last for the message
// and class. NOTE(review): giterr_last is thread-local, so callers are
// expected to invoke this on the same locked OS thread as the failing
// libgit2 call.
func MakeGitError(errorCode C.int) error {
	var errMessage string
	var errClass ErrorClass
	if errorCode != C.GIT_ITEROVER {
		err := C.giterr_last()
		if err != nil {
			errMessage = C.GoString(err.message)
			errClass = ErrorClass(err.klass)
		} else {
			errClass = ErrClassInvalid
		}
	}
	return &GitError{errMessage, errClass, ErrorCode(errorCode)}
}
// MakeGitError2 is a convenience wrapper taking a plain Go int code.
func MakeGitError2(err int) error {
	return MakeGitError(C.int(err))
}
// cbool converts a Go bool to a C int (1 for true, 0 for false).
func cbool(b bool) C.int {
	if !b {
		return C.int(0)
	}
	return C.int(1)
}
// ucbool converts a Go bool to a C uint (1 for true, 0 for false).
func ucbool(b bool) C.uint {
	if !b {
		return C.uint(0)
	}
	return C.uint(1)
}
// Discover walks up from start looking for a git repository, wrapping
// git_repository_discover. across_fs controls whether the search may
// cross filesystem boundaries; ceiling_dirs lists paths at which the
// walk stops. On success it returns the discovered repository path.
func Discover(start string, across_fs bool, ceiling_dirs []string) (string, error) {
	ceildirs := C.CString(strings.Join(ceiling_dirs, string(C.GIT_PATH_LIST_SEPARATOR)))
	defer C.free(unsafe.Pointer(ceildirs))
	cstart := C.CString(start)
	defer C.free(unsafe.Pointer(cstart))
	var buf C.git_buf
	defer C.git_buf_free(&buf)
	// Keep giterr_last on the same thread as the failing call.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ret := C.git_repository_discover(&buf, cstart, cbool(across_fs), ceildirs)
	if ret < 0 {
		return "", MakeGitError(ret)
	}
	return C.GoString(buf.ptr), nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.